diff --git a/sys/dev/ath/if_ath_tx.c b/sys/dev/ath/if_ath_tx.c index 1fd3adef738e..69d0b5c00848 100644 --- a/sys/dev/ath/if_ath_tx.c +++ b/sys/dev/ath/if_ath_tx.c @@ -1,6414 +1,6414 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting * Copyright (c) 2010-2012 Adrian Chadd, Xenion Pty Ltd * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ #include /* * Driver for the Atheros Wireless LAN controller. * * This software is derived from work of Atsushi Onoe; his contribution * is greatly appreciated. */ #include "opt_inet.h" #include "opt_ath.h" #include "opt_wlan.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef IEEE80211_SUPPORT_SUPERG #include #endif #ifdef IEEE80211_SUPPORT_TDMA #include #endif #include #include #ifdef INET #include #include #endif #include #include /* XXX for softled */ #include #include #ifdef ATH_TX99_DIAG #include #endif #include #include #include #ifdef ATH_DEBUG_ALQ #include #endif /* * How many retries to perform in software */ #define SWMAX_RETRIES 10 /* * What queue to throw the non-QoS TID traffic into */ #define ATH_NONQOS_TID_AC WME_AC_VO #if 0 static int ath_tx_node_is_asleep(struct ath_softc *sc, struct ath_node *an); #endif static int ath_tx_ampdu_pending(struct ath_softc *sc, struct ath_node *an, int tid); static int ath_tx_ampdu_running(struct ath_softc *sc, struct ath_node *an, int tid); static ieee80211_seq ath_tx_tid_seqno_assign(struct ath_softc *sc, struct ieee80211_node *ni, struct ath_buf *bf, struct mbuf *m0); static int ath_tx_action_frame_override_queue(struct ath_softc *sc, struct ieee80211_node *ni, struct mbuf *m0, int *tid); static struct ath_buf * ath_tx_retry_clone(struct ath_softc *sc, struct ath_node *an, struct ath_tid *tid, struct ath_buf *bf); #ifdef ATH_DEBUG_ALQ void ath_tx_alq_post(struct ath_softc *sc, struct ath_buf *bf_first) { struct ath_buf *bf; int i, n; const char *ds; /* XXX we should skip out early if debugging isn't enabled! 
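 * A guard along these lines would likely do it - it is the same check
 * the caller in ath_legacy_xmit_handoff() already performs before
 * invoking this routine:
 *
 *	if (! if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC))
 *		return;
 *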
*/ bf = bf_first; while (bf != NULL) { /* XXX should ensure bf_nseg > 0! */ if (bf->bf_nseg == 0) break; n = ((bf->bf_nseg - 1) / sc->sc_tx_nmaps) + 1; for (i = 0, ds = (const char *) bf->bf_desc; i < n; i++, ds += sc->sc_tx_desclen) { if_ath_alq_post(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC, sc->sc_tx_desclen, ds); } bf = bf->bf_next; } } #endif /* ATH_DEBUG_ALQ */ /* * Whether to use the 11n rate scenario functions or not */ static inline int ath_tx_is_11n(struct ath_softc *sc) { return ((sc->sc_ah->ah_magic == 0x20065416) || (sc->sc_ah->ah_magic == 0x19741014)); } /* * Obtain the current TID from the given frame. * * Non-QoS frames get mapped to a TID so frames consistently * go on a sensible queue. */ static int ath_tx_gettid(struct ath_softc *sc, const struct mbuf *m0) { const struct ieee80211_frame *wh; wh = mtod(m0, const struct ieee80211_frame *); /* Non-QoS: map frame to a TID queue for software queueing */ if (! IEEE80211_QOS_HAS_SEQ(wh)) return (WME_AC_TO_TID(M_WME_GETAC(m0))); /* QoS - fetch the TID from the header, ignore mbuf WME */ return (ieee80211_gettid(wh)); } static void ath_tx_set_retry(struct ath_softc *sc, struct ath_buf *bf) { struct ieee80211_frame *wh; wh = mtod(bf->bf_m, struct ieee80211_frame *); /* Only update/resync if needed */ if (bf->bf_state.bfs_isretried == 0) { wh->i_fc[1] |= IEEE80211_FC1_RETRY; bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE); } bf->bf_state.bfs_isretried = 1; bf->bf_state.bfs_retries ++; } /* * Determine what the correct AC queue for the given frame * should be. * * For QoS frames, obey the TID. That way things like * management frames that are related to a given TID * are thus serialised with the rest of the TID traffic, * regardless of net80211 overriding priority. * * For non-QoS frames, return the mbuf WMI priority. * * This has implications that higher priority non-QoS traffic * may end up being scheduled before other non-QoS traffic, * leading to out-of-sequence packets being emitted. * * (It'd be nice to log/count this so we can see if it * really is a problem.) * * TODO: maybe we should throw multicast traffic, QoS or * otherwise, into a separate TX queue? */ static int ath_tx_getac(struct ath_softc *sc, const struct mbuf *m0) { const struct ieee80211_frame *wh; wh = mtod(m0, const struct ieee80211_frame *); /* * QoS data frame (sequence number or otherwise) - * return hardware queue mapping for the underlying * TID. */ if (IEEE80211_QOS_HAS_SEQ(wh)) return TID_TO_WME_AC(ieee80211_gettid(wh)); /* * Otherwise - return mbuf QoS pri. */ return (M_WME_GETAC(m0)); } void ath_txfrag_cleanup(struct ath_softc *sc, ath_bufhead *frags, struct ieee80211_node *ni) { struct ath_buf *bf, *next; ATH_TXBUF_LOCK_ASSERT(sc); TAILQ_FOREACH_SAFE(bf, frags, bf_list, next) { /* NB: bf assumed clean */ TAILQ_REMOVE(frags, bf, bf_list); ath_returnbuf_head(sc, bf); ieee80211_node_decref(ni); } } /* * Setup xmit of a fragmented frame. Allocate a buffer * for each frag and bump the node reference count to * reflect the held reference to be setup by ath_tx_start. */ int ath_txfrag_setup(struct ath_softc *sc, ath_bufhead *frags, struct mbuf *m0, struct ieee80211_node *ni) { struct mbuf *m; struct ath_buf *bf; ATH_TXBUF_LOCK(sc); for (m = m0->m_nextpkt; m != NULL; m = m->m_nextpkt) { /* XXX non-management? 
*/ bf = _ath_getbuf_locked(sc, ATH_BUFTYPE_NORMAL); if (bf == NULL) { /* out of buffers, cleanup */ DPRINTF(sc, ATH_DEBUG_XMIT, "%s: no buffer?\n", __func__); ath_txfrag_cleanup(sc, frags, ni); break; } - ieee80211_node_incref(ni); + (void) ieee80211_ref_node(ni); TAILQ_INSERT_TAIL(frags, bf, bf_list); } ATH_TXBUF_UNLOCK(sc); return !TAILQ_EMPTY(frags); } static int ath_tx_dmasetup(struct ath_softc *sc, struct ath_buf *bf, struct mbuf *m0) { struct mbuf *m; int error; /* * Load the DMA map so any coalescing is done. This * also calculates the number of descriptors we need. */ error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0, bf->bf_segs, &bf->bf_nseg, BUS_DMA_NOWAIT); if (error == EFBIG) { /* XXX packet requires too many descriptors */ bf->bf_nseg = ATH_MAX_SCATTER + 1; } else if (error != 0) { sc->sc_stats.ast_tx_busdma++; ieee80211_free_mbuf(m0); return error; } /* * Discard null packets and check for packets that * require too many TX descriptors. We try to convert * the latter to a cluster. */ if (bf->bf_nseg > ATH_MAX_SCATTER) { /* too many desc's, linearize */ sc->sc_stats.ast_tx_linear++; m = m_collapse(m0, M_NOWAIT, ATH_MAX_SCATTER); if (m == NULL) { ieee80211_free_mbuf(m0); sc->sc_stats.ast_tx_nombuf++; return ENOMEM; } m0 = m; error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0, bf->bf_segs, &bf->bf_nseg, BUS_DMA_NOWAIT); if (error != 0) { sc->sc_stats.ast_tx_busdma++; ieee80211_free_mbuf(m0); return error; } KASSERT(bf->bf_nseg <= ATH_MAX_SCATTER, ("too many segments after defrag; nseg %u", bf->bf_nseg)); } else if (bf->bf_nseg == 0) { /* null packet, discard */ sc->sc_stats.ast_tx_nodata++; ieee80211_free_mbuf(m0); return EIO; } DPRINTF(sc, ATH_DEBUG_XMIT, "%s: m %p len %u\n", __func__, m0, m0->m_pkthdr.len); bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE); bf->bf_m = m0; return 0; } /* * Chain together segments+descriptors for a frame - 11n or otherwise. * * For aggregates, this is called on each frame in the aggregate. */ static void ath_tx_chaindesclist(struct ath_softc *sc, struct ath_desc *ds0, struct ath_buf *bf, bool is_aggr, int is_first_subframe, int is_last_subframe) { struct ath_hal *ah = sc->sc_ah; char *ds; int i, bp, dsp; HAL_DMA_ADDR bufAddrList[4]; uint32_t segLenList[4]; int numTxMaps = 1; int isFirstDesc = 1; /* * XXX There's txdma and txdma_mgmt; the descriptor * sizes must match. */ struct ath_descdma *dd = &sc->sc_txdma; /* * Fillin the remainder of the descriptor info. */ /* * We need the number of TX data pointers in each descriptor. * EDMA and later chips support 4 TX buffers per descriptor; * previous chips just support one. */ numTxMaps = sc->sc_tx_nmaps; /* * For EDMA and later chips ensure the TX map is fully populated * before advancing to the next descriptor. */ ds = (char *) bf->bf_desc; bp = dsp = 0; bzero(bufAddrList, sizeof(bufAddrList)); bzero(segLenList, sizeof(segLenList)); for (i = 0; i < bf->bf_nseg; i++) { bufAddrList[bp] = bf->bf_segs[i].ds_addr; segLenList[bp] = bf->bf_segs[i].ds_len; bp++; /* * Go to the next segment if this isn't the last segment * and there's space in the current TX map. */ if ((i != bf->bf_nseg - 1) && (bp < numTxMaps)) continue; /* * Last segment or we're out of buffer pointers. */ bp = 0; if (i == bf->bf_nseg - 1) ath_hal_settxdesclink(ah, (struct ath_desc *) ds, 0); else ath_hal_settxdesclink(ah, (struct ath_desc *) ds, bf->bf_daddr + dd->dd_descsize * (dsp + 1)); /* * XXX This assumes that bfs_txq is the actual destination * hardware queue at this point. 
It may not have been * assigned, it may actually be pointing to the multicast * software TXQ id. These must be fixed! */ ath_hal_filltxdesc(ah, (struct ath_desc *) ds , bufAddrList , segLenList , bf->bf_descid /* XXX desc id */ , bf->bf_state.bfs_tx_queue , isFirstDesc /* first segment */ , i == bf->bf_nseg - 1 /* last segment */ , (struct ath_desc *) ds0 /* first descriptor */ ); /* * Make sure the 11n aggregate fields are cleared. * * XXX TODO: this doesn't need to be called for * aggregate frames; as it'll be called on all * sub-frames. Since the descriptors are in * non-cacheable memory, this leads to some * rather slow writes on MIPS/ARM platforms. */ if (ath_tx_is_11n(sc)) ath_hal_clr11n_aggr(sc->sc_ah, (struct ath_desc *) ds); /* * If 11n is enabled, set it up as if it's an aggregate * frame. */ if (is_last_subframe) { ath_hal_set11n_aggr_last(sc->sc_ah, (struct ath_desc *) ds); } else if (is_aggr) { /* * This clears the aggrlen field; so * the caller needs to call set_aggr_first()! * * XXX TODO: don't call this for the first * descriptor in the first frame in an * aggregate! */ ath_hal_set11n_aggr_middle(sc->sc_ah, (struct ath_desc *) ds, bf->bf_state.bfs_ndelim); } isFirstDesc = 0; bf->bf_lastds = (struct ath_desc *) ds; /* * Don't forget to skip to the next descriptor. */ ds += sc->sc_tx_desclen; dsp++; /* * .. and don't forget to blank these out! */ bzero(bufAddrList, sizeof(bufAddrList)); bzero(segLenList, sizeof(segLenList)); } bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE); } /* * Set the rate control fields in the given descriptor based on * the bf_state fields and node state. * * The bfs fields should already be set with the relevant rate * control information, including whether MRR is to be enabled. * * Since the FreeBSD HAL currently sets up the first TX rate * in ath_hal_setuptxdesc(), this will setup the MRR * conditionally for the pre-11n chips, and call ath_buf_set_rate * unconditionally for 11n chips. These require the 11n rate * scenario to be set if MCS rates are enabled, so it's easier * to just always call it. The caller can then only set rates 2, 3 * and 4 if multi-rate retry is needed. */ static void ath_tx_set_ratectrl(struct ath_softc *sc, struct ieee80211_node *ni, struct ath_buf *bf) { struct ath_rc_series *rc = bf->bf_state.bfs_rc; /* If mrr is disabled, blank tries 1, 2, 3 */ if (! bf->bf_state.bfs_ismrr) rc[1].tries = rc[2].tries = rc[3].tries = 0; #if 0 /* * If NOACK is set, just set ntries=1. */ else if (bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) { rc[1].tries = rc[2].tries = rc[3].tries = 0; rc[0].tries = 1; } #endif /* * Always call - that way a retried descriptor will * have the MRR fields overwritten. * * XXX TODO: see if this is really needed - setting up * the first descriptor should set the MRR fields to 0 * for us anyway. */ if (ath_tx_is_11n(sc)) { ath_buf_set_rate(sc, ni, bf); } else { ath_hal_setupxtxdesc(sc->sc_ah, bf->bf_desc , rc[1].ratecode, rc[1].tries , rc[2].ratecode, rc[2].tries , rc[3].ratecode, rc[3].tries ); } } /* * Setup segments+descriptors for an 11n aggregate. * bf_first is the first buffer in the aggregate. * The descriptor list must already been linked together using * bf->bf_next. 
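 *
 * A rough caller-side sketch (illustrative only; the aggregate formation
 * code elsewhere in this file is what actually builds the list):
 *
 *	bf_first->bf_next = bf2;
 *	bf2->bf_next = bf3;
 *	bf3->bf_next = NULL;
 *	bf_first->bf_state.bfs_nframes = 3;
 *	bf_first->bf_state.bfs_al = total_aggr_length;	(placeholder name)
 *	ath_tx_setds_11n(sc, bf_first);
 *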
*/ static void ath_tx_setds_11n(struct ath_softc *sc, struct ath_buf *bf_first) { struct ath_buf *bf, *bf_prev = NULL; struct ath_desc *ds0 = bf_first->bf_desc; DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: nframes=%d, al=%d\n", __func__, bf_first->bf_state.bfs_nframes, bf_first->bf_state.bfs_al); bf = bf_first; if (bf->bf_state.bfs_txrate0 == 0) DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: bf=%p, txrate0=%d\n", __func__, bf, 0); if (bf->bf_state.bfs_rc[0].ratecode == 0) DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: bf=%p, rix0=%d\n", __func__, bf, 0); /* * Setup all descriptors of all subframes - this will * call ath_hal_set11naggrmiddle() on every frame. */ while (bf != NULL) { DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: bf=%p, nseg=%d, pktlen=%d, seqno=%d\n", __func__, bf, bf->bf_nseg, bf->bf_state.bfs_pktlen, SEQNO(bf->bf_state.bfs_seqno)); /* * Setup the initial fields for the first descriptor - all * the non-11n specific stuff. */ ath_hal_setuptxdesc(sc->sc_ah, bf->bf_desc , bf->bf_state.bfs_pktlen /* packet length */ , bf->bf_state.bfs_hdrlen /* header length */ , bf->bf_state.bfs_atype /* Atheros packet type */ , bf->bf_state.bfs_txpower /* txpower */ , bf->bf_state.bfs_txrate0 , bf->bf_state.bfs_try0 /* series 0 rate/tries */ , bf->bf_state.bfs_keyix /* key cache index */ , bf->bf_state.bfs_txantenna /* antenna mode */ , bf->bf_state.bfs_txflags | HAL_TXDESC_INTREQ /* flags */ , bf->bf_state.bfs_ctsrate /* rts/cts rate */ , bf->bf_state.bfs_ctsduration /* rts/cts duration */ ); /* * First descriptor? Setup the rate control and initial * aggregate header information. */ if (bf == bf_first) { /* * setup first desc with rate and aggr info */ ath_tx_set_ratectrl(sc, bf->bf_node, bf); } /* * Setup the descriptors for a multi-descriptor frame. * This is both aggregate and non-aggregate aware. */ ath_tx_chaindesclist(sc, ds0, bf, 1, /* is_aggr */ !! (bf == bf_first), /* is_first_subframe */ !! (bf->bf_next == NULL) /* is_last_subframe */ ); if (bf == bf_first) { /* * Initialise the first 11n aggregate with the * aggregate length and aggregate enable bits. */ ath_hal_set11n_aggr_first(sc->sc_ah, ds0, bf->bf_state.bfs_al, bf->bf_state.bfs_ndelim); } /* * Link the last descriptor of the previous frame * to the beginning descriptor of this frame. */ if (bf_prev != NULL) ath_hal_settxdesclink(sc->sc_ah, bf_prev->bf_lastds, bf->bf_daddr); /* Save a copy so we can link the next descriptor in */ bf_prev = bf; bf = bf->bf_next; } /* * Set the first descriptor bf_lastds field to point to * the last descriptor in the last subframe, that's where * the status update will occur. */ bf_first->bf_lastds = bf_prev->bf_lastds; /* * And bf_last in the first descriptor points to the end of * the aggregate list. */ bf_first->bf_last = bf_prev; /* * For non-AR9300 NICs, which require the rate control * in the final descriptor - let's set that up now. * * This is because the filltxdesc() HAL call doesn't * populate the last segment with rate control information * if firstSeg is also true. For non-aggregate frames * that is fine, as the first frame already has rate control * info. But if the last frame in an aggregate has one * descriptor, both firstseg and lastseg will be true and * the rate info isn't copied. * * This is inefficient on MIPS/ARM platforms that have * non-cachable memory for TX descriptors, but we'll just * make do for now. * * As to why the rate table is stashed in the last descriptor * rather than the first descriptor? 
Because proctxdesc() * is called on the final descriptor in an MPDU or A-MPDU - * ie, the one that gets updated by the hardware upon * completion. That way proctxdesc() doesn't need to know * about the first _and_ last TX descriptor. */ ath_hal_setuplasttxdesc(sc->sc_ah, bf_prev->bf_lastds, ds0); DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: end\n", __func__); } /* * Hand-off a frame to the multicast TX queue. * * This is a software TXQ which will be appended to the CAB queue * during the beacon setup code. * * XXX TODO: since the AR9300 EDMA TX queue support wants the QCU ID * as part of the TX descriptor, bf_state.bfs_tx_queue must be updated * with the actual hardware txq, or all of this will fall apart. * * XXX It may not be a bad idea to just stuff the QCU ID into bf_state * and retire bfs_tx_queue; then make sure the CABQ QCU ID is populated * correctly. */ static void ath_tx_handoff_mcast(struct ath_softc *sc, struct ath_txq *txq, struct ath_buf *bf) { ATH_TX_LOCK_ASSERT(sc); KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0, ("%s: busy status 0x%x", __func__, bf->bf_flags)); /* * Ensure that the tx queue is the cabq, so things get * mapped correctly. */ if (bf->bf_state.bfs_tx_queue != sc->sc_cabq->axq_qnum) { DPRINTF(sc, ATH_DEBUG_XMIT, "%s: bf=%p, bfs_tx_queue=%d, axq_qnum=%d\n", __func__, bf, bf->bf_state.bfs_tx_queue, txq->axq_qnum); } ATH_TXQ_LOCK(txq); if (ATH_TXQ_LAST(txq, axq_q_s) != NULL) { struct ath_buf *bf_last = ATH_TXQ_LAST(txq, axq_q_s); struct ieee80211_frame *wh; /* mark previous frame */ wh = mtod(bf_last->bf_m, struct ieee80211_frame *); wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA; bus_dmamap_sync(sc->sc_dmat, bf_last->bf_dmamap, BUS_DMASYNC_PREWRITE); /* link descriptor */ ath_hal_settxdesclink(sc->sc_ah, bf_last->bf_lastds, bf->bf_daddr); } ATH_TXQ_INSERT_TAIL(txq, bf, bf_list); ATH_TXQ_UNLOCK(txq); } /* * Hand-off packet to a hardware queue. */ static void ath_tx_handoff_hw(struct ath_softc *sc, struct ath_txq *txq, struct ath_buf *bf) { struct ath_hal *ah = sc->sc_ah; struct ath_buf *bf_first; /* * Insert the frame on the outbound list and pass it on * to the hardware. Multicast frames buffered for power * save stations and transmit from the CAB queue are stored * on a s/w only queue and loaded on to the CAB queue in * the SWBA handler since frames only go out on DTIM and * to avoid possible races. */ ATH_TX_LOCK_ASSERT(sc); KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0, ("%s: busy status 0x%x", __func__, bf->bf_flags)); KASSERT(txq->axq_qnum != ATH_TXQ_SWQ, ("ath_tx_handoff_hw called for mcast queue")); /* * XXX We should instead just verify that sc_txstart_cnt * or ath_txproc_cnt > 0. That would mean that * the reset is going to be waiting for us to complete. */ if (sc->sc_txproc_cnt == 0 && sc->sc_txstart_cnt == 0) { device_printf(sc->sc_dev, "%s: TX dispatch without holding txcount/txstart refcnt!\n", __func__); } /* * XXX .. this is going to cause the hardware to get upset; * so we really should find some way to drop or queue * things. */ ATH_TXQ_LOCK(txq); /* * XXX TODO: if there's a holdingbf, then * ATH_TXQ_PUTRUNNING should be clear. * * If there is a holdingbf and the list is empty, * then axq_link should be pointing to the holdingbf. * * Otherwise it should point to the last descriptor * in the last ath_buf. * * In any case, we should really ensure that we * update the previous descriptor link pointer to * this descriptor, regardless of all of the above state. 
* * For now this is captured by having axq_link point * to either the holdingbf (if the TXQ list is empty) * or the end of the list (if the TXQ list isn't empty.) * I'd rather just kill axq_link here and do it as above. */ /* * Append the frame to the TX queue. */ ATH_TXQ_INSERT_TAIL(txq, bf, bf_list); ATH_KTR(sc, ATH_KTR_TX, 3, "ath_tx_handoff: non-tdma: txq=%u, add bf=%p " "depth=%d", txq->axq_qnum, bf, txq->axq_depth); /* * If there's a link pointer, update it. * * XXX we should replace this with the above logic, just * to kill axq_link with fire. */ if (txq->axq_link != NULL) { *txq->axq_link = bf->bf_daddr; DPRINTF(sc, ATH_DEBUG_XMIT, "%s: link[%u](%p)=%p (%p) depth %d\n", __func__, txq->axq_qnum, txq->axq_link, (caddr_t)bf->bf_daddr, bf->bf_desc, txq->axq_depth); ATH_KTR(sc, ATH_KTR_TX, 5, "ath_tx_handoff: non-tdma: link[%u](%p)=%p (%p) " "lastds=%d", txq->axq_qnum, txq->axq_link, (caddr_t)bf->bf_daddr, bf->bf_desc, bf->bf_lastds); } /* * If we've not pushed anything into the hardware yet, * push the head of the queue into the TxDP. * * Once we've started DMA, there's no guarantee that * updating the TxDP with a new value will actually work. * So we just don't do that - if we hit the end of the list, * we keep that buffer around (the "holding buffer") and * re-start DMA by updating the link pointer of _that_ * descriptor and then restart DMA. */ if (! (txq->axq_flags & ATH_TXQ_PUTRUNNING)) { bf_first = TAILQ_FIRST(&txq->axq_q); txq->axq_flags |= ATH_TXQ_PUTRUNNING; ath_hal_puttxbuf(ah, txq->axq_qnum, bf_first->bf_daddr); DPRINTF(sc, ATH_DEBUG_XMIT, "%s: TXDP[%u] = %p (%p) depth %d\n", __func__, txq->axq_qnum, (caddr_t)bf_first->bf_daddr, bf_first->bf_desc, txq->axq_depth); ATH_KTR(sc, ATH_KTR_TX, 5, "ath_tx_handoff: TXDP[%u] = %p (%p) " "lastds=%p depth %d", txq->axq_qnum, (caddr_t)bf_first->bf_daddr, bf_first->bf_desc, bf_first->bf_lastds, txq->axq_depth); } /* * Ensure that the bf TXQ matches this TXQ, so later * checking and holding buffer manipulation is sane. */ if (bf->bf_state.bfs_tx_queue != txq->axq_qnum) { DPRINTF(sc, ATH_DEBUG_XMIT, "%s: bf=%p, bfs_tx_queue=%d, axq_qnum=%d\n", __func__, bf, bf->bf_state.bfs_tx_queue, txq->axq_qnum); } /* * Track aggregate queue depth. */ if (bf->bf_state.bfs_aggr) txq->axq_aggr_depth++; /* * Update the link pointer. */ ath_hal_gettxdesclinkptr(ah, bf->bf_lastds, &txq->axq_link); /* * Start DMA. * * If we wrote a TxDP above, DMA will start from here. * * If DMA is running, it'll do nothing. * * If the DMA engine hit the end of the QCU list (ie LINK=NULL, * or VEOL) then it stops at the last transmitted write. * We then append a new frame by updating the link pointer * in that descriptor and then kick TxE here; it will re-read * that last descriptor and find the new descriptor to transmit. * * This is why we keep the holding descriptor around. */ ath_hal_txstart(ah, txq->axq_qnum); ATH_TXQ_UNLOCK(txq); ATH_KTR(sc, ATH_KTR_TX, 1, "ath_tx_handoff: txq=%u, txstart", txq->axq_qnum); } /* * Restart TX DMA for the given TXQ. * * This must be called whether the queue is empty or not. 
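 * Callers are expected to hold the TXQ lock (the routine asserts it), so
 * a restart - eg from the reset path - looks roughly like:
 *
 *	ATH_TXQ_LOCK(txq);
 *	ath_legacy_tx_dma_restart(sc, txq);
 *	ATH_TXQ_UNLOCK(txq);
 *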
*/ static void ath_legacy_tx_dma_restart(struct ath_softc *sc, struct ath_txq *txq) { struct ath_buf *bf, *bf_last; ATH_TXQ_LOCK_ASSERT(txq); /* XXX make this ATH_TXQ_FIRST */ bf = TAILQ_FIRST(&txq->axq_q); bf_last = ATH_TXQ_LAST(txq, axq_q_s); if (bf == NULL) return; DPRINTF(sc, ATH_DEBUG_RESET, "%s: Q%d: bf=%p, bf_last=%p, daddr=0x%08x\n", __func__, txq->axq_qnum, bf, bf_last, (uint32_t) bf->bf_daddr); #ifdef ATH_DEBUG if (sc->sc_debug & ATH_DEBUG_RESET) ath_tx_dump(sc, txq); #endif /* * This is called from a restart, so DMA is known to be * completely stopped. */ KASSERT((!(txq->axq_flags & ATH_TXQ_PUTRUNNING)), ("%s: Q%d: called with PUTRUNNING=1\n", __func__, txq->axq_qnum)); ath_hal_puttxbuf(sc->sc_ah, txq->axq_qnum, bf->bf_daddr); txq->axq_flags |= ATH_TXQ_PUTRUNNING; ath_hal_gettxdesclinkptr(sc->sc_ah, bf_last->bf_lastds, &txq->axq_link); ath_hal_txstart(sc->sc_ah, txq->axq_qnum); } /* * Hand off a packet to the hardware (or mcast queue.) * * The relevant hardware txq should be locked. */ static void ath_legacy_xmit_handoff(struct ath_softc *sc, struct ath_txq *txq, struct ath_buf *bf) { ATH_TX_LOCK_ASSERT(sc); #ifdef ATH_DEBUG_ALQ if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC)) ath_tx_alq_post(sc, bf); #endif if (txq->axq_qnum == ATH_TXQ_SWQ) ath_tx_handoff_mcast(sc, txq, bf); else ath_tx_handoff_hw(sc, txq, bf); } static int ath_tx_tag_crypto(struct ath_softc *sc, struct ieee80211_node *ni, struct mbuf *m0, int iswep, int isfrag, int *hdrlen, int *pktlen, int *keyix) { DPRINTF(sc, ATH_DEBUG_XMIT, "%s: hdrlen=%d, pktlen=%d, isfrag=%d, iswep=%d, m0=%p\n", __func__, *hdrlen, *pktlen, isfrag, iswep, m0); if (iswep) { const struct ieee80211_cipher *cip; struct ieee80211_key *k; /* * Construct the 802.11 header+trailer for an encrypted * frame. The only reason this can fail is because of an * unknown or unsupported cipher/key type. */ k = ieee80211_crypto_encap(ni, m0); if (k == NULL) { /* * This can happen when the key is yanked after the * frame was queued. Just discard the frame; the * 802.11 layer counts failures and provides * debugging/diagnostics. */ return (0); } /* * Adjust the packet + header lengths for the crypto * additions and calculate the h/w key index. When * a s/w mic is done the frame will have had any mic * added to it prior to entry so m0->m_pkthdr.len will * account for it. Otherwise we need to add it to the * packet length. */ cip = k->wk_cipher; (*hdrlen) += cip->ic_header; (*pktlen) += cip->ic_header + cip->ic_trailer; /* NB: frags always have any TKIP MIC done in s/w */ if ((k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && !isfrag) (*pktlen) += cip->ic_miclen; (*keyix) = k->wk_keyix; } else if (ni->ni_ucastkey.wk_cipher == &ieee80211_cipher_none) { /* * Use station key cache slot, if assigned. */ (*keyix) = ni->ni_ucastkey.wk_keyix; if ((*keyix) == IEEE80211_KEYIX_NONE) (*keyix) = HAL_TXKEYIX_INVALID; } else (*keyix) = HAL_TXKEYIX_INVALID; return (1); } /* * Calculate whether interoperability protection is required for * this frame. * * This requires the rate control information be filled in, * as the protection requirement depends upon the current * operating mode / PHY. 
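 *
 * In rough summary, the logic below amounts to:
 *
 *	TOA probe frames:	no protection at all
 *	ACKed OFDM frames, IEEE80211_F_USEPROT set:
 *		IEEE80211_PROT_RTSCTS	-> HAL_TXDESC_RTSENA
 *		IEEE80211_PROT_CTSONLY	-> HAL_TXDESC_CTSENA
 *	ACKed HT frames, ic_htprotmode == IEEE80211_PROT_RTSCTS:
 *		-> HAL_TXDESC_RTSENA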
*/ static void ath_tx_calc_protection(struct ath_softc *sc, struct ath_buf *bf) { struct ieee80211_frame *wh; uint8_t rix; uint16_t flags; int shortPreamble; const HAL_RATE_TABLE *rt = sc->sc_currates; struct ieee80211com *ic = &sc->sc_ic; flags = bf->bf_state.bfs_txflags; rix = bf->bf_state.bfs_rc[0].rix; shortPreamble = bf->bf_state.bfs_shpream; wh = mtod(bf->bf_m, struct ieee80211_frame *); /* Disable frame protection for TOA probe frames */ if (bf->bf_flags & ATH_BUF_TOA_PROBE) { /* XXX count */ flags &= ~(HAL_TXDESC_CTSENA | HAL_TXDESC_RTSENA); bf->bf_state.bfs_doprot = 0; goto finish; } /* * If 802.11g protection is enabled, determine whether * to use RTS/CTS or just CTS. Note that this is only * done for OFDM unicast frames. */ if ((ic->ic_flags & IEEE80211_F_USEPROT) && rt->info[rix].phy == IEEE80211_T_OFDM && (flags & HAL_TXDESC_NOACK) == 0) { bf->bf_state.bfs_doprot = 1; /* XXX fragments must use CCK rates w/ protection */ if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) { flags |= HAL_TXDESC_RTSENA; } else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) { flags |= HAL_TXDESC_CTSENA; } /* * For frags it would be desirable to use the * highest CCK rate for RTS/CTS. But stations * farther away may detect it at a lower CCK rate * so use the configured protection rate instead * (for now). */ sc->sc_stats.ast_tx_protect++; } /* * If 11n protection is enabled and it's a HT frame, * enable RTS. * * XXX ic_htprotmode or ic_curhtprotmode? * XXX should it_htprotmode only matter if ic_curhtprotmode * XXX indicates it's not a HT pure environment? */ if ((ic->ic_htprotmode == IEEE80211_PROT_RTSCTS) && rt->info[rix].phy == IEEE80211_T_HT && (flags & HAL_TXDESC_NOACK) == 0) { flags |= HAL_TXDESC_RTSENA; sc->sc_stats.ast_tx_htprotect++; } finish: bf->bf_state.bfs_txflags = flags; } /* * Update the frame duration given the currently selected rate. * * This also updates the frame duration value, so it will require * a DMA flush. */ static void ath_tx_calc_duration(struct ath_softc *sc, struct ath_buf *bf) { struct ieee80211_frame *wh; uint8_t rix; uint16_t flags; int shortPreamble; struct ath_hal *ah = sc->sc_ah; const HAL_RATE_TABLE *rt = sc->sc_currates; int isfrag = bf->bf_m->m_flags & M_FRAG; flags = bf->bf_state.bfs_txflags; rix = bf->bf_state.bfs_rc[0].rix; shortPreamble = bf->bf_state.bfs_shpream; wh = mtod(bf->bf_m, struct ieee80211_frame *); /* * Calculate duration. This logically belongs in the 802.11 * layer but it lacks sufficient information to calculate it. */ if ((flags & HAL_TXDESC_NOACK) == 0 && (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL) { u_int16_t dur; if (shortPreamble) dur = rt->info[rix].spAckDuration; else dur = rt->info[rix].lpAckDuration; if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) { dur += dur; /* additional SIFS+ACK */ /* * Include the size of next fragment so NAV is * updated properly. The last fragment uses only * the ACK duration * * XXX TODO: ensure that the rate lookup for each * fragment is the same as the rate used by the * first fragment! */ dur += ath_hal_computetxtime(ah, rt, bf->bf_nextfraglen, rix, shortPreamble, AH_TRUE); } if (isfrag) { /* * Force hardware to use computed duration for next * fragment by disabling multi-rate retry which updates * duration based on the multi-rate duration table. */ bf->bf_state.bfs_ismrr = 0; bf->bf_state.bfs_try0 = ATH_TXMGTTRY; /* XXX update bfs_rc[0].try? 
*/ } /* Update the duration field itself */ *(u_int16_t *)wh->i_dur = htole16(dur); } } static uint8_t ath_tx_get_rtscts_rate(struct ath_hal *ah, const HAL_RATE_TABLE *rt, int cix, int shortPreamble) { uint8_t ctsrate; /* * CTS transmit rate is derived from the transmit rate * by looking in the h/w rate table. We must also factor * in whether or not a short preamble is to be used. */ /* NB: cix is set above where RTS/CTS is enabled */ KASSERT(cix != 0xff, ("cix not setup")); ctsrate = rt->info[cix].rateCode; /* XXX this should only matter for legacy rates */ if (shortPreamble) ctsrate |= rt->info[cix].shortPreamble; return (ctsrate); } /* * Calculate the RTS/CTS duration for legacy frames. */ static int ath_tx_calc_ctsduration(struct ath_hal *ah, int rix, int cix, int shortPreamble, int pktlen, const HAL_RATE_TABLE *rt, int flags) { int ctsduration = 0; /* This mustn't be called for HT modes */ if (rt->info[cix].phy == IEEE80211_T_HT) { printf("%s: HT rate where it shouldn't be (0x%x)\n", __func__, rt->info[cix].rateCode); return (-1); } /* * Compute the transmit duration based on the frame * size and the size of an ACK frame. We call into the * HAL to do the computation since it depends on the * characteristics of the actual PHY being used. * * NB: CTS is assumed the same size as an ACK so we can * use the precalculated ACK durations. */ if (shortPreamble) { if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */ ctsduration += rt->info[cix].spAckDuration; ctsduration += ath_hal_computetxtime(ah, rt, pktlen, rix, AH_TRUE, AH_TRUE); if ((flags & HAL_TXDESC_NOACK) == 0) /* SIFS + ACK */ ctsduration += rt->info[rix].spAckDuration; } else { if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */ ctsduration += rt->info[cix].lpAckDuration; ctsduration += ath_hal_computetxtime(ah, rt, pktlen, rix, AH_FALSE, AH_TRUE); if ((flags & HAL_TXDESC_NOACK) == 0) /* SIFS + ACK */ ctsduration += rt->info[rix].lpAckDuration; } return (ctsduration); } /* * Update the given ath_buf with updated rts/cts setup and duration * values. * * To support rate lookups for each software retry, the rts/cts rate * and cts duration must be re-calculated. * * This function assumes the RTS/CTS flags have been set as needed; * mrr has been disabled; and the rate control lookup has been done. * * XXX TODO: MRR need only be disabled for the pre-11n NICs. * XXX The 11n NICs support per-rate RTS/CTS configuration. */ static void ath_tx_set_rtscts(struct ath_softc *sc, struct ath_buf *bf) { uint16_t ctsduration = 0; uint8_t ctsrate = 0; uint8_t rix = bf->bf_state.bfs_rc[0].rix; uint8_t cix = 0; const HAL_RATE_TABLE *rt = sc->sc_currates; /* * No RTS/CTS enabled? Don't bother. */ if ((bf->bf_state.bfs_txflags & (HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA)) == 0) { /* XXX is this really needed? */ bf->bf_state.bfs_ctsrate = 0; bf->bf_state.bfs_ctsduration = 0; return; } /* * If protection is enabled, use the protection rix control * rate. Otherwise use the rate0 control rate. */ if (bf->bf_state.bfs_doprot) rix = sc->sc_protrix; else rix = bf->bf_state.bfs_rc[0].rix; /* * If the raw path has hard-coded ctsrate0 to something, * use it. */ if (bf->bf_state.bfs_ctsrate0 != 0) cix = ath_tx_findrix(sc, bf->bf_state.bfs_ctsrate0); else /* Control rate from above */ cix = rt->info[rix].controlRate; /* Calculate the rtscts rate for the given cix */ ctsrate = ath_tx_get_rtscts_rate(sc->sc_ah, rt, cix, bf->bf_state.bfs_shpream); /* The 11n chipsets do ctsduration calculations for you */ if (! 
ath_tx_is_11n(sc)) ctsduration = ath_tx_calc_ctsduration(sc->sc_ah, rix, cix, bf->bf_state.bfs_shpream, bf->bf_state.bfs_pktlen, rt, bf->bf_state.bfs_txflags); /* Squirrel away in ath_buf */ bf->bf_state.bfs_ctsrate = ctsrate; bf->bf_state.bfs_ctsduration = ctsduration; /* * Must disable multi-rate retry when using RTS/CTS. */ if (!sc->sc_mrrprot) { bf->bf_state.bfs_ismrr = 0; bf->bf_state.bfs_try0 = bf->bf_state.bfs_rc[0].tries = ATH_TXMGTTRY; /* XXX ew */ } } /* * Setup the descriptor chain for a normal or fast-frame * frame. * * XXX TODO: extend to include the destination hardware QCU ID. * Make sure that is correct. Make sure that when being added * to the mcastq, the CABQ QCUID is set or things will get a bit * odd. */ static void ath_tx_setds(struct ath_softc *sc, struct ath_buf *bf) { struct ath_desc *ds = bf->bf_desc; struct ath_hal *ah = sc->sc_ah; if (bf->bf_state.bfs_txrate0 == 0) DPRINTF(sc, ATH_DEBUG_XMIT, "%s: bf=%p, txrate0=%d\n", __func__, bf, 0); ath_hal_setuptxdesc(ah, ds , bf->bf_state.bfs_pktlen /* packet length */ , bf->bf_state.bfs_hdrlen /* header length */ , bf->bf_state.bfs_atype /* Atheros packet type */ , bf->bf_state.bfs_txpower /* txpower */ , bf->bf_state.bfs_txrate0 , bf->bf_state.bfs_try0 /* series 0 rate/tries */ , bf->bf_state.bfs_keyix /* key cache index */ , bf->bf_state.bfs_txantenna /* antenna mode */ , bf->bf_state.bfs_txflags /* flags */ , bf->bf_state.bfs_ctsrate /* rts/cts rate */ , bf->bf_state.bfs_ctsduration /* rts/cts duration */ ); /* * This will be overridden when the descriptor chain is written. */ bf->bf_lastds = ds; bf->bf_last = bf; /* Set rate control and descriptor chain for this frame */ ath_tx_set_ratectrl(sc, bf->bf_node, bf); ath_tx_chaindesclist(sc, ds, bf, 0, 0, 0); } /* * Do a rate lookup. * * This performs a rate lookup for the given ath_buf only if it's required. * Non-data frames and raw frames don't require it. * * This populates the primary and MRR entries; MRR values are * then disabled later on if something requires it (eg RTS/CTS on * pre-11n chipsets. * * This needs to be done before the RTS/CTS fields are calculated * as they may depend upon the rate chosen. */ static void ath_tx_do_ratelookup(struct ath_softc *sc, struct ath_buf *bf, int tid, int pktlen, int is_aggr) { uint8_t rate, rix; int try0; int maxdur; // Note: Unused for now int maxpktlen; if (! bf->bf_state.bfs_doratelookup) return; /* Get rid of any previous state */ bzero(bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc)); ATH_NODE_LOCK(ATH_NODE(bf->bf_node)); ath_rate_findrate(sc, ATH_NODE(bf->bf_node), bf->bf_state.bfs_shpream, pktlen, tid, is_aggr, &rix, &try0, &rate, &maxdur, &maxpktlen); /* In case MRR is disabled, make sure rc[0] is setup correctly */ bf->bf_state.bfs_rc[0].rix = rix; bf->bf_state.bfs_rc[0].ratecode = rate; bf->bf_state.bfs_rc[0].tries = try0; if (bf->bf_state.bfs_ismrr && try0 != ATH_TXMAXTRY) ath_rate_getxtxrates(sc, ATH_NODE(bf->bf_node), rix, is_aggr, bf->bf_state.bfs_rc); ATH_NODE_UNLOCK(ATH_NODE(bf->bf_node)); sc->sc_txrix = rix; /* for LED blinking */ sc->sc_lastdatarix = rix; /* for fast frames */ bf->bf_state.bfs_try0 = try0; bf->bf_state.bfs_txrate0 = rate; bf->bf_state.bfs_rc_maxpktlen = maxpktlen; } /* * Update the CLRDMASK bit in the ath_buf if it needs to be set. 
*/ static void ath_tx_update_clrdmask(struct ath_softc *sc, struct ath_tid *tid, struct ath_buf *bf) { struct ath_node *an = ATH_NODE(bf->bf_node); ATH_TX_LOCK_ASSERT(sc); if (an->clrdmask == 1) { bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; an->clrdmask = 0; } } /* * Return whether this frame should be software queued or * direct dispatched. * * When doing powersave, BAR frames should be queued but other management * frames should be directly sent. * * When not doing powersave, stick BAR frames into the hardware queue * so it goes out even though the queue is paused. * * For now, management frames are also software queued by default. */ static int ath_tx_should_swq_frame(struct ath_softc *sc, struct ath_node *an, struct mbuf *m0, int *queue_to_head) { struct ieee80211_node *ni = &an->an_node; struct ieee80211_frame *wh; uint8_t type, subtype; wh = mtod(m0, struct ieee80211_frame *); type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; (*queue_to_head) = 0; /* If it's not in powersave - direct-dispatch BAR */ if ((ATH_NODE(ni)->an_is_powersave == 0) && type == IEEE80211_FC0_TYPE_CTL && subtype == IEEE80211_FC0_SUBTYPE_BAR) { DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: BAR: TX'ing direct\n", __func__); return (0); } else if ((ATH_NODE(ni)->an_is_powersave == 1) && type == IEEE80211_FC0_TYPE_CTL && subtype == IEEE80211_FC0_SUBTYPE_BAR) { /* BAR TX whilst asleep; queue */ DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: swq: TX'ing\n", __func__); (*queue_to_head) = 1; return (1); } else if ((ATH_NODE(ni)->an_is_powersave == 1) && (type == IEEE80211_FC0_TYPE_MGT || type == IEEE80211_FC0_TYPE_CTL)) { /* * Other control/mgmt frame; bypass software queuing * for now! */ DPRINTF(sc, ATH_DEBUG_XMIT, "%s: %6D: Node is asleep; sending mgmt " "(type=%d, subtype=%d)\n", __func__, ni->ni_macaddr, ":", type, subtype); return (0); } else { return (1); } } /* * Transmit the given frame to the hardware. * * The frame must already be setup; rate control must already have * been done. * * XXX since the TXQ lock is being held here (and I dislike holding * it for this long when not doing software aggregation), later on * break this function into "setup_normal" and "xmit_normal". The * lock only needs to be held for the ath_tx_handoff call. * * XXX we don't update the leak count here - if we're doing * direct frame dispatch, we need to be able to do it without * decrementing the leak count (eg multicast queue frames.) */ static void ath_tx_xmit_normal(struct ath_softc *sc, struct ath_txq *txq, struct ath_buf *bf) { struct ath_node *an = ATH_NODE(bf->bf_node); struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid]; ATH_TX_LOCK_ASSERT(sc); /* * For now, just enable CLRDMASK. ath_tx_xmit_normal() does * set a completion handler however it doesn't (yet) properly * handle the strict ordering requirements needed for normal, * non-aggregate session frames. * * Once this is implemented, only set CLRDMASK like this for * frames that must go out - eg management/raw frames. 
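 *
 * Note the fixed ordering of the setup calls that follow; the rate
 * lookup has to run first because the protection calculation and the
 * RTS/CTS rate/duration are both derived from the chosen rate:
 *
 *	ath_tx_do_ratelookup()		- picks bfs_rc[0] / bfs_txrate0
 *	ath_tx_calc_duration()		- NAV duration from the chosen rate
 *	ath_tx_calc_protection()	- protection based on the rate's PHY
 *	ath_tx_set_rtscts()		- RTS/CTS rate + (pre-11n) duration
 *	ath_tx_rate_fill_rcflags()
 *	ath_tx_setds()			- finally write the descriptors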
*/ bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; /* Setup the descriptor before handoff */ ath_tx_do_ratelookup(sc, bf, tid->tid, bf->bf_state.bfs_pktlen, false); ath_tx_calc_duration(sc, bf); ath_tx_calc_protection(sc, bf); ath_tx_set_rtscts(sc, bf); ath_tx_rate_fill_rcflags(sc, bf); ath_tx_setds(sc, bf); /* Track per-TID hardware queue depth correctly */ tid->hwq_depth++; /* Assign the completion handler */ bf->bf_comp = ath_tx_normal_comp; /* Hand off to hardware */ ath_tx_handoff(sc, txq, bf); } /* * Do the basic frame setup stuff that's required before the frame * is added to a software queue. * * All frames get mostly the same treatment and it's done once. * Retransmits fiddle with things like the rate control setup, * setting the retransmit bit in the packet; doing relevant DMA/bus * syncing and relinking it (back) into the hardware TX queue. * * Note that this may cause the mbuf to be reallocated, so * m0 may not be valid. */ static int ath_tx_normal_setup(struct ath_softc *sc, struct ieee80211_node *ni, struct ath_buf *bf, struct mbuf *m0, struct ath_txq *txq) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = &sc->sc_ic; int error, iswep, ismcast, isfrag, ismrr; int keyix, hdrlen, pktlen, try0 = 0; u_int8_t rix = 0, txrate = 0; struct ath_desc *ds; struct ieee80211_frame *wh; u_int subtype, flags; HAL_PKT_TYPE atype; const HAL_RATE_TABLE *rt; HAL_BOOL shortPreamble; struct ath_node *an; /* XXX TODO: this pri is only used for non-QoS check, right? */ u_int pri; /* * To ensure that both sequence numbers and the CCMP PN handling * is "correct", make sure that the relevant TID queue is locked. * Otherwise the CCMP PN and seqno may appear out of order, causing * re-ordered frames to have out of order CCMP PN's, resulting * in many, many frame drops. */ ATH_TX_LOCK_ASSERT(sc); wh = mtod(m0, struct ieee80211_frame *); iswep = wh->i_fc[1] & IEEE80211_FC1_PROTECTED; ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); isfrag = m0->m_flags & M_FRAG; hdrlen = ieee80211_anyhdrsize(wh); /* * Packet length must not include any * pad bytes; deduct them here. */ pktlen = m0->m_pkthdr.len - (hdrlen & 3); /* Handle encryption twiddling if needed */ if (! ath_tx_tag_crypto(sc, ni, m0, iswep, isfrag, &hdrlen, &pktlen, &keyix)) { ieee80211_free_mbuf(m0); return EIO; } /* packet header may have moved, reset our local pointer */ wh = mtod(m0, struct ieee80211_frame *); pktlen += IEEE80211_CRC_LEN; /* * Load the DMA map so any coalescing is done. This * also calculates the number of descriptors we need. */ error = ath_tx_dmasetup(sc, bf, m0); if (error != 0) return error; KASSERT((ni != NULL), ("%s: ni=NULL!", __func__)); bf->bf_node = ni; /* NB: held reference */ m0 = bf->bf_m; /* NB: may have changed */ wh = mtod(m0, struct ieee80211_frame *); /* setup descriptors */ ds = bf->bf_desc; rt = sc->sc_currates; KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode)); /* * NB: the 802.11 layer marks whether or not we should * use short preamble based on the current mode and * negotiated parameters. 
*/ if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) && (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) { shortPreamble = AH_TRUE; sc->sc_stats.ast_tx_shortpre++; } else { shortPreamble = AH_FALSE; } an = ATH_NODE(ni); //flags = HAL_TXDESC_CLRDMASK; /* XXX needed for crypto errs */ flags = 0; ismrr = 0; /* default no multi-rate retry*/ pri = ath_tx_getac(sc, m0); /* honor classification */ /* XXX use txparams instead of fixed values */ /* * Calculate Atheros packet type from IEEE80211 packet header, * setup for rate calculations, and select h/w transmit queue. */ switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) { case IEEE80211_FC0_TYPE_MGT: subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; if (subtype == IEEE80211_FC0_SUBTYPE_BEACON) atype = HAL_PKT_TYPE_BEACON; else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) atype = HAL_PKT_TYPE_PROBE_RESP; else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM) atype = HAL_PKT_TYPE_ATIM; else atype = HAL_PKT_TYPE_NORMAL; /* XXX */ rix = an->an_mgmtrix; txrate = rt->info[rix].rateCode; if (shortPreamble) txrate |= rt->info[rix].shortPreamble; try0 = ATH_TXMGTTRY; flags |= HAL_TXDESC_INTREQ; /* force interrupt */ break; case IEEE80211_FC0_TYPE_CTL: atype = HAL_PKT_TYPE_PSPOLL; /* stop setting of duration */ rix = an->an_mgmtrix; txrate = rt->info[rix].rateCode; if (shortPreamble) txrate |= rt->info[rix].shortPreamble; try0 = ATH_TXMGTTRY; flags |= HAL_TXDESC_INTREQ; /* force interrupt */ break; case IEEE80211_FC0_TYPE_DATA: atype = HAL_PKT_TYPE_NORMAL; /* default */ /* * Data frames: multicast frames go out at a fixed rate, * EAPOL frames use the mgmt frame rate; otherwise consult * the rate control module for the rate to use. */ if (ismcast) { rix = an->an_mcastrix; txrate = rt->info[rix].rateCode; if (shortPreamble) txrate |= rt->info[rix].shortPreamble; try0 = 1; } else if (m0->m_flags & M_EAPOL) { /* XXX? maybe always use long preamble? */ rix = an->an_mgmtrix; txrate = rt->info[rix].rateCode; if (shortPreamble) txrate |= rt->info[rix].shortPreamble; try0 = ATH_TXMAXTRY; /* XXX?too many? */ } else { /* * Do rate lookup on each TX, rather than using * the hard-coded TX information decided here. */ ismrr = 1; bf->bf_state.bfs_doratelookup = 1; } /* * Check whether to set NOACK for this WME category or not. */ if (ieee80211_wme_vap_ac_is_noack(vap, pri)) flags |= HAL_TXDESC_NOACK; break; default: device_printf(sc->sc_dev, "bogus frame type 0x%x (%s)\n", wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__); /* XXX statistic */ /* XXX free tx dmamap */ ieee80211_free_mbuf(m0); return EIO; } /* * There are two known scenarios where the frame AC doesn't match * what the destination TXQ is. * * + non-QoS frames (eg management?) that the net80211 stack has * assigned a higher AC to, but since it's a non-QoS TID, it's * being thrown into TID 16. TID 16 gets the AC_BE queue. * It's quite possible that management frames should just be * direct dispatched to hardware rather than go via the software * queue; that should be investigated in the future. There are * some specific scenarios where this doesn't make sense, mostly * surrounding ADDBA request/response - hence why that is special * cased. * * + Multicast frames going into the VAP mcast queue. That shows up * as "TXQ 11". * * This driver should eventually support separate TID and TXQ locking, * allowing for arbitrary AC frames to appear on arbitrary software * queues, being queued to the "correct" hardware queue when needed. 
*/ #if 0 if (txq != sc->sc_ac2q[pri]) { DPRINTF(sc, ATH_DEBUG_XMIT, "%s: txq=%p (%d), pri=%d, pri txq=%p (%d)\n", __func__, txq, txq->axq_qnum, pri, sc->sc_ac2q[pri], sc->sc_ac2q[pri]->axq_qnum); } #endif /* * Calculate miscellaneous flags. */ if (ismcast) { flags |= HAL_TXDESC_NOACK; /* no ack on broad/multicast */ } else if (pktlen > vap->iv_rtsthreshold && (ni->ni_ath_flags & IEEE80211_NODE_FF) == 0) { flags |= HAL_TXDESC_RTSENA; /* RTS based on frame length */ sc->sc_stats.ast_tx_rts++; } if (flags & HAL_TXDESC_NOACK) /* NB: avoid double counting */ sc->sc_stats.ast_tx_noack++; #ifdef IEEE80211_SUPPORT_TDMA if (sc->sc_tdma && (flags & HAL_TXDESC_NOACK) == 0) { DPRINTF(sc, ATH_DEBUG_TDMA, "%s: discard frame, ACK required w/ TDMA\n", __func__); sc->sc_stats.ast_tdma_ack++; /* XXX free tx dmamap */ ieee80211_free_mbuf(m0); return EIO; } #endif /* * If it's a frame to do location reporting on, * communicate it to the HAL. */ if (ieee80211_get_toa_params(m0, NULL)) { device_printf(sc->sc_dev, "%s: setting TX positioning bit\n", __func__); flags |= HAL_TXDESC_POS; /* * Note: The hardware reports timestamps for * each of the RX'ed packets as part of the packet * exchange. So this means things like RTS/CTS * exchanges, as well as the final ACK. * * So, if you send a RTS-protected NULL data frame, * you'll get an RX report for the RTS response, then * an RX report for the NULL frame, and then the TX * completion at the end. * * NOTE: it doesn't work right for CCK frames; * there's no channel info data provided unless * it's OFDM or HT. Will have to dig into it. */ flags &= ~(HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA); bf->bf_flags |= ATH_BUF_TOA_PROBE; } #if 0 /* * Placeholder: if you want to transmit with the azimuth * timestamp in the end of the payload, here's where you * should set the TXDESC field. */ flags |= HAL_TXDESC_HWTS; #endif /* * Determine if a tx interrupt should be generated for * this descriptor. We take a tx interrupt to reap * descriptors when the h/w hits an EOL condition or * when the descriptor is specifically marked to generate * an interrupt. We periodically mark descriptors in this * way to insure timely replenishing of the supply needed * for sending frames. Defering interrupts reduces system * load and potentially allows more concurrent work to be * done but if done to aggressively can cause senders to * backup. * * NB: use >= to deal with sc_txintrperiod changing * dynamically through sysctl. */ if (flags & HAL_TXDESC_INTREQ) { txq->axq_intrcnt = 0; } else if (++txq->axq_intrcnt >= sc->sc_txintrperiod) { flags |= HAL_TXDESC_INTREQ; txq->axq_intrcnt = 0; } /* This point forward is actual TX bits */ /* * At this point we are committed to sending the frame * and we don't need to look at m_nextpkt; clear it in * case this frame is part of frag chain. */ m0->m_nextpkt = NULL; if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT)) ieee80211_dump_pkt(ic, mtod(m0, const uint8_t *), m0->m_len, sc->sc_hwmap[rix].ieeerate, -1); if (ieee80211_radiotap_active_vap(vap)) { sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags; if (iswep) sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP; if (isfrag) sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG; sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate; sc->sc_tx_th.wt_txpower = ieee80211_get_node_txpower(ni); sc->sc_tx_th.wt_antenna = sc->sc_txantenna; ieee80211_radiotap_tx(vap, m0); } /* Blank the legacy rate array */ bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc)); /* * ath_buf_set_rate needs at least one rate/try to setup * the rate scenario. 
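 *
 * The bfs_rc[] series is laid out as:
 *
 *	bfs_rc[0]	- primary rate, seeded just below from rix/try0/txrate
 *	bfs_rc[1..3]	- multi-rate-retry fallbacks; filled in later by the
 *			  rate control module when a per-packet lookup is done
 *			  (ath_tx_do_ratelookup()), or left with tries=0 when
 *			  MRR is disabled (see ath_tx_set_ratectrl()).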
*/ bf->bf_state.bfs_rc[0].rix = rix; bf->bf_state.bfs_rc[0].tries = try0; bf->bf_state.bfs_rc[0].ratecode = txrate; /* Store the decided rate index values away */ bf->bf_state.bfs_pktlen = pktlen; bf->bf_state.bfs_hdrlen = hdrlen; bf->bf_state.bfs_atype = atype; bf->bf_state.bfs_txpower = ieee80211_get_node_txpower(ni); bf->bf_state.bfs_txrate0 = txrate; bf->bf_state.bfs_try0 = try0; bf->bf_state.bfs_keyix = keyix; bf->bf_state.bfs_txantenna = sc->sc_txantenna; bf->bf_state.bfs_txflags = flags; bf->bf_state.bfs_shpream = shortPreamble; /* XXX this should be done in ath_tx_setrate() */ bf->bf_state.bfs_ctsrate0 = 0; /* ie, no hard-coded ctsrate */ bf->bf_state.bfs_ctsrate = 0; /* calculated later */ bf->bf_state.bfs_ctsduration = 0; bf->bf_state.bfs_ismrr = ismrr; return 0; } /* * Queue a frame to the hardware or software queue. * * This can be called by the net80211 code. * * XXX what about locking? Or, push the seqno assign into the * XXX aggregate scheduler so its serialised? * * XXX When sending management frames via ath_raw_xmit(), * should CLRDMASK be set unconditionally? */ int ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni, struct ath_buf *bf, struct mbuf *m0) { struct ieee80211vap *vap = ni->ni_vap; struct ath_vap *avp = ATH_VAP(vap); int r = 0; u_int pri; int tid; struct ath_txq *txq; int ismcast; const struct ieee80211_frame *wh; int is_ampdu, is_ampdu_tx, is_ampdu_pending; ieee80211_seq seqno; uint8_t type, subtype; int queue_to_head; ATH_TX_LOCK_ASSERT(sc); /* * Determine the target hardware queue. * * For multicast frames, the txq gets overridden appropriately * depending upon the state of PS. If powersave is enabled * then they get added to the cabq for later transmit. * * The "fun" issue here is that group addressed frames should * have the sequence number from a different pool, rather than * the per-TID pool. That means that even QoS group addressed * frames will have a sequence number from that global value, * which means if we transmit different group addressed frames * at different traffic priorities, the sequence numbers will * all be out of whack. So - chances are, the right thing * to do here is to always put group addressed frames into the BE * queue, and ignore the TID for queue selection. * * For any other frame, we do a TID/QoS lookup inside the frame * to see what the TID should be. If it's a non-QoS frame, the * AC and TID are overridden. The TID/TXQ code assumes the * TID is on a predictable hardware TXQ, so we don't support * having a node TID queued to multiple hardware TXQs. * This may change in the future but would require some locking * fudgery. */ pri = ath_tx_getac(sc, m0); tid = ath_tx_gettid(sc, m0); txq = sc->sc_ac2q[pri]; wh = mtod(m0, struct ieee80211_frame *); ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; /* * Enforce how deep the multicast queue can grow. * * XXX duplicated in ath_raw_xmit(). */ if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { if (sc->sc_cabq->axq_depth + sc->sc_cabq->fifo.axq_depth > sc->sc_txq_mcastq_maxdepth) { sc->sc_stats.ast_tx_mcastq_overflow++; m_freem(m0); return (ENOBUFS); } } /* * Enforce how deep the unicast queue can grow. * * If the node is in power save then we don't want * the software queue to grow too deep, or a node may * end up consuming all of the ath_buf entries. * * For now, only do this for DATA frames. 
* * We will want to cap how many management/control * frames get punted to the software queue so it doesn't * fill up. But the correct solution isn't yet obvious. * In any case, this check should at least let frames pass * that we are direct-dispatching. * * XXX TODO: duplicate this to the raw xmit path! */ if (type == IEEE80211_FC0_TYPE_DATA && ATH_NODE(ni)->an_is_powersave && ATH_NODE(ni)->an_swq_depth > sc->sc_txq_node_psq_maxdepth) { sc->sc_stats.ast_tx_node_psq_overflow++; m_freem(m0); return (ENOBUFS); } /* A-MPDU TX */ is_ampdu_tx = ath_tx_ampdu_running(sc, ATH_NODE(ni), tid); is_ampdu_pending = ath_tx_ampdu_pending(sc, ATH_NODE(ni), tid); is_ampdu = is_ampdu_tx | is_ampdu_pending; DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ac=%d, is_ampdu=%d\n", __func__, tid, pri, is_ampdu); /* Set local packet state, used to queue packets to hardware */ bf->bf_state.bfs_tid = tid; bf->bf_state.bfs_tx_queue = txq->axq_qnum; bf->bf_state.bfs_pri = pri; #if 1 /* * When servicing one or more stations in power-save mode * (or) if there is some mcast data waiting on the mcast * queue (to prevent out of order delivery) multicast frames * must be bufferd until after the beacon. * * TODO: we should lock the mcastq before we check the length. */ if (sc->sc_cabq_enable && ismcast && (vap->iv_ps_sta || avp->av_mcastq.axq_depth)) { txq = &avp->av_mcastq; /* * Mark the frame as eventually belonging on the CAB * queue, so the descriptor setup functions will * correctly initialise the descriptor 'qcuId' field. */ bf->bf_state.bfs_tx_queue = sc->sc_cabq->axq_qnum; } #endif /* Do the generic frame setup */ /* XXX should just bzero the bf_state? */ bf->bf_state.bfs_dobaw = 0; /* A-MPDU TX? Manually set sequence number */ /* * Don't do it whilst pending; the net80211 layer still * assigns them. * * Don't assign A-MPDU sequence numbers to group address * frames; they come from a different sequence number space. */ if (is_ampdu_tx && (! IEEE80211_IS_MULTICAST(wh->i_addr1))) { /* * Always call; this function will * handle making sure that null data frames * and group-addressed frames don't get a sequence number * from the current TID and thus mess with the BAW. */ seqno = ath_tx_tid_seqno_assign(sc, ni, bf, m0); /* * Don't add QoS NULL frames and group-addressed frames * to the BAW. */ if (IEEE80211_QOS_HAS_SEQ(wh) && (! IEEE80211_IS_MULTICAST(wh->i_addr1)) && (subtype != IEEE80211_FC0_SUBTYPE_QOS_NULL)) { bf->bf_state.bfs_dobaw = 1; } } /* * If needed, the sequence number has been assigned. * Squirrel it away somewhere easy to get to. */ bf->bf_state.bfs_seqno = M_SEQNO_GET(m0) << IEEE80211_SEQ_SEQ_SHIFT; /* Is ampdu pending? fetch the seqno and print it out */ if (is_ampdu_pending) DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid %d: ampdu pending, seqno %d\n", __func__, tid, M_SEQNO_GET(m0)); /* This also sets up the DMA map; crypto; frame parameters, etc */ r = ath_tx_normal_setup(sc, ni, bf, m0, txq); if (r != 0) goto done; /* At this point m0 could have changed! */ m0 = bf->bf_m; #if 1 /* * If it's a multicast frame, do a direct-dispatch to the * destination hardware queue. Don't bother software * queuing it. */ /* * If it's a BAR frame, do a direct dispatch to the * destination hardware queue. Don't bother software * queuing it, as the TID will now be paused. * Sending a BAR frame can occur from the net80211 txa timer * (ie, retries) or from the ath txtask (completion call.) 
* It queues directly to hardware because the TID is paused * at this point (and won't be unpaused until the BAR has * either been TXed successfully or max retries has been * reached.) */ /* * Until things are better debugged - if this node is asleep * and we're sending it a non-BAR frame, direct dispatch it. * Why? Because we need to figure out what's actually being * sent - eg, during reassociation/reauthentication after * the node (last) disappeared whilst asleep, the driver should * have unpaused/unsleep'ed the node. So until that is * sorted out, use this workaround. */ if (txq == &avp->av_mcastq) { DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p: mcastq: TX'ing\n", __func__, bf); bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; ath_tx_xmit_normal(sc, txq, bf); } else if (ath_tx_should_swq_frame(sc, ATH_NODE(ni), m0, &queue_to_head)) { ath_tx_swq(sc, ni, txq, queue_to_head, bf); } else { bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; ath_tx_xmit_normal(sc, txq, bf); } #else /* * For now, since there's no software queue, * direct-dispatch to the hardware. */ bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; /* * Update the current leak count if * we're leaking frames; and set the * MORE flag as appropriate. */ ath_tx_leak_count_update(sc, tid, bf); ath_tx_xmit_normal(sc, txq, bf); #endif done: return 0; } static int ath_tx_raw_start(struct ath_softc *sc, struct ieee80211_node *ni, struct ath_buf *bf, struct mbuf *m0, const struct ieee80211_bpf_params *params) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = ni->ni_vap; int error, ismcast, ismrr; int keyix, hdrlen, pktlen, try0, txantenna; u_int8_t rix, txrate; struct ieee80211_frame *wh; u_int flags; HAL_PKT_TYPE atype; const HAL_RATE_TABLE *rt; struct ath_desc *ds; u_int pri; int o_tid = -1; int do_override; uint8_t type, subtype; int queue_to_head; struct ath_node *an = ATH_NODE(ni); ATH_TX_LOCK_ASSERT(sc); wh = mtod(m0, struct ieee80211_frame *); ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); hdrlen = ieee80211_anyhdrsize(wh); /* * Packet length must not include any * pad bytes; deduct them here. */ /* XXX honor IEEE80211_BPF_DATAPAD */ pktlen = m0->m_pkthdr.len - (hdrlen & 3) + IEEE80211_CRC_LEN; type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; ATH_KTR(sc, ATH_KTR_TX, 2, "ath_tx_raw_start: ni=%p, bf=%p, raw", ni, bf); DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: ismcast=%d\n", __func__, ismcast); pri = params->ibp_pri & 3; /* Override pri if the frame isn't a QoS one */ if (! IEEE80211_QOS_HAS_SEQ(wh)) pri = ath_tx_getac(sc, m0); /* XXX If it's an ADDBA, override the correct queue */ do_override = ath_tx_action_frame_override_queue(sc, ni, m0, &o_tid); /* Map ADDBA to the correct priority */ if (do_override) { #if 1 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: overriding tid %d pri %d -> %d\n", __func__, o_tid, pri, TID_TO_WME_AC(o_tid)); #endif pri = TID_TO_WME_AC(o_tid); } /* * "pri" is the hardware queue to transmit on. * * Look at the description in ath_tx_start() to understand * what needs to be "fixed" here so we just use the TID * for QoS frames. */ /* Handle encryption twiddling if needed */ if (! ath_tx_tag_crypto(sc, ni, m0, params->ibp_flags & IEEE80211_BPF_CRYPTO, 0, &hdrlen, &pktlen, &keyix)) { ieee80211_free_mbuf(m0); return EIO; } /* packet header may have moved, reset our local pointer */ wh = mtod(m0, struct ieee80211_frame *); /* Do the generic frame setup */ /* XXX should just bzero the bf_state? 
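*/

/*
 * Illustrative worked example (values invented): the pktlen computation
 * above for a QoS data frame.  The 802.11 QoS data header is 26 bytes, so
 * (hdrlen & 3) == 2 deducts the header alignment pad bytes that are not
 * transmitted, and IEEE80211_CRC_LEN adds back the 4-byte FCS that goes
 * over the air but is not carried in the mbuf:
 *
 *	hdrlen = 26, m0->m_pkthdr.len = 102
 *	pktlen = 102 - (26 & 3) + 4 = 104
 */

/*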
*/ bf->bf_state.bfs_dobaw = 0; error = ath_tx_dmasetup(sc, bf, m0); if (error != 0) return error; m0 = bf->bf_m; /* NB: may have changed */ wh = mtod(m0, struct ieee80211_frame *); KASSERT((ni != NULL), ("%s: ni=NULL!", __func__)); bf->bf_node = ni; /* NB: held reference */ /* Always enable CLRDMASK for raw frames for now.. */ flags = HAL_TXDESC_CLRDMASK; /* XXX needed for crypto errs */ flags |= HAL_TXDESC_INTREQ; /* force interrupt */ if (params->ibp_flags & IEEE80211_BPF_RTS) flags |= HAL_TXDESC_RTSENA; else if (params->ibp_flags & IEEE80211_BPF_CTS) { /* XXX assume 11g/11n protection? */ bf->bf_state.bfs_doprot = 1; flags |= HAL_TXDESC_CTSENA; } /* XXX leave ismcast to injector? */ if ((params->ibp_flags & IEEE80211_BPF_NOACK) || ismcast) flags |= HAL_TXDESC_NOACK; rt = sc->sc_currates; KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode)); /* Fetch first rate information */ rix = ath_tx_findrix(sc, params->ibp_rate0); try0 = params->ibp_try0; /* * Override EAPOL rate as appropriate. */ if (m0->m_flags & M_EAPOL) { /* XXX? maybe always use long preamble? */ rix = an->an_mgmtrix; try0 = ATH_TXMAXTRY; /* XXX?too many? */ } /* * If it's a frame to do location reporting on, * communicate it to the HAL. */ if (ieee80211_get_toa_params(m0, NULL)) { device_printf(sc->sc_dev, "%s: setting TX positioning bit\n", __func__); flags |= HAL_TXDESC_POS; flags &= ~(HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA); bf->bf_flags |= ATH_BUF_TOA_PROBE; } txrate = rt->info[rix].rateCode; if (params->ibp_flags & IEEE80211_BPF_SHORTPRE) txrate |= rt->info[rix].shortPreamble; sc->sc_txrix = rix; ismrr = (params->ibp_try1 != 0); txantenna = params->ibp_pri >> 2; if (txantenna == 0) /* XXX? */ txantenna = sc->sc_txantenna; /* * Since ctsrate is fixed, store it away for later * use when the descriptor fields are being set. */ if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)) bf->bf_state.bfs_ctsrate0 = params->ibp_ctsrate; /* * NB: we mark all packets as type PSPOLL so the h/w won't * set the sequence number, duration, etc. */ atype = HAL_PKT_TYPE_PSPOLL; if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT)) ieee80211_dump_pkt(ic, mtod(m0, caddr_t), m0->m_len, sc->sc_hwmap[rix].ieeerate, -1); if (ieee80211_radiotap_active_vap(vap)) { sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags; if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP; if (m0->m_flags & M_FRAG) sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG; sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate; sc->sc_tx_th.wt_txpower = MIN(params->ibp_power, ieee80211_get_node_txpower(ni)); sc->sc_tx_th.wt_antenna = sc->sc_txantenna; ieee80211_radiotap_tx(vap, m0); } /* * Formulate first tx descriptor with tx controls. */ ds = bf->bf_desc; /* XXX check return value? */ /* Store the decided rate index values away */ bf->bf_state.bfs_pktlen = pktlen; bf->bf_state.bfs_hdrlen = hdrlen; bf->bf_state.bfs_atype = atype; bf->bf_state.bfs_txpower = MIN(params->ibp_power, ieee80211_get_node_txpower(ni)); bf->bf_state.bfs_txrate0 = txrate; bf->bf_state.bfs_try0 = try0; bf->bf_state.bfs_keyix = keyix; bf->bf_state.bfs_txantenna = txantenna; bf->bf_state.bfs_txflags = flags; bf->bf_state.bfs_shpream = !! 
(params->ibp_flags & IEEE80211_BPF_SHORTPRE); /* Set local packet state, used to queue packets to hardware */ bf->bf_state.bfs_tid = WME_AC_TO_TID(pri); bf->bf_state.bfs_tx_queue = sc->sc_ac2q[pri]->axq_qnum; bf->bf_state.bfs_pri = pri; /* XXX this should be done in ath_tx_setrate() */ bf->bf_state.bfs_ctsrate = 0; bf->bf_state.bfs_ctsduration = 0; bf->bf_state.bfs_ismrr = ismrr; /* Blank the legacy rate array */ bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc)); bf->bf_state.bfs_rc[0].rix = rix; bf->bf_state.bfs_rc[0].tries = try0; bf->bf_state.bfs_rc[0].ratecode = txrate; if (ismrr) { int rix; rix = ath_tx_findrix(sc, params->ibp_rate1); bf->bf_state.bfs_rc[1].rix = rix; bf->bf_state.bfs_rc[1].tries = params->ibp_try1; rix = ath_tx_findrix(sc, params->ibp_rate2); bf->bf_state.bfs_rc[2].rix = rix; bf->bf_state.bfs_rc[2].tries = params->ibp_try2; rix = ath_tx_findrix(sc, params->ibp_rate3); bf->bf_state.bfs_rc[3].rix = rix; bf->bf_state.bfs_rc[3].tries = params->ibp_try3; } /* * All the required rate control decisions have been made; * fill in the rc flags. */ ath_tx_rate_fill_rcflags(sc, bf); /* NB: no buffered multicast in power save support */ /* * If we're overiding the ADDBA destination, dump directly * into the hardware queue, right after any pending * frames to that node are. */ DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: dooverride=%d\n", __func__, do_override); #if 1 /* * Put addba frames in the right place in the right TID/HWQ. */ if (do_override) { bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; /* * XXX if it's addba frames, should we be leaking * them out via the frame leak method? * XXX for now let's not risk it; but we may wish * to investigate this later. */ ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf); } else if (ath_tx_should_swq_frame(sc, ATH_NODE(ni), m0, &queue_to_head)) { /* Queue to software queue */ ath_tx_swq(sc, ni, sc->sc_ac2q[pri], queue_to_head, bf); } else { bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf); } #else /* Direct-dispatch to the hardware */ bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; /* * Update the current leak count if * we're leaking frames; and set the * MORE flag as appropriate. */ ath_tx_leak_count_update(sc, tid, bf); ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf); #endif return 0; } /* * Send a raw frame. * * This can be called by net80211. */ int ath_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params) { struct ieee80211com *ic = ni->ni_ic; struct ath_softc *sc = ic->ic_softc; struct ath_buf *bf; struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *); int error = 0; ATH_PCU_LOCK(sc); if (sc->sc_inreset_cnt > 0) { DPRINTF(sc, ATH_DEBUG_XMIT, "%s: sc_inreset_cnt > 0; bailing\n", __func__); error = EIO; ATH_PCU_UNLOCK(sc); goto badbad; } sc->sc_txstart_cnt++; ATH_PCU_UNLOCK(sc); /* Wake the hardware up already */ ATH_LOCK(sc); ath_power_set_power_state(sc, HAL_PM_AWAKE); ATH_UNLOCK(sc); ATH_TX_LOCK(sc); if (!sc->sc_running || sc->sc_invalid) { DPRINTF(sc, ATH_DEBUG_XMIT, "%s: discard frame, r/i: %d/%d", __func__, sc->sc_running, sc->sc_invalid); m_freem(m); error = ENETDOWN; goto bad; } /* * Enforce how deep the multicast queue can grow. * * XXX duplicated in ath_tx_start(). 
*/ if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { if (sc->sc_cabq->axq_depth + sc->sc_cabq->fifo.axq_depth > sc->sc_txq_mcastq_maxdepth) { sc->sc_stats.ast_tx_mcastq_overflow++; error = ENOBUFS; } if (error != 0) { m_freem(m); goto bad; } } /* * Grab a TX buffer and associated resources. */ bf = ath_getbuf(sc, ATH_BUFTYPE_MGMT); if (bf == NULL) { sc->sc_stats.ast_tx_nobuf++; m_freem(m); error = ENOBUFS; goto bad; } ATH_KTR(sc, ATH_KTR_TX, 3, "ath_raw_xmit: m=%p, params=%p, bf=%p\n", m, params, bf); if (params == NULL) { /* * Legacy path; interpret frame contents to decide * precisely how to send the frame. */ if (ath_tx_start(sc, ni, bf, m)) { error = EIO; /* XXX */ goto bad2; } } else { /* * Caller supplied explicit parameters to use in * sending the frame. */ if (ath_tx_raw_start(sc, ni, bf, m, params)) { error = EIO; /* XXX */ goto bad2; } } sc->sc_wd_timer = 5; sc->sc_stats.ast_tx_raw++; /* * Update the TIM - if there's anything queued to the * software queue and power save is enabled, we should * set the TIM. */ ath_tx_update_tim(sc, ni, 1); ATH_TX_UNLOCK(sc); ATH_PCU_LOCK(sc); sc->sc_txstart_cnt--; ATH_PCU_UNLOCK(sc); /* Put the hardware back to sleep if required */ ATH_LOCK(sc); ath_power_restore_power_state(sc); ATH_UNLOCK(sc); return 0; bad2: ATH_KTR(sc, ATH_KTR_TX, 3, "ath_raw_xmit: bad2: m=%p, params=%p, " "bf=%p", m, params, bf); ATH_TXBUF_LOCK(sc); ath_returnbuf_head(sc, bf); ATH_TXBUF_UNLOCK(sc); bad: ATH_TX_UNLOCK(sc); ATH_PCU_LOCK(sc); sc->sc_txstart_cnt--; ATH_PCU_UNLOCK(sc); /* Put the hardware back to sleep if required */ ATH_LOCK(sc); ath_power_restore_power_state(sc); ATH_UNLOCK(sc); badbad: ATH_KTR(sc, ATH_KTR_TX, 2, "ath_raw_xmit: bad0: m=%p, params=%p", m, params); sc->sc_stats.ast_tx_raw_fail++; return error; } /* Some helper functions */ /* * ADDBA (and potentially others) need to be placed in the same * hardware queue as the TID/node it's relating to. This is so * it goes out after any pending non-aggregate frames to the * same node/TID. * * If this isn't done, the ADDBA can go out before the frames * queued in hardware. Even though these frames have a sequence * number -earlier- than the ADDBA can be transmitted (but * no frames whose sequence numbers are after the ADDBA should * be!) they'll arrive after the ADDBA - and the receiving end * will simply drop them as being out of the BAW. * * The frames can't be appended to the TID software queue - it'll * never be sent out. So these frames have to be directly * dispatched to the hardware, rather than queued in software. * So if this function returns true, the TXQ has to be * overridden and it has to be directly dispatched. * * It's a dirty hack, but someone's gotta do it. */ /* * XXX doesn't belong here! */ static int ieee80211_is_action(struct ieee80211_frame *wh) { /* Type: Management frame? */ if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_MGT) return 0; /* Subtype: Action frame? */ if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) != IEEE80211_FC0_SUBTYPE_ACTION) return 0; return 1; } /* * Return an alternate TID for ADDBA request frames. * * Yes, this likely should be done in the net80211 layer. */ static int ath_tx_action_frame_override_queue(struct ath_softc *sc, struct ieee80211_node *ni, struct mbuf *m0, int *tid) { struct ieee80211_frame *wh = mtod(m0, struct ieee80211_frame *); struct ieee80211_action_ba_addbarequest *ia; uint8_t *frm; uint16_t baparamset; /* Not action frame? Bail */ if (! ieee80211_is_action(wh)) return 0; /* XXX Not needed for frames we send? */ #if 0 /* Correct length? 
*/ if (! ieee80211_parse_action(ni, m)) return 0; #endif /* Extract out action frame */ frm = (u_int8_t *)&wh[1]; ia = (struct ieee80211_action_ba_addbarequest *) frm; /* Not ADDBA? Bail */ if (ia->rq_header.ia_category != IEEE80211_ACTION_CAT_BA) return 0; if (ia->rq_header.ia_action != IEEE80211_ACTION_BA_ADDBA_REQUEST) return 0; /* Extract TID, return it */ baparamset = le16toh(ia->rq_baparamset); *tid = (int) _IEEE80211_MASKSHIFT(baparamset, IEEE80211_BAPS_TID); return 1; } /* Per-node software queue operations */ /* * Add the current packet to the given BAW. * It is assumed that the current packet * * + fits inside the BAW; * + already has had a sequence number allocated. * * Since the BAW status may be modified by both the ath task and * the net80211/ifnet contexts, the TID must be locked. */ void ath_tx_addto_baw(struct ath_softc *sc, struct ath_node *an, struct ath_tid *tid, struct ath_buf *bf) { int index, cindex; struct ieee80211_tx_ampdu *tap; ATH_TX_LOCK_ASSERT(sc); if (bf->bf_state.bfs_isretried) return; tap = ath_tx_get_tx_tid(an, tid->tid); if (! bf->bf_state.bfs_dobaw) { DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, "%s: dobaw=0, seqno=%d, window %d:%d\n", __func__, SEQNO(bf->bf_state.bfs_seqno), tap->txa_start, tap->txa_wnd); } if (bf->bf_state.bfs_addedbaw) DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, "%s: re-added? tid=%d, seqno %d; window %d:%d; " "baw head=%d tail=%d\n", __func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno), tap->txa_start, tap->txa_wnd, tid->baw_head, tid->baw_tail); /* * Verify that the given sequence number is not outside of the * BAW. Complain loudly if that's the case. */ if (! BAW_WITHIN(tap->txa_start, tap->txa_wnd, SEQNO(bf->bf_state.bfs_seqno))) { DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, "%s: bf=%p: outside of BAW?? tid=%d, seqno %d; window %d:%d; " "baw head=%d tail=%d\n", __func__, bf, tid->tid, SEQNO(bf->bf_state.bfs_seqno), tap->txa_start, tap->txa_wnd, tid->baw_head, tid->baw_tail); } /* * ni->ni_txseqs[] is the currently allocated seqno. * the txa state contains the current baw start. */ index = ATH_BA_INDEX(tap->txa_start, SEQNO(bf->bf_state.bfs_seqno)); cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, "%s: tid=%d, seqno %d; window %d:%d; index=%d cindex=%d " "baw head=%d tail=%d\n", __func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno), tap->txa_start, tap->txa_wnd, index, cindex, tid->baw_head, tid->baw_tail); #if 0 assert(tid->tx_buf[cindex] == NULL); #endif if (tid->tx_buf[cindex] != NULL) { DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, "%s: ba packet dup (index=%d, cindex=%d, " "head=%d, tail=%d)\n", __func__, index, cindex, tid->baw_head, tid->baw_tail); DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, "%s: BA bf: %p; seqno=%d ; new bf: %p; seqno=%d\n", __func__, tid->tx_buf[cindex], SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno), bf, SEQNO(bf->bf_state.bfs_seqno) ); } tid->tx_buf[cindex] = bf; if (index >= ((tid->baw_tail - tid->baw_head) & (ATH_TID_MAX_BUFS - 1))) { tid->baw_tail = cindex; INCR(tid->baw_tail, ATH_TID_MAX_BUFS); } } /* * Flip the BAW buffer entry over from the existing one to the new one. * * When software retransmitting a (sub-)frame, it is entirely possible that * the frame ath_buf is marked as BUSY and can't be immediately reused. * In that instance the buffer is cloned and the new buffer is used for * retransmit. We thus need to update the ath_buf slot in the BAW buf * tracking array to maintain consistency. 
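*/

#if 0
/*
 * Illustrative sketch, never compiled: the block-ack window bookkeeping used
 * by ath_tx_addto_baw()/ath_tx_update_baw() above, assuming the 12-bit
 * (4096 entry) 802.11 sequence space and a power-of-two slot ring.  The ex_
 * names and the ring size are invented; the driver does this through
 * ATH_BA_INDEX(), INCR() and ATH_TID_MAX_BUFS.
 */
#define	EX_SEQ_MASK	4095	/* 12-bit sequence space */
#define	EX_SLOT_MASK	63	/* ring of 64 slots, power of two */

/* Map a sequence number to its slot in the per-TID buffer ring */
static int
ex_baw_slot(int baw_left_edge, int baw_head, int seqno)
{
	int index, cindex;

	index = (seqno - baw_left_edge) & EX_SEQ_MASK;	/* offset from left edge */
	cindex = (baw_head + index) & EX_SLOT_MASK;	/* wrap into the ring */
	/* e.g. left edge 4090, head 10, seqno 3: index = 9, slot = 19 */
	return (cindex);
}

/* Completion: clear the slot, then slide the window past leading holes */
static void
ex_baw_complete(void **slots, int *baw_head, int *baw_tail,
    int *baw_left_edge, int cindex)
{
	slots[cindex] = NULL;
	while (*baw_head != *baw_tail && slots[*baw_head] == NULL) {
		*baw_left_edge = (*baw_left_edge + 1) & EX_SEQ_MASK;
		*baw_head = (*baw_head + 1) & EX_SLOT_MASK;
	}
}
#endif

/*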
*/ static void ath_tx_switch_baw_buf(struct ath_softc *sc, struct ath_node *an, struct ath_tid *tid, struct ath_buf *old_bf, struct ath_buf *new_bf) { int index, cindex; struct ieee80211_tx_ampdu *tap; int seqno = SEQNO(old_bf->bf_state.bfs_seqno); ATH_TX_LOCK_ASSERT(sc); tap = ath_tx_get_tx_tid(an, tid->tid); index = ATH_BA_INDEX(tap->txa_start, seqno); cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); /* * Just warn for now; if it happens then we should find out * about it. It's highly likely the aggregation session will * soon hang. */ if (old_bf->bf_state.bfs_seqno != new_bf->bf_state.bfs_seqno) { DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, "%s: retransmitted buffer" " has mismatching seqno's, BA session may hang.\n", __func__); DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, "%s: old seqno=%d, new_seqno=%d\n", __func__, old_bf->bf_state.bfs_seqno, new_bf->bf_state.bfs_seqno); } if (tid->tx_buf[cindex] != old_bf) { DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, "%s: ath_buf pointer incorrect; " " has m BA session may hang.\n", __func__); DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, "%s: old bf=%p, new bf=%p\n", __func__, old_bf, new_bf); } tid->tx_buf[cindex] = new_bf; } /* * seq_start - left edge of BAW * seq_next - current/next sequence number to allocate * * Since the BAW status may be modified by both the ath task and * the net80211/ifnet contexts, the TID must be locked. */ static void ath_tx_update_baw(struct ath_softc *sc, struct ath_node *an, struct ath_tid *tid, const struct ath_buf *bf) { int index, cindex; struct ieee80211_tx_ampdu *tap; int seqno = SEQNO(bf->bf_state.bfs_seqno); ATH_TX_LOCK_ASSERT(sc); tap = ath_tx_get_tx_tid(an, tid->tid); index = ATH_BA_INDEX(tap->txa_start, seqno); cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, "%s: tid=%d, baw=%d:%d, seqno=%d, index=%d, cindex=%d, " "baw head=%d, tail=%d\n", __func__, tid->tid, tap->txa_start, tap->txa_wnd, seqno, index, cindex, tid->baw_head, tid->baw_tail); /* * If this occurs then we have a big problem - something else * has slid tap->txa_start along without updating the BAW * tracking start/end pointers. Thus the TX BAW state is now * completely busted. * * But for now, since I haven't yet fixed TDMA and buffer cloning, * it's quite possible that a cloned buffer is making its way * here and causing it to fire off. Disable TDMA for now. */ if (tid->tx_buf[cindex] != bf) { DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, "%s: comp bf=%p, seq=%d; slot bf=%p, seqno=%d\n", __func__, bf, SEQNO(bf->bf_state.bfs_seqno), tid->tx_buf[cindex], (tid->tx_buf[cindex] != NULL) ? SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno) : -1); } tid->tx_buf[cindex] = NULL; while (tid->baw_head != tid->baw_tail && !tid->tx_buf[tid->baw_head]) { INCR(tap->txa_start, IEEE80211_SEQ_RANGE); INCR(tid->baw_head, ATH_TID_MAX_BUFS); } DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, "%s: tid=%d: baw is now %d:%d, baw head=%d\n", __func__, tid->tid, tap->txa_start, tap->txa_wnd, tid->baw_head); } static void ath_tx_leak_count_update(struct ath_softc *sc, struct ath_tid *tid, struct ath_buf *bf) { struct ieee80211_frame *wh; ATH_TX_LOCK_ASSERT(sc); if (tid->an->an_leak_count > 0) { wh = mtod(bf->bf_m, struct ieee80211_frame *); /* * Update MORE based on the software/net80211 queue states. 
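*/

#if 0
	/*
	 * Illustrative aside, never compiled: the MORE_DATA decision made
	 * just below, as a single expression.  The bit stays set while either
	 * the net80211 power-save queue or this driver's software queue still
	 * holds frames for the node, so the station knows to keep fetching.
	 */
	{
		int ex_more;

		ex_more = (tid->an->an_stack_psq > 0) ||
		    (tid->an->an_swq_depth > 0);
	}
#endif

/*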
*/ if ((tid->an->an_stack_psq > 0) || (tid->an->an_swq_depth > 0)) wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA; else wh->i_fc[1] &= ~IEEE80211_FC1_MORE_DATA; DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, "%s: %6D: leak count = %d, psq=%d, swq=%d, MORE=%d\n", __func__, tid->an->an_node.ni_macaddr, ":", tid->an->an_leak_count, tid->an->an_stack_psq, tid->an->an_swq_depth, !! (wh->i_fc[1] & IEEE80211_FC1_MORE_DATA)); /* * Re-sync the underlying buffer. */ bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE); tid->an->an_leak_count --; } } static int ath_tx_tid_can_tx_or_sched(struct ath_softc *sc, struct ath_tid *tid) { ATH_TX_LOCK_ASSERT(sc); if (tid->an->an_leak_count > 0) { return (1); } if (tid->paused) return (0); return (1); } /* * Mark the current node/TID as ready to TX. * * This is done to make it easy for the software scheduler to * find which nodes have data to send. * * The TXQ lock must be held. */ void ath_tx_tid_sched(struct ath_softc *sc, struct ath_tid *tid) { struct ath_txq *txq = sc->sc_ac2q[tid->ac]; ATH_TX_LOCK_ASSERT(sc); /* * If we are leaking out a frame to this destination * for PS-POLL, ensure that we allow scheduling to * occur. */ if (! ath_tx_tid_can_tx_or_sched(sc, tid)) return; /* paused, can't schedule yet */ if (tid->sched) return; /* already scheduled */ tid->sched = 1; #if 0 /* * If this is a sleeping node we're leaking to, given * it a higher priority. This is so bad for QoS it hurts. */ if (tid->an->an_leak_count) { TAILQ_INSERT_HEAD(&txq->axq_tidq, tid, axq_qelem); } else { TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem); } #endif /* * We can't do the above - it'll confuse the TXQ software * scheduler which will keep checking the _head_ TID * in the list to see if it has traffic. If we queue * a TID to the head of the list and it doesn't transmit, * we'll check it again. * * So, get the rest of this leaking frames support working * and reliable first and _then_ optimise it so they're * pushed out in front of any other pending software * queued nodes. */ TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem); } /* * Mark the current node as no longer needing to be polled for * TX packets. * * The TXQ lock must be held. */ static void ath_tx_tid_unsched(struct ath_softc *sc, struct ath_tid *tid) { struct ath_txq *txq = sc->sc_ac2q[tid->ac]; ATH_TX_LOCK_ASSERT(sc); if (tid->sched == 0) return; tid->sched = 0; TAILQ_REMOVE(&txq->axq_tidq, tid, axq_qelem); } /* * Assign a sequence number manually to the given frame. * * This should only be called for A-MPDU TX frames. * * Note: for group addressed frames, the sequence number * should be from NONQOS_TID, and net80211 should have * already assigned it for us. */ static ieee80211_seq ath_tx_tid_seqno_assign(struct ath_softc *sc, struct ieee80211_node *ni, struct ath_buf *bf, struct mbuf *m0) { struct ieee80211_frame *wh; int tid; ieee80211_seq seqno; uint8_t subtype; wh = mtod(m0, struct ieee80211_frame *); tid = ieee80211_gettid(wh); DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, qos has seq=%d\n", __func__, tid, IEEE80211_QOS_HAS_SEQ(wh)); /* XXX Is it a control frame? Ignore */ /* Does the packet require a sequence number? */ if (! IEEE80211_QOS_HAS_SEQ(wh)) return -1; ATH_TX_LOCK_ASSERT(sc); /* * Is it a QOS NULL Data frame? Give it a sequence number from * the default TID (IEEE80211_NONQOS_TID.) * * The RX path of everything I've looked at doesn't include the NULL * data frame sequence number in the aggregation state updates, so * assigning it a sequence number there will cause a BAW hole on the * RX side. 
*/ subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; if (subtype == IEEE80211_FC0_SUBTYPE_QOS_NULL) { /* XXX no locking for this TID? This is a bit of a problem. */ seqno = ni->ni_txseqs[IEEE80211_NONQOS_TID]; INCR(ni->ni_txseqs[IEEE80211_NONQOS_TID], IEEE80211_SEQ_RANGE); } else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { /* * group addressed frames get a sequence number from * a different sequence number space. */ seqno = ni->ni_txseqs[IEEE80211_NONQOS_TID]; INCR(ni->ni_txseqs[IEEE80211_NONQOS_TID], IEEE80211_SEQ_RANGE); } else { /* Manually assign sequence number */ seqno = ni->ni_txseqs[tid]; INCR(ni->ni_txseqs[tid], IEEE80211_SEQ_RANGE); } *(uint16_t *)&wh->i_seq[0] = htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT); M_SEQNO_SET(m0, seqno); /* Return so caller can do something with it if needed */ DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: -> subtype=0x%x, tid=%d, seqno=%d\n", __func__, subtype, tid, seqno); return seqno; } /* * Attempt to direct dispatch an aggregate frame to hardware. * If the frame is out of BAW, queue. * Otherwise, schedule it as a single frame. */ static void ath_tx_xmit_aggr(struct ath_softc *sc, struct ath_node *an, struct ath_txq *txq, struct ath_buf *bf) { struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid]; struct ieee80211_tx_ampdu *tap; ATH_TX_LOCK_ASSERT(sc); tap = ath_tx_get_tx_tid(an, tid->tid); /* paused? queue */ if (! ath_tx_tid_can_tx_or_sched(sc, tid)) { ATH_TID_INSERT_HEAD(tid, bf, bf_list); /* XXX don't sched - we're paused! */ return; } /* outside baw? queue */ if (bf->bf_state.bfs_dobaw && (! BAW_WITHIN(tap->txa_start, tap->txa_wnd, SEQNO(bf->bf_state.bfs_seqno)))) { ATH_TID_INSERT_HEAD(tid, bf, bf_list); ath_tx_tid_sched(sc, tid); return; } /* * This is a temporary check and should be removed once * all the relevant code paths have been fixed. * * During aggregate retries, it's possible that the head * frame will fail (which has the bfs_aggr and bfs_nframes * fields set for said aggregate) and will be retried as * a single frame. In this instance, the values should * be reset or the completion code will get upset with you. */ if (bf->bf_state.bfs_aggr != 0 || bf->bf_state.bfs_nframes > 1) { DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: bfs_aggr=%d, bfs_nframes=%d\n", __func__, bf->bf_state.bfs_aggr, bf->bf_state.bfs_nframes); bf->bf_state.bfs_aggr = 0; bf->bf_state.bfs_nframes = 1; } /* Update CLRDMASK just before this frame is queued */ ath_tx_update_clrdmask(sc, tid, bf); /* Direct dispatch to hardware */ ath_tx_do_ratelookup(sc, bf, tid->tid, bf->bf_state.bfs_pktlen, false); ath_tx_calc_duration(sc, bf); ath_tx_calc_protection(sc, bf); ath_tx_set_rtscts(sc, bf); ath_tx_rate_fill_rcflags(sc, bf); ath_tx_setds(sc, bf); /* Statistics */ sc->sc_aggr_stats.aggr_low_hwq_single_pkt++; /* Track per-TID hardware queue depth correctly */ tid->hwq_depth++; /* Add to BAW */ if (bf->bf_state.bfs_dobaw) { ath_tx_addto_baw(sc, an, tid, bf); bf->bf_state.bfs_addedbaw = 1; } /* Set completion handler, multi-frame aggregate or not */ bf->bf_comp = ath_tx_aggr_comp; /* * Update the current leak count if * we're leaking frames; and set the * MORE flag as appropriate. */ ath_tx_leak_count_update(sc, tid, bf); /* Hand off to hardware */ ath_tx_handoff(sc, txq, bf); } /* * Attempt to send the packet. * If the queue isn't busy, direct-dispatch. * If the queue is busy enough, queue the given packet on the * relevant software queue. 
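*/

#if 0
/*
 * Illustrative sketch, never compiled: the counter selection performed by
 * ath_tx_tid_seqno_assign() above.  QoS NULL and group addressed frames draw
 * from the non-QoS counter, everything else from its TID's counter, and the
 * result is a 12-bit value placed in bits 4..15 of the little-endian
 * Sequence Control field (hence the shift by IEEE80211_SEQ_SEQ_SHIFT above).
 * The ex_ names and the 17-entry counter layout are invented for the sketch.
 */
#define	EX_NONQOS_TID	16

static uint16_t
ex_assign_seqno(uint16_t counters[17], int tid, int is_qos_null,
    int is_group_addressed)
{
	int pool;
	uint16_t seqno;

	pool = (is_qos_null || is_group_addressed) ? EX_NONQOS_TID : tid;
	seqno = counters[pool];
	counters[pool] = (counters[pool] + 1) & 4095;	/* 12-bit wrap */
	return (seqno);
}
/* Field encoding, as done above: seqctl = htole16(seqno << 4); 2050 -> 0x8020 */
#endif

/*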
*/ void ath_tx_swq(struct ath_softc *sc, struct ieee80211_node *ni, struct ath_txq *txq, int queue_to_head, struct ath_buf *bf) { struct ath_node *an = ATH_NODE(ni); struct ieee80211_frame *wh; struct ath_tid *atid; int pri, tid; struct mbuf *m0 = bf->bf_m; ATH_TX_LOCK_ASSERT(sc); /* Fetch the TID - non-QoS frames get assigned to TID 16 */ wh = mtod(m0, struct ieee80211_frame *); pri = ath_tx_getac(sc, m0); tid = ath_tx_gettid(sc, m0); atid = &an->an_tid[tid]; DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p, pri=%d, tid=%d, qos=%d\n", __func__, bf, pri, tid, IEEE80211_QOS_HAS_SEQ(wh)); /* Set local packet state, used to queue packets to hardware */ /* XXX potentially duplicate info, re-check */ bf->bf_state.bfs_tid = tid; bf->bf_state.bfs_tx_queue = txq->axq_qnum; bf->bf_state.bfs_pri = pri; /* * If the hardware queue isn't busy, direct-dispatch it. * If the hardware queue is busy, software queue it. * If the TID is paused or the traffic is outside the BAW, software * queue it. * * If the node is in power-save and we're leaking a frame, * leak a single frame. */ if (! ath_tx_tid_can_tx_or_sched(sc, atid)) { /* TID is paused, queue */ DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: paused\n", __func__); /* * If the caller requested that it be sent at a high * priority, queue it at the head of the list. */ if (queue_to_head) ATH_TID_INSERT_HEAD(atid, bf, bf_list); else ATH_TID_INSERT_TAIL(atid, bf, bf_list); } else if (ath_tx_ampdu_pending(sc, an, tid)) { /* AMPDU pending; queue */ DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: pending\n", __func__); ATH_TID_INSERT_TAIL(atid, bf, bf_list); /* XXX sched? */ } else if (ath_tx_ampdu_running(sc, an, tid)) { /* * AMPDU running, queue single-frame if the hardware queue * isn't busy. * * If the hardware queue is busy sending an aggregate frame, * then just hold off so we can queue more aggregate frames. * * Otherwise we may end up with single frames leaking through * because we are dispatching them too quickly. * * TODO: maybe we should treat this as two policies - minimise * latency, or maximise throughput. Then for BE/BK we can * maximise throughput, and VO/VI (if AMPDU is enabled!) * minimise latency. */ /* * Always queue the frame to the tail of the list. */ ATH_TID_INSERT_TAIL(atid, bf, bf_list); /* * If the hardware queue isn't busy, direct dispatch * the head frame in the list. * * Note: if we're, say, configured to do ADDBA but not A-MPDU * then maybe we want to still queue two non-aggregate frames * to the hardware. Again with the per-TID policy * configuration..) * * Otherwise, schedule the TID. */ /* XXX TXQ locking */ if (txq->axq_depth + txq->fifo.axq_depth == 0) { bf = ATH_TID_FIRST(atid); ATH_TID_REMOVE(atid, bf, bf_list); /* * Ensure it's definitely treated as a non-AMPDU * frame - this information may have been left * over from a previous attempt. */ bf->bf_state.bfs_aggr = 0; bf->bf_state.bfs_nframes = 1; /* Queue to the hardware */ ath_tx_xmit_aggr(sc, an, txq, bf); DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: xmit_aggr\n", __func__); } else { DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: ampdu; swq'ing\n", __func__); ath_tx_tid_sched(sc, atid); } /* * If we're not doing A-MPDU, be prepared to direct dispatch * up to both limits if possible. This particular corner * case may end up with packet starvation between aggregate * traffic and non-aggregate traffic: we want to ensure * that non-aggregate stations get a few frames queued to the * hardware before the aggregate station(s) get their chance.
* * So if you only ever see a couple of frames direct dispatched * to the hardware from a non-AMPDU client, check both here * and in the software queue dispatcher to ensure that those * non-AMPDU stations get a fair chance to transmit. */ /* XXX TXQ locking */ } else if ((txq->axq_depth + txq->fifo.axq_depth < sc->sc_hwq_limit_nonaggr) && (txq->axq_aggr_depth < sc->sc_hwq_limit_aggr)) { /* AMPDU not running, attempt direct dispatch */ DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: xmit_normal\n", __func__); /* See if clrdmask needs to be set */ ath_tx_update_clrdmask(sc, atid, bf); /* * Update the current leak count if * we're leaking frames; and set the * MORE flag as appropriate. */ ath_tx_leak_count_update(sc, atid, bf); /* * Dispatch the frame. */ ath_tx_xmit_normal(sc, txq, bf); } else { /* Busy; queue */ DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: swq'ing\n", __func__); ATH_TID_INSERT_TAIL(atid, bf, bf_list); ath_tx_tid_sched(sc, atid); } } /* * Only set the clrdmask bit if none of the nodes are currently * filtered. * * XXX TODO: go through all the callers and check to see * which are being called in the context of looping over all * TIDs (eg, if all tids are being paused, resumed, etc.) * That'll avoid O(n^2) complexity here. */ static void ath_tx_set_clrdmask(struct ath_softc *sc, struct ath_node *an) { int i; ATH_TX_LOCK_ASSERT(sc); for (i = 0; i < IEEE80211_TID_SIZE; i++) { if (an->an_tid[i].isfiltered == 1) return; } an->clrdmask = 1; } /* * Configure the per-TID node state. * * This likely belongs in if_ath_node.c but I can't think of anywhere * else to put it just yet. * * This sets up the SLISTs and the mutex as appropriate. */ void ath_tx_tid_init(struct ath_softc *sc, struct ath_node *an) { int i, j; struct ath_tid *atid; for (i = 0; i < IEEE80211_TID_SIZE; i++) { atid = &an->an_tid[i]; /* XXX now with this bzer(), is the field 0'ing needed? */ bzero(atid, sizeof(*atid)); TAILQ_INIT(&atid->tid_q); TAILQ_INIT(&atid->filtq.tid_q); atid->tid = i; atid->an = an; for (j = 0; j < ATH_TID_MAX_BUFS; j++) atid->tx_buf[j] = NULL; atid->baw_head = atid->baw_tail = 0; atid->paused = 0; atid->sched = 0; atid->hwq_depth = 0; atid->cleanup_inprogress = 0; if (i == IEEE80211_NONQOS_TID) atid->ac = ATH_NONQOS_TID_AC; else atid->ac = TID_TO_WME_AC(i); } an->clrdmask = 1; /* Always start by setting this bit */ } /* * Pause the current TID. This stops packets from being transmitted * on it. * * Since this is also called from upper layers as well as the driver, * it will get the TID lock. */ static void ath_tx_tid_pause(struct ath_softc *sc, struct ath_tid *tid) { ATH_TX_LOCK_ASSERT(sc); tid->paused++; DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: [%6D]: tid=%d, paused = %d\n", __func__, tid->an->an_node.ni_macaddr, ":", tid->tid, tid->paused); } /* * Unpause the current TID, and schedule it if needed. */ static void ath_tx_tid_resume(struct ath_softc *sc, struct ath_tid *tid) { ATH_TX_LOCK_ASSERT(sc); /* * There's some odd places where ath_tx_tid_resume() is called * when it shouldn't be; this works around that particular issue * until it's actually resolved. */ if (tid->paused == 0) { device_printf(sc->sc_dev, "%s: [%6D]: tid=%d, paused=0?\n", __func__, tid->an->an_node.ni_macaddr, ":", tid->tid); } else { tid->paused--; } DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: [%6D]: tid=%d, unpaused = %d\n", __func__, tid->an->an_node.ni_macaddr, ":", tid->tid, tid->paused); if (tid->paused) return; /* * Override the clrdmask configuration for the next frame * from this TID, just to get the ball rolling. 
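*/

/*
 * Illustrative aside: "paused" is a count, not a flag.  Every
 * ath_tx_tid_pause() must be matched by exactly one ath_tx_tid_resume(), and
 * the TID only becomes schedulable again once the count reaches zero.  A
 * made-up sequence of events, with two independent reasons holding the TID:
 *
 *	pause()  -> paused = 1	(eg a BAR is outstanding)
 *	pause()  -> paused = 2	(eg a cleanup is in progress)
 *	resume() -> paused = 1	(BAR completed; still held)
 *	resume() -> paused = 0	(cleanup finished; TID is scheduled again)
 */

/*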
*/ ath_tx_set_clrdmask(sc, tid->an); if (tid->axq_depth == 0) return; /* XXX isfiltered shouldn't ever be 0 at this point */ if (tid->isfiltered == 1) { DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: filtered?!\n", __func__); return; } ath_tx_tid_sched(sc, tid); /* * Queue the software TX scheduler. */ ath_tx_swq_kick(sc); } /* * Add the given ath_buf to the TID filtered frame list. * This requires the TID be filtered. */ static void ath_tx_tid_filt_addbuf(struct ath_softc *sc, struct ath_tid *tid, struct ath_buf *bf) { ATH_TX_LOCK_ASSERT(sc); if (!tid->isfiltered) DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: not filtered?!\n", __func__); DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: bf=%p\n", __func__, bf); /* Set the retry bit and bump the retry counter */ ath_tx_set_retry(sc, bf); sc->sc_stats.ast_tx_swfiltered++; ATH_TID_FILT_INSERT_TAIL(tid, bf, bf_list); } /* * Handle a completed filtered frame from the given TID. * This just enables/pauses the filtered frame state if required * and appends the filtered frame to the filtered queue. */ static void ath_tx_tid_filt_comp_buf(struct ath_softc *sc, struct ath_tid *tid, struct ath_buf *bf) { ATH_TX_LOCK_ASSERT(sc); if (! tid->isfiltered) { DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: tid=%d; filter transition\n", __func__, tid->tid); tid->isfiltered = 1; ath_tx_tid_pause(sc, tid); } /* Add the frame to the filter queue */ ath_tx_tid_filt_addbuf(sc, tid, bf); } /* * Complete the filtered frame TX completion. * * If there are no more frames in the hardware queue, unpause/unfilter * the TID if applicable. Otherwise we will wait for a node PS transition * to unfilter. */ static void ath_tx_tid_filt_comp_complete(struct ath_softc *sc, struct ath_tid *tid) { struct ath_buf *bf; int do_resume = 0; ATH_TX_LOCK_ASSERT(sc); if (tid->hwq_depth != 0) return; DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: tid=%d, hwq=0, transition back\n", __func__, tid->tid); if (tid->isfiltered == 1) { tid->isfiltered = 0; do_resume = 1; } /* XXX ath_tx_tid_resume() also calls ath_tx_set_clrdmask()! */ ath_tx_set_clrdmask(sc, tid->an); /* XXX this is really quite inefficient */ while ((bf = ATH_TID_FILT_LAST(tid, ath_bufhead_s)) != NULL) { ATH_TID_FILT_REMOVE(tid, bf, bf_list); ATH_TID_INSERT_HEAD(tid, bf, bf_list); } /* And only resume if we had paused before */ if (do_resume) ath_tx_tid_resume(sc, tid); } /* * Called when a single (aggregate or otherwise) frame is completed. * * Returns 0 if the buffer could be added to the filtered list * (cloned or otherwise), 1 if the buffer couldn't be added to the * filtered list (failed clone; expired retry) and the caller should * free it and handle it like a failure (eg by sending a BAR.) * * since the buffer may be cloned, bf must be not touched after this * if the return value is 0. */ static int ath_tx_tid_filt_comp_single(struct ath_softc *sc, struct ath_tid *tid, struct ath_buf *bf) { struct ath_buf *nbf; int retval; ATH_TX_LOCK_ASSERT(sc); /* * Don't allow a filtered frame to live forever. */ if (bf->bf_state.bfs_retries > SWMAX_RETRIES) { sc->sc_stats.ast_tx_swretrymax++; DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: bf=%p, seqno=%d, exceeded retries\n", __func__, bf, SEQNO(bf->bf_state.bfs_seqno)); retval = 1; /* error */ goto finish; } /* * A busy buffer can't be added to the retry list. * It needs to be cloned. 
*/ if (bf->bf_flags & ATH_BUF_BUSY) { nbf = ath_tx_retry_clone(sc, tid->an, tid, bf); DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: busy buffer clone: %p -> %p\n", __func__, bf, nbf); } else { nbf = bf; } if (nbf == NULL) { DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: busy buffer couldn't be cloned (%p)!\n", __func__, bf); retval = 1; /* error */ } else { ath_tx_tid_filt_comp_buf(sc, tid, nbf); retval = 0; /* ok */ } finish: ath_tx_tid_filt_comp_complete(sc, tid); return (retval); } static void ath_tx_tid_filt_comp_aggr(struct ath_softc *sc, struct ath_tid *tid, struct ath_buf *bf_first, ath_bufhead *bf_q) { struct ath_buf *bf, *bf_next, *nbf; ATH_TX_LOCK_ASSERT(sc); bf = bf_first; while (bf) { bf_next = bf->bf_next; bf->bf_next = NULL; /* Remove it from the aggr list */ /* * Don't allow a filtered frame to live forever. */ if (bf->bf_state.bfs_retries > SWMAX_RETRIES) { sc->sc_stats.ast_tx_swretrymax++; DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: tid=%d, bf=%p, seqno=%d, exceeded retries\n", __func__, tid->tid, bf, SEQNO(bf->bf_state.bfs_seqno)); TAILQ_INSERT_TAIL(bf_q, bf, bf_list); goto next; } if (bf->bf_flags & ATH_BUF_BUSY) { nbf = ath_tx_retry_clone(sc, tid->an, tid, bf); DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: tid=%d, busy buffer cloned: %p -> %p, seqno=%d\n", __func__, tid->tid, bf, nbf, SEQNO(bf->bf_state.bfs_seqno)); } else { nbf = bf; } /* * If the buffer couldn't be cloned, add it to bf_q; * the caller will free the buffer(s) as required. */ if (nbf == NULL) { DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: tid=%d, buffer couldn't be cloned! (%p) seqno=%d\n", __func__, tid->tid, bf, SEQNO(bf->bf_state.bfs_seqno)); TAILQ_INSERT_TAIL(bf_q, bf, bf_list); } else { ath_tx_tid_filt_comp_buf(sc, tid, nbf); } next: bf = bf_next; } ath_tx_tid_filt_comp_complete(sc, tid); } /* * Suspend the queue because we need to TX a BAR. */ static void ath_tx_tid_bar_suspend(struct ath_softc *sc, struct ath_tid *tid) { ATH_TX_LOCK_ASSERT(sc); DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, "%s: tid=%d, bar_wait=%d, bar_tx=%d, called\n", __func__, tid->tid, tid->bar_wait, tid->bar_tx); /* We shouldn't be called when bar_tx is 1 */ if (tid->bar_tx) { DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, "%s: bar_tx is 1?!\n", __func__); } /* If we've already been called, just be patient. */ if (tid->bar_wait) return; /* Wait! */ tid->bar_wait = 1; /* Only one pause, no matter how many frames fail */ ath_tx_tid_pause(sc, tid); } /* * We've finished with BAR handling - either we succeeded or * failed. Either way, unsuspend TX. */ static void ath_tx_tid_bar_unsuspend(struct ath_softc *sc, struct ath_tid *tid) { ATH_TX_LOCK_ASSERT(sc); DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, "%s: %6D: TID=%d, called\n", __func__, tid->an->an_node.ni_macaddr, ":", tid->tid); if (tid->bar_tx == 0 || tid->bar_wait == 0) { DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, "%s: %6D: TID=%d, bar_tx=%d, bar_wait=%d: ?\n", __func__, tid->an->an_node.ni_macaddr, ":", tid->tid, tid->bar_tx, tid->bar_wait); } tid->bar_tx = tid->bar_wait = 0; ath_tx_tid_resume(sc, tid); } /* * Return whether we're ready to TX a BAR frame. * * Requires the TID lock be held. */ static int ath_tx_tid_bar_tx_ready(struct ath_softc *sc, struct ath_tid *tid) { ATH_TX_LOCK_ASSERT(sc); if (tid->bar_wait == 0 || tid->hwq_depth > 0) return (0); DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, "%s: %6D: TID=%d, bar ready\n", __func__, tid->an->an_node.ni_macaddr, ":", tid->tid); return (1); } /* * Check whether the current TID is ready to have a BAR * TXed and if so, do the TX. 
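*/

#if 0
/*
 * Illustrative sketch, never compiled: the conditions under which a BAR
 * actually goes out, folding together ath_tx_tid_bar_tx_ready() above and
 * the guard at the top of ath_tx_tid_bar_tx() below.  The ex_ struct is
 * invented; the real fields live in struct ath_tid.
 */
struct ex_bar_state {
	int	bar_wait;	/* a BAR needs to be sent */
	int	bar_tx;		/* a BAR send is already in progress */
	int	hwq_depth;	/* TID frames still pending in hardware */
};

static int
ex_bar_can_tx(const struct ex_bar_state *bs)
{
	/*
	 * Wait for every outstanding frame on the TID to complete first, so
	 * the new BAW left edge handed to ieee80211_send_bar() is final.
	 */
	return (bs->bar_wait != 0 && bs->bar_tx == 0 && bs->hwq_depth == 0);
}
#endif

/*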
* * Since the TID/TXQ lock can't be held during a call to * ieee80211_send_bar(), we have to do the dirty thing of unlocking it, * sending the BAR and locking it again. * * Eventually, the code to send the BAR should be broken out * from this routine so the lock doesn't have to be reacquired * just to be immediately dropped by the caller. */ static void ath_tx_tid_bar_tx(struct ath_softc *sc, struct ath_tid *tid) { struct ieee80211_tx_ampdu *tap; ATH_TX_LOCK_ASSERT(sc); DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, "%s: %6D: TID=%d, called\n", __func__, tid->an->an_node.ni_macaddr, ":", tid->tid); tap = ath_tx_get_tx_tid(tid->an, tid->tid); /* * This is an error condition! */ if (tid->bar_wait == 0 || tid->bar_tx == 1) { DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, "%s: %6D: TID=%d, bar_tx=%d, bar_wait=%d: ?\n", __func__, tid->an->an_node.ni_macaddr, ":", tid->tid, tid->bar_tx, tid->bar_wait); return; } /* Don't do anything if we still have pending frames */ if (tid->hwq_depth > 0) { DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, "%s: %6D: TID=%d, hwq_depth=%d, waiting\n", __func__, tid->an->an_node.ni_macaddr, ":", tid->tid, tid->hwq_depth); return; } /* We're now about to TX */ tid->bar_tx = 1; /* * Override the clrdmask configuration for the next frame, * just to get the ball rolling. */ ath_tx_set_clrdmask(sc, tid->an); /* * Calculate new BAW left edge, now that all frames have either * succeeded or failed. * * XXX verify this is _actually_ the valid value to begin at! */ DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, "%s: %6D: TID=%d, new BAW left edge=%d\n", __func__, tid->an->an_node.ni_macaddr, ":", tid->tid, tap->txa_start); /* Try sending the BAR frame */ /* We can't hold the lock here! */ ATH_TX_UNLOCK(sc); if (ieee80211_send_bar(&tid->an->an_node, tap, tap->txa_start) == 0) { /* Success? Now we wait for notification that it's done */ ATH_TX_LOCK(sc); return; } /* Failure? For now, warn loudly and continue */ ATH_TX_LOCK(sc); DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, "%s: %6D: TID=%d, failed to TX BAR, continue!\n", __func__, tid->an->an_node.ni_macaddr, ":", tid->tid); ath_tx_tid_bar_unsuspend(sc, tid); } static void ath_tx_tid_drain_pkt(struct ath_softc *sc, struct ath_node *an, struct ath_tid *tid, ath_bufhead *bf_cq, struct ath_buf *bf) { ATH_TX_LOCK_ASSERT(sc); /* * If the current TID is running AMPDU, update * the BAW. */ if (ath_tx_ampdu_running(sc, an, tid->tid) && bf->bf_state.bfs_dobaw) { /* * Only remove the frame from the BAW if it's * been transmitted at least once; this means * the frame was in the BAW to begin with. */ if (bf->bf_state.bfs_retries > 0) { ath_tx_update_baw(sc, an, tid, bf); bf->bf_state.bfs_dobaw = 0; } #if 0 /* * This has become a non-fatal error now */ if (! 
bf->bf_state.bfs_addedbaw) DPRINTF(sc, ATH_DEBUG_SW_TX_BAW "%s: wasn't added: seqno %d\n", __func__, SEQNO(bf->bf_state.bfs_seqno)); #endif } /* Strip it out of an aggregate list if it was in one */ bf->bf_next = NULL; /* Insert on the free queue to be freed by the caller */ TAILQ_INSERT_TAIL(bf_cq, bf, bf_list); } static void ath_tx_tid_drain_print(struct ath_softc *sc, struct ath_node *an, const char *pfx, struct ath_tid *tid, struct ath_buf *bf) { struct ieee80211_node *ni = &an->an_node; struct ath_txq *txq; struct ieee80211_tx_ampdu *tap; txq = sc->sc_ac2q[tid->ac]; tap = ath_tx_get_tx_tid(an, tid->tid); DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET, "%s: %s: %6D: bf=%p: addbaw=%d, dobaw=%d, " "seqno=%d, retry=%d\n", __func__, pfx, ni->ni_macaddr, ":", bf, bf->bf_state.bfs_addedbaw, bf->bf_state.bfs_dobaw, SEQNO(bf->bf_state.bfs_seqno), bf->bf_state.bfs_retries); DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET, "%s: %s: %6D: bf=%p: txq[%d] axq_depth=%d, axq_aggr_depth=%d\n", __func__, pfx, ni->ni_macaddr, ":", bf, txq->axq_qnum, txq->axq_depth, txq->axq_aggr_depth); DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET, "%s: %s: %6D: bf=%p: tid txq_depth=%d hwq_depth=%d, bar_wait=%d, " "isfiltered=%d\n", __func__, pfx, ni->ni_macaddr, ":", bf, tid->axq_depth, tid->hwq_depth, tid->bar_wait, tid->isfiltered); DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET, "%s: %s: %6D: tid %d: " "sched=%d, paused=%d, " "incomp=%d, baw_head=%d, " "baw_tail=%d txa_start=%d, ni_txseqs=%d\n", __func__, pfx, ni->ni_macaddr, ":", tid->tid, tid->sched, tid->paused, tid->incomp, tid->baw_head, tid->baw_tail, tap == NULL ? -1 : tap->txa_start, ni->ni_txseqs[tid->tid]); /* XXX Dump the frame, see what it is? */ if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT)) ieee80211_dump_pkt(ni->ni_ic, mtod(bf->bf_m, const uint8_t *), bf->bf_m->m_len, 0, -1); } /* * Free any packets currently pending in the software TX queue. * * This will be called when a node is being deleted. * * It can also be called on an active node during an interface * reset or state transition. * * (From Linux/reference): * * TODO: For frame(s) that are in the retry state, we will reuse the * sequence number(s) without setting the retry bit. The * alternative is to give up on these and BAR the receiver's window * forward. */ static void ath_tx_tid_drain(struct ath_softc *sc, struct ath_node *an, struct ath_tid *tid, ath_bufhead *bf_cq) { struct ath_buf *bf; struct ieee80211_tx_ampdu *tap; struct ieee80211_node *ni = &an->an_node; int t; tap = ath_tx_get_tx_tid(an, tid->tid); ATH_TX_LOCK_ASSERT(sc); /* Walk the queue, free frames */ t = 0; for (;;) { bf = ATH_TID_FIRST(tid); if (bf == NULL) { break; } if (t == 0) { ath_tx_tid_drain_print(sc, an, "norm", tid, bf); // t = 1; } ATH_TID_REMOVE(tid, bf, bf_list); ath_tx_tid_drain_pkt(sc, an, tid, bf_cq, bf); } /* And now, drain the filtered frame queue */ t = 0; for (;;) { bf = ATH_TID_FILT_FIRST(tid); if (bf == NULL) break; if (t == 0) { ath_tx_tid_drain_print(sc, an, "filt", tid, bf); // t = 1; } ATH_TID_FILT_REMOVE(tid, bf, bf_list); ath_tx_tid_drain_pkt(sc, an, tid, bf_cq, bf); } /* * Override the clrdmask configuration for the next frame * in case there is some future transmission, just to get * the ball rolling. * * This won't hurt things if the TID is about to be freed. */ ath_tx_set_clrdmask(sc, tid->an); /* * Now that it's completed, grab the TID lock and update * the sequence number and BAW window. 
* Because sequence numbers have been assigned to frames * that haven't been sent yet, it's entirely possible * we'll be called with some pending frames that have not * been transmitted. * * The cleaner solution is to do the sequence number allocation * when the packet is first transmitted - and thus the "retries" * check above would be enough to update the BAW/seqno. */ /* But don't do it for non-QoS TIDs */ if (tap) { #if 1 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: %6D: node %p: TID %d: sliding BAW left edge to %d\n", __func__, ni->ni_macaddr, ":", an, tid->tid, tap->txa_start); #endif ni->ni_txseqs[tid->tid] = tap->txa_start; tid->baw_tail = tid->baw_head; } } /* * Reset the TID state. This must be only called once the node has * had its frames flushed from this TID, to ensure that no other * pause / unpause logic can kick in. */ static void ath_tx_tid_reset(struct ath_softc *sc, struct ath_tid *tid) { #if 0 tid->bar_wait = tid->bar_tx = tid->isfiltered = 0; tid->paused = tid->sched = tid->addba_tx_pending = 0; tid->incomp = tid->cleanup_inprogress = 0; #endif /* * If we have a bar_wait set, we need to unpause the TID * here. Otherwise once cleanup has finished, the TID won't * have the right paused counter. * * XXX I'm not going through resume here - I don't want the * node to be rescheuled just yet. This however should be * methodized! */ if (tid->bar_wait) { if (tid->paused > 0) { tid->paused --; } } /* * XXX same with a currently filtered TID. * * Since this is being called during a flush, we assume that * the filtered frame list is actually empty. * * XXX TODO: add in a check to ensure that the filtered queue * depth is actually 0! */ if (tid->isfiltered) { if (tid->paused > 0) { tid->paused --; } } /* * Clear BAR, filtered frames, scheduled and ADDBA pending. * The TID may be going through cleanup from the last association * where things in the BAW are still in the hardware queue. */ tid->bar_wait = 0; tid->bar_tx = 0; tid->isfiltered = 0; tid->sched = 0; tid->addba_tx_pending = 0; /* * XXX TODO: it may just be enough to walk the HWQs and mark * frames for that node as non-aggregate; or mark the ath_node * with something that indicates that aggregation is no longer * occurring. Then we can just toss the BAW complaints and * do a complete hard reset of state here - no pause, no * complete counter, etc. */ } /* * Flush all software queued packets for the given node. * * This occurs when a completion handler frees the last buffer * for a node, and the node is thus freed. This causes the node * to be cleaned up, which ends up calling ath_tx_node_flush. 
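*/

#if 0
/*
 * Illustrative sketch, never compiled: the drain/flush pattern used by
 * ath_tx_tid_drain() above and ath_tx_node_flush()/ath_tx_txq_drain() below.
 * Buffers are unlinked onto a local list while the TX lock is held and their
 * completion handlers only run after the lock has been dropped.  The ex_
 * types and the callback are invented; only the sys/queue.h TAILQ macros are
 * real.
 */
struct ex_buf {
	TAILQ_ENTRY(ex_buf) link;
};
TAILQ_HEAD(ex_bufhead, ex_buf);

static void
ex_drain_then_complete(struct ex_bufhead *src,
    void (*complete)(struct ex_buf *))
{
	struct ex_bufhead cq = TAILQ_HEAD_INITIALIZER(cq);
	struct ex_buf *b;

	/* The TX lock would be held for this unlink phase ... */
	while ((b = TAILQ_FIRST(src)) != NULL) {
		TAILQ_REMOVE(src, b, link);
		TAILQ_INSERT_TAIL(&cq, b, link);
	}
	/* ... and dropped here, before any completion handler is invoked. */

	while ((b = TAILQ_FIRST(&cq)) != NULL) {
		TAILQ_REMOVE(&cq, b, link);
		complete(b);
	}
}
#endif

/*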
*/ void ath_tx_node_flush(struct ath_softc *sc, struct ath_node *an) { int tid; ath_bufhead bf_cq; struct ath_buf *bf; TAILQ_INIT(&bf_cq); ATH_KTR(sc, ATH_KTR_NODE, 1, "ath_tx_node_flush: flush node; ni=%p", &an->an_node); ATH_TX_LOCK(sc); DPRINTF(sc, ATH_DEBUG_NODE, "%s: %6D: flush; is_powersave=%d, stack_psq=%d, tim=%d, " "swq_depth=%d, clrdmask=%d, leak_count=%d\n", __func__, an->an_node.ni_macaddr, ":", an->an_is_powersave, an->an_stack_psq, an->an_tim_set, an->an_swq_depth, an->clrdmask, an->an_leak_count); for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) { struct ath_tid *atid = &an->an_tid[tid]; /* Free packets */ ath_tx_tid_drain(sc, an, atid, &bf_cq); /* Remove this tid from the list of active tids */ ath_tx_tid_unsched(sc, atid); /* Reset the per-TID pause, BAR, etc state */ ath_tx_tid_reset(sc, atid); } /* * Clear global leak count */ an->an_leak_count = 0; ATH_TX_UNLOCK(sc); /* Handle completed frames */ while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) { TAILQ_REMOVE(&bf_cq, bf, bf_list); ath_tx_default_comp(sc, bf, 0); } } /* * Drain all the software TXQs currently with traffic queued. */ void ath_tx_txq_drain(struct ath_softc *sc, struct ath_txq *txq) { struct ath_tid *tid; ath_bufhead bf_cq; struct ath_buf *bf; TAILQ_INIT(&bf_cq); ATH_TX_LOCK(sc); /* * Iterate over all active tids for the given txq, * flushing and unsched'ing them */ while (! TAILQ_EMPTY(&txq->axq_tidq)) { tid = TAILQ_FIRST(&txq->axq_tidq); ath_tx_tid_drain(sc, tid->an, tid, &bf_cq); ath_tx_tid_unsched(sc, tid); } ATH_TX_UNLOCK(sc); while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) { TAILQ_REMOVE(&bf_cq, bf, bf_list); ath_tx_default_comp(sc, bf, 0); } } /* * Handle completion of non-aggregate session frames. * * This (currently) doesn't implement software retransmission of * non-aggregate frames! * * Software retransmission of non-aggregate frames needs to obey * the strict sequence number ordering, and drop any frames that * will fail this. * * For now, filtered frames and frame transmission will cause * all kinds of issues. So we don't support them. * * So anyone queuing frames via ath_tx_normal_xmit() or * ath_tx_hw_queue_norm() must override and set CLRDMASK. */ void ath_tx_normal_comp(struct ath_softc *sc, struct ath_buf *bf, int fail) { struct ieee80211_node *ni = bf->bf_node; struct ath_node *an = ATH_NODE(ni); int tid = bf->bf_state.bfs_tid; struct ath_tid *atid = &an->an_tid[tid]; struct ath_tx_status *ts = &bf->bf_status.ds_txstat; /* The TID state is protected behind the TXQ lock */ ATH_TX_LOCK(sc); DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p: fail=%d, hwq_depth now %d\n", __func__, bf, fail, atid->hwq_depth - 1); atid->hwq_depth--; #if 0 /* * If the frame was filtered, stick it on the filter frame * queue and complain about it. It shouldn't happen! */ if ((ts->ts_status & HAL_TXERR_FILT) || (ts->ts_status != 0 && atid->isfiltered)) { DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: isfiltered=%d, ts_status=%d: huh?\n", __func__, atid->isfiltered, ts->ts_status); ath_tx_tid_filt_comp_buf(sc, atid, bf); } #endif if (atid->isfiltered) DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: filtered?!\n", __func__); if (atid->hwq_depth < 0) DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: hwq_depth < 0: %d\n", __func__, atid->hwq_depth); /* If the TID is being cleaned up, track things */ /* XXX refactor! */ if (atid->cleanup_inprogress) { atid->incomp--; if (atid->incomp == 0) { DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: TID %d: cleaned up! 
resume!\n", __func__, tid); atid->cleanup_inprogress = 0; ath_tx_tid_resume(sc, atid); } } /* * If the queue is filtered, potentially mark it as complete * and reschedule it as needed. * * This is required as there may be a subsequent TX descriptor * for this end-node that has CLRDMASK set, so it's quite possible * that a filtered frame will be followed by a non-filtered * (complete or otherwise) frame. * * XXX should we do this before we complete the frame? */ if (atid->isfiltered) ath_tx_tid_filt_comp_complete(sc, atid); ATH_TX_UNLOCK(sc); /* * punt to rate control if we're not being cleaned up * during a hw queue drain and the frame wanted an ACK. */ if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)) ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc, ts, bf->bf_state.bfs_pktlen, bf->bf_state.bfs_pktlen, 1, (ts->ts_status == 0) ? 0 : 1); ath_tx_default_comp(sc, bf, fail); } /* * Handle cleanup of aggregate session packets that aren't * an A-MPDU. * * There's no need to update the BAW here - the session is being * torn down. */ static void ath_tx_comp_cleanup_unaggr(struct ath_softc *sc, struct ath_buf *bf) { struct ieee80211_node *ni = bf->bf_node; struct ath_node *an = ATH_NODE(ni); int tid = bf->bf_state.bfs_tid; struct ath_tid *atid = &an->an_tid[tid]; DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: TID %d: incomp=%d\n", __func__, tid, atid->incomp); ATH_TX_LOCK(sc); atid->incomp--; /* XXX refactor! */ if (bf->bf_state.bfs_dobaw) { ath_tx_update_baw(sc, an, atid, bf); if (!bf->bf_state.bfs_addedbaw) DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: wasn't added: seqno %d\n", __func__, SEQNO(bf->bf_state.bfs_seqno)); } if (atid->incomp == 0) { DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: TID %d: cleaned up! resume!\n", __func__, tid); atid->cleanup_inprogress = 0; ath_tx_tid_resume(sc, atid); } ATH_TX_UNLOCK(sc); ath_tx_default_comp(sc, bf, 0); } /* * This as it currently stands is a bit dumb. Ideally we'd just * fail the frame the normal way and have it permanently fail * via the normal aggregate completion path. */ static void ath_tx_tid_cleanup_frame(struct ath_softc *sc, struct ath_node *an, int tid, struct ath_buf *bf_head, ath_bufhead *bf_cq) { struct ath_tid *atid = &an->an_tid[tid]; struct ath_buf *bf, *bf_next; ATH_TX_LOCK_ASSERT(sc); /* * Remove this frame from the queue. */ ATH_TID_REMOVE(atid, bf_head, bf_list); /* * Loop over all the frames in the aggregate. */ bf = bf_head; while (bf != NULL) { bf_next = bf->bf_next; /* next aggregate frame, or NULL */ /* * If it's been added to the BAW we need to kick * it out of the BAW before we continue. * * XXX if it's an aggregate, assert that it's in the * BAW - we shouldn't have it be in an aggregate * otherwise! */ if (bf->bf_state.bfs_addedbaw) { ath_tx_update_baw(sc, an, atid, bf); bf->bf_state.bfs_dobaw = 0; } /* * Give it the default completion handler. */ bf->bf_comp = ath_tx_normal_comp; bf->bf_next = NULL; /* * Add it to the list to free. */ TAILQ_INSERT_TAIL(bf_cq, bf, bf_list); /* * Now advance to the next frame in the aggregate. */ bf = bf_next; } } /* * Performs transmit side cleanup when TID changes from aggregated to * unaggregated and during reassociation. * * For now, this just tosses everything from the TID software queue * whether or not it has been retried and marks the TID as * pending completion if there's anything for this TID queued to * the hardware. * * The caller is responsible for pausing the TID and unpausing the * TID if no cleanup was required. 
Otherwise the cleanup path will * unpause the TID once the last hardware queued frame is completed. */ static void ath_tx_tid_cleanup(struct ath_softc *sc, struct ath_node *an, int tid, ath_bufhead *bf_cq) { struct ath_tid *atid = &an->an_tid[tid]; struct ath_buf *bf, *bf_next; ATH_TX_LOCK_ASSERT(sc); DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, "%s: TID %d: called; inprogress=%d\n", __func__, tid, atid->cleanup_inprogress); /* * Move the filtered frames to the TX queue, before * we run off and discard/process things. */ /* XXX this is really quite inefficient */ while ((bf = ATH_TID_FILT_LAST(atid, ath_bufhead_s)) != NULL) { ATH_TID_FILT_REMOVE(atid, bf, bf_list); ATH_TID_INSERT_HEAD(atid, bf, bf_list); } /* * Update the frames in the software TX queue: * * + Discard retry frames in the queue * + Fix the completion function to be non-aggregate */ bf = ATH_TID_FIRST(atid); while (bf) { /* * Grab the next frame in the list, we may * be fiddling with the list. */ bf_next = TAILQ_NEXT(bf, bf_list); /* * Free the frame and all subframes. */ ath_tx_tid_cleanup_frame(sc, an, tid, bf, bf_cq); /* * Next frame! */ bf = bf_next; } /* * If there's anything in the hardware queue we wait * for the TID HWQ to empty. */ if (atid->hwq_depth > 0) { /* * XXX how about we kill atid->incomp, and instead * replace it with a macro that checks that atid->hwq_depth * is 0? */ atid->incomp = atid->hwq_depth; atid->cleanup_inprogress = 1; } if (atid->cleanup_inprogress) DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: TID %d: cleanup needed: %d packets\n", __func__, tid, atid->incomp); /* Owner now must free completed frames */ } static struct ath_buf * ath_tx_retry_clone(struct ath_softc *sc, struct ath_node *an, struct ath_tid *tid, struct ath_buf *bf) { struct ath_buf *nbf; int error; /* * Clone the buffer. This will handle the dma unmap and * copy the node reference to the new buffer. If this * works out, 'bf' will have no DMA mapping, no mbuf * pointer and no node reference. */ nbf = ath_buf_clone(sc, bf); #if 0 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: ATH_BUF_BUSY; cloning\n", __func__); #endif if (nbf == NULL) { /* Failed to clone */ DPRINTF(sc, ATH_DEBUG_XMIT, "%s: failed to clone a busy buffer\n", __func__); return NULL; } /* Setup the dma for the new buffer */ error = ath_tx_dmasetup(sc, nbf, nbf->bf_m); if (error != 0) { DPRINTF(sc, ATH_DEBUG_XMIT, "%s: failed to setup dma for clone\n", __func__); /* * Put this at the head of the list, not tail; * that way it doesn't interfere with the * busy buffer logic (which uses the tail of * the list.) */ ATH_TXBUF_LOCK(sc); ath_returnbuf_head(sc, nbf); ATH_TXBUF_UNLOCK(sc); return NULL; } /* Update BAW if required, before we free the original buf */ if (bf->bf_state.bfs_dobaw) ath_tx_switch_baw_buf(sc, an, tid, bf, nbf); /* Free original buffer; return new buffer */ ath_freebuf(sc, bf); return nbf; } /* * Handle retrying an unaggregate frame in an aggregate * session. * * If too many retries occur, pause the TID, wait for * any further retransmits (as there's no reason why * non-aggregate frames in an aggregate session are * transmitted in-order; they just have to be in-BAW) * and then queue a BAR. */ static void ath_tx_aggr_retry_unaggr(struct ath_softc *sc, struct ath_buf *bf) { struct ieee80211_node *ni = bf->bf_node; struct ath_node *an = ATH_NODE(ni); int tid = bf->bf_state.bfs_tid; struct ath_tid *atid = &an->an_tid[tid]; struct ieee80211_tx_ampdu *tap; ATH_TX_LOCK(sc); tap = ath_tx_get_tx_tid(an, tid); /* * If the buffer is marked as busy, we can't directly * reuse it. 
Instead, try to clone the buffer. * If the clone is successful, recycle the old buffer. * If the clone is unsuccessful, set bfs_retries to max * to force the next bit of code to free the buffer * for us. */ if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) && (bf->bf_flags & ATH_BUF_BUSY)) { struct ath_buf *nbf; nbf = ath_tx_retry_clone(sc, an, atid, bf); if (nbf) /* bf has been freed at this point */ bf = nbf; else bf->bf_state.bfs_retries = SWMAX_RETRIES + 1; } if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) { DPRINTF(sc, ATH_DEBUG_SW_TX_RETRIES, "%s: exceeded retries; seqno %d\n", __func__, SEQNO(bf->bf_state.bfs_seqno)); sc->sc_stats.ast_tx_swretrymax++; /* Update BAW anyway */ if (bf->bf_state.bfs_dobaw) { ath_tx_update_baw(sc, an, atid, bf); if (! bf->bf_state.bfs_addedbaw) DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, "%s: wasn't added: seqno %d\n", __func__, SEQNO(bf->bf_state.bfs_seqno)); } bf->bf_state.bfs_dobaw = 0; /* Suspend the TX queue and get ready to send the BAR */ ath_tx_tid_bar_suspend(sc, atid); /* Send the BAR if there are no other frames waiting */ if (ath_tx_tid_bar_tx_ready(sc, atid)) ath_tx_tid_bar_tx(sc, atid); ATH_TX_UNLOCK(sc); /* Free buffer, bf is free after this call */ ath_tx_default_comp(sc, bf, 0); return; } /* * This increments the retry counter as well as * sets the retry flag in the ath_buf and packet * body. */ ath_tx_set_retry(sc, bf); sc->sc_stats.ast_tx_swretries++; /* * Insert this at the head of the queue, so it's * retried before any current/subsequent frames. */ ATH_TID_INSERT_HEAD(atid, bf, bf_list); ath_tx_tid_sched(sc, atid); /* Send the BAR if there are no other frames waiting */ if (ath_tx_tid_bar_tx_ready(sc, atid)) ath_tx_tid_bar_tx(sc, atid); ATH_TX_UNLOCK(sc); } /* * Common code for aggregate excessive retry/subframe retry. * If retrying, queues buffers to bf_q. If not, frees the * buffers. * * XXX should unify this with ath_tx_aggr_retry_unaggr() */ static int ath_tx_retry_subframe(struct ath_softc *sc, struct ath_buf *bf, ath_bufhead *bf_q) { struct ieee80211_node *ni = bf->bf_node; struct ath_node *an = ATH_NODE(ni); int tid = bf->bf_state.bfs_tid; struct ath_tid *atid = &an->an_tid[tid]; ATH_TX_LOCK_ASSERT(sc); /* XXX clr11naggr should be done for all subframes */ ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc); ath_hal_set11nburstduration(sc->sc_ah, bf->bf_desc, 0); /* ath_hal_set11n_virtualmorefrag(sc->sc_ah, bf->bf_desc, 0); */ /* * If the buffer is marked as busy, we can't directly * reuse it. Instead, try to clone the buffer. * If the clone is successful, recycle the old buffer. * If the clone is unsuccessful, set bfs_retries to max * to force the next bit of code to free the buffer * for us. */ if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) && (bf->bf_flags & ATH_BUF_BUSY)) { struct ath_buf *nbf; nbf = ath_tx_retry_clone(sc, an, atid, bf); if (nbf) /* bf has been freed at this point */ bf = nbf; else bf->bf_state.bfs_retries = SWMAX_RETRIES + 1; } if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) { sc->sc_stats.ast_tx_swretrymax++; DPRINTF(sc, ATH_DEBUG_SW_TX_RETRIES, "%s: max retries: seqno %d\n", __func__, SEQNO(bf->bf_state.bfs_seqno)); ath_tx_update_baw(sc, an, atid, bf); if (!bf->bf_state.bfs_addedbaw) DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, "%s: wasn't added: seqno %d\n", __func__, SEQNO(bf->bf_state.bfs_seqno)); bf->bf_state.bfs_dobaw = 0; return 1; } ath_tx_set_retry(sc, bf); sc->sc_stats.ast_tx_swretries++; bf->bf_next = NULL; /* Just to make sure */ /* Clear the aggregate state */ bf->bf_state.bfs_aggr = 0; bf->bf_state.bfs_ndelim = 0; /* ??? 
needed? */ bf->bf_state.bfs_nframes = 1; TAILQ_INSERT_TAIL(bf_q, bf, bf_list); return 0; } /* * error pkt completion for an aggregate destination */ static void ath_tx_comp_aggr_error(struct ath_softc *sc, struct ath_buf *bf_first, struct ath_tid *tid) { struct ieee80211_node *ni = bf_first->bf_node; struct ath_node *an = ATH_NODE(ni); struct ath_buf *bf_next, *bf; ath_bufhead bf_q; int drops = 0; struct ieee80211_tx_ampdu *tap; ath_bufhead bf_cq; TAILQ_INIT(&bf_q); TAILQ_INIT(&bf_cq); /* * Update rate control - all frames have failed. */ ath_tx_update_ratectrl(sc, ni, bf_first->bf_state.bfs_rc, &bf_first->bf_status.ds_txstat, bf_first->bf_state.bfs_al, bf_first->bf_state.bfs_rc_maxpktlen, bf_first->bf_state.bfs_nframes, bf_first->bf_state.bfs_nframes); ATH_TX_LOCK(sc); tap = ath_tx_get_tx_tid(an, tid->tid); sc->sc_stats.ast_tx_aggr_failall++; /* Retry all subframes */ bf = bf_first; while (bf) { bf_next = bf->bf_next; bf->bf_next = NULL; /* Remove it from the aggr list */ sc->sc_stats.ast_tx_aggr_fail++; if (ath_tx_retry_subframe(sc, bf, &bf_q)) { drops++; bf->bf_next = NULL; TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list); } bf = bf_next; } /* Prepend all frames to the beginning of the queue */ while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) { TAILQ_REMOVE(&bf_q, bf, bf_list); ATH_TID_INSERT_HEAD(tid, bf, bf_list); } /* * Schedule the TID to be re-tried. */ ath_tx_tid_sched(sc, tid); /* * send bar if we dropped any frames * * Keep the txq lock held for now, as we need to ensure * that ni_txseqs[] is consistent (as it's being updated * in the ifnet TX context or raw TX context.) */ if (drops) { /* Suspend the TX queue and get ready to send the BAR */ ath_tx_tid_bar_suspend(sc, tid); } /* * Send BAR if required */ if (ath_tx_tid_bar_tx_ready(sc, tid)) ath_tx_tid_bar_tx(sc, tid); ATH_TX_UNLOCK(sc); /* Complete frames which errored out */ while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) { TAILQ_REMOVE(&bf_cq, bf, bf_list); ath_tx_default_comp(sc, bf, 0); } } /* * Handle clean-up of packets from an aggregate list. * * There's no need to update the BAW here - the session is being * torn down. */ static void ath_tx_comp_cleanup_aggr(struct ath_softc *sc, struct ath_buf *bf_first) { struct ath_buf *bf, *bf_next; struct ieee80211_node *ni = bf_first->bf_node; struct ath_node *an = ATH_NODE(ni); int tid = bf_first->bf_state.bfs_tid; struct ath_tid *atid = &an->an_tid[tid]; ATH_TX_LOCK(sc); /* update incomp */ atid->incomp--; /* Update the BAW */ bf = bf_first; while (bf) { /* XXX refactor! */ if (bf->bf_state.bfs_dobaw) { ath_tx_update_baw(sc, an, atid, bf); if (!bf->bf_state.bfs_addedbaw) DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: wasn't added: seqno %d\n", __func__, SEQNO(bf->bf_state.bfs_seqno)); } bf = bf->bf_next; } if (atid->incomp == 0) { DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: TID %d: cleaned up! resume!\n", __func__, tid); atid->cleanup_inprogress = 0; ath_tx_tid_resume(sc, atid); } /* Send BAR if required */ /* XXX why would we send a BAR when transitioning to non-aggregation? */ /* * XXX TODO: we should likely just tear down the BAR state here, * rather than sending a BAR. */ if (ath_tx_tid_bar_tx_ready(sc, atid)) ath_tx_tid_bar_tx(sc, atid); ATH_TX_UNLOCK(sc); /* Handle frame completion as individual frames */ bf = bf_first; while (bf) { bf_next = bf->bf_next; bf->bf_next = NULL; ath_tx_default_comp(sc, bf, 1); bf = bf_next; } } /* * Handle completion of an set of aggregate frames. * * Note: the completion handler is the last descriptor in the aggregate, * not the last descriptor in the first frame. 
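 *
 * The TX status carries the starting sequence number (seq_st) and a
 * block-ack bitmap split across ba[0]/ba[1].  Each subframe in the
 * aggregate is checked against that bitmap, roughly:
 *
 *	idx = ATH_BA_INDEX(seq_st, SEQNO(bf->bf_state.bfs_seqno));
 *	if (tx_ok && ATH_BA_ISSET(ba, idx))
 *		subframe was ACKed: complete it and advance the BAW;
 *	else
 *		retry the subframe (or drop it, which forces a BAR.)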
*/ static void ath_tx_aggr_comp_aggr(struct ath_softc *sc, struct ath_buf *bf_first, int fail) { //struct ath_desc *ds = bf->bf_lastds; struct ieee80211_node *ni = bf_first->bf_node; struct ath_node *an = ATH_NODE(ni); int tid = bf_first->bf_state.bfs_tid; struct ath_tid *atid = &an->an_tid[tid]; struct ath_tx_status ts; struct ieee80211_tx_ampdu *tap; ath_bufhead bf_q; ath_bufhead bf_cq; int seq_st, tx_ok; int hasba, isaggr; uint32_t ba[2]; struct ath_buf *bf, *bf_next; int ba_index; int drops = 0; int nframes = 0, nbad = 0, nf; int pktlen; int agglen, rc_agglen; /* XXX there's too much on the stack? */ struct ath_rc_series rc[ATH_RC_NUM]; int txseq; DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: called; hwq_depth=%d\n", __func__, atid->hwq_depth); /* * Take a copy; this may be needed -after- bf_first * has been completed and freed. */ ts = bf_first->bf_status.ds_txstat; agglen = bf_first->bf_state.bfs_al; rc_agglen = bf_first->bf_state.bfs_rc_maxpktlen; TAILQ_INIT(&bf_q); TAILQ_INIT(&bf_cq); /* The TID state is kept behind the TXQ lock */ ATH_TX_LOCK(sc); atid->hwq_depth--; if (atid->hwq_depth < 0) DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: hwq_depth < 0: %d\n", __func__, atid->hwq_depth); /* * If the TID is filtered, handle completing the filter * transition before potentially kicking it to the cleanup * function. * * XXX this is duplicate work, ew. */ if (atid->isfiltered) ath_tx_tid_filt_comp_complete(sc, atid); /* * Punt cleanup to the relevant function, not our problem now */ if (atid->cleanup_inprogress) { if (atid->isfiltered) DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: isfiltered=1, normal_comp?\n", __func__); ATH_TX_UNLOCK(sc); ath_tx_comp_cleanup_aggr(sc, bf_first); return; } /* * If the frame is filtered, transition to filtered frame * mode and add this to the filtered frame list. * * XXX TODO: figure out how this interoperates with * BAR, pause and cleanup states. */ if ((ts.ts_status & HAL_TXERR_FILT) || (ts.ts_status != 0 && atid->isfiltered)) { if (fail != 0) DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: isfiltered=1, fail=%d\n", __func__, fail); ath_tx_tid_filt_comp_aggr(sc, atid, bf_first, &bf_cq); /* Remove from BAW */ TAILQ_FOREACH_SAFE(bf, &bf_cq, bf_list, bf_next) { if (bf->bf_state.bfs_addedbaw) drops++; if (bf->bf_state.bfs_dobaw) { ath_tx_update_baw(sc, an, atid, bf); if (!bf->bf_state.bfs_addedbaw) DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: wasn't added: seqno %d\n", __func__, SEQNO(bf->bf_state.bfs_seqno)); } bf->bf_state.bfs_dobaw = 0; } /* * If any intermediate frames in the BAW were dropped when * handling filtering things, send a BAR. */ if (drops) ath_tx_tid_bar_suspend(sc, atid); /* * Finish up by sending a BAR if required and freeing * the frames outside of the TX lock. */ goto finish_send_bar; } /* * XXX for now, use the first frame in the aggregate for * XXX rate control completion; it's at least consistent. */ pktlen = bf_first->bf_state.bfs_pktlen; /* * Handle errors first! * * Here, handle _any_ error as a "exceeded retries" error. * Later on (when filtered frames are to be specially handled) * it'll have to be expanded. */ #if 0 if (ts.ts_status & HAL_TXERR_XRETRY) { #endif if (ts.ts_status != 0) { ATH_TX_UNLOCK(sc); ath_tx_comp_aggr_error(sc, bf_first, atid); return; } tap = ath_tx_get_tx_tid(an, tid); /* * extract starting sequence and block-ack bitmap */ /* XXX endian-ness of seq_st, ba? */ seq_st = ts.ts_seqnum; hasba = !! 
(ts.ts_flags & HAL_TX_BA); tx_ok = (ts.ts_status == 0); isaggr = bf_first->bf_state.bfs_aggr; ba[0] = ts.ts_ba_low; ba[1] = ts.ts_ba_high; /* * Copy the TX completion status and the rate control * series from the first descriptor, as it may be freed * before the rate control code can get its grubby fingers * into things. */ memcpy(rc, bf_first->bf_state.bfs_rc, sizeof(rc)); DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: txa_start=%d, tx_ok=%d, status=%.8x, flags=%.8x, " "isaggr=%d, seq_st=%d, hasba=%d, ba=%.8x, %.8x\n", __func__, tap->txa_start, tx_ok, ts.ts_status, ts.ts_flags, isaggr, seq_st, hasba, ba[0], ba[1]); /* * The reference driver doesn't do this; it simply ignores * this check in its entirety. * * I've seen this occur when using iperf to send traffic * out tid 1 - the aggregate frames are all marked as TID 1, * but the TXSTATUS has TID=0. So, let's just ignore this * check. */ #if 0 /* Occasionally, the MAC sends a tx status for the wrong TID. */ if (tid != ts.ts_tid) { DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: tid %d != hw tid %d\n", __func__, tid, ts.ts_tid); tx_ok = 0; } #endif /* AR5416 BA bug; this requires an interface reset */ if (isaggr && tx_ok && (! hasba)) { device_printf(sc->sc_dev, "%s: AR5416 bug: hasba=%d; txok=%d, isaggr=%d, " "seq_st=%d\n", __func__, hasba, tx_ok, isaggr, seq_st); taskqueue_enqueue(sc->sc_tq, &sc->sc_fataltask); /* And as we can't really trust the BA here .. */ ba[0] = 0; ba[1] = 0; seq_st = 0; #ifdef ATH_DEBUG ath_printtxbuf(sc, bf_first, sc->sc_ac2q[atid->ac]->axq_qnum, 0, 0); #endif } /* * Walk the list of frames, figure out which ones were correctly * sent and which weren't. */ bf = bf_first; nf = bf_first->bf_state.bfs_nframes; /* bf_first is going to be invalid once this list is walked */ bf_first = NULL; /* * Walk the list of completed frames and determine * which need to be completed and which need to be * retransmitted. * * For completed frames, the completion functions need * to be called at the end of this function as the last * node reference may free the node. * * Finally, since the TXQ lock can't be held during the * completion callback (to avoid lock recursion), * the completion calls have to be done outside of the * lock. */ while (bf) { nframes++; ba_index = ATH_BA_INDEX(seq_st, SEQNO(bf->bf_state.bfs_seqno)); bf_next = bf->bf_next; bf->bf_next = NULL; /* Remove it from the aggr list */ DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: checking bf=%p seqno=%d; ack=%d\n", __func__, bf, SEQNO(bf->bf_state.bfs_seqno), ATH_BA_ISSET(ba, ba_index)); if (tx_ok && ATH_BA_ISSET(ba, ba_index)) { sc->sc_stats.ast_tx_aggr_ok++; ath_tx_update_baw(sc, an, atid, bf); bf->bf_state.bfs_dobaw = 0; if (!bf->bf_state.bfs_addedbaw) DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: wasn't added: seqno %d\n", __func__, SEQNO(bf->bf_state.bfs_seqno)); bf->bf_next = NULL; TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list); } else { sc->sc_stats.ast_tx_aggr_fail++; if (ath_tx_retry_subframe(sc, bf, &bf_q)) { drops++; bf->bf_next = NULL; TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list); } nbad++; } bf = bf_next; } /* * Now that the BAW updates have been done, unlock * * txseq is grabbed before the lock is released so we * have a consistent view of what -was- in the BAW. * Anything after this point will not yet have been * TXed. */ txseq = tap->txa_start; ATH_TX_UNLOCK(sc); if (nframes != nf) DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: num frames seen=%d; bf nframes=%d\n", __func__, nframes, nf); /* * Now we know how many frames were bad, call the rate * control code. 
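 *
 * Passing both nframes and nbad lets the rate control code account
 * for partial aggregate loss rather than treating the whole burst
 * as all-or-nothing.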
*/ if (fail == 0) { ath_tx_update_ratectrl(sc, ni, rc, &ts, agglen, rc_agglen, nframes, nbad); } /* * send bar if we dropped any frames */ if (drops) { /* Suspend the TX queue and get ready to send the BAR */ ATH_TX_LOCK(sc); ath_tx_tid_bar_suspend(sc, atid); ATH_TX_UNLOCK(sc); } DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: txa_start now %d\n", __func__, tap->txa_start); ATH_TX_LOCK(sc); /* Prepend all frames to the beginning of the queue */ while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) { TAILQ_REMOVE(&bf_q, bf, bf_list); ATH_TID_INSERT_HEAD(atid, bf, bf_list); } /* * Reschedule to grab some further frames. */ ath_tx_tid_sched(sc, atid); /* * If the queue is filtered, re-schedule as required. * * This is required as there may be a subsequent TX descriptor * for this end-node that has CLRDMASK set, so it's quite possible * that a filtered frame will be followed by a non-filtered * (complete or otherwise) frame. * * XXX should we do this before we complete the frame? */ if (atid->isfiltered) ath_tx_tid_filt_comp_complete(sc, atid); finish_send_bar: /* * Send BAR if required */ if (ath_tx_tid_bar_tx_ready(sc, atid)) ath_tx_tid_bar_tx(sc, atid); ATH_TX_UNLOCK(sc); /* Do deferred completion */ while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) { TAILQ_REMOVE(&bf_cq, bf, bf_list); ath_tx_default_comp(sc, bf, 0); } } /* * Handle completion of unaggregated frames in an ADDBA * session. * * Fail is set to 1 if the entry is being freed via a call to * ath_tx_draintxq(). */ static void ath_tx_aggr_comp_unaggr(struct ath_softc *sc, struct ath_buf *bf, int fail) { struct ieee80211_node *ni = bf->bf_node; struct ath_node *an = ATH_NODE(ni); int tid = bf->bf_state.bfs_tid; struct ath_tid *atid = &an->an_tid[tid]; struct ath_tx_status ts; int drops = 0; /* * Take a copy of this; filtering/cloning the frame may free the * bf pointer. */ ts = bf->bf_status.ds_txstat; /* * Update rate control status here, before we possibly * punt to retry or cleanup. * * Do it outside of the TXQ lock. */ if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)) ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc, &bf->bf_status.ds_txstat, bf->bf_state.bfs_pktlen, bf->bf_state.bfs_pktlen, 1, (ts.ts_status == 0) ? 0 : 1); /* * This is called early so atid->hwq_depth can be tracked. * This unfortunately means that it's released and regrabbed * during retry and cleanup. That's rather inefficient. */ ATH_TX_LOCK(sc); if (tid == IEEE80211_NONQOS_TID) DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=16!\n", __func__); DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p: tid=%d, hwq_depth=%d, seqno=%d\n", __func__, bf, bf->bf_state.bfs_tid, atid->hwq_depth, SEQNO(bf->bf_state.bfs_seqno)); atid->hwq_depth--; if (atid->hwq_depth < 0) DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: hwq_depth < 0: %d\n", __func__, atid->hwq_depth); /* * If the TID is filtered, handle completing the filter * transition before potentially kicking it to the cleanup * function. */ if (atid->isfiltered) ath_tx_tid_filt_comp_complete(sc, atid); /* * If a cleanup is in progress, punt to comp_cleanup; * rather than handling it here. It's thus their * responsibility to clean up, call the completion * function in net80211, etc. */ if (atid->cleanup_inprogress) { if (atid->isfiltered) DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: isfiltered=1, normal_comp?\n", __func__); ATH_TX_UNLOCK(sc); DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: cleanup_unaggr\n", __func__); ath_tx_comp_cleanup_unaggr(sc, bf); return; } /* * XXX TODO: how does cleanup, BAR and filtered frame handling * overlap? 
* * If the frame is filtered OR if it's any failure but * the TID is filtered, the frame must be added to the * filtered frame list. * * However - a busy buffer can't be added to the filtered * list as it will end up being recycled without having * been made available for the hardware. */ if ((ts.ts_status & HAL_TXERR_FILT) || (ts.ts_status != 0 && atid->isfiltered)) { int freeframe; if (fail != 0) DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: isfiltered=1, fail=%d\n", __func__, fail); freeframe = ath_tx_tid_filt_comp_single(sc, atid, bf); /* * If freeframe=0 then bf is no longer ours; don't * touch it. */ if (freeframe) { /* Remove from BAW */ if (bf->bf_state.bfs_addedbaw) drops++; if (bf->bf_state.bfs_dobaw) { ath_tx_update_baw(sc, an, atid, bf); if (!bf->bf_state.bfs_addedbaw) DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: wasn't added: seqno %d\n", __func__, SEQNO(bf->bf_state.bfs_seqno)); } bf->bf_state.bfs_dobaw = 0; } /* * If the frame couldn't be filtered, treat it as a drop and * prepare to send a BAR. */ if (freeframe && drops) ath_tx_tid_bar_suspend(sc, atid); /* * Send BAR if required */ if (ath_tx_tid_bar_tx_ready(sc, atid)) ath_tx_tid_bar_tx(sc, atid); ATH_TX_UNLOCK(sc); /* * If freeframe is set, then the frame couldn't be * cloned and bf is still valid. Just complete/free it. */ if (freeframe) ath_tx_default_comp(sc, bf, fail); return; } /* * Don't bother with the retry check if all frames * are being failed (eg during queue deletion.) */ #if 0 if (fail == 0 && ts->ts_status & HAL_TXERR_XRETRY) { #endif if (fail == 0 && ts.ts_status != 0) { ATH_TX_UNLOCK(sc); DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: retry_unaggr\n", __func__); ath_tx_aggr_retry_unaggr(sc, bf); return; } /* Success? Complete */ DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=%d, seqno %d\n", __func__, tid, SEQNO(bf->bf_state.bfs_seqno)); if (bf->bf_state.bfs_dobaw) { ath_tx_update_baw(sc, an, atid, bf); bf->bf_state.bfs_dobaw = 0; if (!bf->bf_state.bfs_addedbaw) DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: wasn't added: seqno %d\n", __func__, SEQNO(bf->bf_state.bfs_seqno)); } /* * If the queue is filtered, re-schedule as required. * * This is required as there may be a subsequent TX descriptor * for this end-node that has CLRDMASK set, so it's quite possible * that a filtered frame will be followed by a non-filtered * (complete or otherwise) frame. * * XXX should we do this before we complete the frame? */ if (atid->isfiltered) ath_tx_tid_filt_comp_complete(sc, atid); /* * Send BAR if required */ if (ath_tx_tid_bar_tx_ready(sc, atid)) ath_tx_tid_bar_tx(sc, atid); ATH_TX_UNLOCK(sc); ath_tx_default_comp(sc, bf, fail); /* bf is freed at this point */ } void ath_tx_aggr_comp(struct ath_softc *sc, struct ath_buf *bf, int fail) { if (bf->bf_state.bfs_aggr) ath_tx_aggr_comp_aggr(sc, bf, fail); else ath_tx_aggr_comp_unaggr(sc, bf, fail); } /* * Grab the software queue depth that we COULD transmit. * * This includes checks if it's in the BAW, whether it's a frame * that is supposed to be in the BAW. Other checks could be done; * but for now let's try and avoid doing the whole of ath_tx_form_aggr() * here. */ static int ath_tx_tid_swq_depth_bytes(struct ath_softc *sc, struct ath_node *an, struct ath_tid *tid) { struct ath_buf *bf; struct ieee80211_tx_ampdu *tap; int nbytes = 0; ATH_TX_LOCK_ASSERT(sc); tap = ath_tx_get_tx_tid(an, tid->tid); /* * Iterate over each buffer and sum the pkt_len. * Bail if we exceed ATH_AGGR_MAXSIZE bytes; we won't * ever queue more than that in a single frame. 
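 *
 * The walk also stops at the first frame that falls outside the BAW
 * (or isn't BAW-tracked at all), and after a single frame when we're
 * only leaking one frame out for power-save.  e.g. three queued
 * 1500 byte frames inside the BAW contribute 4500 bytes, well under
 * ATH_AGGR_MAXSIZE.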
*/ TAILQ_FOREACH(bf, &tid->tid_q, bf_list) { /* * TODO: I'm not sure if we're going to hit cases where * no frames get sent because the list is empty. */ /* Check if it's in the BAW */ if (tap != NULL && (! BAW_WITHIN(tap->txa_start, tap->txa_wnd, SEQNO(bf->bf_state.bfs_seqno)))) { break; } /* Check if it's even supposed to be in the BAW */ if (! bf->bf_state.bfs_dobaw) { break; } nbytes += bf->bf_state.bfs_pktlen; if (nbytes >= ATH_AGGR_MAXSIZE) break; /* * Check if we're likely going to leak a frame * as part of a PSPOLL. Break out at this point; * we're only going to send a single frame anyway. */ if (an->an_leak_count) { break; } } return MIN(nbytes, ATH_AGGR_MAXSIZE); } /* * Schedule some packets from the given node/TID to the hardware. * * This is the aggregate version. */ void ath_tx_tid_hw_queue_aggr(struct ath_softc *sc, struct ath_node *an, struct ath_tid *tid) { struct ath_buf *bf; struct ath_txq *txq = sc->sc_ac2q[tid->ac]; struct ieee80211_tx_ampdu *tap; ATH_AGGR_STATUS status; ath_bufhead bf_q; int swq_pktbytes; DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d\n", __func__, tid->tid); ATH_TX_LOCK_ASSERT(sc); /* * XXX TODO: If we're called for a queue that we're leaking frames to, * ensure we only leak one. */ tap = ath_tx_get_tx_tid(an, tid->tid); if (tid->tid == IEEE80211_NONQOS_TID) DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: called for TID=NONQOS_TID?\n", __func__); for (;;) { status = ATH_AGGR_DONE; /* * If the upper layer has paused the TID, don't * queue any further packets. * * This can also occur from the completion task because * of packet loss; but as its serialised with this code, * it won't "appear" half way through queuing packets. */ if (! ath_tx_tid_can_tx_or_sched(sc, tid)) break; bf = ATH_TID_FIRST(tid); if (bf == NULL) { break; } /* * If the packet doesn't fall within the BAW (eg a NULL * data frame), schedule it directly; continue. */ if (! bf->bf_state.bfs_dobaw) { DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: non-baw packet\n", __func__); ATH_TID_REMOVE(tid, bf, bf_list); if (bf->bf_state.bfs_nframes > 1) DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: aggr=%d, nframes=%d\n", __func__, bf->bf_state.bfs_aggr, bf->bf_state.bfs_nframes); /* * This shouldn't happen - such frames shouldn't * ever have been queued as an aggregate in the * first place. However, make sure the fields * are correctly setup just to be totally sure. */ bf->bf_state.bfs_aggr = 0; bf->bf_state.bfs_nframes = 1; /* Update CLRDMASK just before this frame is queued */ ath_tx_update_clrdmask(sc, tid, bf); ath_tx_do_ratelookup(sc, bf, tid->tid, bf->bf_state.bfs_pktlen, false); ath_tx_calc_duration(sc, bf); ath_tx_calc_protection(sc, bf); ath_tx_set_rtscts(sc, bf); ath_tx_rate_fill_rcflags(sc, bf); ath_tx_setds(sc, bf); ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc); sc->sc_aggr_stats.aggr_nonbaw_pkt++; /* Queue the packet; continue */ goto queuepkt; } TAILQ_INIT(&bf_q); /* * Loop over the swq to find out how long * each packet is (up until 64k) and provide that * to the rate control lookup. */ swq_pktbytes = ath_tx_tid_swq_depth_bytes(sc, an, tid); ath_tx_do_ratelookup(sc, bf, tid->tid, swq_pktbytes, true); /* * Note this only is used for the fragment paths and * should really be rethought out if we want to do * things like an RTS burst across >1 aggregate. 
*/ ath_tx_calc_duration(sc, bf); ath_tx_calc_protection(sc, bf); ath_tx_set_rtscts(sc, bf); ath_tx_rate_fill_rcflags(sc, bf); status = ath_tx_form_aggr(sc, an, tid, &bf_q); DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: ath_tx_form_aggr() status=%d\n", __func__, status); /* * No frames to be picked up - out of BAW */ if (TAILQ_EMPTY(&bf_q)) break; /* * This assumes that the descriptor list in the ath_bufhead * are already linked together via bf_next pointers. */ bf = TAILQ_FIRST(&bf_q); if (status == ATH_AGGR_8K_LIMITED) sc->sc_aggr_stats.aggr_rts_aggr_limited++; /* * If it's the only frame send as non-aggregate * assume that ath_tx_form_aggr() has checked * whether it's in the BAW and added it appropriately. */ if (bf->bf_state.bfs_nframes == 1) { DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: single-frame aggregate\n", __func__); /* Update CLRDMASK just before this frame is queued */ ath_tx_update_clrdmask(sc, tid, bf); bf->bf_state.bfs_aggr = 0; bf->bf_state.bfs_ndelim = 0; ath_tx_setds(sc, bf); ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc); if (status == ATH_AGGR_BAW_CLOSED) sc->sc_aggr_stats.aggr_baw_closed_single_pkt++; else sc->sc_aggr_stats.aggr_single_pkt++; } else { DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: multi-frame aggregate: %d frames, " "length %d\n", __func__, bf->bf_state.bfs_nframes, bf->bf_state.bfs_al); bf->bf_state.bfs_aggr = 1; sc->sc_aggr_stats.aggr_pkts[bf->bf_state.bfs_nframes]++; sc->sc_aggr_stats.aggr_aggr_pkt++; /* Update CLRDMASK just before this frame is queued */ ath_tx_update_clrdmask(sc, tid, bf); /* * Calculate the duration/protection as required. */ ath_tx_calc_duration(sc, bf); ath_tx_calc_protection(sc, bf); /* * Update the rate and rtscts information based on the * rate decision made by the rate control code; * the first frame in the aggregate needs it. */ ath_tx_set_rtscts(sc, bf); /* * Setup the relevant descriptor fields * for aggregation. The first descriptor * already points to the rest in the chain. */ ath_tx_setds_11n(sc, bf); } queuepkt: /* Set completion handler, multi-frame aggregate or not */ bf->bf_comp = ath_tx_aggr_comp; if (bf->bf_state.bfs_tid == IEEE80211_NONQOS_TID) DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=16?\n", __func__); /* * Update leak count and frame config if were leaking frames. * * XXX TODO: it should update all frames in an aggregate * correctly! */ ath_tx_leak_count_update(sc, tid, bf); /* Punt to txq */ ath_tx_handoff(sc, txq, bf); /* Track outstanding buffer count to hardware */ /* aggregates are "one" buffer */ tid->hwq_depth++; /* * Break out if ath_tx_form_aggr() indicated * there can't be any further progress (eg BAW is full.) * Checking for an empty txq is done above. * * XXX locking on txq here? */ /* XXX TXQ locking */ if (txq->axq_aggr_depth >= sc->sc_hwq_limit_aggr || (status == ATH_AGGR_BAW_CLOSED || status == ATH_AGGR_LEAK_CLOSED)) break; } } /* * Schedule some packets from the given node/TID to the hardware. * * XXX TODO: this routine doesn't enforce the maximum TXQ depth. * It just dumps frames into the TXQ. We should limit how deep * the transmit queue can grow for frames dispatched to the given * TXQ. * * To avoid locking issues, either we need to own the TXQ lock * at this point, or we need to pass in the maximum frame count * from the caller. 
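 *
 * Each frame popped off the software queue here gets the normal
 * (non-aggregate) completion handler, a fresh rate lookup and
 * descriptor setup, and is then handed straight off to the hardware
 * queue.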
*/ void ath_tx_tid_hw_queue_norm(struct ath_softc *sc, struct ath_node *an, struct ath_tid *tid) { struct ath_buf *bf; struct ath_txq *txq = sc->sc_ac2q[tid->ac]; DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: node %p: TID %d: called\n", __func__, an, tid->tid); ATH_TX_LOCK_ASSERT(sc); /* Check - is AMPDU pending or running? then print out something */ if (ath_tx_ampdu_pending(sc, an, tid->tid)) DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ampdu pending?\n", __func__, tid->tid); if (ath_tx_ampdu_running(sc, an, tid->tid)) DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ampdu running?\n", __func__, tid->tid); for (;;) { /* * If the upper layers have paused the TID, don't * queue any further packets. * * XXX if we are leaking frames, make sure we decrement * that counter _and_ we continue here. */ if (! ath_tx_tid_can_tx_or_sched(sc, tid)) break; bf = ATH_TID_FIRST(tid); if (bf == NULL) { break; } ATH_TID_REMOVE(tid, bf, bf_list); /* Sanity check! */ if (tid->tid != bf->bf_state.bfs_tid) { DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bfs_tid %d !=" " tid %d\n", __func__, bf->bf_state.bfs_tid, tid->tid); } /* Normal completion handler */ bf->bf_comp = ath_tx_normal_comp; /* * Override this for now, until the non-aggregate * completion handler correctly handles software retransmits. */ bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; /* Update CLRDMASK just before this frame is queued */ ath_tx_update_clrdmask(sc, tid, bf); /* Program descriptors + rate control */ ath_tx_do_ratelookup(sc, bf, tid->tid, bf->bf_state.bfs_pktlen, false); ath_tx_calc_duration(sc, bf); ath_tx_calc_protection(sc, bf); ath_tx_set_rtscts(sc, bf); ath_tx_rate_fill_rcflags(sc, bf); ath_tx_setds(sc, bf); /* * Update the current leak count if * we're leaking frames; and set the * MORE flag as appropriate. */ ath_tx_leak_count_update(sc, tid, bf); /* Track outstanding buffer count to hardware */ /* aggregates are "one" buffer */ tid->hwq_depth++; /* Punt to hardware or software txq */ ath_tx_handoff(sc, txq, bf); } } /* * Schedule some packets to the given hardware queue. * * This function walks the list of TIDs (ie, ath_node TIDs * with queued traffic) and attempts to schedule traffic * from them. * * TID scheduling is implemented as a FIFO, with TIDs being * added to the end of the queue after some frames have been * scheduled. */ void ath_txq_sched(struct ath_softc *sc, struct ath_txq *txq) { struct ath_tid *tid, *next, *last; ATH_TX_LOCK_ASSERT(sc); /* * For non-EDMA chips, aggr frames that have been built are * in axq_aggr_depth, whether they've been scheduled or not. * There's no FIFO, so txq->axq_depth is what's been scheduled * to the hardware. * * For EDMA chips, we do it in two stages. The existing code * builds a list of frames to go to the hardware and the EDMA * code turns it into a single entry to push into the FIFO. * That way we don't take up one packet per FIFO slot. * We do push one aggregate per FIFO slot though, just to keep * things simple. * * The FIFO depth is what's in the hardware; the txq->axq_depth * is what's been scheduled to the FIFO. * * fifo.axq_depth is the number of frames (or aggregates) pushed * into the EDMA FIFO. For multi-frame lists, this is the number * of frames pushed in. * axq_fifo_depth is the number of FIFO slots currently busy. */ /* For EDMA and non-EDMA, check built/scheduled against aggr limit */ if (txq->axq_aggr_depth >= sc->sc_hwq_limit_aggr) { sc->sc_aggr_stats.aggr_sched_nopkt++; return; } /* * For non-EDMA chips, axq_depth is the "what's scheduled to * the hardware list". 
For EDMA it's "What's built for the hardware" * and fifo.axq_depth is how many frames have been dispatched * already to the hardware. */ if (txq->axq_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_nonaggr) { sc->sc_aggr_stats.aggr_sched_nopkt++; return; } last = TAILQ_LAST(&txq->axq_tidq, axq_t_s); TAILQ_FOREACH_SAFE(tid, &txq->axq_tidq, axq_qelem, next) { /* * Suspend paused queues here; they'll be resumed * once the addba completes or times out. */ DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, paused=%d\n", __func__, tid->tid, tid->paused); ath_tx_tid_unsched(sc, tid); /* * This node may be in power-save and we're leaking * a frame; be careful. */ if (! ath_tx_tid_can_tx_or_sched(sc, tid)) { goto loop_done; } if (ath_tx_ampdu_running(sc, tid->an, tid->tid)) ath_tx_tid_hw_queue_aggr(sc, tid->an, tid); else ath_tx_tid_hw_queue_norm(sc, tid->an, tid); /* Not empty? Re-schedule */ if (tid->axq_depth != 0) ath_tx_tid_sched(sc, tid); /* * Give the software queue time to aggregate more * packets. If we aren't running aggregation then * we should still limit the hardware queue depth. */ /* XXX TXQ locking */ if (txq->axq_aggr_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_aggr) { break; } if (txq->axq_depth >= sc->sc_hwq_limit_nonaggr) { break; } loop_done: /* * If this was the last entry on the original list, stop. * Otherwise nodes that have been rescheduled onto the end * of the TID FIFO list will just keep being rescheduled. * * XXX What should we do about nodes that were paused * but are pending a leaking frame in response to a ps-poll? * They'll be put at the front of the list; so they'll * prematurely trigger this condition! Ew. */ if (tid == last) break; } } /* * TX addba handling */ /* * Return net80211 TID struct pointer, or NULL for none */ struct ieee80211_tx_ampdu * ath_tx_get_tx_tid(struct ath_node *an, int tid) { struct ieee80211_node *ni = &an->an_node; struct ieee80211_tx_ampdu *tap; if (tid == IEEE80211_NONQOS_TID) return NULL; tap = &ni->ni_tx_ampdu[tid]; return tap; } /* * Is AMPDU-TX running? */ static int ath_tx_ampdu_running(struct ath_softc *sc, struct ath_node *an, int tid) { struct ieee80211_tx_ampdu *tap; if (tid == IEEE80211_NONQOS_TID) return 0; tap = ath_tx_get_tx_tid(an, tid); if (tap == NULL) return 0; /* Not valid; default to not running */ return !! (tap->txa_flags & IEEE80211_AGGR_RUNNING); } /* * Is AMPDU-TX negotiation pending? */ static int ath_tx_ampdu_pending(struct ath_softc *sc, struct ath_node *an, int tid) { struct ieee80211_tx_ampdu *tap; if (tid == IEEE80211_NONQOS_TID) return 0; tap = ath_tx_get_tx_tid(an, tid); if (tap == NULL) return 0; /* Not valid; default to not pending */ return !! (tap->txa_flags & IEEE80211_AGGR_XCHGPEND); } /* * Is AMPDU-TX pending for the given TID? */ /* * Method to handle sending an ADDBA request. * * We tap this so the relevant flags can be set to pause the TID * whilst waiting for the response. * * XXX there's no timeout handler we can override? */ int ath_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, int dialogtoken, int baparamset, int batimeout) { struct ath_softc *sc = ni->ni_ic->ic_softc; int tid = tap->txa_tid; struct ath_node *an = ATH_NODE(ni); struct ath_tid *atid = &an->an_tid[tid]; /* * XXX danger Will Robinson! * * Although the taskqueue may be running and scheduling some more * packets, these should all be _before_ the addba sequence number. * However, net80211 will keep self-assigning sequence numbers * until addba has been negotiated. 
* * In the past, these packets would be "paused" (which still works * fine, as they're being scheduled to the driver in the same * serialised method which is calling the addba request routine) * and when the aggregation session begins, they'll be dequeued * as aggregate packets and added to the BAW. However, now there's * a "bf->bf_state.bfs_dobaw" flag, and this isn't set for these * packets. Thus they never get included in the BAW tracking and * this can cause the initial burst of packets after the addba * negotiation to "hang", as they quickly fall outside the BAW. * * The "eventual" solution should be to tag these packets with * dobaw. Although net80211 has given us a sequence number, * it'll be "after" the left edge of the BAW and thus it'll * fall within it. */ ATH_TX_LOCK(sc); /* * This is a bit annoying. Until net80211 HT code inherits some * (any) locking, we may have this called in parallel BUT only * one response/timeout will be called. Grr. */ if (atid->addba_tx_pending == 0) { ath_tx_tid_pause(sc, atid); atid->addba_tx_pending = 1; } ATH_TX_UNLOCK(sc); DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: %6D: called; dialogtoken=%d, baparamset=%d, batimeout=%d\n", __func__, ni->ni_macaddr, ":", dialogtoken, baparamset, batimeout); DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: txa_start=%d, ni_txseqs=%d\n", __func__, tap->txa_start, ni->ni_txseqs[tid]); return sc->sc_addba_request(ni, tap, dialogtoken, baparamset, batimeout); } /* * Handle an ADDBA response. * * We unpause the queue so TX'ing can resume. * * Any packets TX'ed from this point should be "aggregate" (whether * aggregate or not) so the BAW is updated. * * Note! net80211 keeps self-assigning sequence numbers until * ampdu is negotiated. This means the initially-negotiated BAW left * edge won't match the ni->ni_txseq. * * So, being very dirty, the BAW left edge is "slid" here to match * ni->ni_txseq. * * What likely SHOULD happen is that all packets subsequent to the * addba request should be tagged as aggregate and queued as non-aggregate * frames; thus updating the BAW. For now though, I'll just slide the * window. */ int ath_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, int status, int code, int batimeout) { struct ath_softc *sc = ni->ni_ic->ic_softc; int tid = tap->txa_tid; struct ath_node *an = ATH_NODE(ni); struct ath_tid *atid = &an->an_tid[tid]; int r; DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: %6D: called; status=%d, code=%d, batimeout=%d\n", __func__, ni->ni_macaddr, ":", status, code, batimeout); DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: txa_start=%d, ni_txseqs=%d\n", __func__, tap->txa_start, ni->ni_txseqs[tid]); /* * Call this first, so the interface flags get updated * before the TID is unpaused. Otherwise a race condition * exists where the unpaused TID still doesn't yet have * IEEE80211_AGGR_RUNNING set. */ r = sc->sc_addba_response(ni, tap, status, code, batimeout); ATH_TX_LOCK(sc); atid->addba_tx_pending = 0; /* * XXX dirty! * Slide the BAW left edge to wherever net80211 left it for us. * Read above for more information. */ tap->txa_start = ni->ni_txseqs[tid]; ath_tx_tid_resume(sc, atid); ATH_TX_UNLOCK(sc); return r; } /* * Stop ADDBA on a queue. * * This can be called whilst BAR TX is currently active on the queue, * so make sure this is unblocked before continuing. 
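 *
 * The sequence below is: pause the TID, unblock any pending BAR,
 * hand the stop off to net80211, then either resume the TID or run
 * the cleanup path and fail whatever frames it hands back.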
*/ void ath_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap) { struct ath_softc *sc = ni->ni_ic->ic_softc; int tid = tap->txa_tid; struct ath_node *an = ATH_NODE(ni); struct ath_tid *atid = &an->an_tid[tid]; ath_bufhead bf_cq; struct ath_buf *bf; DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: %6D: called\n", __func__, ni->ni_macaddr, ":"); /* * Pause TID traffic early, so there aren't any races * Unblock the pending BAR held traffic, if it's currently paused. */ ATH_TX_LOCK(sc); ath_tx_tid_pause(sc, atid); if (atid->bar_wait) { /* * bar_unsuspend() expects bar_tx == 1, as it should be * called from the TX completion path. This quietens * the warning. It's cleared for us anyway. */ atid->bar_tx = 1; ath_tx_tid_bar_unsuspend(sc, atid); } ATH_TX_UNLOCK(sc); /* There's no need to hold the TXQ lock here */ sc->sc_addba_stop(ni, tap); /* * ath_tx_tid_cleanup will resume the TID if possible, otherwise * it'll set the cleanup flag, and it'll be unpaused once * things have been cleaned up. */ TAILQ_INIT(&bf_cq); ATH_TX_LOCK(sc); /* * In case there's a followup call to this, only call it * if we don't have a cleanup in progress. * * Since we've paused the queue above, we need to make * sure we unpause if there's already a cleanup in * progress - it means something else is also doing * this stuff, so we don't need to also keep it paused. */ if (atid->cleanup_inprogress) { ath_tx_tid_resume(sc, atid); } else { ath_tx_tid_cleanup(sc, an, tid, &bf_cq); /* * Unpause the TID if no cleanup is required. */ if (! atid->cleanup_inprogress) ath_tx_tid_resume(sc, atid); } ATH_TX_UNLOCK(sc); /* Handle completing frames and fail them */ while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) { TAILQ_REMOVE(&bf_cq, bf, bf_list); ath_tx_default_comp(sc, bf, 1); } } /* * Handle a node reassociation. * * We may have a bunch of frames queued to the hardware; those need * to be marked as cleanup. */ void ath_tx_node_reassoc(struct ath_softc *sc, struct ath_node *an) { struct ath_tid *tid; int i; ath_bufhead bf_cq; struct ath_buf *bf; TAILQ_INIT(&bf_cq); ATH_TX_UNLOCK_ASSERT(sc); ATH_TX_LOCK(sc); for (i = 0; i < IEEE80211_TID_SIZE; i++) { tid = &an->an_tid[i]; if (tid->hwq_depth == 0) continue; DPRINTF(sc, ATH_DEBUG_NODE, "%s: %6D: TID %d: cleaning up TID\n", __func__, an->an_node.ni_macaddr, ":", i); /* * In case there's a followup call to this, only call it * if we don't have a cleanup in progress. */ if (! tid->cleanup_inprogress) { ath_tx_tid_pause(sc, tid); ath_tx_tid_cleanup(sc, an, i, &bf_cq); /* * Unpause the TID if no cleanup is required. */ if (! tid->cleanup_inprogress) ath_tx_tid_resume(sc, tid); } } ATH_TX_UNLOCK(sc); /* Handle completing frames and fail them */ while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) { TAILQ_REMOVE(&bf_cq, bf, bf_list); ath_tx_default_comp(sc, bf, 1); } } /* * Note: net80211 bar_timeout() doesn't call this function on BAR failure; * it simply tears down the aggregation session. Ew. * * It however will call ieee80211_ampdu_stop() which will call * ic->ic_addba_stop(). * * XXX This uses a hard-coded max BAR count value; the whole * XXX BAR TX success or failure should be better handled! 
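 *
 * The hard-coded value mirrors the attempts == 50 check below: once
 * net80211 has given up on the BAR the TID still has to be unpaused
 * here, otherwise traffic on that TID would stall forever.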
*/ void ath_bar_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, int status) { struct ath_softc *sc = ni->ni_ic->ic_softc; int tid = tap->txa_tid; struct ath_node *an = ATH_NODE(ni); struct ath_tid *atid = &an->an_tid[tid]; int attempts = tap->txa_attempts; int old_txa_start; DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, "%s: %6D: called; txa_tid=%d, atid->tid=%d, status=%d, attempts=%d, txa_start=%d, txa_seqpending=%d\n", __func__, ni->ni_macaddr, ":", tap->txa_tid, atid->tid, status, attempts, tap->txa_start, tap->txa_seqpending); /* Note: This may update the BAW details */ /* * XXX What if this does slide the BAW along? We need to somehow * XXX either fix things when it does happen, or prevent the * XXX seqpending value to be anything other than exactly what * XXX the hell we want! * * XXX So for now, how I do this inside the TX lock for now * XXX and just correct it afterwards? The below condition should * XXX never happen and if it does I need to fix all kinds of things. */ ATH_TX_LOCK(sc); old_txa_start = tap->txa_start; sc->sc_bar_response(ni, tap, status); if (tap->txa_start != old_txa_start) { device_printf(sc->sc_dev, "%s: tid=%d; txa_start=%d, old=%d, adjusting\n", __func__, tid, tap->txa_start, old_txa_start); } tap->txa_start = old_txa_start; ATH_TX_UNLOCK(sc); /* Unpause the TID */ /* * XXX if this is attempt=50, the TID will be downgraded * XXX to a non-aggregate session. So we must unpause the * XXX TID here or it'll never be done. * * Also, don't call it if bar_tx/bar_wait are 0; something * has beaten us to the punch? (XXX figure out what?) */ if (status == 0 || attempts == 50) { ATH_TX_LOCK(sc); if (atid->bar_tx == 0 || atid->bar_wait == 0) DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, "%s: huh? bar_tx=%d, bar_wait=%d\n", __func__, atid->bar_tx, atid->bar_wait); else ath_tx_tid_bar_unsuspend(sc, atid); ATH_TX_UNLOCK(sc); } } /* * This is called whenever the pending ADDBA request times out. * Unpause and reschedule the TID. */ void ath_addba_response_timeout(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap) { struct ath_softc *sc = ni->ni_ic->ic_softc; int tid = tap->txa_tid; struct ath_node *an = ATH_NODE(ni); struct ath_tid *atid = &an->an_tid[tid]; DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: %6D: TID=%d, called; resuming\n", __func__, ni->ni_macaddr, ":", tid); ATH_TX_LOCK(sc); atid->addba_tx_pending = 0; ATH_TX_UNLOCK(sc); /* Note: This updates the aggregate state to (again) pending */ sc->sc_addba_response_timeout(ni, tap); /* Unpause the TID; which reschedules it */ ATH_TX_LOCK(sc); ath_tx_tid_resume(sc, atid); ATH_TX_UNLOCK(sc); } /* * Check if a node is asleep or not. */ int ath_tx_node_is_asleep(struct ath_softc *sc, struct ath_node *an) { ATH_TX_LOCK_ASSERT(sc); return (an->an_is_powersave); } /* * Mark a node as currently "in powersaving." * This suspends all traffic on the node. * * This must be called with the node/tx locks free. * * XXX TODO: the locking silliness below is due to how the node * locking currently works. Right now, the node lock is grabbed * to do rate control lookups and these are done with the TX * queue lock held. This means the node lock can't be grabbed * first here or a LOR will occur. * * Eventually (hopefully!) the TX path code will only grab * the TXQ lock when transmitting and the ath_node lock when * doing node/TID operations. There are other complications - * the sched/unsched operations involve walking the per-txq * 'active tid' list and this requires both locks to be held. 
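 *
 * Pausing every TID is what actually stops transmission here; the
 * node is only marked as being in power-save once all of its TIDs
 * have been paused.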
*/ void ath_tx_node_sleep(struct ath_softc *sc, struct ath_node *an) { struct ath_tid *atid; struct ath_txq *txq; int tid; ATH_TX_UNLOCK_ASSERT(sc); /* Suspend all traffic on the node */ ATH_TX_LOCK(sc); if (an->an_is_powersave) { DPRINTF(sc, ATH_DEBUG_XMIT, "%s: %6D: node was already asleep!\n", __func__, an->an_node.ni_macaddr, ":"); ATH_TX_UNLOCK(sc); return; } for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) { atid = &an->an_tid[tid]; txq = sc->sc_ac2q[atid->ac]; ath_tx_tid_pause(sc, atid); } /* Mark node as in powersaving */ an->an_is_powersave = 1; ATH_TX_UNLOCK(sc); } /* * Mark a node as currently "awake." * This resumes all traffic to the node. */ void ath_tx_node_wakeup(struct ath_softc *sc, struct ath_node *an) { struct ath_tid *atid; struct ath_txq *txq; int tid; ATH_TX_UNLOCK_ASSERT(sc); ATH_TX_LOCK(sc); /* !? */ if (an->an_is_powersave == 0) { ATH_TX_UNLOCK(sc); DPRINTF(sc, ATH_DEBUG_XMIT, "%s: an=%p: node was already awake\n", __func__, an); return; } /* Mark node as awake */ an->an_is_powersave = 0; /* * Clear any pending leaked frame requests */ an->an_leak_count = 0; for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) { atid = &an->an_tid[tid]; txq = sc->sc_ac2q[atid->ac]; ath_tx_tid_resume(sc, atid); } ATH_TX_UNLOCK(sc); } static int ath_legacy_dma_txsetup(struct ath_softc *sc) { /* nothing new needed */ return (0); } static int ath_legacy_dma_txteardown(struct ath_softc *sc) { /* nothing new needed */ return (0); } void ath_xmit_setup_legacy(struct ath_softc *sc) { /* * For now, just set the descriptor length to sizeof(ath_desc); * worry about extracting the real length out of the HAL later. */ sc->sc_tx_desclen = sizeof(struct ath_desc); sc->sc_tx_statuslen = sizeof(struct ath_desc); sc->sc_tx_nmaps = 1; /* only one buffer per TX desc */ sc->sc_tx.xmit_setup = ath_legacy_dma_txsetup; sc->sc_tx.xmit_teardown = ath_legacy_dma_txteardown; sc->sc_tx.xmit_attach_comp_func = ath_legacy_attach_comp_func; sc->sc_tx.xmit_dma_restart = ath_legacy_tx_dma_restart; sc->sc_tx.xmit_handoff = ath_legacy_xmit_handoff; sc->sc_tx.xmit_drain = ath_legacy_tx_drain; } diff --git a/sys/dev/usb/wlan/if_uath.c b/sys/dev/usb/wlan/if_uath.c index 378dd717a3eb..04190821d9b9 100644 --- a/sys/dev/usb/wlan/if_uath.c +++ b/sys/dev/usb/wlan/if_uath.c @@ -1,2876 +1,2876 @@ /*- * SPDX-License-Identifier: (BSD-2-Clause AND BSD-1-Clause) * * Copyright (c) 2006 Sam Leffler, Errno Consulting * Copyright (c) 2008-2009 Weongyo Jeong * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ /* * This driver is distantly derived from a driver of the same name * by Damien Bergamini. The original copyright is included below: * * Copyright (c) 2006 * Damien Bergamini * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include /*- * Driver for Atheros AR5523 USB parts. * * The driver requires firmware to be loaded into the device. This * is done on device discovery from a user application (uathload) * that is launched by devd when a device with suitable product ID * is recognized. Once firmware has been loaded the device will * reset the USB port and re-attach with the original product ID+1 * and this driver will be attached. The firmware is licensed for * general use (royalty free) and may be incorporated in products. * Note that the firmware normally packaged with the NDIS drivers * for these devices does not work in this way and so does not work * with this driver. 
*/ #include "opt_wlan.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef INET #include #include #include #include #include #endif #include #include #include #include #include #include #include "usbdevs.h" #include #include static SYSCTL_NODE(_hw_usb, OID_AUTO, uath, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "USB Atheros"); static int uath_countrycode = CTRY_DEFAULT; /* country code */ SYSCTL_INT(_hw_usb_uath, OID_AUTO, countrycode, CTLFLAG_RWTUN, &uath_countrycode, 0, "country code"); static int uath_regdomain = 0; /* regulatory domain */ SYSCTL_INT(_hw_usb_uath, OID_AUTO, regdomain, CTLFLAG_RD, &uath_regdomain, 0, "regulatory domain"); #ifdef UATH_DEBUG int uath_debug = 0; SYSCTL_INT(_hw_usb_uath, OID_AUTO, debug, CTLFLAG_RWTUN, &uath_debug, 0, "uath debug level"); enum { UATH_DEBUG_XMIT = 0x00000001, /* basic xmit operation */ UATH_DEBUG_XMIT_DUMP = 0x00000002, /* xmit dump */ UATH_DEBUG_RECV = 0x00000004, /* basic recv operation */ UATH_DEBUG_TX_PROC = 0x00000008, /* tx ISR proc */ UATH_DEBUG_RX_PROC = 0x00000010, /* rx ISR proc */ UATH_DEBUG_RECV_ALL = 0x00000020, /* trace all frames (beacons) */ UATH_DEBUG_INIT = 0x00000040, /* initialization of dev */ UATH_DEBUG_DEVCAP = 0x00000080, /* dev caps */ UATH_DEBUG_CMDS = 0x00000100, /* commands */ UATH_DEBUG_CMDS_DUMP = 0x00000200, /* command buffer dump */ UATH_DEBUG_RESET = 0x00000400, /* reset processing */ UATH_DEBUG_STATE = 0x00000800, /* 802.11 state transitions */ UATH_DEBUG_MULTICAST = 0x00001000, /* multicast */ UATH_DEBUG_WME = 0x00002000, /* WME */ UATH_DEBUG_CHANNEL = 0x00004000, /* channel */ UATH_DEBUG_RATES = 0x00008000, /* rates */ UATH_DEBUG_CRYPTO = 0x00010000, /* crypto */ UATH_DEBUG_LED = 0x00020000, /* LED */ UATH_DEBUG_ANY = 0xffffffff }; #define DPRINTF(sc, m, fmt, ...) do { \ if (sc->sc_debug & (m)) \ printf(fmt, __VA_ARGS__); \ } while (0) #else #define DPRINTF(sc, m, fmt, ...) 
do { \ (void) sc; \ } while (0) #endif /* recognized device vendors/products */ static const STRUCT_USB_HOST_ID uath_devs[] = { #define UATH_DEV(v,p) { USB_VP(USB_VENDOR_##v, USB_PRODUCT_##v##_##p) } UATH_DEV(ACCTON, SMCWUSBTG2), UATH_DEV(ATHEROS, AR5523), UATH_DEV(ATHEROS2, AR5523_1), UATH_DEV(ATHEROS2, AR5523_2), UATH_DEV(ATHEROS2, AR5523_3), UATH_DEV(CONCEPTRONIC, AR5523_1), UATH_DEV(CONCEPTRONIC, AR5523_2), UATH_DEV(DLINK, DWLAG122), UATH_DEV(DLINK, DWLAG132), UATH_DEV(DLINK, DWLG132), UATH_DEV(DLINK2, DWA120), UATH_DEV(GIGASET, AR5523), UATH_DEV(GIGASET, SMCWUSBTG), UATH_DEV(GLOBALSUN, AR5523_1), UATH_DEV(GLOBALSUN, AR5523_2), UATH_DEV(NETGEAR, WG111U), UATH_DEV(NETGEAR3, WG111T), UATH_DEV(NETGEAR3, WPN111), UATH_DEV(NETGEAR3, WPN111_2), UATH_DEV(UMEDIA, TEW444UBEU), UATH_DEV(UMEDIA, AR5523_2), UATH_DEV(WISTRONNEWEB, AR5523_1), UATH_DEV(WISTRONNEWEB, AR5523_2), UATH_DEV(ZCOM, AR5523) #undef UATH_DEV }; static usb_callback_t uath_intr_rx_callback; static usb_callback_t uath_intr_tx_callback; static usb_callback_t uath_bulk_rx_callback; static usb_callback_t uath_bulk_tx_callback; static const struct usb_config uath_usbconfig[UATH_N_XFERS] = { [UATH_INTR_RX] = { .type = UE_BULK, .endpoint = 0x1, .direction = UE_DIR_IN, .bufsize = UATH_MAX_CMDSZ, .flags = { .pipe_bof = 1, .short_xfer_ok = 1 }, .callback = uath_intr_rx_callback }, [UATH_INTR_TX] = { .type = UE_BULK, .endpoint = 0x1, .direction = UE_DIR_OUT, .bufsize = UATH_MAX_CMDSZ * UATH_CMD_LIST_COUNT, .flags = { .force_short_xfer = 1, .pipe_bof = 1, }, .callback = uath_intr_tx_callback, .timeout = UATH_CMD_TIMEOUT }, [UATH_BULK_RX] = { .type = UE_BULK, .endpoint = 0x2, .direction = UE_DIR_IN, .bufsize = MCLBYTES, .flags = { .ext_buffer = 1, .pipe_bof = 1, .short_xfer_ok = 1 }, .callback = uath_bulk_rx_callback }, [UATH_BULK_TX] = { .type = UE_BULK, .endpoint = 0x2, .direction = UE_DIR_OUT, .bufsize = UATH_MAX_TXBUFSZ * UATH_TX_DATA_LIST_COUNT, .flags = { .force_short_xfer = 1, .pipe_bof = 1 }, .callback = uath_bulk_tx_callback, .timeout = UATH_DATA_TIMEOUT } }; static struct ieee80211vap *uath_vap_create(struct ieee80211com *, const char [IFNAMSIZ], int, enum ieee80211_opmode, int, const uint8_t [IEEE80211_ADDR_LEN], const uint8_t [IEEE80211_ADDR_LEN]); static void uath_vap_delete(struct ieee80211vap *); static int uath_alloc_cmd_list(struct uath_softc *, struct uath_cmd []); static void uath_free_cmd_list(struct uath_softc *, struct uath_cmd []); static int uath_host_available(struct uath_softc *); static int uath_get_capability(struct uath_softc *, uint32_t, uint32_t *); static int uath_get_devcap(struct uath_softc *); static struct uath_cmd * uath_get_cmdbuf(struct uath_softc *); static int uath_cmd_read(struct uath_softc *, uint32_t, const void *, int, void *, int, int); static int uath_cmd_write(struct uath_softc *, uint32_t, const void *, int, int); static void uath_stat(void *); #ifdef UATH_DEBUG static void uath_dump_cmd(const uint8_t *, int, char); static const char * uath_codename(int); #endif static int uath_get_devstatus(struct uath_softc *, uint8_t macaddr[IEEE80211_ADDR_LEN]); static int uath_get_status(struct uath_softc *, uint32_t, void *, int); static int uath_alloc_rx_data_list(struct uath_softc *); static int uath_alloc_tx_data_list(struct uath_softc *); static void uath_free_rx_data_list(struct uath_softc *); static void uath_free_tx_data_list(struct uath_softc *); static int uath_init(struct uath_softc *); static void uath_stop(struct uath_softc *); static void uath_parent(struct ieee80211com *); static int 
uath_transmit(struct ieee80211com *, struct mbuf *); static void uath_start(struct uath_softc *); static int uath_raw_xmit(struct ieee80211_node *, struct mbuf *, const struct ieee80211_bpf_params *); static void uath_scan_start(struct ieee80211com *); static void uath_scan_end(struct ieee80211com *); static void uath_set_channel(struct ieee80211com *); static void uath_update_mcast(struct ieee80211com *); static void uath_update_promisc(struct ieee80211com *); static int uath_config(struct uath_softc *, uint32_t, uint32_t); static int uath_config_multi(struct uath_softc *, uint32_t, const void *, int); static int uath_switch_channel(struct uath_softc *, struct ieee80211_channel *); static int uath_set_rxfilter(struct uath_softc *, uint32_t, uint32_t); static void uath_watchdog(void *); static void uath_abort_xfers(struct uath_softc *); static int uath_dataflush(struct uath_softc *); static int uath_cmdflush(struct uath_softc *); static int uath_flush(struct uath_softc *); static int uath_set_ledstate(struct uath_softc *, int); static int uath_set_chan(struct uath_softc *, struct ieee80211_channel *); static int uath_reset_tx_queues(struct uath_softc *); static int uath_wme_init(struct uath_softc *); static struct uath_data * uath_getbuf(struct uath_softc *); static int uath_newstate(struct ieee80211vap *, enum ieee80211_state, int); static int uath_set_key(struct uath_softc *, const struct ieee80211_key *, int); static int uath_set_keys(struct uath_softc *, struct ieee80211vap *); static void uath_sysctl_node(struct uath_softc *); static int uath_match(device_t dev) { struct usb_attach_arg *uaa = device_get_ivars(dev); if (uaa->usb_mode != USB_MODE_HOST) return (ENXIO); if (uaa->info.bConfigIndex != UATH_CONFIG_INDEX) return (ENXIO); if (uaa->info.bIfaceIndex != UATH_IFACE_INDEX) return (ENXIO); return (usbd_lookup_id_by_uaa(uath_devs, sizeof(uath_devs), uaa)); } static int uath_attach(device_t dev) { struct uath_softc *sc = device_get_softc(dev); struct usb_attach_arg *uaa = device_get_ivars(dev); struct ieee80211com *ic = &sc->sc_ic; uint8_t bands[IEEE80211_MODE_BYTES]; uint8_t iface_index = UATH_IFACE_INDEX; /* XXX */ usb_error_t error; sc->sc_dev = dev; sc->sc_udev = uaa->device; #ifdef UATH_DEBUG sc->sc_debug = uath_debug; #endif device_set_usb_desc(dev); /* * Only post-firmware devices here. */ mtx_init(&sc->sc_mtx, device_get_nameunit(sc->sc_dev), MTX_NETWORK_LOCK, MTX_DEF); callout_init(&sc->stat_ch, 0); callout_init_mtx(&sc->watchdog_ch, &sc->sc_mtx, 0); mbufq_init(&sc->sc_snd, ifqmaxlen); error = usbd_transfer_setup(uaa->device, &iface_index, sc->sc_xfer, uath_usbconfig, UATH_N_XFERS, sc, &sc->sc_mtx); if (error) { device_printf(dev, "could not allocate USB transfers, " "err=%s\n", usbd_errstr(error)); goto fail; } sc->sc_cmd_dma_buf = usbd_xfer_get_frame_buffer(sc->sc_xfer[UATH_INTR_TX], 0); sc->sc_tx_dma_buf = usbd_xfer_get_frame_buffer(sc->sc_xfer[UATH_BULK_TX], 0); /* * Setup buffers for firmware commands. */ error = uath_alloc_cmd_list(sc, sc->sc_cmd); if (error != 0) { device_printf(sc->sc_dev, "could not allocate Tx command list\n"); goto fail1; } /* * We're now ready to send+receive firmware commands. */ UATH_LOCK(sc); error = uath_host_available(sc); if (error != 0) { device_printf(sc->sc_dev, "could not initialize adapter\n"); goto fail2; } error = uath_get_devcap(sc); if (error != 0) { device_printf(sc->sc_dev, "could not get device capabilities\n"); goto fail2; } UATH_UNLOCK(sc); /* Create device sysctl node. 
*/ uath_sysctl_node(sc); UATH_LOCK(sc); error = uath_get_devstatus(sc, ic->ic_macaddr); if (error != 0) { device_printf(sc->sc_dev, "could not get device status\n"); goto fail2; } /* * Allocate xfers for Rx/Tx data pipes. */ error = uath_alloc_rx_data_list(sc); if (error != 0) { device_printf(sc->sc_dev, "could not allocate Rx data list\n"); goto fail2; } error = uath_alloc_tx_data_list(sc); if (error != 0) { device_printf(sc->sc_dev, "could not allocate Tx data list\n"); goto fail2; } UATH_UNLOCK(sc); ic->ic_softc = sc; ic->ic_name = device_get_nameunit(dev); ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */ /* set device capabilities */ ic->ic_caps = IEEE80211_C_STA | /* station mode */ IEEE80211_C_MONITOR | /* monitor mode supported */ IEEE80211_C_TXPMGT | /* tx power management */ IEEE80211_C_SHPREAMBLE | /* short preamble supported */ IEEE80211_C_SHSLOT | /* short slot time supported */ IEEE80211_C_WPA | /* 802.11i */ IEEE80211_C_BGSCAN | /* capable of bg scanning */ IEEE80211_C_TXFRAG; /* handle tx frags */ /* put a regulatory domain to reveal informations. */ uath_regdomain = sc->sc_devcap.regDomain; memset(bands, 0, sizeof(bands)); setbit(bands, IEEE80211_MODE_11B); setbit(bands, IEEE80211_MODE_11G); if ((sc->sc_devcap.analog5GhzRevision & 0xf0) == 0x30) setbit(bands, IEEE80211_MODE_11A); /* XXX turbo */ ieee80211_init_channels(ic, NULL, bands); ieee80211_ifattach(ic); ic->ic_raw_xmit = uath_raw_xmit; ic->ic_scan_start = uath_scan_start; ic->ic_scan_end = uath_scan_end; ic->ic_set_channel = uath_set_channel; ic->ic_vap_create = uath_vap_create; ic->ic_vap_delete = uath_vap_delete; ic->ic_update_mcast = uath_update_mcast; ic->ic_update_promisc = uath_update_promisc; ic->ic_transmit = uath_transmit; ic->ic_parent = uath_parent; ieee80211_radiotap_attach(ic, &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap), UATH_TX_RADIOTAP_PRESENT, &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap), UATH_RX_RADIOTAP_PRESENT); if (bootverbose) ieee80211_announce(ic); return (0); fail2: UATH_UNLOCK(sc); uath_free_cmd_list(sc, sc->sc_cmd); fail1: usbd_transfer_unsetup(sc->sc_xfer, UATH_N_XFERS); fail: return (error); } static int uath_detach(device_t dev) { struct uath_softc *sc = device_get_softc(dev); struct ieee80211com *ic = &sc->sc_ic; unsigned x; /* * Prevent further allocations from RX/TX/CMD * data lists and ioctls */ UATH_LOCK(sc); sc->sc_flags |= UATH_FLAG_INVALID; STAILQ_INIT(&sc->sc_rx_active); STAILQ_INIT(&sc->sc_rx_inactive); STAILQ_INIT(&sc->sc_tx_active); STAILQ_INIT(&sc->sc_tx_inactive); STAILQ_INIT(&sc->sc_tx_pending); STAILQ_INIT(&sc->sc_cmd_active); STAILQ_INIT(&sc->sc_cmd_pending); STAILQ_INIT(&sc->sc_cmd_waiting); STAILQ_INIT(&sc->sc_cmd_inactive); uath_stop(sc); UATH_UNLOCK(sc); callout_drain(&sc->stat_ch); callout_drain(&sc->watchdog_ch); /* drain USB transfers */ for (x = 0; x != UATH_N_XFERS; x++) usbd_transfer_drain(sc->sc_xfer[x]); /* free data buffers */ UATH_LOCK(sc); uath_free_rx_data_list(sc); uath_free_tx_data_list(sc); uath_free_cmd_list(sc, sc->sc_cmd); UATH_UNLOCK(sc); /* free USB transfers and some data buffers */ usbd_transfer_unsetup(sc->sc_xfer, UATH_N_XFERS); ieee80211_ifdetach(ic); mbufq_drain(&sc->sc_snd); mtx_destroy(&sc->sc_mtx); return (0); } static void uath_free_cmd_list(struct uath_softc *sc, struct uath_cmd cmds[]) { int i; for (i = 0; i != UATH_CMD_LIST_COUNT; i++) cmds[i].buf = NULL; } static int uath_alloc_cmd_list(struct uath_softc *sc, struct uath_cmd cmds[]) { int i; 
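/* * Reset the four command queues, then carve UATH_MAX_CMDSZ-sized slots out of * the pre-allocated command DMA buffer; every command starts out on the * inactive queue, from which uath_get_cmdbuf() later hands them out. */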
STAILQ_INIT(&sc->sc_cmd_active); STAILQ_INIT(&sc->sc_cmd_pending); STAILQ_INIT(&sc->sc_cmd_waiting); STAILQ_INIT(&sc->sc_cmd_inactive); for (i = 0; i != UATH_CMD_LIST_COUNT; i++) { struct uath_cmd *cmd = &cmds[i]; cmd->sc = sc; /* backpointer for callbacks */ cmd->msgid = i; cmd->buf = ((uint8_t *)sc->sc_cmd_dma_buf) + (i * UATH_MAX_CMDSZ); STAILQ_INSERT_TAIL(&sc->sc_cmd_inactive, cmd, next); UATH_STAT_INC(sc, st_cmd_inactive); } return (0); } static int uath_host_available(struct uath_softc *sc) { struct uath_cmd_host_available setup; UATH_ASSERT_LOCKED(sc); /* inform target the host is available */ setup.sw_ver_major = htobe32(ATH_SW_VER_MAJOR); setup.sw_ver_minor = htobe32(ATH_SW_VER_MINOR); setup.sw_ver_patch = htobe32(ATH_SW_VER_PATCH); setup.sw_ver_build = htobe32(ATH_SW_VER_BUILD); return uath_cmd_read(sc, WDCMSG_HOST_AVAILABLE, &setup, sizeof setup, NULL, 0, 0); } #ifdef UATH_DEBUG static void uath_dump_cmd(const uint8_t *buf, int len, char prefix) { const char *sep = ""; int i; for (i = 0; i < len; i++) { if ((i % 16) == 0) { printf("%s%c ", sep, prefix); sep = "\n"; } else if ((i % 4) == 0) printf(" "); printf("%02x", buf[i]); } printf("\n"); } static const char * uath_codename(int code) { static const char *names[] = { "0x00", "HOST_AVAILABLE", "BIND", "TARGET_RESET", "TARGET_GET_CAPABILITY", "TARGET_SET_CONFIG", "TARGET_GET_STATUS", "TARGET_GET_STATS", "TARGET_START", "TARGET_STOP", "TARGET_ENABLE", "TARGET_DISABLE", "CREATE_CONNECTION", "UPDATE_CONNECT_ATTR", "DELETE_CONNECT", "SEND", "FLUSH", "STATS_UPDATE", "BMISS", "DEVICE_AVAIL", "SEND_COMPLETE", "DATA_AVAIL", "SET_PWR_MODE", "BMISS_ACK", "SET_LED_STEADY", "SET_LED_BLINK", "SETUP_BEACON_DESC", "BEACON_INIT", "RESET_KEY_CACHE", "RESET_KEY_CACHE_ENTRY", "SET_KEY_CACHE_ENTRY", "SET_DECOMP_MASK", "SET_REGULATORY_DOMAIN", "SET_LED_STATE", "WRITE_ASSOCID", "SET_STA_BEACON_TIMERS", "GET_TSF", "RESET_TSF", "SET_ADHOC_MODE", "SET_BASIC_RATE", "MIB_CONTROL", "GET_CHANNEL_DATA", "GET_CUR_RSSI", "SET_ANTENNA_SWITCH", "0x2c", "0x2d", "0x2e", "USE_SHORT_SLOT_TIME", "SET_POWER_MODE", "SETUP_PSPOLL_DESC", "SET_RX_MULTICAST_FILTER", "RX_FILTER", "PER_CALIBRATION", "RESET", "DISABLE", "PHY_DISABLE", "SET_TX_POWER_LIMIT", "SET_TX_QUEUE_PARAMS", "SETUP_TX_QUEUE", "RELEASE_TX_QUEUE", }; static char buf[8]; if (code < nitems(names)) return names[code]; if (code == WDCMSG_SET_DEFAULT_KEY) return "SET_DEFAULT_KEY"; snprintf(buf, sizeof(buf), "0x%02x", code); return buf; } #endif /* * Low-level function to send read or write commands to the firmware. */ static int uath_cmdsend(struct uath_softc *sc, uint32_t code, const void *idata, int ilen, void *odata, int olen, int flags) { struct uath_cmd_hdr *hdr; struct uath_cmd *cmd; int error; UATH_ASSERT_LOCKED(sc); /* grab a xfer */ cmd = uath_get_cmdbuf(sc); if (cmd == NULL) { device_printf(sc->sc_dev, "%s: empty inactive queue\n", __func__); return (ENOBUFS); } cmd->flags = flags; /* always bulk-out a multiple of 4 bytes */ cmd->buflen = roundup2(sizeof(struct uath_cmd_hdr) + ilen, 4); hdr = (struct uath_cmd_hdr *)cmd->buf; memset(hdr, 0, sizeof(struct uath_cmd_hdr)); hdr->len = htobe32(cmd->buflen); hdr->code = htobe32(code); hdr->msgid = cmd->msgid; /* don't care about endianness */ hdr->magic = htobe32((cmd->flags & UATH_CMD_FLAG_MAGIC) ? 
1 << 24 : 0); memcpy((uint8_t *)(hdr + 1), idata, ilen); #ifdef UATH_DEBUG if (sc->sc_debug & UATH_DEBUG_CMDS) { printf("%s: send %s [flags 0x%x] olen %d\n", __func__, uath_codename(code), cmd->flags, olen); if (sc->sc_debug & UATH_DEBUG_CMDS_DUMP) uath_dump_cmd(cmd->buf, cmd->buflen, '+'); } #endif cmd->odata = odata; KASSERT(odata == NULL || olen < UATH_MAX_CMDSZ - sizeof(*hdr) + sizeof(uint32_t), ("odata %p olen %u", odata, olen)); cmd->olen = olen; STAILQ_INSERT_TAIL(&sc->sc_cmd_pending, cmd, next); UATH_STAT_INC(sc, st_cmd_pending); usbd_transfer_start(sc->sc_xfer[UATH_INTR_TX]); if (cmd->flags & UATH_CMD_FLAG_READ) { usbd_transfer_start(sc->sc_xfer[UATH_INTR_RX]); /* wait at most two seconds for command reply */ error = mtx_sleep(cmd, &sc->sc_mtx, 0, "uathcmd", 2 * hz); cmd->odata = NULL; /* in case reply comes too late */ if (error != 0) { device_printf(sc->sc_dev, "timeout waiting for reply " "to cmd 0x%x (%u)\n", code, code); } else if (cmd->olen != olen) { device_printf(sc->sc_dev, "unexpected reply data count " "to cmd 0x%x (%u), got %u, expected %u\n", code, code, cmd->olen, olen); error = EINVAL; } return (error); } return (0); } static int uath_cmd_read(struct uath_softc *sc, uint32_t code, const void *idata, int ilen, void *odata, int olen, int flags) { flags |= UATH_CMD_FLAG_READ; return uath_cmdsend(sc, code, idata, ilen, odata, olen, flags); } static int uath_cmd_write(struct uath_softc *sc, uint32_t code, const void *data, int len, int flags) { flags &= ~UATH_CMD_FLAG_READ; return uath_cmdsend(sc, code, data, len, NULL, 0, flags); } static struct uath_cmd * uath_get_cmdbuf(struct uath_softc *sc) { struct uath_cmd *uc; UATH_ASSERT_LOCKED(sc); uc = STAILQ_FIRST(&sc->sc_cmd_inactive); if (uc != NULL) { STAILQ_REMOVE_HEAD(&sc->sc_cmd_inactive, next); UATH_STAT_DEC(sc, st_cmd_inactive); } else uc = NULL; if (uc == NULL) DPRINTF(sc, UATH_DEBUG_XMIT, "%s: %s\n", __func__, "out of command xmit buffers"); return (uc); } /* * This function is called periodically (every second) when associated to * query device statistics. */ static void uath_stat(void *arg) { struct uath_softc *sc = arg; int error; UATH_LOCK(sc); /* * Send request for statistics asynchronously. The timer will be * restarted when we'll get the stats notification. 
*/ error = uath_cmd_write(sc, WDCMSG_TARGET_GET_STATS, NULL, 0, UATH_CMD_FLAG_ASYNC); if (error != 0) { device_printf(sc->sc_dev, "could not query stats, error %d\n", error); } UATH_UNLOCK(sc); } static int uath_get_capability(struct uath_softc *sc, uint32_t cap, uint32_t *val) { int error; cap = htobe32(cap); error = uath_cmd_read(sc, WDCMSG_TARGET_GET_CAPABILITY, &cap, sizeof cap, val, sizeof(uint32_t), UATH_CMD_FLAG_MAGIC); if (error != 0) { device_printf(sc->sc_dev, "could not read capability %u\n", be32toh(cap)); return (error); } *val = be32toh(*val); return (error); } static int uath_get_devcap(struct uath_softc *sc) { #define GETCAP(x, v) do { \ error = uath_get_capability(sc, x, &v); \ if (error != 0) \ return (error); \ DPRINTF(sc, UATH_DEBUG_DEVCAP, \ "%s: %s=0x%08x\n", __func__, #x, v); \ } while (0) struct uath_devcap *cap = &sc->sc_devcap; int error; /* collect device capabilities */ GETCAP(CAP_TARGET_VERSION, cap->targetVersion); GETCAP(CAP_TARGET_REVISION, cap->targetRevision); GETCAP(CAP_MAC_VERSION, cap->macVersion); GETCAP(CAP_MAC_REVISION, cap->macRevision); GETCAP(CAP_PHY_REVISION, cap->phyRevision); GETCAP(CAP_ANALOG_5GHz_REVISION, cap->analog5GhzRevision); GETCAP(CAP_ANALOG_2GHz_REVISION, cap->analog2GhzRevision); GETCAP(CAP_REG_DOMAIN, cap->regDomain); GETCAP(CAP_REG_CAP_BITS, cap->regCapBits); #if 0 /* NB: not supported in rev 1.5 */ GETCAP(CAP_COUNTRY_CODE, cap->countryCode); #endif GETCAP(CAP_WIRELESS_MODES, cap->wirelessModes); GETCAP(CAP_CHAN_SPREAD_SUPPORT, cap->chanSpreadSupport); GETCAP(CAP_COMPRESS_SUPPORT, cap->compressSupport); GETCAP(CAP_BURST_SUPPORT, cap->burstSupport); GETCAP(CAP_FAST_FRAMES_SUPPORT, cap->fastFramesSupport); GETCAP(CAP_CHAP_TUNING_SUPPORT, cap->chapTuningSupport); GETCAP(CAP_TURBOG_SUPPORT, cap->turboGSupport); GETCAP(CAP_TURBO_PRIME_SUPPORT, cap->turboPrimeSupport); GETCAP(CAP_DEVICE_TYPE, cap->deviceType); GETCAP(CAP_WME_SUPPORT, cap->wmeSupport); GETCAP(CAP_TOTAL_QUEUES, cap->numTxQueues); GETCAP(CAP_CONNECTION_ID_MAX, cap->connectionIdMax); GETCAP(CAP_LOW_5GHZ_CHAN, cap->low5GhzChan); GETCAP(CAP_HIGH_5GHZ_CHAN, cap->high5GhzChan); GETCAP(CAP_LOW_2GHZ_CHAN, cap->low2GhzChan); GETCAP(CAP_HIGH_2GHZ_CHAN, cap->high2GhzChan); GETCAP(CAP_TWICE_ANTENNAGAIN_5G, cap->twiceAntennaGain5G); GETCAP(CAP_TWICE_ANTENNAGAIN_2G, cap->twiceAntennaGain2G); GETCAP(CAP_CIPHER_AES_CCM, cap->supportCipherAES_CCM); GETCAP(CAP_CIPHER_TKIP, cap->supportCipherTKIP); GETCAP(CAP_MIC_TKIP, cap->supportMicTKIP); cap->supportCipherWEP = 1; /* NB: always available */ return (0); } static int uath_get_devstatus(struct uath_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN]) { int error; /* retrieve MAC address */ error = uath_get_status(sc, ST_MAC_ADDR, macaddr, IEEE80211_ADDR_LEN); if (error != 0) { device_printf(sc->sc_dev, "could not read MAC address\n"); return (error); } error = uath_get_status(sc, ST_SERIAL_NUMBER, &sc->sc_serial[0], sizeof(sc->sc_serial)); if (error != 0) { device_printf(sc->sc_dev, "could not read device serial number\n"); return (error); } return (0); } static int uath_get_status(struct uath_softc *sc, uint32_t which, void *odata, int olen) { int error; which = htobe32(which); error = uath_cmd_read(sc, WDCMSG_TARGET_GET_STATUS, &which, sizeof(which), odata, olen, UATH_CMD_FLAG_MAGIC); if (error != 0) device_printf(sc->sc_dev, "could not read EEPROM offset 0x%02x\n", be32toh(which)); return (error); } static void uath_free_data_list(struct uath_softc *sc, struct uath_data data[], int ndata, int fillmbuf) { int i; for (i = 0; i < ndata; i++) { 
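/* * Slots backed by private mbufs (fillmbuf != 0) own their storage; Tx slots * point into the shared Tx DMA buffer, so only the pointers are cleared. * Any node reference still held by the slot is dropped in either case. */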
struct uath_data *dp = &data[i]; if (fillmbuf == 1) { if (dp->m != NULL) { m_freem(dp->m); dp->m = NULL; dp->buf = NULL; } } else { dp->buf = NULL; } if (dp->ni != NULL) { ieee80211_free_node(dp->ni); dp->ni = NULL; } } } static int uath_alloc_data_list(struct uath_softc *sc, struct uath_data data[], int ndata, int maxsz, void *dma_buf) { int i, error; for (i = 0; i < ndata; i++) { struct uath_data *dp = &data[i]; dp->sc = sc; if (dma_buf == NULL) { /* XXX check maxsz */ dp->m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (dp->m == NULL) { device_printf(sc->sc_dev, "could not allocate rx mbuf\n"); error = ENOMEM; goto fail; } dp->buf = mtod(dp->m, uint8_t *); } else { dp->m = NULL; dp->buf = ((uint8_t *)dma_buf) + (i * maxsz); } dp->ni = NULL; } return (0); fail: uath_free_data_list(sc, data, ndata, 1 /* free mbufs */); return (error); } static int uath_alloc_rx_data_list(struct uath_softc *sc) { int error, i; /* XXX is it enough to store the RX packet with MCLBYTES bytes? */ error = uath_alloc_data_list(sc, sc->sc_rx, UATH_RX_DATA_LIST_COUNT, MCLBYTES, NULL /* setup mbufs */); if (error != 0) return (error); STAILQ_INIT(&sc->sc_rx_active); STAILQ_INIT(&sc->sc_rx_inactive); for (i = 0; i < UATH_RX_DATA_LIST_COUNT; i++) { STAILQ_INSERT_HEAD(&sc->sc_rx_inactive, &sc->sc_rx[i], next); UATH_STAT_INC(sc, st_rx_inactive); } return (0); } static int uath_alloc_tx_data_list(struct uath_softc *sc) { int error, i; error = uath_alloc_data_list(sc, sc->sc_tx, UATH_TX_DATA_LIST_COUNT, UATH_MAX_TXBUFSZ, sc->sc_tx_dma_buf); if (error != 0) return (error); STAILQ_INIT(&sc->sc_tx_active); STAILQ_INIT(&sc->sc_tx_inactive); STAILQ_INIT(&sc->sc_tx_pending); for (i = 0; i < UATH_TX_DATA_LIST_COUNT; i++) { STAILQ_INSERT_HEAD(&sc->sc_tx_inactive, &sc->sc_tx[i], next); UATH_STAT_INC(sc, st_tx_inactive); } return (0); } static void uath_free_rx_data_list(struct uath_softc *sc) { uath_free_data_list(sc, sc->sc_rx, UATH_RX_DATA_LIST_COUNT, 1 /* free mbufs */); } static void uath_free_tx_data_list(struct uath_softc *sc) { uath_free_data_list(sc, sc->sc_tx, UATH_TX_DATA_LIST_COUNT, 0 /* no mbufs */); } static struct ieee80211vap * uath_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, enum ieee80211_opmode opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac[IEEE80211_ADDR_LEN]) { struct uath_vap *uvp; struct ieee80211vap *vap; if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */ return (NULL); uvp = malloc(sizeof(struct uath_vap), M_80211_VAP, M_WAITOK | M_ZERO); vap = &uvp->vap; /* enable s/w bmiss handling for sta mode */ if (ieee80211_vap_setup(ic, vap, name, unit, opmode, flags | IEEE80211_CLONE_NOBEACONS, bssid) != 0) { /* out of memory */ free(uvp, M_80211_VAP); return (NULL); } /* override state transition machine */ uvp->newstate = vap->iv_newstate; vap->iv_newstate = uath_newstate; /* complete setup */ ieee80211_vap_attach(vap, ieee80211_media_change, ieee80211_media_status, mac); ic->ic_opmode = opmode; return (vap); } static void uath_vap_delete(struct ieee80211vap *vap) { struct uath_vap *uvp = UATH_VAP(vap); ieee80211_vap_detach(vap); free(uvp, M_80211_VAP); } static int uath_init(struct uath_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); uint32_t val; int error; UATH_ASSERT_LOCKED(sc); if (sc->sc_flags & UATH_FLAG_INITDONE) uath_stop(sc); /* reset variables */ sc->sc_intrx_nextnum = sc->sc_msgid = 0; val = htobe32(0); uath_cmd_write(sc, WDCMSG_BIND, &val, sizeof val, 0); /* set MAC address */ 
uath_config_multi(sc, CFG_MAC_ADDR, vap ? vap->iv_myaddr : ic->ic_macaddr, IEEE80211_ADDR_LEN); /* XXX honor net80211 state */ uath_config(sc, CFG_RATE_CONTROL_ENABLE, 0x00000001); uath_config(sc, CFG_DIVERSITY_CTL, 0x00000001); uath_config(sc, CFG_ABOLT, 0x0000003f); uath_config(sc, CFG_WME_ENABLED, 0x00000001); uath_config(sc, CFG_SERVICE_TYPE, 1); uath_config(sc, CFG_TP_SCALE, 0x00000000); uath_config(sc, CFG_TPC_HALF_DBM5, 0x0000003c); uath_config(sc, CFG_TPC_HALF_DBM2, 0x0000003c); uath_config(sc, CFG_OVERRD_TX_POWER, 0x00000000); uath_config(sc, CFG_GMODE_PROTECTION, 0x00000000); uath_config(sc, CFG_GMODE_PROTECT_RATE_INDEX, 0x00000003); uath_config(sc, CFG_PROTECTION_TYPE, 0x00000000); uath_config(sc, CFG_MODE_CTS, 0x00000002); error = uath_cmd_read(sc, WDCMSG_TARGET_START, NULL, 0, &val, sizeof(val), UATH_CMD_FLAG_MAGIC); if (error) { device_printf(sc->sc_dev, "could not start target, error %d\n", error); goto fail; } DPRINTF(sc, UATH_DEBUG_INIT, "%s returns handle: 0x%x\n", uath_codename(WDCMSG_TARGET_START), be32toh(val)); /* set default channel */ error = uath_switch_channel(sc, ic->ic_curchan); if (error) { device_printf(sc->sc_dev, "could not switch channel, error %d\n", error); goto fail; } val = htobe32(TARGET_DEVICE_AWAKE); uath_cmd_write(sc, WDCMSG_SET_PWR_MODE, &val, sizeof val, 0); /* XXX? check */ uath_cmd_write(sc, WDCMSG_RESET_KEY_CACHE, NULL, 0, 0); usbd_transfer_start(sc->sc_xfer[UATH_BULK_RX]); /* enable Rx */ uath_set_rxfilter(sc, 0x0, UATH_FILTER_OP_INIT); uath_set_rxfilter(sc, UATH_FILTER_RX_UCAST | UATH_FILTER_RX_MCAST | UATH_FILTER_RX_BCAST | UATH_FILTER_RX_BEACON, UATH_FILTER_OP_SET); sc->sc_flags |= UATH_FLAG_INITDONE; callout_reset(&sc->watchdog_ch, hz, uath_watchdog, sc); return (0); fail: uath_stop(sc); return (error); } static void uath_stop(struct uath_softc *sc) { UATH_ASSERT_LOCKED(sc); sc->sc_flags &= ~UATH_FLAG_INITDONE; callout_stop(&sc->stat_ch); callout_stop(&sc->watchdog_ch); sc->sc_tx_timer = 0; /* abort pending transmits */ uath_abort_xfers(sc); /* flush data & control requests into the target */ (void)uath_flush(sc); /* set a LED status to the disconnected. */ uath_set_ledstate(sc, 0); /* stop the target */ uath_cmd_write(sc, WDCMSG_TARGET_STOP, NULL, 0, 0); } static int uath_config(struct uath_softc *sc, uint32_t reg, uint32_t val) { struct uath_write_mac write; int error; write.reg = htobe32(reg); write.len = htobe32(0); /* 0 = single write */ *(uint32_t *)write.data = htobe32(val); error = uath_cmd_write(sc, WDCMSG_TARGET_SET_CONFIG, &write, 3 * sizeof (uint32_t), 0); if (error != 0) { device_printf(sc->sc_dev, "could not write register 0x%02x\n", reg); } return (error); } static int uath_config_multi(struct uath_softc *sc, uint32_t reg, const void *data, int len) { struct uath_write_mac write; int error; write.reg = htobe32(reg); write.len = htobe32(len); bcopy(data, write.data, len); /* properly handle the case where len is zero (reset) */ error = uath_cmd_write(sc, WDCMSG_TARGET_SET_CONFIG, &write, (len == 0) ? 
sizeof (uint32_t) : 2 * sizeof (uint32_t) + len, 0); if (error != 0) { device_printf(sc->sc_dev, "could not write %d bytes to register 0x%02x\n", len, reg); } return (error); } static int uath_switch_channel(struct uath_softc *sc, struct ieee80211_channel *c) { int error; UATH_ASSERT_LOCKED(sc); /* set radio frequency */ error = uath_set_chan(sc, c); if (error) { device_printf(sc->sc_dev, "could not set channel, error %d\n", error); goto failed; } /* reset Tx rings */ error = uath_reset_tx_queues(sc); if (error) { device_printf(sc->sc_dev, "could not reset Tx queues, error %d\n", error); goto failed; } /* set Tx rings WME properties */ error = uath_wme_init(sc); if (error) { device_printf(sc->sc_dev, "could not init Tx queues, error %d\n", error); goto failed; } error = uath_set_ledstate(sc, 0); if (error) { device_printf(sc->sc_dev, "could not set led state, error %d\n", error); goto failed; } error = uath_flush(sc); if (error) { device_printf(sc->sc_dev, "could not flush pipes, error %d\n", error); goto failed; } failed: return (error); } static int uath_set_rxfilter(struct uath_softc *sc, uint32_t bits, uint32_t op) { struct uath_cmd_rx_filter rxfilter; rxfilter.bits = htobe32(bits); rxfilter.op = htobe32(op); DPRINTF(sc, UATH_DEBUG_RECV | UATH_DEBUG_RECV_ALL, "setting Rx filter=0x%x flags=0x%x\n", bits, op); return uath_cmd_write(sc, WDCMSG_RX_FILTER, &rxfilter, sizeof rxfilter, 0); } static void uath_watchdog(void *arg) { struct uath_softc *sc = arg; struct ieee80211com *ic = &sc->sc_ic; if (sc->sc_tx_timer > 0) { if (--sc->sc_tx_timer == 0) { device_printf(sc->sc_dev, "device timeout\n"); counter_u64_add(ic->ic_oerrors, 1); ieee80211_restart_all(ic); return; } callout_reset(&sc->watchdog_ch, hz, uath_watchdog, sc); } } static void uath_abort_xfers(struct uath_softc *sc) { int i; UATH_ASSERT_LOCKED(sc); /* abort any pending transfers */ for (i = 0; i < UATH_N_XFERS; i++) usbd_transfer_stop(sc->sc_xfer[i]); } static int uath_flush(struct uath_softc *sc) { int error; error = uath_dataflush(sc); if (error != 0) goto failed; error = uath_cmdflush(sc); if (error != 0) goto failed; failed: return (error); } static int uath_cmdflush(struct uath_softc *sc) { return uath_cmd_write(sc, WDCMSG_FLUSH, NULL, 0, 0); } static int uath_dataflush(struct uath_softc *sc) { struct uath_data *data; struct uath_chunk *chunk; struct uath_tx_desc *desc; UATH_ASSERT_LOCKED(sc); data = uath_getbuf(sc); if (data == NULL) return (ENOBUFS); data->buflen = sizeof(struct uath_chunk) + sizeof(struct uath_tx_desc); data->m = NULL; data->ni = NULL; chunk = (struct uath_chunk *)data->buf; desc = (struct uath_tx_desc *)(chunk + 1); /* one chunk only */ chunk->seqnum = 0; chunk->flags = UATH_CFLAGS_FINAL; chunk->length = htobe16(sizeof (struct uath_tx_desc)); memset(desc, 0, sizeof(struct uath_tx_desc)); desc->msglen = htobe32(sizeof(struct uath_tx_desc)); desc->msgid = (sc->sc_msgid++) + 1; /* don't care about endianness */ desc->type = htobe32(WDCMSG_FLUSH); desc->txqid = htobe32(0); desc->connid = htobe32(0); desc->flags = htobe32(0); #ifdef UATH_DEBUG if (sc->sc_debug & UATH_DEBUG_CMDS) { DPRINTF(sc, UATH_DEBUG_RESET, "send flush ix %d\n", desc->msgid); if (sc->sc_debug & UATH_DEBUG_CMDS_DUMP) uath_dump_cmd(data->buf, data->buflen, '+'); } #endif STAILQ_INSERT_TAIL(&sc->sc_tx_pending, data, next); UATH_STAT_INC(sc, st_tx_pending); sc->sc_tx_timer = 5; usbd_transfer_start(sc->sc_xfer[UATH_BULK_TX]); return (0); } static struct uath_data * _uath_getbuf(struct uath_softc *sc) { struct uath_data *bf; bf = 
STAILQ_FIRST(&sc->sc_tx_inactive); if (bf != NULL) { STAILQ_REMOVE_HEAD(&sc->sc_tx_inactive, next); UATH_STAT_DEC(sc, st_tx_inactive); } else bf = NULL; if (bf == NULL) DPRINTF(sc, UATH_DEBUG_XMIT, "%s: %s\n", __func__, "out of xmit buffers"); return (bf); } static struct uath_data * uath_getbuf(struct uath_softc *sc) { struct uath_data *bf; UATH_ASSERT_LOCKED(sc); bf = _uath_getbuf(sc); if (bf == NULL) DPRINTF(sc, UATH_DEBUG_XMIT, "%s: stop queue\n", __func__); return (bf); } static int uath_set_ledstate(struct uath_softc *sc, int connected) { DPRINTF(sc, UATH_DEBUG_LED, "set led state %sconnected\n", connected ? "" : "!"); connected = htobe32(connected); return uath_cmd_write(sc, WDCMSG_SET_LED_STATE, &connected, sizeof connected, 0); } static int uath_set_chan(struct uath_softc *sc, struct ieee80211_channel *c) { #ifdef UATH_DEBUG struct ieee80211com *ic = &sc->sc_ic; #endif struct uath_cmd_reset reset; memset(&reset, 0, sizeof(reset)); if (IEEE80211_IS_CHAN_2GHZ(c)) reset.flags |= htobe32(UATH_CHAN_2GHZ); if (IEEE80211_IS_CHAN_5GHZ(c)) reset.flags |= htobe32(UATH_CHAN_5GHZ); /* NB: 11g =>'s 11b so don't specify both OFDM and CCK */ if (IEEE80211_IS_CHAN_OFDM(c)) reset.flags |= htobe32(UATH_CHAN_OFDM); else if (IEEE80211_IS_CHAN_CCK(c)) reset.flags |= htobe32(UATH_CHAN_CCK); /* turbo can be used in either 2GHz or 5GHz */ if (c->ic_flags & IEEE80211_CHAN_TURBO) reset.flags |= htobe32(UATH_CHAN_TURBO); reset.freq = htobe32(c->ic_freq); reset.maxrdpower = htobe32(50); /* XXX */ reset.channelchange = htobe32(1); reset.keeprccontent = htobe32(0); DPRINTF(sc, UATH_DEBUG_CHANNEL, "set channel %d, flags 0x%x freq %u\n", ieee80211_chan2ieee(ic, c), be32toh(reset.flags), be32toh(reset.freq)); return uath_cmd_write(sc, WDCMSG_RESET, &reset, sizeof reset, 0); } static int uath_reset_tx_queues(struct uath_softc *sc) { int ac, error; DPRINTF(sc, UATH_DEBUG_RESET, "%s: reset Tx queues\n", __func__); for (ac = 0; ac < 4; ac++) { const uint32_t qid = htobe32(ac); error = uath_cmd_write(sc, WDCMSG_RELEASE_TX_QUEUE, &qid, sizeof qid, 0); if (error != 0) break; } return (error); } static int uath_wme_init(struct uath_softc *sc) { /* XXX get from net80211 */ static const struct uath_wme_settings uath_wme_11g[4] = { { 7, 4, 10, 0, 0 }, /* Background */ { 3, 4, 10, 0, 0 }, /* Best-Effort */ { 3, 3, 4, 26, 0 }, /* Video */ { 2, 2, 3, 47, 0 } /* Voice */ }; struct uath_cmd_txq_setup qinfo; int ac, error; DPRINTF(sc, UATH_DEBUG_WME, "%s: setup Tx queues\n", __func__); for (ac = 0; ac < 4; ac++) { qinfo.qid = htobe32(ac); qinfo.len = htobe32(sizeof(qinfo.attr)); qinfo.attr.priority = htobe32(ac); /* XXX */ qinfo.attr.aifs = htobe32(uath_wme_11g[ac].aifsn); qinfo.attr.logcwmin = htobe32(uath_wme_11g[ac].logcwmin); qinfo.attr.logcwmax = htobe32(uath_wme_11g[ac].logcwmax); qinfo.attr.bursttime = htobe32(IEEE80211_TXOP_TO_US( uath_wme_11g[ac].txop)); qinfo.attr.mode = htobe32(uath_wme_11g[ac].acm);/*XXX? */ qinfo.attr.qflags = htobe32(1); /* XXX? 
*/ error = uath_cmd_write(sc, WDCMSG_SETUP_TX_QUEUE, &qinfo, sizeof qinfo, 0); if (error != 0) break; } return (error); } static void uath_parent(struct ieee80211com *ic) { struct uath_softc *sc = ic->ic_softc; int startall = 0; UATH_LOCK(sc); if (sc->sc_flags & UATH_FLAG_INVALID) { UATH_UNLOCK(sc); return; } if (ic->ic_nrunning > 0) { if (!(sc->sc_flags & UATH_FLAG_INITDONE)) { uath_init(sc); startall = 1; } } else if (sc->sc_flags & UATH_FLAG_INITDONE) uath_stop(sc); UATH_UNLOCK(sc); if (startall) ieee80211_start_all(ic); } static int uath_tx_start(struct uath_softc *sc, struct mbuf *m0, struct ieee80211_node *ni, struct uath_data *data) { struct ieee80211vap *vap = ni->ni_vap; struct uath_chunk *chunk; struct uath_tx_desc *desc; const struct ieee80211_frame *wh; struct ieee80211_key *k; int framelen, msglen; UATH_ASSERT_LOCKED(sc); data->ni = ni; data->m = m0; chunk = (struct uath_chunk *)data->buf; desc = (struct uath_tx_desc *)(chunk + 1); if (ieee80211_radiotap_active_vap(vap)) { struct uath_tx_radiotap_header *tap = &sc->sc_txtap; tap->wt_flags = 0; if (m0->m_flags & M_FRAG) tap->wt_flags |= IEEE80211_RADIOTAP_F_FRAG; ieee80211_radiotap_tx(vap, m0); } wh = mtod(m0, struct ieee80211_frame *); if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { k = ieee80211_crypto_encap(ni, m0); if (k == NULL) { m_freem(m0); return (ENOBUFS); } /* packet header may have moved, reset our local pointer */ wh = mtod(m0, struct ieee80211_frame *); } m_copydata(m0, 0, m0->m_pkthdr.len, (uint8_t *)(desc + 1)); framelen = m0->m_pkthdr.len + IEEE80211_CRC_LEN; msglen = framelen + sizeof (struct uath_tx_desc); data->buflen = msglen + sizeof (struct uath_chunk); /* one chunk only for now */ chunk->seqnum = sc->sc_seqnum++; chunk->flags = (m0->m_flags & M_FRAG) ? 0 : UATH_CFLAGS_FINAL; if (m0->m_flags & M_LASTFRAG) chunk->flags |= UATH_CFLAGS_FINAL; chunk->flags = UATH_CFLAGS_FINAL; chunk->length = htobe16(msglen); /* fill Tx descriptor */ desc->msglen = htobe32(msglen); /* NB: to get UATH_TX_NOTIFY reply, `msgid' must be larger than 0 */ desc->msgid = (sc->sc_msgid++) + 1; /* don't care about endianness */ desc->type = htobe32(WDCMSG_SEND); switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) { case IEEE80211_FC0_TYPE_CTL: case IEEE80211_FC0_TYPE_MGT: /* NB: force all management frames to highest queue */ if (ni->ni_flags & IEEE80211_NODE_QOS) { /* NB: force all management frames to highest queue */ desc->txqid = htobe32(WME_AC_VO | UATH_TXQID_MINRATE); } else desc->txqid = htobe32(WME_AC_BE | UATH_TXQID_MINRATE); break; case IEEE80211_FC0_TYPE_DATA: /* XXX multicast frames should honor mcastrate */ desc->txqid = htobe32(M_WME_GETAC(m0)); break; default: device_printf(sc->sc_dev, "bogus frame type 0x%x (%s)\n", wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__); m_freem(m0); return (EIO); } if (vap->iv_state == IEEE80211_S_AUTH || vap->iv_state == IEEE80211_S_ASSOC || vap->iv_state == IEEE80211_S_RUN) desc->connid = htobe32(UATH_ID_BSS); else desc->connid = htobe32(UATH_ID_INVALID); desc->flags = htobe32(0 /* no UATH_TX_NOTIFY */); desc->buflen = htobe32(m0->m_pkthdr.len); #ifdef UATH_DEBUG DPRINTF(sc, UATH_DEBUG_XMIT, "send frame ix %u framelen %d msglen %d connid 0x%x txqid 0x%x\n", desc->msgid, framelen, msglen, be32toh(desc->connid), be32toh(desc->txqid)); if (sc->sc_debug & UATH_DEBUG_XMIT_DUMP) uath_dump_cmd(data->buf, data->buflen, '+'); #endif STAILQ_INSERT_TAIL(&sc->sc_tx_pending, data, next); UATH_STAT_INC(sc, st_tx_pending); usbd_transfer_start(sc->sc_xfer[UATH_BULK_TX]); return (0); } /* * Cleanup driver 
resources when we run out of buffers while processing * fragments; return the tx buffers allocated and drop node references. */ static void uath_txfrag_cleanup(struct uath_softc *sc, uath_datahead *frags, struct ieee80211_node *ni) { struct uath_data *bf, *next; UATH_ASSERT_LOCKED(sc); STAILQ_FOREACH_SAFE(bf, frags, next, next) { /* NB: bf assumed clean */ STAILQ_REMOVE_HEAD(frags, next); STAILQ_INSERT_HEAD(&sc->sc_tx_inactive, bf, next); UATH_STAT_INC(sc, st_tx_inactive); ieee80211_node_decref(ni); } } /* * Setup xmit of a fragmented frame. Allocate a buffer for each frag and bump * the node reference count to reflect the held reference to be setup by * uath_tx_start. */ static int uath_txfrag_setup(struct uath_softc *sc, uath_datahead *frags, struct mbuf *m0, struct ieee80211_node *ni) { struct mbuf *m; struct uath_data *bf; UATH_ASSERT_LOCKED(sc); for (m = m0->m_nextpkt; m != NULL; m = m->m_nextpkt) { bf = uath_getbuf(sc); if (bf == NULL) { /* out of buffers, cleanup */ uath_txfrag_cleanup(sc, frags, ni); break; } - ieee80211_node_incref(ni); + (void) ieee80211_ref_node(ni); STAILQ_INSERT_TAIL(frags, bf, next); } return !STAILQ_EMPTY(frags); } static int uath_transmit(struct ieee80211com *ic, struct mbuf *m) { struct uath_softc *sc = ic->ic_softc; int error; UATH_LOCK(sc); if ((sc->sc_flags & UATH_FLAG_INITDONE) == 0) { UATH_UNLOCK(sc); return (ENXIO); } error = mbufq_enqueue(&sc->sc_snd, m); if (error) { UATH_UNLOCK(sc); return (error); } uath_start(sc); UATH_UNLOCK(sc); return (0); } static void uath_start(struct uath_softc *sc) { struct uath_data *bf; struct ieee80211_node *ni; struct mbuf *m, *next; uath_datahead frags; UATH_ASSERT_LOCKED(sc); if ((sc->sc_flags & UATH_FLAG_INITDONE) == 0 || (sc->sc_flags & UATH_FLAG_INVALID)) return; while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) { bf = uath_getbuf(sc); if (bf == NULL) { mbufq_prepend(&sc->sc_snd, m); break; } ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; m->m_pkthdr.rcvif = NULL; /* * Check for fragmentation. If this frame has been broken up * verify we have enough buffers to send all the fragments * so all go out or none... */ STAILQ_INIT(&frags); if ((m->m_flags & M_FRAG) && !uath_txfrag_setup(sc, &frags, m, ni)) { DPRINTF(sc, UATH_DEBUG_XMIT, "%s: out of txfrag buffers\n", __func__); ieee80211_free_mbuf(m); goto bad; } sc->sc_seqnum = 0; nextfrag: /* * Pass the frame to the h/w for transmission. * Fragmented frames have each frag chained together * with m_nextpkt. We know there are sufficient uath_data's * to send all the frags because of work done by * uath_txfrag_setup. */ next = m->m_nextpkt; if (uath_tx_start(sc, m, ni, bf) != 0) { bad: if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1); reclaim: STAILQ_INSERT_HEAD(&sc->sc_tx_inactive, bf, next); UATH_STAT_INC(sc, st_tx_inactive); uath_txfrag_cleanup(sc, &frags, ni); ieee80211_free_node(ni); continue; } if (next != NULL) { /* * Beware of state changing between frags. XXX check sta power-save state? 
*/ if (ni->ni_vap->iv_state != IEEE80211_S_RUN) { DPRINTF(sc, UATH_DEBUG_XMIT, "%s: flush fragmented packet, state %s\n", __func__, ieee80211_state_name[ni->ni_vap->iv_state]); ieee80211_free_mbuf(next); goto reclaim; } m = next; bf = STAILQ_FIRST(&frags); KASSERT(bf != NULL, ("no buf for txfrag")); STAILQ_REMOVE_HEAD(&frags, next); goto nextfrag; } sc->sc_tx_timer = 5; } } static int uath_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params) { struct ieee80211com *ic = ni->ni_ic; struct uath_data *bf; struct uath_softc *sc = ic->ic_softc; UATH_LOCK(sc); /* prevent management frames from being sent if we're not ready */ if ((sc->sc_flags & UATH_FLAG_INVALID) || !(sc->sc_flags & UATH_FLAG_INITDONE)) { m_freem(m); UATH_UNLOCK(sc); return (ENETDOWN); } /* grab a TX buffer */ bf = uath_getbuf(sc); if (bf == NULL) { m_freem(m); UATH_UNLOCK(sc); return (ENOBUFS); } sc->sc_seqnum = 0; if (uath_tx_start(sc, m, ni, bf) != 0) { STAILQ_INSERT_HEAD(&sc->sc_tx_inactive, bf, next); UATH_STAT_INC(sc, st_tx_inactive); UATH_UNLOCK(sc); return (EIO); } UATH_UNLOCK(sc); sc->sc_tx_timer = 5; return (0); } static void uath_scan_start(struct ieee80211com *ic) { /* do nothing */ } static void uath_scan_end(struct ieee80211com *ic) { /* do nothing */ } static void uath_set_channel(struct ieee80211com *ic) { struct uath_softc *sc = ic->ic_softc; UATH_LOCK(sc); if ((sc->sc_flags & UATH_FLAG_INVALID) || (sc->sc_flags & UATH_FLAG_INITDONE) == 0) { UATH_UNLOCK(sc); return; } (void)uath_switch_channel(sc, ic->ic_curchan); UATH_UNLOCK(sc); } static int uath_set_rxmulti_filter(struct uath_softc *sc) { /* XXX broken */ return (0); } static void uath_update_mcast(struct ieee80211com *ic) { struct uath_softc *sc = ic->ic_softc; UATH_LOCK(sc); if ((sc->sc_flags & UATH_FLAG_INVALID) || (sc->sc_flags & UATH_FLAG_INITDONE) == 0) { UATH_UNLOCK(sc); return; } /* * this is for avoiding a race condition when we try to * connect to the AP with WPA. */ if (sc->sc_flags & UATH_FLAG_INITDONE) (void)uath_set_rxmulti_filter(sc); UATH_UNLOCK(sc); } static void uath_update_promisc(struct ieee80211com *ic) { struct uath_softc *sc = ic->ic_softc; UATH_LOCK(sc); if ((sc->sc_flags & UATH_FLAG_INVALID) || (sc->sc_flags & UATH_FLAG_INITDONE) == 0) { UATH_UNLOCK(sc); return; } if (sc->sc_flags & UATH_FLAG_INITDONE) { uath_set_rxfilter(sc, UATH_FILTER_RX_UCAST | UATH_FILTER_RX_MCAST | UATH_FILTER_RX_BCAST | UATH_FILTER_RX_BEACON | UATH_FILTER_RX_PROM, UATH_FILTER_OP_SET); } UATH_UNLOCK(sc); } static int uath_create_connection(struct uath_softc *sc, uint32_t connid) { const struct ieee80211_rateset *rs; struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); struct ieee80211_node *ni; struct uath_cmd_create_connection create; ni = ieee80211_ref_node(vap->iv_bss); memset(&create, 0, sizeof(create)); create.connid = htobe32(connid); create.bssid = htobe32(0); /* XXX packed or not?
*/ create.size = htobe32(sizeof(struct uath_cmd_rateset)); rs = &ni->ni_rates; create.connattr.rateset.length = rs->rs_nrates; bcopy(rs->rs_rates, &create.connattr.rateset.set[0], rs->rs_nrates); /* XXX turbo */ if (IEEE80211_IS_CHAN_A(ni->ni_chan)) create.connattr.wlanmode = htobe32(WLAN_MODE_11a); else if (IEEE80211_IS_CHAN_ANYG(ni->ni_chan)) create.connattr.wlanmode = htobe32(WLAN_MODE_11g); else create.connattr.wlanmode = htobe32(WLAN_MODE_11b); ieee80211_free_node(ni); return uath_cmd_write(sc, WDCMSG_CREATE_CONNECTION, &create, sizeof create, 0); } static int uath_set_rates(struct uath_softc *sc, const struct ieee80211_rateset *rs) { struct uath_cmd_rates rates; memset(&rates, 0, sizeof(rates)); rates.connid = htobe32(UATH_ID_BSS); /* XXX */ rates.size = htobe32(sizeof(struct uath_cmd_rateset)); /* XXX bounds check rs->rs_nrates */ rates.rateset.length = rs->rs_nrates; bcopy(rs->rs_rates, &rates.rateset.set[0], rs->rs_nrates); DPRINTF(sc, UATH_DEBUG_RATES, "setting supported rates nrates=%d\n", rs->rs_nrates); return uath_cmd_write(sc, WDCMSG_SET_BASIC_RATE, &rates, sizeof rates, 0); } static int uath_write_associd(struct uath_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); struct ieee80211_node *ni; struct uath_cmd_set_associd associd; ni = ieee80211_ref_node(vap->iv_bss); memset(&associd, 0, sizeof(associd)); associd.defaultrateix = htobe32(1); /* XXX */ associd.associd = htobe32(ni->ni_associd); associd.timoffset = htobe32(0x3b); /* XXX */ IEEE80211_ADDR_COPY(associd.bssid, ni->ni_bssid); ieee80211_free_node(ni); return uath_cmd_write(sc, WDCMSG_WRITE_ASSOCID, &associd, sizeof associd, 0); } static int uath_set_ledsteady(struct uath_softc *sc, int lednum, int ledmode) { struct uath_cmd_ledsteady led; led.lednum = htobe32(lednum); led.ledmode = htobe32(ledmode); DPRINTF(sc, UATH_DEBUG_LED, "set %s led %s (steady)\n", (lednum == UATH_LED_LINK) ? "link" : "activity", ledmode ? "on" : "off"); return uath_cmd_write(sc, WDCMSG_SET_LED_STEADY, &led, sizeof led, 0); } static int uath_set_ledblink(struct uath_softc *sc, int lednum, int ledmode, int blinkrate, int slowmode) { struct uath_cmd_ledblink led; led.lednum = htobe32(lednum); led.ledmode = htobe32(ledmode); led.blinkrate = htobe32(blinkrate); led.slowmode = htobe32(slowmode); DPRINTF(sc, UATH_DEBUG_LED, "set %s led %s (blink)\n", (lednum == UATH_LED_LINK) ? "link" : "activity", ledmode ? "on" : "off"); return uath_cmd_write(sc, WDCMSG_SET_LED_BLINK, &led, sizeof led, 0); } static int uath_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { enum ieee80211_state ostate = vap->iv_state; int error; struct ieee80211_node *ni; struct ieee80211com *ic = vap->iv_ic; struct uath_softc *sc = ic->ic_softc; struct uath_vap *uvp = UATH_VAP(vap); DPRINTF(sc, UATH_DEBUG_STATE, "%s: %s -> %s\n", __func__, ieee80211_state_name[vap->iv_state], ieee80211_state_name[nstate]); IEEE80211_UNLOCK(ic); UATH_LOCK(sc); callout_stop(&sc->stat_ch); callout_stop(&sc->watchdog_ch); ni = ieee80211_ref_node(vap->iv_bss); switch (nstate) { case IEEE80211_S_INIT: if (ostate == IEEE80211_S_RUN) { /* turn link and activity LEDs off */ uath_set_ledstate(sc, 0); } break; case IEEE80211_S_SCAN: break; case IEEE80211_S_AUTH: /* XXX good place? 
set RTS threshold */ uath_config(sc, CFG_USER_RTS_THRESHOLD, vap->iv_rtsthreshold); /* XXX bad place */ error = uath_set_keys(sc, vap); if (error != 0) { device_printf(sc->sc_dev, "could not set crypto keys, error %d\n", error); break; } if (uath_switch_channel(sc, ni->ni_chan) != 0) { device_printf(sc->sc_dev, "could not switch channel\n"); break; } if (uath_create_connection(sc, UATH_ID_BSS) != 0) { device_printf(sc->sc_dev, "could not create connection\n"); break; } break; case IEEE80211_S_ASSOC: if (uath_set_rates(sc, &ni->ni_rates) != 0) { device_printf(sc->sc_dev, "could not set negotiated rate set\n"); break; } break; case IEEE80211_S_RUN: /* XXX monitor mode has not been tested */ if (ic->ic_opmode == IEEE80211_M_MONITOR) { uath_set_ledstate(sc, 1); break; } /* * Tx rate is controlled by firmware, report the maximum * negotiated rate in ifconfig output. */ ni->ni_txrate = ni->ni_rates.rs_rates[ni->ni_rates.rs_nrates-1]; if (uath_write_associd(sc) != 0) { device_printf(sc->sc_dev, "could not write association id\n"); break; } /* turn link LED on */ uath_set_ledsteady(sc, UATH_LED_LINK, UATH_LED_ON); /* make activity LED blink */ uath_set_ledblink(sc, UATH_LED_ACTIVITY, UATH_LED_ON, 1, 2); /* set state to associated */ uath_set_ledstate(sc, 1); /* start statistics timer */ callout_reset(&sc->stat_ch, hz, uath_stat, sc); break; default: break; } ieee80211_free_node(ni); UATH_UNLOCK(sc); IEEE80211_LOCK(ic); return (uvp->newstate(vap, nstate, arg)); } static int uath_set_key(struct uath_softc *sc, const struct ieee80211_key *wk, int index) { #if 0 struct uath_cmd_crypto crypto; int i; memset(&crypto, 0, sizeof(crypto)); crypto.keyidx = htobe32(index); crypto.magic1 = htobe32(1); crypto.size = htobe32(368); crypto.mask = htobe32(0xffff); crypto.flags = htobe32(0x80000068); if (index != UATH_DEFAULT_KEY) crypto.flags |= htobe32(index << 16); memset(crypto.magic2, 0xff, sizeof(crypto.magic2)); /* * Each byte of the key must be XOR'ed with 10101010 before being * transmitted to the firmware.
*/ for (i = 0; i < wk->wk_keylen; i++) crypto.key[i] = wk->wk_key[i] ^ 0xaa; DPRINTF(sc, UATH_DEBUG_CRYPTO, "setting crypto key index=%d len=%d\n", index, wk->wk_keylen); return uath_cmd_write(sc, WDCMSG_SET_KEY_CACHE_ENTRY, &crypto, sizeof crypto, 0); #else /* XXX support H/W cryto */ return (0); #endif } static int uath_set_keys(struct uath_softc *sc, struct ieee80211vap *vap) { int i, error; error = 0; for (i = 0; i < IEEE80211_WEP_NKID; i++) { const struct ieee80211_key *wk = &vap->iv_nw_keys[i]; if (wk->wk_flags & (IEEE80211_KEY_XMIT|IEEE80211_KEY_RECV)) { error = uath_set_key(sc, wk, i); if (error) return (error); } } if (vap->iv_def_txkey != IEEE80211_KEYIX_NONE) { error = uath_set_key(sc, &vap->iv_nw_keys[vap->iv_def_txkey], UATH_DEFAULT_KEY); } return (error); } #define UATH_SYSCTL_STAT_ADD32(c, h, n, p, d) \ SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d) static void uath_sysctl_node(struct uath_softc *sc) { struct sysctl_ctx_list *ctx; struct sysctl_oid_list *child; struct sysctl_oid *tree; struct uath_stat *stats; stats = &sc->sc_stat; ctx = device_get_sysctl_ctx(sc->sc_dev); child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->sc_dev)); tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "UATH statistics"); child = SYSCTL_CHILDREN(tree); UATH_SYSCTL_STAT_ADD32(ctx, child, "badchunkseqnum", &stats->st_badchunkseqnum, "Bad chunk sequence numbers"); UATH_SYSCTL_STAT_ADD32(ctx, child, "invalidlen", &stats->st_invalidlen, "Invalid length"); UATH_SYSCTL_STAT_ADD32(ctx, child, "multichunk", &stats->st_multichunk, "Multi chunks"); UATH_SYSCTL_STAT_ADD32(ctx, child, "toobigrxpkt", &stats->st_toobigrxpkt, "Too big rx packets"); UATH_SYSCTL_STAT_ADD32(ctx, child, "stopinprogress", &stats->st_stopinprogress, "Stop in progress"); UATH_SYSCTL_STAT_ADD32(ctx, child, "crcerrs", &stats->st_crcerr, "CRC errors"); UATH_SYSCTL_STAT_ADD32(ctx, child, "phyerr", &stats->st_phyerr, "PHY errors"); UATH_SYSCTL_STAT_ADD32(ctx, child, "decrypt_crcerr", &stats->st_decrypt_crcerr, "Decryption CRC errors"); UATH_SYSCTL_STAT_ADD32(ctx, child, "decrypt_micerr", &stats->st_decrypt_micerr, "Decryption Misc errors"); UATH_SYSCTL_STAT_ADD32(ctx, child, "decomperr", &stats->st_decomperr, "Decomp errors"); UATH_SYSCTL_STAT_ADD32(ctx, child, "keyerr", &stats->st_keyerr, "Key errors"); UATH_SYSCTL_STAT_ADD32(ctx, child, "err", &stats->st_err, "Unknown errors"); UATH_SYSCTL_STAT_ADD32(ctx, child, "cmd_active", &stats->st_cmd_active, "Active numbers in Command queue"); UATH_SYSCTL_STAT_ADD32(ctx, child, "cmd_inactive", &stats->st_cmd_inactive, "Inactive numbers in Command queue"); UATH_SYSCTL_STAT_ADD32(ctx, child, "cmd_pending", &stats->st_cmd_pending, "Pending numbers in Command queue"); UATH_SYSCTL_STAT_ADD32(ctx, child, "cmd_waiting", &stats->st_cmd_waiting, "Waiting numbers in Command queue"); UATH_SYSCTL_STAT_ADD32(ctx, child, "rx_active", &stats->st_rx_active, "Active numbers in RX queue"); UATH_SYSCTL_STAT_ADD32(ctx, child, "rx_inactive", &stats->st_rx_inactive, "Inactive numbers in RX queue"); UATH_SYSCTL_STAT_ADD32(ctx, child, "tx_active", &stats->st_tx_active, "Active numbers in TX queue"); UATH_SYSCTL_STAT_ADD32(ctx, child, "tx_inactive", &stats->st_tx_inactive, "Inactive numbers in TX queue"); UATH_SYSCTL_STAT_ADD32(ctx, child, "tx_pending", &stats->st_tx_pending, "Pending numbers in TX queue"); } #undef UATH_SYSCTL_STAT_ADD32 CTASSERT(sizeof(u_int) >= sizeof(uint32_t)); static void uath_cmdeof(struct uath_softc *sc, struct uath_cmd *cmd) { struct uath_cmd_hdr 
*hdr; uint32_t dlen; hdr = (struct uath_cmd_hdr *)cmd->buf; /* NB: msgid is passed thru w/o byte swapping */ #ifdef UATH_DEBUG if (sc->sc_debug & UATH_DEBUG_CMDS) { uint32_t len = be32toh(hdr->len); printf("%s: %s [ix %u] len %u status %u\n", __func__, uath_codename(be32toh(hdr->code)), hdr->msgid, len, be32toh(hdr->magic)); if (sc->sc_debug & UATH_DEBUG_CMDS_DUMP) uath_dump_cmd(cmd->buf, len > UATH_MAX_CMDSZ ? sizeof(*hdr) : len, '-'); } #endif hdr->code = be32toh(hdr->code); hdr->len = be32toh(hdr->len); hdr->magic = be32toh(hdr->magic); /* target status on return */ switch (hdr->code & 0xff) { /* reply to a read command */ default: DPRINTF(sc, UATH_DEBUG_RX_PROC | UATH_DEBUG_RECV_ALL, "%s: code %d hdr len %u\n", __func__, hdr->code & 0xff, hdr->len); /* * The first response from the target after the * HOST_AVAILABLE has an invalid msgid so we must * treat it specially. */ if (hdr->msgid < UATH_CMD_LIST_COUNT) { uint32_t *rp = (uint32_t *)(hdr+1); u_int olen; if (sizeof(*hdr) > hdr->len || hdr->len > UATH_MAX_CMDSZ) { device_printf(sc->sc_dev, "%s: invalid WDC msg length %u; " "msg ignored\n", __func__, hdr->len); return; } /* * Calculate return/receive payload size; the * first word, if present, always gives the * number of bytes--unless it's 0 in which * case a single 32-bit word should be present. */ dlen = hdr->len - sizeof(*hdr); if (dlen >= sizeof(uint32_t)) { olen = be32toh(rp[0]); dlen -= sizeof(uint32_t); if (olen == 0) { /* convention is 0 =>'s one word */ olen = sizeof(uint32_t); /* XXX KASSERT(olen == dlen ) */ } } else olen = 0; if (cmd->odata != NULL) { /* NB: cmd->olen validated in uath_cmd */ if (olen > (u_int)cmd->olen) { /* XXX complain? */ device_printf(sc->sc_dev, "%s: cmd 0x%x olen %u cmd olen %u\n", __func__, hdr->code, olen, cmd->olen); olen = cmd->olen; } if (olen > dlen) { /* XXX complain, shouldn't happen */ device_printf(sc->sc_dev, "%s: cmd 0x%x olen %u dlen %u\n", __func__, hdr->code, olen, dlen); olen = dlen; } /* XXX have submitter do this */ /* copy answer into caller's supplied buffer */ bcopy(&rp[1], cmd->odata, olen); cmd->olen = olen; } } wakeup_one(cmd); /* wake up caller */ break; case WDCMSG_TARGET_START: if (hdr->msgid >= UATH_CMD_LIST_COUNT) { /* XXX */ return; } dlen = hdr->len - sizeof(*hdr); if (dlen != sizeof(uint32_t)) { device_printf(sc->sc_dev, "%s: dlen (%u) != %zu!\n", __func__, dlen, sizeof(uint32_t)); return; } /* XXX have submitter do this */ /* copy answer into caller's supplied buffer */ bcopy(hdr+1, cmd->odata, sizeof(uint32_t)); cmd->olen = sizeof(uint32_t); wakeup_one(cmd); /* wake up caller */ break; case WDCMSG_SEND_COMPLETE: /* this notification is sent when UATH_TX_NOTIFY is set */ DPRINTF(sc, UATH_DEBUG_RX_PROC | UATH_DEBUG_RECV_ALL, "%s: received Tx notification\n", __func__); break; case WDCMSG_TARGET_GET_STATS: DPRINTF(sc, UATH_DEBUG_RX_PROC | UATH_DEBUG_RECV_ALL, "%s: received device statistics\n", __func__); callout_reset(&sc->stat_ch, hz, uath_stat, sc); break; } } static void uath_intr_rx_callback(struct usb_xfer *xfer, usb_error_t error) { struct uath_softc *sc = usbd_xfer_softc(xfer); struct uath_cmd *cmd; struct uath_cmd_hdr *hdr; struct usb_page_cache *pc; int actlen; usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL); UATH_ASSERT_LOCKED(sc); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: cmd = STAILQ_FIRST(&sc->sc_cmd_waiting); if (cmd == NULL) goto setup; STAILQ_REMOVE_HEAD(&sc->sc_cmd_waiting, next); UATH_STAT_DEC(sc, st_cmd_waiting); STAILQ_INSERT_TAIL(&sc->sc_cmd_inactive, cmd, next); UATH_STAT_INC(sc, 
st_cmd_inactive); if (actlen < sizeof(struct uath_cmd_hdr)) { device_printf(sc->sc_dev, "%s: short xfer error (actlen %d)\n", __func__, actlen); goto setup; } pc = usbd_xfer_get_frame(xfer, 0); usbd_copy_out(pc, 0, cmd->buf, actlen); hdr = (struct uath_cmd_hdr *)cmd->buf; if (be32toh(hdr->len) > (uint32_t)actlen) { device_printf(sc->sc_dev, "%s: truncated xfer (len %u, actlen %d)\n", __func__, be32toh(hdr->len), actlen); goto setup; } uath_cmdeof(sc, cmd); case USB_ST_SETUP: setup: usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer)); usbd_transfer_submit(xfer); break; default: if (error != USB_ERR_CANCELLED) { usbd_xfer_set_stall(xfer); goto setup; } break; } } static void uath_intr_tx_callback(struct usb_xfer *xfer, usb_error_t error) { struct uath_softc *sc = usbd_xfer_softc(xfer); struct uath_cmd *cmd; UATH_ASSERT_LOCKED(sc); cmd = STAILQ_FIRST(&sc->sc_cmd_active); if (cmd != NULL && USB_GET_STATE(xfer) != USB_ST_SETUP) { STAILQ_REMOVE_HEAD(&sc->sc_cmd_active, next); UATH_STAT_DEC(sc, st_cmd_active); STAILQ_INSERT_TAIL((cmd->flags & UATH_CMD_FLAG_READ) ? &sc->sc_cmd_waiting : &sc->sc_cmd_inactive, cmd, next); if (cmd->flags & UATH_CMD_FLAG_READ) UATH_STAT_INC(sc, st_cmd_waiting); else UATH_STAT_INC(sc, st_cmd_inactive); } switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: case USB_ST_SETUP: setup: cmd = STAILQ_FIRST(&sc->sc_cmd_pending); if (cmd == NULL) { DPRINTF(sc, UATH_DEBUG_XMIT, "%s: empty pending queue\n", __func__); return; } STAILQ_REMOVE_HEAD(&sc->sc_cmd_pending, next); UATH_STAT_DEC(sc, st_cmd_pending); STAILQ_INSERT_TAIL((cmd->flags & UATH_CMD_FLAG_ASYNC) ? &sc->sc_cmd_inactive : &sc->sc_cmd_active, cmd, next); if (cmd->flags & UATH_CMD_FLAG_ASYNC) UATH_STAT_INC(sc, st_cmd_inactive); else UATH_STAT_INC(sc, st_cmd_active); usbd_xfer_set_frame_data(xfer, 0, cmd->buf, cmd->buflen); usbd_transfer_submit(xfer); break; default: if (error != USB_ERR_CANCELLED) { usbd_xfer_set_stall(xfer); goto setup; } break; } } static void uath_update_rxstat(struct uath_softc *sc, uint32_t status) { switch (status) { case UATH_STATUS_STOP_IN_PROGRESS: UATH_STAT_INC(sc, st_stopinprogress); break; case UATH_STATUS_CRC_ERR: UATH_STAT_INC(sc, st_crcerr); break; case UATH_STATUS_PHY_ERR: UATH_STAT_INC(sc, st_phyerr); break; case UATH_STATUS_DECRYPT_CRC_ERR: UATH_STAT_INC(sc, st_decrypt_crcerr); break; case UATH_STATUS_DECRYPT_MIC_ERR: UATH_STAT_INC(sc, st_decrypt_micerr); break; case UATH_STATUS_DECOMP_ERR: UATH_STAT_INC(sc, st_decomperr); break; case UATH_STATUS_KEY_ERR: UATH_STAT_INC(sc, st_keyerr); break; case UATH_STATUS_ERR: UATH_STAT_INC(sc, st_err); break; default: break; } } CTASSERT(UATH_MIN_RXBUFSZ >= sizeof(struct uath_chunk)); static struct mbuf * uath_data_rxeof(struct usb_xfer *xfer, struct uath_data *data, struct uath_rx_desc **pdesc) { struct uath_softc *sc = usbd_xfer_softc(xfer); struct ieee80211com *ic = &sc->sc_ic; struct uath_chunk *chunk; struct uath_rx_desc *desc; struct mbuf *m = data->m, *mnew, *mp; uint16_t chunklen; int actlen; usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL); if (actlen < (int)UATH_MIN_RXBUFSZ) { DPRINTF(sc, UATH_DEBUG_RECV | UATH_DEBUG_RECV_ALL, "%s: wrong xfer size (len=%d)\n", __func__, actlen); counter_u64_add(ic->ic_ierrors, 1); return (NULL); } chunk = (struct uath_chunk *)data->buf; chunklen = be16toh(chunk->length); if (chunk->seqnum == 0 && chunk->flags == 0 && chunklen == 0) { device_printf(sc->sc_dev, "%s: strange response\n", __func__); counter_u64_add(ic->ic_ierrors, 1); UATH_RESET_INTRX(sc); return (NULL); } if (chunklen > actlen) 
{ device_printf(sc->sc_dev, "%s: invalid chunk length (len %u > actlen %d)\n", __func__, chunklen, actlen); counter_u64_add(ic->ic_ierrors, 1); /* XXX cleanup? */ UATH_RESET_INTRX(sc); return (NULL); } if (chunk->seqnum != sc->sc_intrx_nextnum) { DPRINTF(sc, UATH_DEBUG_XMIT, "invalid seqnum %d, expected %d\n", chunk->seqnum, sc->sc_intrx_nextnum); UATH_STAT_INC(sc, st_badchunkseqnum); if (sc->sc_intrx_head != NULL) m_freem(sc->sc_intrx_head); UATH_RESET_INTRX(sc); return (NULL); } /* check multi-chunk frames */ if ((chunk->seqnum == 0 && !(chunk->flags & UATH_CFLAGS_FINAL)) || (chunk->seqnum != 0 && (chunk->flags & UATH_CFLAGS_FINAL)) || chunk->flags & UATH_CFLAGS_RXMSG) UATH_STAT_INC(sc, st_multichunk); if (chunk->flags & UATH_CFLAGS_FINAL) { if (chunklen < sizeof(struct uath_rx_desc)) { device_printf(sc->sc_dev, "%s: invalid chunk length %d\n", __func__, chunklen); counter_u64_add(ic->ic_ierrors, 1); if (sc->sc_intrx_head != NULL) m_freem(sc->sc_intrx_head); UATH_RESET_INTRX(sc); return (NULL); } chunklen -= sizeof(struct uath_rx_desc); } if (chunklen > 0 && (!(chunk->flags & UATH_CFLAGS_FINAL) || !(chunk->seqnum == 0))) { /* we should use intermediate RX buffer */ if (chunk->seqnum == 0) UATH_RESET_INTRX(sc); if ((sc->sc_intrx_len + sizeof(struct uath_rx_desc) + chunklen) > UATH_MAX_INTRX_SIZE) { UATH_STAT_INC(sc, st_invalidlen); counter_u64_add(ic->ic_ierrors, 1); if (sc->sc_intrx_head != NULL) m_freem(sc->sc_intrx_head); UATH_RESET_INTRX(sc); return (NULL); } m->m_len = chunklen; m->m_data += sizeof(struct uath_chunk); if (sc->sc_intrx_head == NULL) { sc->sc_intrx_head = m; sc->sc_intrx_tail = m; } else { m->m_flags &= ~M_PKTHDR; sc->sc_intrx_tail->m_next = m; sc->sc_intrx_tail = m; } } sc->sc_intrx_len += chunklen; mnew = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (mnew == NULL) { DPRINTF(sc, UATH_DEBUG_RECV | UATH_DEBUG_RECV_ALL, "%s: can't get new mbuf, drop frame\n", __func__); counter_u64_add(ic->ic_ierrors, 1); if (sc->sc_intrx_head != NULL) m_freem(sc->sc_intrx_head); UATH_RESET_INTRX(sc); return (NULL); } data->m = mnew; data->buf = mtod(mnew, uint8_t *); /* if the frame is not final continue the transfer */ if (!(chunk->flags & UATH_CFLAGS_FINAL)) { sc->sc_intrx_nextnum++; UATH_RESET_INTRX(sc); return (NULL); } /* * if the frame is not set UATH_CFLAGS_RXMSG, then rx descriptor is * located at the end, 32-bit aligned */ desc = (chunk->flags & UATH_CFLAGS_RXMSG) ? 
(struct uath_rx_desc *)(chunk + 1) : (struct uath_rx_desc *)(((uint8_t *)chunk) + sizeof(struct uath_chunk) + be16toh(chunk->length) - sizeof(struct uath_rx_desc)); if ((uint8_t *)chunk + actlen - sizeof(struct uath_rx_desc) < (uint8_t *)desc) { device_printf(sc->sc_dev, "%s: wrong Rx descriptor pointer " "(desc %p chunk %p actlen %d)\n", __func__, desc, chunk, actlen); counter_u64_add(ic->ic_ierrors, 1); if (sc->sc_intrx_head != NULL) m_freem(sc->sc_intrx_head); UATH_RESET_INTRX(sc); return (NULL); } *pdesc = desc; DPRINTF(sc, UATH_DEBUG_RECV | UATH_DEBUG_RECV_ALL, "%s: frame len %u code %u status %u rate %u antenna %u " "rssi %d channel %u phyerror %u connix %u decrypterror %u " "keycachemiss %u\n", __func__, be32toh(desc->framelen) , be32toh(desc->code), be32toh(desc->status), be32toh(desc->rate) , be32toh(desc->antenna), be32toh(desc->rssi), be32toh(desc->channel) , be32toh(desc->phyerror), be32toh(desc->connix) , be32toh(desc->decrypterror), be32toh(desc->keycachemiss)); if (be32toh(desc->len) > MCLBYTES) { DPRINTF(sc, UATH_DEBUG_RECV | UATH_DEBUG_RECV_ALL, "%s: bad descriptor (len=%d)\n", __func__, be32toh(desc->len)); counter_u64_add(ic->ic_ierrors, 1); UATH_STAT_INC(sc, st_toobigrxpkt); if (sc->sc_intrx_head != NULL) m_freem(sc->sc_intrx_head); UATH_RESET_INTRX(sc); return (NULL); } uath_update_rxstat(sc, be32toh(desc->status)); /* finalize mbuf */ if (sc->sc_intrx_head == NULL) { uint32_t framelen; if (be32toh(desc->framelen) < UATH_RX_DUMMYSIZE) { device_printf(sc->sc_dev, "%s: framelen too small (%u)\n", __func__, be32toh(desc->framelen)); counter_u64_add(ic->ic_ierrors, 1); if (sc->sc_intrx_head != NULL) m_freem(sc->sc_intrx_head); UATH_RESET_INTRX(sc); return (NULL); } framelen = be32toh(desc->framelen) - UATH_RX_DUMMYSIZE; if (framelen > actlen - sizeof(struct uath_chunk) || framelen < sizeof(struct ieee80211_frame_ack)) { device_printf(sc->sc_dev, "%s: wrong frame length (%u, actlen %d)!\n", __func__, framelen, actlen); counter_u64_add(ic->ic_ierrors, 1); if (sc->sc_intrx_head != NULL) m_freem(sc->sc_intrx_head); UATH_RESET_INTRX(sc); return (NULL); } m->m_pkthdr.len = m->m_len = framelen; m->m_data += sizeof(struct uath_chunk); } else { mp = sc->sc_intrx_head; mp->m_flags |= M_PKTHDR; mp->m_pkthdr.len = sc->sc_intrx_len; m = mp; } /* there are a lot more fields in the RX descriptor */ if ((sc->sc_flags & UATH_FLAG_INVALID) == 0 && ieee80211_radiotap_active(ic)) { struct uath_rx_radiotap_header *tap = &sc->sc_rxtap; uint32_t tsf_hi = be32toh(desc->tstamp_high); uint32_t tsf_lo = be32toh(desc->tstamp_low); /* XXX only get low order 24bits of tsf from h/w */ tap->wr_tsf = htole64(((uint64_t)tsf_hi << 32) | tsf_lo); tap->wr_flags = 0; if (be32toh(desc->status) == UATH_STATUS_CRC_ERR) tap->wr_flags |= IEEE80211_RADIOTAP_F_BADFCS; /* XXX map other status to BADFCS? 
*/ /* XXX ath h/w rate code, need to map */ tap->wr_rate = be32toh(desc->rate); tap->wr_antenna = be32toh(desc->antenna); tap->wr_antsignal = -95 + be32toh(desc->rssi); tap->wr_antnoise = -95; } UATH_RESET_INTRX(sc); return (m); } static void uath_bulk_rx_callback(struct usb_xfer *xfer, usb_error_t error) { struct uath_softc *sc = usbd_xfer_softc(xfer); struct ieee80211com *ic = &sc->sc_ic; struct ieee80211_frame *wh; struct ieee80211_node *ni; struct epoch_tracker et; struct mbuf *m = NULL; struct uath_data *data; struct uath_rx_desc *desc = NULL; int8_t nf; UATH_ASSERT_LOCKED(sc); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: data = STAILQ_FIRST(&sc->sc_rx_active); if (data == NULL) goto setup; STAILQ_REMOVE_HEAD(&sc->sc_rx_active, next); UATH_STAT_DEC(sc, st_rx_active); m = uath_data_rxeof(xfer, data, &desc); STAILQ_INSERT_TAIL(&sc->sc_rx_inactive, data, next); UATH_STAT_INC(sc, st_rx_inactive); /* FALLTHROUGH */ case USB_ST_SETUP: setup: data = STAILQ_FIRST(&sc->sc_rx_inactive); if (data == NULL) return; STAILQ_REMOVE_HEAD(&sc->sc_rx_inactive, next); UATH_STAT_DEC(sc, st_rx_inactive); STAILQ_INSERT_TAIL(&sc->sc_rx_active, data, next); UATH_STAT_INC(sc, st_rx_active); usbd_xfer_set_frame_data(xfer, 0, data->buf, MCLBYTES); usbd_transfer_submit(xfer); /* * To avoid a LOR we unlock our private mutex here to call * ieee80211_input(), since we are at the end of a USB * callback and it is safe to unlock. */ if (sc->sc_flags & UATH_FLAG_INVALID) { if (m != NULL) m_freem(m); return; } UATH_UNLOCK(sc); if (m != NULL && desc != NULL) { wh = mtod(m, struct ieee80211_frame *); ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh); nf = -95; /* XXX */ NET_EPOCH_ENTER(et); if (ni != NULL) { (void) ieee80211_input(ni, m, (int)be32toh(desc->rssi), nf); /* node is no longer needed */ ieee80211_free_node(ni); } else (void) ieee80211_input_all(ic, m, (int)be32toh(desc->rssi), nf); NET_EPOCH_EXIT(et); m = NULL; desc = NULL; } UATH_LOCK(sc); uath_start(sc); break; default: /* move the buffer back to the inactive queue due to an error. */ data = STAILQ_FIRST(&sc->sc_rx_active); if (data != NULL) { STAILQ_REMOVE_HEAD(&sc->sc_rx_active, next); UATH_STAT_DEC(sc, st_rx_active); STAILQ_INSERT_TAIL(&sc->sc_rx_inactive, data, next); UATH_STAT_INC(sc, st_rx_inactive); } if (error != USB_ERR_CANCELLED) { usbd_xfer_set_stall(xfer); counter_u64_add(ic->ic_ierrors, 1); goto setup; } break; } } static void uath_data_txeof(struct usb_xfer *xfer, struct uath_data *data) { struct uath_softc *sc = usbd_xfer_softc(xfer); UATH_ASSERT_LOCKED(sc); if (data->m) { /* XXX status?
*/ ieee80211_tx_complete(data->ni, data->m, 0); data->m = NULL; data->ni = NULL; } sc->sc_tx_timer = 0; } static void uath_bulk_tx_callback(struct usb_xfer *xfer, usb_error_t error) { struct uath_softc *sc = usbd_xfer_softc(xfer); struct uath_data *data; UATH_ASSERT_LOCKED(sc); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: data = STAILQ_FIRST(&sc->sc_tx_active); if (data == NULL) goto setup; STAILQ_REMOVE_HEAD(&sc->sc_tx_active, next); UATH_STAT_DEC(sc, st_tx_active); uath_data_txeof(xfer, data); STAILQ_INSERT_TAIL(&sc->sc_tx_inactive, data, next); UATH_STAT_INC(sc, st_tx_inactive); /* FALLTHROUGH */ case USB_ST_SETUP: setup: data = STAILQ_FIRST(&sc->sc_tx_pending); if (data == NULL) { DPRINTF(sc, UATH_DEBUG_XMIT, "%s: empty pending queue\n", __func__); return; } STAILQ_REMOVE_HEAD(&sc->sc_tx_pending, next); UATH_STAT_DEC(sc, st_tx_pending); STAILQ_INSERT_TAIL(&sc->sc_tx_active, data, next); UATH_STAT_INC(sc, st_tx_active); usbd_xfer_set_frame_data(xfer, 0, data->buf, data->buflen); usbd_transfer_submit(xfer); uath_start(sc); break; default: data = STAILQ_FIRST(&sc->sc_tx_active); if (data == NULL) goto setup; if (data->ni != NULL) { if_inc_counter(data->ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1); if ((sc->sc_flags & UATH_FLAG_INVALID) == 0) ieee80211_free_node(data->ni); data->ni = NULL; } if (error != USB_ERR_CANCELLED) { usbd_xfer_set_stall(xfer); goto setup; } break; } } static device_method_t uath_methods[] = { DEVMETHOD(device_probe, uath_match), DEVMETHOD(device_attach, uath_attach), DEVMETHOD(device_detach, uath_detach), DEVMETHOD_END }; static driver_t uath_driver = { .name = "uath", .methods = uath_methods, .size = sizeof(struct uath_softc) }; DRIVER_MODULE(uath, uhub, uath_driver, NULL, NULL); MODULE_DEPEND(uath, wlan, 1, 1, 1); MODULE_DEPEND(uath, usb, 1, 1, 1); MODULE_VERSION(uath, 1); USB_PNP_HOST_INFO(uath_devs); diff --git a/sys/dev/wpi/if_wpi.c b/sys/dev/wpi/if_wpi.c index c9c2a8f6854e..5ceb037803b8 100644 --- a/sys/dev/wpi/if_wpi.c +++ b/sys/dev/wpi/if_wpi.c @@ -1,5651 +1,5651 @@ /*- * Copyright (c) 2006,2007 * Damien Bergamini * Benjamin Close * Copyright (c) 2015 Andriy Voskoboinyk * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include /* * Driver for Intel PRO/Wireless 3945ABG 802.11 network adapters. * * The 3945ABG network adapter doesn't use traditional hardware as * many other adapters do. Instead, at run time the eeprom is set into a known * state and told to load boot firmware. The boot firmware loads an init and a * main binary firmware image into SRAM on the card via DMA. * Once the firmware is loaded, the driver/hw then * communicate by way of circular dma rings via the SRAM to the firmware. * * There are 6 memory rings: 1 command ring, 1 rx data ring & 4 tx data rings. * The 4 tx data rings allow for QoS prioritization. * * The rx data ring consists of 32 dma buffers. 
Two registers are used to * indicate where in the ring the driver and the firmware are up to. The * driver sets the initial read index (reg1) and the initial write index (reg2), * the firmware updates the read index (reg1) on rx of a packet and fires an * interrupt. The driver then processes the buffers starting at reg1 indicating * to the firmware which buffers have been accessed by updating reg2. At the * same time allocating new memory for the processed buffer. * * A similar thing happens with the tx rings. The difference is the firmware * stop processing buffers once the queue is full and until confirmation * of a successful transmition (tx_done) has occurred. * * The command ring operates in the same manner as the tx queues. * * All communication direct to the card (ie eeprom) is classed as Stage1 * communication * * All communication via the firmware to the card is classed as State2. * The firmware consists of 2 parts. A bootstrap firmware and a runtime * firmware. The bootstrap firmware and runtime firmware are loaded * from host memory via dma to the card then told to execute. From this point * on the majority of communications between the driver and the card goes * via the firmware. */ #include "opt_wlan.h" #include "opt_wpi.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include struct wpi_ident { uint16_t vendor; uint16_t device; uint16_t subdevice; const char *name; }; static const struct wpi_ident wpi_ident_table[] = { /* The below entries support ABG regardless of the subid */ { 0x8086, 0x4222, 0x0, "Intel(R) PRO/Wireless 3945ABG" }, { 0x8086, 0x4227, 0x0, "Intel(R) PRO/Wireless 3945ABG" }, /* The below entries only support BG */ { 0x8086, 0x4222, 0x1005, "Intel(R) PRO/Wireless 3945BG" }, { 0x8086, 0x4222, 0x1034, "Intel(R) PRO/Wireless 3945BG" }, { 0x8086, 0x4227, 0x1014, "Intel(R) PRO/Wireless 3945BG" }, { 0x8086, 0x4222, 0x1044, "Intel(R) PRO/Wireless 3945BG" }, { 0, 0, 0, NULL } }; static int wpi_probe(device_t); static int wpi_attach(device_t); static void wpi_radiotap_attach(struct wpi_softc *); static void wpi_sysctlattach(struct wpi_softc *); static void wpi_init_beacon(struct wpi_vap *); static struct ieee80211vap *wpi_vap_create(struct ieee80211com *, const char [IFNAMSIZ], int, enum ieee80211_opmode, int, const uint8_t [IEEE80211_ADDR_LEN], const uint8_t [IEEE80211_ADDR_LEN]); static void wpi_vap_delete(struct ieee80211vap *); static int wpi_detach(device_t); static int wpi_shutdown(device_t); static int wpi_suspend(device_t); static int wpi_resume(device_t); static int wpi_nic_lock(struct wpi_softc *); static int wpi_read_prom_data(struct wpi_softc *, uint32_t, void *, int); static void wpi_dma_map_addr(void *, bus_dma_segment_t *, int, int); static int wpi_dma_contig_alloc(struct wpi_softc *, struct wpi_dma_info *, void **, bus_size_t, bus_size_t); static void wpi_dma_contig_free(struct wpi_dma_info *); static int wpi_alloc_shared(struct wpi_softc *); static void wpi_free_shared(struct wpi_softc *); static int wpi_alloc_fwmem(struct wpi_softc *); static void wpi_free_fwmem(struct wpi_softc *); static int wpi_alloc_rx_ring(struct wpi_softc *); static void wpi_update_rx_ring(struct wpi_softc *); static void wpi_update_rx_ring_ps(struct wpi_softc *); static void 
wpi_reset_rx_ring(struct wpi_softc *); static void wpi_free_rx_ring(struct wpi_softc *); static int wpi_alloc_tx_ring(struct wpi_softc *, struct wpi_tx_ring *, uint8_t); static void wpi_update_tx_ring(struct wpi_softc *, struct wpi_tx_ring *); static void wpi_update_tx_ring_ps(struct wpi_softc *, struct wpi_tx_ring *); static void wpi_reset_tx_ring(struct wpi_softc *, struct wpi_tx_ring *); static void wpi_free_tx_ring(struct wpi_softc *, struct wpi_tx_ring *); static int wpi_read_eeprom(struct wpi_softc *, uint8_t macaddr[IEEE80211_ADDR_LEN]); static uint32_t wpi_eeprom_channel_flags(struct wpi_eeprom_chan *); static void wpi_read_eeprom_band(struct wpi_softc *, uint8_t, int, int *, struct ieee80211_channel[]); static int wpi_read_eeprom_channels(struct wpi_softc *, uint8_t); static struct wpi_eeprom_chan *wpi_find_eeprom_channel(struct wpi_softc *, struct ieee80211_channel *); static void wpi_getradiocaps(struct ieee80211com *, int, int *, struct ieee80211_channel[]); static int wpi_setregdomain(struct ieee80211com *, struct ieee80211_regdomain *, int, struct ieee80211_channel[]); static int wpi_read_eeprom_group(struct wpi_softc *, uint8_t); static struct ieee80211_node *wpi_node_alloc(struct ieee80211vap *, const uint8_t mac[IEEE80211_ADDR_LEN]); static void wpi_node_free(struct ieee80211_node *); static void wpi_ibss_recv_mgmt(struct ieee80211_node *, struct mbuf *, int, const struct ieee80211_rx_stats *, int, int); static void wpi_restore_node(void *, struct ieee80211_node *); static void wpi_restore_node_table(struct wpi_softc *, struct wpi_vap *); static int wpi_newstate(struct ieee80211vap *, enum ieee80211_state, int); static void wpi_calib_timeout(void *); static void wpi_rx_done(struct wpi_softc *, struct wpi_rx_desc *, struct wpi_rx_data *); static void wpi_rx_statistics(struct wpi_softc *, struct wpi_rx_desc *, struct wpi_rx_data *); static void wpi_tx_done(struct wpi_softc *, struct wpi_rx_desc *); static void wpi_cmd_done(struct wpi_softc *, struct wpi_rx_desc *); static void wpi_notif_intr(struct wpi_softc *); static void wpi_wakeup_intr(struct wpi_softc *); #ifdef WPI_DEBUG static void wpi_debug_registers(struct wpi_softc *); #endif static void wpi_fatal_intr(struct wpi_softc *); static void wpi_intr(void *); static void wpi_free_txfrags(struct wpi_softc *, uint16_t); static int wpi_cmd2(struct wpi_softc *, struct wpi_buf *); static int wpi_tx_data(struct wpi_softc *, struct mbuf *, struct ieee80211_node *); static int wpi_tx_data_raw(struct wpi_softc *, struct mbuf *, struct ieee80211_node *, const struct ieee80211_bpf_params *); static int wpi_raw_xmit(struct ieee80211_node *, struct mbuf *, const struct ieee80211_bpf_params *); static int wpi_transmit(struct ieee80211com *, struct mbuf *); static void wpi_watchdog_rfkill(void *); static void wpi_scan_timeout(void *); static void wpi_tx_timeout(void *); static void wpi_parent(struct ieee80211com *); static int wpi_cmd(struct wpi_softc *, uint8_t, const void *, uint16_t, int); static int wpi_mrr_setup(struct wpi_softc *); static int wpi_add_node(struct wpi_softc *, struct ieee80211_node *); static int wpi_add_broadcast_node(struct wpi_softc *, int); static int wpi_add_ibss_node(struct wpi_softc *, struct ieee80211_node *); static void wpi_del_node(struct wpi_softc *, struct ieee80211_node *); static int wpi_updateedca(struct ieee80211com *); static void wpi_set_promisc(struct wpi_softc *); static void wpi_update_promisc(struct ieee80211com *); static void wpi_update_mcast(struct ieee80211com *); static void 
wpi_set_led(struct wpi_softc *, uint8_t, uint8_t, uint8_t); static int wpi_set_timing(struct wpi_softc *, struct ieee80211_node *); static void wpi_power_calibration(struct wpi_softc *); static int wpi_set_txpower(struct wpi_softc *, int); static int wpi_get_power_index(struct wpi_softc *, struct wpi_power_group *, uint8_t, int, int); static int wpi_set_pslevel(struct wpi_softc *, uint8_t, int, int); static int wpi_send_btcoex(struct wpi_softc *); static int wpi_send_rxon(struct wpi_softc *, int, int); static int wpi_config(struct wpi_softc *); static uint16_t wpi_get_active_dwell_time(struct wpi_softc *, struct ieee80211_channel *, uint8_t); static uint16_t wpi_limit_dwell(struct wpi_softc *, uint16_t); static uint16_t wpi_get_passive_dwell_time(struct wpi_softc *, struct ieee80211_channel *); static uint32_t wpi_get_scan_pause_time(uint32_t, uint16_t); static int wpi_scan(struct wpi_softc *, struct ieee80211_channel *); static int wpi_auth(struct wpi_softc *, struct ieee80211vap *); static int wpi_config_beacon(struct wpi_vap *); static int wpi_setup_beacon(struct wpi_softc *, struct ieee80211_node *); static void wpi_update_beacon(struct ieee80211vap *, int); static void wpi_newassoc(struct ieee80211_node *, int); static int wpi_run(struct wpi_softc *, struct ieee80211vap *); static int wpi_load_key(struct ieee80211_node *, const struct ieee80211_key *); static void wpi_load_key_cb(void *, struct ieee80211_node *); static int wpi_set_global_keys(struct ieee80211_node *); static int wpi_del_key(struct ieee80211_node *, const struct ieee80211_key *); static void wpi_del_key_cb(void *, struct ieee80211_node *); static int wpi_process_key(struct ieee80211vap *, const struct ieee80211_key *, int); static int wpi_key_set(struct ieee80211vap *, const struct ieee80211_key *); static int wpi_key_delete(struct ieee80211vap *, const struct ieee80211_key *); static int wpi_post_alive(struct wpi_softc *); static int wpi_load_bootcode(struct wpi_softc *, const uint8_t *, uint32_t); static int wpi_load_firmware(struct wpi_softc *); static int wpi_read_firmware(struct wpi_softc *); static void wpi_unload_firmware(struct wpi_softc *); static int wpi_clock_wait(struct wpi_softc *); static int wpi_apm_init(struct wpi_softc *); static void wpi_apm_stop_master(struct wpi_softc *); static void wpi_apm_stop(struct wpi_softc *); static void wpi_nic_config(struct wpi_softc *); static int wpi_hw_init(struct wpi_softc *); static void wpi_hw_stop(struct wpi_softc *); static void wpi_radio_on(void *, int); static void wpi_radio_off(void *, int); static int wpi_init(struct wpi_softc *); static void wpi_stop_locked(struct wpi_softc *); static void wpi_stop(struct wpi_softc *); static void wpi_scan_start(struct ieee80211com *); static void wpi_scan_end(struct ieee80211com *); static void wpi_set_channel(struct ieee80211com *); static void wpi_scan_curchan(struct ieee80211_scan_state *, unsigned long); static void wpi_scan_mindwell(struct ieee80211_scan_state *); static device_method_t wpi_methods[] = { /* Device interface */ DEVMETHOD(device_probe, wpi_probe), DEVMETHOD(device_attach, wpi_attach), DEVMETHOD(device_detach, wpi_detach), DEVMETHOD(device_shutdown, wpi_shutdown), DEVMETHOD(device_suspend, wpi_suspend), DEVMETHOD(device_resume, wpi_resume), DEVMETHOD_END }; static driver_t wpi_driver = { "wpi", wpi_methods, sizeof (struct wpi_softc) }; DRIVER_MODULE(wpi, pci, wpi_driver, NULL, NULL); MODULE_VERSION(wpi, 1); MODULE_DEPEND(wpi, pci, 1, 1, 1); MODULE_DEPEND(wpi, wlan, 1, 1, 1); MODULE_DEPEND(wpi, firmware, 1, 
1, 1); static int wpi_probe(device_t dev) { const struct wpi_ident *ident; for (ident = wpi_ident_table; ident->name != NULL; ident++) { if (pci_get_vendor(dev) == ident->vendor && pci_get_device(dev) == ident->device) { device_set_desc(dev, ident->name); return (BUS_PROBE_DEFAULT); } } return ENXIO; } static int wpi_attach(device_t dev) { struct wpi_softc *sc = (struct wpi_softc *)device_get_softc(dev); struct ieee80211com *ic; uint8_t i; int error, rid; #ifdef WPI_DEBUG int supportsa = 1; const struct wpi_ident *ident; #endif sc->sc_dev = dev; #ifdef WPI_DEBUG error = resource_int_value(device_get_name(sc->sc_dev), device_get_unit(sc->sc_dev), "debug", &(sc->sc_debug)); if (error != 0) sc->sc_debug = 0; #else sc->sc_debug = 0; #endif DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); /* * Get the offset of the PCI Express Capability Structure in PCI * Configuration Space. */ error = pci_find_cap(dev, PCIY_EXPRESS, &sc->sc_cap_off); if (error != 0) { device_printf(dev, "PCIe capability structure not found!\n"); return error; } /* * Some card's only support 802.11b/g not a, check to see if * this is one such card. A 0x0 in the subdevice table indicates * the entire subdevice range is to be ignored. */ #ifdef WPI_DEBUG for (ident = wpi_ident_table; ident->name != NULL; ident++) { if (ident->subdevice && pci_get_subdevice(dev) == ident->subdevice) { supportsa = 0; break; } } #endif /* Clear device-specific "PCI retry timeout" register (41h). */ pci_write_config(dev, 0x41, 0, 1); /* Enable bus-mastering. */ pci_enable_busmaster(dev); rid = PCIR_BAR(0); sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->mem == NULL) { device_printf(dev, "can't map mem space\n"); return ENOMEM; } sc->sc_st = rman_get_bustag(sc->mem); sc->sc_sh = rman_get_bushandle(sc->mem); rid = 1; if (pci_alloc_msi(dev, &rid) == 0) rid = 1; else rid = 0; /* Install interrupt handler. */ sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE | (rid != 0 ? 0 : RF_SHAREABLE)); if (sc->irq == NULL) { device_printf(dev, "can't map interrupt\n"); error = ENOMEM; goto fail; } WPI_LOCK_INIT(sc); WPI_TX_LOCK_INIT(sc); WPI_RXON_LOCK_INIT(sc); WPI_NT_LOCK_INIT(sc); WPI_TXQ_LOCK_INIT(sc); WPI_TXQ_STATE_LOCK_INIT(sc); /* Allocate DMA memory for firmware transfers. */ if ((error = wpi_alloc_fwmem(sc)) != 0) { device_printf(dev, "could not allocate memory for firmware, error %d\n", error); goto fail; } /* Allocate shared page. */ if ((error = wpi_alloc_shared(sc)) != 0) { device_printf(dev, "could not allocate shared page\n"); goto fail; } /* Allocate TX rings - 4 for QoS purposes, 1 for commands. */ for (i = 0; i < WPI_DRV_NTXQUEUES; i++) { if ((error = wpi_alloc_tx_ring(sc, &sc->txq[i], i)) != 0) { device_printf(dev, "could not allocate TX ring %d, error %d\n", i, error); goto fail; } } /* Allocate RX ring. */ if ((error = wpi_alloc_rx_ring(sc)) != 0) { device_printf(dev, "could not allocate RX ring, error %d\n", error); goto fail; } /* Clear pending interrupts. */ WPI_WRITE(sc, WPI_INT, 0xffffffff); ic = &sc->sc_ic; ic->ic_softc = sc; ic->ic_name = device_get_nameunit(dev); ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */ /* Set device capabilities. 
*/ ic->ic_caps = IEEE80211_C_STA /* station mode supported */ | IEEE80211_C_IBSS /* IBSS mode supported */ | IEEE80211_C_HOSTAP /* Host access point mode */ | IEEE80211_C_MONITOR /* monitor mode supported */ | IEEE80211_C_AHDEMO /* adhoc demo mode */ | IEEE80211_C_BGSCAN /* capable of bg scanning */ | IEEE80211_C_TXFRAG /* handle tx frags */ | IEEE80211_C_TXPMGT /* tx power management */ | IEEE80211_C_SHSLOT /* short slot time supported */ | IEEE80211_C_WPA /* 802.11i */ | IEEE80211_C_SHPREAMBLE /* short preamble supported */ | IEEE80211_C_WME /* 802.11e */ | IEEE80211_C_PMGT /* Station-side power mgmt */ ; ic->ic_cryptocaps = IEEE80211_CRYPTO_AES_CCM; /* * Read in the eeprom and also setup the channels for * net80211. We don't set the rates as net80211 does this for us */ if ((error = wpi_read_eeprom(sc, ic->ic_macaddr)) != 0) { device_printf(dev, "could not read EEPROM, error %d\n", error); goto fail; } #ifdef WPI_DEBUG if (bootverbose) { device_printf(sc->sc_dev, "Regulatory Domain: %.4s\n", sc->domain); device_printf(sc->sc_dev, "Hardware Type: %c\n", sc->type > 1 ? 'B': '?'); device_printf(sc->sc_dev, "Hardware Revision: %c\n", ((sc->rev & 0xf0) == 0xd0) ? 'D': '?'); device_printf(sc->sc_dev, "SKU %s support 802.11a\n", supportsa ? "does" : "does not"); /* XXX hw_config uses the PCIDEV for the Hardware rev. Must check what sc->rev really represents - benjsc 20070615 */ } #endif ieee80211_ifattach(ic); ic->ic_vap_create = wpi_vap_create; ic->ic_vap_delete = wpi_vap_delete; ic->ic_parent = wpi_parent; ic->ic_raw_xmit = wpi_raw_xmit; ic->ic_transmit = wpi_transmit; ic->ic_node_alloc = wpi_node_alloc; sc->sc_node_free = ic->ic_node_free; ic->ic_node_free = wpi_node_free; ic->ic_wme.wme_update = wpi_updateedca; ic->ic_update_promisc = wpi_update_promisc; ic->ic_update_mcast = wpi_update_mcast; ic->ic_newassoc = wpi_newassoc; ic->ic_scan_start = wpi_scan_start; ic->ic_scan_end = wpi_scan_end; ic->ic_set_channel = wpi_set_channel; ic->ic_scan_curchan = wpi_scan_curchan; ic->ic_scan_mindwell = wpi_scan_mindwell; ic->ic_getradiocaps = wpi_getradiocaps; ic->ic_setregdomain = wpi_setregdomain; sc->sc_update_rx_ring = wpi_update_rx_ring; sc->sc_update_tx_ring = wpi_update_tx_ring; wpi_radiotap_attach(sc); /* Setup Tx status flags (constant). */ sc->sc_txs.flags = IEEE80211_RATECTL_STATUS_PKTLEN | IEEE80211_RATECTL_STATUS_SHORT_RETRY | IEEE80211_RATECTL_STATUS_LONG_RETRY; callout_init_mtx(&sc->calib_to, &sc->rxon_mtx, 0); callout_init_mtx(&sc->scan_timeout, &sc->rxon_mtx, 0); callout_init_mtx(&sc->tx_timeout, &sc->txq_state_mtx, 0); callout_init_mtx(&sc->watchdog_rfkill, &sc->sc_mtx, 0); TASK_INIT(&sc->sc_radiooff_task, 0, wpi_radio_off, sc); TASK_INIT(&sc->sc_radioon_task, 0, wpi_radio_on, sc); wpi_sysctlattach(sc); /* * Hook our interrupt after all initialization is complete. */ error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE, NULL, wpi_intr, sc, &sc->sc_ih); if (error != 0) { device_printf(dev, "can't establish interrupt, error %d\n", error); goto fail; } if (bootverbose) ieee80211_announce(ic); #ifdef WPI_DEBUG if (sc->sc_debug & WPI_DEBUG_HW) ieee80211_announce_channels(ic); #endif DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); return 0; fail: wpi_detach(dev); DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); return error; } /* * Attach the interface to 802.11 radiotap. 
*/ static void wpi_radiotap_attach(struct wpi_softc *sc) { struct wpi_rx_radiotap_header *rxtap = &sc->sc_rxtap; struct wpi_tx_radiotap_header *txtap = &sc->sc_txtap; DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); ieee80211_radiotap_attach(&sc->sc_ic, &txtap->wt_ihdr, sizeof(*txtap), WPI_TX_RADIOTAP_PRESENT, &rxtap->wr_ihdr, sizeof(*rxtap), WPI_RX_RADIOTAP_PRESENT); DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); } static void wpi_sysctlattach(struct wpi_softc *sc) { #ifdef WPI_DEBUG struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev); struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev); SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "debug", CTLFLAG_RW, &sc->sc_debug, sc->sc_debug, "control debugging printfs"); #endif } static void wpi_init_beacon(struct wpi_vap *wvp) { struct wpi_buf *bcn = &wvp->wv_bcbuf; struct wpi_cmd_beacon *cmd = (struct wpi_cmd_beacon *)&bcn->data; cmd->id = WPI_ID_BROADCAST; cmd->ofdm_mask = 0xff; cmd->cck_mask = 0x0f; cmd->lifetime = htole32(WPI_LIFETIME_INFINITE); /* * XXX WPI_TX_AUTO_SEQ seems to be ignored - workaround this issue * XXX by using WPI_TX_NEED_ACK instead (with some side effects). */ cmd->flags = htole32(WPI_TX_NEED_ACK | WPI_TX_INSERT_TSTAMP); bcn->code = WPI_CMD_SET_BEACON; bcn->ac = WPI_CMD_QUEUE_NUM; bcn->size = sizeof(struct wpi_cmd_beacon); } static struct ieee80211vap * wpi_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, enum ieee80211_opmode opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac[IEEE80211_ADDR_LEN]) { struct wpi_vap *wvp; struct ieee80211vap *vap; if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */ return NULL; wvp = malloc(sizeof(struct wpi_vap), M_80211_VAP, M_WAITOK | M_ZERO); vap = &wvp->wv_vap; ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid); if (opmode == IEEE80211_M_IBSS || opmode == IEEE80211_M_HOSTAP) { WPI_VAP_LOCK_INIT(wvp); wpi_init_beacon(wvp); } /* Override with driver methods. */ vap->iv_key_set = wpi_key_set; vap->iv_key_delete = wpi_key_delete; if (opmode == IEEE80211_M_IBSS) { wvp->wv_recv_mgmt = vap->iv_recv_mgmt; vap->iv_recv_mgmt = wpi_ibss_recv_mgmt; } wvp->wv_newstate = vap->iv_newstate; vap->iv_newstate = wpi_newstate; vap->iv_update_beacon = wpi_update_beacon; vap->iv_max_aid = WPI_ID_IBSS_MAX - WPI_ID_IBSS_MIN + 1; ieee80211_ratectl_init(vap); /* Complete setup. */ ieee80211_vap_attach(vap, ieee80211_media_change, ieee80211_media_status, mac); ic->ic_opmode = opmode; return vap; } static void wpi_vap_delete(struct ieee80211vap *vap) { struct wpi_vap *wvp = WPI_VAP(vap); struct wpi_buf *bcn = &wvp->wv_bcbuf; enum ieee80211_opmode opmode = vap->iv_opmode; ieee80211_ratectl_deinit(vap); ieee80211_vap_detach(vap); if (opmode == IEEE80211_M_IBSS || opmode == IEEE80211_M_HOSTAP) { if (bcn->m != NULL) m_freem(bcn->m); WPI_VAP_LOCK_DESTROY(wvp); } free(wvp, M_80211_VAP); } static int wpi_detach(device_t dev) { struct wpi_softc *sc = device_get_softc(dev); struct ieee80211com *ic = &sc->sc_ic; uint8_t qid; DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); if (ic->ic_vap_create == wpi_vap_create) { ieee80211_draintask(ic, &sc->sc_radioon_task); ieee80211_draintask(ic, &sc->sc_radiooff_task); wpi_stop(sc); callout_drain(&sc->watchdog_rfkill); callout_drain(&sc->tx_timeout); callout_drain(&sc->scan_timeout); callout_drain(&sc->calib_to); ieee80211_ifdetach(ic); } /* Uninstall interrupt handler. 
*/ if (sc->irq != NULL) { bus_teardown_intr(dev, sc->irq, sc->sc_ih); bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq), sc->irq); pci_release_msi(dev); } if (sc->txq[0].data_dmat) { /* Free DMA resources. */ for (qid = 0; qid < WPI_DRV_NTXQUEUES; qid++) wpi_free_tx_ring(sc, &sc->txq[qid]); wpi_free_rx_ring(sc); wpi_free_shared(sc); } if (sc->fw_dma.tag) wpi_free_fwmem(sc); if (sc->mem != NULL) bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(sc->mem), sc->mem); DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); WPI_TXQ_STATE_LOCK_DESTROY(sc); WPI_TXQ_LOCK_DESTROY(sc); WPI_NT_LOCK_DESTROY(sc); WPI_RXON_LOCK_DESTROY(sc); WPI_TX_LOCK_DESTROY(sc); WPI_LOCK_DESTROY(sc); return 0; } static int wpi_shutdown(device_t dev) { struct wpi_softc *sc = device_get_softc(dev); wpi_stop(sc); return 0; } static int wpi_suspend(device_t dev) { struct wpi_softc *sc = device_get_softc(dev); struct ieee80211com *ic = &sc->sc_ic; ieee80211_suspend_all(ic); return 0; } static int wpi_resume(device_t dev) { struct wpi_softc *sc = device_get_softc(dev); struct ieee80211com *ic = &sc->sc_ic; /* Clear device-specific "PCI retry timeout" register (41h). */ pci_write_config(dev, 0x41, 0, 1); ieee80211_resume_all(ic); return 0; } /* * Grab exclusive access to NIC memory. */ static int wpi_nic_lock(struct wpi_softc *sc) { int ntries; /* Request exclusive access to NIC. */ WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); /* Spin until we actually get the lock. */ for (ntries = 0; ntries < 1000; ntries++) { if ((WPI_READ(sc, WPI_GP_CNTRL) & (WPI_GP_CNTRL_MAC_ACCESS_ENA | WPI_GP_CNTRL_SLEEP)) == WPI_GP_CNTRL_MAC_ACCESS_ENA) return 0; DELAY(10); } device_printf(sc->sc_dev, "could not lock memory\n"); return ETIMEDOUT; } /* * Release lock on NIC memory. 
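 *
 * Callers bracket direct register access with this lock pair; a
 * minimal usage sketch ("addr" and "val" are illustrative only,
 * mirroring wpi_read_prom_data() and the prph helpers below) is:
 *
 *	if (wpi_nic_lock(sc) == 0) {
 *		val = wpi_prph_read(sc, addr);
 *		wpi_nic_unlock(sc);
 *	}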
*/ static __inline void wpi_nic_unlock(struct wpi_softc *sc) { WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); } static __inline uint32_t wpi_prph_read(struct wpi_softc *sc, uint32_t addr) { WPI_WRITE(sc, WPI_PRPH_RADDR, WPI_PRPH_DWORD | addr); WPI_BARRIER_READ_WRITE(sc); return WPI_READ(sc, WPI_PRPH_RDATA); } static __inline void wpi_prph_write(struct wpi_softc *sc, uint32_t addr, uint32_t data) { WPI_WRITE(sc, WPI_PRPH_WADDR, WPI_PRPH_DWORD | addr); WPI_BARRIER_WRITE(sc); WPI_WRITE(sc, WPI_PRPH_WDATA, data); } static __inline void wpi_prph_setbits(struct wpi_softc *sc, uint32_t addr, uint32_t mask) { wpi_prph_write(sc, addr, wpi_prph_read(sc, addr) | mask); } static __inline void wpi_prph_clrbits(struct wpi_softc *sc, uint32_t addr, uint32_t mask) { wpi_prph_write(sc, addr, wpi_prph_read(sc, addr) & ~mask); } static __inline void wpi_prph_write_region_4(struct wpi_softc *sc, uint32_t addr, const uint32_t *data, uint32_t count) { for (; count != 0; count--, data++, addr += 4) wpi_prph_write(sc, addr, *data); } static __inline uint32_t wpi_mem_read(struct wpi_softc *sc, uint32_t addr) { WPI_WRITE(sc, WPI_MEM_RADDR, addr); WPI_BARRIER_READ_WRITE(sc); return WPI_READ(sc, WPI_MEM_RDATA); } static __inline void wpi_mem_read_region_4(struct wpi_softc *sc, uint32_t addr, uint32_t *data, int count) { for (; count > 0; count--, addr += 4) *data++ = wpi_mem_read(sc, addr); } static int wpi_read_prom_data(struct wpi_softc *sc, uint32_t addr, void *data, int count) { uint8_t *out = data; uint32_t val; int error, ntries; DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); if ((error = wpi_nic_lock(sc)) != 0) return error; for (; count > 0; count -= 2, addr++) { WPI_WRITE(sc, WPI_EEPROM, addr << 2); for (ntries = 0; ntries < 10; ntries++) { val = WPI_READ(sc, WPI_EEPROM); if (val & WPI_EEPROM_READ_VALID) break; DELAY(5); } if (ntries == 10) { device_printf(sc->sc_dev, "timeout reading ROM at 0x%x\n", addr); return ETIMEDOUT; } *out++= val >> 16; if (count > 1) *out ++= val >> 24; } wpi_nic_unlock(sc); DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); return 0; } static void wpi_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { if (error != 0) return; KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs)); *(bus_addr_t *)arg = segs[0].ds_addr; } /* * Allocates a contiguous block of dma memory of the requested size and * alignment. 
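 *
 * On success dma->paddr holds the bus address of the block and, when
 * kvap is non-NULL, *kvap its kernel mapping.  A typical call, as in
 * wpi_alloc_shared() below, looks like:
 *
 *	error = wpi_dma_contig_alloc(sc, &sc->shared_dma,
 *	    (void **)&sc->shared, sizeof(struct wpi_shared), 4096);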
*/ static int wpi_dma_contig_alloc(struct wpi_softc *sc, struct wpi_dma_info *dma, void **kvap, bus_size_t size, bus_size_t alignment) { int error; dma->tag = NULL; dma->size = size; error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), alignment, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size, 1, size, 0, NULL, NULL, &dma->tag); if (error != 0) goto fail; error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr, BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map); if (error != 0) goto fail; error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size, wpi_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT); if (error != 0) goto fail; bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); if (kvap != NULL) *kvap = dma->vaddr; return 0; fail: wpi_dma_contig_free(dma); return error; } static void wpi_dma_contig_free(struct wpi_dma_info *dma) { if (dma->vaddr != NULL) { bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(dma->tag, dma->map); bus_dmamem_free(dma->tag, dma->vaddr, dma->map); dma->vaddr = NULL; } if (dma->tag != NULL) { bus_dma_tag_destroy(dma->tag); dma->tag = NULL; } } /* * Allocate a shared page between host and NIC. */ static int wpi_alloc_shared(struct wpi_softc *sc) { /* Shared buffer must be aligned on a 4KB boundary. */ return wpi_dma_contig_alloc(sc, &sc->shared_dma, (void **)&sc->shared, sizeof (struct wpi_shared), 4096); } static void wpi_free_shared(struct wpi_softc *sc) { wpi_dma_contig_free(&sc->shared_dma); } /* * Allocate DMA-safe memory for firmware transfer. */ static int wpi_alloc_fwmem(struct wpi_softc *sc) { /* Must be aligned on a 16-byte boundary. */ return wpi_dma_contig_alloc(sc, &sc->fw_dma, NULL, WPI_FW_TEXT_MAXSZ + WPI_FW_DATA_MAXSZ, 16); } static void wpi_free_fwmem(struct wpi_softc *sc) { wpi_dma_contig_free(&sc->fw_dma); } static int wpi_alloc_rx_ring(struct wpi_softc *sc) { struct wpi_rx_ring *ring = &sc->rxq; bus_size_t size; int i, error; ring->cur = 0; ring->update = 0; DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); /* Allocate RX descriptors (16KB aligned.) */ size = WPI_RX_RING_COUNT * sizeof (uint32_t); error = wpi_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc, size, WPI_RING_DMA_ALIGN); if (error != 0) { device_printf(sc->sc_dev, "%s: could not allocate RX ring DMA memory, error %d\n", __func__, error); goto fail; } /* Create RX buffer DMA tag. */ error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MJUMPAGESIZE, 1, MJUMPAGESIZE, 0, NULL, NULL, &ring->data_dmat); if (error != 0) { device_printf(sc->sc_dev, "%s: could not create RX buf DMA tag, error %d\n", __func__, error); goto fail; } /* * Allocate and map RX buffers. */ for (i = 0; i < WPI_RX_RING_COUNT; i++) { struct wpi_rx_data *data = &ring->data[i]; bus_addr_t paddr; error = bus_dmamap_create(ring->data_dmat, 0, &data->map); if (error != 0) { device_printf(sc->sc_dev, "%s: could not create RX buf DMA map, error %d\n", __func__, error); goto fail; } data->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); if (data->m == NULL) { device_printf(sc->sc_dev, "%s: could not allocate RX mbuf\n", __func__); error = ENOBUFS; goto fail; } error = bus_dmamap_load(ring->data_dmat, data->map, mtod(data->m, void *), MJUMPAGESIZE, wpi_dma_map_addr, &paddr, BUS_DMA_NOWAIT); if (error != 0 && error != EFBIG) { device_printf(sc->sc_dev, "%s: can't map mbuf (error %d)\n", __func__, error); goto fail; } /* Set physical address of RX buffer. 
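 * The RX "descriptor" ring is just an array of 32-bit little-endian
 * bus addresses, one per receive buffer, hence the htole32() below.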
*/ ring->desc[i] = htole32(paddr); } bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, BUS_DMASYNC_PREWRITE); DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); return 0; fail: wpi_free_rx_ring(sc); DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); return error; } static void wpi_update_rx_ring(struct wpi_softc *sc) { WPI_WRITE(sc, WPI_FH_RX_WPTR, sc->rxq.cur & ~7); } static void wpi_update_rx_ring_ps(struct wpi_softc *sc) { struct wpi_rx_ring *ring = &sc->rxq; if (ring->update != 0) { /* Wait for INT_WAKEUP event. */ return; } WPI_TXQ_LOCK(sc); WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); if (WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_SLEEP) { DPRINTF(sc, WPI_DEBUG_PWRSAVE, "%s: wakeup request\n", __func__); ring->update = 1; } else { wpi_update_rx_ring(sc); WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); } WPI_TXQ_UNLOCK(sc); } static void wpi_reset_rx_ring(struct wpi_softc *sc) { struct wpi_rx_ring *ring = &sc->rxq; int ntries; DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); if (wpi_nic_lock(sc) == 0) { WPI_WRITE(sc, WPI_FH_RX_CONFIG, 0); for (ntries = 0; ntries < 1000; ntries++) { if (WPI_READ(sc, WPI_FH_RX_STATUS) & WPI_FH_RX_STATUS_IDLE) break; DELAY(10); } wpi_nic_unlock(sc); } ring->cur = 0; ring->update = 0; } static void wpi_free_rx_ring(struct wpi_softc *sc) { struct wpi_rx_ring *ring = &sc->rxq; int i; DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); wpi_dma_contig_free(&ring->desc_dma); for (i = 0; i < WPI_RX_RING_COUNT; i++) { struct wpi_rx_data *data = &ring->data[i]; if (data->m != NULL) { bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(ring->data_dmat, data->map); m_freem(data->m); data->m = NULL; } if (data->map != NULL) bus_dmamap_destroy(ring->data_dmat, data->map); } if (ring->data_dmat != NULL) { bus_dma_tag_destroy(ring->data_dmat); ring->data_dmat = NULL; } } static int wpi_alloc_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring, uint8_t qid) { bus_addr_t paddr; bus_size_t size; int i, error; ring->qid = qid; ring->queued = 0; ring->cur = 0; ring->pending = 0; ring->update = 0; DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); /* Allocate TX descriptors (16KB aligned.) */ size = WPI_TX_RING_COUNT * sizeof (struct wpi_tx_desc); error = wpi_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc, size, WPI_RING_DMA_ALIGN); if (error != 0) { device_printf(sc->sc_dev, "%s: could not allocate TX ring DMA memory, error %d\n", __func__, error); goto fail; } /* Update shared area with ring physical address. 
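 * The firmware learns where each TX ring lives from the shared page:
 * txbase[qid] carries the bus address of the ring's descriptor array.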
*/ sc->shared->txbase[qid] = htole32(ring->desc_dma.paddr); bus_dmamap_sync(sc->shared_dma.tag, sc->shared_dma.map, BUS_DMASYNC_PREWRITE); size = WPI_TX_RING_COUNT * sizeof (struct wpi_tx_cmd); error = wpi_dma_contig_alloc(sc, &ring->cmd_dma, (void **)&ring->cmd, size, 4); if (error != 0) { device_printf(sc->sc_dev, "%s: could not allocate TX cmd DMA memory, error %d\n", __func__, error); goto fail; } error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, WPI_MAX_SCATTER - 1, MCLBYTES, 0, NULL, NULL, &ring->data_dmat); if (error != 0) { device_printf(sc->sc_dev, "%s: could not create TX buf DMA tag, error %d\n", __func__, error); goto fail; } paddr = ring->cmd_dma.paddr; for (i = 0; i < WPI_TX_RING_COUNT; i++) { struct wpi_tx_data *data = &ring->data[i]; data->cmd_paddr = paddr; paddr += sizeof (struct wpi_tx_cmd); error = bus_dmamap_create(ring->data_dmat, 0, &data->map); if (error != 0) { device_printf(sc->sc_dev, "%s: could not create TX buf DMA map, error %d\n", __func__, error); goto fail; } } DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); return 0; fail: wpi_free_tx_ring(sc, ring); DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); return error; } static void wpi_update_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring) { WPI_WRITE(sc, WPI_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); } static void wpi_update_tx_ring_ps(struct wpi_softc *sc, struct wpi_tx_ring *ring) { if (ring->update != 0) { /* Wait for INT_WAKEUP event. */ return; } WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); if (WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_SLEEP) { DPRINTF(sc, WPI_DEBUG_PWRSAVE, "%s (%d): requesting wakeup\n", __func__, ring->qid); ring->update = 1; } else { wpi_update_tx_ring(sc, ring); WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); } } static void wpi_reset_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring) { int i; DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); for (i = 0; i < WPI_TX_RING_COUNT; i++) { struct wpi_tx_data *data = &ring->data[i]; if (data->m != NULL) { bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->data_dmat, data->map); m_freem(data->m); data->m = NULL; } if (data->ni != NULL) { ieee80211_free_node(data->ni); data->ni = NULL; } } /* Clear TX descriptors. */ memset(ring->desc, 0, ring->desc_dma.size); bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, BUS_DMASYNC_PREWRITE); ring->queued = 0; ring->cur = 0; ring->pending = 0; ring->update = 0; } static void wpi_free_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring) { int i; DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); wpi_dma_contig_free(&ring->desc_dma); wpi_dma_contig_free(&ring->cmd_dma); for (i = 0; i < WPI_TX_RING_COUNT; i++) { struct wpi_tx_data *data = &ring->data[i]; if (data->m != NULL) { bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->data_dmat, data->map); m_freem(data->m); } if (data->map != NULL) bus_dmamap_destroy(ring->data_dmat, data->map); } if (ring->data_dmat != NULL) { bus_dma_tag_destroy(ring->data_dmat); ring->data_dmat = NULL; } } /* * Extract various information from EEPROM. 
*/ static int wpi_read_eeprom(struct wpi_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN]) { #define WPI_CHK(res) do { \ if ((error = res) != 0) \ goto fail; \ } while (0) uint8_t i; int error; DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); /* Adapter has to be powered on for EEPROM access to work. */ if ((error = wpi_apm_init(sc)) != 0) { device_printf(sc->sc_dev, "%s: could not power ON adapter, error %d\n", __func__, error); return error; } if ((WPI_READ(sc, WPI_EEPROM_GP) & 0x6) == 0) { device_printf(sc->sc_dev, "bad EEPROM signature\n"); error = EIO; goto fail; } /* Clear HW ownership of EEPROM. */ WPI_CLRBITS(sc, WPI_EEPROM_GP, WPI_EEPROM_GP_IF_OWNER); /* Read the hardware capabilities, revision and SKU type. */ WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_SKU_CAP, &sc->cap, sizeof(sc->cap))); WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_REVISION, &sc->rev, sizeof(sc->rev))); WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_TYPE, &sc->type, sizeof(sc->type))); sc->rev = le16toh(sc->rev); DPRINTF(sc, WPI_DEBUG_EEPROM, "cap=%x rev=%x type=%x\n", sc->cap, sc->rev, sc->type); /* Read the regulatory domain (4 ASCII characters.) */ WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_DOMAIN, sc->domain, sizeof(sc->domain))); /* Read MAC address. */ WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_MAC, macaddr, IEEE80211_ADDR_LEN)); /* Read the list of authorized channels. */ for (i = 0; i < WPI_CHAN_BANDS_COUNT; i++) WPI_CHK(wpi_read_eeprom_channels(sc, i)); /* Read the list of TX power groups. */ for (i = 0; i < WPI_POWER_GROUPS_COUNT; i++) WPI_CHK(wpi_read_eeprom_group(sc, i)); fail: wpi_apm_stop(sc); /* Power OFF adapter. */ DPRINTF(sc, WPI_DEBUG_TRACE, error ? TRACE_STR_END_ERR : TRACE_STR_END, __func__); return error; #undef WPI_CHK } /* * Translate EEPROM flags to net80211. */ static uint32_t wpi_eeprom_channel_flags(struct wpi_eeprom_chan *channel) { uint32_t nflags; nflags = 0; if ((channel->flags & WPI_EEPROM_CHAN_ACTIVE) == 0) nflags |= IEEE80211_CHAN_PASSIVE; if ((channel->flags & WPI_EEPROM_CHAN_IBSS) == 0) nflags |= IEEE80211_CHAN_NOADHOC; if (channel->flags & WPI_EEPROM_CHAN_RADAR) { nflags |= IEEE80211_CHAN_DFS; /* XXX apparently IBSS may still be marked */ nflags |= IEEE80211_CHAN_NOADHOC; } /* XXX HOSTAP uses WPI_MODE_IBSS */ if (nflags & IEEE80211_CHAN_NOADHOC) nflags |= IEEE80211_CHAN_NOHOSTAP; return nflags; } static void wpi_read_eeprom_band(struct wpi_softc *sc, uint8_t n, int maxchans, int *nchans, struct ieee80211_channel chans[]) { struct wpi_eeprom_chan *channels = sc->eeprom_channels[n]; const struct wpi_chan_band *band = &wpi_bands[n]; uint32_t nflags; uint8_t bands[IEEE80211_MODE_BYTES]; uint8_t chan, i; int error; memset(bands, 0, sizeof(bands)); if (n == 0) { setbit(bands, IEEE80211_MODE_11B); setbit(bands, IEEE80211_MODE_11G); } else setbit(bands, IEEE80211_MODE_11A); for (i = 0; i < band->nchan; i++) { if (!(channels[i].flags & WPI_EEPROM_CHAN_VALID)) { DPRINTF(sc, WPI_DEBUG_EEPROM, "Channel Not Valid: %d, band %d\n", band->chan[i],n); continue; } chan = band->chan[i]; nflags = wpi_eeprom_channel_flags(&channels[i]); error = ieee80211_add_channel(chans, maxchans, nchans, chan, 0, channels[i].maxpwr, nflags, bands); if (error != 0) break; /* Save maximum allowed TX power for this channel. 
*/ sc->maxpwr[chan] = channels[i].maxpwr; DPRINTF(sc, WPI_DEBUG_EEPROM, "adding chan %d flags=0x%x maxpwr=%d, offset %d\n", chan, channels[i].flags, sc->maxpwr[chan], *nchans); } } /** * Read the eeprom to find out what channels are valid for the given * band and update net80211 with what we find. */ static int wpi_read_eeprom_channels(struct wpi_softc *sc, uint8_t n) { struct ieee80211com *ic = &sc->sc_ic; const struct wpi_chan_band *band = &wpi_bands[n]; int error; DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); error = wpi_read_prom_data(sc, band->addr, &sc->eeprom_channels[n], band->nchan * sizeof (struct wpi_eeprom_chan)); if (error != 0) { DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); return error; } wpi_read_eeprom_band(sc, n, IEEE80211_CHAN_MAX, &ic->ic_nchans, ic->ic_channels); ieee80211_sort_channels(ic->ic_channels, ic->ic_nchans); DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); return 0; } static struct wpi_eeprom_chan * wpi_find_eeprom_channel(struct wpi_softc *sc, struct ieee80211_channel *c) { int i, j; for (j = 0; j < WPI_CHAN_BANDS_COUNT; j++) for (i = 0; i < wpi_bands[j].nchan; i++) if (wpi_bands[j].chan[i] == c->ic_ieee && ((j == 0) ^ IEEE80211_IS_CHAN_A(c)) == 1) return &sc->eeprom_channels[j][i]; return NULL; } static void wpi_getradiocaps(struct ieee80211com *ic, int maxchans, int *nchans, struct ieee80211_channel chans[]) { struct wpi_softc *sc = ic->ic_softc; int i; /* Parse the list of authorized channels. */ for (i = 0; i < WPI_CHAN_BANDS_COUNT && *nchans < maxchans; i++) wpi_read_eeprom_band(sc, i, maxchans, nchans, chans); } /* * Enforce flags read from EEPROM. */ static int wpi_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd, int nchan, struct ieee80211_channel chans[]) { struct wpi_softc *sc = ic->ic_softc; int i; for (i = 0; i < nchan; i++) { struct ieee80211_channel *c = &chans[i]; struct wpi_eeprom_chan *channel; channel = wpi_find_eeprom_channel(sc, c); if (channel == NULL) { ic_printf(ic, "%s: invalid channel %u freq %u/0x%x\n", __func__, c->ic_ieee, c->ic_freq, c->ic_flags); return EINVAL; } c->ic_flags |= wpi_eeprom_channel_flags(channel); } return 0; } static int wpi_read_eeprom_group(struct wpi_softc *sc, uint8_t n) { struct wpi_power_group *group = &sc->groups[n]; struct wpi_eeprom_group rgroup; int i, error; DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); if ((error = wpi_read_prom_data(sc, WPI_EEPROM_POWER_GRP + n * 32, &rgroup, sizeof rgroup)) != 0) { DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); return error; } /* Save TX power group information. */ group->chan = rgroup.chan; group->maxpwr = rgroup.maxpwr; /* Retrieve temperature at which the samples were taken. 
*/ group->temp = (int16_t)le16toh(rgroup.temp); DPRINTF(sc, WPI_DEBUG_EEPROM, "power group %d: chan=%d maxpwr=%d temp=%d\n", n, group->chan, group->maxpwr, group->temp); for (i = 0; i < WPI_SAMPLES_COUNT; i++) { group->samples[i].index = rgroup.samples[i].index; group->samples[i].power = rgroup.samples[i].power; DPRINTF(sc, WPI_DEBUG_EEPROM, "\tsample %d: index=%d power=%d\n", i, group->samples[i].index, group->samples[i].power); } DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); return 0; } static __inline uint8_t wpi_add_node_entry_adhoc(struct wpi_softc *sc) { uint8_t newid = WPI_ID_IBSS_MIN; for (; newid <= WPI_ID_IBSS_MAX; newid++) { if ((sc->nodesmsk & (1 << newid)) == 0) { sc->nodesmsk |= 1 << newid; return newid; } } return WPI_ID_UNDEFINED; } static __inline uint8_t wpi_add_node_entry_sta(struct wpi_softc *sc) { sc->nodesmsk |= 1 << WPI_ID_BSS; return WPI_ID_BSS; } static __inline int wpi_check_node_entry(struct wpi_softc *sc, uint8_t id) { if (id == WPI_ID_UNDEFINED) return 0; return (sc->nodesmsk >> id) & 1; } static __inline void wpi_clear_node_table(struct wpi_softc *sc) { sc->nodesmsk = 0; } static __inline void wpi_del_node_entry(struct wpi_softc *sc, uint8_t id) { sc->nodesmsk &= ~(1 << id); } static struct ieee80211_node * wpi_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) { struct wpi_node *wn; wn = malloc(sizeof (struct wpi_node), M_80211_NODE, M_NOWAIT | M_ZERO); if (wn == NULL) return NULL; wn->id = WPI_ID_UNDEFINED; return &wn->ni; } static void wpi_node_free(struct ieee80211_node *ni) { struct wpi_softc *sc = ni->ni_ic->ic_softc; struct wpi_node *wn = WPI_NODE(ni); if (wn->id != WPI_ID_UNDEFINED) { WPI_NT_LOCK(sc); if (wpi_check_node_entry(sc, wn->id)) { wpi_del_node_entry(sc, wn->id); wpi_del_node(sc, ni); } WPI_NT_UNLOCK(sc); } sc->sc_node_free(ni); } static __inline int wpi_check_bss_filter(struct wpi_softc *sc) { return (sc->rxon.filter & htole32(WPI_FILTER_BSS)) != 0; } static void wpi_ibss_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m, int subtype, const struct ieee80211_rx_stats *rxs, int rssi, int nf) { struct ieee80211vap *vap = ni->ni_vap; struct wpi_softc *sc = vap->iv_ic->ic_softc; struct wpi_vap *wvp = WPI_VAP(vap); uint64_t ni_tstamp, rx_tstamp; wvp->wv_recv_mgmt(ni, m, subtype, rxs, rssi, nf); if (vap->iv_state == IEEE80211_S_RUN && (subtype == IEEE80211_FC0_SUBTYPE_BEACON || subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)) { ni_tstamp = le64toh(ni->ni_tstamp.tsf); rx_tstamp = le64toh(sc->rx_tstamp); if (ni_tstamp >= rx_tstamp) { DPRINTF(sc, WPI_DEBUG_STATE, "ibss merge, tsf %ju tstamp %ju\n", (uintmax_t)rx_tstamp, (uintmax_t)ni_tstamp); (void) ieee80211_ibss_merge(ni); } } } static void wpi_restore_node(void *arg, struct ieee80211_node *ni) { struct wpi_softc *sc = arg; struct wpi_node *wn = WPI_NODE(ni); int error; WPI_NT_LOCK(sc); if (wn->id != WPI_ID_UNDEFINED) { wn->id = WPI_ID_UNDEFINED; if ((error = wpi_add_ibss_node(sc, ni)) != 0) { device_printf(sc->sc_dev, "%s: could not add IBSS node, error %d\n", __func__, error); } } WPI_NT_UNLOCK(sc); } static void wpi_restore_node_table(struct wpi_softc *sc, struct wpi_vap *wvp) { struct ieee80211com *ic = &sc->sc_ic; /* Set group keys once. 
*/ WPI_NT_LOCK(sc); wvp->wv_gtk = 0; WPI_NT_UNLOCK(sc); ieee80211_iterate_nodes(&ic->ic_sta, wpi_restore_node, sc); ieee80211_crypto_reload_keys(ic); } /** * Called by net80211 when ever there is a change to 80211 state machine */ static int wpi_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct wpi_vap *wvp = WPI_VAP(vap); struct ieee80211com *ic = vap->iv_ic; struct wpi_softc *sc = ic->ic_softc; int error = 0; DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); WPI_TXQ_LOCK(sc); if (nstate > IEEE80211_S_INIT && sc->sc_running == 0) { DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); WPI_TXQ_UNLOCK(sc); return ENXIO; } WPI_TXQ_UNLOCK(sc); DPRINTF(sc, WPI_DEBUG_STATE, "%s: %s -> %s\n", __func__, ieee80211_state_name[vap->iv_state], ieee80211_state_name[nstate]); if (vap->iv_state == IEEE80211_S_RUN && nstate < IEEE80211_S_RUN) { if ((error = wpi_set_pslevel(sc, 0, 0, 1)) != 0) { device_printf(sc->sc_dev, "%s: could not set power saving level\n", __func__); return error; } wpi_set_led(sc, WPI_LED_LINK, 1, 0); } switch (nstate) { case IEEE80211_S_SCAN: WPI_RXON_LOCK(sc); if (wpi_check_bss_filter(sc) != 0) { sc->rxon.filter &= ~htole32(WPI_FILTER_BSS); if ((error = wpi_send_rxon(sc, 0, 1)) != 0) { device_printf(sc->sc_dev, "%s: could not send RXON\n", __func__); } } WPI_RXON_UNLOCK(sc); break; case IEEE80211_S_ASSOC: if (vap->iv_state != IEEE80211_S_RUN) break; /* FALLTHROUGH */ case IEEE80211_S_AUTH: /* * NB: do not optimize AUTH -> AUTH state transmission - * this will break powersave with non-QoS AP! */ /* * The node must be registered in the firmware before auth. * Also the associd must be cleared on RUN -> ASSOC * transitions. */ if ((error = wpi_auth(sc, vap)) != 0) { device_printf(sc->sc_dev, "%s: could not move to AUTH state, error %d\n", __func__, error); } break; case IEEE80211_S_RUN: /* * RUN -> RUN transition: * STA mode: Just restart the timers. * IBSS mode: Process IBSS merge. */ if (vap->iv_state == IEEE80211_S_RUN) { if (vap->iv_opmode != IEEE80211_M_IBSS) { WPI_RXON_LOCK(sc); wpi_calib_timeout(sc); WPI_RXON_UNLOCK(sc); break; } else { /* * Drop the BSS_FILTER bit * (there is no another way to change bssid). */ WPI_RXON_LOCK(sc); sc->rxon.filter &= ~htole32(WPI_FILTER_BSS); if ((error = wpi_send_rxon(sc, 0, 1)) != 0) { device_printf(sc->sc_dev, "%s: could not send RXON\n", __func__); } WPI_RXON_UNLOCK(sc); /* Restore all what was lost. */ wpi_restore_node_table(sc, wvp); /* XXX set conditionally? */ wpi_updateedca(ic); } } /* * !RUN -> RUN requires setting the association id * which is done with a firmware cmd. We also defer * starting the timers until that work is done. 
*/ if ((error = wpi_run(sc, vap)) != 0) { device_printf(sc->sc_dev, "%s: could not move to RUN state\n", __func__); } break; default: break; } if (error != 0) { DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); return error; } DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); return wvp->wv_newstate(vap, nstate, arg); } static void wpi_calib_timeout(void *arg) { struct wpi_softc *sc = arg; if (wpi_check_bss_filter(sc) == 0) return; wpi_power_calibration(sc); callout_reset(&sc->calib_to, 60*hz, wpi_calib_timeout, sc); } static __inline uint8_t rate2plcp(const uint8_t rate) { switch (rate) { case 12: return 0xd; case 18: return 0xf; case 24: return 0x5; case 36: return 0x7; case 48: return 0x9; case 72: return 0xb; case 96: return 0x1; case 108: return 0x3; case 2: return 10; case 4: return 20; case 11: return 55; case 22: return 110; default: return 0; } } static __inline uint8_t plcp2rate(const uint8_t plcp) { switch (plcp) { case 0xd: return 12; case 0xf: return 18; case 0x5: return 24; case 0x7: return 36; case 0x9: return 48; case 0xb: return 72; case 0x1: return 96; case 0x3: return 108; case 10: return 2; case 20: return 4; case 55: return 11; case 110: return 22; default: return 0; } } /* Quickly determine if a given rate is CCK or OFDM. */ #define WPI_RATE_IS_OFDM(rate) ((rate) >= 12 && (rate) != 22) static void wpi_rx_done(struct wpi_softc *sc, struct wpi_rx_desc *desc, struct wpi_rx_data *data) { struct epoch_tracker et; struct ieee80211com *ic = &sc->sc_ic; struct wpi_rx_ring *ring = &sc->rxq; struct wpi_rx_stat *stat; struct wpi_rx_head *head; struct wpi_rx_tail *tail; struct ieee80211_frame *wh; struct ieee80211_node *ni; struct mbuf *m, *m1; bus_addr_t paddr; uint32_t flags; uint16_t len; int error; stat = (struct wpi_rx_stat *)(desc + 1); if (__predict_false(stat->len > WPI_STAT_MAXLEN)) { device_printf(sc->sc_dev, "invalid RX statistic header\n"); goto fail1; } bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD); head = (struct wpi_rx_head *)((caddr_t)(stat + 1) + stat->len); len = le16toh(head->len); tail = (struct wpi_rx_tail *)((caddr_t)(head + 1) + len); flags = le32toh(tail->flags); DPRINTF(sc, WPI_DEBUG_RECV, "%s: idx %d len %d stat len %u rssi %d" " rate %x chan %d tstamp %ju\n", __func__, ring->cur, le32toh(desc->len), len, (int8_t)stat->rssi, head->plcp, head->chan, (uintmax_t)le64toh(tail->tstamp)); /* Discard frames with a bad FCS early. */ if ((flags & WPI_RX_NOERROR) != WPI_RX_NOERROR) { DPRINTF(sc, WPI_DEBUG_RECV, "%s: RX flags error %x\n", __func__, flags); goto fail1; } /* Discard frames that are too short. */ if (len < sizeof (struct ieee80211_frame_ack)) { DPRINTF(sc, WPI_DEBUG_RECV, "%s: frame too short: %d\n", __func__, len); goto fail1; } m1 = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); if (__predict_false(m1 == NULL)) { DPRINTF(sc, WPI_DEBUG_ANY, "%s: no mbuf to restock ring\n", __func__); goto fail1; } bus_dmamap_unload(ring->data_dmat, data->map); error = bus_dmamap_load(ring->data_dmat, data->map, mtod(m1, void *), MJUMPAGESIZE, wpi_dma_map_addr, &paddr, BUS_DMA_NOWAIT); if (__predict_false(error != 0 && error != EFBIG)) { device_printf(sc->sc_dev, "%s: bus_dmamap_load failed, error %d\n", __func__, error); m_freem(m1); /* Try to reload the old mbuf. */ error = bus_dmamap_load(ring->data_dmat, data->map, mtod(data->m, void *), MJUMPAGESIZE, wpi_dma_map_addr, &paddr, BUS_DMA_NOWAIT); if (error != 0 && error != EFBIG) { panic("%s: could not load old RX mbuf", __func__); } /* Physical address may have changed. 
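 * Reloading the old mbuf may have produced a different bus address,
 * so rewrite this ring slot before bailing out.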
*/ ring->desc[ring->cur] = htole32(paddr); bus_dmamap_sync(ring->data_dmat, ring->desc_dma.map, BUS_DMASYNC_PREWRITE); goto fail1; } m = data->m; data->m = m1; /* Update RX descriptor. */ ring->desc[ring->cur] = htole32(paddr); bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, BUS_DMASYNC_PREWRITE); /* Finalize mbuf. */ m->m_data = (caddr_t)(head + 1); m->m_pkthdr.len = m->m_len = len; /* Grab a reference to the source node. */ wh = mtod(m, struct ieee80211_frame *); if ((wh->i_fc[1] & IEEE80211_FC1_PROTECTED) && (flags & WPI_RX_CIPHER_MASK) == WPI_RX_CIPHER_CCMP) { /* Check whether decryption was successful or not. */ if ((flags & WPI_RX_DECRYPT_MASK) != WPI_RX_DECRYPT_OK) { DPRINTF(sc, WPI_DEBUG_RECV, "CCMP decryption failed 0x%x\n", flags); goto fail2; } m->m_flags |= M_WEP; } if (len >= sizeof(struct ieee80211_frame_min)) ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh); else ni = NULL; sc->rx_tstamp = tail->tstamp; if (ieee80211_radiotap_active(ic)) { struct wpi_rx_radiotap_header *tap = &sc->sc_rxtap; tap->wr_flags = 0; if (head->flags & htole16(WPI_STAT_FLAG_SHPREAMBLE)) tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; tap->wr_dbm_antsignal = (int8_t)(stat->rssi + WPI_RSSI_OFFSET); tap->wr_dbm_antnoise = WPI_RSSI_OFFSET; tap->wr_tsft = tail->tstamp; tap->wr_antenna = (le16toh(head->flags) >> 4) & 0xf; tap->wr_rate = plcp2rate(head->plcp); } WPI_UNLOCK(sc); NET_EPOCH_ENTER(et); /* Send the frame to the 802.11 layer. */ if (ni != NULL) { (void)ieee80211_input(ni, m, stat->rssi, WPI_RSSI_OFFSET); /* Node is no longer needed. */ ieee80211_free_node(ni); } else (void)ieee80211_input_all(ic, m, stat->rssi, WPI_RSSI_OFFSET); NET_EPOCH_EXIT(et); WPI_LOCK(sc); return; fail2: m_freem(m); fail1: counter_u64_add(ic->ic_ierrors, 1); } static void wpi_rx_statistics(struct wpi_softc *sc, struct wpi_rx_desc *desc, struct wpi_rx_data *data) { /* Ignore */ } static void wpi_tx_done(struct wpi_softc *sc, struct wpi_rx_desc *desc) { struct ieee80211_ratectl_tx_status *txs = &sc->sc_txs; struct wpi_tx_ring *ring = &sc->txq[desc->qid & 0x3]; struct wpi_tx_data *data = &ring->data[desc->idx]; struct wpi_tx_stat *stat = (struct wpi_tx_stat *)(desc + 1); struct mbuf *m; struct ieee80211_node *ni; uint32_t status = le32toh(stat->status); KASSERT(data->ni != NULL, ("no node")); KASSERT(data->m != NULL, ("no mbuf")); DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); DPRINTF(sc, WPI_DEBUG_XMIT, "%s: " "qid %d idx %d retries %d btkillcnt %d rate %x duration %d " "status %x\n", __func__, desc->qid, desc->idx, stat->ackfailcnt, stat->btkillcnt, stat->rate, le32toh(stat->duration), status); /* Unmap and free mbuf. */ bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->data_dmat, data->map); m = data->m, data->m = NULL; ni = data->ni, data->ni = NULL; /* Restore frame header. */ KASSERT(M_LEADINGSPACE(m) >= data->hdrlen, ("no frame header!")); M_PREPEND(m, data->hdrlen, M_NOWAIT); KASSERT(m != NULL, ("%s: m is NULL\n", __func__)); /* * Update rate control statistics for the node. 
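 * The firmware's per-frame counters are mapped onto net80211's rate
 * control fields below: rtsfailcnt becomes the short retry count,
 * ackfailcnt (divided by WPI_NTRIES_DEFAULT) the long retry count,
 * and the WPI_TX_STATUS_FAIL_* codes select the matching
 * IEEE80211_RATECTL_TX_* status.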
*/ txs->pktlen = m->m_pkthdr.len; txs->short_retries = stat->rtsfailcnt; txs->long_retries = stat->ackfailcnt / WPI_NTRIES_DEFAULT; if (!(status & WPI_TX_STATUS_FAIL)) txs->status = IEEE80211_RATECTL_TX_SUCCESS; else { switch (status & 0xff) { case WPI_TX_STATUS_FAIL_SHORT_LIMIT: txs->status = IEEE80211_RATECTL_TX_FAIL_SHORT; break; case WPI_TX_STATUS_FAIL_LONG_LIMIT: txs->status = IEEE80211_RATECTL_TX_FAIL_LONG; break; case WPI_TX_STATUS_FAIL_LIFE_EXPIRE: txs->status = IEEE80211_RATECTL_TX_FAIL_EXPIRED; break; default: txs->status = IEEE80211_RATECTL_TX_FAIL_UNSPECIFIED; break; } } ieee80211_ratectl_tx_complete(ni, txs); ieee80211_tx_complete(ni, m, (status & WPI_TX_STATUS_FAIL) != 0); WPI_TXQ_STATE_LOCK(sc); if (--ring->queued > 0) callout_reset(&sc->tx_timeout, 5*hz, wpi_tx_timeout, sc); else callout_stop(&sc->tx_timeout); WPI_TXQ_STATE_UNLOCK(sc); DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); } /* * Process a "command done" firmware notification. This is where we wakeup * processes waiting for a synchronous command completion. */ static void wpi_cmd_done(struct wpi_softc *sc, struct wpi_rx_desc *desc) { struct wpi_tx_ring *ring = &sc->txq[WPI_CMD_QUEUE_NUM]; struct wpi_tx_data *data; struct wpi_tx_cmd *cmd; DPRINTF(sc, WPI_DEBUG_CMD, "cmd notification qid %x idx %d flags %x " "type %s len %d\n", desc->qid, desc->idx, desc->flags, wpi_cmd_str(desc->type), le32toh(desc->len)); if ((desc->qid & WPI_RX_DESC_QID_MSK) != WPI_CMD_QUEUE_NUM) return; /* Not a command ack. */ KASSERT(ring->queued == 0, ("ring->queued must be 0")); data = &ring->data[desc->idx]; cmd = &ring->cmd[desc->idx]; /* If the command was mapped in an mbuf, free it. */ if (data->m != NULL) { bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->data_dmat, data->map); m_freem(data->m); data->m = NULL; } wakeup(cmd); if (desc->type == WPI_CMD_SET_POWER_MODE) { struct wpi_pmgt_cmd *pcmd = (struct wpi_pmgt_cmd *)cmd->data; bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map, BUS_DMASYNC_POSTREAD); WPI_TXQ_LOCK(sc); if (le16toh(pcmd->flags) & WPI_PS_ALLOW_SLEEP) { sc->sc_update_rx_ring = wpi_update_rx_ring_ps; sc->sc_update_tx_ring = wpi_update_tx_ring_ps; } else { sc->sc_update_rx_ring = wpi_update_rx_ring; sc->sc_update_tx_ring = wpi_update_tx_ring; } WPI_TXQ_UNLOCK(sc); } } static void wpi_notif_intr(struct wpi_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); uint32_t hw; bus_dmamap_sync(sc->shared_dma.tag, sc->shared_dma.map, BUS_DMASYNC_POSTREAD); hw = le32toh(sc->shared->next) & 0xfff; hw = (hw == 0) ? WPI_RX_RING_COUNT - 1 : hw - 1; while (sc->rxq.cur != hw) { sc->rxq.cur = (sc->rxq.cur + 1) % WPI_RX_RING_COUNT; struct wpi_rx_data *data = &sc->rxq.data[sc->rxq.cur]; struct wpi_rx_desc *desc; bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); desc = mtod(data->m, struct wpi_rx_desc *); DPRINTF(sc, WPI_DEBUG_NOTIFY, "%s: cur=%d; qid %x idx %d flags %x type %d(%s) len %d\n", __func__, sc->rxq.cur, desc->qid, desc->idx, desc->flags, desc->type, wpi_cmd_str(desc->type), le32toh(desc->len)); if (!(desc->qid & WPI_UNSOLICITED_RX_NOTIF)) { /* Reply to a command. */ wpi_cmd_done(sc, desc); } switch (desc->type) { case WPI_RX_DONE: /* An 802.11 frame has been received. */ wpi_rx_done(sc, desc, data); if (__predict_false(sc->sc_running == 0)) { /* wpi_stop() was called. */ return; } break; case WPI_TX_DONE: /* An 802.11 frame has been transmitted. 
*/ wpi_tx_done(sc, desc); break; case WPI_RX_STATISTICS: case WPI_BEACON_STATISTICS: wpi_rx_statistics(sc, desc, data); break; case WPI_BEACON_MISSED: { struct wpi_beacon_missed *miss = (struct wpi_beacon_missed *)(desc + 1); uint32_t expected, misses, received, threshold; bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); misses = le32toh(miss->consecutive); expected = le32toh(miss->expected); received = le32toh(miss->received); threshold = MAX(2, vap->iv_bmissthreshold); DPRINTF(sc, WPI_DEBUG_BMISS, "%s: beacons missed %u(%u) (received %u/%u)\n", __func__, misses, le32toh(miss->total), received, expected); if (misses >= threshold || (received == 0 && expected >= threshold)) { WPI_RXON_LOCK(sc); if (callout_pending(&sc->scan_timeout)) { wpi_cmd(sc, WPI_CMD_SCAN_ABORT, NULL, 0, 1); } WPI_RXON_UNLOCK(sc); if (vap->iv_state == IEEE80211_S_RUN && (ic->ic_flags & IEEE80211_F_SCAN) == 0) ieee80211_beacon_miss(ic); } break; } #ifdef WPI_DEBUG case WPI_BEACON_SENT: { struct wpi_tx_stat *stat = (struct wpi_tx_stat *)(desc + 1); uint64_t *tsf = (uint64_t *)(stat + 1); uint32_t *mode = (uint32_t *)(tsf + 1); bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); DPRINTF(sc, WPI_DEBUG_BEACON, "beacon sent: rts %u, ack %u, btkill %u, rate %u, " "duration %u, status %x, tsf %ju, mode %x\n", stat->rtsfailcnt, stat->ackfailcnt, stat->btkillcnt, stat->rate, le32toh(stat->duration), le32toh(stat->status), le64toh(*tsf), le32toh(*mode)); break; } #endif case WPI_UC_READY: { struct wpi_ucode_info *uc = (struct wpi_ucode_info *)(desc + 1); /* The microcontroller is ready. */ bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); DPRINTF(sc, WPI_DEBUG_RESET, "microcode alive notification version=%d.%d " "subtype=%x alive=%x\n", uc->major, uc->minor, uc->subtype, le32toh(uc->valid)); if (le32toh(uc->valid) != 1) { device_printf(sc->sc_dev, "microcontroller initialization failed\n"); wpi_stop_locked(sc); return; } /* Save the address of the error log in SRAM. */ sc->errptr = le32toh(uc->errptr); break; } case WPI_STATE_CHANGED: { bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); uint32_t *status = (uint32_t *)(desc + 1); DPRINTF(sc, WPI_DEBUG_STATE, "state changed to %x\n", le32toh(*status)); if (le32toh(*status) & 1) { WPI_NT_LOCK(sc); wpi_clear_node_table(sc); WPI_NT_UNLOCK(sc); ieee80211_runtask(ic, &sc->sc_radiooff_task); return; } break; } #ifdef WPI_DEBUG case WPI_START_SCAN: { bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); struct wpi_start_scan *scan = (struct wpi_start_scan *)(desc + 1); DPRINTF(sc, WPI_DEBUG_SCAN, "%s: scanning channel %d status %x\n", __func__, scan->chan, le32toh(scan->status)); break; } #endif case WPI_STOP_SCAN: { bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); struct wpi_stop_scan *scan = (struct wpi_stop_scan *)(desc + 1); DPRINTF(sc, WPI_DEBUG_SCAN, "scan finished nchan=%d status=%d chan=%d\n", scan->nchan, scan->status, scan->chan); WPI_RXON_LOCK(sc); callout_stop(&sc->scan_timeout); WPI_RXON_UNLOCK(sc); if (scan->status == WPI_SCAN_ABORTED) ieee80211_cancel_scan(vap); else ieee80211_scan_next(vap); break; } } if (sc->rxq.cur % 8 == 0) { /* Tell the firmware what we have processed. */ sc->sc_update_rx_ring(sc); } } } /* * Process an INT_WAKEUP interrupt raised when the microcontroller wakes up * from power-down sleep mode. 
*/ static void wpi_wakeup_intr(struct wpi_softc *sc) { int qid; DPRINTF(sc, WPI_DEBUG_PWRSAVE, "%s: ucode wakeup from power-down sleep\n", __func__); /* Wakeup RX and TX rings. */ if (sc->rxq.update) { sc->rxq.update = 0; wpi_update_rx_ring(sc); } WPI_TXQ_LOCK(sc); for (qid = 0; qid < WPI_DRV_NTXQUEUES; qid++) { struct wpi_tx_ring *ring = &sc->txq[qid]; if (ring->update) { ring->update = 0; wpi_update_tx_ring(sc, ring); } } WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); WPI_TXQ_UNLOCK(sc); } /* * This function prints firmware registers */ #ifdef WPI_DEBUG static void wpi_debug_registers(struct wpi_softc *sc) { size_t i; static const uint32_t csr_tbl[] = { WPI_HW_IF_CONFIG, WPI_INT, WPI_INT_MASK, WPI_FH_INT, WPI_GPIO_IN, WPI_RESET, WPI_GP_CNTRL, WPI_EEPROM, WPI_EEPROM_GP, WPI_GIO, WPI_UCODE_GP1, WPI_UCODE_GP2, WPI_GIO_CHICKEN, WPI_ANA_PLL, WPI_DBG_HPET_MEM, }; static const uint32_t prph_tbl[] = { WPI_APMG_CLK_CTRL, WPI_APMG_PS, WPI_APMG_PCI_STT, WPI_APMG_RFKILL, }; DPRINTF(sc, WPI_DEBUG_REGISTER,"%s","\n"); for (i = 0; i < nitems(csr_tbl); i++) { DPRINTF(sc, WPI_DEBUG_REGISTER, " %-18s: 0x%08x ", wpi_get_csr_string(csr_tbl[i]), WPI_READ(sc, csr_tbl[i])); if ((i + 1) % 2 == 0) DPRINTF(sc, WPI_DEBUG_REGISTER, "\n"); } DPRINTF(sc, WPI_DEBUG_REGISTER, "\n\n"); if (wpi_nic_lock(sc) == 0) { for (i = 0; i < nitems(prph_tbl); i++) { DPRINTF(sc, WPI_DEBUG_REGISTER, " %-18s: 0x%08x ", wpi_get_prph_string(prph_tbl[i]), wpi_prph_read(sc, prph_tbl[i])); if ((i + 1) % 2 == 0) DPRINTF(sc, WPI_DEBUG_REGISTER, "\n"); } DPRINTF(sc, WPI_DEBUG_REGISTER, "\n"); wpi_nic_unlock(sc); } else { DPRINTF(sc, WPI_DEBUG_REGISTER, "Cannot access internal registers.\n"); } } #endif /* * Dump the error log of the firmware when a firmware panic occurs. Although * we can't debug the firmware because it is neither open source nor free, it * can help us to identify certain classes of problems. */ static void wpi_fatal_intr(struct wpi_softc *sc) { struct wpi_fw_dump dump; uint32_t i, offset, count; /* Check that the error log address is valid. */ if (sc->errptr < WPI_FW_DATA_BASE || sc->errptr + sizeof (dump) > WPI_FW_DATA_BASE + WPI_FW_DATA_MAXSZ) { printf("%s: bad firmware error log address 0x%08x\n", __func__, sc->errptr); return; } if (wpi_nic_lock(sc) != 0) { printf("%s: could not read firmware error log\n", __func__); return; } /* Read number of entries in the log. */ count = wpi_mem_read(sc, sc->errptr); if (count == 0 || count * sizeof (dump) > WPI_FW_DATA_MAXSZ) { printf("%s: invalid count field (count = %u)\n", __func__, count); wpi_nic_unlock(sc); return; } /* Skip "count" field. */ offset = sc->errptr + sizeof (uint32_t); printf("firmware error log (count = %u):\n", count); for (i = 0; i < count; i++) { wpi_mem_read_region_4(sc, offset, (uint32_t *)&dump, sizeof (dump) / sizeof (uint32_t)); printf(" error type = \"%s\" (0x%08X)\n", (dump.desc < nitems(wpi_fw_errmsg)) ? wpi_fw_errmsg[dump.desc] : "UNKNOWN", dump.desc); printf(" error data = 0x%08X\n", dump.data); printf(" branch link = 0x%08X%08X\n", dump.blink[0], dump.blink[1]); printf(" interrupt link = 0x%08X%08X\n", dump.ilink[0], dump.ilink[1]); printf(" time = %u\n", dump.time); offset += sizeof (dump); } wpi_nic_unlock(sc); /* Dump driver status (TX and RX rings) while we're here. 
*/ printf("driver status:\n"); WPI_TXQ_LOCK(sc); for (i = 0; i < WPI_DRV_NTXQUEUES; i++) { struct wpi_tx_ring *ring = &sc->txq[i]; printf(" tx ring %2d: qid=%-2d cur=%-3d queued=%-3d\n", i, ring->qid, ring->cur, ring->queued); } WPI_TXQ_UNLOCK(sc); printf(" rx ring: cur=%d\n", sc->rxq.cur); } static void wpi_intr(void *arg) { struct wpi_softc *sc = arg; uint32_t r1, r2; WPI_LOCK(sc); /* Disable interrupts. */ WPI_WRITE(sc, WPI_INT_MASK, 0); r1 = WPI_READ(sc, WPI_INT); if (__predict_false(r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)) goto end; /* Hardware gone! */ r2 = WPI_READ(sc, WPI_FH_INT); DPRINTF(sc, WPI_DEBUG_INTR, "%s: reg1=0x%08x reg2=0x%08x\n", __func__, r1, r2); if (r1 == 0 && r2 == 0) goto done; /* Interrupt not for us. */ /* Acknowledge interrupts. */ WPI_WRITE(sc, WPI_INT, r1); WPI_WRITE(sc, WPI_FH_INT, r2); if (__predict_false(r1 & (WPI_INT_SW_ERR | WPI_INT_HW_ERR))) { struct ieee80211com *ic = &sc->sc_ic; device_printf(sc->sc_dev, "fatal firmware error\n"); #ifdef WPI_DEBUG wpi_debug_registers(sc); #endif wpi_fatal_intr(sc); DPRINTF(sc, WPI_DEBUG_HW, "(%s)\n", (r1 & WPI_INT_SW_ERR) ? "(Software Error)" : "(Hardware Error)"); ieee80211_restart_all(ic); goto end; } if ((r1 & (WPI_INT_FH_RX | WPI_INT_SW_RX)) || (r2 & WPI_FH_INT_RX)) wpi_notif_intr(sc); if (r1 & WPI_INT_ALIVE) wakeup(sc); /* Firmware is alive. */ if (r1 & WPI_INT_WAKEUP) wpi_wakeup_intr(sc); done: /* Re-enable interrupts. */ if (__predict_true(sc->sc_running)) WPI_WRITE(sc, WPI_INT_MASK, WPI_INT_MASK_DEF); end: WPI_UNLOCK(sc); } static void wpi_free_txfrags(struct wpi_softc *sc, uint16_t ac) { struct wpi_tx_ring *ring; struct wpi_tx_data *data; uint8_t cur; WPI_TXQ_LOCK(sc); ring = &sc->txq[ac]; while (ring->pending != 0) { ring->pending--; cur = (ring->cur + ring->pending) % WPI_TX_RING_COUNT; data = &ring->data[cur]; bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->data_dmat, data->map); m_freem(data->m); data->m = NULL; ieee80211_node_decref(data->ni); data->ni = NULL; } WPI_TXQ_UNLOCK(sc); } static int wpi_cmd2(struct wpi_softc *sc, struct wpi_buf *buf) { struct ieee80211_frame *wh; struct wpi_tx_cmd *cmd; struct wpi_tx_data *data; struct wpi_tx_desc *desc; struct wpi_tx_ring *ring; struct mbuf *m1; bus_dma_segment_t *seg, segs[WPI_MAX_SCATTER]; uint8_t cur, pad; uint16_t hdrlen; int error, i, nsegs, totlen, frag; WPI_TXQ_LOCK(sc); KASSERT(buf->size <= sizeof(buf->data), ("buffer overflow")); DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); if (__predict_false(sc->sc_running == 0)) { /* wpi_stop() was called */ error = ENETDOWN; goto end; } wh = mtod(buf->m, struct ieee80211_frame *); hdrlen = ieee80211_anyhdrsize(wh); totlen = buf->m->m_pkthdr.len; frag = ((buf->m->m_flags & (M_FRAG | M_LASTFRAG)) == M_FRAG); if (__predict_false(totlen < sizeof(struct ieee80211_frame_min))) { error = EINVAL; goto end; } if (hdrlen & 3) { /* First segment length must be a multiple of 4. */ pad = 4 - (hdrlen & 3); } else pad = 0; ring = &sc->txq[buf->ac]; cur = (ring->cur + ring->pending) % WPI_TX_RING_COUNT; desc = &ring->desc[cur]; data = &ring->data[cur]; /* Prepare TX firmware command. */ cmd = &ring->cmd[cur]; cmd->code = buf->code; cmd->flags = 0; cmd->qid = ring->qid; cmd->idx = cur; memcpy(cmd->data, buf->data, buf->size); /* Save and trim IEEE802.11 header. 
*/ memcpy((uint8_t *)(cmd->data + buf->size), wh, hdrlen); m_adj(buf->m, hdrlen); error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, buf->m, segs, &nsegs, BUS_DMA_NOWAIT); if (error != 0 && error != EFBIG) { device_printf(sc->sc_dev, "%s: can't map mbuf (error %d)\n", __func__, error); goto end; } if (error != 0) { /* Too many DMA segments, linearize mbuf. */ m1 = m_collapse(buf->m, M_NOWAIT, WPI_MAX_SCATTER - 1); if (m1 == NULL) { device_printf(sc->sc_dev, "%s: could not defrag mbuf\n", __func__); error = ENOBUFS; goto end; } buf->m = m1; error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, buf->m, segs, &nsegs, BUS_DMA_NOWAIT); if (__predict_false(error != 0)) { /* XXX fix this (applicable to the iwn(4) too) */ /* * NB: Do not return error; * original mbuf does not exist anymore. */ device_printf(sc->sc_dev, "%s: can't map mbuf (error %d)\n", __func__, error); if (ring->qid < WPI_CMD_QUEUE_NUM) { if_inc_counter(buf->ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1); if (!frag) ieee80211_free_node(buf->ni); } m_freem(buf->m); error = 0; goto end; } } KASSERT(nsegs < WPI_MAX_SCATTER, ("too many DMA segments, nsegs (%d) should be less than %d", nsegs, WPI_MAX_SCATTER)); data->m = buf->m; data->ni = buf->ni; data->hdrlen = hdrlen; DPRINTF(sc, WPI_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n", __func__, ring->qid, cur, totlen, nsegs); /* Fill TX descriptor. */ desc->nsegs = WPI_PAD32(totlen + pad) << 4 | (1 + nsegs); /* First DMA segment is used by the TX command. */ desc->segs[0].addr = htole32(data->cmd_paddr); desc->segs[0].len = htole32(4 + buf->size + hdrlen + pad); /* Other DMA segments are for data payload. */ seg = &segs[0]; for (i = 1; i <= nsegs; i++) { desc->segs[i].addr = htole32(seg->ds_addr); desc->segs[i].len = htole32(seg->ds_len); seg++; } bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, BUS_DMASYNC_PREWRITE); ring->pending += 1; if (!frag) { if (ring->qid < WPI_CMD_QUEUE_NUM) { WPI_TXQ_STATE_LOCK(sc); ring->queued += ring->pending; callout_reset(&sc->tx_timeout, 5*hz, wpi_tx_timeout, sc); WPI_TXQ_STATE_UNLOCK(sc); } /* Kick TX ring. */ ring->cur = (ring->cur + ring->pending) % WPI_TX_RING_COUNT; ring->pending = 0; sc->sc_update_tx_ring(sc, ring); } else - ieee80211_node_incref(data->ni); + (void) ieee80211_ref_node(data->ni); end: DPRINTF(sc, WPI_DEBUG_TRACE, error ? TRACE_STR_END_ERR : TRACE_STR_END, __func__); WPI_TXQ_UNLOCK(sc); return (error); } /* * Construct the data packet for a transmit buffer. */ static int wpi_tx_data(struct wpi_softc *sc, struct mbuf *m, struct ieee80211_node *ni) { const struct ieee80211_txparam *tp = ni->ni_txparms; struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct wpi_node *wn = WPI_NODE(ni); struct ieee80211_frame *wh; struct ieee80211_key *k = NULL; struct wpi_buf tx_data; struct wpi_cmd_data *tx = (struct wpi_cmd_data *)&tx_data.data; uint32_t flags; uint16_t ac, qos; uint8_t tid, type, rate; int swcrypt, ismcast, totlen; wh = mtod(m, struct ieee80211_frame *); type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); swcrypt = 1; /* Select EDCA Access Category and TX ring for this frame. */ if (IEEE80211_QOS_HAS_SEQ(wh)) { qos = ((const struct ieee80211_qosframe *)wh)->i_qos[0]; tid = qos & IEEE80211_QOS_TID; } else { qos = 0; tid = 0; } ac = M_WME_GETAC(m); /* Choose a TX rate index. 
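 *
 * Summary of the selection order implemented below, highest
 * precedence first:
 *
 *	1. management/control frames and EAPOL  ->  tp->mgmtrate
 *	2. group-addressed frames               ->  tp->mcastrate
 *	3. a user-fixed unicast rate            ->  tp->ucastrate
 *	4. everything else                      ->  ieee80211_ratectl_rate()
 *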
*/ if (type == IEEE80211_FC0_TYPE_MGT || type == IEEE80211_FC0_TYPE_CTL || (m->m_flags & M_EAPOL) != 0) rate = tp->mgmtrate; else if (ismcast) rate = tp->mcastrate; else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) rate = tp->ucastrate; else { /* XXX pass pktlen */ (void) ieee80211_ratectl_rate(ni, NULL, 0); rate = ni->ni_txrate; } /* Encrypt the frame if need be. */ if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { /* Retrieve key for TX. */ k = ieee80211_crypto_encap(ni, m); if (k == NULL) return (ENOBUFS); swcrypt = k->wk_flags & IEEE80211_KEY_SWCRYPT; /* 802.11 header may have moved. */ wh = mtod(m, struct ieee80211_frame *); } totlen = m->m_pkthdr.len; if (ieee80211_radiotap_active_vap(vap)) { struct wpi_tx_radiotap_header *tap = &sc->sc_txtap; tap->wt_flags = 0; tap->wt_rate = rate; if (k != NULL) tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP; if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) tap->wt_flags |= IEEE80211_RADIOTAP_F_FRAG; ieee80211_radiotap_tx(vap, m); } flags = 0; if (!ismcast) { /* Unicast frame, check if an ACK is expected. */ if (!qos || (qos & IEEE80211_QOS_ACKPOLICY) != IEEE80211_QOS_ACKPOLICY_NOACK) flags |= WPI_TX_NEED_ACK; } if (!IEEE80211_QOS_HAS_SEQ(wh)) flags |= WPI_TX_AUTO_SEQ; if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) flags |= WPI_TX_MORE_FRAG; /* Check if frame must be protected using RTS/CTS or CTS-to-self. */ if (!ismcast) { /* NB: Group frames are sent using CCK in 802.11b/g. */ if (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) { flags |= WPI_TX_NEED_RTS; } else if ((ic->ic_flags & IEEE80211_F_USEPROT) && WPI_RATE_IS_OFDM(rate)) { if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) flags |= WPI_TX_NEED_CTS; else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) flags |= WPI_TX_NEED_RTS; } if (flags & (WPI_TX_NEED_RTS | WPI_TX_NEED_CTS)) flags |= WPI_TX_FULL_TXOP; } memset(tx, 0, sizeof (struct wpi_cmd_data)); if (type == IEEE80211_FC0_TYPE_MGT) { uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; /* Tell HW to set timestamp in probe responses. 
*/ if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) flags |= WPI_TX_INSERT_TSTAMP; if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ || subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) tx->timeout = htole16(3); else tx->timeout = htole16(2); } if (ismcast || type != IEEE80211_FC0_TYPE_DATA) tx->id = WPI_ID_BROADCAST; else { if (wn->id == WPI_ID_UNDEFINED) { device_printf(sc->sc_dev, "%s: undefined node id\n", __func__); return (EINVAL); } tx->id = wn->id; } if (!swcrypt) { switch (k->wk_cipher->ic_cipher) { case IEEE80211_CIPHER_AES_CCM: tx->security = WPI_CIPHER_CCMP; break; default: break; } memcpy(tx->key, k->wk_key, k->wk_keylen); } if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) { struct mbuf *next = m->m_nextpkt; tx->lnext = htole16(next->m_pkthdr.len); tx->fnext = htole32(tx->security | (flags & WPI_TX_NEED_ACK) | WPI_NEXT_STA_ID(tx->id)); } tx->len = htole16(totlen); tx->flags = htole32(flags); tx->plcp = rate2plcp(rate); tx->tid = tid; tx->lifetime = htole32(WPI_LIFETIME_INFINITE); tx->ofdm_mask = 0xff; tx->cck_mask = 0x0f; tx->rts_ntries = 7; tx->data_ntries = tp->maxretry; tx_data.ni = ni; tx_data.m = m; tx_data.size = sizeof(struct wpi_cmd_data); tx_data.code = WPI_CMD_TX_DATA; tx_data.ac = ac; return wpi_cmd2(sc, &tx_data); } static int wpi_tx_data_raw(struct wpi_softc *sc, struct mbuf *m, struct ieee80211_node *ni, const struct ieee80211_bpf_params *params) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211_key *k = NULL; struct ieee80211_frame *wh; struct wpi_buf tx_data; struct wpi_cmd_data *tx = (struct wpi_cmd_data *)&tx_data.data; uint32_t flags; uint8_t ac, type, rate; int swcrypt, totlen; wh = mtod(m, struct ieee80211_frame *); type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; swcrypt = 1; ac = params->ibp_pri & 3; /* Choose a TX rate index. */ rate = params->ibp_rate0; flags = 0; if (!IEEE80211_QOS_HAS_SEQ(wh)) flags |= WPI_TX_AUTO_SEQ; if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0) flags |= WPI_TX_NEED_ACK; if (params->ibp_flags & IEEE80211_BPF_RTS) flags |= WPI_TX_NEED_RTS; if (params->ibp_flags & IEEE80211_BPF_CTS) flags |= WPI_TX_NEED_CTS; if (flags & (WPI_TX_NEED_RTS | WPI_TX_NEED_CTS)) flags |= WPI_TX_FULL_TXOP; /* Encrypt the frame if need be. */ if (params->ibp_flags & IEEE80211_BPF_CRYPTO) { /* Retrieve key for TX. */ k = ieee80211_crypto_encap(ni, m); if (k == NULL) return (ENOBUFS); swcrypt = k->wk_flags & IEEE80211_KEY_SWCRYPT; /* 802.11 header may have moved. */ wh = mtod(m, struct ieee80211_frame *); } totlen = m->m_pkthdr.len; if (ieee80211_radiotap_active_vap(vap)) { struct wpi_tx_radiotap_header *tap = &sc->sc_txtap; tap->wt_flags = 0; tap->wt_rate = rate; if (params->ibp_flags & IEEE80211_BPF_CRYPTO) tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP; ieee80211_radiotap_tx(vap, m); } memset(tx, 0, sizeof (struct wpi_cmd_data)); if (type == IEEE80211_FC0_TYPE_MGT) { uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; /* Tell HW to set timestamp in probe responses. 
*/ if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) flags |= WPI_TX_INSERT_TSTAMP; if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ || subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) tx->timeout = htole16(3); else tx->timeout = htole16(2); } if (!swcrypt) { switch (k->wk_cipher->ic_cipher) { case IEEE80211_CIPHER_AES_CCM: tx->security = WPI_CIPHER_CCMP; break; default: break; } memcpy(tx->key, k->wk_key, k->wk_keylen); } tx->len = htole16(totlen); tx->flags = htole32(flags); tx->plcp = rate2plcp(rate); tx->id = WPI_ID_BROADCAST; tx->lifetime = htole32(WPI_LIFETIME_INFINITE); tx->rts_ntries = params->ibp_try1; tx->data_ntries = params->ibp_try0; tx_data.ni = ni; tx_data.m = m; tx_data.size = sizeof(struct wpi_cmd_data); tx_data.code = WPI_CMD_TX_DATA; tx_data.ac = ac; return wpi_cmd2(sc, &tx_data); } static __inline int wpi_tx_ring_free_space(struct wpi_softc *sc, uint16_t ac) { struct wpi_tx_ring *ring = &sc->txq[ac]; int retval; WPI_TXQ_STATE_LOCK(sc); retval = WPI_TX_RING_HIMARK - ring->queued; WPI_TXQ_STATE_UNLOCK(sc); return retval; } static int wpi_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params) { struct ieee80211com *ic = ni->ni_ic; struct wpi_softc *sc = ic->ic_softc; uint16_t ac; int error = 0; DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); ac = M_WME_GETAC(m); WPI_TX_LOCK(sc); /* NB: no fragments here */ if (sc->sc_running == 0 || wpi_tx_ring_free_space(sc, ac) < 1) { error = sc->sc_running ? ENOBUFS : ENETDOWN; goto unlock; } if (params == NULL) { /* * Legacy path; interpret frame contents to decide * precisely how to send the frame. */ error = wpi_tx_data(sc, m, ni); } else { /* * Caller supplied explicit parameters to use in * sending the frame. */ error = wpi_tx_data_raw(sc, m, ni, params); } unlock: WPI_TX_UNLOCK(sc); if (error != 0) { m_freem(m); DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); return error; } DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); return 0; } static int wpi_transmit(struct ieee80211com *ic, struct mbuf *m) { struct wpi_softc *sc = ic->ic_softc; struct ieee80211_node *ni; struct mbuf *mnext; uint16_t ac; int error, nmbufs; WPI_TX_LOCK(sc); DPRINTF(sc, WPI_DEBUG_XMIT, "%s: called\n", __func__); /* Check if interface is up & running. */ if (__predict_false(sc->sc_running == 0)) { error = ENXIO; goto unlock; } nmbufs = 1; for (mnext = m->m_nextpkt; mnext != NULL; mnext = mnext->m_nextpkt) nmbufs++; /* Check for available space. */ ac = M_WME_GETAC(m); if (wpi_tx_ring_free_space(sc, ac) < nmbufs) { error = ENOBUFS; goto unlock; } error = 0; ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; do { mnext = m->m_nextpkt; if (wpi_tx_data(sc, m, ni) != 0) { if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, nmbufs); wpi_free_txfrags(sc, ac); ieee80211_free_mbuf(m); ieee80211_free_node(ni); break; } } while((m = mnext) != NULL); DPRINTF(sc, WPI_DEBUG_XMIT, "%s: done\n", __func__); unlock: WPI_TX_UNLOCK(sc); return (error); } static void wpi_watchdog_rfkill(void *arg) { struct wpi_softc *sc = arg; struct ieee80211com *ic = &sc->sc_ic; DPRINTF(sc, WPI_DEBUG_WATCHDOG, "RFkill Watchdog: tick\n"); /* No need to lock firmware memory. */ if ((wpi_prph_read(sc, WPI_APMG_RFKILL) & 0x1) == 0) { /* Radio kill switch is still off. 
*/ callout_reset(&sc->watchdog_rfkill, hz, wpi_watchdog_rfkill, sc); } else ieee80211_runtask(ic, &sc->sc_radioon_task); } static void wpi_scan_timeout(void *arg) { struct wpi_softc *sc = arg; struct ieee80211com *ic = &sc->sc_ic; ic_printf(ic, "scan timeout\n"); ieee80211_restart_all(ic); } static void wpi_tx_timeout(void *arg) { struct wpi_softc *sc = arg; struct ieee80211com *ic = &sc->sc_ic; ic_printf(ic, "device timeout\n"); ieee80211_restart_all(ic); } static void wpi_parent(struct ieee80211com *ic) { struct wpi_softc *sc = ic->ic_softc; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); if (ic->ic_nrunning > 0) { if (wpi_init(sc) == 0) { ieee80211_notify_radio(ic, 1); ieee80211_start_all(ic); } else { ieee80211_notify_radio(ic, 0); ieee80211_stop(vap); } } else { ieee80211_notify_radio(ic, 0); wpi_stop(sc); } } /* * Send a command to the firmware. */ static int wpi_cmd(struct wpi_softc *sc, uint8_t code, const void *buf, uint16_t size, int async) { struct wpi_tx_ring *ring = &sc->txq[WPI_CMD_QUEUE_NUM]; struct wpi_tx_desc *desc; struct wpi_tx_data *data; struct wpi_tx_cmd *cmd; struct mbuf *m; bus_addr_t paddr; uint16_t totlen; int error; WPI_TXQ_LOCK(sc); DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); if (__predict_false(sc->sc_running == 0)) { /* wpi_stop() was called */ if (code == WPI_CMD_SCAN) error = ENETDOWN; else error = 0; goto fail; } if (async == 0) WPI_LOCK_ASSERT(sc); DPRINTF(sc, WPI_DEBUG_CMD, "%s: cmd %s size %u async %d\n", __func__, wpi_cmd_str(code), size, async); desc = &ring->desc[ring->cur]; data = &ring->data[ring->cur]; totlen = 4 + size; if (size > sizeof cmd->data) { /* Command is too large to fit in a descriptor. */ if (totlen > MCLBYTES) { error = EINVAL; goto fail; } m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); if (m == NULL) { error = ENOMEM; goto fail; } cmd = mtod(m, struct wpi_tx_cmd *); error = bus_dmamap_load(ring->data_dmat, data->map, cmd, totlen, wpi_dma_map_addr, &paddr, BUS_DMA_NOWAIT); if (error != 0) { m_freem(m); goto fail; } data->m = m; } else { cmd = &ring->cmd[ring->cur]; paddr = data->cmd_paddr; } cmd->code = code; cmd->flags = 0; cmd->qid = ring->qid; cmd->idx = ring->cur; memcpy(cmd->data, buf, size); desc->nsegs = 1 + (WPI_PAD32(size) << 4); desc->segs[0].addr = htole32(paddr); desc->segs[0].len = htole32(totlen); if (size > sizeof cmd->data) { bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE); } else { bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map, BUS_DMASYNC_PREWRITE); } bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, BUS_DMASYNC_PREWRITE); /* Kick command ring. */ ring->cur = (ring->cur + 1) % WPI_TX_RING_COUNT; sc->sc_update_tx_ring(sc, ring); DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); WPI_TXQ_UNLOCK(sc); return async ? 0 : mtx_sleep(cmd, &sc->sc_mtx, PCATCH, "wpicmd", hz); fail: DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); WPI_TXQ_UNLOCK(sc); return error; } /* * Configure HW multi-rate retries. */ static int wpi_mrr_setup(struct wpi_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct wpi_mrr_setup mrr; uint8_t i; int error; /* CCK rates (not used with 802.11a). */ for (i = WPI_RIDX_CCK1; i <= WPI_RIDX_CCK11; i++) { mrr.rates[i].flags = 0; mrr.rates[i].plcp = wpi_ridx_to_plcp[i]; /* Fallback to the immediate lower CCK rate (if any.) */ mrr.rates[i].next = (i == WPI_RIDX_CCK1) ? WPI_RIDX_CCK1 : i - 1; /* Try twice at this rate before falling back to "next". */ mrr.rates[i].ntries = WPI_NTRIES_DEFAULT; } /* OFDM rates (not used with 802.11b). 
*/ for (i = WPI_RIDX_OFDM6; i <= WPI_RIDX_OFDM54; i++) { mrr.rates[i].flags = 0; mrr.rates[i].plcp = wpi_ridx_to_plcp[i]; /* Fallback to the immediate lower rate (if any.) */ /* We allow fallback from OFDM/6 to CCK/2 in 11b/g mode. */ mrr.rates[i].next = (i == WPI_RIDX_OFDM6) ? ((ic->ic_curmode == IEEE80211_MODE_11A) ? WPI_RIDX_OFDM6 : WPI_RIDX_CCK2) : i - 1; /* Try twice at this rate before falling back to "next". */ mrr.rates[i].ntries = WPI_NTRIES_DEFAULT; } /* Setup MRR for control frames. */ mrr.which = htole32(WPI_MRR_CTL); error = wpi_cmd(sc, WPI_CMD_MRR_SETUP, &mrr, sizeof mrr, 0); if (error != 0) { device_printf(sc->sc_dev, "could not setup MRR for control frames\n"); return error; } /* Setup MRR for data frames. */ mrr.which = htole32(WPI_MRR_DATA); error = wpi_cmd(sc, WPI_CMD_MRR_SETUP, &mrr, sizeof mrr, 0); if (error != 0) { device_printf(sc->sc_dev, "could not setup MRR for data frames\n"); return error; } return 0; } static int wpi_add_node(struct wpi_softc *sc, struct ieee80211_node *ni) { struct ieee80211com *ic = ni->ni_ic; struct wpi_vap *wvp = WPI_VAP(ni->ni_vap); struct wpi_node *wn = WPI_NODE(ni); struct wpi_node_info node; int error; DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); if (wn->id == WPI_ID_UNDEFINED) return EINVAL; memset(&node, 0, sizeof node); IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr); node.id = wn->id; node.plcp = (ic->ic_curmode == IEEE80211_MODE_11A) ? wpi_ridx_to_plcp[WPI_RIDX_OFDM6] : wpi_ridx_to_plcp[WPI_RIDX_CCK1]; node.action = htole32(WPI_ACTION_SET_RATE); node.antenna = WPI_ANTENNA_BOTH; DPRINTF(sc, WPI_DEBUG_NODE, "%s: adding node %d (%s)\n", __func__, wn->id, ether_sprintf(ni->ni_macaddr)); error = wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, 1); if (error != 0) { device_printf(sc->sc_dev, "%s: wpi_cmd() call failed with error code %d\n", __func__, error); return error; } if (wvp->wv_gtk != 0) { error = wpi_set_global_keys(ni); if (error != 0) { device_printf(sc->sc_dev, "%s: error while setting global keys\n", __func__); return ENXIO; } } return 0; } /* * Broadcast node is used to send group-addressed and management frames. */ static int wpi_add_broadcast_node(struct wpi_softc *sc, int async) { struct ieee80211com *ic = &sc->sc_ic; struct wpi_node_info node; DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); memset(&node, 0, sizeof node); IEEE80211_ADDR_COPY(node.macaddr, ieee80211broadcastaddr); node.id = WPI_ID_BROADCAST; node.plcp = (ic->ic_curmode == IEEE80211_MODE_11A) ? 
wpi_ridx_to_plcp[WPI_RIDX_OFDM6] : wpi_ridx_to_plcp[WPI_RIDX_CCK1]; node.action = htole32(WPI_ACTION_SET_RATE); node.antenna = WPI_ANTENNA_BOTH; DPRINTF(sc, WPI_DEBUG_NODE, "%s: adding broadcast node\n", __func__); return wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, async); } static int wpi_add_sta_node(struct wpi_softc *sc, struct ieee80211_node *ni) { struct wpi_node *wn = WPI_NODE(ni); int error; DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); wn->id = wpi_add_node_entry_sta(sc); if ((error = wpi_add_node(sc, ni)) != 0) { wpi_del_node_entry(sc, wn->id); wn->id = WPI_ID_UNDEFINED; return error; } return 0; } static int wpi_add_ibss_node(struct wpi_softc *sc, struct ieee80211_node *ni) { struct wpi_node *wn = WPI_NODE(ni); int error; KASSERT(wn->id == WPI_ID_UNDEFINED, ("the node %d was added before", wn->id)); DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); if ((wn->id = wpi_add_node_entry_adhoc(sc)) == WPI_ID_UNDEFINED) { device_printf(sc->sc_dev, "%s: h/w table is full\n", __func__); return ENOMEM; } if ((error = wpi_add_node(sc, ni)) != 0) { wpi_del_node_entry(sc, wn->id); wn->id = WPI_ID_UNDEFINED; return error; } return 0; } static void wpi_del_node(struct wpi_softc *sc, struct ieee80211_node *ni) { struct wpi_node *wn = WPI_NODE(ni); struct wpi_cmd_del_node node; int error; KASSERT(wn->id != WPI_ID_UNDEFINED, ("undefined node id passed")); DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); memset(&node, 0, sizeof node); IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr); node.count = 1; DPRINTF(sc, WPI_DEBUG_NODE, "%s: deleting node %d (%s)\n", __func__, wn->id, ether_sprintf(ni->ni_macaddr)); error = wpi_cmd(sc, WPI_CMD_DEL_NODE, &node, sizeof node, 1); if (error != 0) { device_printf(sc->sc_dev, "%s: could not delete node %u, error %d\n", __func__, wn->id, error); } } static int wpi_updateedca(struct ieee80211com *ic) { #define WPI_EXP2(x) ((1 << (x)) - 1) /* CWmin = 2^ECWmin - 1 */ struct wpi_softc *sc = ic->ic_softc; struct chanAccParams chp; struct wpi_edca_params cmd; int aci, error; ieee80211_wme_ic_getparams(ic, &chp); DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); memset(&cmd, 0, sizeof cmd); cmd.flags = htole32(WPI_EDCA_UPDATE); for (aci = 0; aci < WME_NUM_AC; aci++) { const struct wmeParams *ac = &chp.cap_wmeParams[aci]; cmd.ac[aci].aifsn = ac->wmep_aifsn; cmd.ac[aci].cwmin = htole16(WPI_EXP2(ac->wmep_logcwmin)); cmd.ac[aci].cwmax = htole16(WPI_EXP2(ac->wmep_logcwmax)); cmd.ac[aci].txoplimit = htole16(IEEE80211_TXOP_TO_US(ac->wmep_txopLimit)); DPRINTF(sc, WPI_DEBUG_EDCA, "setting WME for queue %d aifsn=%d cwmin=%d cwmax=%d " "txoplimit=%d\n", aci, cmd.ac[aci].aifsn, cmd.ac[aci].cwmin, cmd.ac[aci].cwmax, cmd.ac[aci].txoplimit); } error = wpi_cmd(sc, WPI_CMD_EDCA_PARAMS, &cmd, sizeof cmd, 1); DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); return error; #undef WPI_EXP2 } static void wpi_set_promisc(struct wpi_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); uint32_t promisc_filter; promisc_filter = WPI_FILTER_CTL; if (vap != NULL && vap->iv_opmode != IEEE80211_M_HOSTAP) promisc_filter |= WPI_FILTER_PROMISC; if (ic->ic_promisc > 0) sc->rxon.filter |= htole32(promisc_filter); else sc->rxon.filter &= ~htole32(promisc_filter); } static void wpi_update_promisc(struct ieee80211com *ic) { struct wpi_softc *sc = ic->ic_softc; WPI_LOCK(sc); if (sc->sc_running == 0) { WPI_UNLOCK(sc); return; } WPI_UNLOCK(sc); WPI_RXON_LOCK(sc); wpi_set_promisc(sc); if (wpi_send_rxon(sc, 1, 1) != 0) { 
device_printf(sc->sc_dev, "%s: could not send RXON\n", __func__); } WPI_RXON_UNLOCK(sc); } static void wpi_update_mcast(struct ieee80211com *ic) { /* Ignore */ } static void wpi_set_led(struct wpi_softc *sc, uint8_t which, uint8_t off, uint8_t on) { struct wpi_cmd_led led; DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); led.which = which; led.unit = htole32(100000); /* on/off in unit of 100ms */ led.off = off; led.on = on; (void)wpi_cmd(sc, WPI_CMD_SET_LED, &led, sizeof led, 1); } static int wpi_set_timing(struct wpi_softc *sc, struct ieee80211_node *ni) { struct wpi_cmd_timing cmd; uint64_t val, mod; DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); memset(&cmd, 0, sizeof cmd); memcpy(&cmd.tstamp, ni->ni_tstamp.data, sizeof (uint64_t)); cmd.bintval = htole16(ni->ni_intval); cmd.lintval = htole16(10); /* Compute remaining time until next beacon. */ val = (uint64_t)ni->ni_intval * IEEE80211_DUR_TU; mod = le64toh(cmd.tstamp) % val; cmd.binitval = htole32((uint32_t)(val - mod)); DPRINTF(sc, WPI_DEBUG_RESET, "timing bintval=%u tstamp=%ju, init=%u\n", ni->ni_intval, le64toh(cmd.tstamp), (uint32_t)(val - mod)); return wpi_cmd(sc, WPI_CMD_TIMING, &cmd, sizeof cmd, 1); } /* * This function is called periodically (every 60 seconds) to adjust output * power to temperature changes. */ static void wpi_power_calibration(struct wpi_softc *sc) { int temp; DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); /* Update sensor data. */ temp = (int)WPI_READ(sc, WPI_UCODE_GP2); DPRINTF(sc, WPI_DEBUG_TEMP, "Temp in calibration is: %d\n", temp); /* Sanity-check read value. */ if (temp < -260 || temp > 25) { /* This can't be correct, ignore. */ DPRINTF(sc, WPI_DEBUG_TEMP, "out-of-range temperature reported: %d\n", temp); return; } DPRINTF(sc, WPI_DEBUG_TEMP, "temperature %d->%d\n", sc->temp, temp); /* Adjust Tx power if need be. */ if (abs(temp - sc->temp) <= 6) return; sc->temp = temp; if (wpi_set_txpower(sc, 1) != 0) { /* just warn, too bad for the automatic calibration... */ device_printf(sc->sc_dev,"could not adjust Tx power\n"); } } /* * Set TX power for current channel. */ static int wpi_set_txpower(struct wpi_softc *sc, int async) { struct wpi_power_group *group; struct wpi_cmd_txpower cmd; uint8_t chan; int idx, is_chan_5ghz, i; /* Retrieve current channel from last RXON. */ chan = sc->rxon.chan; is_chan_5ghz = (sc->rxon.flags & htole32(WPI_RXON_24GHZ)) == 0; /* Find the TX power group to which this channel belongs. */ if (is_chan_5ghz) { for (group = &sc->groups[1]; group < &sc->groups[4]; group++) if (chan <= group->chan) break; } else group = &sc->groups[0]; memset(&cmd, 0, sizeof cmd); cmd.band = is_chan_5ghz ? WPI_BAND_5GHZ : WPI_BAND_2GHZ; cmd.chan = htole16(chan); /* Set TX power for all OFDM and CCK rates. */ for (i = 0; i <= WPI_RIDX_MAX ; i++) { /* Retrieve TX power for this channel/rate. */ idx = wpi_get_power_index(sc, group, chan, is_chan_5ghz, i); cmd.rates[i].plcp = wpi_ridx_to_plcp[i]; if (is_chan_5ghz) { cmd.rates[i].rf_gain = wpi_rf_gain_5ghz[idx]; cmd.rates[i].dsp_gain = wpi_dsp_gain_5ghz[idx]; } else { cmd.rates[i].rf_gain = wpi_rf_gain_2ghz[idx]; cmd.rates[i].dsp_gain = wpi_dsp_gain_2ghz[idx]; } DPRINTF(sc, WPI_DEBUG_TEMP, "chan %d/ridx %d: power index %d\n", chan, i, idx); } return wpi_cmd(sc, WPI_CMD_TXPOWER, &cmd, sizeof cmd, async); } /* * Determine Tx power index for a given channel/rate combination. * This takes into account the regulatory information from EEPROM and the * current temperature. 
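 *
 * A worked example of the fixed-point interpolation used below, with
 * purely illustrative sample values (not real calibration data):
 * assume the two surrounding EEPROM samples are {power 8, index 20}
 * and {power 16, index 44} and the target power "pwr" is 10.  Then
 *
 *	idx = 20 + fdivround((10 - 8) * (44 - 20), 16 - 8, 19)
 *	    = 20 + fdivround(48, 8, 19)
 *	    = 20 + 6
 *	    = 26
 *
 * i.e. ordinary linear interpolation, with fdivround() performing the
 * division in 19-bit fixed point.  The subsequent adjustments move the
 * index by roughly one step per 9 degrees of temperature drift (11/100
 * of a step per degree) and by 10 steps for CCK rates, which the -5dB
 * comment implies is about 0.5dB per index step.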
*/ static int wpi_get_power_index(struct wpi_softc *sc, struct wpi_power_group *group, uint8_t chan, int is_chan_5ghz, int ridx) { /* Fixed-point arithmetic division using a n-bit fractional part. */ #define fdivround(a, b, n) \ ((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n)) /* Linear interpolation. */ #define interpolate(x, x1, y1, x2, y2, n) \ ((y1) + fdivround(((x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n)) struct wpi_power_sample *sample; int pwr, idx; /* Default TX power is group maximum TX power minus 3dB. */ pwr = group->maxpwr / 2; /* Decrease TX power for highest OFDM rates to reduce distortion. */ switch (ridx) { case WPI_RIDX_OFDM36: pwr -= is_chan_5ghz ? 5 : 0; break; case WPI_RIDX_OFDM48: pwr -= is_chan_5ghz ? 10 : 7; break; case WPI_RIDX_OFDM54: pwr -= is_chan_5ghz ? 12 : 9; break; } /* Never exceed the channel maximum allowed TX power. */ pwr = min(pwr, sc->maxpwr[chan]); /* Retrieve TX power index into gain tables from samples. */ for (sample = group->samples; sample < &group->samples[3]; sample++) if (pwr > sample[1].power) break; /* Fixed-point linear interpolation using a 19-bit fractional part. */ idx = interpolate(pwr, sample[0].power, sample[0].index, sample[1].power, sample[1].index, 19); /*- * Adjust power index based on current temperature: * - if cooler than factory-calibrated: decrease output power * - if warmer than factory-calibrated: increase output power */ idx -= (sc->temp - group->temp) * 11 / 100; /* Decrease TX power for CCK rates (-5dB). */ if (ridx >= WPI_RIDX_CCK1) idx += 10; /* Make sure idx stays in a valid range. */ if (idx < 0) return 0; if (idx > WPI_MAX_PWR_INDEX) return WPI_MAX_PWR_INDEX; return idx; #undef interpolate #undef fdivround } /* * Set STA mode power saving level (between 0 and 5). * Level 0 is CAM (Continuously Aware Mode), 5 is for maximum power saving. */ static int wpi_set_pslevel(struct wpi_softc *sc, uint8_t dtim, int level, int async) { struct wpi_pmgt_cmd cmd; const struct wpi_pmgt *pmgt; uint32_t max, reg; uint8_t skip_dtim; int i; DPRINTF(sc, WPI_DEBUG_PWRSAVE, "%s: dtim=%d, level=%d, async=%d\n", __func__, dtim, level, async); /* Select which PS parameters to use. */ if (dtim <= 10) pmgt = &wpi_pmgt[0][level]; else pmgt = &wpi_pmgt[1][level]; memset(&cmd, 0, sizeof cmd); if (level != 0) /* not CAM */ cmd.flags |= htole16(WPI_PS_ALLOW_SLEEP); /* Retrieve PCIe Active State Power Management (ASPM). */ reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + PCIER_LINK_CTL, 1); if (!(reg & PCIEM_LINK_CTL_ASPMC_L0S)) /* L0s Entry disabled. 
*/ cmd.flags |= htole16(WPI_PS_PCI_PMGT); cmd.rxtimeout = htole32(pmgt->rxtimeout * IEEE80211_DUR_TU); cmd.txtimeout = htole32(pmgt->txtimeout * IEEE80211_DUR_TU); if (dtim == 0) { dtim = 1; skip_dtim = 0; } else skip_dtim = pmgt->skip_dtim; if (skip_dtim != 0) { cmd.flags |= htole16(WPI_PS_SLEEP_OVER_DTIM); max = pmgt->intval[4]; if (max == (uint32_t)-1) max = dtim * (skip_dtim + 1); else if (max > dtim) max = rounddown(max, dtim); } else max = dtim; for (i = 0; i < 5; i++) cmd.intval[i] = htole32(MIN(max, pmgt->intval[i])); return wpi_cmd(sc, WPI_CMD_SET_POWER_MODE, &cmd, sizeof cmd, async); } static int wpi_send_btcoex(struct wpi_softc *sc) { struct wpi_bluetooth cmd; memset(&cmd, 0, sizeof cmd); cmd.flags = WPI_BT_COEX_MODE_4WIRE; cmd.lead_time = WPI_BT_LEAD_TIME_DEF; cmd.max_kill = WPI_BT_MAX_KILL_DEF; DPRINTF(sc, WPI_DEBUG_RESET, "%s: configuring bluetooth coexistence\n", __func__); return wpi_cmd(sc, WPI_CMD_BT_COEX, &cmd, sizeof(cmd), 0); } static int wpi_send_rxon(struct wpi_softc *sc, int assoc, int async) { int error; if (async) WPI_RXON_LOCK_ASSERT(sc); if (assoc && wpi_check_bss_filter(sc) != 0) { struct wpi_assoc rxon_assoc; rxon_assoc.flags = sc->rxon.flags; rxon_assoc.filter = sc->rxon.filter; rxon_assoc.ofdm_mask = sc->rxon.ofdm_mask; rxon_assoc.cck_mask = sc->rxon.cck_mask; rxon_assoc.reserved = 0; error = wpi_cmd(sc, WPI_CMD_RXON_ASSOC, &rxon_assoc, sizeof (struct wpi_assoc), async); if (error != 0) { device_printf(sc->sc_dev, "RXON_ASSOC command failed, error %d\n", error); return error; } } else { if (async) { WPI_NT_LOCK(sc); error = wpi_cmd(sc, WPI_CMD_RXON, &sc->rxon, sizeof (struct wpi_rxon), async); if (error == 0) wpi_clear_node_table(sc); WPI_NT_UNLOCK(sc); } else { error = wpi_cmd(sc, WPI_CMD_RXON, &sc->rxon, sizeof (struct wpi_rxon), async); if (error == 0) wpi_clear_node_table(sc); } if (error != 0) { device_printf(sc->sc_dev, "RXON command failed, error %d\n", error); return error; } /* Add broadcast node. */ error = wpi_add_broadcast_node(sc, async); if (error != 0) { device_printf(sc->sc_dev, "could not add broadcast node, error %d\n", error); return error; } } /* Configuration has changed, set Tx power accordingly. */ if ((error = wpi_set_txpower(sc, async)) != 0) { device_printf(sc->sc_dev, "%s: could not set TX power, error %d\n", __func__, error); return error; } return 0; } /** * Configure the card to listen to a particular channel, this transisions the * card in to being able to receive frames from remote devices. */ static int wpi_config(struct wpi_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); struct ieee80211_channel *c = ic->ic_curchan; int error; DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); /* Set power saving level to CAM during initialization. */ if ((error = wpi_set_pslevel(sc, 0, 0, 0)) != 0) { device_printf(sc->sc_dev, "%s: could not set power saving level\n", __func__); return error; } /* Configure bluetooth coexistence. */ if ((error = wpi_send_btcoex(sc)) != 0) { device_printf(sc->sc_dev, "could not configure bluetooth coexistence\n"); return error; } /* Configure adapter. */ memset(&sc->rxon, 0, sizeof (struct wpi_rxon)); IEEE80211_ADDR_COPY(sc->rxon.myaddr, vap->iv_myaddr); /* Set default channel. 
*/ sc->rxon.chan = ieee80211_chan2ieee(ic, c); sc->rxon.flags = htole32(WPI_RXON_TSF | WPI_RXON_CTS_TO_SELF); if (IEEE80211_IS_CHAN_2GHZ(c)) sc->rxon.flags |= htole32(WPI_RXON_AUTO | WPI_RXON_24GHZ); sc->rxon.filter = WPI_FILTER_MULTICAST; switch (ic->ic_opmode) { case IEEE80211_M_STA: sc->rxon.mode = WPI_MODE_STA; break; case IEEE80211_M_IBSS: sc->rxon.mode = WPI_MODE_IBSS; sc->rxon.filter |= WPI_FILTER_BEACON; break; case IEEE80211_M_HOSTAP: /* XXX workaround for beaconing */ sc->rxon.mode = WPI_MODE_IBSS; sc->rxon.filter |= WPI_FILTER_ASSOC | WPI_FILTER_PROMISC; break; case IEEE80211_M_AHDEMO: sc->rxon.mode = WPI_MODE_HOSTAP; break; case IEEE80211_M_MONITOR: sc->rxon.mode = WPI_MODE_MONITOR; break; default: device_printf(sc->sc_dev, "unknown opmode %d\n", ic->ic_opmode); return EINVAL; } sc->rxon.filter = htole32(sc->rxon.filter); wpi_set_promisc(sc); sc->rxon.cck_mask = 0x0f; /* not yet negotiated */ sc->rxon.ofdm_mask = 0xff; /* not yet negotiated */ if ((error = wpi_send_rxon(sc, 0, 0)) != 0) { device_printf(sc->sc_dev, "%s: could not send RXON\n", __func__); return error; } /* Setup rate scalling. */ if ((error = wpi_mrr_setup(sc)) != 0) { device_printf(sc->sc_dev, "could not setup MRR, error %d\n", error); return error; } DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); return 0; } static uint16_t wpi_get_active_dwell_time(struct wpi_softc *sc, struct ieee80211_channel *c, uint8_t n_probes) { /* No channel? Default to 2GHz settings. */ if (c == NULL || IEEE80211_IS_CHAN_2GHZ(c)) { return (WPI_ACTIVE_DWELL_TIME_2GHZ + WPI_ACTIVE_DWELL_FACTOR_2GHZ * (n_probes + 1)); } /* 5GHz dwell time. */ return (WPI_ACTIVE_DWELL_TIME_5GHZ + WPI_ACTIVE_DWELL_FACTOR_5GHZ * (n_probes + 1)); } /* * Limit the total dwell time. * * Returns the dwell time in milliseconds. */ static uint16_t wpi_limit_dwell(struct wpi_softc *sc, uint16_t dwell_time) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); uint16_t bintval = 0; /* bintval is in TU (1.024mS) */ if (vap != NULL) bintval = vap->iv_bss->ni_intval; /* * If it's non-zero, we should calculate the minimum of * it and the DWELL_BASE. * * XXX Yes, the math should take into account that bintval * is 1.024mS, not 1mS.. */ if (bintval > 0) { DPRINTF(sc, WPI_DEBUG_SCAN, "%s: bintval=%d\n", __func__, bintval); return (MIN(dwell_time, bintval - WPI_CHANNEL_TUNE_TIME * 2)); } /* No association context? Default. */ return dwell_time; } static uint16_t wpi_get_passive_dwell_time(struct wpi_softc *sc, struct ieee80211_channel *c) { uint16_t passive; if (c == NULL || IEEE80211_IS_CHAN_2GHZ(c)) passive = WPI_PASSIVE_DWELL_BASE + WPI_PASSIVE_DWELL_TIME_2GHZ; else passive = WPI_PASSIVE_DWELL_BASE + WPI_PASSIVE_DWELL_TIME_5GHZ; /* Clamp to the beacon interval if we're associated. */ return (wpi_limit_dwell(sc, passive)); } static uint32_t wpi_get_scan_pause_time(uint32_t time, uint16_t bintval) { uint32_t mod = (time % bintval) * IEEE80211_DUR_TU; uint32_t nbeacons = time / bintval; if (mod > WPI_PAUSE_MAX_TIME) mod = WPI_PAUSE_MAX_TIME; return WPI_PAUSE_SCAN(nbeacons, mod); } /* * Send a scan request to the firmware. 
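 *
 * For background scans the inter-channel pause programmed by this
 * function comes from wpi_get_scan_pause_time() above, which splits
 * the requested pause (in TU) into whole beacon intervals plus a
 * microsecond remainder.  For instance, with the 100 TU pause used
 * below and a beacon interval of 80 TU:
 *
 *	nbeacons = 100 / 80                          = 1
 *	mod      = (100 % 80) * IEEE80211_DUR_TU     = 20480 us
 *
 * so roughly one beacon interval plus 20 TU is presumably spent back
 * on the operating channel between scanned channels (the remainder is
 * capped at WPI_PAUSE_MAX_TIME).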
*/ static int wpi_scan(struct wpi_softc *sc, struct ieee80211_channel *c) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211_scan_state *ss = ic->ic_scan; struct ieee80211vap *vap = ss->ss_vap; struct wpi_scan_hdr *hdr; struct wpi_cmd_data *tx; struct wpi_scan_essid *essids; struct wpi_scan_chan *chan; struct ieee80211_frame *wh; struct ieee80211_rateset *rs; uint16_t bintval, buflen, dwell_active, dwell_passive; uint8_t *buf, *frm, i, nssid; int bgscan, error; DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); /* * We are absolutely not allowed to send a scan command when another * scan command is pending. */ if (callout_pending(&sc->scan_timeout)) { device_printf(sc->sc_dev, "%s: called whilst scanning!\n", __func__); error = EAGAIN; goto fail; } bgscan = wpi_check_bss_filter(sc); bintval = vap->iv_bss->ni_intval; if (bgscan != 0 && bintval < WPI_QUIET_TIME_DEFAULT + WPI_CHANNEL_TUNE_TIME * 2) { error = EOPNOTSUPP; goto fail; } buf = malloc(WPI_SCAN_MAXSZ, M_DEVBUF, M_NOWAIT | M_ZERO); if (buf == NULL) { device_printf(sc->sc_dev, "%s: could not allocate buffer for scan command\n", __func__); error = ENOMEM; goto fail; } hdr = (struct wpi_scan_hdr *)buf; /* * Move to the next channel if no packets are received within 10 msecs * after sending the probe request. */ hdr->quiet_time = htole16(WPI_QUIET_TIME_DEFAULT); hdr->quiet_threshold = htole16(1); if (bgscan != 0) { /* * Max needs to be greater than active and passive and quiet! * It's also in microseconds! */ hdr->max_svc = htole32(250 * IEEE80211_DUR_TU); hdr->pause_svc = htole32(wpi_get_scan_pause_time(100, bintval)); } hdr->filter = htole32(WPI_FILTER_MULTICAST | WPI_FILTER_BEACON); tx = (struct wpi_cmd_data *)(hdr + 1); tx->flags = htole32(WPI_TX_AUTO_SEQ); tx->id = WPI_ID_BROADCAST; tx->lifetime = htole32(WPI_LIFETIME_INFINITE); if (IEEE80211_IS_CHAN_5GHZ(c)) { /* Send probe requests at 6Mbps. */ tx->plcp = wpi_ridx_to_plcp[WPI_RIDX_OFDM6]; rs = &ic->ic_sup_rates[IEEE80211_MODE_11A]; } else { hdr->flags = htole32(WPI_RXON_24GHZ | WPI_RXON_AUTO); /* Send probe requests at 1Mbps. */ tx->plcp = wpi_ridx_to_plcp[WPI_RIDX_CCK1]; rs = &ic->ic_sup_rates[IEEE80211_MODE_11G]; } essids = (struct wpi_scan_essid *)(tx + 1); nssid = MIN(ss->ss_nssid, WPI_SCAN_MAX_ESSIDS); for (i = 0; i < nssid; i++) { essids[i].id = IEEE80211_ELEMID_SSID; essids[i].len = MIN(ss->ss_ssid[i].len, IEEE80211_NWID_LEN); memcpy(essids[i].data, ss->ss_ssid[i].ssid, essids[i].len); #ifdef WPI_DEBUG if (sc->sc_debug & WPI_DEBUG_SCAN) { printf("Scanning Essid: "); ieee80211_print_essid(essids[i].data, essids[i].len); printf("\n"); } #endif } /* * Build a probe request frame. Most of the following code is a * copy & paste of what is done in net80211. */ wh = (struct ieee80211_frame *)(essids + WPI_SCAN_MAX_ESSIDS); wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_PROBE_REQ; wh->i_fc[1] = IEEE80211_FC1_DIR_NODS; IEEE80211_ADDR_COPY(wh->i_addr1, ieee80211broadcastaddr); IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr); IEEE80211_ADDR_COPY(wh->i_addr3, ieee80211broadcastaddr); frm = (uint8_t *)(wh + 1); frm = ieee80211_add_ssid(frm, NULL, 0); frm = ieee80211_add_rates(frm, rs); if (rs->rs_nrates > IEEE80211_RATE_SIZE) frm = ieee80211_add_xrates(frm, rs); /* Set length of probe request. */ tx->len = htole16(frm - (uint8_t *)wh); /* * Construct information about the channel that we * want to scan. 
The firmware expects this to be directly * after the scan probe request */ chan = (struct wpi_scan_chan *)frm; chan->chan = ieee80211_chan2ieee(ic, c); chan->flags = 0; if (nssid) { hdr->crc_threshold = WPI_SCAN_CRC_TH_DEFAULT; chan->flags |= WPI_CHAN_NPBREQS(nssid); } else hdr->crc_threshold = WPI_SCAN_CRC_TH_NEVER; if (!IEEE80211_IS_CHAN_PASSIVE(c)) chan->flags |= WPI_CHAN_ACTIVE; /* * Calculate the active/passive dwell times. */ dwell_active = wpi_get_active_dwell_time(sc, c, nssid); dwell_passive = wpi_get_passive_dwell_time(sc, c); /* Make sure they're valid. */ if (dwell_active > dwell_passive) dwell_active = dwell_passive; chan->active = htole16(dwell_active); chan->passive = htole16(dwell_passive); chan->dsp_gain = 0x6e; /* Default level */ if (IEEE80211_IS_CHAN_5GHZ(c)) chan->rf_gain = 0x3b; else chan->rf_gain = 0x28; DPRINTF(sc, WPI_DEBUG_SCAN, "Scanning %u Passive: %d\n", chan->chan, IEEE80211_IS_CHAN_PASSIVE(c)); hdr->nchan++; if (hdr->nchan == 1 && sc->rxon.chan == chan->chan) { /* XXX Force probe request transmission. */ memcpy(chan + 1, chan, sizeof (struct wpi_scan_chan)); chan++; /* Reduce unnecessary delay. */ chan->flags = 0; chan->passive = chan->active = hdr->quiet_time; hdr->nchan++; } chan++; buflen = (uint8_t *)chan - buf; hdr->len = htole16(buflen); DPRINTF(sc, WPI_DEBUG_CMD, "sending scan command nchan=%d\n", hdr->nchan); error = wpi_cmd(sc, WPI_CMD_SCAN, buf, buflen, 1); free(buf, M_DEVBUF); if (error != 0) goto fail; callout_reset(&sc->scan_timeout, 5*hz, wpi_scan_timeout, sc); DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); return 0; fail: DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); return error; } static int wpi_auth(struct wpi_softc *sc, struct ieee80211vap *vap) { struct ieee80211com *ic = vap->iv_ic; struct ieee80211_node *ni = vap->iv_bss; struct ieee80211_channel *c = ni->ni_chan; int error; WPI_RXON_LOCK(sc); DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); /* Update adapter configuration. */ sc->rxon.associd = 0; sc->rxon.filter &= ~htole32(WPI_FILTER_BSS); IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid); sc->rxon.chan = ieee80211_chan2ieee(ic, c); sc->rxon.flags = htole32(WPI_RXON_TSF | WPI_RXON_CTS_TO_SELF); if (IEEE80211_IS_CHAN_2GHZ(c)) sc->rxon.flags |= htole32(WPI_RXON_AUTO | WPI_RXON_24GHZ); if (ic->ic_flags & IEEE80211_F_SHSLOT) sc->rxon.flags |= htole32(WPI_RXON_SHSLOT); if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) sc->rxon.flags |= htole32(WPI_RXON_SHPREAMBLE); if (IEEE80211_IS_CHAN_A(c)) { sc->rxon.cck_mask = 0; sc->rxon.ofdm_mask = 0x15; } else if (IEEE80211_IS_CHAN_B(c)) { sc->rxon.cck_mask = 0x03; sc->rxon.ofdm_mask = 0; } else { /* Assume 802.11b/g. 
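 *
 * The cck_mask/ofdm_mask values in this ladder are per-modulation rate
 * bitmaps (bit 0 = lowest rate of the family), so they appear to
 * select the basic-rate sets of the respective PHYs:
 *
 *	cck_mask  0x03 -> CCK 1 and 2 Mbps          (11b)
 *	cck_mask  0x0f -> CCK 1, 2, 5.5 and 11 Mbps
 *	ofdm_mask 0x15 -> OFDM 6, 12 and 24 Mbps (the mandatory rates)
 *
 * wpi_config() installs the same defaults with a "not yet negotiated"
 * note; here they are narrowed by the band of the BSS channel.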
*/ sc->rxon.cck_mask = 0x0f; sc->rxon.ofdm_mask = 0x15; } DPRINTF(sc, WPI_DEBUG_STATE, "rxon chan %d flags %x cck %x ofdm %x\n", sc->rxon.chan, sc->rxon.flags, sc->rxon.cck_mask, sc->rxon.ofdm_mask); if ((error = wpi_send_rxon(sc, 0, 1)) != 0) { device_printf(sc->sc_dev, "%s: could not send RXON\n", __func__); } DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); WPI_RXON_UNLOCK(sc); return error; } static int wpi_config_beacon(struct wpi_vap *wvp) { struct ieee80211vap *vap = &wvp->wv_vap; struct ieee80211com *ic = vap->iv_ic; struct ieee80211_beacon_offsets *bo = &vap->iv_bcn_off; struct wpi_buf *bcn = &wvp->wv_bcbuf; struct wpi_softc *sc = ic->ic_softc; struct wpi_cmd_beacon *cmd = (struct wpi_cmd_beacon *)&bcn->data; struct ieee80211_tim_ie *tie; struct mbuf *m; uint8_t *ptr; int error; DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); WPI_VAP_LOCK_ASSERT(wvp); cmd->len = htole16(bcn->m->m_pkthdr.len); cmd->plcp = (ic->ic_curmode == IEEE80211_MODE_11A) ? wpi_ridx_to_plcp[WPI_RIDX_OFDM6] : wpi_ridx_to_plcp[WPI_RIDX_CCK1]; /* XXX seems to be unused */ if (*(bo->bo_tim) == IEEE80211_ELEMID_TIM) { tie = (struct ieee80211_tim_ie *) bo->bo_tim; ptr = mtod(bcn->m, uint8_t *); cmd->tim = htole16(bo->bo_tim - ptr); cmd->timsz = tie->tim_len; } /* Necessary for recursion in ieee80211_beacon_update(). */ m = bcn->m; bcn->m = m_dup(m, M_NOWAIT); if (bcn->m == NULL) { device_printf(sc->sc_dev, "%s: could not copy beacon frame\n", __func__); error = ENOMEM; goto end; } if ((error = wpi_cmd2(sc, bcn)) != 0) { device_printf(sc->sc_dev, "%s: could not update beacon frame, error %d", __func__, error); m_freem(bcn->m); } /* Restore mbuf. */ end: bcn->m = m; return error; } static int wpi_setup_beacon(struct wpi_softc *sc, struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct wpi_vap *wvp = WPI_VAP(vap); struct wpi_buf *bcn = &wvp->wv_bcbuf; struct mbuf *m; int error; DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); if (ni->ni_chan == IEEE80211_CHAN_ANYC) return EINVAL; m = ieee80211_beacon_alloc(ni); if (m == NULL) { device_printf(sc->sc_dev, "%s: could not allocate beacon frame\n", __func__); return ENOMEM; } WPI_VAP_LOCK(wvp); if (bcn->m != NULL) m_freem(bcn->m); bcn->m = m; error = wpi_config_beacon(wvp); WPI_VAP_UNLOCK(wvp); return error; } static void wpi_update_beacon(struct ieee80211vap *vap, int item) { struct wpi_softc *sc = vap->iv_ic->ic_softc; struct wpi_vap *wvp = WPI_VAP(vap); struct wpi_buf *bcn = &wvp->wv_bcbuf; struct ieee80211_beacon_offsets *bo = &vap->iv_bcn_off; struct ieee80211_node *ni = vap->iv_bss; int mcast = 0; DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); WPI_VAP_LOCK(wvp); if (bcn->m == NULL) { bcn->m = ieee80211_beacon_alloc(ni); if (bcn->m == NULL) { device_printf(sc->sc_dev, "%s: could not allocate beacon frame\n", __func__); DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); WPI_VAP_UNLOCK(wvp); return; } } WPI_VAP_UNLOCK(wvp); if (item == IEEE80211_BEACON_TIM) mcast = 1; /* TODO */ setbit(bo->bo_flags, item); ieee80211_beacon_update(ni, bcn->m, mcast); WPI_VAP_LOCK(wvp); wpi_config_beacon(wvp); WPI_VAP_UNLOCK(wvp); DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); } static void wpi_newassoc(struct ieee80211_node *ni, int isnew) { struct ieee80211vap *vap = ni->ni_vap; struct wpi_softc *sc = ni->ni_ic->ic_softc; struct wpi_node *wn = WPI_NODE(ni); int error; WPI_NT_LOCK(sc); DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); if (vap->iv_opmode != IEEE80211_M_STA && wn->id == WPI_ID_UNDEFINED) { if 
((error = wpi_add_ibss_node(sc, ni)) != 0) { device_printf(sc->sc_dev, "%s: could not add IBSS node, error %d\n", __func__, error); } } WPI_NT_UNLOCK(sc); } static int wpi_run(struct wpi_softc *sc, struct ieee80211vap *vap) { struct ieee80211com *ic = vap->iv_ic; struct ieee80211_node *ni = vap->iv_bss; struct ieee80211_channel *c = ni->ni_chan; int error; DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); if (vap->iv_opmode == IEEE80211_M_MONITOR) { /* Link LED blinks while monitoring. */ wpi_set_led(sc, WPI_LED_LINK, 5, 5); return 0; } /* XXX kernel panic workaround */ if (c == IEEE80211_CHAN_ANYC) { device_printf(sc->sc_dev, "%s: incomplete configuration\n", __func__); return EINVAL; } if ((error = wpi_set_timing(sc, ni)) != 0) { device_printf(sc->sc_dev, "%s: could not set timing, error %d\n", __func__, error); return error; } /* Update adapter configuration. */ WPI_RXON_LOCK(sc); IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid); sc->rxon.associd = htole16(IEEE80211_NODE_AID(ni)); sc->rxon.chan = ieee80211_chan2ieee(ic, c); sc->rxon.flags = htole32(WPI_RXON_TSF | WPI_RXON_CTS_TO_SELF); if (IEEE80211_IS_CHAN_2GHZ(c)) sc->rxon.flags |= htole32(WPI_RXON_AUTO | WPI_RXON_24GHZ); if (ic->ic_flags & IEEE80211_F_SHSLOT) sc->rxon.flags |= htole32(WPI_RXON_SHSLOT); if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) sc->rxon.flags |= htole32(WPI_RXON_SHPREAMBLE); if (IEEE80211_IS_CHAN_A(c)) { sc->rxon.cck_mask = 0; sc->rxon.ofdm_mask = 0x15; } else if (IEEE80211_IS_CHAN_B(c)) { sc->rxon.cck_mask = 0x03; sc->rxon.ofdm_mask = 0; } else { /* Assume 802.11b/g. */ sc->rxon.cck_mask = 0x0f; sc->rxon.ofdm_mask = 0x15; } sc->rxon.filter |= htole32(WPI_FILTER_BSS); DPRINTF(sc, WPI_DEBUG_STATE, "rxon chan %d flags %x\n", sc->rxon.chan, sc->rxon.flags); if ((error = wpi_send_rxon(sc, 0, 1)) != 0) { WPI_RXON_UNLOCK(sc); device_printf(sc->sc_dev, "%s: could not send RXON\n", __func__); return error; } /* Start periodic calibration timer. */ callout_reset(&sc->calib_to, 60*hz, wpi_calib_timeout, sc); WPI_RXON_UNLOCK(sc); if (vap->iv_opmode == IEEE80211_M_IBSS || vap->iv_opmode == IEEE80211_M_HOSTAP) { if ((error = wpi_setup_beacon(sc, ni)) != 0) { device_printf(sc->sc_dev, "%s: could not setup beacon, error %d\n", __func__, error); return error; } } if (vap->iv_opmode == IEEE80211_M_STA) { /* Add BSS node. */ WPI_NT_LOCK(sc); error = wpi_add_sta_node(sc, ni); WPI_NT_UNLOCK(sc); if (error != 0) { device_printf(sc->sc_dev, "%s: could not add BSS node, error %d\n", __func__, error); return error; } } /* Link LED always on while associated. */ wpi_set_led(sc, WPI_LED_LINK, 0, 1); /* Enable power-saving mode if requested by user. 
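 *
 * The call below asks for level 3 on the 0 (CAM) .. 5 (maximum saving)
 * scale described above wpi_set_pslevel(), with the dtim argument left
 * at 0.  Per wpi_set_pslevel(), a dtim of 0 is treated as a DTIM period
 * of 1 with DTIM skipping disabled, so each cmd.intval[] entry ends up
 * clamped to MIN(1, pmgt->intval[i]).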
*/ if ((vap->iv_flags & IEEE80211_F_PMGTON) && vap->iv_opmode != IEEE80211_M_IBSS) (void)wpi_set_pslevel(sc, 0, 3, 1); DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); return 0; } static int wpi_load_key(struct ieee80211_node *ni, const struct ieee80211_key *k) { const struct ieee80211_cipher *cip = k->wk_cipher; struct ieee80211vap *vap = ni->ni_vap; struct wpi_softc *sc = ni->ni_ic->ic_softc; struct wpi_node *wn = WPI_NODE(ni); struct wpi_node_info node; uint16_t kflags; int error; DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); if (wpi_check_node_entry(sc, wn->id) == 0) { device_printf(sc->sc_dev, "%s: node does not exist\n", __func__); return 0; } switch (cip->ic_cipher) { case IEEE80211_CIPHER_AES_CCM: kflags = WPI_KFLAG_CCMP; break; default: device_printf(sc->sc_dev, "%s: unknown cipher %d\n", __func__, cip->ic_cipher); return 0; } kflags |= WPI_KFLAG_KID(k->wk_keyix); if (k->wk_flags & IEEE80211_KEY_GROUP) kflags |= WPI_KFLAG_MULTICAST; memset(&node, 0, sizeof node); node.id = wn->id; node.control = WPI_NODE_UPDATE; node.flags = WPI_FLAG_KEY_SET; node.kflags = htole16(kflags); memcpy(node.key, k->wk_key, k->wk_keylen); again: DPRINTF(sc, WPI_DEBUG_KEY, "%s: setting %s key id %d for node %d (%s)\n", __func__, (kflags & WPI_KFLAG_MULTICAST) ? "group" : "ucast", k->wk_keyix, node.id, ether_sprintf(ni->ni_macaddr)); error = wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, 1); if (error != 0) { device_printf(sc->sc_dev, "can't update node info, error %d\n", error); return !error; } if (!(kflags & WPI_KFLAG_MULTICAST) && &vap->iv_nw_keys[0] <= k && k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) { kflags |= WPI_KFLAG_MULTICAST; node.kflags = htole16(kflags); goto again; } return 1; } static void wpi_load_key_cb(void *arg, struct ieee80211_node *ni) { const struct ieee80211_key *k = arg; struct ieee80211vap *vap = ni->ni_vap; struct wpi_softc *sc = ni->ni_ic->ic_softc; struct wpi_node *wn = WPI_NODE(ni); int error; if (vap->iv_bss == ni && wn->id == WPI_ID_UNDEFINED) return; WPI_NT_LOCK(sc); error = wpi_load_key(ni, k); WPI_NT_UNLOCK(sc); if (error == 0) { device_printf(sc->sc_dev, "%s: error while setting key\n", __func__); } } static int wpi_set_global_keys(struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211_key *wk = &vap->iv_nw_keys[0]; int error = 1; for (; wk < &vap->iv_nw_keys[IEEE80211_WEP_NKID] && error; wk++) if (wk->wk_keyix != IEEE80211_KEYIX_NONE) error = wpi_load_key(ni, wk); return !error; } static int wpi_del_key(struct ieee80211_node *ni, const struct ieee80211_key *k) { struct ieee80211vap *vap = ni->ni_vap; struct wpi_softc *sc = ni->ni_ic->ic_softc; struct wpi_node *wn = WPI_NODE(ni); struct wpi_node_info node; uint16_t kflags; int error; DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); if (wpi_check_node_entry(sc, wn->id) == 0) { DPRINTF(sc, WPI_DEBUG_KEY, "%s: node was removed\n", __func__); return 1; /* Nothing to do. */ } kflags = WPI_KFLAG_KID(k->wk_keyix); if (k->wk_flags & IEEE80211_KEY_GROUP) kflags |= WPI_KFLAG_MULTICAST; memset(&node, 0, sizeof node); node.id = wn->id; node.control = WPI_NODE_UPDATE; node.flags = WPI_FLAG_KEY_SET; node.kflags = htole16(kflags); again: DPRINTF(sc, WPI_DEBUG_KEY, "%s: deleting %s key %d for node %d (%s)\n", __func__, (kflags & WPI_KFLAG_MULTICAST) ? 
"group" : "ucast", k->wk_keyix, node.id, ether_sprintf(ni->ni_macaddr)); error = wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, 1); if (error != 0) { device_printf(sc->sc_dev, "can't update node info, error %d\n", error); return !error; } if (!(kflags & WPI_KFLAG_MULTICAST) && &vap->iv_nw_keys[0] <= k && k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) { kflags |= WPI_KFLAG_MULTICAST; node.kflags = htole16(kflags); goto again; } return 1; } static void wpi_del_key_cb(void *arg, struct ieee80211_node *ni) { const struct ieee80211_key *k = arg; struct ieee80211vap *vap = ni->ni_vap; struct wpi_softc *sc = ni->ni_ic->ic_softc; struct wpi_node *wn = WPI_NODE(ni); int error; if (vap->iv_bss == ni && wn->id == WPI_ID_UNDEFINED) return; WPI_NT_LOCK(sc); error = wpi_del_key(ni, k); WPI_NT_UNLOCK(sc); if (error == 0) { device_printf(sc->sc_dev, "%s: error while deleting key\n", __func__); } } static int wpi_process_key(struct ieee80211vap *vap, const struct ieee80211_key *k, int set) { struct ieee80211com *ic = vap->iv_ic; struct wpi_softc *sc = ic->ic_softc; struct wpi_vap *wvp = WPI_VAP(vap); struct ieee80211_node *ni; int error, ni_ref = 0; DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); if (k->wk_flags & IEEE80211_KEY_SWCRYPT) { /* Not for us. */ return 1; } if (!(k->wk_flags & IEEE80211_KEY_RECV)) { /* XMIT keys are handled in wpi_tx_data(). */ return 1; } /* Handle group keys. */ if (&vap->iv_nw_keys[0] <= k && k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) { WPI_NT_LOCK(sc); if (set) wvp->wv_gtk |= WPI_VAP_KEY(k->wk_keyix); else wvp->wv_gtk &= ~WPI_VAP_KEY(k->wk_keyix); WPI_NT_UNLOCK(sc); if (vap->iv_state == IEEE80211_S_RUN) { ieee80211_iterate_nodes(&ic->ic_sta, set ? wpi_load_key_cb : wpi_del_key_cb, __DECONST(void *, k)); } return 1; } switch (vap->iv_opmode) { case IEEE80211_M_STA: ni = vap->iv_bss; break; case IEEE80211_M_IBSS: case IEEE80211_M_AHDEMO: case IEEE80211_M_HOSTAP: ni = ieee80211_find_vap_node(&ic->ic_sta, vap, k->wk_macaddr); if (ni == NULL) return 0; /* should not happen */ ni_ref = 1; break; default: device_printf(sc->sc_dev, "%s: unknown opmode %d\n", __func__, vap->iv_opmode); return 0; } WPI_NT_LOCK(sc); if (set) error = wpi_load_key(ni, k); else error = wpi_del_key(ni, k); WPI_NT_UNLOCK(sc); if (ni_ref) ieee80211_node_decref(ni); return error; } static int wpi_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k) { return wpi_process_key(vap, k, 1); } static int wpi_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k) { return wpi_process_key(vap, k, 0); } /* * This function is called after the runtime firmware notifies us of its * readiness (called in a process context). */ static int wpi_post_alive(struct wpi_softc *sc) { int ntries, error; /* Check (again) that the radio is not disabled. */ if ((error = wpi_nic_lock(sc)) != 0) return error; DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); /* NB: Runtime firmware must be up and running. */ if (!(wpi_prph_read(sc, WPI_APMG_RFKILL) & 1)) { device_printf(sc->sc_dev, "RF switch: radio disabled (%s)\n", __func__); wpi_nic_unlock(sc); return EPERM; /* :-) */ } wpi_nic_unlock(sc); /* Wait for thermal sensor to calibrate. 
*/ for (ntries = 0; ntries < 1000; ntries++) { if ((sc->temp = (int)WPI_READ(sc, WPI_UCODE_GP2)) != 0) break; DELAY(10); } if (ntries == 1000) { device_printf(sc->sc_dev, "timeout waiting for thermal sensor calibration\n"); return ETIMEDOUT; } DPRINTF(sc, WPI_DEBUG_TEMP, "temperature %d\n", sc->temp); return 0; } /* * The firmware boot code is small and is intended to be copied directly into * the NIC internal memory (no DMA transfer). */ static int wpi_load_bootcode(struct wpi_softc *sc, const uint8_t *ucode, uint32_t size) { int error, ntries; DPRINTF(sc, WPI_DEBUG_HW, "Loading microcode size 0x%x\n", size); size /= sizeof (uint32_t); if ((error = wpi_nic_lock(sc)) != 0) return error; /* Copy microcode image into NIC memory. */ wpi_prph_write_region_4(sc, WPI_BSM_SRAM_BASE, (const uint32_t *)ucode, size); wpi_prph_write(sc, WPI_BSM_WR_MEM_SRC, 0); wpi_prph_write(sc, WPI_BSM_WR_MEM_DST, WPI_FW_TEXT_BASE); wpi_prph_write(sc, WPI_BSM_WR_DWCOUNT, size); /* Start boot load now. */ wpi_prph_write(sc, WPI_BSM_WR_CTRL, WPI_BSM_WR_CTRL_START); /* Wait for transfer to complete. */ for (ntries = 0; ntries < 1000; ntries++) { uint32_t status = WPI_READ(sc, WPI_FH_TX_STATUS); DPRINTF(sc, WPI_DEBUG_HW, "firmware status=0x%x, val=0x%x, result=0x%x\n", status, WPI_FH_TX_STATUS_IDLE(6), status & WPI_FH_TX_STATUS_IDLE(6)); if (status & WPI_FH_TX_STATUS_IDLE(6)) { DPRINTF(sc, WPI_DEBUG_HW, "Status Match! - ntries = %d\n", ntries); break; } DELAY(10); } if (ntries == 1000) { device_printf(sc->sc_dev, "%s: could not load boot firmware\n", __func__); wpi_nic_unlock(sc); return ETIMEDOUT; } /* Enable boot after power up. */ wpi_prph_write(sc, WPI_BSM_WR_CTRL, WPI_BSM_WR_CTRL_START_EN); wpi_nic_unlock(sc); return 0; } static int wpi_load_firmware(struct wpi_softc *sc) { struct wpi_fw_info *fw = &sc->fw; struct wpi_dma_info *dma = &sc->fw_dma; int error; DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); /* Copy initialization sections into pre-allocated DMA-safe memory. */ memcpy(dma->vaddr, fw->init.data, fw->init.datasz); bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); memcpy(dma->vaddr + WPI_FW_DATA_MAXSZ, fw->init.text, fw->init.textsz); bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); /* Tell adapter where to find initialization sections. */ if ((error = wpi_nic_lock(sc)) != 0) return error; wpi_prph_write(sc, WPI_BSM_DRAM_DATA_ADDR, dma->paddr); wpi_prph_write(sc, WPI_BSM_DRAM_DATA_SIZE, fw->init.datasz); wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_ADDR, dma->paddr + WPI_FW_DATA_MAXSZ); wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_SIZE, fw->init.textsz); wpi_nic_unlock(sc); /* Load firmware boot code. */ error = wpi_load_bootcode(sc, fw->boot.text, fw->boot.textsz); if (error != 0) { device_printf(sc->sc_dev, "%s: could not load boot firmware\n", __func__); return error; } /* Now press "execute". */ WPI_WRITE(sc, WPI_RESET, 0); /* Wait at most one second for first alive notification. */ if ((error = mtx_sleep(sc, &sc->sc_mtx, PCATCH, "wpiinit", hz)) != 0) { device_printf(sc->sc_dev, "%s: timeout waiting for adapter to initialize, error %d\n", __func__, error); return error; } /* Copy runtime sections into pre-allocated DMA-safe memory. */ memcpy(dma->vaddr, fw->main.data, fw->main.datasz); bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); memcpy(dma->vaddr + WPI_FW_DATA_MAXSZ, fw->main.text, fw->main.textsz); bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); /* Tell adapter where to find runtime sections. 
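   These are the same BSM DRAM pointer registers programmed for the init
   image above, but the text size is now or'ed with WPI_FW_UPDATED, which
   presumably signals the bootstrap state machine that an updated (runtime)
   image is available so it is loaded in place of the initialization firmware
   from now on.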
*/ if ((error = wpi_nic_lock(sc)) != 0) return error; wpi_prph_write(sc, WPI_BSM_DRAM_DATA_ADDR, dma->paddr); wpi_prph_write(sc, WPI_BSM_DRAM_DATA_SIZE, fw->main.datasz); wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_ADDR, dma->paddr + WPI_FW_DATA_MAXSZ); wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_SIZE, WPI_FW_UPDATED | fw->main.textsz); wpi_nic_unlock(sc); return 0; } static int wpi_read_firmware(struct wpi_softc *sc) { const struct firmware *fp; struct wpi_fw_info *fw = &sc->fw; const struct wpi_firmware_hdr *hdr; int error; DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); DPRINTF(sc, WPI_DEBUG_FIRMWARE, "Attempting Loading Firmware from %s module\n", WPI_FW_NAME); WPI_UNLOCK(sc); fp = firmware_get(WPI_FW_NAME); WPI_LOCK(sc); if (fp == NULL) { device_printf(sc->sc_dev, "could not load firmware image '%s'\n", WPI_FW_NAME); return EINVAL; } sc->fw_fp = fp; if (fp->datasize < sizeof (struct wpi_firmware_hdr)) { device_printf(sc->sc_dev, "firmware file too short: %zu bytes\n", fp->datasize); error = EINVAL; goto fail; } fw->size = fp->datasize; fw->data = (const uint8_t *)fp->data; /* Extract firmware header information. */ hdr = (const struct wpi_firmware_hdr *)fw->data; /* | RUNTIME FIRMWARE | INIT FIRMWARE | BOOT FW | |HDR|<--TEXT-->|<--DATA-->|<--TEXT-->|<--DATA-->|<--TEXT-->| */ fw->main.textsz = le32toh(hdr->rtextsz); fw->main.datasz = le32toh(hdr->rdatasz); fw->init.textsz = le32toh(hdr->itextsz); fw->init.datasz = le32toh(hdr->idatasz); fw->boot.textsz = le32toh(hdr->btextsz); fw->boot.datasz = 0; /* Sanity-check firmware header. */ if (fw->main.textsz > WPI_FW_TEXT_MAXSZ || fw->main.datasz > WPI_FW_DATA_MAXSZ || fw->init.textsz > WPI_FW_TEXT_MAXSZ || fw->init.datasz > WPI_FW_DATA_MAXSZ || fw->boot.textsz > WPI_FW_BOOT_TEXT_MAXSZ || (fw->boot.textsz & 3) != 0) { device_printf(sc->sc_dev, "invalid firmware header\n"); error = EINVAL; goto fail; } /* Check that all firmware sections fit. */ if (fw->size < sizeof (*hdr) + fw->main.textsz + fw->main.datasz + fw->init.textsz + fw->init.datasz + fw->boot.textsz) { device_printf(sc->sc_dev, "firmware file too short: %zu bytes\n", fw->size); error = EINVAL; goto fail; } /* Get pointers to firmware sections. */ fw->main.text = (const uint8_t *)(hdr + 1); fw->main.data = fw->main.text + fw->main.textsz; fw->init.text = fw->main.data + fw->main.datasz; fw->init.data = fw->init.text + fw->init.textsz; fw->boot.text = fw->init.data + fw->init.datasz; DPRINTF(sc, WPI_DEBUG_FIRMWARE, "Firmware Version: Major %d, Minor %d, Driver %d, \n" "runtime (text: %u, data: %u) init (text: %u, data %u) " "boot (text %u)\n", hdr->major, hdr->minor, le32toh(hdr->driver), fw->main.textsz, fw->main.datasz, fw->init.textsz, fw->init.datasz, fw->boot.textsz); DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->main.text %p\n", fw->main.text); DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->main.data %p\n", fw->main.data); DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->init.text %p\n", fw->init.text); DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->init.data %p\n", fw->init.data); DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->boot.text %p\n", fw->boot.text); return 0; fail: wpi_unload_firmware(sc); return error; } /** * Free the referenced firmware image */ static void wpi_unload_firmware(struct wpi_softc *sc) { if (sc->fw_fp != NULL) { firmware_put(sc->fw_fp, FIRMWARE_UNLOAD); sc->fw_fp = NULL; } } static int wpi_clock_wait(struct wpi_softc *sc) { int ntries; /* Set "initialization complete" bit. */ WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_INIT_DONE); /* Wait for clock stabilization. 
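   The MAC indicates a stable clock through the WPI_GP_CNTRL_MAC_CLOCK_READY
   bit; the loop below polls WPI_GP_CNTRL for it, allowing up to 2500
   iterations of 100us (about 250ms) before giving up with ETIMEDOUT.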
*/ for (ntries = 0; ntries < 2500; ntries++) { if (WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_MAC_CLOCK_READY) return 0; DELAY(100); } device_printf(sc->sc_dev, "%s: timeout waiting for clock stabilization\n", __func__); return ETIMEDOUT; } static int wpi_apm_init(struct wpi_softc *sc) { uint32_t reg; int error; DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); /* Disable L0s exit timer (NMI bug workaround). */ WPI_SETBITS(sc, WPI_GIO_CHICKEN, WPI_GIO_CHICKEN_DIS_L0S_TIMER); /* Don't wait for ICH L0s (ICH bug workaround). */ WPI_SETBITS(sc, WPI_GIO_CHICKEN, WPI_GIO_CHICKEN_L1A_NO_L0S_RX); /* Set FH wait threshold to max (HW bug under stress workaround). */ WPI_SETBITS(sc, WPI_DBG_HPET_MEM, 0xffff0000); /* Retrieve PCIe Active State Power Management (ASPM). */ reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + PCIER_LINK_CTL, 1); /* Workaround for HW instability in PCIe L0->L0s->L1 transition. */ if (reg & PCIEM_LINK_CTL_ASPMC_L1) /* L1 Entry enabled. */ WPI_SETBITS(sc, WPI_GIO, WPI_GIO_L0S_ENA); else WPI_CLRBITS(sc, WPI_GIO, WPI_GIO_L0S_ENA); WPI_SETBITS(sc, WPI_ANA_PLL, WPI_ANA_PLL_INIT); /* Wait for clock stabilization before accessing prph. */ if ((error = wpi_clock_wait(sc)) != 0) return error; if ((error = wpi_nic_lock(sc)) != 0) return error; /* Cleanup. */ wpi_prph_write(sc, WPI_APMG_CLK_DIS, 0x00000400); wpi_prph_clrbits(sc, WPI_APMG_PS, 0x00000200); /* Enable DMA and BSM (Bootstrap State Machine). */ wpi_prph_write(sc, WPI_APMG_CLK_EN, WPI_APMG_CLK_CTRL_DMA_CLK_RQT | WPI_APMG_CLK_CTRL_BSM_CLK_RQT); DELAY(20); /* Disable L1-Active. */ wpi_prph_setbits(sc, WPI_APMG_PCI_STT, WPI_APMG_PCI_STT_L1A_DIS); wpi_nic_unlock(sc); return 0; } static void wpi_apm_stop_master(struct wpi_softc *sc) { int ntries; /* Stop busmaster DMA activity. */ WPI_SETBITS(sc, WPI_RESET, WPI_RESET_STOP_MASTER); if ((WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_PS_MASK) == WPI_GP_CNTRL_MAC_PS) return; /* Already asleep. */ for (ntries = 0; ntries < 100; ntries++) { if (WPI_READ(sc, WPI_RESET) & WPI_RESET_MASTER_DISABLED) return; DELAY(10); } device_printf(sc->sc_dev, "%s: timeout waiting for master\n", __func__); } static void wpi_apm_stop(struct wpi_softc *sc) { wpi_apm_stop_master(sc); /* Reset the entire device. */ WPI_SETBITS(sc, WPI_RESET, WPI_RESET_SW); DELAY(10); /* Clear "initialization complete" bit. */ WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_INIT_DONE); } static void wpi_nic_config(struct wpi_softc *sc) { uint32_t rev; DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); /* voodoo from the Linux "driver".. */ rev = pci_read_config(sc->sc_dev, PCIR_REVID, 1); if ((rev & 0xc0) == 0x40) WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_ALM_MB); else if (!(rev & 0x80)) WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_ALM_MM); if (sc->cap == 0x80) WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_SKU_MRC); if ((sc->rev & 0xf0) == 0xd0) WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_REV_D); else WPI_CLRBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_REV_D); if (sc->type > 1) WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_TYPE_B); } static int wpi_hw_init(struct wpi_softc *sc) { uint8_t chnl; int ntries, error; DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); /* Clear pending interrupts. */ WPI_WRITE(sc, WPI_INT, 0xffffffff); if ((error = wpi_apm_init(sc)) != 0) { device_printf(sc->sc_dev, "%s: could not power ON adapter, error %d\n", __func__, error); return error; } /* Select VMAIN power source. 
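   Clearing WPI_APMG_PS_PWR_SRC_MASK in the APMG_PS register asks the adapter
   to draw power from the main supply (VMAIN) instead of the auxiliary one;
   the GPIO_IN poll that follows waits up to about 50ms (5000 iterations of
   10us) for the switch to take effect.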
*/ if ((error = wpi_nic_lock(sc)) != 0) return error; wpi_prph_clrbits(sc, WPI_APMG_PS, WPI_APMG_PS_PWR_SRC_MASK); wpi_nic_unlock(sc); /* Spin until VMAIN gets selected. */ for (ntries = 0; ntries < 5000; ntries++) { if (WPI_READ(sc, WPI_GPIO_IN) & WPI_GPIO_IN_VMAIN) break; DELAY(10); } if (ntries == 5000) { device_printf(sc->sc_dev, "timeout selecting power source\n"); return ETIMEDOUT; } /* Perform adapter initialization. */ wpi_nic_config(sc); /* Initialize RX ring. */ if ((error = wpi_nic_lock(sc)) != 0) return error; /* Set physical address of RX ring. */ WPI_WRITE(sc, WPI_FH_RX_BASE, sc->rxq.desc_dma.paddr); /* Set physical address of RX read pointer. */ WPI_WRITE(sc, WPI_FH_RX_RPTR_ADDR, sc->shared_dma.paddr + offsetof(struct wpi_shared, next)); WPI_WRITE(sc, WPI_FH_RX_WPTR, 0); /* Enable RX. */ WPI_WRITE(sc, WPI_FH_RX_CONFIG, WPI_FH_RX_CONFIG_DMA_ENA | WPI_FH_RX_CONFIG_RDRBD_ENA | WPI_FH_RX_CONFIG_WRSTATUS_ENA | WPI_FH_RX_CONFIG_MAXFRAG | WPI_FH_RX_CONFIG_NRBD(WPI_RX_RING_COUNT_LOG) | WPI_FH_RX_CONFIG_IRQ_DST_HOST | WPI_FH_RX_CONFIG_IRQ_TIMEOUT(1)); (void)WPI_READ(sc, WPI_FH_RSSR_TBL); /* barrier */ wpi_nic_unlock(sc); WPI_WRITE(sc, WPI_FH_RX_WPTR, (WPI_RX_RING_COUNT - 1) & ~7); /* Initialize TX rings. */ if ((error = wpi_nic_lock(sc)) != 0) return error; wpi_prph_write(sc, WPI_ALM_SCHED_MODE, 2); /* bypass mode */ wpi_prph_write(sc, WPI_ALM_SCHED_ARASTAT, 1); /* enable RA0 */ /* Enable all 6 TX rings. */ wpi_prph_write(sc, WPI_ALM_SCHED_TXFACT, 0x3f); wpi_prph_write(sc, WPI_ALM_SCHED_SBYPASS_MODE1, 0x10000); wpi_prph_write(sc, WPI_ALM_SCHED_SBYPASS_MODE2, 0x30002); wpi_prph_write(sc, WPI_ALM_SCHED_TXF4MF, 4); wpi_prph_write(sc, WPI_ALM_SCHED_TXF5MF, 5); /* Set physical address of TX rings. */ WPI_WRITE(sc, WPI_FH_TX_BASE, sc->shared_dma.paddr); WPI_WRITE(sc, WPI_FH_MSG_CONFIG, 0xffff05a5); /* Enable all DMA channels. */ for (chnl = 0; chnl < WPI_NDMACHNLS; chnl++) { WPI_WRITE(sc, WPI_FH_CBBC_CTRL(chnl), 0); WPI_WRITE(sc, WPI_FH_CBBC_BASE(chnl), 0); WPI_WRITE(sc, WPI_FH_TX_CONFIG(chnl), 0x80200008); } wpi_nic_unlock(sc); (void)WPI_READ(sc, WPI_FH_TX_BASE); /* barrier */ /* Clear "radio off" and "commands blocked" bits. */ WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_RFKILL); WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_CMD_BLOCKED); /* Clear pending interrupts. */ WPI_WRITE(sc, WPI_INT, 0xffffffff); /* Enable interrupts. */ WPI_WRITE(sc, WPI_INT_MASK, WPI_INT_MASK_DEF); /* _Really_ make sure "radio off" bit is cleared! */ WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_RFKILL); WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_RFKILL); if ((error = wpi_load_firmware(sc)) != 0) { device_printf(sc->sc_dev, "%s: could not load firmware, error %d\n", __func__, error); return error; } /* Wait at most one second for firmware alive notification. */ if ((error = mtx_sleep(sc, &sc->sc_mtx, PCATCH, "wpiinit", hz)) != 0) { device_printf(sc->sc_dev, "%s: timeout waiting for adapter to initialize, error %d\n", __func__, error); return error; } DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); /* Do post-firmware initialization. */ return wpi_post_alive(sc); } static void wpi_hw_stop(struct wpi_softc *sc) { uint8_t chnl, qid; int ntries; DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); if (WPI_READ(sc, WPI_UCODE_GP1) & WPI_UCODE_GP1_MAC_SLEEP) wpi_nic_lock(sc); WPI_WRITE(sc, WPI_RESET, WPI_RESET_NEVO); /* Disable interrupts. */ WPI_WRITE(sc, WPI_INT_MASK, 0); WPI_WRITE(sc, WPI_INT, 0xffffffff); WPI_WRITE(sc, WPI_FH_INT, 0xffffffff); /* Make sure we no longer hold the NIC lock. 
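   The lock may still be held from the conditional wpi_nic_lock() above
   (taken when the MAC was asleep), so release it unconditionally here; the
   scheduler/DMA shutdown below then takes and drops the lock on its own.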
*/ wpi_nic_unlock(sc); if (wpi_nic_lock(sc) == 0) { /* Stop TX scheduler. */ wpi_prph_write(sc, WPI_ALM_SCHED_MODE, 0); wpi_prph_write(sc, WPI_ALM_SCHED_TXFACT, 0); /* Stop all DMA channels. */ for (chnl = 0; chnl < WPI_NDMACHNLS; chnl++) { WPI_WRITE(sc, WPI_FH_TX_CONFIG(chnl), 0); for (ntries = 0; ntries < 200; ntries++) { if (WPI_READ(sc, WPI_FH_TX_STATUS) & WPI_FH_TX_STATUS_IDLE(chnl)) break; DELAY(10); } } wpi_nic_unlock(sc); } /* Stop RX ring. */ wpi_reset_rx_ring(sc); /* Reset all TX rings. */ for (qid = 0; qid < WPI_DRV_NTXQUEUES; qid++) wpi_reset_tx_ring(sc, &sc->txq[qid]); if (wpi_nic_lock(sc) == 0) { wpi_prph_write(sc, WPI_APMG_CLK_DIS, WPI_APMG_CLK_CTRL_DMA_CLK_RQT); wpi_nic_unlock(sc); } DELAY(5); /* Power OFF adapter. */ wpi_apm_stop(sc); } static void wpi_radio_on(void *arg0, int pending) { struct wpi_softc *sc = arg0; struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); device_printf(sc->sc_dev, "RF switch: radio enabled\n"); WPI_LOCK(sc); callout_stop(&sc->watchdog_rfkill); WPI_UNLOCK(sc); if (vap != NULL) ieee80211_init(vap); } static void wpi_radio_off(void *arg0, int pending) { struct wpi_softc *sc = arg0; struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); device_printf(sc->sc_dev, "RF switch: radio disabled\n"); ieee80211_notify_radio(ic, 0); wpi_stop(sc); if (vap != NULL) ieee80211_stop(vap); WPI_LOCK(sc); callout_reset(&sc->watchdog_rfkill, hz, wpi_watchdog_rfkill, sc); WPI_UNLOCK(sc); } static int wpi_init(struct wpi_softc *sc) { int error = 0; WPI_LOCK(sc); DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); if (sc->sc_running != 0) goto end; /* Check that the radio is not disabled by hardware switch. */ if (!(WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_RFKILL)) { device_printf(sc->sc_dev, "RF switch: radio disabled (%s)\n", __func__); callout_reset(&sc->watchdog_rfkill, hz, wpi_watchdog_rfkill, sc); error = EINPROGRESS; goto end; } /* Read firmware images from the filesystem. */ if ((error = wpi_read_firmware(sc)) != 0) { device_printf(sc->sc_dev, "%s: could not read firmware, error %d\n", __func__, error); goto end; } sc->sc_running = 1; /* Initialize hardware and upload firmware. */ error = wpi_hw_init(sc); wpi_unload_firmware(sc); if (error != 0) { device_printf(sc->sc_dev, "%s: could not initialize hardware, error %d\n", __func__, error); goto fail; } /* Configure adapter now that it is ready. */ if ((error = wpi_config(sc)) != 0) { device_printf(sc->sc_dev, "%s: could not configure device, error %d\n", __func__, error); goto fail; } DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); WPI_UNLOCK(sc); return 0; fail: wpi_stop_locked(sc); end: DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); WPI_UNLOCK(sc); return error; } static void wpi_stop_locked(struct wpi_softc *sc) { WPI_LOCK_ASSERT(sc); if (sc->sc_running == 0) return; WPI_TX_LOCK(sc); WPI_TXQ_LOCK(sc); sc->sc_running = 0; WPI_TXQ_UNLOCK(sc); WPI_TX_UNLOCK(sc); WPI_TXQ_STATE_LOCK(sc); callout_stop(&sc->tx_timeout); WPI_TXQ_STATE_UNLOCK(sc); WPI_RXON_LOCK(sc); callout_stop(&sc->scan_timeout); callout_stop(&sc->calib_to); WPI_RXON_UNLOCK(sc); /* Power OFF hardware. */ wpi_hw_stop(sc); } static void wpi_stop(struct wpi_softc *sc) { WPI_LOCK(sc); wpi_stop_locked(sc); WPI_UNLOCK(sc); } /* * Callback from net80211 to start a scan. */ static void wpi_scan_start(struct ieee80211com *ic) { struct wpi_softc *sc = ic->ic_softc; wpi_set_led(sc, WPI_LED_LINK, 20, 2); } /* * Callback from net80211 to terminate a scan. 
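 * If the VAP is still associated (RUN state) the link LED is switched back
 * to steady on, undoing the blink pattern programmed by wpi_scan_start();
 * nothing is sent to the firmware here since it finishes the scan on its own.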
 */
static void
wpi_scan_end(struct ieee80211com *ic)
{
	struct wpi_softc *sc = ic->ic_softc;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

	if (vap->iv_state == IEEE80211_S_RUN)
		wpi_set_led(sc, WPI_LED_LINK, 0, 1);
}

/**
 * Called by the net80211 framework to indicate to the driver
 * that the channel should be changed
 */
static void
wpi_set_channel(struct ieee80211com *ic)
{
	const struct ieee80211_channel *c = ic->ic_curchan;
	struct wpi_softc *sc = ic->ic_softc;
	int error;

	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);

	WPI_LOCK(sc);
	sc->sc_rxtap.wr_chan_freq = htole16(c->ic_freq);
	sc->sc_rxtap.wr_chan_flags = htole16(c->ic_flags);
	WPI_UNLOCK(sc);

	WPI_TX_LOCK(sc);
	sc->sc_txtap.wt_chan_freq = htole16(c->ic_freq);
	sc->sc_txtap.wt_chan_flags = htole16(c->ic_flags);
	WPI_TX_UNLOCK(sc);

	/*
	 * Only need to set the channel in Monitor mode. AP scanning and auth
	 * are already taken care of by their respective firmware commands.
	 */
	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		WPI_RXON_LOCK(sc);
		sc->rxon.chan = ieee80211_chan2ieee(ic, c);
		if (IEEE80211_IS_CHAN_2GHZ(c)) {
			sc->rxon.flags |= htole32(WPI_RXON_AUTO |
			    WPI_RXON_24GHZ);
		} else {
			sc->rxon.flags &= ~htole32(WPI_RXON_AUTO |
			    WPI_RXON_24GHZ);
		}
		if ((error = wpi_send_rxon(sc, 0, 1)) != 0)
			device_printf(sc->sc_dev,
			    "%s: error %d setting channel\n", __func__,
			    error);
		WPI_RXON_UNLOCK(sc);
	}
}

/**
 * Called by net80211 to indicate that we need to scan the current
 * channel. The channel has previously been set via the wpi_set_channel
 * callback.
 */
static void
wpi_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
{
	struct ieee80211vap *vap = ss->ss_vap;
	struct ieee80211com *ic = vap->iv_ic;
	struct wpi_softc *sc = ic->ic_softc;
	int error;

	WPI_RXON_LOCK(sc);
	error = wpi_scan(sc, ic->ic_curchan);
	WPI_RXON_UNLOCK(sc);
	if (error != 0)
		ieee80211_cancel_scan(vap);
}

/**
 * Called by the net80211 framework to indicate that the minimum dwell
 * time has been met and the scan should be terminated.
 * We don't actually terminate the scan as the firmware will notify
 * us when it's finished and we have no way to interrupt it.
 */
static void
wpi_scan_mindwell(struct ieee80211_scan_state *ss)
{
	/* NB: don't try to abort scan; wait for firmware to finish */
}
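/*
 * For reference, these scan/channel callbacks only take effect once they are
 * hooked into net80211; in this driver that is done in the attach path.  A
 * minimal sketch of the wiring (the field names are the standard struct
 * ieee80211com hooks; the exact placement within the attach routine is
 * assumed here):
 *
 *	ic->ic_scan_start = wpi_scan_start;
 *	ic->ic_scan_end = wpi_scan_end;
 *	ic->ic_set_channel = wpi_set_channel;
 *	ic->ic_scan_curchan = wpi_scan_curchan;
 *	ic->ic_scan_mindwell = wpi_scan_mindwell;
 */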