diff --git a/sys/net80211/ieee80211.c b/sys/net80211/ieee80211.c index 4af9450213ce..4878af65a4fe 100644 --- a/sys/net80211/ieee80211.c +++ b/sys/net80211/ieee80211.c @@ -1,2649 +1,2650 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2001 Atsushi Onoe * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * IEEE 802.11 generic handler */ #include "opt_wlan.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef IEEE80211_SUPPORT_SUPERG #include #endif #include #include #include const char *ieee80211_phymode_name[IEEE80211_MODE_MAX] = { [IEEE80211_MODE_AUTO] = "auto", [IEEE80211_MODE_11A] = "11a", [IEEE80211_MODE_11B] = "11b", [IEEE80211_MODE_11G] = "11g", [IEEE80211_MODE_FH] = "FH", [IEEE80211_MODE_TURBO_A] = "turboA", [IEEE80211_MODE_TURBO_G] = "turboG", [IEEE80211_MODE_STURBO_A] = "sturboA", [IEEE80211_MODE_HALF] = "half", [IEEE80211_MODE_QUARTER] = "quarter", [IEEE80211_MODE_11NA] = "11na", [IEEE80211_MODE_11NG] = "11ng", [IEEE80211_MODE_VHT_2GHZ] = "11acg", [IEEE80211_MODE_VHT_5GHZ] = "11ac", }; /* map ieee80211_opmode to the corresponding capability bit */ const int ieee80211_opcap[IEEE80211_OPMODE_MAX] = { [IEEE80211_M_IBSS] = IEEE80211_C_IBSS, [IEEE80211_M_WDS] = IEEE80211_C_WDS, [IEEE80211_M_STA] = IEEE80211_C_STA, [IEEE80211_M_AHDEMO] = IEEE80211_C_AHDEMO, [IEEE80211_M_HOSTAP] = IEEE80211_C_HOSTAP, [IEEE80211_M_MONITOR] = IEEE80211_C_MONITOR, #ifdef IEEE80211_SUPPORT_MESH [IEEE80211_M_MBSS] = IEEE80211_C_MBSS, #endif }; const uint8_t ieee80211broadcastaddr[IEEE80211_ADDR_LEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; static void ieee80211_syncflag_locked(struct ieee80211com *ic, int flag); static void ieee80211_syncflag_ht_locked(struct ieee80211com *ic, int flag); static void ieee80211_syncflag_ext_locked(struct ieee80211com *ic, int flag); static void ieee80211_syncflag_vht_locked(struct ieee80211com *ic, int flag); static int ieee80211_media_setup(struct ieee80211com *ic, struct ifmedia *media, int caps, int addsta, ifm_change_cb_t media_change, ifm_stat_cb_t media_stat); static int media_status(enum ieee80211_opmode, const struct ieee80211_channel *); static uint64_t ieee80211_get_counter(struct ifnet *, 
ift_counter); MALLOC_DEFINE(M_80211_VAP, "80211vap", "802.11 vap state"); /* * Default supported rates for 802.11 operation (in IEEE .5Mb units). */ #define B(r) ((r) | IEEE80211_RATE_BASIC) static const struct ieee80211_rateset ieee80211_rateset_11a = { 8, { B(12), 18, B(24), 36, B(48), 72, 96, 108 } }; static const struct ieee80211_rateset ieee80211_rateset_half = { 8, { B(6), 9, B(12), 18, B(24), 36, 48, 54 } }; static const struct ieee80211_rateset ieee80211_rateset_quarter = { 8, { B(3), 4, B(6), 9, B(12), 18, 24, 27 } }; static const struct ieee80211_rateset ieee80211_rateset_11b = { 4, { B(2), B(4), B(11), B(22) } }; /* NB: OFDM rates are handled specially based on mode */ static const struct ieee80211_rateset ieee80211_rateset_11g = { 12, { B(2), B(4), B(11), B(22), 12, 18, 24, 36, 48, 72, 96, 108 } }; #undef B static int set_vht_extchan(struct ieee80211_channel *c); /* * Fill in 802.11 available channel set, mark * all available channels as active, and pick * a default channel if not already specified. */ void ieee80211_chan_init(struct ieee80211com *ic) { #define DEFAULTRATES(m, def) do { \ if (ic->ic_sup_rates[m].rs_nrates == 0) \ ic->ic_sup_rates[m] = def; \ } while (0) struct ieee80211_channel *c; int i; KASSERT(0 < ic->ic_nchans && ic->ic_nchans <= IEEE80211_CHAN_MAX, ("invalid number of channels specified: %u", ic->ic_nchans)); memset(ic->ic_chan_avail, 0, sizeof(ic->ic_chan_avail)); memset(ic->ic_modecaps, 0, sizeof(ic->ic_modecaps)); setbit(ic->ic_modecaps, IEEE80211_MODE_AUTO); for (i = 0; i < ic->ic_nchans; i++) { c = &ic->ic_channels[i]; KASSERT(c->ic_flags != 0, ("channel with no flags")); /* * Help drivers that work only with frequencies by filling * in IEEE channel #'s if not already calculated. Note this * mimics similar work done in ieee80211_setregdomain when * changing regulatory state. */ if (c->ic_ieee == 0) c->ic_ieee = ieee80211_mhz2ieee(c->ic_freq,c->ic_flags); /* * Setup the HT40/VHT40 upper/lower bits. * The VHT80/... math is done elsewhere. */ if (IEEE80211_IS_CHAN_HT40(c) && c->ic_extieee == 0) c->ic_extieee = ieee80211_mhz2ieee(c->ic_freq + (IEEE80211_IS_CHAN_HT40U(c) ? 20 : -20), c->ic_flags); /* Update VHT math */ /* * XXX VHT again, note that this assumes VHT80/... channels * are legit already. */ set_vht_extchan(c); /* default max tx power to max regulatory */ if (c->ic_maxpower == 0) c->ic_maxpower = 2*c->ic_maxregpower; setbit(ic->ic_chan_avail, c->ic_ieee); /* * Identify mode capabilities. 
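 * That is, set the bit in ic_modecaps for every PHY mode that at
 * least one of the configured channels can support.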
*/ if (IEEE80211_IS_CHAN_A(c)) setbit(ic->ic_modecaps, IEEE80211_MODE_11A); if (IEEE80211_IS_CHAN_B(c)) setbit(ic->ic_modecaps, IEEE80211_MODE_11B); if (IEEE80211_IS_CHAN_ANYG(c)) setbit(ic->ic_modecaps, IEEE80211_MODE_11G); if (IEEE80211_IS_CHAN_FHSS(c)) setbit(ic->ic_modecaps, IEEE80211_MODE_FH); if (IEEE80211_IS_CHAN_108A(c)) setbit(ic->ic_modecaps, IEEE80211_MODE_TURBO_A); if (IEEE80211_IS_CHAN_108G(c)) setbit(ic->ic_modecaps, IEEE80211_MODE_TURBO_G); if (IEEE80211_IS_CHAN_ST(c)) setbit(ic->ic_modecaps, IEEE80211_MODE_STURBO_A); if (IEEE80211_IS_CHAN_HALF(c)) setbit(ic->ic_modecaps, IEEE80211_MODE_HALF); if (IEEE80211_IS_CHAN_QUARTER(c)) setbit(ic->ic_modecaps, IEEE80211_MODE_QUARTER); if (IEEE80211_IS_CHAN_HTA(c)) setbit(ic->ic_modecaps, IEEE80211_MODE_11NA); if (IEEE80211_IS_CHAN_HTG(c)) setbit(ic->ic_modecaps, IEEE80211_MODE_11NG); if (IEEE80211_IS_CHAN_VHTA(c)) setbit(ic->ic_modecaps, IEEE80211_MODE_VHT_5GHZ); if (IEEE80211_IS_CHAN_VHTG(c)) setbit(ic->ic_modecaps, IEEE80211_MODE_VHT_2GHZ); } /* initialize candidate channels to all available */ memcpy(ic->ic_chan_active, ic->ic_chan_avail, sizeof(ic->ic_chan_avail)); /* sort channel table to allow lookup optimizations */ ieee80211_sort_channels(ic->ic_channels, ic->ic_nchans); /* invalidate any previous state */ ic->ic_bsschan = IEEE80211_CHAN_ANYC; ic->ic_prevchan = NULL; ic->ic_csa_newchan = NULL; /* arbitrarily pick the first channel */ ic->ic_curchan = &ic->ic_channels[0]; ic->ic_rt = ieee80211_get_ratetable(ic->ic_curchan); /* fillin well-known rate sets if driver has not specified */ DEFAULTRATES(IEEE80211_MODE_11B, ieee80211_rateset_11b); DEFAULTRATES(IEEE80211_MODE_11G, ieee80211_rateset_11g); DEFAULTRATES(IEEE80211_MODE_11A, ieee80211_rateset_11a); DEFAULTRATES(IEEE80211_MODE_TURBO_A, ieee80211_rateset_11a); DEFAULTRATES(IEEE80211_MODE_TURBO_G, ieee80211_rateset_11g); DEFAULTRATES(IEEE80211_MODE_STURBO_A, ieee80211_rateset_11a); DEFAULTRATES(IEEE80211_MODE_HALF, ieee80211_rateset_half); DEFAULTRATES(IEEE80211_MODE_QUARTER, ieee80211_rateset_quarter); DEFAULTRATES(IEEE80211_MODE_11NA, ieee80211_rateset_11a); DEFAULTRATES(IEEE80211_MODE_11NG, ieee80211_rateset_11g); DEFAULTRATES(IEEE80211_MODE_VHT_2GHZ, ieee80211_rateset_11g); DEFAULTRATES(IEEE80211_MODE_VHT_5GHZ, ieee80211_rateset_11a); /* * Setup required information to fill the mcsset field, if driver did * not. Assume a 2T2R setup for historic reasons. */ if (ic->ic_rxstream == 0) ic->ic_rxstream = 2; if (ic->ic_txstream == 0) ic->ic_txstream = 2; ieee80211_init_suphtrates(ic); /* * Set auto mode to reset active channel state and any desired channel. */ (void) ieee80211_setmode(ic, IEEE80211_MODE_AUTO); #undef DEFAULTRATES } static void null_update_mcast(struct ieee80211com *ic) { ic_printf(ic, "need multicast update callback\n"); } static void null_update_promisc(struct ieee80211com *ic) { ic_printf(ic, "need promiscuous mode update callback\n"); } static void null_update_chw(struct ieee80211com *ic) { ic_printf(ic, "%s: need callback\n", __func__); } int ic_printf(struct ieee80211com *ic, const char * fmt, ...) 
{ va_list ap; int retval; retval = printf("%s: ", ic->ic_name); va_start(ap, fmt); retval += vprintf(fmt, ap); va_end(ap); return (retval); } static LIST_HEAD(, ieee80211com) ic_head = LIST_HEAD_INITIALIZER(ic_head); static struct mtx ic_list_mtx; MTX_SYSINIT(ic_list, &ic_list_mtx, "ieee80211com list", MTX_DEF); static int sysctl_ieee80211coms(SYSCTL_HANDLER_ARGS) { struct ieee80211com *ic; struct sbuf sb; char *sp; int error; error = sysctl_wire_old_buffer(req, 0); if (error) return (error); sbuf_new_for_sysctl(&sb, NULL, 8, req); sbuf_clear_flags(&sb, SBUF_INCLUDENUL); sp = ""; mtx_lock(&ic_list_mtx); LIST_FOREACH(ic, &ic_head, ic_next) { sbuf_printf(&sb, "%s%s", sp, ic->ic_name); sp = " "; } mtx_unlock(&ic_list_mtx); error = sbuf_finish(&sb); sbuf_delete(&sb); return (error); } SYSCTL_PROC(_net_wlan, OID_AUTO, devices, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0, sysctl_ieee80211coms, "A", "names of available 802.11 devices"); /* * Attach/setup the common net80211 state. Called by * the driver on attach to prior to creating any vap's. */ void ieee80211_ifattach(struct ieee80211com *ic) { IEEE80211_LOCK_INIT(ic, ic->ic_name); IEEE80211_TX_LOCK_INIT(ic, ic->ic_name); TAILQ_INIT(&ic->ic_vaps); /* Create a taskqueue for all state changes */ - ic->ic_tq = taskqueue_create("ic_taskq", M_WAITOK | M_ZERO, + ic->ic_tq = taskqueue_create("ic_taskq", + IEEE80211_M_WAITOK | IEEE80211_M_ZERO, taskqueue_thread_enqueue, &ic->ic_tq); taskqueue_start_threads(&ic->ic_tq, 1, PI_NET, "%s net80211 taskq", ic->ic_name); - ic->ic_ierrors = counter_u64_alloc(M_WAITOK); - ic->ic_oerrors = counter_u64_alloc(M_WAITOK); + ic->ic_ierrors = counter_u64_alloc(IEEE80211_M_WAITOK); + ic->ic_oerrors = counter_u64_alloc(IEEE80211_M_WAITOK); /* * Fill in 802.11 available channel set, mark all * available channels as active, and pick a default * channel if not already specified. */ ieee80211_chan_init(ic); ic->ic_update_mcast = null_update_mcast; ic->ic_update_promisc = null_update_promisc; ic->ic_update_chw = null_update_chw; ic->ic_hash_key = arc4random(); ic->ic_bintval = IEEE80211_BINTVAL_DEFAULT; ic->ic_lintval = ic->ic_bintval; ic->ic_txpowlimit = IEEE80211_TXPOWER_MAX; ieee80211_crypto_attach(ic); ieee80211_node_attach(ic); ieee80211_power_attach(ic); ieee80211_proto_attach(ic); #ifdef IEEE80211_SUPPORT_SUPERG ieee80211_superg_attach(ic); #endif ieee80211_ht_attach(ic); ieee80211_vht_attach(ic); ieee80211_scan_attach(ic); ieee80211_regdomain_attach(ic); ieee80211_dfs_attach(ic); ieee80211_sysctl_attach(ic); mtx_lock(&ic_list_mtx); LIST_INSERT_HEAD(&ic_head, ic, ic_next); mtx_unlock(&ic_list_mtx); } /* * Detach net80211 state on device detach. Tear down * all vap's and reclaim all common state prior to the * device state going away. Note we may call back into * driver; it must be prepared for this. */ void ieee80211_ifdetach(struct ieee80211com *ic) { struct ieee80211vap *vap; /* * We use this as an indicator that ifattach never had a chance to be * called, e.g. early driver attach failed and ifdetach was called * during subsequent detach. Never fear, for we have nothing to do * here. */ if (ic->ic_tq == NULL) return; mtx_lock(&ic_list_mtx); LIST_REMOVE(ic, ic_next); mtx_unlock(&ic_list_mtx); taskqueue_drain(taskqueue_thread, &ic->ic_restart_task); /* * The VAP is responsible for setting and clearing * the VIMAGE context. 
*/ while ((vap = TAILQ_FIRST(&ic->ic_vaps)) != NULL) { ieee80211_com_vdetach(vap); ieee80211_vap_destroy(vap); } ieee80211_waitfor_parent(ic); ieee80211_sysctl_detach(ic); ieee80211_dfs_detach(ic); ieee80211_regdomain_detach(ic); ieee80211_scan_detach(ic); #ifdef IEEE80211_SUPPORT_SUPERG ieee80211_superg_detach(ic); #endif ieee80211_vht_detach(ic); ieee80211_ht_detach(ic); /* NB: must be called before ieee80211_node_detach */ ieee80211_proto_detach(ic); ieee80211_crypto_detach(ic); ieee80211_power_detach(ic); ieee80211_node_detach(ic); counter_u64_free(ic->ic_ierrors); counter_u64_free(ic->ic_oerrors); taskqueue_free(ic->ic_tq); IEEE80211_TX_LOCK_DESTROY(ic); IEEE80211_LOCK_DESTROY(ic); } struct ieee80211com * ieee80211_find_com(const char *name) { struct ieee80211com *ic; mtx_lock(&ic_list_mtx); LIST_FOREACH(ic, &ic_head, ic_next) if (strcmp(ic->ic_name, name) == 0) break; mtx_unlock(&ic_list_mtx); return (ic); } void ieee80211_iterate_coms(ieee80211_com_iter_func *f, void *arg) { struct ieee80211com *ic; mtx_lock(&ic_list_mtx); LIST_FOREACH(ic, &ic_head, ic_next) (*f)(arg, ic); mtx_unlock(&ic_list_mtx); } /* * Default reset method for use with the ioctl support. This * method is invoked after any state change in the 802.11 * layer that should be propagated to the hardware but not * require re-initialization of the 802.11 state machine (e.g * rescanning for an ap). We always return ENETRESET which * should cause the driver to re-initialize the device. Drivers * can override this method to implement more optimized support. */ static int default_reset(struct ieee80211vap *vap, u_long cmd) { return ENETRESET; } /* * Default for updating the VAP default TX key index. * * Drivers that support TX offload as well as hardware encryption offload * may need to be informed of key index changes separate from the key * update. */ static void default_update_deftxkey(struct ieee80211vap *vap, ieee80211_keyix kid) { /* XXX assert validity */ /* XXX assert we're in a key update block */ vap->iv_def_txkey = kid; } /* * Add underlying device errors to vap errors. */ static uint64_t ieee80211_get_counter(struct ifnet *ifp, ift_counter cnt) { struct ieee80211vap *vap = ifp->if_softc; struct ieee80211com *ic = vap->iv_ic; uint64_t rv; rv = if_get_counter_default(ifp, cnt); switch (cnt) { case IFCOUNTER_OERRORS: rv += counter_u64_fetch(ic->ic_oerrors); break; case IFCOUNTER_IERRORS: rv += counter_u64_fetch(ic->ic_ierrors); break; default: break; } return (rv); } /* * Prepare a vap for use. Drivers use this call to * setup net80211 state in new vap's prior attaching * them with ieee80211_vap_attach (below). 
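 *
 * A minimal clone/create path, sketched here purely for illustration
 * (the driver-side variable names are hypothetical), looks roughly like:
 *
 *	vap = malloc(sizeof(*vap), M_80211_VAP, M_WAITOK | M_ZERO);
 *	error = ieee80211_vap_setup(ic, vap, name, unit, opmode,
 *	    flags, bssid);
 *	... driver-specific method overrides (iv_newstate, ...) ...
 *	ieee80211_vap_attach(vap, ieee80211_media_change,
 *	    ieee80211_media_status, mac);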
*/ int ieee80211_vap_setup(struct ieee80211com *ic, struct ieee80211vap *vap, const char name[IFNAMSIZ], int unit, enum ieee80211_opmode opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN]) { struct ifnet *ifp; ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { ic_printf(ic, "%s: unable to allocate ifnet\n", __func__); return ENOMEM; } if_initname(ifp, name, unit); ifp->if_softc = vap; /* back pointer */ ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST; ifp->if_transmit = ieee80211_vap_transmit; ifp->if_qflush = ieee80211_vap_qflush; ifp->if_ioctl = ieee80211_ioctl; ifp->if_init = ieee80211_init; ifp->if_get_counter = ieee80211_get_counter; vap->iv_ifp = ifp; vap->iv_ic = ic; vap->iv_flags = ic->ic_flags; /* propagate common flags */ vap->iv_flags_ext = ic->ic_flags_ext; vap->iv_flags_ven = ic->ic_flags_ven; vap->iv_caps = ic->ic_caps &~ IEEE80211_C_OPMODE; /* 11n capabilities - XXX methodize */ vap->iv_htcaps = ic->ic_htcaps; vap->iv_htextcaps = ic->ic_htextcaps; /* 11ac capabilities - XXX methodize */ vap->iv_vhtcaps = ic->ic_vhtcaps; vap->iv_vhtextcaps = ic->ic_vhtextcaps; vap->iv_opmode = opmode; vap->iv_caps |= ieee80211_opcap[opmode]; IEEE80211_ADDR_COPY(vap->iv_myaddr, ic->ic_macaddr); switch (opmode) { case IEEE80211_M_WDS: /* * WDS links must specify the bssid of the far end. * For legacy operation this is a static relationship. * For non-legacy operation the station must associate * and be authorized to pass traffic. Plumbing the * vap to the proper node happens when the vap * transitions to RUN state. */ IEEE80211_ADDR_COPY(vap->iv_des_bssid, bssid); vap->iv_flags |= IEEE80211_F_DESBSSID; if (flags & IEEE80211_CLONE_WDSLEGACY) vap->iv_flags_ext |= IEEE80211_FEXT_WDSLEGACY; break; #ifdef IEEE80211_SUPPORT_TDMA case IEEE80211_M_AHDEMO: if (flags & IEEE80211_CLONE_TDMA) { /* NB: checked before clone operation allowed */ KASSERT(ic->ic_caps & IEEE80211_C_TDMA, ("not TDMA capable, ic_caps 0x%x", ic->ic_caps)); /* * Propagate TDMA capability to mark vap; this * cannot be removed and is used to distinguish * regular ahdemo operation from ahdemo+tdma. */ vap->iv_caps |= IEEE80211_C_TDMA; } break; #endif default: break; } /* auto-enable s/w beacon miss support */ if (flags & IEEE80211_CLONE_NOBEACONS) vap->iv_flags_ext |= IEEE80211_FEXT_SWBMISS; /* auto-generated or user supplied MAC address */ if (flags & (IEEE80211_CLONE_BSSID|IEEE80211_CLONE_MACADDR)) vap->iv_flags_ext |= IEEE80211_FEXT_UNIQMAC; /* * Enable various functionality by default if we're * capable; the driver can override us if it knows better. 
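 * (e.g. WME and frame bursting are enabled below whenever the device
 * advertises IEEE80211_C_WME / IEEE80211_C_BURST).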
*/ if (vap->iv_caps & IEEE80211_C_WME) vap->iv_flags |= IEEE80211_F_WME; if (vap->iv_caps & IEEE80211_C_BURST) vap->iv_flags |= IEEE80211_F_BURST; /* NB: bg scanning only makes sense for station mode right now */ if (vap->iv_opmode == IEEE80211_M_STA && (vap->iv_caps & IEEE80211_C_BGSCAN)) vap->iv_flags |= IEEE80211_F_BGSCAN; vap->iv_flags |= IEEE80211_F_DOTH; /* XXX no cap, just ena */ /* NB: DFS support only makes sense for ap mode right now */ if (vap->iv_opmode == IEEE80211_M_HOSTAP && (vap->iv_caps & IEEE80211_C_DFS)) vap->iv_flags_ext |= IEEE80211_FEXT_DFS; /* NB: only flip on U-APSD for hostap/sta for now */ if ((vap->iv_opmode == IEEE80211_M_STA) || (vap->iv_opmode == IEEE80211_M_HOSTAP)) { if (vap->iv_caps & IEEE80211_C_UAPSD) vap->iv_flags_ext |= IEEE80211_FEXT_UAPSD; } vap->iv_des_chan = IEEE80211_CHAN_ANYC; /* any channel is ok */ vap->iv_bmissthreshold = IEEE80211_HWBMISS_DEFAULT; vap->iv_dtim_period = IEEE80211_DTIM_DEFAULT; /* * Install a default reset method for the ioctl support; * the driver can override this. */ vap->iv_reset = default_reset; /* * Install a default crypto key update method, the driver * can override this. */ vap->iv_update_deftxkey = default_update_deftxkey; ieee80211_sysctl_vattach(vap); ieee80211_crypto_vattach(vap); ieee80211_node_vattach(vap); ieee80211_power_vattach(vap); ieee80211_proto_vattach(vap); #ifdef IEEE80211_SUPPORT_SUPERG ieee80211_superg_vattach(vap); #endif ieee80211_ht_vattach(vap); ieee80211_vht_vattach(vap); ieee80211_scan_vattach(vap); ieee80211_regdomain_vattach(vap); ieee80211_radiotap_vattach(vap); ieee80211_vap_reset_erp(vap); ieee80211_ratectl_set(vap, IEEE80211_RATECTL_NONE); return 0; } /* * Activate a vap. State should have been prepared with a * call to ieee80211_vap_setup and by the driver. On return * from this call the vap is ready for use. */ int ieee80211_vap_attach(struct ieee80211vap *vap, ifm_change_cb_t media_change, ifm_stat_cb_t media_stat, const uint8_t macaddr[IEEE80211_ADDR_LEN]) { struct ifnet *ifp = vap->iv_ifp; struct ieee80211com *ic = vap->iv_ic; struct ifmediareq imr; int maxrate; IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE, "%s: %s parent %s flags 0x%x flags_ext 0x%x\n", __func__, ieee80211_opmode_name[vap->iv_opmode], ic->ic_name, vap->iv_flags, vap->iv_flags_ext); /* * Do late attach work that cannot happen until after * the driver has had a chance to override defaults. 
*/ ieee80211_node_latevattach(vap); ieee80211_power_latevattach(vap); maxrate = ieee80211_media_setup(ic, &vap->iv_media, vap->iv_caps, vap->iv_opmode == IEEE80211_M_STA, media_change, media_stat); ieee80211_media_status(ifp, &imr); /* NB: strip explicit mode; we're actually in autoselect */ ifmedia_set(&vap->iv_media, imr.ifm_active &~ (IFM_MMASK | IFM_IEEE80211_TURBO)); if (maxrate) ifp->if_baudrate = IF_Mbps(maxrate); ether_ifattach(ifp, macaddr); IEEE80211_ADDR_COPY(vap->iv_myaddr, IF_LLADDR(ifp)); /* hook output method setup by ether_ifattach */ vap->iv_output = ifp->if_output; ifp->if_output = ieee80211_output; /* NB: if_mtu set by ether_ifattach to ETHERMTU */ IEEE80211_LOCK(ic); TAILQ_INSERT_TAIL(&ic->ic_vaps, vap, iv_next); ieee80211_syncflag_locked(ic, IEEE80211_F_WME); #ifdef IEEE80211_SUPPORT_SUPERG ieee80211_syncflag_locked(ic, IEEE80211_F_TURBOP); #endif ieee80211_syncflag_locked(ic, IEEE80211_F_PCF); ieee80211_syncflag_locked(ic, IEEE80211_F_BURST); ieee80211_syncflag_ht_locked(ic, IEEE80211_FHT_HT); ieee80211_syncflag_ht_locked(ic, IEEE80211_FHT_USEHT40); ieee80211_syncflag_vht_locked(ic, IEEE80211_FVHT_VHT); ieee80211_syncflag_vht_locked(ic, IEEE80211_FVHT_USEVHT40); ieee80211_syncflag_vht_locked(ic, IEEE80211_FVHT_USEVHT80); ieee80211_syncflag_vht_locked(ic, IEEE80211_FVHT_USEVHT160); ieee80211_syncflag_vht_locked(ic, IEEE80211_FVHT_USEVHT80P80); IEEE80211_UNLOCK(ic); return 1; } /* * Tear down vap state and reclaim the ifnet. * The driver is assumed to have prepared for * this; e.g. by turning off interrupts for the * underlying device. */ void ieee80211_vap_detach(struct ieee80211vap *vap) { struct ieee80211com *ic = vap->iv_ic; struct ifnet *ifp = vap->iv_ifp; CURVNET_SET(ifp->if_vnet); IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE, "%s: %s parent %s\n", __func__, ieee80211_opmode_name[vap->iv_opmode], ic->ic_name); /* NB: bpfdetach is called by ether_ifdetach and claims all taps */ ether_ifdetach(ifp); ieee80211_stop(vap); /* * Flush any deferred vap tasks. 
*/ ieee80211_draintask(ic, &vap->iv_nstate_task); ieee80211_draintask(ic, &vap->iv_swbmiss_task); ieee80211_draintask(ic, &vap->iv_wme_task); ieee80211_draintask(ic, &ic->ic_parent_task); /* XXX band-aid until ifnet handles this for us */ taskqueue_drain(taskqueue_swi, &ifp->if_linktask); IEEE80211_LOCK(ic); KASSERT(vap->iv_state == IEEE80211_S_INIT , ("vap still running")); TAILQ_REMOVE(&ic->ic_vaps, vap, iv_next); ieee80211_syncflag_locked(ic, IEEE80211_F_WME); #ifdef IEEE80211_SUPPORT_SUPERG ieee80211_syncflag_locked(ic, IEEE80211_F_TURBOP); #endif ieee80211_syncflag_locked(ic, IEEE80211_F_PCF); ieee80211_syncflag_locked(ic, IEEE80211_F_BURST); ieee80211_syncflag_ht_locked(ic, IEEE80211_FHT_HT); ieee80211_syncflag_ht_locked(ic, IEEE80211_FHT_USEHT40); ieee80211_syncflag_vht_locked(ic, IEEE80211_FVHT_VHT); ieee80211_syncflag_vht_locked(ic, IEEE80211_FVHT_USEVHT40); ieee80211_syncflag_vht_locked(ic, IEEE80211_FVHT_USEVHT80); ieee80211_syncflag_vht_locked(ic, IEEE80211_FVHT_USEVHT160); ieee80211_syncflag_vht_locked(ic, IEEE80211_FVHT_USEVHT80P80); /* NB: this handles the bpfdetach done below */ ieee80211_syncflag_ext_locked(ic, IEEE80211_FEXT_BPF); if (vap->iv_ifflags & IFF_PROMISC) ieee80211_promisc(vap, false); if (vap->iv_ifflags & IFF_ALLMULTI) ieee80211_allmulti(vap, false); IEEE80211_UNLOCK(ic); ifmedia_removeall(&vap->iv_media); ieee80211_radiotap_vdetach(vap); ieee80211_regdomain_vdetach(vap); ieee80211_scan_vdetach(vap); #ifdef IEEE80211_SUPPORT_SUPERG ieee80211_superg_vdetach(vap); #endif ieee80211_vht_vdetach(vap); ieee80211_ht_vdetach(vap); /* NB: must be before ieee80211_node_vdetach */ ieee80211_proto_vdetach(vap); ieee80211_crypto_vdetach(vap); ieee80211_power_vdetach(vap); ieee80211_node_vdetach(vap); ieee80211_sysctl_vdetach(vap); if_free(ifp); CURVNET_RESTORE(); } /* * Count number of vaps in promisc, and issue promisc on * parent respectively. */ void ieee80211_promisc(struct ieee80211vap *vap, bool on) { struct ieee80211com *ic = vap->iv_ic; IEEE80211_LOCK_ASSERT(ic); if (on) { if (++ic->ic_promisc == 1) ieee80211_runtask(ic, &ic->ic_promisc_task); } else { KASSERT(ic->ic_promisc > 0, ("%s: ic %p not promisc", __func__, ic)); if (--ic->ic_promisc == 0) ieee80211_runtask(ic, &ic->ic_promisc_task); } } /* * Count number of vaps in allmulti, and issue allmulti on * parent respectively. */ void ieee80211_allmulti(struct ieee80211vap *vap, bool on) { struct ieee80211com *ic = vap->iv_ic; IEEE80211_LOCK_ASSERT(ic); if (on) { if (++ic->ic_allmulti == 1) ieee80211_runtask(ic, &ic->ic_mcast_task); } else { KASSERT(ic->ic_allmulti > 0, ("%s: ic %p not allmulti", __func__, ic)); if (--ic->ic_allmulti == 0) ieee80211_runtask(ic, &ic->ic_mcast_task); } } /* * Synchronize flag bit state in the com structure * according to the state of all vap's. This is used, * for example, to handle state changes via ioctls. */ static void ieee80211_syncflag_locked(struct ieee80211com *ic, int flag) { struct ieee80211vap *vap; int bit; IEEE80211_LOCK_ASSERT(ic); bit = 0; TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) if (vap->iv_flags & flag) { bit = 1; break; } if (bit) ic->ic_flags |= flag; else ic->ic_flags &= ~flag; } void ieee80211_syncflag(struct ieee80211vap *vap, int flag) { struct ieee80211com *ic = vap->iv_ic; IEEE80211_LOCK(ic); if (flag < 0) { flag = -flag; vap->iv_flags &= ~flag; } else vap->iv_flags |= flag; ieee80211_syncflag_locked(ic, flag); IEEE80211_UNLOCK(ic); } /* * Synchronize flags_ht bit state in the com structure * according to the state of all vap's. 
This is used, * for example, to handle state changes via ioctls. */ static void ieee80211_syncflag_ht_locked(struct ieee80211com *ic, int flag) { struct ieee80211vap *vap; int bit; IEEE80211_LOCK_ASSERT(ic); bit = 0; TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) if (vap->iv_flags_ht & flag) { bit = 1; break; } if (bit) ic->ic_flags_ht |= flag; else ic->ic_flags_ht &= ~flag; } void ieee80211_syncflag_ht(struct ieee80211vap *vap, int flag) { struct ieee80211com *ic = vap->iv_ic; IEEE80211_LOCK(ic); if (flag < 0) { flag = -flag; vap->iv_flags_ht &= ~flag; } else vap->iv_flags_ht |= flag; ieee80211_syncflag_ht_locked(ic, flag); IEEE80211_UNLOCK(ic); } /* * Synchronize flags_vht bit state in the com structure * according to the state of all vap's. This is used, * for example, to handle state changes via ioctls. */ static void ieee80211_syncflag_vht_locked(struct ieee80211com *ic, int flag) { struct ieee80211vap *vap; int bit; IEEE80211_LOCK_ASSERT(ic); bit = 0; TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) if (vap->iv_flags_vht & flag) { bit = 1; break; } if (bit) ic->ic_flags_vht |= flag; else ic->ic_flags_vht &= ~flag; } void ieee80211_syncflag_vht(struct ieee80211vap *vap, int flag) { struct ieee80211com *ic = vap->iv_ic; IEEE80211_LOCK(ic); if (flag < 0) { flag = -flag; vap->iv_flags_vht &= ~flag; } else vap->iv_flags_vht |= flag; ieee80211_syncflag_vht_locked(ic, flag); IEEE80211_UNLOCK(ic); } /* * Synchronize flags_ext bit state in the com structure * according to the state of all vap's. This is used, * for example, to handle state changes via ioctls. */ static void ieee80211_syncflag_ext_locked(struct ieee80211com *ic, int flag) { struct ieee80211vap *vap; int bit; IEEE80211_LOCK_ASSERT(ic); bit = 0; TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) if (vap->iv_flags_ext & flag) { bit = 1; break; } if (bit) ic->ic_flags_ext |= flag; else ic->ic_flags_ext &= ~flag; } void ieee80211_syncflag_ext(struct ieee80211vap *vap, int flag) { struct ieee80211com *ic = vap->iv_ic; IEEE80211_LOCK(ic); if (flag < 0) { flag = -flag; vap->iv_flags_ext &= ~flag; } else vap->iv_flags_ext |= flag; ieee80211_syncflag_ext_locked(ic, flag); IEEE80211_UNLOCK(ic); } static __inline int mapgsm(u_int freq, u_int flags) { freq *= 10; if (flags & IEEE80211_CHAN_QUARTER) freq += 5; else if (flags & IEEE80211_CHAN_HALF) freq += 10; else freq += 20; /* NB: there is no 907/20 wide but leave room */ return (freq - 906*10) / 5; } static __inline int mappsb(u_int freq, u_int flags) { return 37 + ((freq * 10) + ((freq % 5) == 2 ? 5 : 0) - 49400) / 5; } /* * Convert MHz frequency to IEEE channel number. */ int ieee80211_mhz2ieee(u_int freq, u_int flags) { #define IS_FREQ_IN_PSB(_freq) ((_freq) > 4940 && (_freq) < 4990) if (flags & IEEE80211_CHAN_GSM) return mapgsm(freq, flags); if (flags & IEEE80211_CHAN_2GHZ) { /* 2GHz band */ if (freq == 2484) return 14; if (freq < 2484) return ((int) freq - 2407) / 5; else return 15 + ((freq - 2512) / 20); } else if (flags & IEEE80211_CHAN_5GHZ) { /* 5Ghz band */ if (freq <= 5000) { /* XXX check regdomain? 
*/ if (IS_FREQ_IN_PSB(freq)) return mappsb(freq, flags); return (freq - 4000) / 5; } else return (freq - 5000) / 5; } else { /* either, guess */ if (freq == 2484) return 14; if (freq < 2484) { if (907 <= freq && freq <= 922) return mapgsm(freq, flags); return ((int) freq - 2407) / 5; } if (freq < 5000) { if (IS_FREQ_IN_PSB(freq)) return mappsb(freq, flags); else if (freq > 4900) return (freq - 4000) / 5; else return 15 + ((freq - 2512) / 20); } return (freq - 5000) / 5; } #undef IS_FREQ_IN_PSB } /* * Convert channel to IEEE channel number. */ int ieee80211_chan2ieee(struct ieee80211com *ic, const struct ieee80211_channel *c) { if (c == NULL) { ic_printf(ic, "invalid channel (NULL)\n"); return 0; /* XXX */ } return (c == IEEE80211_CHAN_ANYC ? IEEE80211_CHAN_ANY : c->ic_ieee); } /* * Convert IEEE channel number to MHz frequency. */ u_int ieee80211_ieee2mhz(u_int chan, u_int flags) { if (flags & IEEE80211_CHAN_GSM) return 907 + 5 * (chan / 10); if (flags & IEEE80211_CHAN_2GHZ) { /* 2GHz band */ if (chan == 14) return 2484; if (chan < 14) return 2407 + chan*5; else return 2512 + ((chan-15)*20); } else if (flags & IEEE80211_CHAN_5GHZ) {/* 5Ghz band */ if (flags & (IEEE80211_CHAN_HALF|IEEE80211_CHAN_QUARTER)) { chan -= 37; return 4940 + chan*5 + (chan % 5 ? 2 : 0); } return 5000 + (chan*5); } else { /* either, guess */ /* XXX can't distinguish PSB+GSM channels */ if (chan == 14) return 2484; if (chan < 14) /* 0-13 */ return 2407 + chan*5; if (chan < 27) /* 15-26 */ return 2512 + ((chan-15)*20); return 5000 + (chan*5); } } static __inline void set_extchan(struct ieee80211_channel *c) { /* * IEEE Std 802.11-2012, page 1738, subclause 20.3.15.4: * "the secondary channel number shall be 'N + [1,-1] * 4' */ if (c->ic_flags & IEEE80211_CHAN_HT40U) c->ic_extieee = c->ic_ieee + 4; else if (c->ic_flags & IEEE80211_CHAN_HT40D) c->ic_extieee = c->ic_ieee - 4; else c->ic_extieee = 0; } /* * Populate the freq1/freq2 fields as appropriate for VHT channels. * * This for now uses a hard-coded list of 80MHz wide channels. * * For HT20/HT40, freq1 just is the centre frequency of the 40MHz * wide channel we've already decided upon. * * For VHT80 and VHT160, there are only a small number of fixed * 80/160MHz wide channels, so we just use those. * * This is all likely very very wrong - both the regulatory code * and this code needs to ensure that all four channels are * available and valid before the VHT80 (and eight for VHT160) channel * is created. */ struct vht_chan_range { uint16_t freq_start; uint16_t freq_end; }; struct vht_chan_range vht80_chan_ranges[] = { { 5170, 5250 }, { 5250, 5330 }, { 5490, 5570 }, { 5570, 5650 }, { 5650, 5730 }, { 5735, 5815 }, { 0, 0 } }; struct vht_chan_range vht160_chan_ranges[] = { { 5170, 5330 }, { 5490, 5650 }, { 0, 0 } }; static int set_vht_extchan(struct ieee80211_channel *c) { int i; if (! 
IEEE80211_IS_CHAN_VHT(c)) return (0); if (IEEE80211_IS_CHAN_VHT80P80(c)) { printf("%s: TODO VHT80+80 channel (ieee=%d, flags=0x%08x)\n", __func__, c->ic_ieee, c->ic_flags); } if (IEEE80211_IS_CHAN_VHT160(c)) { for (i = 0; vht160_chan_ranges[i].freq_start != 0; i++) { if (c->ic_freq >= vht160_chan_ranges[i].freq_start && c->ic_freq < vht160_chan_ranges[i].freq_end) { int midpoint; midpoint = vht160_chan_ranges[i].freq_start + 80; c->ic_vht_ch_freq1 = ieee80211_mhz2ieee(midpoint, c->ic_flags); c->ic_vht_ch_freq2 = 0; #if 0 printf("%s: %d, freq=%d, midpoint=%d, freq1=%d, freq2=%d\n", __func__, c->ic_ieee, c->ic_freq, midpoint, c->ic_vht_ch_freq1, c->ic_vht_ch_freq2); #endif return (1); } } return (0); } if (IEEE80211_IS_CHAN_VHT80(c)) { for (i = 0; vht80_chan_ranges[i].freq_start != 0; i++) { if (c->ic_freq >= vht80_chan_ranges[i].freq_start && c->ic_freq < vht80_chan_ranges[i].freq_end) { int midpoint; midpoint = vht80_chan_ranges[i].freq_start + 40; c->ic_vht_ch_freq1 = ieee80211_mhz2ieee(midpoint, c->ic_flags); c->ic_vht_ch_freq2 = 0; #if 0 printf("%s: %d, freq=%d, midpoint=%d, freq1=%d, freq2=%d\n", __func__, c->ic_ieee, c->ic_freq, midpoint, c->ic_vht_ch_freq1, c->ic_vht_ch_freq2); #endif return (1); } } return (0); } if (IEEE80211_IS_CHAN_VHT40(c)) { if (IEEE80211_IS_CHAN_HT40U(c)) c->ic_vht_ch_freq1 = c->ic_ieee + 2; else if (IEEE80211_IS_CHAN_HT40D(c)) c->ic_vht_ch_freq1 = c->ic_ieee - 2; else return (0); return (1); } if (IEEE80211_IS_CHAN_VHT20(c)) { c->ic_vht_ch_freq1 = c->ic_ieee; return (1); } printf("%s: unknown VHT channel type (ieee=%d, flags=0x%08x)\n", __func__, c->ic_ieee, c->ic_flags); return (0); } /* * Return whether the current channel could possibly be a part of * a VHT80/VHT160 channel. * * This doesn't check that the whole range is in the allowed list * according to regulatory. */ static bool is_vht160_valid_freq(uint16_t freq) { int i; for (i = 0; vht160_chan_ranges[i].freq_start != 0; i++) { if (freq >= vht160_chan_ranges[i].freq_start && freq < vht160_chan_ranges[i].freq_end) return (true); } return (false); } static int is_vht80_valid_freq(uint16_t freq) { int i; for (i = 0; vht80_chan_ranges[i].freq_start != 0; i++) { if (freq >= vht80_chan_ranges[i].freq_start && freq < vht80_chan_ranges[i].freq_end) return (1); } return (0); } static int addchan(struct ieee80211_channel chans[], int maxchans, int *nchans, uint8_t ieee, uint16_t freq, int8_t maxregpower, uint32_t flags) { struct ieee80211_channel *c; if (*nchans >= maxchans) return (ENOBUFS); #if 0 printf("%s: %d of %d: ieee=%d, freq=%d, flags=0x%08x\n", __func__, *nchans, maxchans, ieee, freq, flags); #endif c = &chans[(*nchans)++]; c->ic_ieee = ieee; c->ic_freq = freq != 0 ? 
freq : ieee80211_ieee2mhz(ieee, flags); c->ic_maxregpower = maxregpower; c->ic_maxpower = 2 * maxregpower; c->ic_flags = flags; c->ic_vht_ch_freq1 = 0; c->ic_vht_ch_freq2 = 0; set_extchan(c); set_vht_extchan(c); return (0); } static int copychan_prev(struct ieee80211_channel chans[], int maxchans, int *nchans, uint32_t flags) { struct ieee80211_channel *c; KASSERT(*nchans > 0, ("channel list is empty\n")); if (*nchans >= maxchans) return (ENOBUFS); #if 0 printf("%s: %d of %d: flags=0x%08x\n", __func__, *nchans, maxchans, flags); #endif c = &chans[(*nchans)++]; c[0] = c[-1]; c->ic_flags = flags; c->ic_vht_ch_freq1 = 0; c->ic_vht_ch_freq2 = 0; set_extchan(c); set_vht_extchan(c); return (0); } /* * XXX VHT-2GHz */ static void getflags_2ghz(const uint8_t bands[], uint32_t flags[], int cbw_flags) { int nmodes; nmodes = 0; if (isset(bands, IEEE80211_MODE_11B)) flags[nmodes++] = IEEE80211_CHAN_B; if (isset(bands, IEEE80211_MODE_11G)) flags[nmodes++] = IEEE80211_CHAN_G; if (isset(bands, IEEE80211_MODE_11NG)) flags[nmodes++] = IEEE80211_CHAN_G | IEEE80211_CHAN_HT20; if (cbw_flags & NET80211_CBW_FLAG_HT40) { flags[nmodes++] = IEEE80211_CHAN_G | IEEE80211_CHAN_HT40U; flags[nmodes++] = IEEE80211_CHAN_G | IEEE80211_CHAN_HT40D; } flags[nmodes] = 0; } static void getflags_5ghz(const uint8_t bands[], uint32_t flags[], int cbw_flags) { int nmodes; /* * The addchan_list() function seems to expect the flags array to * be in channel width order, so the VHT bits are interspersed * as appropriate to maintain said order. * * It also assumes HT40U is before HT40D. */ nmodes = 0; /* 20MHz */ if (isset(bands, IEEE80211_MODE_11A)) flags[nmodes++] = IEEE80211_CHAN_A; if (isset(bands, IEEE80211_MODE_11NA)) flags[nmodes++] = IEEE80211_CHAN_A | IEEE80211_CHAN_HT20; if (isset(bands, IEEE80211_MODE_VHT_5GHZ)) { flags[nmodes++] = IEEE80211_CHAN_A | IEEE80211_CHAN_HT20 | IEEE80211_CHAN_VHT20; } /* 40MHz */ if (cbw_flags & NET80211_CBW_FLAG_HT40) flags[nmodes++] = IEEE80211_CHAN_A | IEEE80211_CHAN_HT40U; if ((cbw_flags & NET80211_CBW_FLAG_HT40) && isset(bands, IEEE80211_MODE_VHT_5GHZ)) flags[nmodes++] = IEEE80211_CHAN_A | IEEE80211_CHAN_HT40U | IEEE80211_CHAN_VHT40U; if (cbw_flags & NET80211_CBW_FLAG_HT40) flags[nmodes++] = IEEE80211_CHAN_A | IEEE80211_CHAN_HT40D; if ((cbw_flags & NET80211_CBW_FLAG_HT40) && isset(bands, IEEE80211_MODE_VHT_5GHZ)) flags[nmodes++] = IEEE80211_CHAN_A | IEEE80211_CHAN_HT40D | IEEE80211_CHAN_VHT40D; /* 80MHz */ if ((cbw_flags & NET80211_CBW_FLAG_VHT80) && isset(bands, IEEE80211_MODE_VHT_5GHZ)) { flags[nmodes++] = IEEE80211_CHAN_A | IEEE80211_CHAN_HT40U | IEEE80211_CHAN_VHT80; flags[nmodes++] = IEEE80211_CHAN_A | IEEE80211_CHAN_HT40D | IEEE80211_CHAN_VHT80; } /* VHT160 */ if ((cbw_flags & NET80211_CBW_FLAG_VHT160) && isset(bands, IEEE80211_MODE_VHT_5GHZ)) { flags[nmodes++] = IEEE80211_CHAN_A | IEEE80211_CHAN_HT40U | IEEE80211_CHAN_VHT160; flags[nmodes++] = IEEE80211_CHAN_A | IEEE80211_CHAN_HT40D | IEEE80211_CHAN_VHT160; } /* VHT80+80 */ if ((cbw_flags & NET80211_CBW_FLAG_VHT80P80) && isset(bands, IEEE80211_MODE_VHT_5GHZ)) { flags[nmodes++] = IEEE80211_CHAN_A | IEEE80211_CHAN_HT40U | IEEE80211_CHAN_VHT80P80; flags[nmodes++] = IEEE80211_CHAN_A | IEEE80211_CHAN_HT40D | IEEE80211_CHAN_VHT80P80; } flags[nmodes] = 0; } static void getflags(const uint8_t bands[], uint32_t flags[], int cbw_flags) { flags[0] = 0; if (isset(bands, IEEE80211_MODE_11A) || isset(bands, IEEE80211_MODE_11NA) || isset(bands, IEEE80211_MODE_VHT_5GHZ)) { if (isset(bands, IEEE80211_MODE_11B) || isset(bands, IEEE80211_MODE_11G) || 
isset(bands, IEEE80211_MODE_11NG) || isset(bands, IEEE80211_MODE_VHT_2GHZ)) return; getflags_5ghz(bands, flags, cbw_flags); } else getflags_2ghz(bands, flags, cbw_flags); } /* * Add one 20 MHz channel into specified channel list. * You MUST NOT mix bands when calling this. It will not add 5ghz * channels if you have any B/G/N band bit set. * The _cbw() variant does also support HT40/VHT80/160/80+80. */ int ieee80211_add_channel_cbw(struct ieee80211_channel chans[], int maxchans, int *nchans, uint8_t ieee, uint16_t freq, int8_t maxregpower, uint32_t chan_flags, const uint8_t bands[], int cbw_flags) { uint32_t flags[IEEE80211_MODE_MAX]; int i, error; getflags(bands, flags, cbw_flags); KASSERT(flags[0] != 0, ("%s: no correct mode provided\n", __func__)); error = addchan(chans, maxchans, nchans, ieee, freq, maxregpower, flags[0] | chan_flags); for (i = 1; flags[i] != 0 && error == 0; i++) { error = copychan_prev(chans, maxchans, nchans, flags[i] | chan_flags); } return (error); } int ieee80211_add_channel(struct ieee80211_channel chans[], int maxchans, int *nchans, uint8_t ieee, uint16_t freq, int8_t maxregpower, uint32_t chan_flags, const uint8_t bands[]) { return (ieee80211_add_channel_cbw(chans, maxchans, nchans, ieee, freq, maxregpower, chan_flags, bands, 0)); } static struct ieee80211_channel * findchannel(struct ieee80211_channel chans[], int nchans, uint16_t freq, uint32_t flags) { struct ieee80211_channel *c; int i; flags &= IEEE80211_CHAN_ALLTURBO; /* brute force search */ for (i = 0; i < nchans; i++) { c = &chans[i]; if (c->ic_freq == freq && (c->ic_flags & IEEE80211_CHAN_ALLTURBO) == flags) return c; } return NULL; } /* * Add 40 MHz channel pair into specified channel list. */ /* XXX VHT */ int ieee80211_add_channel_ht40(struct ieee80211_channel chans[], int maxchans, int *nchans, uint8_t ieee, int8_t maxregpower, uint32_t flags) { struct ieee80211_channel *cent, *extc; uint16_t freq; int error; freq = ieee80211_ieee2mhz(ieee, flags); /* * Each entry defines an HT40 channel pair; find the * center channel, then the extension channel above. */ flags |= IEEE80211_CHAN_HT20; cent = findchannel(chans, *nchans, freq, flags); if (cent == NULL) return (EINVAL); extc = findchannel(chans, *nchans, freq + 20, flags); if (extc == NULL) return (ENOENT); flags &= ~IEEE80211_CHAN_HT; error = addchan(chans, maxchans, nchans, cent->ic_ieee, cent->ic_freq, maxregpower, flags | IEEE80211_CHAN_HT40U); if (error != 0) return (error); error = addchan(chans, maxchans, nchans, extc->ic_ieee, extc->ic_freq, maxregpower, flags | IEEE80211_CHAN_HT40D); return (error); } /* * Fetch the center frequency for the primary channel. */ uint32_t ieee80211_get_channel_center_freq(const struct ieee80211_channel *c) { return (c->ic_freq); } /* * Fetch the center frequency for the primary BAND channel. * * For 5, 10, 20MHz channels it'll be the normally configured channel * frequency. * * For 40MHz, 80MHz, 160Mhz channels it'll the the centre of the * wide channel, not the centre of the primary channel (that's ic_freq). * * For 80+80MHz channels this will be the centre of the primary * 80MHz channel; the secondary 80MHz channel will be center_freq2(). */ uint32_t ieee80211_get_channel_center_freq1(const struct ieee80211_channel *c) { /* * VHT - use the pre-calculated centre frequency * of the given channel. 
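 *
 * Worked example (from the tables above): a VHT80 channel whose
 * primary 20MHz channel is 36 (5180 MHz) lies in the 5170-5250 range,
 * so ic_vht_ch_freq1 is 42 and this returns 5210 MHz; a plain HT40U
 * channel at 5180 MHz simply returns 5190 MHz.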
*/ if (IEEE80211_IS_CHAN_VHT(c)) return (ieee80211_ieee2mhz(c->ic_vht_ch_freq1, c->ic_flags)); if (IEEE80211_IS_CHAN_HT40U(c)) { return (c->ic_freq + 10); } if (IEEE80211_IS_CHAN_HT40D(c)) { return (c->ic_freq - 10); } return (c->ic_freq); } /* * For now, no 80+80 support; it will likely always return 0. */ uint32_t ieee80211_get_channel_center_freq2(const struct ieee80211_channel *c) { if (IEEE80211_IS_CHAN_VHT(c) && (c->ic_vht_ch_freq2 != 0)) return (ieee80211_ieee2mhz(c->ic_vht_ch_freq2, c->ic_flags)); return (0); } /* * Adds channels into specified channel list (ieee[] array must be sorted). * Channels are already sorted. */ static int add_chanlist(struct ieee80211_channel chans[], int maxchans, int *nchans, const uint8_t ieee[], int nieee, uint32_t flags[]) { uint16_t freq; int i, j, error; int is_vht; for (i = 0; i < nieee; i++) { freq = ieee80211_ieee2mhz(ieee[i], flags[0]); for (j = 0; flags[j] != 0; j++) { /* * Notes: * + HT40 and VHT40 channels occur together, so * we need to be careful that we actually allow that. * + VHT80, VHT160 will coexist with HT40/VHT40, so * make sure it's not skipped because of the overlap * check used for (V)HT40. */ is_vht = !! (flags[j] & IEEE80211_CHAN_VHT); /* XXX TODO FIXME VHT80P80. */ /* Test for VHT160 analogue to the VHT80 below. */ if (is_vht && flags[j] & IEEE80211_CHAN_VHT160) if (! is_vht160_valid_freq(freq)) continue; /* * Test for VHT80. * XXX This is all very broken right now. * What we /should/ do is: * * + check that the frequency is in the list of * allowed VHT80 ranges; and * + the other 3 channels in the list are actually * also available. */ if (is_vht && flags[j] & IEEE80211_CHAN_VHT80) if (! is_vht80_valid_freq(freq)) continue; /* * Test for (V)HT40. * * This is also a fall through from VHT80; as we only * allow a VHT80 channel if the VHT40 combination is * also valid. If the VHT40 form is not valid then * we certainly can't do VHT80.. */ if (flags[j] & IEEE80211_CHAN_HT40D) /* * Can't have a "lower" channel if we are the * first channel. * * Can't have a "lower" channel if it's below/ * within 20MHz of the first channel. * * Can't have a "lower" channel if the channel * below it is not 20MHz away. */ if (i == 0 || ieee[i] < ieee[0] + 4 || freq - 20 != ieee80211_ieee2mhz(ieee[i] - 4, flags[j])) continue; if (flags[j] & IEEE80211_CHAN_HT40U) /* * Can't have an "upper" channel if we are * the last channel. * * Can't have an "upper" channel be above the * last channel in the list. * * Can't have an "upper" channel if the next * channel according to the math isn't 20MHz * away. (Likely for channel 13/14.) 
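 *
 * For example, with a 5 GHz list starting at channel 36, channel 36
 * is accepted as an HT40U channel (extension above, pairing with
 * channel 40) but rejected as HT40D, since there is no channel 32
 * below it in the list.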
*/ if (i == nieee - 1 || ieee[i] + 4 > ieee[nieee - 1] || freq + 20 != ieee80211_ieee2mhz(ieee[i] + 4, flags[j])) continue; if (j == 0) { error = addchan(chans, maxchans, nchans, ieee[i], freq, 0, flags[j]); } else { error = copychan_prev(chans, maxchans, nchans, flags[j]); } if (error != 0) return (error); } } return (0); } int ieee80211_add_channel_list_2ghz(struct ieee80211_channel chans[], int maxchans, int *nchans, const uint8_t ieee[], int nieee, const uint8_t bands[], int cbw_flags) { uint32_t flags[IEEE80211_MODE_MAX]; /* XXX no VHT for now */ getflags_2ghz(bands, flags, cbw_flags); KASSERT(flags[0] != 0, ("%s: no correct mode provided\n", __func__)); return (add_chanlist(chans, maxchans, nchans, ieee, nieee, flags)); } int ieee80211_add_channels_default_2ghz(struct ieee80211_channel chans[], int maxchans, int *nchans, const uint8_t bands[], int cbw_flags) { const uint8_t default_chan_list[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 }; return (ieee80211_add_channel_list_2ghz(chans, maxchans, nchans, default_chan_list, nitems(default_chan_list), bands, cbw_flags)); } int ieee80211_add_channel_list_5ghz(struct ieee80211_channel chans[], int maxchans, int *nchans, const uint8_t ieee[], int nieee, const uint8_t bands[], int cbw_flags) { /* * XXX-BZ with HT and VHT there is no 1:1 mapping anymore. Review all * uses of IEEE80211_MODE_MAX and add a new #define name for array size. */ uint32_t flags[2 * IEEE80211_MODE_MAX]; getflags_5ghz(bands, flags, cbw_flags); KASSERT(flags[0] != 0, ("%s: no correct mode provided\n", __func__)); return (add_chanlist(chans, maxchans, nchans, ieee, nieee, flags)); } /* * Locate a channel given a frequency+flags. We cache * the previous lookup to optimize switching between two * channels--as happens with dynamic turbo. */ struct ieee80211_channel * ieee80211_find_channel(struct ieee80211com *ic, int freq, int flags) { struct ieee80211_channel *c; flags &= IEEE80211_CHAN_ALLTURBO; c = ic->ic_prevchan; if (c != NULL && c->ic_freq == freq && (c->ic_flags & IEEE80211_CHAN_ALLTURBO) == flags) return c; /* brute force search */ return (findchannel(ic->ic_channels, ic->ic_nchans, freq, flags)); } /* * Locate a channel given a channel number+flags. We cache * the previous lookup to optimize switching between two * channels--as happens with dynamic turbo. */ struct ieee80211_channel * ieee80211_find_channel_byieee(struct ieee80211com *ic, int ieee, int flags) { struct ieee80211_channel *c; int i; flags &= IEEE80211_CHAN_ALLTURBO; c = ic->ic_prevchan; if (c != NULL && c->ic_ieee == ieee && (c->ic_flags & IEEE80211_CHAN_ALLTURBO) == flags) return c; /* brute force search */ for (i = 0; i < ic->ic_nchans; i++) { c = &ic->ic_channels[i]; if (c->ic_ieee == ieee && (c->ic_flags & IEEE80211_CHAN_ALLTURBO) == flags) return c; } return NULL; } /* * Lookup a channel suitable for the given rx status. * * This is used to find a channel for a frame (eg beacon, probe * response) based purely on the received PHY information. * * For now it tries to do it based on R_FREQ / R_IEEE. * This is enough for 11bg and 11a (and thus 11ng/11na) * but it will not be enough for GSM, PSB channels and the * like. It also doesn't know about legacy-turbog and * legacy-turbo modes, which some offload NICs actually * support in weird ways. * * Takes the ic and rxstatus; returns the channel or NULL * if not found. * * XXX TODO: Add support for that when the need arises. 
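 *
 * e.g. a beacon reported by an offload NIC at c_freq 2412 with the
 * 2 GHz band flag set resolves to the IEEE80211_CHAN_G entry for
 * channel 1, provided that channel is configured.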
*/ struct ieee80211_channel * ieee80211_lookup_channel_rxstatus(struct ieee80211vap *vap, const struct ieee80211_rx_stats *rxs) { struct ieee80211com *ic = vap->iv_ic; uint32_t flags; struct ieee80211_channel *c; if (rxs == NULL) return (NULL); /* * Strictly speaking we only use freq for now, * however later on we may wish to just store * the ieee for verification. */ if ((rxs->r_flags & IEEE80211_R_FREQ) == 0) return (NULL); if ((rxs->r_flags & IEEE80211_R_IEEE) == 0) return (NULL); if ((rxs->r_flags & IEEE80211_R_BAND) == 0) return (NULL); /* * If the rx status contains a valid ieee/freq, then * ensure we populate the correct channel information * in rxchan before passing it up to the scan infrastructure. * Offload NICs will pass up beacons from all channels * during background scans. */ /* Determine a band */ switch (rxs->c_band) { case IEEE80211_CHAN_2GHZ: flags = IEEE80211_CHAN_G; break; case IEEE80211_CHAN_5GHZ: flags = IEEE80211_CHAN_A; break; default: if (rxs->c_freq < 3000) { flags = IEEE80211_CHAN_G; } else { flags = IEEE80211_CHAN_A; } break; } /* Channel lookup */ c = ieee80211_find_channel(ic, rxs->c_freq, flags); IEEE80211_DPRINTF(vap, IEEE80211_MSG_INPUT, "%s: freq=%d, ieee=%d, flags=0x%08x; c=%p\n", __func__, (int) rxs->c_freq, (int) rxs->c_ieee, flags, c); return (c); } static void addmedia(struct ifmedia *media, int caps, int addsta, int mode, int mword) { #define ADD(_ic, _s, _o) \ ifmedia_add(media, \ IFM_MAKEWORD(IFM_IEEE80211, (_s), (_o), 0), 0, NULL) static const u_int mopts[IEEE80211_MODE_MAX] = { [IEEE80211_MODE_AUTO] = IFM_AUTO, [IEEE80211_MODE_11A] = IFM_IEEE80211_11A, [IEEE80211_MODE_11B] = IFM_IEEE80211_11B, [IEEE80211_MODE_11G] = IFM_IEEE80211_11G, [IEEE80211_MODE_FH] = IFM_IEEE80211_FH, [IEEE80211_MODE_TURBO_A] = IFM_IEEE80211_11A|IFM_IEEE80211_TURBO, [IEEE80211_MODE_TURBO_G] = IFM_IEEE80211_11G|IFM_IEEE80211_TURBO, [IEEE80211_MODE_STURBO_A] = IFM_IEEE80211_11A|IFM_IEEE80211_TURBO, [IEEE80211_MODE_HALF] = IFM_IEEE80211_11A, /* XXX */ [IEEE80211_MODE_QUARTER] = IFM_IEEE80211_11A, /* XXX */ [IEEE80211_MODE_11NA] = IFM_IEEE80211_11NA, [IEEE80211_MODE_11NG] = IFM_IEEE80211_11NG, [IEEE80211_MODE_VHT_2GHZ] = IFM_IEEE80211_VHT2G, [IEEE80211_MODE_VHT_5GHZ] = IFM_IEEE80211_VHT5G, }; u_int mopt; mopt = mopts[mode]; if (addsta) ADD(ic, mword, mopt); /* STA mode has no cap */ if (caps & IEEE80211_C_IBSS) ADD(media, mword, mopt | IFM_IEEE80211_ADHOC); if (caps & IEEE80211_C_HOSTAP) ADD(media, mword, mopt | IFM_IEEE80211_HOSTAP); if (caps & IEEE80211_C_AHDEMO) ADD(media, mword, mopt | IFM_IEEE80211_ADHOC | IFM_FLAG0); if (caps & IEEE80211_C_MONITOR) ADD(media, mword, mopt | IFM_IEEE80211_MONITOR); if (caps & IEEE80211_C_WDS) ADD(media, mword, mopt | IFM_IEEE80211_WDS); if (caps & IEEE80211_C_MBSS) ADD(media, mword, mopt | IFM_IEEE80211_MBSS); #undef ADD } /* * Setup the media data structures according to the channel and * rate tables. */ static int ieee80211_media_setup(struct ieee80211com *ic, struct ifmedia *media, int caps, int addsta, ifm_change_cb_t media_change, ifm_stat_cb_t media_stat) { int i, j, rate, maxrate, mword, r; enum ieee80211_phymode mode; const struct ieee80211_rateset *rs; struct ieee80211_rateset allrates; /* * Fill in media characteristics. */ ifmedia_init(media, 0, media_change, media_stat); maxrate = 0; /* * Add media for legacy operating modes. 
*/ memset(&allrates, 0, sizeof(allrates)); for (mode = IEEE80211_MODE_AUTO; mode < IEEE80211_MODE_11NA; mode++) { if (isclr(ic->ic_modecaps, mode)) continue; addmedia(media, caps, addsta, mode, IFM_AUTO); if (mode == IEEE80211_MODE_AUTO) continue; rs = &ic->ic_sup_rates[mode]; for (i = 0; i < rs->rs_nrates; i++) { rate = rs->rs_rates[i]; mword = ieee80211_rate2media(ic, rate, mode); if (mword == 0) continue; addmedia(media, caps, addsta, mode, mword); /* * Add legacy rate to the collection of all rates. */ r = rate & IEEE80211_RATE_VAL; for (j = 0; j < allrates.rs_nrates; j++) if (allrates.rs_rates[j] == r) break; if (j == allrates.rs_nrates) { /* unique, add to the set */ allrates.rs_rates[j] = r; allrates.rs_nrates++; } rate = (rate & IEEE80211_RATE_VAL) / 2; if (rate > maxrate) maxrate = rate; } } for (i = 0; i < allrates.rs_nrates; i++) { mword = ieee80211_rate2media(ic, allrates.rs_rates[i], IEEE80211_MODE_AUTO); if (mword == 0) continue; /* NB: remove media options from mword */ addmedia(media, caps, addsta, IEEE80211_MODE_AUTO, IFM_SUBTYPE(mword)); } /* * Add HT/11n media. Note that we do not have enough * bits in the media subtype to express the MCS so we * use a "placeholder" media subtype and any fixed MCS * must be specified with a different mechanism. */ for (; mode <= IEEE80211_MODE_11NG; mode++) { if (isclr(ic->ic_modecaps, mode)) continue; addmedia(media, caps, addsta, mode, IFM_AUTO); addmedia(media, caps, addsta, mode, IFM_IEEE80211_MCS); } if (isset(ic->ic_modecaps, IEEE80211_MODE_11NA) || isset(ic->ic_modecaps, IEEE80211_MODE_11NG)) { addmedia(media, caps, addsta, IEEE80211_MODE_AUTO, IFM_IEEE80211_MCS); i = ic->ic_txstream * 8 - 1; if ((ic->ic_htcaps & IEEE80211_HTCAP_CHWIDTH40) && (ic->ic_htcaps & IEEE80211_HTCAP_SHORTGI40)) rate = ieee80211_htrates[i].ht40_rate_400ns; else if ((ic->ic_htcaps & IEEE80211_HTCAP_CHWIDTH40)) rate = ieee80211_htrates[i].ht40_rate_800ns; else if ((ic->ic_htcaps & IEEE80211_HTCAP_SHORTGI20)) rate = ieee80211_htrates[i].ht20_rate_400ns; else rate = ieee80211_htrates[i].ht20_rate_800ns; if (rate > maxrate) maxrate = rate; } /* * Add VHT media. * XXX-BZ skip "VHT_2GHZ" for now. */ for (mode = IEEE80211_MODE_VHT_5GHZ; mode <= IEEE80211_MODE_VHT_5GHZ; mode++) { if (isclr(ic->ic_modecaps, mode)) continue; addmedia(media, caps, addsta, mode, IFM_AUTO); addmedia(media, caps, addsta, mode, IFM_IEEE80211_VHT); } if (isset(ic->ic_modecaps, IEEE80211_MODE_VHT_5GHZ)) { addmedia(media, caps, addsta, IEEE80211_MODE_AUTO, IFM_IEEE80211_VHT); /* XXX TODO: VHT maxrate */ } return maxrate; } /* XXX inline or eliminate? */ const struct ieee80211_rateset * ieee80211_get_suprates(struct ieee80211com *ic, const struct ieee80211_channel *c) { /* XXX does this work for 11ng basic rates? */ return &ic->ic_sup_rates[ieee80211_chan2mode(c)]; } /* XXX inline or eliminate? 
*/ const struct ieee80211_htrateset * ieee80211_get_suphtrates(struct ieee80211com *ic, const struct ieee80211_channel *c) { return &ic->ic_sup_htrates; } void ieee80211_announce(struct ieee80211com *ic) { int i, rate, mword; enum ieee80211_phymode mode; const struct ieee80211_rateset *rs; /* NB: skip AUTO since it has no rates */ for (mode = IEEE80211_MODE_AUTO+1; mode < IEEE80211_MODE_11NA; mode++) { if (isclr(ic->ic_modecaps, mode)) continue; ic_printf(ic, "%s rates: ", ieee80211_phymode_name[mode]); rs = &ic->ic_sup_rates[mode]; for (i = 0; i < rs->rs_nrates; i++) { mword = ieee80211_rate2media(ic, rs->rs_rates[i], mode); if (mword == 0) continue; rate = ieee80211_media2rate(mword); printf("%s%d%sMbps", (i != 0 ? " " : ""), rate / 2, ((rate & 0x1) != 0 ? ".5" : "")); } printf("\n"); } ieee80211_ht_announce(ic); ieee80211_vht_announce(ic); } void ieee80211_announce_channels(struct ieee80211com *ic) { const struct ieee80211_channel *c; char type; int i, cw; printf("Chan Freq CW RegPwr MinPwr MaxPwr\n"); for (i = 0; i < ic->ic_nchans; i++) { c = &ic->ic_channels[i]; if (IEEE80211_IS_CHAN_ST(c)) type = 'S'; else if (IEEE80211_IS_CHAN_108A(c)) type = 'T'; else if (IEEE80211_IS_CHAN_108G(c)) type = 'G'; else if (IEEE80211_IS_CHAN_HT(c)) type = 'n'; else if (IEEE80211_IS_CHAN_A(c)) type = 'a'; else if (IEEE80211_IS_CHAN_ANYG(c)) type = 'g'; else if (IEEE80211_IS_CHAN_B(c)) type = 'b'; else type = 'f'; if (IEEE80211_IS_CHAN_HT40(c) || IEEE80211_IS_CHAN_TURBO(c)) cw = 40; else if (IEEE80211_IS_CHAN_HALF(c)) cw = 10; else if (IEEE80211_IS_CHAN_QUARTER(c)) cw = 5; else cw = 20; printf("%4d %4d%c %2d%c %6d %4d.%d %4d.%d\n" , c->ic_ieee, c->ic_freq, type , cw , IEEE80211_IS_CHAN_HT40U(c) ? '+' : IEEE80211_IS_CHAN_HT40D(c) ? '-' : ' ' , c->ic_maxregpower , c->ic_minpower / 2, c->ic_minpower & 1 ? 5 : 0 , c->ic_maxpower / 2, c->ic_maxpower & 1 ? 5 : 0 ); } } static int media2mode(const struct ifmedia_entry *ime, uint32_t flags, uint16_t *mode) { switch (IFM_MODE(ime->ifm_media)) { case IFM_IEEE80211_11A: *mode = IEEE80211_MODE_11A; break; case IFM_IEEE80211_11B: *mode = IEEE80211_MODE_11B; break; case IFM_IEEE80211_11G: *mode = IEEE80211_MODE_11G; break; case IFM_IEEE80211_FH: *mode = IEEE80211_MODE_FH; break; case IFM_IEEE80211_11NA: *mode = IEEE80211_MODE_11NA; break; case IFM_IEEE80211_11NG: *mode = IEEE80211_MODE_11NG; break; case IFM_IEEE80211_VHT2G: *mode = IEEE80211_MODE_VHT_2GHZ; break; case IFM_IEEE80211_VHT5G: *mode = IEEE80211_MODE_VHT_5GHZ; break; case IFM_AUTO: *mode = IEEE80211_MODE_AUTO; break; default: return 0; } /* * Turbo mode is an ``option''. * XXX does not apply to AUTO */ if (ime->ifm_media & IFM_IEEE80211_TURBO) { if (*mode == IEEE80211_MODE_11A) { if (flags & IEEE80211_F_TURBOP) *mode = IEEE80211_MODE_TURBO_A; else *mode = IEEE80211_MODE_STURBO_A; } else if (*mode == IEEE80211_MODE_11G) *mode = IEEE80211_MODE_TURBO_G; else return 0; } /* XXX HT40 +/- */ return 1; } /* * Handle a media change request on the vap interface. */ int ieee80211_media_change(struct ifnet *ifp) { struct ieee80211vap *vap = ifp->if_softc; struct ifmedia_entry *ime = vap->iv_media.ifm_cur; uint16_t newmode; if (!media2mode(ime, vap->iv_flags, &newmode)) return EINVAL; if (vap->iv_des_mode != newmode) { vap->iv_des_mode = newmode; /* XXX kick state machine if up+running */ } return 0; } /* * Common code to calculate the media status word * from the operating mode and channel state. 
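 * The result is an IFM_IEEE80211 word carrying the operating mode
 * (adhoc, hostap, monitor, ...) and the PHY of the current channel
 * (11a/11b/11g/11n/VHT) as media options.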
*/ static int media_status(enum ieee80211_opmode opmode, const struct ieee80211_channel *chan) { int status; status = IFM_IEEE80211; switch (opmode) { case IEEE80211_M_STA: break; case IEEE80211_M_IBSS: status |= IFM_IEEE80211_ADHOC; break; case IEEE80211_M_HOSTAP: status |= IFM_IEEE80211_HOSTAP; break; case IEEE80211_M_MONITOR: status |= IFM_IEEE80211_MONITOR; break; case IEEE80211_M_AHDEMO: status |= IFM_IEEE80211_ADHOC | IFM_FLAG0; break; case IEEE80211_M_WDS: status |= IFM_IEEE80211_WDS; break; case IEEE80211_M_MBSS: status |= IFM_IEEE80211_MBSS; break; } if (IEEE80211_IS_CHAN_VHT_5GHZ(chan)) { status |= IFM_IEEE80211_VHT5G; } else if (IEEE80211_IS_CHAN_VHT_2GHZ(chan)) { status |= IFM_IEEE80211_VHT2G; } else if (IEEE80211_IS_CHAN_HTA(chan)) { status |= IFM_IEEE80211_11NA; } else if (IEEE80211_IS_CHAN_HTG(chan)) { status |= IFM_IEEE80211_11NG; } else if (IEEE80211_IS_CHAN_A(chan)) { status |= IFM_IEEE80211_11A; } else if (IEEE80211_IS_CHAN_B(chan)) { status |= IFM_IEEE80211_11B; } else if (IEEE80211_IS_CHAN_ANYG(chan)) { status |= IFM_IEEE80211_11G; } else if (IEEE80211_IS_CHAN_FHSS(chan)) { status |= IFM_IEEE80211_FH; } /* XXX else complain? */ if (IEEE80211_IS_CHAN_TURBO(chan)) status |= IFM_IEEE80211_TURBO; #if 0 if (IEEE80211_IS_CHAN_HT20(chan)) status |= IFM_IEEE80211_HT20; if (IEEE80211_IS_CHAN_HT40(chan)) status |= IFM_IEEE80211_HT40; #endif return status; } void ieee80211_media_status(struct ifnet *ifp, struct ifmediareq *imr) { struct ieee80211vap *vap = ifp->if_softc; struct ieee80211com *ic = vap->iv_ic; enum ieee80211_phymode mode; imr->ifm_status = IFM_AVALID; /* * NB: use the current channel's mode to lock down a xmit * rate only when running; otherwise we may have a mismatch * in which case the rate will not be convertible. */ if (vap->iv_state == IEEE80211_S_RUN || vap->iv_state == IEEE80211_S_SLEEP) { imr->ifm_status |= IFM_ACTIVE; mode = ieee80211_chan2mode(ic->ic_curchan); } else mode = IEEE80211_MODE_AUTO; imr->ifm_active = media_status(vap->iv_opmode, ic->ic_curchan); /* * Calculate a current rate if possible. */ if (vap->iv_txparms[mode].ucastrate != IEEE80211_FIXED_RATE_NONE) { /* * A fixed rate is set, report that. */ imr->ifm_active |= ieee80211_rate2media(ic, vap->iv_txparms[mode].ucastrate, mode); } else if (vap->iv_opmode == IEEE80211_M_STA) { /* * In station mode report the current transmit rate. */ imr->ifm_active |= ieee80211_rate2media(ic, vap->iv_bss->ni_txrate, mode); } else imr->ifm_active |= IFM_AUTO; if (imr->ifm_status & IFM_ACTIVE) imr->ifm_current = imr->ifm_active; } /* * Set the current phy mode and recalculate the active channel * set based on the available channels for this mode. Also * select a new default/current channel if the current one is * inappropriate for this mode. */ int ieee80211_setmode(struct ieee80211com *ic, enum ieee80211_phymode mode) { /* * Adjust basic rates in 11b/11g supported rate set. * Note that if operating on a hal/quarter rate channel * this is a noop as those rates sets are different * and used instead. */ if (mode == IEEE80211_MODE_11G || mode == IEEE80211_MODE_11B) ieee80211_setbasicrates(&ic->ic_sup_rates[mode], mode); ic->ic_curmode = mode; ieee80211_reset_erp(ic); /* reset global ERP state */ return 0; } /* * Return the phy mode for with the specified channel. 
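 * Note the tests below are ordered from most to least specific
 * (VHT before HT before turbo/half/quarter before plain 11a/11g/11b)
 * so a channel maps to the richest mode it supports.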
*/ enum ieee80211_phymode ieee80211_chan2mode(const struct ieee80211_channel *chan) { if (IEEE80211_IS_CHAN_VHT_2GHZ(chan)) return IEEE80211_MODE_VHT_2GHZ; else if (IEEE80211_IS_CHAN_VHT_5GHZ(chan)) return IEEE80211_MODE_VHT_5GHZ; else if (IEEE80211_IS_CHAN_HTA(chan)) return IEEE80211_MODE_11NA; else if (IEEE80211_IS_CHAN_HTG(chan)) return IEEE80211_MODE_11NG; else if (IEEE80211_IS_CHAN_108G(chan)) return IEEE80211_MODE_TURBO_G; else if (IEEE80211_IS_CHAN_ST(chan)) return IEEE80211_MODE_STURBO_A; else if (IEEE80211_IS_CHAN_TURBO(chan)) return IEEE80211_MODE_TURBO_A; else if (IEEE80211_IS_CHAN_HALF(chan)) return IEEE80211_MODE_HALF; else if (IEEE80211_IS_CHAN_QUARTER(chan)) return IEEE80211_MODE_QUARTER; else if (IEEE80211_IS_CHAN_A(chan)) return IEEE80211_MODE_11A; else if (IEEE80211_IS_CHAN_ANYG(chan)) return IEEE80211_MODE_11G; else if (IEEE80211_IS_CHAN_B(chan)) return IEEE80211_MODE_11B; else if (IEEE80211_IS_CHAN_FHSS(chan)) return IEEE80211_MODE_FH; /* NB: should not get here */ printf("%s: cannot map channel to mode; freq %u flags 0x%x\n", __func__, chan->ic_freq, chan->ic_flags); return IEEE80211_MODE_11B; } struct ratemedia { u_int match; /* rate + mode */ u_int media; /* if_media rate */ }; static int findmedia(const struct ratemedia rates[], int n, u_int match) { int i; for (i = 0; i < n; i++) if (rates[i].match == match) return rates[i].media; return IFM_AUTO; } /* * Convert IEEE80211 rate value to ifmedia subtype. * Rate is either a legacy rate in units of 0.5Mbps * or an MCS index. */ int ieee80211_rate2media(struct ieee80211com *ic, int rate, enum ieee80211_phymode mode) { static const struct ratemedia rates[] = { { 2 | IFM_IEEE80211_FH, IFM_IEEE80211_FH1 }, { 4 | IFM_IEEE80211_FH, IFM_IEEE80211_FH2 }, { 2 | IFM_IEEE80211_11B, IFM_IEEE80211_DS1 }, { 4 | IFM_IEEE80211_11B, IFM_IEEE80211_DS2 }, { 11 | IFM_IEEE80211_11B, IFM_IEEE80211_DS5 }, { 22 | IFM_IEEE80211_11B, IFM_IEEE80211_DS11 }, { 44 | IFM_IEEE80211_11B, IFM_IEEE80211_DS22 }, { 12 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM6 }, { 18 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM9 }, { 24 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM12 }, { 36 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM18 }, { 48 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM24 }, { 72 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM36 }, { 96 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM48 }, { 108 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM54 }, { 2 | IFM_IEEE80211_11G, IFM_IEEE80211_DS1 }, { 4 | IFM_IEEE80211_11G, IFM_IEEE80211_DS2 }, { 11 | IFM_IEEE80211_11G, IFM_IEEE80211_DS5 }, { 22 | IFM_IEEE80211_11G, IFM_IEEE80211_DS11 }, { 12 | IFM_IEEE80211_11G, IFM_IEEE80211_OFDM6 }, { 18 | IFM_IEEE80211_11G, IFM_IEEE80211_OFDM9 }, { 24 | IFM_IEEE80211_11G, IFM_IEEE80211_OFDM12 }, { 36 | IFM_IEEE80211_11G, IFM_IEEE80211_OFDM18 }, { 48 | IFM_IEEE80211_11G, IFM_IEEE80211_OFDM24 }, { 72 | IFM_IEEE80211_11G, IFM_IEEE80211_OFDM36 }, { 96 | IFM_IEEE80211_11G, IFM_IEEE80211_OFDM48 }, { 108 | IFM_IEEE80211_11G, IFM_IEEE80211_OFDM54 }, { 6 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM3 }, { 9 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM4 }, { 54 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM27 }, /* NB: OFDM72 doesn't really exist so we don't handle it */ }; static const struct ratemedia htrates[] = { { 0, IFM_IEEE80211_MCS }, { 1, IFM_IEEE80211_MCS }, { 2, IFM_IEEE80211_MCS }, { 3, IFM_IEEE80211_MCS }, { 4, IFM_IEEE80211_MCS }, { 5, IFM_IEEE80211_MCS }, { 6, IFM_IEEE80211_MCS }, { 7, IFM_IEEE80211_MCS }, { 8, IFM_IEEE80211_MCS }, { 9, IFM_IEEE80211_MCS }, { 10, IFM_IEEE80211_MCS }, { 11, IFM_IEEE80211_MCS }, { 12, 
IFM_IEEE80211_MCS }, { 13, IFM_IEEE80211_MCS }, { 14, IFM_IEEE80211_MCS }, { 15, IFM_IEEE80211_MCS }, { 16, IFM_IEEE80211_MCS }, { 17, IFM_IEEE80211_MCS }, { 18, IFM_IEEE80211_MCS }, { 19, IFM_IEEE80211_MCS }, { 20, IFM_IEEE80211_MCS }, { 21, IFM_IEEE80211_MCS }, { 22, IFM_IEEE80211_MCS }, { 23, IFM_IEEE80211_MCS }, { 24, IFM_IEEE80211_MCS }, { 25, IFM_IEEE80211_MCS }, { 26, IFM_IEEE80211_MCS }, { 27, IFM_IEEE80211_MCS }, { 28, IFM_IEEE80211_MCS }, { 29, IFM_IEEE80211_MCS }, { 30, IFM_IEEE80211_MCS }, { 31, IFM_IEEE80211_MCS }, { 32, IFM_IEEE80211_MCS }, { 33, IFM_IEEE80211_MCS }, { 34, IFM_IEEE80211_MCS }, { 35, IFM_IEEE80211_MCS }, { 36, IFM_IEEE80211_MCS }, { 37, IFM_IEEE80211_MCS }, { 38, IFM_IEEE80211_MCS }, { 39, IFM_IEEE80211_MCS }, { 40, IFM_IEEE80211_MCS }, { 41, IFM_IEEE80211_MCS }, { 42, IFM_IEEE80211_MCS }, { 43, IFM_IEEE80211_MCS }, { 44, IFM_IEEE80211_MCS }, { 45, IFM_IEEE80211_MCS }, { 46, IFM_IEEE80211_MCS }, { 47, IFM_IEEE80211_MCS }, { 48, IFM_IEEE80211_MCS }, { 49, IFM_IEEE80211_MCS }, { 50, IFM_IEEE80211_MCS }, { 51, IFM_IEEE80211_MCS }, { 52, IFM_IEEE80211_MCS }, { 53, IFM_IEEE80211_MCS }, { 54, IFM_IEEE80211_MCS }, { 55, IFM_IEEE80211_MCS }, { 56, IFM_IEEE80211_MCS }, { 57, IFM_IEEE80211_MCS }, { 58, IFM_IEEE80211_MCS }, { 59, IFM_IEEE80211_MCS }, { 60, IFM_IEEE80211_MCS }, { 61, IFM_IEEE80211_MCS }, { 62, IFM_IEEE80211_MCS }, { 63, IFM_IEEE80211_MCS }, { 64, IFM_IEEE80211_MCS }, { 65, IFM_IEEE80211_MCS }, { 66, IFM_IEEE80211_MCS }, { 67, IFM_IEEE80211_MCS }, { 68, IFM_IEEE80211_MCS }, { 69, IFM_IEEE80211_MCS }, { 70, IFM_IEEE80211_MCS }, { 71, IFM_IEEE80211_MCS }, { 72, IFM_IEEE80211_MCS }, { 73, IFM_IEEE80211_MCS }, { 74, IFM_IEEE80211_MCS }, { 75, IFM_IEEE80211_MCS }, { 76, IFM_IEEE80211_MCS }, }; static const struct ratemedia vhtrates[] = { { 0, IFM_IEEE80211_VHT }, { 1, IFM_IEEE80211_VHT }, { 2, IFM_IEEE80211_VHT }, { 3, IFM_IEEE80211_VHT }, { 4, IFM_IEEE80211_VHT }, { 5, IFM_IEEE80211_VHT }, { 6, IFM_IEEE80211_VHT }, { 7, IFM_IEEE80211_VHT }, { 8, IFM_IEEE80211_VHT }, /* Optional. */ { 9, IFM_IEEE80211_VHT }, /* Optional. */ #if 0 /* Some QCA and BRCM seem to support this; offspec. */ { 10, IFM_IEEE80211_VHT }, { 11, IFM_IEEE80211_VHT }, #endif }; int m; /* * Check 11ac/11n rates first for match as an MCS. 
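 * An HT MCS is tagged with IEEE80211_RATE_MCS (the VHT entries below key off
 * the IFM_IEEE80211_VHT bit instead); untagged legacy rates fall through to
 * the rate-table lookup further down.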
*/ if (mode == IEEE80211_MODE_VHT_5GHZ) { if (rate & IFM_IEEE80211_VHT) { rate &= ~IFM_IEEE80211_VHT; m = findmedia(vhtrates, nitems(vhtrates), rate); if (m != IFM_AUTO) return (m | IFM_IEEE80211_VHT); } } else if (mode == IEEE80211_MODE_11NA) { if (rate & IEEE80211_RATE_MCS) { rate &= ~IEEE80211_RATE_MCS; m = findmedia(htrates, nitems(htrates), rate); if (m != IFM_AUTO) return m | IFM_IEEE80211_11NA; } } else if (mode == IEEE80211_MODE_11NG) { /* NB: 12 is ambiguous, it will be treated as an MCS */ if (rate & IEEE80211_RATE_MCS) { rate &= ~IEEE80211_RATE_MCS; m = findmedia(htrates, nitems(htrates), rate); if (m != IFM_AUTO) return m | IFM_IEEE80211_11NG; } } rate &= IEEE80211_RATE_VAL; switch (mode) { case IEEE80211_MODE_11A: case IEEE80211_MODE_HALF: /* XXX good 'nuf */ case IEEE80211_MODE_QUARTER: case IEEE80211_MODE_11NA: case IEEE80211_MODE_TURBO_A: case IEEE80211_MODE_STURBO_A: return findmedia(rates, nitems(rates), rate | IFM_IEEE80211_11A); case IEEE80211_MODE_11B: return findmedia(rates, nitems(rates), rate | IFM_IEEE80211_11B); case IEEE80211_MODE_FH: return findmedia(rates, nitems(rates), rate | IFM_IEEE80211_FH); case IEEE80211_MODE_AUTO: /* NB: ic may be NULL for some drivers */ if (ic != NULL && ic->ic_phytype == IEEE80211_T_FH) return findmedia(rates, nitems(rates), rate | IFM_IEEE80211_FH); /* NB: hack, 11g matches both 11b+11a rates */ /* fall thru... */ case IEEE80211_MODE_11G: case IEEE80211_MODE_11NG: case IEEE80211_MODE_TURBO_G: return findmedia(rates, nitems(rates), rate | IFM_IEEE80211_11G); case IEEE80211_MODE_VHT_2GHZ: case IEEE80211_MODE_VHT_5GHZ: /* XXX TODO: need to figure out mapping for VHT rates */ return IFM_AUTO; } return IFM_AUTO; } int ieee80211_media2rate(int mword) { static const int ieeerates[] = { -1, /* IFM_AUTO */ 0, /* IFM_MANUAL */ 0, /* IFM_NONE */ 2, /* IFM_IEEE80211_FH1 */ 4, /* IFM_IEEE80211_FH2 */ 2, /* IFM_IEEE80211_DS1 */ 4, /* IFM_IEEE80211_DS2 */ 11, /* IFM_IEEE80211_DS5 */ 22, /* IFM_IEEE80211_DS11 */ 44, /* IFM_IEEE80211_DS22 */ 12, /* IFM_IEEE80211_OFDM6 */ 18, /* IFM_IEEE80211_OFDM9 */ 24, /* IFM_IEEE80211_OFDM12 */ 36, /* IFM_IEEE80211_OFDM18 */ 48, /* IFM_IEEE80211_OFDM24 */ 72, /* IFM_IEEE80211_OFDM36 */ 96, /* IFM_IEEE80211_OFDM48 */ 108, /* IFM_IEEE80211_OFDM54 */ 144, /* IFM_IEEE80211_OFDM72 */ 0, /* IFM_IEEE80211_DS354k */ 0, /* IFM_IEEE80211_DS512k */ 6, /* IFM_IEEE80211_OFDM3 */ 9, /* IFM_IEEE80211_OFDM4 */ 54, /* IFM_IEEE80211_OFDM27 */ -1, /* IFM_IEEE80211_MCS */ -1, /* IFM_IEEE80211_VHT */ }; return IFM_SUBTYPE(mword) < nitems(ieeerates) ? ieeerates[IFM_SUBTYPE(mword)] : 0; } /* * The following hash function is adapted from "Hash Functions" by Bob Jenkins * ("Algorithm Alley", Dr. Dobbs Journal, September 1997). 
*/ #define mix(a, b, c) \ do { \ a -= b; a -= c; a ^= (c >> 13); \ b -= c; b -= a; b ^= (a << 8); \ c -= a; c -= b; c ^= (b >> 13); \ a -= b; a -= c; a ^= (c >> 12); \ b -= c; b -= a; b ^= (a << 16); \ c -= a; c -= b; c ^= (b >> 5); \ a -= b; a -= c; a ^= (c >> 3); \ b -= c; b -= a; b ^= (a << 10); \ c -= a; c -= b; c ^= (b >> 15); \ } while (/*CONSTCOND*/0) uint32_t ieee80211_mac_hash(const struct ieee80211com *ic, const uint8_t addr[IEEE80211_ADDR_LEN]) { uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = ic->ic_hash_key; b += addr[5] << 8; b += addr[4]; a += addr[3] << 24; a += addr[2] << 16; a += addr[1] << 8; a += addr[0]; mix(a, b, c); return c; } #undef mix char ieee80211_channel_type_char(const struct ieee80211_channel *c) { if (IEEE80211_IS_CHAN_ST(c)) return 'S'; if (IEEE80211_IS_CHAN_108A(c)) return 'T'; if (IEEE80211_IS_CHAN_108G(c)) return 'G'; if (IEEE80211_IS_CHAN_VHT(c)) return 'v'; if (IEEE80211_IS_CHAN_HT(c)) return 'n'; if (IEEE80211_IS_CHAN_A(c)) return 'a'; if (IEEE80211_IS_CHAN_ANYG(c)) return 'g'; if (IEEE80211_IS_CHAN_B(c)) return 'b'; return 'f'; } diff --git a/sys/net80211/ieee80211_crypto_ccmp.c b/sys/net80211/ieee80211_crypto_ccmp.c index c4423e8a4ca9..a8ef3dab7635 100644 --- a/sys/net80211/ieee80211_crypto_ccmp.c +++ b/sys/net80211/ieee80211_crypto_ccmp.c @@ -1,683 +1,683 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2002-2008 Sam Leffler, Errno Consulting * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * IEEE 802.11i AES-CCMP crypto support. * * Part of this module is derived from similar code in the Host * AP driver. The code is used with the consent of the author and * it's license is included below. 
*/ #include "opt_wlan.h" #include #include #include #include #include #include #include #include #include #include #include #include #define AES_BLOCK_LEN 16 struct ccmp_ctx { struct ieee80211vap *cc_vap; /* for diagnostics+statistics */ struct ieee80211com *cc_ic; rijndael_ctx cc_aes; }; static void *ccmp_attach(struct ieee80211vap *, struct ieee80211_key *); static void ccmp_detach(struct ieee80211_key *); static int ccmp_setkey(struct ieee80211_key *); static void ccmp_setiv(struct ieee80211_key *, uint8_t *); static int ccmp_encap(struct ieee80211_key *, struct mbuf *); static int ccmp_decap(struct ieee80211_key *, struct mbuf *, int); static int ccmp_enmic(struct ieee80211_key *, struct mbuf *, int); static int ccmp_demic(struct ieee80211_key *, struct mbuf *, int); static const struct ieee80211_cipher ccmp = { .ic_name = "AES-CCM", .ic_cipher = IEEE80211_CIPHER_AES_CCM, .ic_header = IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + IEEE80211_WEP_EXTIVLEN, .ic_trailer = IEEE80211_WEP_MICLEN, .ic_miclen = 0, .ic_attach = ccmp_attach, .ic_detach = ccmp_detach, .ic_setkey = ccmp_setkey, .ic_setiv = ccmp_setiv, .ic_encap = ccmp_encap, .ic_decap = ccmp_decap, .ic_enmic = ccmp_enmic, .ic_demic = ccmp_demic, }; static int ccmp_encrypt(struct ieee80211_key *, struct mbuf *, int hdrlen); static int ccmp_decrypt(struct ieee80211_key *, u_int64_t pn, struct mbuf *, int hdrlen); /* number of references from net80211 layer */ static int nrefs = 0; static void * ccmp_attach(struct ieee80211vap *vap, struct ieee80211_key *k) { struct ccmp_ctx *ctx; ctx = (struct ccmp_ctx *) IEEE80211_MALLOC(sizeof(struct ccmp_ctx), M_80211_CRYPTO, IEEE80211_M_NOWAIT | IEEE80211_M_ZERO); if (ctx == NULL) { vap->iv_stats.is_crypto_nomem++; return NULL; } ctx->cc_vap = vap; ctx->cc_ic = vap->iv_ic; nrefs++; /* NB: we assume caller locking */ return ctx; } static void ccmp_detach(struct ieee80211_key *k) { struct ccmp_ctx *ctx = k->wk_private; IEEE80211_FREE(ctx, M_80211_CRYPTO); KASSERT(nrefs > 0, ("imbalanced attach/detach")); nrefs--; /* NB: we assume caller locking */ } static int ccmp_setkey(struct ieee80211_key *k) { struct ccmp_ctx *ctx = k->wk_private; if (k->wk_keylen != (128/NBBY)) { IEEE80211_DPRINTF(ctx->cc_vap, IEEE80211_MSG_CRYPTO, "%s: Invalid key length %u, expecting %u\n", __func__, k->wk_keylen, 128/NBBY); return 0; } if (k->wk_flags & IEEE80211_KEY_SWENCRYPT) rijndael_set_key(&ctx->cc_aes, k->wk_key, k->wk_keylen*NBBY); return 1; } static void ccmp_setiv(struct ieee80211_key *k, uint8_t *ivp) { struct ccmp_ctx *ctx = k->wk_private; struct ieee80211vap *vap = ctx->cc_vap; uint8_t keyid; keyid = ieee80211_crypto_get_keyid(vap, k) << 6; k->wk_keytsc++; ivp[0] = k->wk_keytsc >> 0; /* PN0 */ ivp[1] = k->wk_keytsc >> 8; /* PN1 */ ivp[2] = 0; /* Reserved */ ivp[3] = keyid | IEEE80211_WEP_EXTIV; /* KeyID | ExtID */ ivp[4] = k->wk_keytsc >> 16; /* PN2 */ ivp[5] = k->wk_keytsc >> 24; /* PN3 */ ivp[6] = k->wk_keytsc >> 32; /* PN4 */ ivp[7] = k->wk_keytsc >> 40; /* PN5 */ } /* * Add privacy headers appropriate for the specified key. */ static int ccmp_encap(struct ieee80211_key *k, struct mbuf *m) { const struct ieee80211_frame *wh; struct ccmp_ctx *ctx = k->wk_private; struct ieee80211com *ic = ctx->cc_ic; uint8_t *ivp; int hdrlen; int is_mgmt; hdrlen = ieee80211_hdrspace(ic, mtod(m, void *)); wh = mtod(m, const struct ieee80211_frame *); is_mgmt = IEEE80211_IS_MGMT(wh); /* * Check to see if we need to insert IV/MIC. * * Some offload devices don't require the IV to be inserted * as part of the hardware encryption. 
*/ if (is_mgmt && (k->wk_flags & IEEE80211_KEY_NOIVMGT)) return 1; if ((! is_mgmt) && (k->wk_flags & IEEE80211_KEY_NOIV)) return 1; /* * Copy down 802.11 header and add the IV, KeyID, and ExtIV. */ - M_PREPEND(m, ccmp.ic_header, M_NOWAIT); + M_PREPEND(m, ccmp.ic_header, IEEE80211_M_NOWAIT); if (m == NULL) return 0; ivp = mtod(m, uint8_t *); ovbcopy(ivp + ccmp.ic_header, ivp, hdrlen); ivp += hdrlen; ccmp_setiv(k, ivp); /* * Finally, do software encrypt if needed. */ if ((k->wk_flags & IEEE80211_KEY_SWENCRYPT) && !ccmp_encrypt(k, m, hdrlen)) return 0; return 1; } /* * Add MIC to the frame as needed. */ static int ccmp_enmic(struct ieee80211_key *k, struct mbuf *m, int force) { return 1; } static __inline uint64_t READ_6(uint8_t b0, uint8_t b1, uint8_t b2, uint8_t b3, uint8_t b4, uint8_t b5) { uint32_t iv32 = (b0 << 0) | (b1 << 8) | (b2 << 16) | (b3 << 24); uint16_t iv16 = (b4 << 0) | (b5 << 8); return (((uint64_t)iv16) << 32) | iv32; } /* * Validate and strip privacy headers (and trailer) for a * received frame. The specified key should be correct but * is also verified. */ static int ccmp_decap(struct ieee80211_key *k, struct mbuf *m, int hdrlen) { const struct ieee80211_rx_stats *rxs; struct ccmp_ctx *ctx = k->wk_private; struct ieee80211vap *vap = ctx->cc_vap; struct ieee80211_frame *wh; uint8_t *ivp, tid; uint64_t pn; rxs = ieee80211_get_rx_params_ptr(m); if ((rxs != NULL) && (rxs->c_pktflags & IEEE80211_RX_F_IV_STRIP)) goto finish; /* * Header should have extended IV and sequence number; * verify the former and validate the latter. */ wh = mtod(m, struct ieee80211_frame *); ivp = mtod(m, uint8_t *) + hdrlen; if ((ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV) == 0) { /* * No extended IV; discard frame. */ IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_CRYPTO, wh->i_addr2, "%s", "missing ExtIV for AES-CCM cipher"); vap->iv_stats.is_rx_ccmpformat++; return 0; } tid = ieee80211_gettid(wh); pn = READ_6(ivp[0], ivp[1], ivp[4], ivp[5], ivp[6], ivp[7]); if (pn <= k->wk_keyrsc[tid] && (k->wk_flags & IEEE80211_KEY_NOREPLAY) == 0) { /* * Replay violation. */ ieee80211_notify_replay_failure(vap, wh, k, pn, tid); vap->iv_stats.is_rx_ccmpreplay++; return 0; } /* * Check if the device handled the decrypt in hardware. * If so we just strip the header; otherwise we need to * handle the decrypt in software. Note that for the * latter we leave the header in place for use in the * decryption work. */ if ((k->wk_flags & IEEE80211_KEY_SWDECRYPT) && !ccmp_decrypt(k, pn, m, hdrlen)) return 0; finish: /* * Copy up 802.11 header and strip crypto bits. */ if (! ((rxs != NULL) && (rxs->c_pktflags & IEEE80211_RX_F_IV_STRIP))) { ovbcopy(mtod(m, void *), mtod(m, uint8_t *) + ccmp.ic_header, hdrlen); m_adj(m, ccmp.ic_header); } /* * XXX TODO: see if MMIC_STRIP also covers CCMP MIC trailer. */ if (! ((rxs != NULL) && (rxs->c_pktflags & IEEE80211_RX_F_MMIC_STRIP))) m_adj(m, -ccmp.ic_trailer); /* * Ok to update rsc now. */ if (! ((rxs != NULL) && (rxs->c_pktflags & IEEE80211_RX_F_IV_STRIP))) { k->wk_keyrsc[tid] = pn; } return 1; } /* * Verify and strip MIC from the frame. 
*/ static int ccmp_demic(struct ieee80211_key *k, struct mbuf *m, int force) { return 1; } static __inline void xor_block(uint8_t *b, const uint8_t *a, size_t len) { int i; for (i = 0; i < len; i++) b[i] ^= a[i]; } /* * Host AP crypt: host-based CCMP encryption implementation for Host AP driver * * Copyright (c) 2003-2004, Jouni Malinen * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. See README and COPYING for * more details. * * Alternatively, this software may be distributed under the terms of BSD * license. */ static void ccmp_init_blocks(rijndael_ctx *ctx, struct ieee80211_frame *wh, u_int64_t pn, size_t dlen, uint8_t b0[AES_BLOCK_LEN], uint8_t aad[2 * AES_BLOCK_LEN], uint8_t auth[AES_BLOCK_LEN], uint8_t s0[AES_BLOCK_LEN]) { #define IS_QOS_DATA(wh) IEEE80211_QOS_HAS_SEQ(wh) /* CCM Initial Block: * Flag (Include authentication header, M=3 (8-octet MIC), * L=1 (2-octet Dlen)) * Nonce: 0x00 | A2 | PN * Dlen */ b0[0] = 0x59; /* NB: b0[1] set below */ IEEE80211_ADDR_COPY(b0 + 2, wh->i_addr2); b0[8] = pn >> 40; b0[9] = pn >> 32; b0[10] = pn >> 24; b0[11] = pn >> 16; b0[12] = pn >> 8; b0[13] = pn >> 0; b0[14] = (dlen >> 8) & 0xff; b0[15] = dlen & 0xff; /* AAD: * FC with bits 4..6 and 11..13 masked to zero; 14 is always one * A1 | A2 | A3 * SC with bits 4..15 (seq#) masked to zero * A4 (if present) * QC (if present) */ aad[0] = 0; /* AAD length >> 8 */ /* NB: aad[1] set below */ aad[2] = wh->i_fc[0] & 0x8f; /* XXX magic #s */ aad[3] = wh->i_fc[1] & 0xc7; /* XXX magic #s */ /* NB: we know 3 addresses are contiguous */ memcpy(aad + 4, wh->i_addr1, 3 * IEEE80211_ADDR_LEN); aad[22] = wh->i_seq[0] & IEEE80211_SEQ_FRAG_MASK; aad[23] = 0; /* all bits masked */ /* * Construct variable-length portion of AAD based * on whether this is a 4-address frame/QOS frame. * We always zero-pad to 32 bytes before running it * through the cipher. * * We also fill in the priority bits of the CCM * initial block as we know whether or not we have * a QOS frame. 
*/ if (IEEE80211_IS_DSTODS(wh)) { IEEE80211_ADDR_COPY(aad + 24, ((struct ieee80211_frame_addr4 *)wh)->i_addr4); if (IS_QOS_DATA(wh)) { struct ieee80211_qosframe_addr4 *qwh4 = (struct ieee80211_qosframe_addr4 *) wh; aad[30] = qwh4->i_qos[0] & 0x0f;/* just priority bits */ aad[31] = 0; b0[1] = aad[30]; aad[1] = 22 + IEEE80211_ADDR_LEN + 2; } else { *(uint16_t *)&aad[30] = 0; b0[1] = 0; aad[1] = 22 + IEEE80211_ADDR_LEN; } } else { if (IS_QOS_DATA(wh)) { struct ieee80211_qosframe *qwh = (struct ieee80211_qosframe*) wh; aad[24] = qwh->i_qos[0] & 0x0f; /* just priority bits */ aad[25] = 0; b0[1] = aad[24]; aad[1] = 22 + 2; } else { *(uint16_t *)&aad[24] = 0; b0[1] = 0; aad[1] = 22; } *(uint16_t *)&aad[26] = 0; *(uint32_t *)&aad[28] = 0; } /* Start with the first block and AAD */ rijndael_encrypt(ctx, b0, auth); xor_block(auth, aad, AES_BLOCK_LEN); rijndael_encrypt(ctx, auth, auth); xor_block(auth, &aad[AES_BLOCK_LEN], AES_BLOCK_LEN); rijndael_encrypt(ctx, auth, auth); b0[0] &= 0x07; b0[14] = b0[15] = 0; rijndael_encrypt(ctx, b0, s0); #undef IS_QOS_DATA } #define CCMP_ENCRYPT(_i, _b, _b0, _pos, _e, _len) do { \ /* Authentication */ \ xor_block(_b, _pos, _len); \ rijndael_encrypt(&ctx->cc_aes, _b, _b); \ /* Encryption, with counter */ \ _b0[14] = (_i >> 8) & 0xff; \ _b0[15] = _i & 0xff; \ rijndael_encrypt(&ctx->cc_aes, _b0, _e); \ xor_block(_pos, _e, _len); \ } while (0) static int ccmp_encrypt(struct ieee80211_key *key, struct mbuf *m0, int hdrlen) { struct ccmp_ctx *ctx = key->wk_private; struct ieee80211_frame *wh; struct mbuf *m = m0; int data_len, i, space; uint8_t aad[2 * AES_BLOCK_LEN], b0[AES_BLOCK_LEN], b[AES_BLOCK_LEN], e[AES_BLOCK_LEN], s0[AES_BLOCK_LEN]; uint8_t *pos; ctx->cc_vap->iv_stats.is_crypto_ccmp++; wh = mtod(m, struct ieee80211_frame *); data_len = m->m_pkthdr.len - (hdrlen + ccmp.ic_header); ccmp_init_blocks(&ctx->cc_aes, wh, key->wk_keytsc, data_len, b0, aad, b, s0); i = 1; pos = mtod(m, uint8_t *) + hdrlen + ccmp.ic_header; /* NB: assumes header is entirely in first mbuf */ space = m->m_len - (hdrlen + ccmp.ic_header); for (;;) { if (space > data_len) space = data_len; /* * Do full blocks. */ while (space >= AES_BLOCK_LEN) { CCMP_ENCRYPT(i, b, b0, pos, e, AES_BLOCK_LEN); pos += AES_BLOCK_LEN, space -= AES_BLOCK_LEN; data_len -= AES_BLOCK_LEN; i++; } if (data_len <= 0) /* no more data */ break; m = m->m_next; if (m == NULL) { /* last buffer */ if (space != 0) { /* * Short last block. */ CCMP_ENCRYPT(i, b, b0, pos, e, space); } break; } if (space != 0) { uint8_t *pos_next; int space_next; int len, dl, sp; struct mbuf *n; /* * Block straddles one or more mbufs, gather data * into the block buffer b, apply the cipher, then * scatter the results back into the mbuf chain. * The buffer will automatically get space bytes * of data at offset 0 copied in+out by the * CCMP_ENCRYPT request so we must take care of * the remaining data. */ n = m; dl = data_len; sp = space; for (;;) { pos_next = mtod(n, uint8_t *); len = min(dl, AES_BLOCK_LEN); space_next = len > sp ? len - sp : 0; if (n->m_len >= space_next) { /* * This mbuf has enough data; just grab * what we need and stop. */ xor_block(b+sp, pos_next, space_next); break; } /* * This mbuf's contents are insufficient, * take 'em all and prepare to advance to * the next mbuf. 
*/ xor_block(b+sp, pos_next, n->m_len); sp += n->m_len, dl -= n->m_len; n = n->m_next; if (n == NULL) break; } CCMP_ENCRYPT(i, b, b0, pos, e, space); /* NB: just like above, but scatter data to mbufs */ dl = data_len; sp = space; for (;;) { pos_next = mtod(m, uint8_t *); len = min(dl, AES_BLOCK_LEN); space_next = len > sp ? len - sp : 0; if (m->m_len >= space_next) { xor_block(pos_next, e+sp, space_next); break; } xor_block(pos_next, e+sp, m->m_len); sp += m->m_len, dl -= m->m_len; m = m->m_next; if (m == NULL) goto done; } /* * Do bookkeeping. m now points to the last mbuf * we grabbed data from. We know we consumed a * full block of data as otherwise we'd have hit * the end of the mbuf chain, so deduct from data_len. * Otherwise advance the block number (i) and setup * pos+space to reflect contents of the new mbuf. */ data_len -= AES_BLOCK_LEN; i++; pos = pos_next + space_next; space = m->m_len - space_next; } else { /* * Setup for next buffer. */ pos = mtod(m, uint8_t *); space = m->m_len; } } done: /* tack on MIC */ xor_block(b, s0, ccmp.ic_trailer); return m_append(m0, ccmp.ic_trailer, b); } #undef CCMP_ENCRYPT #define CCMP_DECRYPT(_i, _b, _b0, _pos, _a, _len) do { \ /* Decrypt, with counter */ \ _b0[14] = (_i >> 8) & 0xff; \ _b0[15] = _i & 0xff; \ rijndael_encrypt(&ctx->cc_aes, _b0, _b); \ xor_block(_pos, _b, _len); \ /* Authentication */ \ xor_block(_a, _pos, _len); \ rijndael_encrypt(&ctx->cc_aes, _a, _a); \ } while (0) static int ccmp_decrypt(struct ieee80211_key *key, u_int64_t pn, struct mbuf *m, int hdrlen) { struct ccmp_ctx *ctx = key->wk_private; struct ieee80211vap *vap = ctx->cc_vap; struct ieee80211_frame *wh; uint8_t aad[2 * AES_BLOCK_LEN]; uint8_t b0[AES_BLOCK_LEN], b[AES_BLOCK_LEN], a[AES_BLOCK_LEN]; uint8_t mic[AES_BLOCK_LEN]; size_t data_len; int i; uint8_t *pos; u_int space; ctx->cc_vap->iv_stats.is_crypto_ccmp++; wh = mtod(m, struct ieee80211_frame *); data_len = m->m_pkthdr.len - (hdrlen + ccmp.ic_header + ccmp.ic_trailer); ccmp_init_blocks(&ctx->cc_aes, wh, pn, data_len, b0, aad, a, b); m_copydata(m, m->m_pkthdr.len - ccmp.ic_trailer, ccmp.ic_trailer, mic); xor_block(mic, b, ccmp.ic_trailer); i = 1; pos = mtod(m, uint8_t *) + hdrlen + ccmp.ic_header; space = m->m_len - (hdrlen + ccmp.ic_header); for (;;) { if (space > data_len) space = data_len; while (space >= AES_BLOCK_LEN) { CCMP_DECRYPT(i, b, b0, pos, a, AES_BLOCK_LEN); pos += AES_BLOCK_LEN, space -= AES_BLOCK_LEN; data_len -= AES_BLOCK_LEN; i++; } if (data_len <= 0) /* no more data */ break; m = m->m_next; if (m == NULL) { /* last buffer */ if (space != 0) /* short last block */ CCMP_DECRYPT(i, b, b0, pos, a, space); break; } if (space != 0) { uint8_t *pos_next; u_int space_next; u_int len; /* * Block straddles buffers, split references. We * do not handle splits that require >2 buffers * since rx'd frames are never badly fragmented * because drivers typically recv in clusters. */ pos_next = mtod(m, uint8_t *); len = min(data_len, AES_BLOCK_LEN); space_next = len > space ? len - space : 0; KASSERT(m->m_len >= space_next, ("not enough data in following buffer, " "m_len %u need %u\n", m->m_len, space_next)); xor_block(b+space, pos_next, space_next); CCMP_DECRYPT(i, b, b0, pos, a, space); xor_block(pos_next, b+space, space_next); data_len -= len; i++; pos = pos_next + space_next; space = m->m_len - space_next; } else { /* * Setup for next buffer. 
*/ pos = mtod(m, uint8_t *); space = m->m_len; } } if (memcmp(mic, a, ccmp.ic_trailer) != 0) { IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_CRYPTO, wh->i_addr2, "%s", "AES-CCM decrypt failed; MIC mismatch"); vap->iv_stats.is_rx_ccmpmic++; return 0; } return 1; } #undef CCMP_DECRYPT /* * Module glue. */ IEEE80211_CRYPTO_MODULE(ccmp, 1); diff --git a/sys/net80211/ieee80211_crypto_tkip.c b/sys/net80211/ieee80211_crypto_tkip.c index 59230d3538a7..cfd1392e2075 100644 --- a/sys/net80211/ieee80211_crypto_tkip.c +++ b/sys/net80211/ieee80211_crypto_tkip.c @@ -1,1071 +1,1071 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2002-2008 Sam Leffler, Errno Consulting * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * IEEE 802.11i TKIP crypto support. * * Part of this module is derived from similar code in the Host * AP driver. The code is used with the consent of the author and * it's license is included below. */ #include "opt_wlan.h" #include #include #include #include #include #include #include #include #include #include #include #include static void *tkip_attach(struct ieee80211vap *, struct ieee80211_key *); static void tkip_detach(struct ieee80211_key *); static int tkip_setkey(struct ieee80211_key *); static void tkip_setiv(struct ieee80211_key *, uint8_t *); static int tkip_encap(struct ieee80211_key *, struct mbuf *); static int tkip_enmic(struct ieee80211_key *, struct mbuf *, int); static int tkip_decap(struct ieee80211_key *, struct mbuf *, int); static int tkip_demic(struct ieee80211_key *, struct mbuf *, int); static const struct ieee80211_cipher tkip = { .ic_name = "TKIP", .ic_cipher = IEEE80211_CIPHER_TKIP, .ic_header = IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + IEEE80211_WEP_EXTIVLEN, .ic_trailer = IEEE80211_WEP_CRCLEN, .ic_miclen = IEEE80211_WEP_MICLEN, .ic_attach = tkip_attach, .ic_detach = tkip_detach, .ic_setkey = tkip_setkey, .ic_setiv = tkip_setiv, .ic_encap = tkip_encap, .ic_decap = tkip_decap, .ic_enmic = tkip_enmic, .ic_demic = tkip_demic, }; typedef uint8_t u8; typedef uint16_t u16; typedef uint32_t __u32; typedef uint32_t u32; struct tkip_ctx { struct ieee80211vap *tc_vap; /* for diagnostics+statistics */ u16 tx_ttak[5]; u8 tx_rc4key[16]; /* XXX for test module; make locals? 
*/ u16 rx_ttak[5]; int rx_phase1_done; u8 rx_rc4key[16]; /* XXX for test module; make locals? */ uint64_t rx_rsc; /* held until MIC verified */ }; static void michael_mic(struct tkip_ctx *, const u8 *key, struct mbuf *m, u_int off, size_t data_len, u8 mic[IEEE80211_WEP_MICLEN]); static int tkip_encrypt(struct tkip_ctx *, struct ieee80211_key *, struct mbuf *, int hdr_len); static int tkip_decrypt(struct tkip_ctx *, struct ieee80211_key *, struct mbuf *, int hdr_len); /* number of references from net80211 layer */ static int nrefs = 0; static void * tkip_attach(struct ieee80211vap *vap, struct ieee80211_key *k) { struct tkip_ctx *ctx; ctx = (struct tkip_ctx *) IEEE80211_MALLOC(sizeof(struct tkip_ctx), M_80211_CRYPTO, IEEE80211_M_NOWAIT | IEEE80211_M_ZERO); if (ctx == NULL) { vap->iv_stats.is_crypto_nomem++; return NULL; } ctx->tc_vap = vap; nrefs++; /* NB: we assume caller locking */ return ctx; } static void tkip_detach(struct ieee80211_key *k) { struct tkip_ctx *ctx = k->wk_private; IEEE80211_FREE(ctx, M_80211_CRYPTO); KASSERT(nrefs > 0, ("imbalanced attach/detach")); nrefs--; /* NB: we assume caller locking */ } static int tkip_setkey(struct ieee80211_key *k) { struct tkip_ctx *ctx = k->wk_private; if (k->wk_keylen != (128/NBBY)) { (void) ctx; /* XXX */ IEEE80211_DPRINTF(ctx->tc_vap, IEEE80211_MSG_CRYPTO, "%s: Invalid key length %u, expecting %u\n", __func__, k->wk_keylen, 128/NBBY); return 0; } ctx->rx_phase1_done = 0; return 1; } static void tkip_setiv(struct ieee80211_key *k, uint8_t *ivp) { struct tkip_ctx *ctx = k->wk_private; struct ieee80211vap *vap = ctx->tc_vap; uint8_t keyid; keyid = ieee80211_crypto_get_keyid(vap, k) << 6; k->wk_keytsc++; ivp[0] = k->wk_keytsc >> 8; /* TSC1 */ ivp[1] = (ivp[0] | 0x20) & 0x7f; /* WEP seed */ ivp[2] = k->wk_keytsc >> 0; /* TSC0 */ ivp[3] = keyid | IEEE80211_WEP_EXTIV; /* KeyID | ExtID */ ivp[4] = k->wk_keytsc >> 16; /* TSC2 */ ivp[5] = k->wk_keytsc >> 24; /* TSC3 */ ivp[6] = k->wk_keytsc >> 32; /* TSC4 */ ivp[7] = k->wk_keytsc >> 40; /* TSC5 */ } /* * Add privacy headers and do any s/w encryption required. */ static int tkip_encap(struct ieee80211_key *k, struct mbuf *m) { struct tkip_ctx *ctx = k->wk_private; struct ieee80211vap *vap = ctx->tc_vap; struct ieee80211com *ic = vap->iv_ic; struct ieee80211_frame *wh; uint8_t *ivp; int hdrlen; int is_mgmt; wh = mtod(m, struct ieee80211_frame *); is_mgmt = IEEE80211_IS_MGMT(wh); /* * Handle TKIP counter measures requirement. */ if (vap->iv_flags & IEEE80211_F_COUNTERM) { #ifdef IEEE80211_DEBUG struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *); #endif IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_CRYPTO, wh->i_addr2, "discard frame due to countermeasures (%s)", __func__); vap->iv_stats.is_crypto_tkipcm++; return 0; } /* * Check to see whether IV needs to be included. */ if (is_mgmt && (k->wk_flags & IEEE80211_KEY_NOIVMGT)) return 1; if ((! is_mgmt) && (k->wk_flags & IEEE80211_KEY_NOIV)) return 1; hdrlen = ieee80211_hdrspace(ic, mtod(m, void *)); /* * Copy down 802.11 header and add the IV, KeyID, and ExtIV. */ - M_PREPEND(m, tkip.ic_header, M_NOWAIT); + M_PREPEND(m, tkip.ic_header, IEEE80211_M_NOWAIT); if (m == NULL) return 0; ivp = mtod(m, uint8_t *); memmove(ivp, ivp + tkip.ic_header, hdrlen); ivp += hdrlen; tkip_setiv(k, ivp); /* * Finally, do software encrypt if needed. */ if ((k->wk_flags & IEEE80211_KEY_SWENCRYPT) && !tkip_encrypt(ctx, k, m, hdrlen)) return 0; return 1; } /* * Add MIC to the frame as needed. 
*/ static int tkip_enmic(struct ieee80211_key *k, struct mbuf *m, int force) { struct tkip_ctx *ctx = k->wk_private; struct ieee80211_frame *wh; int is_mgmt; wh = mtod(m, struct ieee80211_frame *); is_mgmt = IEEE80211_IS_MGMT(wh); /* * Check to see whether MIC needs to be included. */ if (is_mgmt && (k->wk_flags & IEEE80211_KEY_NOMICMGT)) return 1; if ((! is_mgmt) && (k->wk_flags & IEEE80211_KEY_NOMIC)) return 1; if (force || (k->wk_flags & IEEE80211_KEY_SWENMIC)) { struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *); struct ieee80211vap *vap = ctx->tc_vap; struct ieee80211com *ic = vap->iv_ic; int hdrlen; uint8_t mic[IEEE80211_WEP_MICLEN]; vap->iv_stats.is_crypto_tkipenmic++; hdrlen = ieee80211_hdrspace(ic, wh); michael_mic(ctx, k->wk_txmic, m, hdrlen, m->m_pkthdr.len - hdrlen, mic); return m_append(m, tkip.ic_miclen, mic); } return 1; } static __inline uint64_t READ_6(uint8_t b0, uint8_t b1, uint8_t b2, uint8_t b3, uint8_t b4, uint8_t b5) { uint32_t iv32 = (b0 << 0) | (b1 << 8) | (b2 << 16) | (b3 << 24); uint16_t iv16 = (b4 << 0) | (b5 << 8); return (((uint64_t)iv16) << 32) | iv32; } /* * Validate and strip privacy headers (and trailer) for a * received frame. If necessary, decrypt the frame using * the specified key. */ static int tkip_decap(struct ieee80211_key *k, struct mbuf *m, int hdrlen) { const struct ieee80211_rx_stats *rxs; struct tkip_ctx *ctx = k->wk_private; struct ieee80211vap *vap = ctx->tc_vap; struct ieee80211_frame *wh; uint8_t *ivp, tid; rxs = ieee80211_get_rx_params_ptr(m); /* * If IV has been stripped, we skip most of the below. */ if ((rxs != NULL) && (rxs->c_pktflags & IEEE80211_RX_F_IV_STRIP)) goto finish; /* * Header should have extended IV and sequence number; * verify the former and validate the latter. */ wh = mtod(m, struct ieee80211_frame *); ivp = mtod(m, uint8_t *) + hdrlen; if ((ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV) == 0) { /* * No extended IV; discard frame. */ IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_CRYPTO, wh->i_addr2, "%s", "missing ExtIV for TKIP cipher"); vap->iv_stats.is_rx_tkipformat++; return 0; } /* * Handle TKIP counter measures requirement. */ if (vap->iv_flags & IEEE80211_F_COUNTERM) { IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_CRYPTO, wh->i_addr2, "discard frame due to countermeasures (%s)", __func__); vap->iv_stats.is_crypto_tkipcm++; return 0; } tid = ieee80211_gettid(wh); ctx->rx_rsc = READ_6(ivp[2], ivp[0], ivp[4], ivp[5], ivp[6], ivp[7]); if (ctx->rx_rsc <= k->wk_keyrsc[tid] && (k->wk_flags & IEEE80211_KEY_NOREPLAY) == 0) { /* * Replay violation; notify upper layer. */ ieee80211_notify_replay_failure(vap, wh, k, ctx->rx_rsc, tid); vap->iv_stats.is_rx_tkipreplay++; return 0; } /* * NB: We can't update the rsc in the key until MIC is verified. * * We assume we are not preempted between doing the check above * and updating wk_keyrsc when stripping the MIC in tkip_demic. * Otherwise we might process another packet and discard it as * a replay. */ /* * Check if the device handled the decrypt in hardware. * If so we just strip the header; otherwise we need to * handle the decrypt in software. */ if ((k->wk_flags & IEEE80211_KEY_SWDECRYPT) && !tkip_decrypt(ctx, k, m, hdrlen)) return 0; finish: /* * Copy up 802.11 header and strip crypto bits - but only if we * are required to. */ if (! ((rxs != NULL) && (rxs->c_pktflags & IEEE80211_RX_F_IV_STRIP))) { memmove(mtod(m, uint8_t *) + tkip.ic_header, mtod(m, void *), hdrlen); m_adj(m, tkip.ic_header); } /* * XXX TODO: do we need an option to potentially not strip the * WEP trailer? 
Does "MMIC_STRIP" also mean this? Or? */ m_adj(m, -tkip.ic_trailer); return 1; } /* * Verify and strip MIC from the frame. */ static int tkip_demic(struct ieee80211_key *k, struct mbuf *m, int force) { const struct ieee80211_rx_stats *rxs; struct tkip_ctx *ctx = k->wk_private; struct ieee80211_frame *wh; uint8_t tid; wh = mtod(m, struct ieee80211_frame *); rxs = ieee80211_get_rx_params_ptr(m); /* * If we are told about a MIC failure from the driver, * directly notify as a michael failure to the upper * layers. */ if ((rxs != NULL) && (rxs->c_pktflags & IEEE80211_RX_F_FAIL_MIC)) { struct ieee80211vap *vap = ctx->tc_vap; ieee80211_notify_michael_failure(vap, wh, k->wk_rxkeyix != IEEE80211_KEYIX_NONE ? k->wk_rxkeyix : k->wk_keyix); return 0; } /* * If IV has been stripped, we skip most of the below. */ if ((rxs != NULL) && (rxs->c_pktflags & IEEE80211_RX_F_MMIC_STRIP)) goto finish; if ((k->wk_flags & IEEE80211_KEY_SWDEMIC) || force) { struct ieee80211vap *vap = ctx->tc_vap; int hdrlen = ieee80211_hdrspace(vap->iv_ic, wh); u8 mic[IEEE80211_WEP_MICLEN]; u8 mic0[IEEE80211_WEP_MICLEN]; vap->iv_stats.is_crypto_tkipdemic++; michael_mic(ctx, k->wk_rxmic, m, hdrlen, m->m_pkthdr.len - (hdrlen + tkip.ic_miclen), mic); m_copydata(m, m->m_pkthdr.len - tkip.ic_miclen, tkip.ic_miclen, mic0); if (memcmp(mic, mic0, tkip.ic_miclen)) { /* NB: 802.11 layer handles statistic and debug msg */ ieee80211_notify_michael_failure(vap, wh, k->wk_rxkeyix != IEEE80211_KEYIX_NONE ? k->wk_rxkeyix : k->wk_keyix); return 0; } } /* * Strip MIC from the tail. */ m_adj(m, -tkip.ic_miclen); /* * Ok to update rsc now that MIC has been verified. */ tid = ieee80211_gettid(wh); k->wk_keyrsc[tid] = ctx->rx_rsc; finish: return 1; } /* * Host AP crypt: host-based TKIP encryption implementation for Host AP driver * * Copyright (c) 2003-2004, Jouni Malinen * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. See README and COPYING for * more details. * * Alternatively, this software may be distributed under the terms of BSD * license. 
*/ static const __u32 crc32_table[256] = { 0x00000000L, 0x77073096L, 0xee0e612cL, 0x990951baL, 0x076dc419L, 0x706af48fL, 0xe963a535L, 0x9e6495a3L, 0x0edb8832L, 0x79dcb8a4L, 0xe0d5e91eL, 0x97d2d988L, 0x09b64c2bL, 0x7eb17cbdL, 0xe7b82d07L, 0x90bf1d91L, 0x1db71064L, 0x6ab020f2L, 0xf3b97148L, 0x84be41deL, 0x1adad47dL, 0x6ddde4ebL, 0xf4d4b551L, 0x83d385c7L, 0x136c9856L, 0x646ba8c0L, 0xfd62f97aL, 0x8a65c9ecL, 0x14015c4fL, 0x63066cd9L, 0xfa0f3d63L, 0x8d080df5L, 0x3b6e20c8L, 0x4c69105eL, 0xd56041e4L, 0xa2677172L, 0x3c03e4d1L, 0x4b04d447L, 0xd20d85fdL, 0xa50ab56bL, 0x35b5a8faL, 0x42b2986cL, 0xdbbbc9d6L, 0xacbcf940L, 0x32d86ce3L, 0x45df5c75L, 0xdcd60dcfL, 0xabd13d59L, 0x26d930acL, 0x51de003aL, 0xc8d75180L, 0xbfd06116L, 0x21b4f4b5L, 0x56b3c423L, 0xcfba9599L, 0xb8bda50fL, 0x2802b89eL, 0x5f058808L, 0xc60cd9b2L, 0xb10be924L, 0x2f6f7c87L, 0x58684c11L, 0xc1611dabL, 0xb6662d3dL, 0x76dc4190L, 0x01db7106L, 0x98d220bcL, 0xefd5102aL, 0x71b18589L, 0x06b6b51fL, 0x9fbfe4a5L, 0xe8b8d433L, 0x7807c9a2L, 0x0f00f934L, 0x9609a88eL, 0xe10e9818L, 0x7f6a0dbbL, 0x086d3d2dL, 0x91646c97L, 0xe6635c01L, 0x6b6b51f4L, 0x1c6c6162L, 0x856530d8L, 0xf262004eL, 0x6c0695edL, 0x1b01a57bL, 0x8208f4c1L, 0xf50fc457L, 0x65b0d9c6L, 0x12b7e950L, 0x8bbeb8eaL, 0xfcb9887cL, 0x62dd1ddfL, 0x15da2d49L, 0x8cd37cf3L, 0xfbd44c65L, 0x4db26158L, 0x3ab551ceL, 0xa3bc0074L, 0xd4bb30e2L, 0x4adfa541L, 0x3dd895d7L, 0xa4d1c46dL, 0xd3d6f4fbL, 0x4369e96aL, 0x346ed9fcL, 0xad678846L, 0xda60b8d0L, 0x44042d73L, 0x33031de5L, 0xaa0a4c5fL, 0xdd0d7cc9L, 0x5005713cL, 0x270241aaL, 0xbe0b1010L, 0xc90c2086L, 0x5768b525L, 0x206f85b3L, 0xb966d409L, 0xce61e49fL, 0x5edef90eL, 0x29d9c998L, 0xb0d09822L, 0xc7d7a8b4L, 0x59b33d17L, 0x2eb40d81L, 0xb7bd5c3bL, 0xc0ba6cadL, 0xedb88320L, 0x9abfb3b6L, 0x03b6e20cL, 0x74b1d29aL, 0xead54739L, 0x9dd277afL, 0x04db2615L, 0x73dc1683L, 0xe3630b12L, 0x94643b84L, 0x0d6d6a3eL, 0x7a6a5aa8L, 0xe40ecf0bL, 0x9309ff9dL, 0x0a00ae27L, 0x7d079eb1L, 0xf00f9344L, 0x8708a3d2L, 0x1e01f268L, 0x6906c2feL, 0xf762575dL, 0x806567cbL, 0x196c3671L, 0x6e6b06e7L, 0xfed41b76L, 0x89d32be0L, 0x10da7a5aL, 0x67dd4accL, 0xf9b9df6fL, 0x8ebeeff9L, 0x17b7be43L, 0x60b08ed5L, 0xd6d6a3e8L, 0xa1d1937eL, 0x38d8c2c4L, 0x4fdff252L, 0xd1bb67f1L, 0xa6bc5767L, 0x3fb506ddL, 0x48b2364bL, 0xd80d2bdaL, 0xaf0a1b4cL, 0x36034af6L, 0x41047a60L, 0xdf60efc3L, 0xa867df55L, 0x316e8eefL, 0x4669be79L, 0xcb61b38cL, 0xbc66831aL, 0x256fd2a0L, 0x5268e236L, 0xcc0c7795L, 0xbb0b4703L, 0x220216b9L, 0x5505262fL, 0xc5ba3bbeL, 0xb2bd0b28L, 0x2bb45a92L, 0x5cb36a04L, 0xc2d7ffa7L, 0xb5d0cf31L, 0x2cd99e8bL, 0x5bdeae1dL, 0x9b64c2b0L, 0xec63f226L, 0x756aa39cL, 0x026d930aL, 0x9c0906a9L, 0xeb0e363fL, 0x72076785L, 0x05005713L, 0x95bf4a82L, 0xe2b87a14L, 0x7bb12baeL, 0x0cb61b38L, 0x92d28e9bL, 0xe5d5be0dL, 0x7cdcefb7L, 0x0bdbdf21L, 0x86d3d2d4L, 0xf1d4e242L, 0x68ddb3f8L, 0x1fda836eL, 0x81be16cdL, 0xf6b9265bL, 0x6fb077e1L, 0x18b74777L, 0x88085ae6L, 0xff0f6a70L, 0x66063bcaL, 0x11010b5cL, 0x8f659effL, 0xf862ae69L, 0x616bffd3L, 0x166ccf45L, 0xa00ae278L, 0xd70dd2eeL, 0x4e048354L, 0x3903b3c2L, 0xa7672661L, 0xd06016f7L, 0x4969474dL, 0x3e6e77dbL, 0xaed16a4aL, 0xd9d65adcL, 0x40df0b66L, 0x37d83bf0L, 0xa9bcae53L, 0xdebb9ec5L, 0x47b2cf7fL, 0x30b5ffe9L, 0xbdbdf21cL, 0xcabac28aL, 0x53b39330L, 0x24b4a3a6L, 0xbad03605L, 0xcdd70693L, 0x54de5729L, 0x23d967bfL, 0xb3667a2eL, 0xc4614ab8L, 0x5d681b02L, 0x2a6f2b94L, 0xb40bbe37L, 0xc30c8ea1L, 0x5a05df1bL, 0x2d02ef8dL }; static __inline u16 RotR1(u16 val) { return (val >> 1) | (val << 15); } static __inline u8 Lo8(u16 val) { return val & 0xff; } static __inline u8 Hi8(u16 val) { return val >> 8; } 
static __inline u16 Lo16(u32 val) { return val & 0xffff; } static __inline u16 Hi16(u32 val) { return val >> 16; } static __inline u16 Mk16(u8 hi, u8 lo) { return lo | (((u16) hi) << 8); } static __inline u16 Mk16_le(const u16 *v) { return le16toh(*v); } static const u16 Sbox[256] = { 0xC6A5, 0xF884, 0xEE99, 0xF68D, 0xFF0D, 0xD6BD, 0xDEB1, 0x9154, 0x6050, 0x0203, 0xCEA9, 0x567D, 0xE719, 0xB562, 0x4DE6, 0xEC9A, 0x8F45, 0x1F9D, 0x8940, 0xFA87, 0xEF15, 0xB2EB, 0x8EC9, 0xFB0B, 0x41EC, 0xB367, 0x5FFD, 0x45EA, 0x23BF, 0x53F7, 0xE496, 0x9B5B, 0x75C2, 0xE11C, 0x3DAE, 0x4C6A, 0x6C5A, 0x7E41, 0xF502, 0x834F, 0x685C, 0x51F4, 0xD134, 0xF908, 0xE293, 0xAB73, 0x6253, 0x2A3F, 0x080C, 0x9552, 0x4665, 0x9D5E, 0x3028, 0x37A1, 0x0A0F, 0x2FB5, 0x0E09, 0x2436, 0x1B9B, 0xDF3D, 0xCD26, 0x4E69, 0x7FCD, 0xEA9F, 0x121B, 0x1D9E, 0x5874, 0x342E, 0x362D, 0xDCB2, 0xB4EE, 0x5BFB, 0xA4F6, 0x764D, 0xB761, 0x7DCE, 0x527B, 0xDD3E, 0x5E71, 0x1397, 0xA6F5, 0xB968, 0x0000, 0xC12C, 0x4060, 0xE31F, 0x79C8, 0xB6ED, 0xD4BE, 0x8D46, 0x67D9, 0x724B, 0x94DE, 0x98D4, 0xB0E8, 0x854A, 0xBB6B, 0xC52A, 0x4FE5, 0xED16, 0x86C5, 0x9AD7, 0x6655, 0x1194, 0x8ACF, 0xE910, 0x0406, 0xFE81, 0xA0F0, 0x7844, 0x25BA, 0x4BE3, 0xA2F3, 0x5DFE, 0x80C0, 0x058A, 0x3FAD, 0x21BC, 0x7048, 0xF104, 0x63DF, 0x77C1, 0xAF75, 0x4263, 0x2030, 0xE51A, 0xFD0E, 0xBF6D, 0x814C, 0x1814, 0x2635, 0xC32F, 0xBEE1, 0x35A2, 0x88CC, 0x2E39, 0x9357, 0x55F2, 0xFC82, 0x7A47, 0xC8AC, 0xBAE7, 0x322B, 0xE695, 0xC0A0, 0x1998, 0x9ED1, 0xA37F, 0x4466, 0x547E, 0x3BAB, 0x0B83, 0x8CCA, 0xC729, 0x6BD3, 0x283C, 0xA779, 0xBCE2, 0x161D, 0xAD76, 0xDB3B, 0x6456, 0x744E, 0x141E, 0x92DB, 0x0C0A, 0x486C, 0xB8E4, 0x9F5D, 0xBD6E, 0x43EF, 0xC4A6, 0x39A8, 0x31A4, 0xD337, 0xF28B, 0xD532, 0x8B43, 0x6E59, 0xDAB7, 0x018C, 0xB164, 0x9CD2, 0x49E0, 0xD8B4, 0xACFA, 0xF307, 0xCF25, 0xCAAF, 0xF48E, 0x47E9, 0x1018, 0x6FD5, 0xF088, 0x4A6F, 0x5C72, 0x3824, 0x57F1, 0x73C7, 0x9751, 0xCB23, 0xA17C, 0xE89C, 0x3E21, 0x96DD, 0x61DC, 0x0D86, 0x0F85, 0xE090, 0x7C42, 0x71C4, 0xCCAA, 0x90D8, 0x0605, 0xF701, 0x1C12, 0xC2A3, 0x6A5F, 0xAEF9, 0x69D0, 0x1791, 0x9958, 0x3A27, 0x27B9, 0xD938, 0xEB13, 0x2BB3, 0x2233, 0xD2BB, 0xA970, 0x0789, 0x33A7, 0x2DB6, 0x3C22, 0x1592, 0xC920, 0x8749, 0xAAFF, 0x5078, 0xA57A, 0x038F, 0x59F8, 0x0980, 0x1A17, 0x65DA, 0xD731, 0x84C6, 0xD0B8, 0x82C3, 0x29B0, 0x5A77, 0x1E11, 0x7BCB, 0xA8FC, 0x6DD6, 0x2C3A, }; static __inline u16 _S_(u16 v) { u16 t = Sbox[Hi8(v)]; return Sbox[Lo8(v)] ^ ((t << 8) | (t >> 8)); } #define PHASE1_LOOP_COUNT 8 static void tkip_mixing_phase1(u16 *TTAK, const u8 *TK, const u8 *TA, u32 IV32) { int i, j; /* Initialize the 80-bit TTAK from TSC (IV32) and TA[0..5] */ TTAK[0] = Lo16(IV32); TTAK[1] = Hi16(IV32); TTAK[2] = Mk16(TA[1], TA[0]); TTAK[3] = Mk16(TA[3], TA[2]); TTAK[4] = Mk16(TA[5], TA[4]); for (i = 0; i < PHASE1_LOOP_COUNT; i++) { j = 2 * (i & 1); TTAK[0] += _S_(TTAK[4] ^ Mk16(TK[1 + j], TK[0 + j])); TTAK[1] += _S_(TTAK[0] ^ Mk16(TK[5 + j], TK[4 + j])); TTAK[2] += _S_(TTAK[1] ^ Mk16(TK[9 + j], TK[8 + j])); TTAK[3] += _S_(TTAK[2] ^ Mk16(TK[13 + j], TK[12 + j])); TTAK[4] += _S_(TTAK[3] ^ Mk16(TK[1 + j], TK[0 + j])) + i; } } #ifndef _BYTE_ORDER #error "Don't know native byte order" #endif static void tkip_mixing_phase2(u8 *WEPSeed, const u8 *TK, const u16 *TTAK, u16 IV16) { /* Make temporary area overlap WEP seed so that the final copy can be * avoided on little endian hosts. 
*/ u16 *PPK = (u16 *) &WEPSeed[4]; /* Step 1 - make copy of TTAK and bring in TSC */ PPK[0] = TTAK[0]; PPK[1] = TTAK[1]; PPK[2] = TTAK[2]; PPK[3] = TTAK[3]; PPK[4] = TTAK[4]; PPK[5] = TTAK[4] + IV16; /* Step 2 - 96-bit bijective mixing using S-box */ PPK[0] += _S_(PPK[5] ^ Mk16_le((const u16 *) &TK[0])); PPK[1] += _S_(PPK[0] ^ Mk16_le((const u16 *) &TK[2])); PPK[2] += _S_(PPK[1] ^ Mk16_le((const u16 *) &TK[4])); PPK[3] += _S_(PPK[2] ^ Mk16_le((const u16 *) &TK[6])); PPK[4] += _S_(PPK[3] ^ Mk16_le((const u16 *) &TK[8])); PPK[5] += _S_(PPK[4] ^ Mk16_le((const u16 *) &TK[10])); PPK[0] += RotR1(PPK[5] ^ Mk16_le((const u16 *) &TK[12])); PPK[1] += RotR1(PPK[0] ^ Mk16_le((const u16 *) &TK[14])); PPK[2] += RotR1(PPK[1]); PPK[3] += RotR1(PPK[2]); PPK[4] += RotR1(PPK[3]); PPK[5] += RotR1(PPK[4]); /* Step 3 - bring in last of TK bits, assign 24-bit WEP IV value * WEPSeed[0..2] is transmitted as WEP IV */ WEPSeed[0] = Hi8(IV16); WEPSeed[1] = (Hi8(IV16) | 0x20) & 0x7F; WEPSeed[2] = Lo8(IV16); WEPSeed[3] = Lo8((PPK[5] ^ Mk16_le((const u16 *) &TK[0])) >> 1); #if _BYTE_ORDER == _BIG_ENDIAN { int i; for (i = 0; i < 6; i++) PPK[i] = (PPK[i] << 8) | (PPK[i] >> 8); } #endif } static void wep_encrypt(u8 *key, struct mbuf *m0, u_int off, size_t data_len, uint8_t icv[IEEE80211_WEP_CRCLEN]) { u32 i, j, k, crc; size_t buflen; u8 S[256]; u8 *pos; struct mbuf *m; #define S_SWAP(a,b) do { u8 t = S[a]; S[a] = S[b]; S[b] = t; } while(0) /* Setup RC4 state */ for (i = 0; i < 256; i++) S[i] = i; j = 0; for (i = 0; i < 256; i++) { j = (j + S[i] + key[i & 0x0f]) & 0xff; S_SWAP(i, j); } /* Compute CRC32 over unencrypted data and apply RC4 to data */ crc = ~0; i = j = 0; m = m0; pos = mtod(m, uint8_t *) + off; buflen = m->m_len - off; for (;;) { if (buflen > data_len) buflen = data_len; data_len -= buflen; for (k = 0; k < buflen; k++) { crc = crc32_table[(crc ^ *pos) & 0xff] ^ (crc >> 8); i = (i + 1) & 0xff; j = (j + S[i]) & 0xff; S_SWAP(i, j); *pos++ ^= S[(S[i] + S[j]) & 0xff]; } m = m->m_next; if (m == NULL) { KASSERT(data_len == 0, ("out of buffers with data_len %zu\n", data_len)); break; } pos = mtod(m, uint8_t *); buflen = m->m_len; } crc = ~crc; /* Append little-endian CRC32 and encrypt it to produce ICV */ icv[0] = crc; icv[1] = crc >> 8; icv[2] = crc >> 16; icv[3] = crc >> 24; for (k = 0; k < IEEE80211_WEP_CRCLEN; k++) { i = (i + 1) & 0xff; j = (j + S[i]) & 0xff; S_SWAP(i, j); icv[k] ^= S[(S[i] + S[j]) & 0xff]; } } static int wep_decrypt(u8 *key, struct mbuf *m, u_int off, size_t data_len) { u32 i, j, k, crc; u8 S[256]; u8 *pos, icv[4]; size_t buflen; /* Setup RC4 state */ for (i = 0; i < 256; i++) S[i] = i; j = 0; for (i = 0; i < 256; i++) { j = (j + S[i] + key[i & 0x0f]) & 0xff; S_SWAP(i, j); } /* Apply RC4 to data and compute CRC32 over decrypted data */ crc = ~0; i = j = 0; pos = mtod(m, uint8_t *) + off; buflen = m->m_len - off; for (;;) { if (buflen > data_len) buflen = data_len; data_len -= buflen; for (k = 0; k < buflen; k++) { i = (i + 1) & 0xff; j = (j + S[i]) & 0xff; S_SWAP(i, j); *pos ^= S[(S[i] + S[j]) & 0xff]; crc = crc32_table[(crc ^ *pos) & 0xff] ^ (crc >> 8); pos++; } m = m->m_next; if (m == NULL) { KASSERT(data_len == 0, ("out of buffers with data_len %zu\n", data_len)); break; } pos = mtod(m, uint8_t *); buflen = m->m_len; } crc = ~crc; /* Encrypt little-endian CRC32 and verify that it matches with the * received ICV */ icv[0] = crc; icv[1] = crc >> 8; icv[2] = crc >> 16; icv[3] = crc >> 24; for (k = 0; k < 4; k++) { i = (i + 1) & 0xff; j = (j + S[i]) & 0xff; S_SWAP(i, j); if ((icv[k] ^ S[(S[i] + 
S[j]) & 0xff]) != *pos++) { /* ICV mismatch - drop frame */ return -1; } } return 0; } static __inline u32 rotl(u32 val, int bits) { return (val << bits) | (val >> (32 - bits)); } static __inline u32 rotr(u32 val, int bits) { return (val >> bits) | (val << (32 - bits)); } static __inline u32 xswap(u32 val) { return ((val & 0x00ff00ff) << 8) | ((val & 0xff00ff00) >> 8); } #define michael_block(l, r) \ do { \ r ^= rotl(l, 17); \ l += r; \ r ^= xswap(l); \ l += r; \ r ^= rotl(l, 3); \ l += r; \ r ^= rotr(l, 2); \ l += r; \ } while (0) static __inline u32 get_le32_split(u8 b0, u8 b1, u8 b2, u8 b3) { return b0 | (b1 << 8) | (b2 << 16) | (b3 << 24); } static __inline u32 get_le32(const u8 *p) { return get_le32_split(p[0], p[1], p[2], p[3]); } static __inline void put_le32(u8 *p, u32 v) { p[0] = v; p[1] = v >> 8; p[2] = v >> 16; p[3] = v >> 24; } /* * Craft pseudo header used to calculate the MIC. */ static void michael_mic_hdr(const struct ieee80211_frame *wh0, uint8_t hdr[16]) { const struct ieee80211_frame_addr4 *wh = (const struct ieee80211_frame_addr4 *) wh0; switch (wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) { case IEEE80211_FC1_DIR_NODS: IEEE80211_ADDR_COPY(hdr, wh->i_addr1); /* DA */ IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN, wh->i_addr2); break; case IEEE80211_FC1_DIR_TODS: IEEE80211_ADDR_COPY(hdr, wh->i_addr3); /* DA */ IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN, wh->i_addr2); break; case IEEE80211_FC1_DIR_FROMDS: IEEE80211_ADDR_COPY(hdr, wh->i_addr1); /* DA */ IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN, wh->i_addr3); break; case IEEE80211_FC1_DIR_DSTODS: IEEE80211_ADDR_COPY(hdr, wh->i_addr3); /* DA */ IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN, wh->i_addr4); break; } if (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS) { const struct ieee80211_qosframe *qwh = (const struct ieee80211_qosframe *) wh; hdr[12] = qwh->i_qos[0] & IEEE80211_QOS_TID; } else hdr[12] = 0; hdr[13] = hdr[14] = hdr[15] = 0; /* reserved */ } static void michael_mic(struct tkip_ctx *ctx, const u8 *key, struct mbuf *m, u_int off, size_t data_len, u8 mic[IEEE80211_WEP_MICLEN]) { uint8_t hdr[16]; u32 l, r; const uint8_t *data; u_int space; michael_mic_hdr(mtod(m, struct ieee80211_frame *), hdr); l = get_le32(key); r = get_le32(key + 4); /* Michael MIC pseudo header: DA, SA, 3 x 0, Priority */ l ^= get_le32(hdr); michael_block(l, r); l ^= get_le32(&hdr[4]); michael_block(l, r); l ^= get_le32(&hdr[8]); michael_block(l, r); l ^= get_le32(&hdr[12]); michael_block(l, r); /* first buffer has special handling */ data = mtod(m, const uint8_t *) + off; space = m->m_len - off; for (;;) { if (space > data_len) space = data_len; /* collect 32-bit blocks from current buffer */ while (space >= sizeof(uint32_t)) { l ^= get_le32(data); michael_block(l, r); data += sizeof(uint32_t), space -= sizeof(uint32_t); data_len -= sizeof(uint32_t); } /* * NB: when space is zero we make one more trip around * the loop to advance to the next mbuf where there is * data. This handles the case where there are 4*n * bytes in an mbuf followed by <4 bytes in a later mbuf. * By making an extra trip we'll drop out of the loop * with m pointing at the mbuf with 3 bytes and space * set as required by the remainder handling below. */ if (data_len == 0 || (data_len < sizeof(uint32_t) && space != 0)) break; m = m->m_next; if (m == NULL) { KASSERT(0, ("out of data, data_len %zu\n", data_len)); break; } if (space != 0) { const uint8_t *data_next; /* * Block straddles buffers, split references. 
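 * For example, with a single byte left in this mbuf (space == 1) that byte
 * supplies b0 of the 32-bit little-endian word and the next mbuf supplies
 * b1..b3; the 2- and 3-byte cases below are analogous.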
*/ data_next = mtod(m, const uint8_t *); KASSERT(m->m_len >= sizeof(uint32_t) - space, ("not enough data in following buffer, " "m_len %u need %zu\n", m->m_len, sizeof(uint32_t) - space)); switch (space) { case 1: l ^= get_le32_split(data[0], data_next[0], data_next[1], data_next[2]); data = data_next + 3; space = m->m_len - 3; break; case 2: l ^= get_le32_split(data[0], data[1], data_next[0], data_next[1]); data = data_next + 2; space = m->m_len - 2; break; case 3: l ^= get_le32_split(data[0], data[1], data[2], data_next[0]); data = data_next + 1; space = m->m_len - 1; break; } michael_block(l, r); data_len -= sizeof(uint32_t); } else { /* * Setup for next buffer. */ data = mtod(m, const uint8_t *); space = m->m_len; } } /* * Catch degenerate cases like mbuf[4*n+1 bytes] followed by * mbuf[2 bytes]. I don't believe these should happen; if they * do then we'll need more involved logic. */ KASSERT(data_len <= space, ("not enough data, data_len %zu space %u\n", data_len, space)); /* Last block and padding (0x5a, 4..7 x 0) */ switch (data_len) { case 0: l ^= get_le32_split(0x5a, 0, 0, 0); break; case 1: l ^= get_le32_split(data[0], 0x5a, 0, 0); break; case 2: l ^= get_le32_split(data[0], data[1], 0x5a, 0); break; case 3: l ^= get_le32_split(data[0], data[1], data[2], 0x5a); break; } michael_block(l, r); /* l ^= 0; */ michael_block(l, r); put_le32(mic, l); put_le32(mic + 4, r); } static int tkip_encrypt(struct tkip_ctx *ctx, struct ieee80211_key *key, struct mbuf *m, int hdrlen) { struct ieee80211_frame *wh; uint8_t icv[IEEE80211_WEP_CRCLEN]; ctx->tc_vap->iv_stats.is_crypto_tkip++; wh = mtod(m, struct ieee80211_frame *); if ((u16)(key->wk_keytsc) == 0 || key->wk_keytsc == 1) { tkip_mixing_phase1(ctx->tx_ttak, key->wk_key, wh->i_addr2, (u32)(key->wk_keytsc >> 16)); } tkip_mixing_phase2(ctx->tx_rc4key, key->wk_key, ctx->tx_ttak, (u16) key->wk_keytsc); wep_encrypt(ctx->tx_rc4key, m, hdrlen + tkip.ic_header, m->m_pkthdr.len - (hdrlen + tkip.ic_header), icv); (void) m_append(m, IEEE80211_WEP_CRCLEN, icv); /* XXX check return */ return 1; } static int tkip_decrypt(struct tkip_ctx *ctx, struct ieee80211_key *key, struct mbuf *m, int hdrlen) { struct ieee80211_frame *wh; struct ieee80211vap *vap = ctx->tc_vap; u32 iv32; u16 iv16; u8 tid; vap->iv_stats.is_crypto_tkip++; wh = mtod(m, struct ieee80211_frame *); /* NB: tkip_decap already verified header and left seq in rx_rsc */ iv16 = (u16) ctx->rx_rsc; iv32 = (u32) (ctx->rx_rsc >> 16); tid = ieee80211_gettid(wh); if (iv32 != (u32)(key->wk_keyrsc[tid] >> 16) || !ctx->rx_phase1_done) { tkip_mixing_phase1(ctx->rx_ttak, key->wk_key, wh->i_addr2, iv32); ctx->rx_phase1_done = 1; } tkip_mixing_phase2(ctx->rx_rc4key, key->wk_key, ctx->rx_ttak, iv16); /* NB: m is unstripped; deduct headers + ICV to get payload */ if (wep_decrypt(ctx->rx_rc4key, m, hdrlen + tkip.ic_header, m->m_pkthdr.len - (hdrlen + tkip.ic_header + tkip.ic_trailer))) { if (iv32 != (u32)(key->wk_keyrsc[tid] >> 16)) { /* Previously cached Phase1 result was already lost, so * it needs to be recalculated for the next packet. */ ctx->rx_phase1_done = 0; } IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_CRYPTO, wh->i_addr2, "%s", "TKIP ICV mismatch on decrypt"); vap->iv_stats.is_rx_tkipicv++; return 0; } return 1; } /* * Module glue. 
*/ IEEE80211_CRYPTO_MODULE(tkip, 1); diff --git a/sys/net80211/ieee80211_crypto_wep.c b/sys/net80211/ieee80211_crypto_wep.c index 164157559cb2..113bb726a54b 100644 --- a/sys/net80211/ieee80211_crypto_wep.c +++ b/sys/net80211/ieee80211_crypto_wep.c @@ -1,519 +1,519 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2002-2008 Sam Leffler, Errno Consulting * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * IEEE 802.11 WEP crypto support. */ #include "opt_wlan.h" #include #include #include #include #include #include #include #include #include #include #include #include static void *wep_attach(struct ieee80211vap *, struct ieee80211_key *); static void wep_detach(struct ieee80211_key *); static int wep_setkey(struct ieee80211_key *); static void wep_setiv(struct ieee80211_key *, uint8_t *); static int wep_encap(struct ieee80211_key *, struct mbuf *); static int wep_decap(struct ieee80211_key *, struct mbuf *, int); static int wep_enmic(struct ieee80211_key *, struct mbuf *, int); static int wep_demic(struct ieee80211_key *, struct mbuf *, int); static const struct ieee80211_cipher wep = { .ic_name = "WEP", .ic_cipher = IEEE80211_CIPHER_WEP, .ic_header = IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN, .ic_trailer = IEEE80211_WEP_CRCLEN, .ic_miclen = 0, .ic_attach = wep_attach, .ic_detach = wep_detach, .ic_setkey = wep_setkey, .ic_setiv = wep_setiv, .ic_encap = wep_encap, .ic_decap = wep_decap, .ic_enmic = wep_enmic, .ic_demic = wep_demic, }; static int wep_encrypt(struct ieee80211_key *, struct mbuf *, int hdrlen); static int wep_decrypt(struct ieee80211_key *, struct mbuf *, int hdrlen); struct wep_ctx { struct ieee80211vap *wc_vap; /* for diagnostics+statistics */ struct ieee80211com *wc_ic; uint32_t wc_iv; /* initial vector for crypto */ }; /* number of references from net80211 layer */ static int nrefs = 0; static void * wep_attach(struct ieee80211vap *vap, struct ieee80211_key *k) { struct wep_ctx *ctx; ctx = (struct wep_ctx *) IEEE80211_MALLOC(sizeof(struct wep_ctx), M_80211_CRYPTO, IEEE80211_M_NOWAIT | IEEE80211_M_ZERO); if (ctx == NULL) { vap->iv_stats.is_crypto_nomem++; return NULL; } ctx->wc_vap = vap; ctx->wc_ic = vap->iv_ic; net80211_get_random_bytes(&ctx->wc_iv, sizeof(ctx->wc_iv)); nrefs++; /* NB: we assume caller locking */ return ctx; } static void 
wep_detach(struct ieee80211_key *k) { struct wep_ctx *ctx = k->wk_private; IEEE80211_FREE(ctx, M_80211_CRYPTO); KASSERT(nrefs > 0, ("imbalanced attach/detach")); nrefs--; /* NB: we assume caller locking */ } static int wep_setkey(struct ieee80211_key *k) { return k->wk_keylen >= 40/NBBY; } static void wep_setiv(struct ieee80211_key *k, uint8_t *ivp) { struct wep_ctx *ctx = k->wk_private; struct ieee80211vap *vap = ctx->wc_vap; uint32_t iv; uint8_t keyid; keyid = ieee80211_crypto_get_keyid(vap, k) << 6; /* * XXX * IV must not duplicate during the lifetime of the key. * But no mechanism to renew keys is defined in IEEE 802.11 * for WEP. And the IV may be duplicated at other stations * because the session key itself is shared. So we use a * pseudo random IV for now, though it is not the right way. * * NB: Rather than use a strictly random IV we select a * random one to start and then increment the value for * each frame. This is an explicit tradeoff between * overhead and security. Given the basic insecurity of * WEP this seems worthwhile. */ /* * Skip 'bad' IVs from Fluhrer/Mantin/Shamir: * (B, 255, N) with 3 <= B < 16 and 0 <= N <= 255 */ iv = ctx->wc_iv; if ((iv & 0xff00) == 0xff00) { int B = (iv & 0xff0000) >> 16; if (3 <= B && B < 16) iv += 0x0100; } ctx->wc_iv = iv + 1; /* * NB: Preserve byte order of IV for packet * sniffers; it doesn't matter otherwise. */ #if _BYTE_ORDER == _BIG_ENDIAN ivp[0] = iv >> 0; ivp[1] = iv >> 8; ivp[2] = iv >> 16; #else ivp[2] = iv >> 0; ivp[1] = iv >> 8; ivp[0] = iv >> 16; #endif ivp[3] = keyid; } /* * Add privacy headers appropriate for the specified key. */ static int wep_encap(struct ieee80211_key *k, struct mbuf *m) { struct wep_ctx *ctx = k->wk_private; struct ieee80211com *ic = ctx->wc_ic; struct ieee80211_frame *wh; uint8_t *ivp; int hdrlen; int is_mgmt; hdrlen = ieee80211_hdrspace(ic, mtod(m, void *)); wh = mtod(m, struct ieee80211_frame *); is_mgmt = IEEE80211_IS_MGMT(wh); /* * Check to see if IV is required. */ if (is_mgmt && (k->wk_flags & IEEE80211_KEY_NOIVMGT)) return 1; if ((! is_mgmt) && (k->wk_flags & IEEE80211_KEY_NOIV)) return 1; /* * Copy down 802.11 header and add the IV + KeyID. */ - M_PREPEND(m, wep.ic_header, M_NOWAIT); + M_PREPEND(m, wep.ic_header, IEEE80211_M_NOWAIT); if (m == NULL) return 0; ivp = mtod(m, uint8_t *); ovbcopy(ivp + wep.ic_header, ivp, hdrlen); ivp += hdrlen; wep_setiv(k, ivp); /* * Finally, do software encrypt if needed. */ if ((k->wk_flags & IEEE80211_KEY_SWENCRYPT) && !wep_encrypt(k, m, hdrlen)) return 0; return 1; } /* * Add MIC to the frame as needed. */ static int wep_enmic(struct ieee80211_key *k, struct mbuf *m, int force) { return 1; } /* * Validate and strip privacy headers (and trailer) for a * received frame. If necessary, decrypt the frame using * the specified key. */ static int wep_decap(struct ieee80211_key *k, struct mbuf *m, int hdrlen) { struct wep_ctx *ctx = k->wk_private; struct ieee80211vap *vap = ctx->wc_vap; const struct ieee80211_rx_stats *rxs; rxs = ieee80211_get_rx_params_ptr(m); if ((rxs != NULL) && (rxs->c_pktflags & IEEE80211_RX_F_IV_STRIP)) goto finish; /* * Check if the device handled the decrypt in hardware. * If so we just strip the header; otherwise we need to * handle the decrypt in software. 
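The weak-IV skip in wep_setiv() above can be exercised on its own. This is a user-space sketch, not part of the diff; wep_pick_iv is a made-up name, the on-air byte ordering handled by the #if _BYTE_ORDER block is intentionally left out, and the 24-bit mask is a sketch-level simplification.

/* Sketch of the Fluhrer/Mantin/Shamir weak-IV skip used by wep_setiv(). */
#include <assert.h>
#include <stdint.h>

/*
 * Return the IV to use for this frame and advance the counter, stepping
 * over the weak class (B, 255, N) with 3 <= B < 16, where B lives in
 * bits 16..23 and 255 in bits 8..15 of the counter.
 */
static uint32_t
wep_pick_iv(uint32_t *counter)
{
	uint32_t iv = *counter;

	if ((iv & 0xff00) == 0xff00) {
		int B = (iv & 0xff0000) >> 16;
		if (3 <= B && B < 16)
			iv += 0x0100;	/* hop past the weak run */
	}
	*counter = iv + 1;
	return iv & 0xffffff;
}

int
main(void)
{
	uint32_t ctr;

	/* Weak (3, 255, 0) is skipped... */
	ctr = 0x03ff00;
	assert(wep_pick_iv(&ctr) == 0x040000);
	assert(ctr == 0x040001);

	/* ...while a non-weak value is used as-is. */
	ctr = 0x120034;
	assert(wep_pick_iv(&ctr) == 0x120034);
	return 0;
}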
*/ if ((k->wk_flags & IEEE80211_KEY_SWDECRYPT) && !wep_decrypt(k, m, hdrlen)) { #ifdef IEEE80211_DEBUG struct ieee80211_frame *wh; wh = mtod(m, struct ieee80211_frame *); #endif IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_CRYPTO, wh->i_addr2, "%s", "WEP ICV mismatch on decrypt"); vap->iv_stats.is_rx_wepfail++; return 0; } /* * Copy up 802.11 header and strip crypto bits. */ ovbcopy(mtod(m, void *), mtod(m, uint8_t *) + wep.ic_header, hdrlen); m_adj(m, wep.ic_header); finish: /* XXX TODO: do we have to strip this for offload devices? */ m_adj(m, -wep.ic_trailer); return 1; } /* * Verify and strip MIC from the frame. */ static int wep_demic(struct ieee80211_key *k, struct mbuf *skb, int force) { return 1; } static const uint32_t crc32_table[256] = { 0x00000000L, 0x77073096L, 0xee0e612cL, 0x990951baL, 0x076dc419L, 0x706af48fL, 0xe963a535L, 0x9e6495a3L, 0x0edb8832L, 0x79dcb8a4L, 0xe0d5e91eL, 0x97d2d988L, 0x09b64c2bL, 0x7eb17cbdL, 0xe7b82d07L, 0x90bf1d91L, 0x1db71064L, 0x6ab020f2L, 0xf3b97148L, 0x84be41deL, 0x1adad47dL, 0x6ddde4ebL, 0xf4d4b551L, 0x83d385c7L, 0x136c9856L, 0x646ba8c0L, 0xfd62f97aL, 0x8a65c9ecL, 0x14015c4fL, 0x63066cd9L, 0xfa0f3d63L, 0x8d080df5L, 0x3b6e20c8L, 0x4c69105eL, 0xd56041e4L, 0xa2677172L, 0x3c03e4d1L, 0x4b04d447L, 0xd20d85fdL, 0xa50ab56bL, 0x35b5a8faL, 0x42b2986cL, 0xdbbbc9d6L, 0xacbcf940L, 0x32d86ce3L, 0x45df5c75L, 0xdcd60dcfL, 0xabd13d59L, 0x26d930acL, 0x51de003aL, 0xc8d75180L, 0xbfd06116L, 0x21b4f4b5L, 0x56b3c423L, 0xcfba9599L, 0xb8bda50fL, 0x2802b89eL, 0x5f058808L, 0xc60cd9b2L, 0xb10be924L, 0x2f6f7c87L, 0x58684c11L, 0xc1611dabL, 0xb6662d3dL, 0x76dc4190L, 0x01db7106L, 0x98d220bcL, 0xefd5102aL, 0x71b18589L, 0x06b6b51fL, 0x9fbfe4a5L, 0xe8b8d433L, 0x7807c9a2L, 0x0f00f934L, 0x9609a88eL, 0xe10e9818L, 0x7f6a0dbbL, 0x086d3d2dL, 0x91646c97L, 0xe6635c01L, 0x6b6b51f4L, 0x1c6c6162L, 0x856530d8L, 0xf262004eL, 0x6c0695edL, 0x1b01a57bL, 0x8208f4c1L, 0xf50fc457L, 0x65b0d9c6L, 0x12b7e950L, 0x8bbeb8eaL, 0xfcb9887cL, 0x62dd1ddfL, 0x15da2d49L, 0x8cd37cf3L, 0xfbd44c65L, 0x4db26158L, 0x3ab551ceL, 0xa3bc0074L, 0xd4bb30e2L, 0x4adfa541L, 0x3dd895d7L, 0xa4d1c46dL, 0xd3d6f4fbL, 0x4369e96aL, 0x346ed9fcL, 0xad678846L, 0xda60b8d0L, 0x44042d73L, 0x33031de5L, 0xaa0a4c5fL, 0xdd0d7cc9L, 0x5005713cL, 0x270241aaL, 0xbe0b1010L, 0xc90c2086L, 0x5768b525L, 0x206f85b3L, 0xb966d409L, 0xce61e49fL, 0x5edef90eL, 0x29d9c998L, 0xb0d09822L, 0xc7d7a8b4L, 0x59b33d17L, 0x2eb40d81L, 0xb7bd5c3bL, 0xc0ba6cadL, 0xedb88320L, 0x9abfb3b6L, 0x03b6e20cL, 0x74b1d29aL, 0xead54739L, 0x9dd277afL, 0x04db2615L, 0x73dc1683L, 0xe3630b12L, 0x94643b84L, 0x0d6d6a3eL, 0x7a6a5aa8L, 0xe40ecf0bL, 0x9309ff9dL, 0x0a00ae27L, 0x7d079eb1L, 0xf00f9344L, 0x8708a3d2L, 0x1e01f268L, 0x6906c2feL, 0xf762575dL, 0x806567cbL, 0x196c3671L, 0x6e6b06e7L, 0xfed41b76L, 0x89d32be0L, 0x10da7a5aL, 0x67dd4accL, 0xf9b9df6fL, 0x8ebeeff9L, 0x17b7be43L, 0x60b08ed5L, 0xd6d6a3e8L, 0xa1d1937eL, 0x38d8c2c4L, 0x4fdff252L, 0xd1bb67f1L, 0xa6bc5767L, 0x3fb506ddL, 0x48b2364bL, 0xd80d2bdaL, 0xaf0a1b4cL, 0x36034af6L, 0x41047a60L, 0xdf60efc3L, 0xa867df55L, 0x316e8eefL, 0x4669be79L, 0xcb61b38cL, 0xbc66831aL, 0x256fd2a0L, 0x5268e236L, 0xcc0c7795L, 0xbb0b4703L, 0x220216b9L, 0x5505262fL, 0xc5ba3bbeL, 0xb2bd0b28L, 0x2bb45a92L, 0x5cb36a04L, 0xc2d7ffa7L, 0xb5d0cf31L, 0x2cd99e8bL, 0x5bdeae1dL, 0x9b64c2b0L, 0xec63f226L, 0x756aa39cL, 0x026d930aL, 0x9c0906a9L, 0xeb0e363fL, 0x72076785L, 0x05005713L, 0x95bf4a82L, 0xe2b87a14L, 0x7bb12baeL, 0x0cb61b38L, 0x92d28e9bL, 0xe5d5be0dL, 0x7cdcefb7L, 0x0bdbdf21L, 0x86d3d2d4L, 0xf1d4e242L, 0x68ddb3f8L, 0x1fda836eL, 0x81be16cdL, 0xf6b9265bL, 0x6fb077e1L, 
0x18b74777L, 0x88085ae6L, 0xff0f6a70L, 0x66063bcaL, 0x11010b5cL, 0x8f659effL, 0xf862ae69L, 0x616bffd3L, 0x166ccf45L, 0xa00ae278L, 0xd70dd2eeL, 0x4e048354L, 0x3903b3c2L, 0xa7672661L, 0xd06016f7L, 0x4969474dL, 0x3e6e77dbL, 0xaed16a4aL, 0xd9d65adcL, 0x40df0b66L, 0x37d83bf0L, 0xa9bcae53L, 0xdebb9ec5L, 0x47b2cf7fL, 0x30b5ffe9L, 0xbdbdf21cL, 0xcabac28aL, 0x53b39330L, 0x24b4a3a6L, 0xbad03605L, 0xcdd70693L, 0x54de5729L, 0x23d967bfL, 0xb3667a2eL, 0xc4614ab8L, 0x5d681b02L, 0x2a6f2b94L, 0xb40bbe37L, 0xc30c8ea1L, 0x5a05df1bL, 0x2d02ef8dL }; static int wep_encrypt(struct ieee80211_key *key, struct mbuf *m0, int hdrlen) { #define S_SWAP(a,b) do { uint8_t t = S[a]; S[a] = S[b]; S[b] = t; } while(0) struct wep_ctx *ctx = key->wk_private; struct ieee80211vap *vap = ctx->wc_vap; struct mbuf *m = m0; uint8_t rc4key[IEEE80211_WEP_IVLEN + IEEE80211_KEYBUF_SIZE]; uint8_t icv[IEEE80211_WEP_CRCLEN]; uint32_t i, j, k, crc; size_t buflen, data_len; uint8_t S[256]; uint8_t *pos; u_int off, keylen; vap->iv_stats.is_crypto_wep++; /* NB: this assumes the header was pulled up */ memcpy(rc4key, mtod(m, uint8_t *) + hdrlen, IEEE80211_WEP_IVLEN); memcpy(rc4key + IEEE80211_WEP_IVLEN, key->wk_key, key->wk_keylen); /* Setup RC4 state */ for (i = 0; i < 256; i++) S[i] = i; j = 0; keylen = key->wk_keylen + IEEE80211_WEP_IVLEN; for (i = 0; i < 256; i++) { j = (j + S[i] + rc4key[i % keylen]) & 0xff; S_SWAP(i, j); } off = hdrlen + wep.ic_header; data_len = m->m_pkthdr.len - off; /* Compute CRC32 over unencrypted data and apply RC4 to data */ crc = ~0; i = j = 0; pos = mtod(m, uint8_t *) + off; buflen = m->m_len - off; for (;;) { if (buflen > data_len) buflen = data_len; data_len -= buflen; for (k = 0; k < buflen; k++) { crc = crc32_table[(crc ^ *pos) & 0xff] ^ (crc >> 8); i = (i + 1) & 0xff; j = (j + S[i]) & 0xff; S_SWAP(i, j); *pos++ ^= S[(S[i] + S[j]) & 0xff]; } if (m->m_next == NULL) { if (data_len != 0) { /* out of data */ IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_CRYPTO, ether_sprintf(mtod(m0, struct ieee80211_frame *)->i_addr2), "out of data for WEP (data_len %zu)", data_len); /* XXX stat */ return 0; } break; } m = m->m_next; pos = mtod(m, uint8_t *); buflen = m->m_len; } crc = ~crc; /* Append little-endian CRC32 and encrypt it to produce ICV */ icv[0] = crc; icv[1] = crc >> 8; icv[2] = crc >> 16; icv[3] = crc >> 24; for (k = 0; k < IEEE80211_WEP_CRCLEN; k++) { i = (i + 1) & 0xff; j = (j + S[i]) & 0xff; S_SWAP(i, j); icv[k] ^= S[(S[i] + S[j]) & 0xff]; } return m_append(m0, IEEE80211_WEP_CRCLEN, icv); #undef S_SWAP } static int wep_decrypt(struct ieee80211_key *key, struct mbuf *m0, int hdrlen) { #define S_SWAP(a,b) do { uint8_t t = S[a]; S[a] = S[b]; S[b] = t; } while(0) struct wep_ctx *ctx = key->wk_private; struct ieee80211vap *vap = ctx->wc_vap; struct mbuf *m = m0; uint8_t rc4key[IEEE80211_WEP_IVLEN + IEEE80211_KEYBUF_SIZE]; uint8_t icv[IEEE80211_WEP_CRCLEN]; uint32_t i, j, k, crc; size_t buflen, data_len; uint8_t S[256]; uint8_t *pos; u_int off, keylen; vap->iv_stats.is_crypto_wep++; /* NB: this assumes the header was pulled up */ memcpy(rc4key, mtod(m, uint8_t *) + hdrlen, IEEE80211_WEP_IVLEN); memcpy(rc4key + IEEE80211_WEP_IVLEN, key->wk_key, key->wk_keylen); /* Setup RC4 state */ for (i = 0; i < 256; i++) S[i] = i; j = 0; keylen = key->wk_keylen + IEEE80211_WEP_IVLEN; for (i = 0; i < 256; i++) { j = (j + S[i] + rc4key[i % keylen]) & 0xff; S_SWAP(i, j); } off = hdrlen + wep.ic_header; data_len = m->m_pkthdr.len - (off + wep.ic_trailer); /* Compute CRC32 over unencrypted data and apply RC4 to data */ crc = ~0; i = j = 0; 
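The cipher work in wep_encrypt() is the textbook RC4 key schedule and keystream generator, seeded with the 3-byte IV prepended to the WEP key. A minimal user-space sketch of just that part follows; it is not from the source, omits the mbuf walking and the CRC-32 ICV, and wep_rc4_crypt is an illustrative name. Because the operation is a pure XOR, the same routine both encrypts and decrypts.

/* Minimal user-space RC4 sketch of the per-packet WEP key schedule. */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define S_SWAP(a, b) do { uint8_t t = S[a]; S[a] = S[b]; S[b] = t; } while (0)

/*
 * XOR 'len' bytes of 'buf' with the RC4 keystream derived from the
 * per-packet key IV[3] || key[keylen], the same seeding wep_encrypt()
 * and wep_decrypt() perform.
 */
static void
wep_rc4_crypt(const uint8_t iv[3], const uint8_t *key, size_t keylen,
    uint8_t *buf, size_t len)
{
	uint8_t rc4key[3 + 32];
	uint8_t S[256];
	size_t i, j, k, n;

	assert(keylen <= sizeof(rc4key) - 3);
	memcpy(rc4key, iv, 3);
	memcpy(rc4key + 3, key, keylen);
	n = 3 + keylen;

	for (i = 0; i < 256; i++)		/* key scheduling */
		S[i] = i;
	for (i = j = 0; i < 256; i++) {
		j = (j + S[i] + rc4key[i % n]) & 0xff;
		S_SWAP(i, j);
	}
	for (i = j = k = 0; k < len; k++) {	/* keystream generation */
		i = (i + 1) & 0xff;
		j = (j + S[i]) & 0xff;
		S_SWAP(i, j);
		buf[k] ^= S[(S[i] + S[j]) & 0xff];
	}
}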
pos = mtod(m, uint8_t *) + off; buflen = m->m_len - off; for (;;) { if (buflen > data_len) buflen = data_len; data_len -= buflen; for (k = 0; k < buflen; k++) { i = (i + 1) & 0xff; j = (j + S[i]) & 0xff; S_SWAP(i, j); *pos ^= S[(S[i] + S[j]) & 0xff]; crc = crc32_table[(crc ^ *pos) & 0xff] ^ (crc >> 8); pos++; } m = m->m_next; if (m == NULL) { if (data_len != 0) { /* out of data */ IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_CRYPTO, mtod(m0, struct ieee80211_frame *)->i_addr2, "out of data for WEP (data_len %zu)", data_len); return 0; } break; } pos = mtod(m, uint8_t *); buflen = m->m_len; } crc = ~crc; /* Encrypt little-endian CRC32 and verify that it matches with * received ICV */ icv[0] = crc; icv[1] = crc >> 8; icv[2] = crc >> 16; icv[3] = crc >> 24; for (k = 0; k < IEEE80211_WEP_CRCLEN; k++) { i = (i + 1) & 0xff; j = (j + S[i]) & 0xff; S_SWAP(i, j); /* XXX assumes ICV is contiguous in mbuf */ if ((icv[k] ^ S[(S[i] + S[j]) & 0xff]) != *pos++) { /* ICV mismatch - drop frame */ return 0; } } return 1; #undef S_SWAP } /* * Module glue. */ IEEE80211_CRYPTO_MODULE(wep, 1); diff --git a/sys/net80211/ieee80211_freebsd.c b/sys/net80211/ieee80211_freebsd.c index 011ae7060a6c..5511791ae2aa 100644 --- a/sys/net80211/ieee80211_freebsd.c +++ b/sys/net80211/ieee80211_freebsd.c @@ -1,1185 +1,1185 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2003-2009 Sam Leffler, Errno Consulting * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * IEEE 802.11 support (FreeBSD-specific code) */ #include "opt_wlan.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include DEBUGNET_DEFINE(ieee80211); SYSCTL_NODE(_net, OID_AUTO, wlan, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "IEEE 80211 parameters"); #ifdef IEEE80211_DEBUG static int ieee80211_debug = 0; SYSCTL_INT(_net_wlan, OID_AUTO, debug, CTLFLAG_RW, &ieee80211_debug, 0, "debugging printfs"); #endif static const char wlanname[] = "wlan"; static struct if_clone *wlan_cloner; /* * priv(9) NET80211 checks. * Return 0 if operation is allowed, E* (usually EPERM) otherwise. 
*/ int ieee80211_priv_check_vap_getkey(u_long cmd __unused, struct ieee80211vap *vap __unused, struct ifnet *ifp __unused) { return (priv_check(curthread, PRIV_NET80211_VAP_GETKEY)); } int ieee80211_priv_check_vap_manage(u_long cmd __unused, struct ieee80211vap *vap __unused, struct ifnet *ifp __unused) { return (priv_check(curthread, PRIV_NET80211_VAP_MANAGE)); } int ieee80211_priv_check_vap_setmac(u_long cmd __unused, struct ieee80211vap *vap __unused, struct ifnet *ifp __unused) { return (priv_check(curthread, PRIV_NET80211_VAP_SETMAC)); } int ieee80211_priv_check_create_vap(u_long cmd __unused, struct ieee80211vap *vap __unused, struct ifnet *ifp __unused) { return (priv_check(curthread, PRIV_NET80211_CREATE_VAP)); } static int wlan_clone_create(struct if_clone *ifc, int unit, caddr_t params) { struct ieee80211_clone_params cp; struct ieee80211vap *vap; struct ieee80211com *ic; int error; error = ieee80211_priv_check_create_vap(0, NULL, NULL); if (error) return error; error = copyin(params, &cp, sizeof(cp)); if (error) return error; ic = ieee80211_find_com(cp.icp_parent); if (ic == NULL) return ENXIO; if (cp.icp_opmode >= IEEE80211_OPMODE_MAX) { ic_printf(ic, "%s: invalid opmode %d\n", __func__, cp.icp_opmode); return EINVAL; } if ((ic->ic_caps & ieee80211_opcap[cp.icp_opmode]) == 0) { ic_printf(ic, "%s mode not supported\n", ieee80211_opmode_name[cp.icp_opmode]); return EOPNOTSUPP; } if ((cp.icp_flags & IEEE80211_CLONE_TDMA) && #ifdef IEEE80211_SUPPORT_TDMA (ic->ic_caps & IEEE80211_C_TDMA) == 0 #else (1) #endif ) { ic_printf(ic, "TDMA not supported\n"); return EOPNOTSUPP; } vap = ic->ic_vap_create(ic, wlanname, unit, cp.icp_opmode, cp.icp_flags, cp.icp_bssid, cp.icp_flags & IEEE80211_CLONE_MACADDR ? cp.icp_macaddr : ic->ic_macaddr); if (vap == NULL) return (EIO); #ifdef DEBUGNET if (ic->ic_debugnet_meth != NULL) DEBUGNET_SET(vap->iv_ifp, ieee80211); #endif return (0); } static void wlan_clone_destroy(struct ifnet *ifp) { struct ieee80211vap *vap = ifp->if_softc; struct ieee80211com *ic = vap->iv_ic; ic->ic_vap_delete(vap); } void ieee80211_vap_destroy(struct ieee80211vap *vap) { CURVNET_SET(vap->iv_ifp->if_vnet); if_clone_destroyif(wlan_cloner, vap->iv_ifp); CURVNET_RESTORE(); } int ieee80211_sysctl_msecs_ticks(SYSCTL_HANDLER_ARGS) { int msecs = ticks_to_msecs(*(int *)arg1); int error; error = sysctl_handle_int(oidp, &msecs, 0, req); if (error || !req->newptr) return error; *(int *)arg1 = msecs_to_ticks(msecs); return 0; } static int ieee80211_sysctl_inact(SYSCTL_HANDLER_ARGS) { int inact = (*(int *)arg1) * IEEE80211_INACT_WAIT; int error; error = sysctl_handle_int(oidp, &inact, 0, req); if (error || !req->newptr) return error; *(int *)arg1 = inact / IEEE80211_INACT_WAIT; return 0; } static int ieee80211_sysctl_parent(SYSCTL_HANDLER_ARGS) { struct ieee80211com *ic = arg1; return SYSCTL_OUT_STR(req, ic->ic_name); } static int ieee80211_sysctl_radar(SYSCTL_HANDLER_ARGS) { struct ieee80211com *ic = arg1; int t = 0, error; error = sysctl_handle_int(oidp, &t, 0, req); if (error || !req->newptr) return error; IEEE80211_LOCK(ic); ieee80211_dfs_notify_radar(ic, ic->ic_curchan); IEEE80211_UNLOCK(ic); return 0; } /* * For now, just restart everything. * * Later on, it'd be nice to have a separate VAP restart to * full-device restart. 
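The userland path into wlan_clone_create() is the usual interface-cloning flow (for example, ifconfig wlan0 create wlandev <parent>). The msecs/ticks sysctl handler above is a plain unit conversion for userland; the sketch below shows the round trip it performs, with hz and the two helpers as local stand-ins for the kernel's tick rate and ticks_to_msecs()/msecs_to_ticks().

/* User-space sketch of the conversion done by ieee80211_sysctl_msecs_ticks(). */
#include <stdio.h>

static const int hz = 1000;	/* assumed tick rate for the example */

static int ticks_to_msecs(int t)  { return t * 1000 / hz; }
static int msecs_to_ticks(int ms) { return ms * hz / 1000; }

int
main(void)
{
	int val_ticks = 7 * hz / 10;			/* stored in ticks */
	int shown = ticks_to_msecs(val_ticks);		/* exported in ms  */

	printf("%d ticks -> %d ms -> %d ticks\n",
	    val_ticks, shown, msecs_to_ticks(shown));
	return 0;
}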
*/ static int ieee80211_sysctl_vap_restart(SYSCTL_HANDLER_ARGS) { struct ieee80211vap *vap = arg1; int t = 0, error; error = sysctl_handle_int(oidp, &t, 0, req); if (error || !req->newptr) return error; ieee80211_restart_all(vap->iv_ic); return 0; } void ieee80211_sysctl_attach(struct ieee80211com *ic) { } void ieee80211_sysctl_detach(struct ieee80211com *ic) { } void ieee80211_sysctl_vattach(struct ieee80211vap *vap) { struct ifnet *ifp = vap->iv_ifp; struct sysctl_ctx_list *ctx; struct sysctl_oid *oid; char num[14]; /* sufficient for 32 bits */ ctx = (struct sysctl_ctx_list *) IEEE80211_MALLOC(sizeof(struct sysctl_ctx_list), M_DEVBUF, IEEE80211_M_NOWAIT | IEEE80211_M_ZERO); if (ctx == NULL) { if_printf(ifp, "%s: cannot allocate sysctl context!\n", __func__); return; } sysctl_ctx_init(ctx); snprintf(num, sizeof(num), "%u", ifp->if_dunit); oid = SYSCTL_ADD_NODE(ctx, &SYSCTL_NODE_CHILDREN(_net, wlan), OID_AUTO, num, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, ""); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "%parent", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, vap->iv_ic, 0, ieee80211_sysctl_parent, "A", "parent device"); SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "driver_caps", CTLFLAG_RW, &vap->iv_caps, 0, "driver capabilities"); #ifdef IEEE80211_DEBUG vap->iv_debug = ieee80211_debug; SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "debug", CTLFLAG_RW, &vap->iv_debug, 0, "control debugging printfs"); #endif SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "bmiss_max", CTLFLAG_RW, &vap->iv_bmiss_max, 0, "consecutive beacon misses before scanning"); /* XXX inherit from tunables */ SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "inact_run", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &vap->iv_inact_run, 0, ieee80211_sysctl_inact, "I", "station inactivity timeout (sec)"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "inact_probe", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &vap->iv_inact_probe, 0, ieee80211_sysctl_inact, "I", "station inactivity probe timeout (sec)"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "inact_auth", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &vap->iv_inact_auth, 0, ieee80211_sysctl_inact, "I", "station authentication timeout (sec)"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "inact_init", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &vap->iv_inact_init, 0, ieee80211_sysctl_inact, "I", "station initial state timeout (sec)"); if (vap->iv_htcaps & IEEE80211_HTC_HT) { SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "ampdu_mintraffic_bk", CTLFLAG_RW, &vap->iv_ampdu_mintraffic[WME_AC_BK], 0, "BK traffic tx aggr threshold (pps)"); SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "ampdu_mintraffic_be", CTLFLAG_RW, &vap->iv_ampdu_mintraffic[WME_AC_BE], 0, "BE traffic tx aggr threshold (pps)"); SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "ampdu_mintraffic_vo", CTLFLAG_RW, &vap->iv_ampdu_mintraffic[WME_AC_VO], 0, "VO traffic tx aggr threshold (pps)"); SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "ampdu_mintraffic_vi", CTLFLAG_RW, &vap->iv_ampdu_mintraffic[WME_AC_VI], 0, "VI traffic tx aggr threshold (pps)"); } SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "force_restart", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, vap, 0, ieee80211_sysctl_vap_restart, "I", "force a VAP restart"); if (vap->iv_caps & IEEE80211_C_DFS) { SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "radar", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, vap->iv_ic, 0, ieee80211_sysctl_radar, "I", "simulate 
radar event"); } vap->iv_sysctl = ctx; vap->iv_oid = oid; } void ieee80211_sysctl_vdetach(struct ieee80211vap *vap) { if (vap->iv_sysctl != NULL) { sysctl_ctx_free(vap->iv_sysctl); IEEE80211_FREE(vap->iv_sysctl, M_DEVBUF); vap->iv_sysctl = NULL; } } int ieee80211_com_vincref(struct ieee80211vap *vap) { uint32_t ostate; ostate = atomic_fetchadd_32(&vap->iv_com_state, IEEE80211_COM_REF_ADD); if (ostate & IEEE80211_COM_DETACHED) { atomic_subtract_32(&vap->iv_com_state, IEEE80211_COM_REF_ADD); return (ENETDOWN); } if (_IEEE80211_MASKSHIFT(ostate, IEEE80211_COM_REF) == IEEE80211_COM_REF_MAX) { atomic_subtract_32(&vap->iv_com_state, IEEE80211_COM_REF_ADD); return (EOVERFLOW); } return (0); } void ieee80211_com_vdecref(struct ieee80211vap *vap) { uint32_t ostate; ostate = atomic_fetchadd_32(&vap->iv_com_state, -IEEE80211_COM_REF_ADD); KASSERT(_IEEE80211_MASKSHIFT(ostate, IEEE80211_COM_REF) != 0, ("com reference counter underflow")); (void) ostate; } void ieee80211_com_vdetach(struct ieee80211vap *vap) { int sleep_time; sleep_time = msecs_to_ticks(250); atomic_set_32(&vap->iv_com_state, IEEE80211_COM_DETACHED); while (_IEEE80211_MASKSHIFT(atomic_load_32(&vap->iv_com_state), IEEE80211_COM_REF) != 0) pause("comref", sleep_time); } int ieee80211_node_dectestref(struct ieee80211_node *ni) { /* XXX need equivalent of atomic_dec_and_test */ atomic_subtract_int(&ni->ni_refcnt, 1); return atomic_cmpset_int(&ni->ni_refcnt, 0, 1); } void ieee80211_drain_ifq(struct ifqueue *ifq) { struct ieee80211_node *ni; struct mbuf *m; for (;;) { IF_DEQUEUE(ifq, m); if (m == NULL) break; ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; KASSERT(ni != NULL, ("frame w/o node")); ieee80211_free_node(ni); m->m_pkthdr.rcvif = NULL; m_freem(m); } } void ieee80211_flush_ifq(struct ifqueue *ifq, struct ieee80211vap *vap) { struct ieee80211_node *ni; struct mbuf *m, **mprev; IF_LOCK(ifq); mprev = &ifq->ifq_head; while ((m = *mprev) != NULL) { ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; if (ni != NULL && ni->ni_vap == vap) { *mprev = m->m_nextpkt; /* remove from list */ ifq->ifq_len--; m_freem(m); ieee80211_free_node(ni); /* reclaim ref */ } else mprev = &m->m_nextpkt; } /* recalculate tail ptr */ m = ifq->ifq_head; for (; m != NULL && m->m_nextpkt != NULL; m = m->m_nextpkt) ; ifq->ifq_tail = m; IF_UNLOCK(ifq); } /* * As above, for mbufs allocated with m_gethdr/MGETHDR * or initialized by M_COPY_PKTHDR. */ #define MC_ALIGN(m, len) \ do { \ (m)->m_data += rounddown2(MCLBYTES - (len), sizeof(long)); \ } while (/* CONSTCOND */ 0) /* * Allocate and setup a management frame of the specified * size. We return the mbuf and a pointer to the start * of the contiguous data area that's been reserved based * on the packet length. The data area is forced to 32-bit * alignment and the buffer length to a multiple of 4 bytes. * This is done mainly so beacon frames (that require this) * can use this interface too. */ struct mbuf * ieee80211_getmgtframe(uint8_t **frm, int headroom, int pktlen) { struct mbuf *m; u_int len; /* * NB: we know the mbuf routines will align the data area * so we don't need to do anything special. */ len = roundup2(headroom + pktlen, 4); KASSERT(len <= MCLBYTES, ("802.11 mgt frame too large: %u", len)); if (len < MINCLSIZE) { - m = m_gethdr(M_NOWAIT, MT_DATA); + m = m_gethdr(IEEE80211_M_NOWAIT, MT_DATA); /* * Align the data in case additional headers are added. * This should only happen when a WEP header is added * which only happens for shared key authentication mgt * frames which all fit in MHLEN. 
*/ if (m != NULL) M_ALIGN(m, len); } else { - m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); + m = m_getcl(IEEE80211_M_NOWAIT, MT_DATA, M_PKTHDR); if (m != NULL) MC_ALIGN(m, len); } if (m != NULL) { m->m_data += headroom; *frm = m->m_data; } return m; } #ifndef __NO_STRICT_ALIGNMENT /* * Re-align the payload in the mbuf. This is mainly used (right now) * to handle IP header alignment requirements on certain architectures. */ struct mbuf * ieee80211_realign(struct ieee80211vap *vap, struct mbuf *m, size_t align) { int pktlen, space; struct mbuf *n; pktlen = m->m_pkthdr.len; space = pktlen + align; if (space < MINCLSIZE) - n = m_gethdr(M_NOWAIT, MT_DATA); + n = m_gethdr(IEEE80211_M_NOWAIT, MT_DATA); else { - n = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, + n = m_getjcl(IEEE80211_M_NOWAIT, MT_DATA, M_PKTHDR, space <= MCLBYTES ? MCLBYTES : #if MJUMPAGESIZE != MCLBYTES space <= MJUMPAGESIZE ? MJUMPAGESIZE : #endif space <= MJUM9BYTES ? MJUM9BYTES : MJUM16BYTES); } if (__predict_true(n != NULL)) { m_move_pkthdr(n, m); n->m_data = (caddr_t)(ALIGN(n->m_data + align) - align); m_copydata(m, 0, pktlen, mtod(n, caddr_t)); n->m_len = pktlen; } else { IEEE80211_DISCARD(vap, IEEE80211_MSG_ANY, mtod(m, const struct ieee80211_frame *), NULL, "%s", "no mbuf to realign"); vap->iv_stats.is_rx_badalign++; } m_freem(m); return n; } #endif /* !__NO_STRICT_ALIGNMENT */ int ieee80211_add_callback(struct mbuf *m, void (*func)(struct ieee80211_node *, void *, int), void *arg) { struct m_tag *mtag; struct ieee80211_cb *cb; mtag = m_tag_alloc(MTAG_ABI_NET80211, NET80211_TAG_CALLBACK, - sizeof(struct ieee80211_cb), M_NOWAIT); + sizeof(struct ieee80211_cb), IEEE80211_M_NOWAIT); if (mtag == NULL) return 0; cb = (struct ieee80211_cb *)(mtag+1); cb->func = func; cb->arg = arg; m_tag_prepend(m, mtag); m->m_flags |= M_TXCB; return 1; } int ieee80211_add_xmit_params(struct mbuf *m, const struct ieee80211_bpf_params *params) { struct m_tag *mtag; struct ieee80211_tx_params *tx; mtag = m_tag_alloc(MTAG_ABI_NET80211, NET80211_TAG_XMIT_PARAMS, - sizeof(struct ieee80211_tx_params), M_NOWAIT); + sizeof(struct ieee80211_tx_params), IEEE80211_M_NOWAIT); if (mtag == NULL) return (0); tx = (struct ieee80211_tx_params *)(mtag+1); memcpy(&tx->params, params, sizeof(struct ieee80211_bpf_params)); m_tag_prepend(m, mtag); return (1); } int ieee80211_get_xmit_params(struct mbuf *m, struct ieee80211_bpf_params *params) { struct m_tag *mtag; struct ieee80211_tx_params *tx; mtag = m_tag_locate(m, MTAG_ABI_NET80211, NET80211_TAG_XMIT_PARAMS, NULL); if (mtag == NULL) return (-1); tx = (struct ieee80211_tx_params *)(mtag + 1); memcpy(params, &tx->params, sizeof(struct ieee80211_bpf_params)); return (0); } void ieee80211_process_callback(struct ieee80211_node *ni, struct mbuf *m, int status) { struct m_tag *mtag; mtag = m_tag_locate(m, MTAG_ABI_NET80211, NET80211_TAG_CALLBACK, NULL); if (mtag != NULL) { struct ieee80211_cb *cb = (struct ieee80211_cb *)(mtag+1); cb->func(ni, cb->arg, status); } } /* * Add RX parameters to the given mbuf. * * Returns 1 if OK, 0 on error. 
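The alignment arithmetic used by ieee80211_getmgtframe() and MC_ALIGN() is easy to see with concrete numbers. The sketch below is not from the source; MCLBYTES is given an assumed typical value, and the roundup2/rounddown2 macros are written out locally.

/* Sketch of the MC_ALIGN()/ieee80211_getmgtframe() alignment arithmetic. */
#include <stdio.h>

#define	MCLBYTES	2048	/* assumed cluster size for the example */
#define	roundup2(x, y)	(((x) + ((y) - 1)) & (~((y) - 1)))
#define	rounddown2(x, y) ((x) & (~((y) - 1)))

int
main(void)
{
	int headroom = 26, pktlen = 89;
	int len = roundup2(headroom + pktlen, 4);	/* 115 -> 116 */
	int off = rounddown2(MCLBYTES - len, sizeof(long));

	/*
	 * The data area is pushed toward the end of the cluster, aligned
	 * to sizeof(long), leaving 'off' bytes of space in front for any
	 * headers prepended later (e.g. the WEP IV for shared-key auth).
	 */
	printf("len %d, data offset %d, tail slack %d\n",
	    len, off, MCLBYTES - (off + len));
	return 0;
}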
*/ int ieee80211_add_rx_params(struct mbuf *m, const struct ieee80211_rx_stats *rxs) { struct m_tag *mtag; struct ieee80211_rx_params *rx; mtag = m_tag_alloc(MTAG_ABI_NET80211, NET80211_TAG_RECV_PARAMS, - sizeof(struct ieee80211_rx_stats), M_NOWAIT); + sizeof(struct ieee80211_rx_stats), IEEE80211_M_NOWAIT); if (mtag == NULL) return (0); rx = (struct ieee80211_rx_params *)(mtag + 1); memcpy(&rx->params, rxs, sizeof(*rxs)); m_tag_prepend(m, mtag); return (1); } int ieee80211_get_rx_params(struct mbuf *m, struct ieee80211_rx_stats *rxs) { struct m_tag *mtag; struct ieee80211_rx_params *rx; mtag = m_tag_locate(m, MTAG_ABI_NET80211, NET80211_TAG_RECV_PARAMS, NULL); if (mtag == NULL) return (-1); rx = (struct ieee80211_rx_params *)(mtag + 1); memcpy(rxs, &rx->params, sizeof(*rxs)); return (0); } const struct ieee80211_rx_stats * ieee80211_get_rx_params_ptr(struct mbuf *m) { struct m_tag *mtag; struct ieee80211_rx_params *rx; mtag = m_tag_locate(m, MTAG_ABI_NET80211, NET80211_TAG_RECV_PARAMS, NULL); if (mtag == NULL) return (NULL); rx = (struct ieee80211_rx_params *)(mtag + 1); return (&rx->params); } /* * Add TOA parameters to the given mbuf. */ int ieee80211_add_toa_params(struct mbuf *m, const struct ieee80211_toa_params *p) { struct m_tag *mtag; struct ieee80211_toa_params *rp; mtag = m_tag_alloc(MTAG_ABI_NET80211, NET80211_TAG_TOA_PARAMS, - sizeof(struct ieee80211_toa_params), M_NOWAIT); + sizeof(struct ieee80211_toa_params), IEEE80211_M_NOWAIT); if (mtag == NULL) return (0); rp = (struct ieee80211_toa_params *)(mtag + 1); memcpy(rp, p, sizeof(*rp)); m_tag_prepend(m, mtag); return (1); } int ieee80211_get_toa_params(struct mbuf *m, struct ieee80211_toa_params *p) { struct m_tag *mtag; struct ieee80211_toa_params *rp; mtag = m_tag_locate(m, MTAG_ABI_NET80211, NET80211_TAG_TOA_PARAMS, NULL); if (mtag == NULL) return (0); rp = (struct ieee80211_toa_params *)(mtag + 1); if (p != NULL) memcpy(p, rp, sizeof(*p)); return (1); } /* * Transmit a frame to the parent interface. */ int ieee80211_parent_xmitpkt(struct ieee80211com *ic, struct mbuf *m) { int error; /* * Assert the IC TX lock is held - this enforces the * processing -> queuing order is maintained */ IEEE80211_TX_LOCK_ASSERT(ic); error = ic->ic_transmit(ic, m); if (error) { struct ieee80211_node *ni; ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; /* XXX number of fragments */ if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1); ieee80211_free_node(ni); ieee80211_free_mbuf(m); } return (error); } /* * Transmit a frame to the VAP interface. */ int ieee80211_vap_xmitpkt(struct ieee80211vap *vap, struct mbuf *m) { struct ifnet *ifp = vap->iv_ifp; /* * When transmitting via the VAP, we shouldn't hold * any IC TX lock as the VAP TX path will acquire it. */ IEEE80211_TX_UNLOCK_ASSERT(vap->iv_ic); return (ifp->if_transmit(ifp, m)); } #include void net80211_get_random_bytes(void *p, size_t n) { uint8_t *dp = p; while (n > 0) { uint32_t v = arc4random(); size_t nb = n > sizeof(uint32_t) ? sizeof(uint32_t) : n; bcopy(&v, dp, n > sizeof(uint32_t) ? sizeof(uint32_t) : n); dp += sizeof(uint32_t), n -= nb; } } /* * Helper function for events that pass just a single mac address. 
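The RX/TX parameter helpers above all follow the same m_tag pattern: allocate a tag identified by (cookie, type), copy the payload just past the tag header, prepend it to the packet, and find it again later. The following is a user-space mock of that pattern, not the kernel m_tag API; struct tag, struct pkt, and all constants here are stand-ins.

/* Mock of the (cookie, type) tag pattern behind ieee80211_add_rx_params(). */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct tag {
	struct tag *next;
	uint32_t    cookie;	/* ABI owner, e.g. MTAG_ABI_NET80211 */
	uint32_t    type;	/* e.g. NET80211_TAG_RECV_PARAMS */
	/* payload follows the header, as with (mtag + 1) in the kernel */
};

struct pkt { struct tag *tags; };

static struct tag *
tag_alloc(uint32_t cookie, uint32_t type, size_t paylen)
{
	struct tag *t = calloc(1, sizeof(*t) + paylen);

	if (t != NULL) { t->cookie = cookie; t->type = type; }
	return t;
}

static void
tag_prepend(struct pkt *p, struct tag *t) { t->next = p->tags; p->tags = t; }

static struct tag *
tag_locate(struct pkt *p, uint32_t cookie, uint32_t type)
{
	for (struct tag *t = p->tags; t != NULL; t = t->next)
		if (t->cookie == cookie && t->type == type)
			return t;
	return NULL;
}

int
main(void)
{
	struct pkt p = { 0 };
	struct rx { uint32_t pktflags; } rxs = { .pktflags = 0x1 }, out;
	struct tag *t = tag_alloc(0x80211, 1, sizeof(rxs));

	if (t == NULL)
		return 1;
	memcpy(t + 1, &rxs, sizeof(rxs));	/* cf. ieee80211_add_rx_params() */
	tag_prepend(&p, t);

	t = tag_locate(&p, 0x80211, 1);		/* cf. ieee80211_get_rx_params() */
	memcpy(&out, t + 1, sizeof(out));
	printf("pktflags 0x%x\n", out.pktflags);
	free(p.tags);
	return 0;
}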
*/ static void notify_macaddr(struct ifnet *ifp, int op, const uint8_t mac[IEEE80211_ADDR_LEN]) { struct ieee80211_join_event iev; CURVNET_SET(ifp->if_vnet); memset(&iev, 0, sizeof(iev)); IEEE80211_ADDR_COPY(iev.iev_addr, mac); rt_ieee80211msg(ifp, op, &iev, sizeof(iev)); CURVNET_RESTORE(); } void ieee80211_notify_node_join(struct ieee80211_node *ni, int newassoc) { struct ieee80211vap *vap = ni->ni_vap; struct ifnet *ifp = vap->iv_ifp; CURVNET_SET_QUIET(ifp->if_vnet); IEEE80211_NOTE(vap, IEEE80211_MSG_NODE, ni, "%snode join", (ni == vap->iv_bss) ? "bss " : ""); if (ni == vap->iv_bss) { notify_macaddr(ifp, newassoc ? RTM_IEEE80211_ASSOC : RTM_IEEE80211_REASSOC, ni->ni_bssid); if_link_state_change(ifp, LINK_STATE_UP); } else { notify_macaddr(ifp, newassoc ? RTM_IEEE80211_JOIN : RTM_IEEE80211_REJOIN, ni->ni_macaddr); } CURVNET_RESTORE(); } void ieee80211_notify_node_leave(struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct ifnet *ifp = vap->iv_ifp; CURVNET_SET_QUIET(ifp->if_vnet); IEEE80211_NOTE(vap, IEEE80211_MSG_NODE, ni, "%snode leave", (ni == vap->iv_bss) ? "bss " : ""); if (ni == vap->iv_bss) { rt_ieee80211msg(ifp, RTM_IEEE80211_DISASSOC, NULL, 0); if_link_state_change(ifp, LINK_STATE_DOWN); } else { /* fire off wireless event station leaving */ notify_macaddr(ifp, RTM_IEEE80211_LEAVE, ni->ni_macaddr); } CURVNET_RESTORE(); } void ieee80211_notify_scan_done(struct ieee80211vap *vap) { struct ifnet *ifp = vap->iv_ifp; IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN, "%s\n", "notify scan done"); /* dispatch wireless event indicating scan completed */ CURVNET_SET(ifp->if_vnet); rt_ieee80211msg(ifp, RTM_IEEE80211_SCAN, NULL, 0); CURVNET_RESTORE(); } void ieee80211_notify_replay_failure(struct ieee80211vap *vap, const struct ieee80211_frame *wh, const struct ieee80211_key *k, u_int64_t rsc, int tid) { struct ifnet *ifp = vap->iv_ifp; IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_CRYPTO, wh->i_addr2, "%s replay detected tid %d ", k->wk_cipher->ic_name, tid, (intmax_t) rsc, (intmax_t) rsc, (intmax_t) k->wk_keyrsc[tid], (intmax_t) k->wk_keyrsc[tid], k->wk_keyix, k->wk_rxkeyix); if (ifp != NULL) { /* NB: for cipher test modules */ struct ieee80211_replay_event iev; IEEE80211_ADDR_COPY(iev.iev_dst, wh->i_addr1); IEEE80211_ADDR_COPY(iev.iev_src, wh->i_addr2); iev.iev_cipher = k->wk_cipher->ic_cipher; if (k->wk_rxkeyix != IEEE80211_KEYIX_NONE) iev.iev_keyix = k->wk_rxkeyix; else iev.iev_keyix = k->wk_keyix; iev.iev_keyrsc = k->wk_keyrsc[tid]; iev.iev_rsc = rsc; CURVNET_SET(ifp->if_vnet); rt_ieee80211msg(ifp, RTM_IEEE80211_REPLAY, &iev, sizeof(iev)); CURVNET_RESTORE(); } } void ieee80211_notify_michael_failure(struct ieee80211vap *vap, const struct ieee80211_frame *wh, u_int keyix) { struct ifnet *ifp = vap->iv_ifp; IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_CRYPTO, wh->i_addr2, "michael MIC verification failed ", keyix); vap->iv_stats.is_rx_tkipmic++; if (ifp != NULL) { /* NB: for cipher test modules */ struct ieee80211_michael_event iev; IEEE80211_ADDR_COPY(iev.iev_dst, wh->i_addr1); IEEE80211_ADDR_COPY(iev.iev_src, wh->i_addr2); iev.iev_cipher = IEEE80211_CIPHER_TKIP; iev.iev_keyix = keyix; CURVNET_SET(ifp->if_vnet); rt_ieee80211msg(ifp, RTM_IEEE80211_MICHAEL, &iev, sizeof(iev)); CURVNET_RESTORE(); } } void ieee80211_notify_wds_discover(struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct ifnet *ifp = vap->iv_ifp; notify_macaddr(ifp, RTM_IEEE80211_WDS, ni->ni_macaddr); } void ieee80211_notify_csa(struct ieee80211com *ic, const struct ieee80211_channel *c, int mode, int 
count) { struct ieee80211_csa_event iev; struct ieee80211vap *vap; struct ifnet *ifp; memset(&iev, 0, sizeof(iev)); iev.iev_flags = c->ic_flags; iev.iev_freq = c->ic_freq; iev.iev_ieee = c->ic_ieee; iev.iev_mode = mode; iev.iev_count = count; TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) { ifp = vap->iv_ifp; CURVNET_SET(ifp->if_vnet); rt_ieee80211msg(ifp, RTM_IEEE80211_CSA, &iev, sizeof(iev)); CURVNET_RESTORE(); } } void ieee80211_notify_radar(struct ieee80211com *ic, const struct ieee80211_channel *c) { struct ieee80211_radar_event iev; struct ieee80211vap *vap; struct ifnet *ifp; memset(&iev, 0, sizeof(iev)); iev.iev_flags = c->ic_flags; iev.iev_freq = c->ic_freq; iev.iev_ieee = c->ic_ieee; TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) { ifp = vap->iv_ifp; CURVNET_SET(ifp->if_vnet); rt_ieee80211msg(ifp, RTM_IEEE80211_RADAR, &iev, sizeof(iev)); CURVNET_RESTORE(); } } void ieee80211_notify_cac(struct ieee80211com *ic, const struct ieee80211_channel *c, enum ieee80211_notify_cac_event type) { struct ieee80211_cac_event iev; struct ieee80211vap *vap; struct ifnet *ifp; memset(&iev, 0, sizeof(iev)); iev.iev_flags = c->ic_flags; iev.iev_freq = c->ic_freq; iev.iev_ieee = c->ic_ieee; iev.iev_type = type; TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) { ifp = vap->iv_ifp; CURVNET_SET(ifp->if_vnet); rt_ieee80211msg(ifp, RTM_IEEE80211_CAC, &iev, sizeof(iev)); CURVNET_RESTORE(); } } void ieee80211_notify_node_deauth(struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct ifnet *ifp = vap->iv_ifp; IEEE80211_NOTE(vap, IEEE80211_MSG_NODE, ni, "%s", "node deauth"); notify_macaddr(ifp, RTM_IEEE80211_DEAUTH, ni->ni_macaddr); } void ieee80211_notify_node_auth(struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct ifnet *ifp = vap->iv_ifp; IEEE80211_NOTE(vap, IEEE80211_MSG_NODE, ni, "%s", "node auth"); notify_macaddr(ifp, RTM_IEEE80211_AUTH, ni->ni_macaddr); } void ieee80211_notify_country(struct ieee80211vap *vap, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t cc[2]) { struct ifnet *ifp = vap->iv_ifp; struct ieee80211_country_event iev; memset(&iev, 0, sizeof(iev)); IEEE80211_ADDR_COPY(iev.iev_addr, bssid); iev.iev_cc[0] = cc[0]; iev.iev_cc[1] = cc[1]; CURVNET_SET(ifp->if_vnet); rt_ieee80211msg(ifp, RTM_IEEE80211_COUNTRY, &iev, sizeof(iev)); CURVNET_RESTORE(); } void ieee80211_notify_radio(struct ieee80211com *ic, int state) { struct ieee80211_radio_event iev; struct ieee80211vap *vap; struct ifnet *ifp; memset(&iev, 0, sizeof(iev)); iev.iev_state = state; TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) { ifp = vap->iv_ifp; CURVNET_SET(ifp->if_vnet); rt_ieee80211msg(ifp, RTM_IEEE80211_RADIO, &iev, sizeof(iev)); CURVNET_RESTORE(); } } void ieee80211_notify_ifnet_change(struct ieee80211vap *vap) { struct ifnet *ifp = vap->iv_ifp; IEEE80211_DPRINTF(vap, IEEE80211_MSG_DEBUG, "%s\n", "interface state change"); CURVNET_SET(ifp->if_vnet); rt_ifmsg(ifp); CURVNET_RESTORE(); } void ieee80211_load_module(const char *modname) { #ifdef notyet (void)kern_kldload(curthread, modname, NULL); #else printf("%s: load the %s module by hand for now.\n", __func__, modname); #endif } static eventhandler_tag wlan_bpfevent; static eventhandler_tag wlan_ifllevent; static void bpf_track(void *arg, struct ifnet *ifp, int dlt, int attach) { /* NB: identify vap's by if_init */ if (dlt == DLT_IEEE802_11_RADIO && ifp->if_init == ieee80211_init) { struct ieee80211vap *vap = ifp->if_softc; /* * Track bpf radiotap listener state. 
We mark the vap * to indicate if any listener is present and the com * to indicate if any listener exists on any associated * vap. This flag is used by drivers to prepare radiotap * state only when needed. */ if (attach) { ieee80211_syncflag_ext(vap, IEEE80211_FEXT_BPF); if (vap->iv_opmode == IEEE80211_M_MONITOR) atomic_add_int(&vap->iv_ic->ic_montaps, 1); } else if (!bpf_peers_present(vap->iv_rawbpf)) { ieee80211_syncflag_ext(vap, -IEEE80211_FEXT_BPF); if (vap->iv_opmode == IEEE80211_M_MONITOR) atomic_subtract_int(&vap->iv_ic->ic_montaps, 1); } } } /* * Change MAC address on the vap (if was not started). */ static void wlan_iflladdr(void *arg __unused, struct ifnet *ifp) { /* NB: identify vap's by if_init */ if (ifp->if_init == ieee80211_init && (ifp->if_flags & IFF_UP) == 0) { struct ieee80211vap *vap = ifp->if_softc; IEEE80211_ADDR_COPY(vap->iv_myaddr, IF_LLADDR(ifp)); } } /* * Fetch the VAP name. * * This returns a const char pointer suitable for debugging, * but don't expect it to stick around for much longer. */ const char * ieee80211_get_vap_ifname(struct ieee80211vap *vap) { if (vap->iv_ifp == NULL) return "(none)"; return vap->iv_ifp->if_xname; } #ifdef DEBUGNET static void ieee80211_debugnet_init(struct ifnet *ifp, int *nrxr, int *ncl, int *clsize) { struct ieee80211vap *vap; struct ieee80211com *ic; vap = if_getsoftc(ifp); ic = vap->iv_ic; IEEE80211_LOCK(ic); ic->ic_debugnet_meth->dn8_init(ic, nrxr, ncl, clsize); IEEE80211_UNLOCK(ic); } static void ieee80211_debugnet_event(struct ifnet *ifp, enum debugnet_ev ev) { struct ieee80211vap *vap; struct ieee80211com *ic; vap = if_getsoftc(ifp); ic = vap->iv_ic; IEEE80211_LOCK(ic); ic->ic_debugnet_meth->dn8_event(ic, ev); IEEE80211_UNLOCK(ic); } static int ieee80211_debugnet_transmit(struct ifnet *ifp, struct mbuf *m) { return (ieee80211_vap_transmit(ifp, m)); } static int ieee80211_debugnet_poll(struct ifnet *ifp, int count) { struct ieee80211vap *vap; struct ieee80211com *ic; vap = if_getsoftc(ifp); ic = vap->iv_ic; return (ic->ic_debugnet_meth->dn8_poll(ic, count)); } #endif /* * Module glue. * * NB: the module name is "wlan" for compatibility with NetBSD. */ static int wlan_modevent(module_t mod, int type, void *unused) { switch (type) { case MOD_LOAD: if (bootverbose) printf("wlan: <802.11 Link Layer>\n"); wlan_bpfevent = EVENTHANDLER_REGISTER(bpf_track, bpf_track, 0, EVENTHANDLER_PRI_ANY); wlan_ifllevent = EVENTHANDLER_REGISTER(iflladdr_event, wlan_iflladdr, NULL, EVENTHANDLER_PRI_ANY); wlan_cloner = if_clone_simple(wlanname, wlan_clone_create, wlan_clone_destroy, 0); return 0; case MOD_UNLOAD: if_clone_detach(wlan_cloner); EVENTHANDLER_DEREGISTER(bpf_track, wlan_bpfevent); EVENTHANDLER_DEREGISTER(iflladdr_event, wlan_ifllevent); return 0; } return EINVAL; } static moduledata_t wlan_mod = { wlanname, wlan_modevent, 0 }; DECLARE_MODULE(wlan, wlan_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST); MODULE_VERSION(wlan, 1); MODULE_DEPEND(wlan, ether, 1, 1, 1); #ifdef IEEE80211_ALQ MODULE_DEPEND(wlan, alq, 1, 1, 1); #endif /* IEEE80211_ALQ */ diff --git a/sys/net80211/ieee80211_hostap.c b/sys/net80211/ieee80211_hostap.c index 17c1de6c2894..2be8f241b59d 100644 --- a/sys/net80211/ieee80211_hostap.c +++ b/sys/net80211/ieee80211_hostap.c @@ -1,2480 +1,2480 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2007-2008 Sam Leffler, Errno Consulting * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include #ifdef __FreeBSD__ __FBSDID("$FreeBSD$"); #endif /* * IEEE 802.11 HOSTAP mode support. */ #include "opt_inet.h" #include "opt_wlan.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef IEEE80211_SUPPORT_SUPERG #include #endif #include #include #include /* for parse_wmeie */ #define IEEE80211_RATE2MBS(r) (((r) & IEEE80211_RATE_VAL) / 2) static void hostap_vattach(struct ieee80211vap *); static int hostap_newstate(struct ieee80211vap *, enum ieee80211_state, int); static int hostap_input(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_rx_stats *, int rssi, int nf); static void hostap_deliver_data(struct ieee80211vap *, struct ieee80211_node *, struct mbuf *); static void hostap_recv_mgmt(struct ieee80211_node *, struct mbuf *, int subtype, const struct ieee80211_rx_stats *rxs, int rssi, int nf); static void hostap_recv_ctl(struct ieee80211_node *, struct mbuf *, int); void ieee80211_hostap_attach(struct ieee80211com *ic) { ic->ic_vattach[IEEE80211_M_HOSTAP] = hostap_vattach; } void ieee80211_hostap_detach(struct ieee80211com *ic) { } static void hostap_vdetach(struct ieee80211vap *vap) { } static void hostap_vattach(struct ieee80211vap *vap) { vap->iv_newstate = hostap_newstate; vap->iv_input = hostap_input; vap->iv_recv_mgmt = hostap_recv_mgmt; vap->iv_recv_ctl = hostap_recv_ctl; vap->iv_opdetach = hostap_vdetach; vap->iv_deliver_data = hostap_deliver_data; vap->iv_recv_pspoll = ieee80211_recv_pspoll; } static void sta_disassoc(void *arg, struct ieee80211_node *ni) { if (ni->ni_associd != 0) { IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_DISASSOC, IEEE80211_REASON_ASSOC_LEAVE); ieee80211_node_leave(ni); } } static void sta_csa(void *arg, struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; if (ni->ni_associd != 0) if (ni->ni_inact > vap->iv_inact_init) { ni->ni_inact = vap->iv_inact_init; IEEE80211_NOTE(vap, IEEE80211_MSG_INACT, ni, "%s: inact %u", __func__, ni->ni_inact); } } static void sta_drop(void *arg, struct ieee80211_node *ni) { if (ni->ni_associd != 0) ieee80211_node_leave(ni); } /* * Does a channel change require associated stations to re-associate * so protocol state is correct. This is used when doing CSA across * bands or similar (e.g. HT -> legacy). 
*/ static int isbandchange(struct ieee80211com *ic) { return ((ic->ic_bsschan->ic_flags ^ ic->ic_csa_newchan->ic_flags) & (IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_5GHZ | IEEE80211_CHAN_HALF | IEEE80211_CHAN_QUARTER | IEEE80211_CHAN_HT)) != 0; } /* * IEEE80211_M_HOSTAP vap state machine handler. */ static int hostap_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct ieee80211com *ic = vap->iv_ic; enum ieee80211_state ostate; IEEE80211_LOCK_ASSERT(ic); ostate = vap->iv_state; IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE, "%s: %s -> %s (%d)\n", __func__, ieee80211_state_name[ostate], ieee80211_state_name[nstate], arg); vap->iv_state = nstate; /* state transition */ if (ostate != IEEE80211_S_SCAN) ieee80211_cancel_scan(vap); /* background scan */ switch (nstate) { case IEEE80211_S_INIT: switch (ostate) { case IEEE80211_S_SCAN: ieee80211_cancel_scan(vap); break; case IEEE80211_S_CAC: ieee80211_dfs_cac_stop(vap); break; case IEEE80211_S_RUN: ieee80211_iterate_nodes_vap(&ic->ic_sta, vap, sta_disassoc, NULL); break; default: break; } if (ostate != IEEE80211_S_INIT) { /* NB: optimize INIT -> INIT case */ ieee80211_reset_bss(vap); } if (vap->iv_auth->ia_detach != NULL) vap->iv_auth->ia_detach(vap); break; case IEEE80211_S_SCAN: switch (ostate) { case IEEE80211_S_CSA: case IEEE80211_S_RUN: ieee80211_iterate_nodes_vap(&ic->ic_sta, vap, sta_disassoc, NULL); /* * Clear overlapping BSS state; the beacon frame * will be reconstructed on transition to the RUN * state and the timeout routines check if the flag * is set before doing anything so this is sufficient. */ vap->iv_flags_ext &= ~IEEE80211_FEXT_NONERP_PR; vap->iv_flags_ht &= ~IEEE80211_FHT_NONHT_PR; /* XXX TODO: schedule deferred update? */ /* fall thru... */ case IEEE80211_S_CAC: /* * NB: We may get here because of a manual channel * change in which case we need to stop CAC * XXX no need to stop if ostate RUN but it's ok */ ieee80211_dfs_cac_stop(vap); /* fall thru... */ case IEEE80211_S_INIT: if (vap->iv_des_chan != IEEE80211_CHAN_ANYC && !IEEE80211_IS_CHAN_RADAR(vap->iv_des_chan)) { /* * Already have a channel; bypass the * scan and startup immediately. * ieee80211_create_ibss will call back to * move us to RUN state. */ ieee80211_create_ibss(vap, vap->iv_des_chan); break; } /* * Initiate a scan. We can come here as a result * of an IEEE80211_IOC_SCAN_REQ too in which case * the vap will be marked with IEEE80211_FEXT_SCANREQ * and the scan request parameters will be present * in iv_scanreq. Otherwise we do the default. */ if (vap->iv_flags_ext & IEEE80211_FEXT_SCANREQ) { ieee80211_check_scan(vap, vap->iv_scanreq_flags, vap->iv_scanreq_duration, vap->iv_scanreq_mindwell, vap->iv_scanreq_maxdwell, vap->iv_scanreq_nssid, vap->iv_scanreq_ssid); vap->iv_flags_ext &= ~IEEE80211_FEXT_SCANREQ; } else ieee80211_check_scan_current(vap); break; case IEEE80211_S_SCAN: /* * A state change requires a reset; scan. */ ieee80211_check_scan_current(vap); break; default: break; } break; case IEEE80211_S_CAC: /* * Start CAC on a DFS channel. We come here when starting * a bss on a DFS channel (see ieee80211_create_ibss). */ ieee80211_dfs_cac_start(vap); break; case IEEE80211_S_RUN: if (vap->iv_flags & IEEE80211_F_WPA) { /* XXX validate prerequisites */ } switch (ostate) { case IEEE80211_S_INIT: /* * Already have a channel; bypass the * scan and startup immediately. * Note that ieee80211_create_ibss will call * back to do a RUN->RUN state change. 
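The isbandchange() test above is a flag XOR masked against the band- and width-related channel bits. A small user-space sketch follows; it is not from the source, and the flag values are illustrative stand-ins rather than the ones defined in ieee80211.h.

/* Sketch of the band/width-change test used before a CSA completes. */
#include <assert.h>
#include <stdint.h>

#define CHAN_2GHZ	0x01
#define CHAN_5GHZ	0x02
#define CHAN_HALF	0x04
#define CHAN_QUARTER	0x08
#define CHAN_HT		0x10

static int
is_band_change(uint32_t oldflags, uint32_t newflags)
{
	return ((oldflags ^ newflags) &
	    (CHAN_2GHZ | CHAN_5GHZ | CHAN_HALF | CHAN_QUARTER | CHAN_HT)) != 0;
}

int
main(void)
{
	/* HT -> legacy on the same band still forces re-association... */
	assert(is_band_change(CHAN_5GHZ | CHAN_HT, CHAN_5GHZ));
	/* ...while a plain channel move within a band does not. */
	assert(!is_band_change(CHAN_2GHZ, CHAN_2GHZ));
	return 0;
}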
*/ ieee80211_create_ibss(vap, ieee80211_ht_adjust_channel(ic, ic->ic_curchan, vap->iv_flags_ht)); /* NB: iv_bss is changed on return */ break; case IEEE80211_S_CAC: /* * NB: This is the normal state change when CAC * expires and no radar was detected; no need to * clear the CAC timer as it's already expired. */ /* fall thru... */ case IEEE80211_S_CSA: /* * Shorten inactivity timer of associated stations * to weed out sta's that don't follow a CSA. */ ieee80211_iterate_nodes_vap(&ic->ic_sta, vap, sta_csa, NULL); /* * Update bss node channel to reflect where * we landed after CSA. */ ieee80211_node_set_chan(vap->iv_bss, ieee80211_ht_adjust_channel(ic, ic->ic_curchan, ieee80211_htchanflags(vap->iv_bss->ni_chan))); /* XXX bypass debug msgs */ break; case IEEE80211_S_SCAN: case IEEE80211_S_RUN: #ifdef IEEE80211_DEBUG if (ieee80211_msg_debug(vap)) { struct ieee80211_node *ni = vap->iv_bss; ieee80211_note(vap, "synchronized with %s ssid ", ether_sprintf(ni->ni_bssid)); ieee80211_print_essid(ni->ni_essid, ni->ni_esslen); /* XXX MCS/HT */ printf(" channel %d start %uMb\n", ieee80211_chan2ieee(ic, ic->ic_curchan), IEEE80211_RATE2MBS(ni->ni_txrate)); } #endif break; default: break; } /* * Start/stop the authenticator. We delay until here * to allow configuration to happen out of order. */ if (vap->iv_auth->ia_attach != NULL) { /* XXX check failure */ vap->iv_auth->ia_attach(vap); } else if (vap->iv_auth->ia_detach != NULL) { vap->iv_auth->ia_detach(vap); } ieee80211_node_authorize(vap->iv_bss); break; case IEEE80211_S_CSA: if (ostate == IEEE80211_S_RUN && isbandchange(ic)) { /* * On a ``band change'' silently drop associated * stations as they must re-associate before they * can pass traffic (as otherwise protocol state * such as capabilities and the negotiated rate * set may/will be wrong). */ ieee80211_iterate_nodes_vap(&ic->ic_sta, vap, sta_drop, NULL); } break; default: break; } return 0; } static void hostap_deliver_data(struct ieee80211vap *vap, struct ieee80211_node *ni, struct mbuf *m) { struct ether_header *eh = mtod(m, struct ether_header *); struct ifnet *ifp = vap->iv_ifp; /* clear driver/net80211 flags before passing up */ m->m_flags &= ~(M_MCAST | M_BCAST); m_clrprotoflags(m); KASSERT(vap->iv_opmode == IEEE80211_M_HOSTAP, ("gack, opmode %d", vap->iv_opmode)); /* * Do accounting. */ if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); IEEE80211_NODE_STAT(ni, rx_data); IEEE80211_NODE_STAT_ADD(ni, rx_bytes, m->m_pkthdr.len); if (ETHER_IS_MULTICAST(eh->ether_dhost)) { m->m_flags |= M_MCAST; /* XXX M_BCAST? */ IEEE80211_NODE_STAT(ni, rx_mcast); } else IEEE80211_NODE_STAT(ni, rx_ucast); /* perform as a bridge within the AP */ if ((vap->iv_flags & IEEE80211_F_NOBRIDGE) == 0) { struct mbuf *mcopy = NULL; if (m->m_flags & M_MCAST) { - mcopy = m_dup(m, M_NOWAIT); + mcopy = m_dup(m, IEEE80211_M_NOWAIT); if (mcopy == NULL) if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); else mcopy->m_flags |= M_MCAST; } else { /* * Check if the destination is associated with the * same vap and authorized to receive traffic. * Beware of traffic destined for the vap itself; * sending it will not work; just let it be delivered * normally. */ struct ieee80211_node *sta = ieee80211_find_vap_node( &vap->iv_ic->ic_sta, vap, eh->ether_dhost); if (sta != NULL) { if (ieee80211_node_is_authorized(sta)) { /* * Beware of sending to ourself; this * needs to happen via the normal * input path. 
*/ if (sta != vap->iv_bss) { mcopy = m; m = NULL; } } else { vap->iv_stats.is_rx_unauth++; IEEE80211_NODE_STAT(sta, rx_unauth); } ieee80211_free_node(sta); } } if (mcopy != NULL) (void) ieee80211_vap_xmitpkt(vap, mcopy); } if (m != NULL) { /* * Mark frame as coming from vap's interface. */ m->m_pkthdr.rcvif = ifp; if (m->m_flags & M_MCAST) { /* * Spam DWDS vap's w/ multicast traffic. */ /* XXX only if dwds in use? */ ieee80211_dwds_mcast(vap, m); } if (ni->ni_vlan != 0) { /* attach vlan tag */ m->m_pkthdr.ether_vtag = ni->ni_vlan; m->m_flags |= M_VLANTAG; } ifp->if_input(ifp, m); } } /* * Decide if a received management frame should be * printed when debugging is enabled. This filters some * of the less interesting frames that come frequently * (e.g. beacons). */ static __inline int doprint(struct ieee80211vap *vap, int subtype) { switch (subtype) { case IEEE80211_FC0_SUBTYPE_BEACON: return (vap->iv_ic->ic_flags & IEEE80211_F_SCAN); case IEEE80211_FC0_SUBTYPE_PROBE_REQ: return 0; } return 1; } /* * Process a received frame. The node associated with the sender * should be supplied. If nothing was found in the node table then * the caller is assumed to supply a reference to iv_bss instead. * The RSSI and a timestamp are also supplied. The RSSI data is used * during AP scanning to select a AP to associate with; it can have * any units so long as values have consistent units and higher values * mean ``better signal''. The receive timestamp is currently not used * by the 802.11 layer. */ static int hostap_input(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_rx_stats *rxs, int rssi, int nf) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct ifnet *ifp = vap->iv_ifp; struct ieee80211_frame *wh; struct ieee80211_key *key; struct ether_header *eh; int hdrspace, need_tap = 1; /* mbuf need to be tapped. */ uint8_t dir, type, subtype, qos; uint8_t *bssid; int is_hw_decrypted = 0; int has_decrypted = 0; /* * Some devices do hardware decryption all the way through * to pretending the frame wasn't encrypted in the first place. * So, tag it appropriately so it isn't discarded inappropriately. */ if ((rxs != NULL) && (rxs->c_pktflags & IEEE80211_RX_F_DECRYPTED)) is_hw_decrypted = 1; if (m->m_flags & M_AMPDU_MPDU) { /* * Fastpath for A-MPDU reorder q resubmission. Frames * w/ M_AMPDU_MPDU marked have already passed through * here but were received out of order and been held on * the reorder queue. When resubmitted they are marked * with the M_AMPDU_MPDU flag and we can bypass most of * the normal processing. */ wh = mtod(m, struct ieee80211_frame *); type = IEEE80211_FC0_TYPE_DATA; dir = wh->i_fc[1] & IEEE80211_FC1_DIR_MASK; subtype = IEEE80211_FC0_SUBTYPE_QOS; hdrspace = ieee80211_hdrspace(ic, wh); /* XXX optimize? */ goto resubmit_ampdu; } KASSERT(ni != NULL, ("null node")); ni->ni_inact = ni->ni_inact_reload; type = -1; /* undefined */ if (m->m_pkthdr.len < sizeof(struct ieee80211_frame_min)) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY, ni->ni_macaddr, NULL, "too short (1): len %u", m->m_pkthdr.len); vap->iv_stats.is_rx_tooshort++; goto out; } /* * Bit of a cheat here, we use a pointer for a 3-address * frame format but don't reference fields past outside * ieee80211_frame_min w/o first validating the data is * present. 
*/ wh = mtod(m, struct ieee80211_frame *); if ((wh->i_fc[0] & IEEE80211_FC0_VERSION_MASK) != IEEE80211_FC0_VERSION_0) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY, ni->ni_macaddr, NULL, "wrong version, fc %02x:%02x", wh->i_fc[0], wh->i_fc[1]); vap->iv_stats.is_rx_badversion++; goto err; } dir = wh->i_fc[1] & IEEE80211_FC1_DIR_MASK; type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; if ((ic->ic_flags & IEEE80211_F_SCAN) == 0) { if (dir != IEEE80211_FC1_DIR_NODS) bssid = wh->i_addr1; else if (type == IEEE80211_FC0_TYPE_CTL) bssid = wh->i_addr1; else { if (m->m_pkthdr.len < sizeof(struct ieee80211_frame)) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY, ni->ni_macaddr, NULL, "too short (2): len %u", m->m_pkthdr.len); vap->iv_stats.is_rx_tooshort++; goto out; } bssid = wh->i_addr3; } /* * Validate the bssid. */ if (!(type == IEEE80211_FC0_TYPE_MGT && subtype == IEEE80211_FC0_SUBTYPE_BEACON) && !IEEE80211_ADDR_EQ(bssid, vap->iv_bss->ni_bssid) && !IEEE80211_ADDR_EQ(bssid, ifp->if_broadcastaddr)) { /* not interested in */ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT, bssid, NULL, "%s", "not to bss"); vap->iv_stats.is_rx_wrongbss++; goto out; } IEEE80211_RSSI_LPF(ni->ni_avgrssi, rssi); ni->ni_noise = nf; if (IEEE80211_HAS_SEQ(type, subtype)) { uint8_t tid = ieee80211_gettid(wh); if (IEEE80211_QOS_HAS_SEQ(wh) && TID_TO_WME_AC(tid) >= WME_AC_VI) ic->ic_wme.wme_hipri_traffic++; if (! ieee80211_check_rxseq(ni, wh, bssid, rxs)) goto out; } } switch (type) { case IEEE80211_FC0_TYPE_DATA: hdrspace = ieee80211_hdrspace(ic, wh); if (m->m_len < hdrspace && (m = m_pullup(m, hdrspace)) == NULL) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY, ni->ni_macaddr, NULL, "data too short: expecting %u", hdrspace); vap->iv_stats.is_rx_tooshort++; goto out; /* XXX */ } if (!(dir == IEEE80211_FC1_DIR_TODS || (dir == IEEE80211_FC1_DIR_DSTODS && (vap->iv_flags & IEEE80211_F_DWDS)))) { if (dir != IEEE80211_FC1_DIR_DSTODS) { IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, "data", "incorrect dir 0x%x", dir); } else { IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT | IEEE80211_MSG_WDS, wh, "4-address data", "%s", "DWDS not enabled"); } vap->iv_stats.is_rx_wrongdir++; goto out; } /* check if source STA is associated */ if (ni == vap->iv_bss) { IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, "data", "%s", "unknown src"); ieee80211_send_error(ni, wh->i_addr2, IEEE80211_FC0_SUBTYPE_DEAUTH, IEEE80211_REASON_NOT_AUTHED); vap->iv_stats.is_rx_notassoc++; goto err; } if (ni->ni_associd == 0) { IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, "data", "%s", "unassoc src"); IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_DISASSOC, IEEE80211_REASON_NOT_ASSOCED); vap->iv_stats.is_rx_notassoc++; goto err; } /* * Check for power save state change. * XXX out-of-order A-MPDU frames? */ if (((wh->i_fc[1] & IEEE80211_FC1_PWR_MGT) ^ (ni->ni_flags & IEEE80211_NODE_PWR_MGT))) vap->iv_node_ps(ni, wh->i_fc[1] & IEEE80211_FC1_PWR_MGT); /* * For 4-address packets handle WDS discovery * notifications. Once a WDS link is setup frames * are just delivered to the WDS vap (see below). */ if (dir == IEEE80211_FC1_DIR_DSTODS && ni->ni_wdsvap == NULL) { if (!ieee80211_node_is_authorized(ni)) { IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT | IEEE80211_MSG_WDS, wh, "4-address data", "%s", "unauthorized port"); vap->iv_stats.is_rx_unauth++; IEEE80211_NODE_STAT(ni, rx_unauth); goto err; } ieee80211_dwds_discover(ni, m); return type; } /* * Handle A-MPDU re-ordering. 
If the frame is to be * processed directly then ieee80211_ampdu_reorder * will return 0; otherwise it has consumed the mbuf * and we should do nothing more with it. */ if ((m->m_flags & M_AMPDU) && ieee80211_ampdu_reorder(ni, m, rxs) != 0) { m = NULL; goto out; } resubmit_ampdu: /* * Handle privacy requirements. Note that we * must not be preempted from here until after * we (potentially) call ieee80211_crypto_demic; * otherwise we may violate assumptions in the * crypto cipher modules used to do delayed update * of replay sequence numbers. */ if (is_hw_decrypted || IEEE80211_IS_PROTECTED(wh)) { if ((vap->iv_flags & IEEE80211_F_PRIVACY) == 0) { /* * Discard encrypted frames when privacy is off. */ IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, "WEP", "%s", "PRIVACY off"); vap->iv_stats.is_rx_noprivacy++; IEEE80211_NODE_STAT(ni, rx_noprivacy); goto out; } if (ieee80211_crypto_decap(ni, m, hdrspace, &key) == 0) { /* NB: stats+msgs handled in crypto_decap */ IEEE80211_NODE_STAT(ni, rx_wepfail); goto out; } wh = mtod(m, struct ieee80211_frame *); wh->i_fc[1] &= ~IEEE80211_FC1_PROTECTED; has_decrypted = 1; } else { /* XXX M_WEP and IEEE80211_F_PRIVACY */ key = NULL; } /* * Save QoS bits for use below--before we strip the header. */ if (subtype == IEEE80211_FC0_SUBTYPE_QOS) qos = ieee80211_getqos(wh)[0]; else qos = 0; /* * Next up, any fragmentation. */ if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { m = ieee80211_defrag(ni, m, hdrspace, has_decrypted); if (m == NULL) { /* Fragment dropped or frame not complete yet */ goto out; } } wh = NULL; /* no longer valid, catch any uses */ /* * Next strip any MSDU crypto bits. */ if (key != NULL && !ieee80211_crypto_demic(vap, key, m, 0)) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT, ni->ni_macaddr, "data", "%s", "demic error"); vap->iv_stats.is_rx_demicfail++; IEEE80211_NODE_STAT(ni, rx_demicfail); goto out; } /* copy to listener after decrypt */ if (ieee80211_radiotap_active_vap(vap)) ieee80211_radiotap_rx(vap, m); need_tap = 0; /* * Finally, strip the 802.11 header. */ m = ieee80211_decap(vap, m, hdrspace, qos); if (m == NULL) { /* XXX mask bit to check for both */ /* don't count Null data frames as errors */ if (subtype == IEEE80211_FC0_SUBTYPE_NODATA || subtype == IEEE80211_FC0_SUBTYPE_QOS_NULL) goto out; IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT, ni->ni_macaddr, "data", "%s", "decap error"); vap->iv_stats.is_rx_decap++; IEEE80211_NODE_STAT(ni, rx_decap); goto err; } if (!(qos & IEEE80211_QOS_AMSDU)) eh = mtod(m, struct ether_header *); else eh = NULL; if (!ieee80211_node_is_authorized(ni)) { /* * Deny any non-PAE frames received prior to * authorization. For open/shared-key * authentication the port is mark authorized * after authentication completes. For 802.1x * the port is not marked authorized by the * authenticator until the handshake has completed. */ if (eh == NULL || eh->ether_type != htons(ETHERTYPE_PAE)) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT, ni->ni_macaddr, "data", "unauthorized or " "unknown port: ether type 0x%x len %u", eh == NULL ? -1 : eh->ether_type, m->m_pkthdr.len); vap->iv_stats.is_rx_unauth++; IEEE80211_NODE_STAT(ni, rx_unauth); goto err; } } else { /* * When denying unencrypted frames, discard * any non-PAE frames received without encryption. */ if ((vap->iv_flags & IEEE80211_F_DROPUNENC) && ((has_decrypted == 0) && (m->m_flags & M_WEP) == 0) && (is_hw_decrypted == 0) && (eh == NULL || eh->ether_type != htons(ETHERTYPE_PAE))) { /* * Drop unencrypted frames. 
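 *
 * That is: with IEEE80211_F_DROPUNENC set, a data frame that was
 * decrypted neither in software nor in hardware is discarded unless
 * it is an EAPOL (ETHERTYPE_PAE) frame; 802.1X traffic must pass in
 * the clear so the key handshake can complete before keys are
 * installed.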
*/ vap->iv_stats.is_rx_unencrypted++; IEEE80211_NODE_STAT(ni, rx_unencrypted); goto out; } } /* XXX require HT? */ if (qos & IEEE80211_QOS_AMSDU) { m = ieee80211_decap_amsdu(ni, m); if (m == NULL) return IEEE80211_FC0_TYPE_DATA; } else { #ifdef IEEE80211_SUPPORT_SUPERG m = ieee80211_decap_fastframe(vap, ni, m); if (m == NULL) return IEEE80211_FC0_TYPE_DATA; #endif } if (dir == IEEE80211_FC1_DIR_DSTODS && ni->ni_wdsvap != NULL) ieee80211_deliver_data(ni->ni_wdsvap, ni, m); else hostap_deliver_data(vap, ni, m); return IEEE80211_FC0_TYPE_DATA; case IEEE80211_FC0_TYPE_MGT: vap->iv_stats.is_rx_mgmt++; IEEE80211_NODE_STAT(ni, rx_mgmt); if (dir != IEEE80211_FC1_DIR_NODS) { IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, "mgt", "incorrect dir 0x%x", dir); vap->iv_stats.is_rx_wrongdir++; goto err; } if (m->m_pkthdr.len < sizeof(struct ieee80211_frame)) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY, ni->ni_macaddr, "mgt", "too short: len %u", m->m_pkthdr.len); vap->iv_stats.is_rx_tooshort++; goto out; } if (IEEE80211_IS_MULTICAST(wh->i_addr2)) { /* ensure return frames are unicast */ IEEE80211_DISCARD(vap, IEEE80211_MSG_ANY, wh, NULL, "source is multicast: %s", ether_sprintf(wh->i_addr2)); vap->iv_stats.is_rx_mgtdiscard++; /* XXX stat */ goto out; } #ifdef IEEE80211_DEBUG if ((ieee80211_msg_debug(vap) && doprint(vap, subtype)) || ieee80211_msg_dumppkts(vap)) { if_printf(ifp, "received %s from %s rssi %d\n", ieee80211_mgt_subtype_name(subtype), ether_sprintf(wh->i_addr2), rssi); } #endif if (IEEE80211_IS_PROTECTED(wh)) { if (subtype != IEEE80211_FC0_SUBTYPE_AUTH) { /* * Only shared key auth frames with a challenge * should be encrypted, discard all others. */ IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, NULL, "%s", "WEP set but not permitted"); vap->iv_stats.is_rx_mgtdiscard++; /* XXX */ goto out; } if ((vap->iv_flags & IEEE80211_F_PRIVACY) == 0) { /* * Discard encrypted frames when privacy is off. */ IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, NULL, "%s", "WEP set but PRIVACY off"); vap->iv_stats.is_rx_noprivacy++; goto out; } hdrspace = ieee80211_hdrspace(ic, wh); if (ieee80211_crypto_decap(ni, m, hdrspace, &key) == 0) { /* NB: stats+msgs handled in crypto_decap */ goto out; } wh = mtod(m, struct ieee80211_frame *); wh->i_fc[1] &= ~IEEE80211_FC1_PROTECTED; has_decrypted = 1; } /* * Pass the packet to radiotap before calling iv_recv_mgmt(). * Otherwise iv_recv_mgmt() might pass another packet to * radiotap, resulting in out of order packet captures. 
*/ if (ieee80211_radiotap_active_vap(vap)) ieee80211_radiotap_rx(vap, m); need_tap = 0; vap->iv_recv_mgmt(ni, m, subtype, rxs, rssi, nf); goto out; case IEEE80211_FC0_TYPE_CTL: vap->iv_stats.is_rx_ctl++; IEEE80211_NODE_STAT(ni, rx_ctrl); vap->iv_recv_ctl(ni, m, subtype); goto out; default: IEEE80211_DISCARD(vap, IEEE80211_MSG_ANY, wh, "bad", "frame type 0x%x", type); /* should not come here */ break; } err: if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); out: if (m != NULL) { if (need_tap && ieee80211_radiotap_active_vap(vap)) ieee80211_radiotap_rx(vap, m); m_freem(m); } return type; } static void hostap_auth_open(struct ieee80211_node *ni, struct ieee80211_frame *wh, int rssi, int nf, uint16_t seq, uint16_t status) { struct ieee80211vap *vap = ni->ni_vap; KASSERT(vap->iv_state == IEEE80211_S_RUN, ("state %d", vap->iv_state)); if (ni->ni_authmode == IEEE80211_AUTH_SHARED) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_AUTH, ni->ni_macaddr, "open auth", "bad sta auth mode %u", ni->ni_authmode); vap->iv_stats.is_rx_bad_auth++; /* XXX */ /* * Clear any challenge text that may be there if * a previous shared key auth failed and then an * open auth is attempted. */ if (ni->ni_challenge != NULL) { IEEE80211_FREE(ni->ni_challenge, M_80211_NODE); ni->ni_challenge = NULL; } /* XXX hack to workaround calling convention */ ieee80211_send_error(ni, wh->i_addr2, IEEE80211_FC0_SUBTYPE_AUTH, (seq + 1) | (IEEE80211_STATUS_ALG<<16)); return; } if (seq != IEEE80211_AUTH_OPEN_REQUEST) { vap->iv_stats.is_rx_bad_auth++; return; } /* always accept open authentication requests */ if (ni == vap->iv_bss) { ni = ieee80211_dup_bss(vap, wh->i_addr2); if (ni == NULL) return; } else if ((ni->ni_flags & IEEE80211_NODE_AREF) == 0) (void) ieee80211_ref_node(ni); /* * Mark the node as referenced to reflect that it's * reference count has been bumped to insure it remains * after the transaction completes. */ ni->ni_flags |= IEEE80211_NODE_AREF; /* * Mark the node as requiring a valid association id * before outbound traffic is permitted. */ ni->ni_flags |= IEEE80211_NODE_ASSOCID; if (vap->iv_acl != NULL && vap->iv_acl->iac_getpolicy(vap) == IEEE80211_MACCMD_POLICY_RADIUS) { /* * When the ACL policy is set to RADIUS we defer the * authorization to a user agent. Dispatch an event, * a subsequent MLME call will decide the fate of the * station. If the user agent is not present then the * node will be reclaimed due to inactivity. */ IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_AUTH | IEEE80211_MSG_ACL, ni->ni_macaddr, "%s", "station authentication defered (radius acl)"); ieee80211_notify_node_auth(ni); } else { IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_AUTH, seq + 1); IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_DEBUG | IEEE80211_MSG_AUTH, ni->ni_macaddr, "%s", "station authenticated (open)"); /* * When 802.1x is not in use mark the port * authorized at this point so traffic can flow. */ if (ni->ni_authmode != IEEE80211_AUTH_8021X) ieee80211_node_authorize(ni); } } static void hostap_auth_shared(struct ieee80211_node *ni, struct ieee80211_frame *wh, uint8_t *frm, uint8_t *efrm, int rssi, int nf, uint16_t seq, uint16_t status) { struct ieee80211vap *vap = ni->ni_vap; uint8_t *challenge; int estatus; KASSERT(vap->iv_state == IEEE80211_S_RUN, ("state %d", vap->iv_state)); /* * NB: this can happen as we allow pre-shared key * authentication to be enabled w/o wep being turned * on so that configuration of these can be done * in any order. It may be better to enforce the * ordering in which case this check would just be * for sanity/consistency. 
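 *
 * For reference, shared key authentication is the legacy WEP
 * four-frame exchange: seq 1 is the station's request, seq 2 the
 * AP's challenge text, seq 3 the challenge echoed back WEP-encrypted
 * by the station and seq 4 the AP's verdict.  The code below
 * generates the challenge on seq 1 and compares it on seq 3,
 * answering with seq + 1 in both cases.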
*/ if ((vap->iv_flags & IEEE80211_F_PRIVACY) == 0) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_AUTH, ni->ni_macaddr, "shared key auth", "%s", " PRIVACY is disabled"); estatus = IEEE80211_STATUS_ALG; goto bad; } /* * Pre-shared key authentication is evil; accept * it only if explicitly configured (it is supported * mainly for compatibility with clients like Mac OS X). */ if (ni->ni_authmode != IEEE80211_AUTH_AUTO && ni->ni_authmode != IEEE80211_AUTH_SHARED) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_AUTH, ni->ni_macaddr, "shared key auth", "bad sta auth mode %u", ni->ni_authmode); vap->iv_stats.is_rx_bad_auth++; /* XXX maybe a unique error? */ estatus = IEEE80211_STATUS_ALG; goto bad; } challenge = NULL; if (frm + 1 < efrm) { if ((frm[1] + 2) > (efrm - frm)) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_AUTH, ni->ni_macaddr, "shared key auth", "ie %d/%d too long", frm[0], (frm[1] + 2) - (efrm - frm)); vap->iv_stats.is_rx_bad_auth++; estatus = IEEE80211_STATUS_CHALLENGE; goto bad; } if (*frm == IEEE80211_ELEMID_CHALLENGE) challenge = frm; frm += frm[1] + 2; } switch (seq) { case IEEE80211_AUTH_SHARED_CHALLENGE: case IEEE80211_AUTH_SHARED_RESPONSE: if (challenge == NULL) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_AUTH, ni->ni_macaddr, "shared key auth", "%s", "no challenge"); vap->iv_stats.is_rx_bad_auth++; estatus = IEEE80211_STATUS_CHALLENGE; goto bad; } if (challenge[1] != IEEE80211_CHALLENGE_LEN) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_AUTH, ni->ni_macaddr, "shared key auth", "bad challenge len %d", challenge[1]); vap->iv_stats.is_rx_bad_auth++; estatus = IEEE80211_STATUS_CHALLENGE; goto bad; } default: break; } switch (seq) { case IEEE80211_AUTH_SHARED_REQUEST: { #ifdef IEEE80211_DEBUG bool allocbs; #endif if (ni == vap->iv_bss) { ni = ieee80211_dup_bss(vap, wh->i_addr2); if (ni == NULL) { /* NB: no way to return an error */ return; } #ifdef IEEE80211_DEBUG allocbs = 1; #endif } else { if ((ni->ni_flags & IEEE80211_NODE_AREF) == 0) (void) ieee80211_ref_node(ni); #ifdef IEEE80211_DEBUG allocbs = 0; #endif } /* * Mark the node as referenced to reflect that it's * reference count has been bumped to insure it remains * after the transaction completes. */ ni->ni_flags |= IEEE80211_NODE_AREF; /* * Mark the node as requiring a valid association id * before outbound traffic is permitted. */ ni->ni_flags |= IEEE80211_NODE_ASSOCID; IEEE80211_RSSI_LPF(ni->ni_avgrssi, rssi); ni->ni_noise = nf; if (!ieee80211_alloc_challenge(ni)) { /* NB: don't return error so they rexmit */ return; } net80211_get_random_bytes(ni->ni_challenge, IEEE80211_CHALLENGE_LEN); IEEE80211_NOTE(vap, IEEE80211_MSG_DEBUG | IEEE80211_MSG_AUTH, ni, "shared key %sauth request", allocbs ? "" : "re"); /* * When the ACL policy is set to RADIUS we defer the * authorization to a user agent. Dispatch an event, * a subsequent MLME call will decide the fate of the * station. If the user agent is not present then the * node will be reclaimed due to inactivity. 
*/ if (vap->iv_acl != NULL && vap->iv_acl->iac_getpolicy(vap) == IEEE80211_MACCMD_POLICY_RADIUS) { IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_AUTH | IEEE80211_MSG_ACL, ni->ni_macaddr, "%s", "station authentication defered (radius acl)"); ieee80211_notify_node_auth(ni); return; } break; } case IEEE80211_AUTH_SHARED_RESPONSE: if (ni == vap->iv_bss) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_AUTH, ni->ni_macaddr, "shared key response", "%s", "unknown station"); /* NB: don't send a response */ return; } if (ni->ni_challenge == NULL) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_AUTH, ni->ni_macaddr, "shared key response", "%s", "no challenge recorded"); vap->iv_stats.is_rx_bad_auth++; estatus = IEEE80211_STATUS_CHALLENGE; goto bad; } if (memcmp(ni->ni_challenge, &challenge[2], challenge[1]) != 0) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_AUTH, ni->ni_macaddr, "shared key response", "%s", "challenge mismatch"); vap->iv_stats.is_rx_auth_fail++; estatus = IEEE80211_STATUS_CHALLENGE; goto bad; } IEEE80211_NOTE(vap, IEEE80211_MSG_DEBUG | IEEE80211_MSG_AUTH, ni, "%s", "station authenticated (shared key)"); ieee80211_node_authorize(ni); break; default: IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_AUTH, ni->ni_macaddr, "shared key auth", "bad seq %d", seq); vap->iv_stats.is_rx_bad_auth++; estatus = IEEE80211_STATUS_SEQUENCE; goto bad; } IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_AUTH, seq + 1); return; bad: /* * Send an error response; but only when operating as an AP. */ /* XXX hack to workaround calling convention */ ieee80211_send_error(ni, wh->i_addr2, IEEE80211_FC0_SUBTYPE_AUTH, (seq + 1) | (estatus<<16)); } /* * Convert a WPA cipher selector OUI to an internal * cipher algorithm. Where appropriate we also * record any key length. */ static int wpa_cipher(const uint8_t *sel, uint8_t *keylen, uint8_t *cipher) { #define WPA_SEL(x) (((x)<<24)|WPA_OUI) uint32_t w = le32dec(sel); switch (w) { case WPA_SEL(WPA_CSE_NULL): *cipher = IEEE80211_CIPHER_NONE; break; case WPA_SEL(WPA_CSE_WEP40): if (keylen) *keylen = 40 / NBBY; *cipher = IEEE80211_CIPHER_WEP; break; case WPA_SEL(WPA_CSE_WEP104): if (keylen) *keylen = 104 / NBBY; *cipher = IEEE80211_CIPHER_WEP; break; case WPA_SEL(WPA_CSE_TKIP): *cipher = IEEE80211_CIPHER_TKIP; break; case WPA_SEL(WPA_CSE_CCMP): *cipher = IEEE80211_CIPHER_AES_CCM; break; default: return (EINVAL); } return (0); #undef WPA_SEL } /* * Convert a WPA key management/authentication algorithm * to an internal code. */ static int wpa_keymgmt(const uint8_t *sel) { #define WPA_SEL(x) (((x)<<24)|WPA_OUI) uint32_t w = le32dec(sel); switch (w) { case WPA_SEL(WPA_ASE_8021X_UNSPEC): return WPA_ASE_8021X_UNSPEC; case WPA_SEL(WPA_ASE_8021X_PSK): return WPA_ASE_8021X_PSK; case WPA_SEL(WPA_ASE_NONE): return WPA_ASE_NONE; } return 0; /* NB: so is discarded */ #undef WPA_SEL } /* * Parse a WPA information element to collect parameters. * Note that we do not validate security parameters; that * is handled by the authenticator; the parsing done here * is just for internal use in making operational decisions. */ static int ieee80211_parse_wpa(struct ieee80211vap *vap, const uint8_t *frm, struct ieee80211_rsnparms *rsn, const struct ieee80211_frame *wh) { uint8_t len = frm[1]; uint32_t w; int error, n; /* * Check the length once for fixed parts: OUI, type, * version, mcast cipher, and 2 selector counts. * Other, variable-length data, must be checked separately. 
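 *
 * The fixed part works out to 14 bytes: OUI + type (4), version (2),
 * group cipher selector (4) and the two 2-byte selector counts,
 * which is what the "len < 14" test below enforces.  The subsequent
 * "frm += 6, len -= 4" skips the element id, length, OUI and type
 * octets while deducting only the OUI/type bytes from len, since len
 * never counted the id/length header in the first place.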
*/ if ((vap->iv_flags & IEEE80211_F_WPA1) == 0) { IEEE80211_DISCARD_IE(vap, IEEE80211_MSG_ELEMID | IEEE80211_MSG_WPA, wh, "WPA", "not WPA, flags 0x%x", vap->iv_flags); return IEEE80211_REASON_IE_INVALID; } if (len < 14) { IEEE80211_DISCARD_IE(vap, IEEE80211_MSG_ELEMID | IEEE80211_MSG_WPA, wh, "WPA", "too short, len %u", len); return IEEE80211_REASON_IE_INVALID; } frm += 6, len -= 4; /* NB: len is payload only */ /* NB: iswpaoui already validated the OUI and type */ w = le16dec(frm); if (w != WPA_VERSION) { IEEE80211_DISCARD_IE(vap, IEEE80211_MSG_ELEMID | IEEE80211_MSG_WPA, wh, "WPA", "bad version %u", w); return IEEE80211_REASON_IE_INVALID; } frm += 2, len -= 2; memset(rsn, 0, sizeof(*rsn)); /* multicast/group cipher */ error = wpa_cipher(frm, &rsn->rsn_mcastkeylen, &rsn->rsn_mcastcipher); if (error != 0) { IEEE80211_DISCARD_IE(vap, IEEE80211_MSG_ELEMID | IEEE80211_MSG_WPA, wh, "WPA", "unknown mcast cipher suite %08X", le32dec(frm)); return IEEE80211_REASON_GROUP_CIPHER_INVALID; } frm += 4, len -= 4; /* unicast ciphers */ n = le16dec(frm); frm += 2, len -= 2; if (len < n*4+2) { IEEE80211_DISCARD_IE(vap, IEEE80211_MSG_ELEMID | IEEE80211_MSG_WPA, wh, "WPA", "ucast cipher data too short; len %u, n %u", len, n); return IEEE80211_REASON_IE_INVALID; } w = 0; for (; n > 0; n--) { uint8_t cipher; error = wpa_cipher(frm, &rsn->rsn_ucastkeylen, &cipher); if (error == 0) w |= 1 << cipher; frm += 4, len -= 4; } if (w == 0) { IEEE80211_DISCARD_IE(vap, IEEE80211_MSG_ELEMID | IEEE80211_MSG_WPA, wh, "WPA", "no usable pairwise cipher suite found (w=%d)", w); return IEEE80211_REASON_PAIRWISE_CIPHER_INVALID; } /* XXX other? */ if (w & (1 << IEEE80211_CIPHER_AES_CCM)) rsn->rsn_ucastcipher = IEEE80211_CIPHER_AES_CCM; else rsn->rsn_ucastcipher = IEEE80211_CIPHER_TKIP; /* key management algorithms */ n = le16dec(frm); frm += 2, len -= 2; if (len < n*4) { IEEE80211_DISCARD_IE(vap, IEEE80211_MSG_ELEMID | IEEE80211_MSG_WPA, wh, "WPA", "key mgmt alg data too short; len %u, n %u", len, n); return IEEE80211_REASON_IE_INVALID; } w = 0; for (; n > 0; n--) { w |= wpa_keymgmt(frm); frm += 4, len -= 4; } if (w & WPA_ASE_8021X_UNSPEC) rsn->rsn_keymgmt = WPA_ASE_8021X_UNSPEC; else rsn->rsn_keymgmt = WPA_ASE_8021X_PSK; if (len > 2) /* optional capabilities */ rsn->rsn_caps = le16dec(frm); return 0; } /* * Convert an RSN cipher selector OUI to an internal * cipher algorithm. Where appropriate we also * record any key length. */ static int rsn_cipher(const uint8_t *sel, uint8_t *keylen, uint8_t *cipher) { #define RSN_SEL(x) (((x)<<24)|RSN_OUI) uint32_t w = le32dec(sel); switch (w) { case RSN_SEL(RSN_CSE_NULL): *cipher = IEEE80211_CIPHER_NONE; break; case RSN_SEL(RSN_CSE_WEP40): if (keylen) *keylen = 40 / NBBY; *cipher = IEEE80211_CIPHER_WEP; break; case RSN_SEL(RSN_CSE_WEP104): if (keylen) *keylen = 104 / NBBY; *cipher = IEEE80211_CIPHER_WEP; break; case RSN_SEL(RSN_CSE_TKIP): *cipher = IEEE80211_CIPHER_TKIP; break; case RSN_SEL(RSN_CSE_CCMP): *cipher = IEEE80211_CIPHER_AES_CCM; break; case RSN_SEL(RSN_CSE_WRAP): *cipher = IEEE80211_CIPHER_AES_OCB; break; default: return (EINVAL); } return (0); #undef WPA_SEL } /* * Convert an RSN key management/authentication algorithm * to an internal code. 
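 *
 * The AKM selectors use the same wire encoding as the cipher suites
 * above: a 3-byte OUI followed by a one-byte suite type, matched
 * here as a single little-endian 32-bit word.  For example the CCMP
 * cipher selector arrives as the bytes 00 0f ac 04, which le32dec()
 * should turn into 0x04ac0f00, i.e. RSN_SEL(RSN_CSE_CCMP).  Unknown
 * selectors yield 0 and simply drop out of the OR'd mask.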
*/ static int rsn_keymgmt(const uint8_t *sel) { #define RSN_SEL(x) (((x)<<24)|RSN_OUI) uint32_t w = le32dec(sel); switch (w) { case RSN_SEL(RSN_ASE_8021X_UNSPEC): return RSN_ASE_8021X_UNSPEC; case RSN_SEL(RSN_ASE_8021X_PSK): return RSN_ASE_8021X_PSK; case RSN_SEL(RSN_ASE_NONE): return RSN_ASE_NONE; } return 0; /* NB: so is discarded */ #undef RSN_SEL } /* * Parse a WPA/RSN information element to collect parameters * and validate the parameters against what has been * configured for the system. */ static int ieee80211_parse_rsn(struct ieee80211vap *vap, const uint8_t *frm, struct ieee80211_rsnparms *rsn, const struct ieee80211_frame *wh) { uint8_t len = frm[1]; uint32_t w; int error, n; /* * Check the length once for fixed parts: * version, mcast cipher, and 2 selector counts. * Other, variable-length data, must be checked separately. */ if ((vap->iv_flags & IEEE80211_F_WPA2) == 0) { IEEE80211_DISCARD_IE(vap, IEEE80211_MSG_ELEMID | IEEE80211_MSG_WPA, wh, "WPA", "not RSN, flags 0x%x", vap->iv_flags); return IEEE80211_REASON_IE_INVALID; } /* XXX may be shorter */ if (len < 10) { IEEE80211_DISCARD_IE(vap, IEEE80211_MSG_ELEMID | IEEE80211_MSG_WPA, wh, "RSN", "too short, len %u", len); return IEEE80211_REASON_IE_INVALID; } frm += 2; w = le16dec(frm); if (w != RSN_VERSION) { IEEE80211_DISCARD_IE(vap, IEEE80211_MSG_ELEMID | IEEE80211_MSG_WPA, wh, "RSN", "bad version %u", w); return IEEE80211_REASON_UNSUPP_RSN_IE_VERSION; } frm += 2, len -= 2; memset(rsn, 0, sizeof(*rsn)); /* multicast/group cipher */ error = rsn_cipher(frm, &rsn->rsn_mcastkeylen, &rsn->rsn_mcastcipher); if (error != 0) { IEEE80211_DISCARD_IE(vap, IEEE80211_MSG_ELEMID | IEEE80211_MSG_WPA, wh, "RSN", "unknown mcast cipher suite %08X", le32dec(frm)); return IEEE80211_REASON_GROUP_CIPHER_INVALID; } if (rsn->rsn_mcastcipher == IEEE80211_CIPHER_NONE) { IEEE80211_DISCARD_IE(vap, IEEE80211_MSG_ELEMID | IEEE80211_MSG_WPA, wh, "RSN", "invalid mcast cipher suite %d", rsn->rsn_mcastcipher); return IEEE80211_REASON_GROUP_CIPHER_INVALID; } frm += 4, len -= 4; /* unicast ciphers */ n = le16dec(frm); frm += 2, len -= 2; if (len < n*4+2) { IEEE80211_DISCARD_IE(vap, IEEE80211_MSG_ELEMID | IEEE80211_MSG_WPA, wh, "RSN", "ucast cipher data too short; len %u, n %u", len, n); return IEEE80211_REASON_IE_INVALID; } w = 0; for (; n > 0; n--) { uint8_t cipher; error = rsn_cipher(frm, &rsn->rsn_ucastkeylen, &cipher); if (error == 0) w |= 1 << cipher; frm += 4, len -= 4; } if (w & (1 << IEEE80211_CIPHER_AES_CCM)) rsn->rsn_ucastcipher = IEEE80211_CIPHER_AES_CCM; else if (w & (1 << IEEE80211_CIPHER_AES_OCB)) rsn->rsn_ucastcipher = IEEE80211_CIPHER_AES_OCB; else if (w & (1 << IEEE80211_CIPHER_TKIP)) rsn->rsn_ucastcipher = IEEE80211_CIPHER_TKIP; else if ((w & (1 << IEEE80211_CIPHER_NONE)) && (rsn->rsn_mcastcipher == IEEE80211_CIPHER_WEP || rsn->rsn_mcastcipher == IEEE80211_CIPHER_TKIP)) rsn->rsn_ucastcipher = IEEE80211_CIPHER_NONE; else { IEEE80211_DISCARD_IE(vap, IEEE80211_MSG_ELEMID | IEEE80211_MSG_WPA, wh, "RSN", "no usable pairwise cipher suite found (w=%d)", w); return IEEE80211_REASON_PAIRWISE_CIPHER_INVALID; } /* key management algorithms */ n = le16dec(frm); frm += 2, len -= 2; if (len < n*4) { IEEE80211_DISCARD_IE(vap, IEEE80211_MSG_ELEMID | IEEE80211_MSG_WPA, wh, "RSN", "key mgmt alg data too short; len %u, n %u", len, n); return IEEE80211_REASON_IE_INVALID; } w = 0; for (; n > 0; n--) { w |= rsn_keymgmt(frm); frm += 4, len -= 4; } if (w & RSN_ASE_8021X_UNSPEC) rsn->rsn_keymgmt = RSN_ASE_8021X_UNSPEC; else rsn->rsn_keymgmt = RSN_ASE_8021X_PSK; /* 
optional RSN capabilities */ if (len > 2) rsn->rsn_caps = le16dec(frm); /* XXXPMKID */ return 0; } /* * WPA/802.11i association request processing. */ static int wpa_assocreq(struct ieee80211_node *ni, struct ieee80211_rsnparms *rsnparms, const struct ieee80211_frame *wh, const uint8_t *wpa, const uint8_t *rsn, uint16_t capinfo) { struct ieee80211vap *vap = ni->ni_vap; uint8_t reason; int badwparsn; ni->ni_flags &= ~(IEEE80211_NODE_WPS|IEEE80211_NODE_TSN); if (wpa == NULL && rsn == NULL) { if (vap->iv_flags_ext & IEEE80211_FEXT_WPS) { /* * W-Fi Protected Setup (WPS) permits * clients to associate and pass EAPOL frames * to establish initial credentials. */ ni->ni_flags |= IEEE80211_NODE_WPS; return 1; } if ((vap->iv_flags_ext & IEEE80211_FEXT_TSN) && (capinfo & IEEE80211_CAPINFO_PRIVACY)) { /* * Transitional Security Network. Permits clients * to associate and use WEP while WPA is configured. */ ni->ni_flags |= IEEE80211_NODE_TSN; return 1; } IEEE80211_DISCARD(vap, IEEE80211_MSG_ASSOC | IEEE80211_MSG_WPA, wh, NULL, "%s", "no WPA/RSN IE in association request"); vap->iv_stats.is_rx_assoc_badwpaie++; reason = IEEE80211_REASON_IE_INVALID; goto bad; } /* assert right association security credentials */ badwparsn = 0; /* NB: to silence compiler */ switch (vap->iv_flags & IEEE80211_F_WPA) { case IEEE80211_F_WPA1: badwparsn = (wpa == NULL); break; case IEEE80211_F_WPA2: badwparsn = (rsn == NULL); break; case IEEE80211_F_WPA1|IEEE80211_F_WPA2: badwparsn = (wpa == NULL && rsn == NULL); break; } if (badwparsn) { IEEE80211_DISCARD(vap, IEEE80211_MSG_ASSOC | IEEE80211_MSG_WPA, wh, NULL, "%s", "missing WPA/RSN IE in association request"); vap->iv_stats.is_rx_assoc_badwpaie++; reason = IEEE80211_REASON_IE_INVALID; goto bad; } /* * Parse WPA/RSN information element. */ if (wpa != NULL) reason = ieee80211_parse_wpa(vap, wpa, rsnparms, wh); else reason = ieee80211_parse_rsn(vap, rsn, rsnparms, wh); if (reason != 0) { /* XXX wpa->rsn fallback? */ /* XXX distinguish WPA/RSN? */ vap->iv_stats.is_rx_assoc_badwpaie++; goto bad; } IEEE80211_NOTE(vap, IEEE80211_MSG_ASSOC | IEEE80211_MSG_WPA, ni, "%s ie: mc %u/%u uc %u/%u key %u caps 0x%x", wpa != NULL ? "WPA" : "RSN", rsnparms->rsn_mcastcipher, rsnparms->rsn_mcastkeylen, rsnparms->rsn_ucastcipher, rsnparms->rsn_ucastkeylen, rsnparms->rsn_keymgmt, rsnparms->rsn_caps); return 1; bad: ieee80211_node_deauth(ni, reason); return 0; } /* XXX find a better place for definition */ struct l2_update_frame { struct ether_header eh; uint8_t dsap; uint8_t ssap; uint8_t control; uint8_t xid[3]; } __packed; /* * Deliver a TGf L2UF frame on behalf of a station. * This primes any bridge when the station is roaming * between ap's on the same wired network. 
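 *
 * The frame built below is a broadcast 802.2 XID frame whose source
 * address is the roaming station's MAC, so learning switches on the
 * wired segment relearn that MAC on the port leading to this AP.
 * Note that ether_type here actually carries the 802.3 length of the
 * 6-byte LLC/XID payload rather than an Ethertype.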
*/ static void ieee80211_deliver_l2uf(struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct ifnet *ifp = vap->iv_ifp; struct mbuf *m; struct l2_update_frame *l2uf; struct ether_header *eh; - m = m_gethdr(M_NOWAIT, MT_DATA); + m = m_gethdr(IEEE80211_M_NOWAIT, MT_DATA); if (m == NULL) { IEEE80211_NOTE(vap, IEEE80211_MSG_ASSOC, ni, "%s", "no mbuf for l2uf frame"); vap->iv_stats.is_rx_nobuf++; /* XXX not right */ return; } l2uf = mtod(m, struct l2_update_frame *); eh = &l2uf->eh; /* dst: Broadcast address */ IEEE80211_ADDR_COPY(eh->ether_dhost, ifp->if_broadcastaddr); /* src: associated STA */ IEEE80211_ADDR_COPY(eh->ether_shost, ni->ni_macaddr); eh->ether_type = htons(sizeof(*l2uf) - sizeof(*eh)); l2uf->dsap = 0; l2uf->ssap = 0; l2uf->control = 0xf5; l2uf->xid[0] = 0x81; l2uf->xid[1] = 0x80; l2uf->xid[2] = 0x00; m->m_pkthdr.len = m->m_len = sizeof(*l2uf); hostap_deliver_data(vap, ni, m); } static void ratesetmismatch(struct ieee80211_node *ni, const struct ieee80211_frame *wh, int reassoc, int resp, const char *tag, int rate) { IEEE80211_NOTE_MAC(ni->ni_vap, IEEE80211_MSG_ANY, wh->i_addr2, "deny %s request, %s rate set mismatch, rate/MCS %d", reassoc ? "reassoc" : "assoc", tag, rate & IEEE80211_RATE_VAL); IEEE80211_SEND_MGMT(ni, resp, IEEE80211_STATUS_BASIC_RATE); ieee80211_node_leave(ni); } static void capinfomismatch(struct ieee80211_node *ni, const struct ieee80211_frame *wh, int reassoc, int resp, const char *tag, int capinfo) { struct ieee80211vap *vap = ni->ni_vap; IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_ANY, wh->i_addr2, "deny %s request, %s mismatch 0x%x", reassoc ? "reassoc" : "assoc", tag, capinfo); IEEE80211_SEND_MGMT(ni, resp, IEEE80211_STATUS_CAPINFO); ieee80211_node_leave(ni); vap->iv_stats.is_rx_assoc_capmismatch++; } static void htcapmismatch(struct ieee80211_node *ni, const struct ieee80211_frame *wh, int reassoc, int resp) { IEEE80211_NOTE_MAC(ni->ni_vap, IEEE80211_MSG_ANY, wh->i_addr2, "deny %s request, %s missing HT ie", reassoc ? 
"reassoc" : "assoc"); /* XXX no better code */ IEEE80211_SEND_MGMT(ni, resp, IEEE80211_STATUS_MISSING_HT_CAPS); ieee80211_node_leave(ni); } static void authalgreject(struct ieee80211_node *ni, const struct ieee80211_frame *wh, int algo, int seq, int status) { struct ieee80211vap *vap = ni->ni_vap; IEEE80211_DISCARD(vap, IEEE80211_MSG_ANY, wh, NULL, "unsupported alg %d", algo); vap->iv_stats.is_rx_auth_unsupported++; ieee80211_send_error(ni, wh->i_addr2, IEEE80211_FC0_SUBTYPE_AUTH, seq | (status << 16)); } static __inline int ishtmixed(const uint8_t *ie) { const struct ieee80211_ie_htinfo *ht = (const struct ieee80211_ie_htinfo *) ie; return (ht->hi_byte2 & IEEE80211_HTINFO_OPMODE) == IEEE80211_HTINFO_OPMODE_MIXED; } static int is11bclient(const uint8_t *rates, const uint8_t *xrates) { static const uint32_t brates = (1<<2*1)|(1<<2*2)|(1<<11)|(1<<2*11); int i; /* NB: the 11b clients we care about will not have xrates */ if (xrates != NULL || rates == NULL) return 0; for (i = 0; i < rates[1]; i++) { int r = rates[2+i] & IEEE80211_RATE_VAL; if (r > 2*11 || ((1<ni_vap; struct ieee80211com *ic = ni->ni_ic; struct ieee80211_frame *wh; uint8_t *frm, *efrm, *sfrm; uint8_t *ssid, *rates, *xrates, *wpa, *rsn, *wme, *ath, *htcap; uint8_t *vhtcap, *vhtinfo; int reassoc, resp; uint8_t rate; wh = mtod(m0, struct ieee80211_frame *); frm = (uint8_t *)&wh[1]; efrm = mtod(m0, uint8_t *) + m0->m_len; switch (subtype) { case IEEE80211_FC0_SUBTYPE_PROBE_RESP: /* * We process beacon/probe response frames when scanning; * otherwise we check beacon frames for overlapping non-ERP * BSS in 11g and/or overlapping legacy BSS when in HT. */ if ((ic->ic_flags & IEEE80211_F_SCAN) == 0) { vap->iv_stats.is_rx_mgtdiscard++; return; } /* FALLTHROUGH */ case IEEE80211_FC0_SUBTYPE_BEACON: { struct ieee80211_scanparams scan; /* NB: accept off-channel frames */ /* XXX TODO: use rxstatus to determine off-channel details */ if (ieee80211_parse_beacon(ni, m0, ic->ic_curchan, &scan) &~ IEEE80211_BPARSE_OFFCHAN) return; /* * Count frame now that we know it's to be processed. */ if (subtype == IEEE80211_FC0_SUBTYPE_BEACON) { vap->iv_stats.is_rx_beacon++; /* XXX remove */ IEEE80211_NODE_STAT(ni, rx_beacons); } else IEEE80211_NODE_STAT(ni, rx_proberesp); /* * If scanning, just pass information to the scan module. */ if (ic->ic_flags & IEEE80211_F_SCAN) { if (scan.status == 0 && /* NB: on channel */ (ic->ic_flags_ext & IEEE80211_FEXT_PROBECHAN)) { /* * Actively scanning a channel marked passive; * send a probe request now that we know there * is 802.11 traffic present. * * XXX check if the beacon we recv'd gives * us what we need and suppress the probe req */ ieee80211_probe_curchan(vap, 1); ic->ic_flags_ext &= ~IEEE80211_FEXT_PROBECHAN; } ieee80211_add_scan(vap, ic->ic_curchan, &scan, wh, subtype, rssi, nf); return; } /* * Check beacon for overlapping bss w/ non ERP stations. * If we detect one and protection is configured but not * enabled, enable it and start a timer that'll bring us * out if we stop seeing the bss. */ if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan) && scan.status == 0 && /* NB: on-channel */ ((scan.erp & 0x100) == 0 || /* NB: no ERP, 11b sta*/ (scan.erp & IEEE80211_ERP_NON_ERP_PRESENT))) { vap->iv_lastnonerp = ticks; vap->iv_flags_ext |= IEEE80211_FEXT_NONERP_PR; /* * XXX TODO: this may need to check all VAPs? 
*/ if (vap->iv_protmode != IEEE80211_PROT_NONE && (vap->iv_flags & IEEE80211_F_USEPROT) == 0) { IEEE80211_NOTE_FRAME(vap, IEEE80211_MSG_ASSOC, wh, "non-ERP present on channel %d " "(saw erp 0x%x from channel %d), " "enable use of protection", ic->ic_curchan->ic_ieee, scan.erp, scan.chan); vap->iv_flags |= IEEE80211_F_USEPROT; ieee80211_vap_update_erp_protmode(vap); } } /* * Check beacon for non-HT station on HT channel * and update HT BSS occupancy as appropriate. */ if (IEEE80211_IS_CHAN_HT(ic->ic_curchan)) { if (scan.status & IEEE80211_BPARSE_OFFCHAN) { /* * Off control channel; only check frames * that come in the extension channel when * operating w/ HT40. */ if (!IEEE80211_IS_CHAN_HT40(ic->ic_curchan)) break; if (scan.chan != ic->ic_curchan->ic_extieee) break; } if (scan.htinfo == NULL) { ieee80211_htprot_update(vap, IEEE80211_HTINFO_OPMODE_PROTOPT | IEEE80211_HTINFO_NONHT_PRESENT); } else if (ishtmixed(scan.htinfo)) { /* XXX? take NONHT_PRESENT from beacon? */ ieee80211_htprot_update(vap, IEEE80211_HTINFO_OPMODE_MIXED | IEEE80211_HTINFO_NONHT_PRESENT); } } break; } case IEEE80211_FC0_SUBTYPE_PROBE_REQ: if (vap->iv_state != IEEE80211_S_RUN) { vap->iv_stats.is_rx_mgtdiscard++; return; } /* * Consult the ACL policy module if setup. */ if (vap->iv_acl != NULL && !vap->iv_acl->iac_check(vap, wh)) { IEEE80211_DISCARD(vap, IEEE80211_MSG_ACL, wh, NULL, "%s", "disallowed by ACL"); vap->iv_stats.is_rx_acl++; return; } /* * prreq frame format * [tlv] ssid * [tlv] supported rates * [tlv] extended supported rates */ ssid = rates = xrates = NULL; while (efrm - frm > 1) { IEEE80211_VERIFY_LENGTH(efrm - frm, frm[1] + 2, return); switch (*frm) { case IEEE80211_ELEMID_SSID: ssid = frm; break; case IEEE80211_ELEMID_RATES: rates = frm; break; case IEEE80211_ELEMID_XRATES: xrates = frm; break; } frm += frm[1] + 2; } IEEE80211_VERIFY_ELEMENT(rates, IEEE80211_RATE_MAXSIZE, return); if (xrates != NULL) IEEE80211_VERIFY_ELEMENT(xrates, IEEE80211_RATE_MAXSIZE - rates[1], return); IEEE80211_VERIFY_ELEMENT(ssid, IEEE80211_NWID_LEN, return); IEEE80211_VERIFY_SSID(vap->iv_bss, ssid, return); if ((vap->iv_flags & IEEE80211_F_HIDESSID) && ssid[1] == 0) { IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, NULL, "%s", "no ssid with ssid suppression enabled"); vap->iv_stats.is_rx_ssidmismatch++; /*XXX*/ return; } /* XXX find a better class or define it's own */ IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_INPUT, wh->i_addr2, "%s", "recv probe req"); /* * Some legacy 11b clients cannot hack a complete * probe response frame. When the request includes * only a bare-bones rate set, communicate this to * the transmit side. */ ieee80211_send_proberesp(vap, wh->i_addr2, is11bclient(rates, xrates) ? IEEE80211_SEND_LEGACY_11B : 0); break; case IEEE80211_FC0_SUBTYPE_AUTH: { uint16_t algo, seq, status; if (vap->iv_state != IEEE80211_S_RUN) { vap->iv_stats.is_rx_mgtdiscard++; return; } if (!IEEE80211_ADDR_EQ(wh->i_addr3, vap->iv_bss->ni_bssid)) { IEEE80211_DISCARD(vap, IEEE80211_MSG_ANY, wh, NULL, "%s", "wrong bssid"); vap->iv_stats.is_rx_wrongbss++; /*XXX unique stat?*/ return; } /* * auth frame format * [2] algorithm * [2] sequence * [2] status * [tlv*] challenge */ IEEE80211_VERIFY_LENGTH(efrm - frm, 6, return); algo = le16toh(*(uint16_t *)frm); seq = le16toh(*(uint16_t *)(frm + 2)); status = le16toh(*(uint16_t *)(frm + 4)); IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_AUTH, wh->i_addr2, "recv auth frame with algorithm %d seq %d", algo, seq); /* * Consult the ACL policy module if setup. 
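 *
 * (Same check as in the probe request path above: the optional ACL
 * module keeps a MAC-based allow/deny list and is consulted before
 * any station state is created; under the RADIUS policy the final
 * decision is instead deferred to a userland agent.)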
*/ if (vap->iv_acl != NULL && !vap->iv_acl->iac_check(vap, wh)) { IEEE80211_DISCARD(vap, IEEE80211_MSG_ACL, wh, NULL, "%s", "disallowed by ACL"); vap->iv_stats.is_rx_acl++; ieee80211_send_error(ni, wh->i_addr2, IEEE80211_FC0_SUBTYPE_AUTH, (seq+1) | (IEEE80211_STATUS_UNSPECIFIED<<16)); return; } if (vap->iv_flags & IEEE80211_F_COUNTERM) { IEEE80211_DISCARD(vap, IEEE80211_MSG_AUTH | IEEE80211_MSG_CRYPTO, wh, NULL, "%s", "TKIP countermeasures enabled"); vap->iv_stats.is_rx_auth_countermeasures++; ieee80211_send_error(ni, wh->i_addr2, IEEE80211_FC0_SUBTYPE_AUTH, IEEE80211_REASON_MIC_FAILURE); return; } if (algo == IEEE80211_AUTH_ALG_SHARED) hostap_auth_shared(ni, wh, frm + 6, efrm, rssi, nf, seq, status); else if (algo == IEEE80211_AUTH_ALG_OPEN) hostap_auth_open(ni, wh, rssi, nf, seq, status); else if (algo == IEEE80211_AUTH_ALG_LEAP) { authalgreject(ni, wh, algo, seq+1, IEEE80211_STATUS_ALG); return; } else { /* * We assume that an unknown algorithm is the result * of a decryption failure on a shared key auth frame; * return a status code appropriate for that instead * of IEEE80211_STATUS_ALG. * * NB: a seq# of 4 is intentional; the decrypted * frame likely has a bogus seq value. */ authalgreject(ni, wh, algo, 4, IEEE80211_STATUS_CHALLENGE); return; } break; } case IEEE80211_FC0_SUBTYPE_ASSOC_REQ: case IEEE80211_FC0_SUBTYPE_REASSOC_REQ: { uint16_t capinfo, lintval; struct ieee80211_rsnparms rsnparms; if (vap->iv_state != IEEE80211_S_RUN) { vap->iv_stats.is_rx_mgtdiscard++; return; } if (!IEEE80211_ADDR_EQ(wh->i_addr3, vap->iv_bss->ni_bssid)) { IEEE80211_DISCARD(vap, IEEE80211_MSG_ANY, wh, NULL, "%s", "wrong bssid"); vap->iv_stats.is_rx_assoc_bss++; return; } if (subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) { reassoc = 1; resp = IEEE80211_FC0_SUBTYPE_REASSOC_RESP; } else { reassoc = 0; resp = IEEE80211_FC0_SUBTYPE_ASSOC_RESP; } if (ni == vap->iv_bss) { IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_ANY, wh->i_addr2, "deny %s request, sta not authenticated", reassoc ? "reassoc" : "assoc"); ieee80211_send_error(ni, wh->i_addr2, IEEE80211_FC0_SUBTYPE_DEAUTH, IEEE80211_REASON_ASSOC_NOT_AUTHED); vap->iv_stats.is_rx_assoc_notauth++; return; } /* * asreq frame format * [2] capability information * [2] listen interval * [6*] current AP address (reassoc only) * [tlv] ssid * [tlv] supported rates * [tlv] extended supported rates * [tlv] WPA or RSN * [tlv] HT capabilities * [tlv] Atheros capabilities */ IEEE80211_VERIFY_LENGTH(efrm - frm, (reassoc ? 
10 : 4), return); capinfo = le16toh(*(uint16_t *)frm); frm += 2; lintval = le16toh(*(uint16_t *)frm); frm += 2; if (reassoc) frm += 6; /* ignore current AP info */ ssid = rates = xrates = wpa = rsn = wme = ath = htcap = NULL; vhtcap = vhtinfo = NULL; sfrm = frm; while (efrm - frm > 1) { IEEE80211_VERIFY_LENGTH(efrm - frm, frm[1] + 2, return); switch (*frm) { case IEEE80211_ELEMID_SSID: ssid = frm; break; case IEEE80211_ELEMID_RATES: rates = frm; break; case IEEE80211_ELEMID_XRATES: xrates = frm; break; case IEEE80211_ELEMID_RSN: rsn = frm; break; case IEEE80211_ELEMID_HTCAP: htcap = frm; break; case IEEE80211_ELEMID_VHT_CAP: vhtcap = frm; break; case IEEE80211_ELEMID_VHT_OPMODE: vhtinfo = frm; break; case IEEE80211_ELEMID_VENDOR: if (iswpaoui(frm)) wpa = frm; else if (iswmeinfo(frm)) wme = frm; #ifdef IEEE80211_SUPPORT_SUPERG else if (isatherosoui(frm)) ath = frm; #endif else if (vap->iv_flags_ht & IEEE80211_FHT_HTCOMPAT) { if (ishtcapoui(frm) && htcap == NULL) htcap = frm; } break; } frm += frm[1] + 2; } IEEE80211_VERIFY_ELEMENT(rates, IEEE80211_RATE_MAXSIZE, return); if (xrates != NULL) IEEE80211_VERIFY_ELEMENT(xrates, IEEE80211_RATE_MAXSIZE - rates[1], return); IEEE80211_VERIFY_ELEMENT(ssid, IEEE80211_NWID_LEN, return); IEEE80211_VERIFY_SSID(vap->iv_bss, ssid, return); if (htcap != NULL) { IEEE80211_VERIFY_LENGTH(htcap[1], htcap[0] == IEEE80211_ELEMID_VENDOR ? 4 + sizeof(struct ieee80211_ie_htcap)-2 : sizeof(struct ieee80211_ie_htcap)-2, return); /* XXX just NULL out? */ } /* Validate VHT IEs */ if (vhtcap != NULL) { IEEE80211_VERIFY_LENGTH(vhtcap[1], sizeof(struct ieee80211_ie_vhtcap) - 2, return); } if (vhtinfo != NULL) { IEEE80211_VERIFY_LENGTH(vhtinfo[1], sizeof(struct ieee80211_ie_vht_operation) - 2, return); } if ((vap->iv_flags & IEEE80211_F_WPA) && !wpa_assocreq(ni, &rsnparms, wh, wpa, rsn, capinfo)) return; /* discard challenge after association */ if (ni->ni_challenge != NULL) { IEEE80211_FREE(ni->ni_challenge, M_80211_NODE); ni->ni_challenge = NULL; } /* NB: 802.11 spec says to ignore station's privacy bit */ if ((capinfo & IEEE80211_CAPINFO_ESS) == 0) { capinfomismatch(ni, wh, reassoc, resp, "capability", capinfo); return; } /* * Disallow re-associate w/ invalid slot time setting. */ if (ni->ni_associd != 0 && IEEE80211_IS_CHAN_ANYG(ic->ic_bsschan) && ((ni->ni_capinfo ^ capinfo) & IEEE80211_CAPINFO_SHORT_SLOTTIME)) { capinfomismatch(ni, wh, reassoc, resp, "slot time", capinfo); return; } rate = ieee80211_setup_rates(ni, rates, xrates, IEEE80211_F_DOSORT | IEEE80211_F_DOFRATE | IEEE80211_F_DONEGO | IEEE80211_F_DODEL); if (rate & IEEE80211_RATE_BASIC) { ratesetmismatch(ni, wh, reassoc, resp, "legacy", rate); vap->iv_stats.is_rx_assoc_norate++; return; } /* * If constrained to 11g-only stations reject an * 11b-only station. We cheat a bit here by looking * at the max negotiated xmit rate and assuming anyone * with a best rate <24Mb/s is an 11b station. */ if ((vap->iv_flags & IEEE80211_F_PUREG) && rate < 48) { ratesetmismatch(ni, wh, reassoc, resp, "11g", rate); vap->iv_stats.is_rx_assoc_norate++; return; } /* * Do HT rate set handling and setup HT node state. 
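 *
 * Order matters below: VHT capabilities are recorded first, then the
 * HT MCS set is negotiated via ieee80211_setup_htrates(), and only
 * once both are known does ieee80211_ht_updatehtcap_final() move the
 * node to the negotiated channel width.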
*/ ni->ni_chan = vap->iv_bss->ni_chan; /* VHT */ if (IEEE80211_IS_CHAN_VHT(ni->ni_chan) && vhtcap != NULL && vhtinfo != NULL) { /* XXX TODO; see below */ printf("%s: VHT TODO!\n", __func__); ieee80211_vht_node_init(ni); ieee80211_vht_update_cap(ni, vhtcap, vhtinfo); } else if (ni->ni_flags & IEEE80211_NODE_VHT) ieee80211_vht_node_cleanup(ni); /* HT */ if (IEEE80211_IS_CHAN_HT(ni->ni_chan) && htcap != NULL) { rate = ieee80211_setup_htrates(ni, htcap, IEEE80211_F_DOFMCS | IEEE80211_F_DONEGO | IEEE80211_F_DOBRS); if (rate & IEEE80211_RATE_BASIC) { ratesetmismatch(ni, wh, reassoc, resp, "HT", rate); vap->iv_stats.is_ht_assoc_norate++; return; } ieee80211_ht_node_init(ni); ieee80211_ht_updatehtcap(ni, htcap); } else if (ni->ni_flags & IEEE80211_NODE_HT) ieee80211_ht_node_cleanup(ni); /* Finally - this will use HT/VHT info to change node channel */ if (IEEE80211_IS_CHAN_HT(ni->ni_chan) && htcap != NULL) { ieee80211_ht_updatehtcap_final(ni); } #ifdef IEEE80211_SUPPORT_SUPERG /* Always do ff node cleanup; for A-MSDU */ ieee80211_ff_node_cleanup(ni); #endif /* * Allow AMPDU operation only with unencrypted traffic * or AES-CCM; the 11n spec only specifies these ciphers * so permitting any others is undefined and can lead * to interoperability problems. */ if ((ni->ni_flags & IEEE80211_NODE_HT) && (((vap->iv_flags & IEEE80211_F_WPA) && rsnparms.rsn_ucastcipher != IEEE80211_CIPHER_AES_CCM) || (vap->iv_flags & (IEEE80211_F_WPA|IEEE80211_F_PRIVACY)) == IEEE80211_F_PRIVACY)) { IEEE80211_NOTE(vap, IEEE80211_MSG_ASSOC | IEEE80211_MSG_11N, ni, "disallow HT use because WEP or TKIP requested, " "capinfo 0x%x ucastcipher %d", capinfo, rsnparms.rsn_ucastcipher); ieee80211_ht_node_cleanup(ni); #ifdef IEEE80211_SUPPORT_SUPERG /* Always do ff node cleanup; for A-MSDU */ ieee80211_ff_node_cleanup(ni); #endif vap->iv_stats.is_ht_assoc_downgrade++; } /* * If constrained to 11n-only stations reject legacy stations. */ if ((vap->iv_flags_ht & IEEE80211_FHT_PUREN) && (ni->ni_flags & IEEE80211_NODE_HT) == 0) { htcapmismatch(ni, wh, reassoc, resp); vap->iv_stats.is_ht_assoc_nohtcap++; return; } IEEE80211_RSSI_LPF(ni->ni_avgrssi, rssi); ni->ni_noise = nf; ni->ni_intval = lintval; ni->ni_capinfo = capinfo; ni->ni_fhdwell = vap->iv_bss->ni_fhdwell; ni->ni_fhindex = vap->iv_bss->ni_fhindex; /* * Store the IEs. * XXX maybe better to just expand */ if (ieee80211_ies_init(&ni->ni_ies, sfrm, efrm - sfrm)) { #define setie(_ie, _off) ieee80211_ies_setie(ni->ni_ies, _ie, _off) if (wpa != NULL) setie(wpa_ie, wpa - sfrm); if (rsn != NULL) setie(rsn_ie, rsn - sfrm); if (htcap != NULL) setie(htcap_ie, htcap - sfrm); if (wme != NULL) { setie(wme_ie, wme - sfrm); /* * Mark node as capable of QoS. */ ni->ni_flags |= IEEE80211_NODE_QOS; if (ieee80211_parse_wmeie(wme, wh, ni) > 0) { if (ni->ni_uapsd != 0) ni->ni_flags |= IEEE80211_NODE_UAPSD; else ni->ni_flags &= ~IEEE80211_NODE_UAPSD; } } else ni->ni_flags &= ~(IEEE80211_NODE_QOS | IEEE80211_NODE_UAPSD); #ifdef IEEE80211_SUPPORT_SUPERG if (ath != NULL) { setie(ath_ie, ath - sfrm); /* * Parse ATH station parameters. 
*/ ieee80211_parse_ath(ni, ni->ni_ies.ath_ie); } else #endif ni->ni_ath_flags = 0; #undef setie } else { ni->ni_flags &= ~IEEE80211_NODE_QOS; ni->ni_flags &= ~IEEE80211_NODE_UAPSD; ni->ni_ath_flags = 0; } ieee80211_node_join(ni, resp); ieee80211_deliver_l2uf(ni); break; } case IEEE80211_FC0_SUBTYPE_DEAUTH: case IEEE80211_FC0_SUBTYPE_DISASSOC: { #ifdef IEEE80211_DEBUG uint16_t reason; #endif if (vap->iv_state != IEEE80211_S_RUN || /* NB: can happen when in promiscuous mode */ !IEEE80211_ADDR_EQ(wh->i_addr1, vap->iv_myaddr)) { vap->iv_stats.is_rx_mgtdiscard++; break; } /* * deauth/disassoc frame format * [2] reason */ IEEE80211_VERIFY_LENGTH(efrm - frm, 2, return); #ifdef IEEE80211_DEBUG reason = le16toh(*(uint16_t *)frm); #endif if (subtype == IEEE80211_FC0_SUBTYPE_DEAUTH) { vap->iv_stats.is_rx_deauth++; IEEE80211_NODE_STAT(ni, rx_deauth); } else { vap->iv_stats.is_rx_disassoc++; IEEE80211_NODE_STAT(ni, rx_disassoc); } IEEE80211_NOTE(vap, IEEE80211_MSG_AUTH, ni, "recv %s (reason: %d (%s))", ieee80211_mgt_subtype_name(subtype), reason, ieee80211_reason_to_string(reason)); if (ni != vap->iv_bss) ieee80211_node_leave(ni); break; } case IEEE80211_FC0_SUBTYPE_ACTION: case IEEE80211_FC0_SUBTYPE_ACTION_NOACK: if (ni == vap->iv_bss) { IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, NULL, "%s", "unknown node"); vap->iv_stats.is_rx_mgtdiscard++; } else if (!IEEE80211_ADDR_EQ(vap->iv_myaddr, wh->i_addr1) && !IEEE80211_IS_MULTICAST(wh->i_addr1)) { IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, NULL, "%s", "not for us"); vap->iv_stats.is_rx_mgtdiscard++; } else if (vap->iv_state != IEEE80211_S_RUN) { IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, NULL, "wrong state %s", ieee80211_state_name[vap->iv_state]); vap->iv_stats.is_rx_mgtdiscard++; } else { if (ieee80211_parse_action(ni, m0) == 0) (void)ic->ic_recv_action(ni, wh, frm, efrm); } break; case IEEE80211_FC0_SUBTYPE_ASSOC_RESP: case IEEE80211_FC0_SUBTYPE_REASSOC_RESP: case IEEE80211_FC0_SUBTYPE_TIMING_ADV: case IEEE80211_FC0_SUBTYPE_ATIM: IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, NULL, "%s", "not handled"); vap->iv_stats.is_rx_mgtdiscard++; break; default: IEEE80211_DISCARD(vap, IEEE80211_MSG_ANY, wh, "mgt", "subtype 0x%x not handled", subtype); vap->iv_stats.is_rx_badsubtype++; break; } } static void hostap_recv_ctl(struct ieee80211_node *ni, struct mbuf *m, int subtype) { switch (subtype) { case IEEE80211_FC0_SUBTYPE_PS_POLL: ni->ni_vap->iv_recv_pspoll(ni, m); break; case IEEE80211_FC0_SUBTYPE_BAR: ieee80211_recv_bar(ni, m); break; } } /* * Process a received ps-poll frame. 
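 *
 * A PS-Poll is a control frame whose duration/ID field carries the
 * station's AID.  The AP answers each poll with exactly one buffered
 * frame, setting MORE_DATA if the power save queue still holds
 * frames and clearing the station's TIM bit once it drains.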
*/ void ieee80211_recv_pspoll(struct ieee80211_node *ni, struct mbuf *m0) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = vap->iv_ic; struct ieee80211_frame_min *wh; struct mbuf *m; uint16_t aid; int qlen; wh = mtod(m0, struct ieee80211_frame_min *); if (ni->ni_associd == 0) { IEEE80211_DISCARD(vap, IEEE80211_MSG_POWER | IEEE80211_MSG_DEBUG, (struct ieee80211_frame *) wh, NULL, "%s", "unassociated station"); vap->iv_stats.is_ps_unassoc++; IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_DEAUTH, IEEE80211_REASON_NOT_ASSOCED); return; } aid = le16toh(*(uint16_t *)wh->i_dur); if (aid != ni->ni_associd) { IEEE80211_DISCARD(vap, IEEE80211_MSG_POWER | IEEE80211_MSG_DEBUG, (struct ieee80211_frame *) wh, NULL, "aid mismatch: sta aid 0x%x poll aid 0x%x", ni->ni_associd, aid); vap->iv_stats.is_ps_badaid++; /* * NB: We used to deauth the station but it turns out * the Blackberry Curve 8230 (and perhaps other devices) * sometimes send the wrong AID when WME is negotiated. * Being more lenient here seems ok as we already check * the station is associated and we only return frames * queued for the station (i.e. we don't use the AID). */ return; } /* Okay, take the first queued packet and put it out... */ m = ieee80211_node_psq_dequeue(ni, &qlen); if (m == NULL) { IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_POWER, wh->i_addr2, "%s", "recv ps-poll, but queue empty"); ieee80211_send_nulldata(ieee80211_ref_node(ni)); vap->iv_stats.is_ps_qempty++; /* XXX node stat */ if (vap->iv_set_tim != NULL) vap->iv_set_tim(ni, 0); /* just in case */ return; } /* * If there are more packets, set the more packets bit * in the packet dispatched to the station; otherwise * turn off the TIM bit. */ if (qlen != 0) { IEEE80211_NOTE(vap, IEEE80211_MSG_POWER, ni, "recv ps-poll, send packet, %u still queued", qlen); m->m_flags |= M_MORE_DATA; } else { IEEE80211_NOTE(vap, IEEE80211_MSG_POWER, ni, "%s", "recv ps-poll, send packet, queue empty"); if (vap->iv_set_tim != NULL) vap->iv_set_tim(ni, 0); } m->m_flags |= M_PWR_SAV; /* bypass PS handling */ /* * Do the right thing; if it's an encap'ed frame then * call ieee80211_parent_xmitpkt() else * call ieee80211_vap_xmitpkt(). */ if (m->m_flags & M_ENCAP) { (void) ieee80211_parent_xmitpkt(ic, m); } else { (void) ieee80211_vap_xmitpkt(vap, m); } } diff --git a/sys/net80211/ieee80211_ht.c b/sys/net80211/ieee80211_ht.c index e680db5faa52..2fbb5a10febf 100644 --- a/sys/net80211/ieee80211_ht.c +++ b/sys/net80211/ieee80211_ht.c @@ -1,3611 +1,3611 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2007-2008 Sam Leffler, Errno Consulting * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include #ifdef __FreeBSD__ __FBSDID("$FreeBSD$"); #endif /* * IEEE 802.11n protocol support. */ #include "opt_inet.h" #include "opt_wlan.h" #include #include #include #include #include #include #include #include #include #include #include #include #include const struct ieee80211_mcs_rates ieee80211_htrates[IEEE80211_HTRATE_MAXSIZE] = { { 13, 14, 27, 30 }, /* MCS 0 */ { 26, 29, 54, 60 }, /* MCS 1 */ { 39, 43, 81, 90 }, /* MCS 2 */ { 52, 58, 108, 120 }, /* MCS 3 */ { 78, 87, 162, 180 }, /* MCS 4 */ { 104, 116, 216, 240 }, /* MCS 5 */ { 117, 130, 243, 270 }, /* MCS 6 */ { 130, 144, 270, 300 }, /* MCS 7 */ { 26, 29, 54, 60 }, /* MCS 8 */ { 52, 58, 108, 120 }, /* MCS 9 */ { 78, 87, 162, 180 }, /* MCS 10 */ { 104, 116, 216, 240 }, /* MCS 11 */ { 156, 173, 324, 360 }, /* MCS 12 */ { 208, 231, 432, 480 }, /* MCS 13 */ { 234, 260, 486, 540 }, /* MCS 14 */ { 260, 289, 540, 600 }, /* MCS 15 */ { 39, 43, 81, 90 }, /* MCS 16 */ { 78, 87, 162, 180 }, /* MCS 17 */ { 117, 130, 243, 270 }, /* MCS 18 */ { 156, 173, 324, 360 }, /* MCS 19 */ { 234, 260, 486, 540 }, /* MCS 20 */ { 312, 347, 648, 720 }, /* MCS 21 */ { 351, 390, 729, 810 }, /* MCS 22 */ { 390, 433, 810, 900 }, /* MCS 23 */ { 52, 58, 108, 120 }, /* MCS 24 */ { 104, 116, 216, 240 }, /* MCS 25 */ { 156, 173, 324, 360 }, /* MCS 26 */ { 208, 231, 432, 480 }, /* MCS 27 */ { 312, 347, 648, 720 }, /* MCS 28 */ { 416, 462, 864, 960 }, /* MCS 29 */ { 468, 520, 972, 1080 }, /* MCS 30 */ { 520, 578, 1080, 1200 }, /* MCS 31 */ { 0, 0, 12, 13 }, /* MCS 32 */ { 78, 87, 162, 180 }, /* MCS 33 */ { 104, 116, 216, 240 }, /* MCS 34 */ { 130, 144, 270, 300 }, /* MCS 35 */ { 117, 130, 243, 270 }, /* MCS 36 */ { 156, 173, 324, 360 }, /* MCS 37 */ { 195, 217, 405, 450 }, /* MCS 38 */ { 104, 116, 216, 240 }, /* MCS 39 */ { 130, 144, 270, 300 }, /* MCS 40 */ { 130, 144, 270, 300 }, /* MCS 41 */ { 156, 173, 324, 360 }, /* MCS 42 */ { 182, 202, 378, 420 }, /* MCS 43 */ { 182, 202, 378, 420 }, /* MCS 44 */ { 208, 231, 432, 480 }, /* MCS 45 */ { 156, 173, 324, 360 }, /* MCS 46 */ { 195, 217, 405, 450 }, /* MCS 47 */ { 195, 217, 405, 450 }, /* MCS 48 */ { 234, 260, 486, 540 }, /* MCS 49 */ { 273, 303, 567, 630 }, /* MCS 50 */ { 273, 303, 567, 630 }, /* MCS 51 */ { 312, 347, 648, 720 }, /* MCS 52 */ { 130, 144, 270, 300 }, /* MCS 53 */ { 156, 173, 324, 360 }, /* MCS 54 */ { 182, 202, 378, 420 }, /* MCS 55 */ { 156, 173, 324, 360 }, /* MCS 56 */ { 182, 202, 378, 420 }, /* MCS 57 */ { 208, 231, 432, 480 }, /* MCS 58 */ { 234, 260, 486, 540 }, /* MCS 59 */ { 208, 231, 432, 480 }, /* MCS 60 */ { 234, 260, 486, 540 }, /* MCS 61 */ { 260, 289, 540, 600 }, /* MCS 62 */ { 260, 289, 540, 600 }, /* MCS 63 */ { 286, 318, 594, 660 }, /* MCS 64 */ { 195, 217, 405, 450 }, /* MCS 65 */ { 234, 260, 486, 540 }, /* MCS 66 */ { 273, 303, 567, 630 }, /* MCS 67 */ { 234, 260, 486, 540 }, /* MCS 68 */ { 273, 303, 567, 630 }, /* MCS 69 */ { 312, 347, 648, 720 }, /* MCS 70 */ { 351, 390, 729, 810 }, /* MCS 71 */ { 312, 347, 648, 720 }, /* MCS 72 */ { 351, 390, 729, 810 }, /* MCS 73 */ { 390, 433, 810, 
900 }, /* MCS 74 */ { 390, 433, 810, 900 }, /* MCS 75 */ { 429, 477, 891, 990 }, /* MCS 76 */ }; static int ieee80211_ampdu_age = -1; /* threshold for ampdu reorder q (ms) */ SYSCTL_PROC(_net_wlan, OID_AUTO, ampdu_age, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &ieee80211_ampdu_age, 0, ieee80211_sysctl_msecs_ticks, "I", "AMPDU max reorder age (ms)"); static int ieee80211_recv_bar_ena = 1; SYSCTL_INT(_net_wlan, OID_AUTO, recv_bar, CTLFLAG_RW, &ieee80211_recv_bar_ena, 0, "BAR frame processing (ena/dis)"); static int ieee80211_addba_timeout = -1;/* timeout for ADDBA response */ SYSCTL_PROC(_net_wlan, OID_AUTO, addba_timeout, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &ieee80211_addba_timeout, 0, ieee80211_sysctl_msecs_ticks, "I", "ADDBA request timeout (ms)"); static int ieee80211_addba_backoff = -1;/* backoff after max ADDBA requests */ SYSCTL_PROC(_net_wlan, OID_AUTO, addba_backoff, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &ieee80211_addba_backoff, 0, ieee80211_sysctl_msecs_ticks, "I", "ADDBA request backoff (ms)"); static int ieee80211_addba_maxtries = 3;/* max ADDBA requests before backoff */ SYSCTL_INT(_net_wlan, OID_AUTO, addba_maxtries, CTLFLAG_RW, &ieee80211_addba_maxtries, 0, "max ADDBA requests sent before backoff"); static int ieee80211_bar_timeout = -1; /* timeout waiting for BAR response */ static int ieee80211_bar_maxtries = 50;/* max BAR requests before DELBA */ static ieee80211_recv_action_func ht_recv_action_ba_addba_request; static ieee80211_recv_action_func ht_recv_action_ba_addba_response; static ieee80211_recv_action_func ht_recv_action_ba_delba; static ieee80211_recv_action_func ht_recv_action_ht_mimopwrsave; static ieee80211_recv_action_func ht_recv_action_ht_txchwidth; static ieee80211_send_action_func ht_send_action_ba_addba; static ieee80211_send_action_func ht_send_action_ba_delba; static ieee80211_send_action_func ht_send_action_ht_txchwidth; static void ieee80211_ht_init(void) { /* * Setup HT parameters that depends on the clock frequency. */ ieee80211_ampdu_age = msecs_to_ticks(500); ieee80211_addba_timeout = msecs_to_ticks(250); ieee80211_addba_backoff = msecs_to_ticks(10*1000); ieee80211_bar_timeout = msecs_to_ticks(250); /* * Register action frame handlers. 
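 *
 * The block ack (BA) category handlers below implement the
 * ADDBA/DELBA exchange used to set up and tear down A-MPDU
 * aggregation, while the HT category handlers cover MIMO power save
 * and channel width notifications from peers.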
*/ ieee80211_recv_action_register(IEEE80211_ACTION_CAT_BA, IEEE80211_ACTION_BA_ADDBA_REQUEST, ht_recv_action_ba_addba_request); ieee80211_recv_action_register(IEEE80211_ACTION_CAT_BA, IEEE80211_ACTION_BA_ADDBA_RESPONSE, ht_recv_action_ba_addba_response); ieee80211_recv_action_register(IEEE80211_ACTION_CAT_BA, IEEE80211_ACTION_BA_DELBA, ht_recv_action_ba_delba); ieee80211_recv_action_register(IEEE80211_ACTION_CAT_HT, IEEE80211_ACTION_HT_MIMOPWRSAVE, ht_recv_action_ht_mimopwrsave); ieee80211_recv_action_register(IEEE80211_ACTION_CAT_HT, IEEE80211_ACTION_HT_TXCHWIDTH, ht_recv_action_ht_txchwidth); ieee80211_send_action_register(IEEE80211_ACTION_CAT_BA, IEEE80211_ACTION_BA_ADDBA_REQUEST, ht_send_action_ba_addba); ieee80211_send_action_register(IEEE80211_ACTION_CAT_BA, IEEE80211_ACTION_BA_ADDBA_RESPONSE, ht_send_action_ba_addba); ieee80211_send_action_register(IEEE80211_ACTION_CAT_BA, IEEE80211_ACTION_BA_DELBA, ht_send_action_ba_delba); ieee80211_send_action_register(IEEE80211_ACTION_CAT_HT, IEEE80211_ACTION_HT_TXCHWIDTH, ht_send_action_ht_txchwidth); } SYSINIT(wlan_ht, SI_SUB_DRIVERS, SI_ORDER_FIRST, ieee80211_ht_init, NULL); static int ieee80211_ampdu_enable(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap); static int ieee80211_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, int dialogtoken, int baparamset, int batimeout); static int ieee80211_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, int code, int baparamset, int batimeout); static void ieee80211_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap); static void null_addba_response_timeout(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap); static void ieee80211_bar_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, int status); static void ampdu_tx_stop(struct ieee80211_tx_ampdu *tap); static void bar_stop_timer(struct ieee80211_tx_ampdu *tap); static int ampdu_rx_start(struct ieee80211_node *, struct ieee80211_rx_ampdu *, int baparamset, int batimeout, int baseqctl); static void ampdu_rx_stop(struct ieee80211_node *, struct ieee80211_rx_ampdu *); void ieee80211_ht_attach(struct ieee80211com *ic) { /* setup default aggregation policy */ ic->ic_recv_action = ieee80211_recv_action; ic->ic_send_action = ieee80211_send_action; ic->ic_ampdu_enable = ieee80211_ampdu_enable; ic->ic_addba_request = ieee80211_addba_request; ic->ic_addba_response = ieee80211_addba_response; ic->ic_addba_response_timeout = null_addba_response_timeout; ic->ic_addba_stop = ieee80211_addba_stop; ic->ic_bar_response = ieee80211_bar_response; ic->ic_ampdu_rx_start = ampdu_rx_start; ic->ic_ampdu_rx_stop = ampdu_rx_stop; ic->ic_htprotmode = IEEE80211_PROT_RTSCTS; ic->ic_curhtprotmode = IEEE80211_HTINFO_OPMODE_PURE; } void ieee80211_ht_detach(struct ieee80211com *ic) { } void ieee80211_ht_vattach(struct ieee80211vap *vap) { /* driver can override defaults */ vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_8K; vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_NA; vap->iv_ampdu_limit = vap->iv_ampdu_rxmax; vap->iv_amsdu_limit = vap->iv_htcaps & IEEE80211_HTCAP_MAXAMSDU; /* tx aggregation traffic thresholds */ vap->iv_ampdu_mintraffic[WME_AC_BK] = 128; vap->iv_ampdu_mintraffic[WME_AC_BE] = 64; vap->iv_ampdu_mintraffic[WME_AC_VO] = 32; vap->iv_ampdu_mintraffic[WME_AC_VI] = 32; vap->iv_htprotmode = IEEE80211_PROT_RTSCTS; vap->iv_curhtprotmode = IEEE80211_HTINFO_OPMODE_PURE; if (vap->iv_htcaps & IEEE80211_HTC_HT) { /* * Device is HT capable; enable all HT-related * 
facilities by default. * XXX these choices may be too aggressive. */ vap->iv_flags_ht |= IEEE80211_FHT_HT | IEEE80211_FHT_HTCOMPAT ; if (vap->iv_htcaps & IEEE80211_HTCAP_SHORTGI20) vap->iv_flags_ht |= IEEE80211_FHT_SHORTGI20; /* XXX infer from channel list? */ if (vap->iv_htcaps & IEEE80211_HTCAP_CHWIDTH40) { vap->iv_flags_ht |= IEEE80211_FHT_USEHT40; if (vap->iv_htcaps & IEEE80211_HTCAP_SHORTGI40) vap->iv_flags_ht |= IEEE80211_FHT_SHORTGI40; } /* enable RIFS if capable */ if (vap->iv_htcaps & IEEE80211_HTC_RIFS) vap->iv_flags_ht |= IEEE80211_FHT_RIFS; /* NB: A-MPDU and A-MSDU rx are mandated, these are tx only */ vap->iv_flags_ht |= IEEE80211_FHT_AMPDU_RX; if (vap->iv_htcaps & IEEE80211_HTC_AMPDU) vap->iv_flags_ht |= IEEE80211_FHT_AMPDU_TX; vap->iv_flags_ht |= IEEE80211_FHT_AMSDU_RX; if (vap->iv_htcaps & IEEE80211_HTC_AMSDU) vap->iv_flags_ht |= IEEE80211_FHT_AMSDU_TX; if (vap->iv_htcaps & IEEE80211_HTCAP_TXSTBC) vap->iv_flags_ht |= IEEE80211_FHT_STBC_TX; if (vap->iv_htcaps & IEEE80211_HTCAP_RXSTBC) vap->iv_flags_ht |= IEEE80211_FHT_STBC_RX; if (vap->iv_htcaps & IEEE80211_HTCAP_LDPC) vap->iv_flags_ht |= IEEE80211_FHT_LDPC_RX; if (vap->iv_htcaps & IEEE80211_HTC_TXLDPC) vap->iv_flags_ht |= IEEE80211_FHT_LDPC_TX; } /* NB: disable default legacy WDS, too many issues right now */ if (vap->iv_flags_ext & IEEE80211_FEXT_WDSLEGACY) vap->iv_flags_ht &= ~IEEE80211_FHT_HT; } void ieee80211_ht_vdetach(struct ieee80211vap *vap) { } static int ht_getrate(struct ieee80211com *ic, int index, enum ieee80211_phymode mode, int ratetype) { int mword, rate; mword = ieee80211_rate2media(ic, index | IEEE80211_RATE_MCS, mode); if (IFM_SUBTYPE(mword) != IFM_IEEE80211_MCS) return (0); switch (ratetype) { case 0: rate = ieee80211_htrates[index].ht20_rate_800ns; break; case 1: rate = ieee80211_htrates[index].ht20_rate_400ns; break; case 2: rate = ieee80211_htrates[index].ht40_rate_800ns; break; default: rate = ieee80211_htrates[index].ht40_rate_400ns; break; } return (rate); } static struct printranges { int minmcs; int maxmcs; int txstream; int ratetype; int htcapflags; } ranges[] = { { 0, 7, 1, 0, 0 }, { 8, 15, 2, 0, 0 }, { 16, 23, 3, 0, 0 }, { 24, 31, 4, 0, 0 }, { 32, 0, 1, 2, IEEE80211_HTC_TXMCS32 }, { 33, 38, 2, 0, IEEE80211_HTC_TXUNEQUAL }, { 39, 52, 3, 0, IEEE80211_HTC_TXUNEQUAL }, { 53, 76, 4, 0, IEEE80211_HTC_TXUNEQUAL }, { 0, 0, 0, 0, 0 }, }; static void ht_rateprint(struct ieee80211com *ic, enum ieee80211_phymode mode, int ratetype) { int minrate, maxrate; struct printranges *range; for (range = ranges; range->txstream != 0; range++) { if (ic->ic_txstream < range->txstream) continue; if (range->htcapflags && (ic->ic_htcaps & range->htcapflags) == 0) continue; if (ratetype < range->ratetype) continue; minrate = ht_getrate(ic, range->minmcs, mode, ratetype); maxrate = ht_getrate(ic, range->maxmcs, mode, ratetype); if (range->maxmcs) { ic_printf(ic, "MCS %d-%d: %d%sMbps - %d%sMbps\n", range->minmcs, range->maxmcs, minrate/2, ((minrate & 0x1) != 0 ? ".5" : ""), maxrate/2, ((maxrate & 0x1) != 0 ? ".5" : "")); } else { ic_printf(ic, "MCS %d: %d%sMbps\n", range->minmcs, minrate/2, ((minrate & 0x1) != 0 ? 
".5" : "")); } } } static void ht_announce(struct ieee80211com *ic, enum ieee80211_phymode mode) { const char *modestr = ieee80211_phymode_name[mode]; ic_printf(ic, "%s MCS 20MHz\n", modestr); ht_rateprint(ic, mode, 0); if (ic->ic_htcaps & IEEE80211_HTCAP_SHORTGI20) { ic_printf(ic, "%s MCS 20MHz SGI\n", modestr); ht_rateprint(ic, mode, 1); } if (ic->ic_htcaps & IEEE80211_HTCAP_CHWIDTH40) { ic_printf(ic, "%s MCS 40MHz:\n", modestr); ht_rateprint(ic, mode, 2); } if ((ic->ic_htcaps & IEEE80211_HTCAP_CHWIDTH40) && (ic->ic_htcaps & IEEE80211_HTCAP_SHORTGI40)) { ic_printf(ic, "%s MCS 40MHz SGI:\n", modestr); ht_rateprint(ic, mode, 3); } } void ieee80211_ht_announce(struct ieee80211com *ic) { if (isset(ic->ic_modecaps, IEEE80211_MODE_11NA) || isset(ic->ic_modecaps, IEEE80211_MODE_11NG)) ic_printf(ic, "%dT%dR\n", ic->ic_txstream, ic->ic_rxstream); if (isset(ic->ic_modecaps, IEEE80211_MODE_11NA)) ht_announce(ic, IEEE80211_MODE_11NA); if (isset(ic->ic_modecaps, IEEE80211_MODE_11NG)) ht_announce(ic, IEEE80211_MODE_11NG); } void ieee80211_init_suphtrates(struct ieee80211com *ic) { #define ADDRATE(x) do { \ htrateset->rs_rates[htrateset->rs_nrates] = x; \ htrateset->rs_nrates++; \ } while (0) struct ieee80211_htrateset *htrateset = &ic->ic_sup_htrates; int i; memset(htrateset, 0, sizeof(struct ieee80211_htrateset)); for (i = 0; i < ic->ic_txstream * 8; i++) ADDRATE(i); if ((ic->ic_htcaps & IEEE80211_HTCAP_CHWIDTH40) && (ic->ic_htcaps & IEEE80211_HTC_TXMCS32)) ADDRATE(32); if (ic->ic_htcaps & IEEE80211_HTC_TXUNEQUAL) { if (ic->ic_txstream >= 2) { for (i = 33; i <= 38; i++) ADDRATE(i); } if (ic->ic_txstream >= 3) { for (i = 39; i <= 52; i++) ADDRATE(i); } if (ic->ic_txstream == 4) { for (i = 53; i <= 76; i++) ADDRATE(i); } } #undef ADDRATE } /* * Receive processing. */ /* * Decap the encapsulated A-MSDU frames and dispatch all but * the last for delivery. The last frame is returned for * delivery via the normal path. */ struct mbuf * ieee80211_decap_amsdu(struct ieee80211_node *ni, struct mbuf *m) { struct ieee80211vap *vap = ni->ni_vap; int framelen; struct mbuf *n; /* discard 802.3 header inserted by ieee80211_decap */ m_adj(m, sizeof(struct ether_header)); vap->iv_stats.is_amsdu_decap++; for (;;) { /* * Decap the first frame, bust it apart from the * remainder and deliver. We leave the last frame * delivery to the caller (for consistency with other * code paths, could also do it here). */ m = ieee80211_decap1(m, &framelen); if (m == NULL) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY, ni->ni_macaddr, "a-msdu", "%s", "decap failed"); vap->iv_stats.is_amsdu_tooshort++; return NULL; } if (m->m_pkthdr.len == framelen) break; - n = m_split(m, framelen, M_NOWAIT); + n = m_split(m, framelen, IEEE80211_M_NOWAIT); if (n == NULL) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY, ni->ni_macaddr, "a-msdu", "%s", "unable to split encapsulated frames"); vap->iv_stats.is_amsdu_split++; m_freem(m); /* NB: must reclaim */ return NULL; } vap->iv_deliver_data(vap, ni, m); /* * Remove frame contents; each intermediate frame * is required to be aligned to a 4-byte boundary. 
*/ m = n; m_adj(m, roundup2(framelen, 4) - framelen); /* padding */ } return m; /* last delivered by caller */ } static void ampdu_rx_purge_slot(struct ieee80211_rx_ampdu *rap, int i) { struct mbuf *m; /* Walk the queue, removing frames as appropriate */ while (mbufq_len(&rap->rxa_mq[i]) != 0) { m = mbufq_dequeue(&rap->rxa_mq[i]); if (m == NULL) break; rap->rxa_qbytes -= m->m_pkthdr.len; rap->rxa_qframes--; m_freem(m); } } /* * Add the given frame to the current RX reorder slot. * * For future offloaded A-MSDU handling where multiple frames with * the same sequence number show up here, this routine will append * those frames as long as they're appropriately tagged. */ static int ampdu_rx_add_slot(struct ieee80211_rx_ampdu *rap, int off, int tid, ieee80211_seq rxseq, struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_rx_stats *rxs) { const struct ieee80211_rx_stats *rxs_final = NULL; struct ieee80211vap *vap = ni->ni_vap; int toss_dup; #define PROCESS 0 /* caller should process frame */ #define CONSUMED 1 /* frame consumed, caller does nothing */ /* * Figure out if this is a duplicate frame for the given slot. * * We're assuming that the driver will hand us all the frames * for a given AMSDU decap pass and if we get /a/ frame * for an AMSDU decap then we'll get all of them. * * The tricksy bit is that we don't know when the /end/ of * the decap pass is, because we aren't tracking state here * per-slot to know that we've finished receiving the frame list. * * The driver sets RX_F_AMSDU and RX_F_AMSDU_MORE to tell us * what's going on; so ideally we'd just check the frame at the * end of the reassembly slot to see if its F_AMSDU w/ no F_AMSDU_MORE - * that means we've received the whole AMSDU decap pass. */ /* * Get the rxs of the final mbuf in the slot, if one exists. */ if (mbufq_len(&rap->rxa_mq[off]) != 0) { rxs_final = ieee80211_get_rx_params_ptr(mbufq_last(&rap->rxa_mq[off])); } /* Default to tossing the duplicate frame */ toss_dup = 1; /* * Check to see if the final frame has F_AMSDU and F_AMSDU set, AND * this frame has F_AMSDU set (MORE or otherwise.) That's a sign * that more can come. */ if ((rxs != NULL) && (rxs_final != NULL) && ieee80211_check_rxseq_amsdu(rxs) && ieee80211_check_rxseq_amsdu(rxs_final)) { if (! ieee80211_check_rxseq_amsdu_more(rxs_final)) { /* * amsdu_more() returning 0 means "it's not the * final frame" so we can append more * frames here. */ toss_dup = 0; } } /* * If the list is empty OR we have determined we can put more * driver decap'ed AMSDU frames in here, then insert. */ if ((mbufq_len(&rap->rxa_mq[off]) == 0) || (toss_dup == 0)) { if (mbufq_enqueue(&rap->rxa_mq[off], m) != 0) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT | IEEE80211_MSG_11N, ni->ni_macaddr, "a-mpdu queue fail", "seqno %u tid %u BA win <%u:%u> off=%d, qlen=%d, maxqlen=%d", rxseq, tid, rap->rxa_start, IEEE80211_SEQ_ADD(rap->rxa_start, rap->rxa_wnd-1), off, mbufq_len(&rap->rxa_mq[off]), rap->rxa_mq[off].mq_maxlen); /* XXX error count */ m_freem(m); return CONSUMED; } rap->rxa_qframes++; rap->rxa_qbytes += m->m_pkthdr.len; vap->iv_stats.is_ampdu_rx_reorder++; /* * Statistics for AMSDU decap. 
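/*
 * Each A-MSDU subframe except the last is padded so the next subframe
 * starts on a 4-byte boundary; the m_adj() above strips exactly
 * roundup2(framelen, 4) - framelen bytes.  A standalone sketch of the
 * padding math (hypothetical helper, not part of this file):
 */
#if 0	/* illustrative only, not compiled */
static int
example_amsdu_pad(int framelen)
{
	int rounded = (framelen + 3) & ~3;	/* roundup2(framelen, 4) */

	/* e.g. framelen 66 -> 2 pad bytes, framelen 64 -> 0 pad bytes */
	return (rounded - framelen);
}
#endif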
*/ if (rxs != NULL && ieee80211_check_rxseq_amsdu(rxs)) { if (ieee80211_check_rxseq_amsdu_more(rxs)) { /* more=1, AMSDU, end of batch */ IEEE80211_NODE_STAT(ni, rx_amsdu_more_end); } else { IEEE80211_NODE_STAT(ni, rx_amsdu_more); } } } else { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT | IEEE80211_MSG_11N, ni->ni_macaddr, "a-mpdu duplicate", "seqno %u tid %u BA win <%u:%u>", rxseq, tid, rap->rxa_start, IEEE80211_SEQ_ADD(rap->rxa_start, rap->rxa_wnd-1)); if (rxs != NULL) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT | IEEE80211_MSG_11N, ni->ni_macaddr, "a-mpdu duplicate", "seqno %d tid %u pktflags 0x%08x\n", rxseq, tid, rxs->c_pktflags); } if (rxs_final != NULL) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT | IEEE80211_MSG_11N, ni->ni_macaddr, "a-mpdu duplicate", "final: pktflags 0x%08x\n", rxs_final->c_pktflags); } vap->iv_stats.is_rx_dup++; IEEE80211_NODE_STAT(ni, rx_dup); m_freem(m); } return CONSUMED; #undef CONSUMED #undef PROCESS } /* * Purge all frames in the A-MPDU re-order queue. */ static void ampdu_rx_purge(struct ieee80211_rx_ampdu *rap) { int i; for (i = 0; i < rap->rxa_wnd; i++) { ampdu_rx_purge_slot(rap, i); if (rap->rxa_qframes == 0) break; } KASSERT(rap->rxa_qbytes == 0 && rap->rxa_qframes == 0, ("lost %u data, %u frames on ampdu rx q", rap->rxa_qbytes, rap->rxa_qframes)); } static void ieee80211_ampdu_rx_init_rap(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap) { int i; /* XXX TODO: ensure the queues are empty */ memset(rap, 0, sizeof(*rap)); for (i = 0; i < IEEE80211_AGGR_BAWMAX; i++) mbufq_init(&rap->rxa_mq[i], 256); } /* * Start A-MPDU rx/re-order processing for the specified TID. */ static int ampdu_rx_start(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap, int baparamset, int batimeout, int baseqctl) { struct ieee80211vap *vap = ni->ni_vap; int bufsiz = _IEEE80211_MASKSHIFT(baparamset, IEEE80211_BAPS_BUFSIZ); if (rap->rxa_flags & IEEE80211_AGGR_RUNNING) { /* * AMPDU previously setup and not terminated with a DELBA, * flush the reorder q's in case anything remains. */ ampdu_rx_purge(rap); } ieee80211_ampdu_rx_init_rap(ni, rap); rap->rxa_wnd = (bufsiz == 0) ? IEEE80211_AGGR_BAWMAX : min(bufsiz, IEEE80211_AGGR_BAWMAX); rap->rxa_start = _IEEE80211_MASKSHIFT(baseqctl, IEEE80211_BASEQ_START); rap->rxa_flags |= IEEE80211_AGGR_RUNNING | IEEE80211_AGGR_XCHGPEND; /* XXX this should be a configuration flag */ if ((vap->iv_htcaps & IEEE80211_HTC_RX_AMSDU_AMPDU) && (_IEEE80211_MASKSHIFT(baparamset, IEEE80211_BAPS_AMSDU))) rap->rxa_flags |= IEEE80211_AGGR_AMSDU; else rap->rxa_flags &= ~IEEE80211_AGGR_AMSDU; return 0; } /* * Public function; manually setup the RX ampdu state. */ int ieee80211_ampdu_rx_start_ext(struct ieee80211_node *ni, int tid, int seq, int baw) { struct ieee80211_rx_ampdu *rap; /* XXX TODO: sanity check tid, seq, baw */ rap = &ni->ni_rx_ampdu[tid]; if (rap->rxa_flags & IEEE80211_AGGR_RUNNING) { /* * AMPDU previously setup and not terminated with a DELBA, * flush the reorder q's in case anything remains. */ ampdu_rx_purge(rap); } ieee80211_ampdu_rx_init_rap(ni, rap); rap->rxa_wnd = (baw== 0) ? 
IEEE80211_AGGR_BAWMAX : min(baw, IEEE80211_AGGR_BAWMAX); if (seq == -1) { /* Wait for the first RX frame, use that as BAW */ rap->rxa_start = 0; rap->rxa_flags |= IEEE80211_AGGR_WAITRX; } else { rap->rxa_start = seq; } rap->rxa_flags |= IEEE80211_AGGR_RUNNING | IEEE80211_AGGR_XCHGPEND; /* XXX TODO: no amsdu flag */ IEEE80211_NOTE(ni->ni_vap, IEEE80211_MSG_11N, ni, "%s: tid=%d, start=%d, wnd=%d, flags=0x%08x", __func__, tid, seq, rap->rxa_wnd, rap->rxa_flags); return 0; } /* * Public function; manually stop the RX AMPDU state. */ void ieee80211_ampdu_rx_stop_ext(struct ieee80211_node *ni, int tid) { struct ieee80211_rx_ampdu *rap; /* XXX TODO: sanity check tid, seq, baw */ rap = &ni->ni_rx_ampdu[tid]; ampdu_rx_stop(ni, rap); } /* * Stop A-MPDU rx processing for the specified TID. */ static void ampdu_rx_stop(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap) { ampdu_rx_purge(rap); rap->rxa_flags &= ~(IEEE80211_AGGR_RUNNING | IEEE80211_AGGR_XCHGPEND | IEEE80211_AGGR_WAITRX); } /* * Dispatch a frame from the A-MPDU reorder queue. The * frame is fed back into ieee80211_input marked with an * M_AMPDU_MPDU flag so it doesn't come back to us (it also * permits ieee80211_input to optimize re-processing). */ static __inline void ampdu_dispatch(struct ieee80211_node *ni, struct mbuf *m) { m->m_flags |= M_AMPDU_MPDU; /* bypass normal processing */ /* NB: rssi and noise are ignored w/ M_AMPDU_MPDU set */ (void) ieee80211_input(ni, m, 0, 0); } static int ampdu_dispatch_slot(struct ieee80211_rx_ampdu *rap, struct ieee80211_node *ni, int i) { struct mbuf *m; int n = 0; while (mbufq_len(&rap->rxa_mq[i]) != 0) { m = mbufq_dequeue(&rap->rxa_mq[i]); if (m == NULL) break; n++; rap->rxa_qbytes -= m->m_pkthdr.len; rap->rxa_qframes--; ampdu_dispatch(ni, m); } return (n); } static void ampdu_rx_moveup(struct ieee80211_rx_ampdu *rap, struct ieee80211_node *ni, int i, int winstart) { struct ieee80211vap *vap = ni->ni_vap; /* * If frames remain, copy the mbuf pointers down so * they correspond to the offsets in the new window. */ if (rap->rxa_qframes != 0) { int n = rap->rxa_qframes, j; for (j = i+1; j < rap->rxa_wnd; j++) { /* * Concat the list contents over, which will * blank the source list for us. */ if (mbufq_len(&rap->rxa_mq[j]) != 0) { n = n - mbufq_len(&rap->rxa_mq[j]); mbufq_concat(&rap->rxa_mq[j-i], &rap->rxa_mq[j]); KASSERT(n >= 0, ("%s: n < 0 (%d)", __func__, n)); if (n == 0) break; } } KASSERT(n == 0, ("%s: lost %d frames, qframes %d off %d " "BA win <%d:%d> winstart %d", __func__, n, rap->rxa_qframes, i, rap->rxa_start, IEEE80211_SEQ_ADD(rap->rxa_start, rap->rxa_wnd-1), winstart)); vap->iv_stats.is_ampdu_rx_copy += rap->rxa_qframes; } } /* * Dispatch as many frames as possible from the re-order queue. * Frames will always be "at the front"; we process all frames * up to the first empty slot in the window. On completion we * cleanup state if there are still pending frames in the current * BA window. We assume the frame at slot 0 is already handled * by the caller; we always start at slot 1. */ static void ampdu_rx_dispatch(struct ieee80211_rx_ampdu *rap, struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; int i, r, r2; /* flush run of frames */ r2 = 0; for (i = 1; i < rap->rxa_wnd; i++) { r = ampdu_dispatch_slot(rap, ni, i); if (r == 0) break; r2 += r; } /* move up frames */ ampdu_rx_moveup(rap, ni, i, -1); /* * Adjust the start of the BA window to * reflect the frames just dispatched. 
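/*
 * ampdu_rx_moveup() slides the reorder slots down so that a frame queued
 * at offset j from the old window start ends up at offset j - i from the
 * new one.  A standalone sketch of that shift using a plain array in
 * place of the mbuf queues (hypothetical, not part of this file):
 */
#if 0	/* illustrative only, not compiled */
#define EX_BAWMAX	64

static void
example_window_moveup(int slots[EX_BAWMAX], int wnd, int i)
{
	int j;

	/* move remaining entries down by i positions ... */
	for (j = i; j < wnd; j++)
		slots[j - i] = slots[j];
	/* ... and clear the slots that just became the tail of the window */
	for (j = wnd - i; j < wnd; j++)
		slots[j] = 0;
}
#endif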
*/ rap->rxa_start = IEEE80211_SEQ_ADD(rap->rxa_start, i); vap->iv_stats.is_ampdu_rx_oor += r2; IEEE80211_NOTE(ni->ni_vap, IEEE80211_MSG_11N, ni, "%s: moved slot up %d slots to start at %d (%d frames)", __func__, i, rap->rxa_start, r2); } /* * Dispatch all frames in the A-MPDU re-order queue. */ static void ampdu_rx_flush(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap) { int i, r; for (i = 0; i < rap->rxa_wnd; i++) { r = ampdu_dispatch_slot(rap, ni, i); if (r == 0) continue; ni->ni_vap->iv_stats.is_ampdu_rx_oor += r; IEEE80211_NOTE(ni->ni_vap, IEEE80211_MSG_11N, ni, "%s: moved slot up %d slots to start at %d (%d frames)", __func__, 1, rap->rxa_start, r); if (rap->rxa_qframes == 0) break; } } /* * Dispatch all frames in the A-MPDU re-order queue * preceding the specified sequence number. This logic * handles window moves due to a received MSDU or BAR. */ static void ampdu_rx_flush_upto(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap, ieee80211_seq winstart) { struct ieee80211vap *vap = ni->ni_vap; ieee80211_seq seqno; int i, r; /* * Flush any complete MSDU's with a sequence number lower * than winstart. Gaps may exist. Note that we may actually * dispatch frames past winstart if a run continues; this is * an optimization that avoids having to do a separate pass * to dispatch frames after moving the BA window start. */ seqno = rap->rxa_start; for (i = 0; i < rap->rxa_wnd; i++) { if ((r = mbufq_len(&rap->rxa_mq[i])) != 0) { (void) ampdu_dispatch_slot(rap, ni, i); } else { if (!IEEE80211_SEQ_BA_BEFORE(seqno, winstart)) break; } vap->iv_stats.is_ampdu_rx_oor += r; seqno = IEEE80211_SEQ_INC(seqno); IEEE80211_NOTE(ni->ni_vap, IEEE80211_MSG_11N, ni, "%s: moved slot up %d slots to start at %d (%d frames)", __func__, 1, seqno, r); } /* * If frames remain, copy the mbuf pointers down so * they correspond to the offsets in the new window. */ ampdu_rx_moveup(rap, ni, i, winstart); /* * Move the start of the BA window; we use the * sequence number of the last MSDU that was * passed up the stack+1 or winstart if stopped on * a gap in the reorder buffer. */ rap->rxa_start = seqno; } /* * Process a received QoS data frame for an HT station. Handle * A-MPDU reordering: if this frame is received out of order * and falls within the BA window hold onto it. Otherwise if * this frame completes a run, flush any pending frames. We * return 1 if the frame is consumed. A 0 is returned if * the frame should be processed normally by the caller. * * A-MSDU: handle hardware decap'ed A-MSDU frames that are * pretending to be MPDU's. They're dispatched directly if * able; or attempted to put into the receive reordering slot. */ int ieee80211_ampdu_reorder(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_rx_stats *rxs) { #define PROCESS 0 /* caller should process frame */ #define CONSUMED 1 /* frame consumed, caller does nothing */ struct ieee80211vap *vap = ni->ni_vap; struct ieee80211_qosframe *wh; struct ieee80211_rx_ampdu *rap; ieee80211_seq rxseq; uint8_t tid; int off; int amsdu = ieee80211_check_rxseq_amsdu(rxs); int amsdu_end = ieee80211_check_rxseq_amsdu_more(rxs); KASSERT((m->m_flags & (M_AMPDU | M_AMPDU_MPDU)) == M_AMPDU, ("!a-mpdu or already re-ordered, flags 0x%x", m->m_flags)); KASSERT(ni->ni_flags & IEEE80211_NODE_HT, ("not an HT sta")); /* NB: m_len known to be sufficient */ wh = mtod(m, struct ieee80211_qosframe *); if (wh->i_fc[0] != IEEE80211_FC0_QOSDATA) { /* * Not QoS data, shouldn't get here but just * return it to the caller for processing. 
*/ return PROCESS; } /* * 802.11-2012 9.3.2.10 - Duplicate detection and recovery. * * Multicast QoS data frames are checked against a different * counter, not the per-TID counter. */ if (IEEE80211_IS_MULTICAST(wh->i_addr1)) return PROCESS; tid = ieee80211_getqos(wh)[0]; tid &= IEEE80211_QOS_TID; rap = &ni->ni_rx_ampdu[tid]; if ((rap->rxa_flags & IEEE80211_AGGR_XCHGPEND) == 0) { /* * No ADDBA request yet, don't touch. */ return PROCESS; } rxseq = le16toh(*(uint16_t *)wh->i_seq); if ((rxseq & IEEE80211_SEQ_FRAG_MASK) != 0) { /* * Fragments are not allowed; toss. */ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT | IEEE80211_MSG_11N, ni->ni_macaddr, "A-MPDU", "fragment, rxseq 0x%x tid %u%s", rxseq, tid, wh->i_fc[1] & IEEE80211_FC1_RETRY ? " (retransmit)" : ""); vap->iv_stats.is_ampdu_rx_drop++; IEEE80211_NODE_STAT(ni, rx_drop); m_freem(m); return CONSUMED; } rxseq >>= IEEE80211_SEQ_SEQ_SHIFT; rap->rxa_nframes++; /* * Handle waiting for the first frame to define the BAW. * Some firmware doesn't provide the RX of the starting point * of the BAW and we have to cope. */ if (rap->rxa_flags & IEEE80211_AGGR_WAITRX) { rap->rxa_flags &= ~IEEE80211_AGGR_WAITRX; rap->rxa_start = rxseq; } again: if (rxseq == rap->rxa_start) { /* * First frame in window. */ if (rap->rxa_qframes != 0) { /* * Dispatch as many packets as we can. */ KASSERT((mbufq_len(&rap->rxa_mq[0]) == 0), ("unexpected dup")); ampdu_dispatch(ni, m); ampdu_rx_dispatch(rap, ni); return CONSUMED; } else { /* * In order; advance window if needed and notify * caller to dispatch directly. */ if (amsdu) { if (amsdu_end) { rap->rxa_start = IEEE80211_SEQ_INC(rxseq); IEEE80211_NODE_STAT(ni, rx_amsdu_more_end); } else { IEEE80211_NODE_STAT(ni, rx_amsdu_more); } } else { rap->rxa_start = IEEE80211_SEQ_INC(rxseq); } return PROCESS; } } /* * Frame is out of order; store if in the BA window. */ /* calculate offset in BA window */ off = IEEE80211_SEQ_SUB(rxseq, rap->rxa_start); if (off < rap->rxa_wnd) { /* * Common case (hopefully): in the BA window. * Sec 9.10.7.6.2 a) (p.137) */ /* * Check for frames sitting too long in the reorder queue. * This should only ever happen if frames are not delivered * without the sender otherwise notifying us (e.g. with a * BAR to move the window). Typically this happens because * of vendor bugs that cause the sequence number to jump. * When this happens we get a gap in the reorder queue that * leaves frame sitting on the queue until they get pushed * out due to window moves. When the vendor does not send * BAR this move only happens due to explicit packet sends * * NB: we only track the time of the oldest frame in the * reorder q; this means that if we flush we might push * frames that still "new"; if this happens then subsequent * frames will result in BA window moves which cost something * but is still better than a big throughput dip. */ if (rap->rxa_qframes != 0) { /* XXX honor batimeout? */ if (ticks - rap->rxa_age > ieee80211_ampdu_age) { /* * Too long since we received the first * frame; flush the reorder buffer. */ if (rap->rxa_qframes != 0) { vap->iv_stats.is_ampdu_rx_age += rap->rxa_qframes; ampdu_rx_flush(ni, rap); } /* * Advance the window if needed and notify * the caller to dispatch directly. */ if (amsdu) { if (amsdu_end) { rap->rxa_start = IEEE80211_SEQ_INC(rxseq); IEEE80211_NODE_STAT(ni, rx_amsdu_more_end); } else { IEEE80211_NODE_STAT(ni, rx_amsdu_more); } } else { rap->rxa_start = IEEE80211_SEQ_INC(rxseq); } return PROCESS; } } else { /* * First frame, start aging timer. 
*/ rap->rxa_age = ticks; } /* save packet - this consumes, no matter what */ ampdu_rx_add_slot(rap, off, tid, rxseq, ni, m, rxs); return CONSUMED; } if (off < IEEE80211_SEQ_BA_RANGE) { /* * Outside the BA window, but within range; * flush the reorder q and move the window. * Sec 9.10.7.6.2 b) (p.138) */ IEEE80211_NOTE(vap, IEEE80211_MSG_11N, ni, "move BA win <%u:%u> (%u frames) rxseq %u tid %u", rap->rxa_start, IEEE80211_SEQ_ADD(rap->rxa_start, rap->rxa_wnd-1), rap->rxa_qframes, rxseq, tid); vap->iv_stats.is_ampdu_rx_move++; /* * The spec says to flush frames up to but not including: * WinStart_B = rxseq - rap->rxa_wnd + 1 * Then insert the frame or notify the caller to process * it immediately. We can safely do this by just starting * over again because we know the frame will now be within * the BA window. */ /* NB: rxa_wnd known to be >0 */ ampdu_rx_flush_upto(ni, rap, IEEE80211_SEQ_SUB(rxseq, rap->rxa_wnd-1)); goto again; } else { /* * Outside the BA window and out of range; toss. * Sec 9.10.7.6.2 c) (p.138) */ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT | IEEE80211_MSG_11N, ni->ni_macaddr, "MPDU", "BA win <%u:%u> (%u frames) rxseq %u tid %u%s", rap->rxa_start, IEEE80211_SEQ_ADD(rap->rxa_start, rap->rxa_wnd-1), rap->rxa_qframes, rxseq, tid, wh->i_fc[1] & IEEE80211_FC1_RETRY ? " (retransmit)" : ""); vap->iv_stats.is_ampdu_rx_drop++; IEEE80211_NODE_STAT(ni, rx_drop); m_freem(m); return CONSUMED; } #undef CONSUMED #undef PROCESS } /* * Process a BAR ctl frame. Dispatch all frames up to * the sequence number of the frame. If this frame is * out of range it's discarded. */ void ieee80211_recv_bar(struct ieee80211_node *ni, struct mbuf *m0) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211_frame_bar *wh; struct ieee80211_rx_ampdu *rap; ieee80211_seq rxseq; int tid, off; if (!ieee80211_recv_bar_ena) { #if 0 IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_11N, ni->ni_macaddr, "BAR", "%s", "processing disabled"); #endif vap->iv_stats.is_ampdu_bar_bad++; return; } wh = mtod(m0, struct ieee80211_frame_bar *); /* XXX check basic BAR */ tid = _IEEE80211_MASKSHIFT(le16toh(wh->i_ctl), IEEE80211_BAR_TID); rap = &ni->ni_rx_ampdu[tid]; if ((rap->rxa_flags & IEEE80211_AGGR_XCHGPEND) == 0) { /* * No ADDBA request yet, don't touch. */ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT | IEEE80211_MSG_11N, ni->ni_macaddr, "BAR", "no BA stream, tid %u", tid); vap->iv_stats.is_ampdu_bar_bad++; return; } vap->iv_stats.is_ampdu_bar_rx++; rxseq = le16toh(wh->i_seq) >> IEEE80211_SEQ_SEQ_SHIFT; if (rxseq == rap->rxa_start) return; /* calculate offset in BA window */ off = IEEE80211_SEQ_SUB(rxseq, rap->rxa_start); if (off < IEEE80211_SEQ_BA_RANGE) { /* * Flush the reorder q up to rxseq and move the window. * Sec 9.10.7.6.3 a) (p.138) */ IEEE80211_NOTE(vap, IEEE80211_MSG_11N, ni, "BAR moves BA win <%u:%u> (%u frames) rxseq %u tid %u", rap->rxa_start, IEEE80211_SEQ_ADD(rap->rxa_start, rap->rxa_wnd-1), rap->rxa_qframes, rxseq, tid); vap->iv_stats.is_ampdu_bar_move++; ampdu_rx_flush_upto(ni, rap, rxseq); if (off >= rap->rxa_wnd) { /* * BAR specifies a window start to the right of BA * window; we must move it explicitly since * ampdu_rx_flush_upto will not. */ rap->rxa_start = rxseq; } } else { /* * Out of range; toss. * Sec 9.10.7.6.3 b) (p.138) */ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT | IEEE80211_MSG_11N, ni->ni_macaddr, "BAR", "BA win <%u:%u> (%u frames) rxseq %u tid %u%s", rap->rxa_start, IEEE80211_SEQ_ADD(rap->rxa_start, rap->rxa_wnd-1), rap->rxa_qframes, rxseq, tid, wh->i_fc[1] & IEEE80211_FC1_RETRY ? 
" (retransmit)" : ""); vap->iv_stats.is_ampdu_bar_oow++; IEEE80211_NODE_STAT(ni, rx_drop); } } /* * Setup HT-specific state in a node. Called only * when HT use is negotiated so we don't do extra * work for temporary and/or legacy sta's. */ void ieee80211_ht_node_init(struct ieee80211_node *ni) { struct ieee80211_tx_ampdu *tap; int tid; IEEE80211_NOTE(ni->ni_vap, IEEE80211_MSG_11N, ni, "%s: called (%p)", __func__, ni); if (ni->ni_flags & IEEE80211_NODE_HT) { /* * Clean AMPDU state on re-associate. This handles the case * where a station leaves w/o notifying us and then returns * before node is reaped for inactivity. */ IEEE80211_NOTE(ni->ni_vap, IEEE80211_MSG_11N, ni, "%s: calling cleanup (%p)", __func__, ni); ieee80211_ht_node_cleanup(ni); } for (tid = 0; tid < WME_NUM_TID; tid++) { tap = &ni->ni_tx_ampdu[tid]; tap->txa_tid = tid; tap->txa_ni = ni; ieee80211_txampdu_init_pps(tap); /* NB: further initialization deferred */ ieee80211_ampdu_rx_init_rap(ni, &ni->ni_rx_ampdu[tid]); } ni->ni_flags |= IEEE80211_NODE_HT | IEEE80211_NODE_AMPDU | IEEE80211_NODE_AMSDU; } /* * Cleanup HT-specific state in a node. Called only * when HT use has been marked. */ void ieee80211_ht_node_cleanup(struct ieee80211_node *ni) { struct ieee80211com *ic = ni->ni_ic; int i; IEEE80211_NOTE(ni->ni_vap, IEEE80211_MSG_11N, ni, "%s: called (%p)", __func__, ni); KASSERT(ni->ni_flags & IEEE80211_NODE_HT, ("not an HT node")); /* XXX optimize this */ for (i = 0; i < WME_NUM_TID; i++) { struct ieee80211_tx_ampdu *tap = &ni->ni_tx_ampdu[i]; if (tap->txa_flags & IEEE80211_AGGR_SETUP) ampdu_tx_stop(tap); } for (i = 0; i < WME_NUM_TID; i++) ic->ic_ampdu_rx_stop(ni, &ni->ni_rx_ampdu[i]); ni->ni_htcap = 0; ni->ni_flags &= ~IEEE80211_NODE_HT_ALL; } /* * Age out HT resources for a station. */ void ieee80211_ht_node_age(struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; uint8_t tid; KASSERT(ni->ni_flags & IEEE80211_NODE_HT, ("not an HT sta")); for (tid = 0; tid < WME_NUM_TID; tid++) { struct ieee80211_rx_ampdu *rap; rap = &ni->ni_rx_ampdu[tid]; if ((rap->rxa_flags & IEEE80211_AGGR_XCHGPEND) == 0) continue; if (rap->rxa_qframes == 0) continue; /* * Check for frames sitting too long in the reorder queue. * See above for more details on what's happening here. */ /* XXX honor batimeout? */ if (ticks - rap->rxa_age > ieee80211_ampdu_age) { /* * Too long since we received the first * frame; flush the reorder buffer. */ vap->iv_stats.is_ampdu_rx_age += rap->rxa_qframes; ampdu_rx_flush(ni, rap); } } } static struct ieee80211_channel * findhtchan(struct ieee80211com *ic, struct ieee80211_channel *c, int htflags) { return ieee80211_find_channel(ic, c->ic_freq, (c->ic_flags &~ IEEE80211_CHAN_HT) | htflags); } /* * Adjust a channel to be HT/non-HT according to the vap's configuration. 
*/ struct ieee80211_channel * ieee80211_ht_adjust_channel(struct ieee80211com *ic, struct ieee80211_channel *chan, int flags) { struct ieee80211_channel *c; if (flags & IEEE80211_FHT_HT) { /* promote to HT if possible */ if (flags & IEEE80211_FHT_USEHT40) { if (!IEEE80211_IS_CHAN_HT40(chan)) { /* NB: arbitrarily pick ht40+ over ht40- */ c = findhtchan(ic, chan, IEEE80211_CHAN_HT40U); if (c == NULL) c = findhtchan(ic, chan, IEEE80211_CHAN_HT40D); if (c == NULL) c = findhtchan(ic, chan, IEEE80211_CHAN_HT20); if (c != NULL) chan = c; } } else if (!IEEE80211_IS_CHAN_HT20(chan)) { c = findhtchan(ic, chan, IEEE80211_CHAN_HT20); if (c != NULL) chan = c; } } else if (IEEE80211_IS_CHAN_HT(chan)) { /* demote to legacy, HT use is disabled */ c = ieee80211_find_channel(ic, chan->ic_freq, chan->ic_flags &~ IEEE80211_CHAN_HT); if (c != NULL) chan = c; } return chan; } /* * Setup HT-specific state for a legacy WDS peer. */ void ieee80211_ht_wds_init(struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211_tx_ampdu *tap; int tid; KASSERT(vap->iv_flags_ht & IEEE80211_FHT_HT, ("no HT requested")); /* XXX check scan cache in case peer has an ap and we have info */ /* * If setup with a legacy channel; locate an HT channel. * Otherwise if the inherited channel (from a companion * AP) is suitable use it so we use the same location * for the extension channel). */ ni->ni_chan = ieee80211_ht_adjust_channel(ni->ni_ic, ni->ni_chan, ieee80211_htchanflags(ni->ni_chan)); ni->ni_htcap = 0; if (vap->iv_flags_ht & IEEE80211_FHT_SHORTGI20) ni->ni_htcap |= IEEE80211_HTCAP_SHORTGI20; if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) { ni->ni_htcap |= IEEE80211_HTCAP_CHWIDTH40; ni->ni_chw = 40; if (IEEE80211_IS_CHAN_HT40U(ni->ni_chan)) ni->ni_ht2ndchan = IEEE80211_HTINFO_2NDCHAN_ABOVE; else if (IEEE80211_IS_CHAN_HT40D(ni->ni_chan)) ni->ni_ht2ndchan = IEEE80211_HTINFO_2NDCHAN_BELOW; if (vap->iv_flags_ht & IEEE80211_FHT_SHORTGI40) ni->ni_htcap |= IEEE80211_HTCAP_SHORTGI40; } else { ni->ni_chw = 20; ni->ni_ht2ndchan = IEEE80211_HTINFO_2NDCHAN_NONE; } ni->ni_htctlchan = ni->ni_chan->ic_ieee; if (vap->iv_flags_ht & IEEE80211_FHT_RIFS) ni->ni_flags |= IEEE80211_NODE_RIFS; /* XXX does it make sense to enable SMPS? */ ni->ni_htopmode = 0; /* XXX need protection state */ ni->ni_htstbc = 0; /* XXX need info */ for (tid = 0; tid < WME_NUM_TID; tid++) { tap = &ni->ni_tx_ampdu[tid]; tap->txa_tid = tid; ieee80211_txampdu_init_pps(tap); } /* NB: AMPDU tx/rx governed by IEEE80211_FHT_AMPDU_{TX,RX} */ ni->ni_flags |= IEEE80211_NODE_HT | IEEE80211_NODE_AMPDU | IEEE80211_NODE_AMSDU; } /* * Notify a VAP of a change in the HTINFO ie if it's a hostap VAP. * * This is to be called from the deferred HT protection update * task once the flags are updated. */ void ieee80211_htinfo_notify(struct ieee80211vap *vap) { IEEE80211_LOCK_ASSERT(vap->iv_ic); if (vap->iv_opmode != IEEE80211_M_HOSTAP) return; if (vap->iv_state != IEEE80211_S_RUN || !IEEE80211_IS_CHAN_HT(vap->iv_bss->ni_chan)) return; IEEE80211_NOTE(vap, IEEE80211_MSG_ASSOC | IEEE80211_MSG_11N, vap->iv_bss, "HT bss occupancy change: %d sta, %d ht, " "%d ht40%s, HT protmode now 0x%x" , vap->iv_sta_assoc , vap->iv_ht_sta_assoc , vap->iv_ht40_sta_assoc , (vap->iv_flags_ht & IEEE80211_FHT_NONHT_PR) ? ", non-HT sta present" : "" , vap->iv_curhtprotmode); ieee80211_beacon_notify(vap, IEEE80211_BEACON_HTINFO); } /* * Calculate HT protection mode from current * state and handle updates. 
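/*
 * ieee80211_ht_adjust_channel() promotes or demotes a channel by
 * rewriting its width flags and looking the result up in the channel
 * table.  A standalone sketch of the flag rewrite itself (hypothetical
 * flag values, not the real IEEE80211_CHAN_* bits):
 */
#if 0	/* illustrative only, not compiled */
#define EX_CHAN_HT20	0x1
#define EX_CHAN_HT40U	0x2
#define EX_CHAN_HT40D	0x4
#define EX_CHAN_HT	(EX_CHAN_HT20 | EX_CHAN_HT40U | EX_CHAN_HT40D)

static int
example_rewrite_chanflags(int flags, int htflags)
{
	/* drop any existing HT width, then apply the requested one */
	return ((flags & ~EX_CHAN_HT) | htflags);
}
#endif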
*/ static void htinfo_update(struct ieee80211vap *vap) { struct ieee80211com *ic = vap->iv_ic; uint8_t protmode; if (vap->iv_sta_assoc != vap->iv_ht_sta_assoc) { protmode = IEEE80211_HTINFO_OPMODE_MIXED | IEEE80211_HTINFO_NONHT_PRESENT; } else if (vap->iv_flags_ht & IEEE80211_FHT_NONHT_PR) { protmode = IEEE80211_HTINFO_OPMODE_PROTOPT | IEEE80211_HTINFO_NONHT_PRESENT; } else if (ic->ic_bsschan != IEEE80211_CHAN_ANYC && IEEE80211_IS_CHAN_HT40(ic->ic_bsschan) && vap->iv_sta_assoc != vap->iv_ht40_sta_assoc) { protmode = IEEE80211_HTINFO_OPMODE_HT20PR; } else { protmode = IEEE80211_HTINFO_OPMODE_PURE; } if (protmode != vap->iv_curhtprotmode) { vap->iv_curhtprotmode = protmode; /* Update VAP with new protection mode */ ieee80211_vap_update_ht_protmode(vap); } } /* * Handle an HT station joining a BSS. */ void ieee80211_ht_node_join(struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; IEEE80211_LOCK_ASSERT(vap->iv_ic); if (ni->ni_flags & IEEE80211_NODE_HT) { vap->iv_ht_sta_assoc++; if (ni->ni_chw == 40) vap->iv_ht40_sta_assoc++; } htinfo_update(vap); } /* * Handle an HT station leaving a BSS. */ void ieee80211_ht_node_leave(struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; IEEE80211_LOCK_ASSERT(vap->iv_ic); if (ni->ni_flags & IEEE80211_NODE_HT) { vap->iv_ht_sta_assoc--; if (ni->ni_chw == 40) vap->iv_ht40_sta_assoc--; } htinfo_update(vap); } /* * Public version of htinfo_update; used for processing * beacon frames from overlapping bss. * * Caller can specify either IEEE80211_HTINFO_OPMODE_MIXED * (on receipt of a beacon that advertises MIXED) or * IEEE80211_HTINFO_OPMODE_PROTOPT (on receipt of a beacon * from an overlapping legacy bss). We treat MIXED with * a higher precedence than PROTOPT (i.e. we will not change * change PROTOPT -> MIXED; only MIXED -> PROTOPT). This * corresponds to how we handle things in htinfo_update. * */ void ieee80211_htprot_update(struct ieee80211vap *vap, int protmode) { struct ieee80211com *ic = vap->iv_ic; #define OPMODE(x) _IEEE80211_SHIFTMASK(x, IEEE80211_HTINFO_OPMODE) IEEE80211_LOCK(ic); /* track non-HT station presence */ KASSERT(protmode & IEEE80211_HTINFO_NONHT_PRESENT, ("protmode 0x%x", protmode)); vap->iv_flags_ht |= IEEE80211_FHT_NONHT_PR; vap->iv_lastnonht = ticks; if (protmode != vap->iv_curhtprotmode && (OPMODE(vap->iv_curhtprotmode) != IEEE80211_HTINFO_OPMODE_MIXED || OPMODE(protmode) == IEEE80211_HTINFO_OPMODE_PROTOPT)) { vap->iv_curhtprotmode = protmode; /* Update VAP with new protection mode */ ieee80211_vap_update_ht_protmode(vap); } IEEE80211_UNLOCK(ic); #undef OPMODE } /* * Time out presence of an overlapping bss with non-HT * stations. When operating in hostap mode we listen for * beacons from other stations and if we identify a non-HT * station is present we update the opmode field of the * HTINFO ie. To identify when all non-HT stations are * gone we time out this condition. */ void ieee80211_ht_timeout(struct ieee80211vap *vap) { IEEE80211_LOCK_ASSERT(vap->iv_ic); if ((vap->iv_flags_ht & IEEE80211_FHT_NONHT_PR) && ieee80211_time_after(ticks, vap->iv_lastnonht + IEEE80211_NONHT_PRESENT_AGE)) { IEEE80211_DPRINTF(vap, IEEE80211_MSG_11N, "%s", "time out non-HT STA present on channel"); vap->iv_flags_ht &= ~IEEE80211_FHT_NONHT_PR; htinfo_update(vap); } } /* * Process an 802.11n HT capabilities ie. 
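/*
 * htinfo_update() above derives the HT protection mode purely from the
 * association counters and the "overlapping non-HT BSS seen" flag.  A
 * standalone sketch of that decision (hypothetical constants mirroring
 * the IEEE80211_HTINFO_OPMODE_* values, not part of this file):
 */
#if 0	/* illustrative only, not compiled */
enum ex_protmode { EX_PURE, EX_PROTOPT, EX_HT20PR, EX_MIXED };

static enum ex_protmode
example_htprot(int nsta, int nhtsta, int nht40sta, int nonht_bss_seen,
    int bss_is_ht40)
{
	if (nsta != nhtsta)
		return (EX_MIXED);	/* legacy station associated */
	if (nonht_bss_seen)
		return (EX_PROTOPT);	/* overlapping legacy BSS observed */
	if (bss_is_ht40 && nsta != nht40sta)
		return (EX_HT20PR);	/* HT20-only station on an HT40 BSS */
	return (EX_PURE);
}
#endif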
*/ void ieee80211_parse_htcap(struct ieee80211_node *ni, const uint8_t *ie) { if (ie[0] == IEEE80211_ELEMID_VENDOR) { /* * Station used Vendor OUI ie to associate; * mark the node so when we respond we'll use * the Vendor OUI's and not the standard ie's. */ ni->ni_flags |= IEEE80211_NODE_HTCOMPAT; ie += 4; } else ni->ni_flags &= ~IEEE80211_NODE_HTCOMPAT; ni->ni_htcap = le16dec(ie + __offsetof(struct ieee80211_ie_htcap, hc_cap)); ni->ni_htparam = ie[__offsetof(struct ieee80211_ie_htcap, hc_param)]; } static void htinfo_parse(struct ieee80211_node *ni, const struct ieee80211_ie_htinfo *htinfo) { uint16_t w; ni->ni_htctlchan = htinfo->hi_ctrlchannel; ni->ni_ht2ndchan = _IEEE80211_SHIFTMASK(htinfo->hi_byte1, IEEE80211_HTINFO_2NDCHAN); w = le16dec(&htinfo->hi_byte2); ni->ni_htopmode = _IEEE80211_SHIFTMASK(w, IEEE80211_HTINFO_OPMODE); w = le16dec(&htinfo->hi_byte45); ni->ni_htstbc = _IEEE80211_SHIFTMASK(w, IEEE80211_HTINFO_BASIC_STBCMCS); } /* * Parse an 802.11n HT info ie and save useful information * to the node state. Note this does not effect any state * changes such as for channel width change. */ void ieee80211_parse_htinfo(struct ieee80211_node *ni, const uint8_t *ie) { if (ie[0] == IEEE80211_ELEMID_VENDOR) ie += 4; htinfo_parse(ni, (const struct ieee80211_ie_htinfo *) ie); } /* * Handle 11n/11ac channel switch. * * Use the received HT/VHT ie's to identify the right channel to use. * If we cannot locate it in the channel table then fallback to * legacy operation. * * Note that we use this information to identify the node's * channel only; the caller is responsible for insuring any * required channel change is done (e.g. in sta mode when * parsing the contents of a beacon frame). */ static int htinfo_update_chw(struct ieee80211_node *ni, int htflags, int vhtflags) { struct ieee80211com *ic = ni->ni_ic; struct ieee80211_channel *c; int chanflags; int ret = 0; /* * First step - do HT/VHT only channel lookup based on operating mode * flags. This involves masking out the VHT flags as well. * Otherwise we end up doing the full channel walk each time * we trigger this, which is expensive. */ chanflags = (ni->ni_chan->ic_flags &~ (IEEE80211_CHAN_HT | IEEE80211_CHAN_VHT)) | htflags | vhtflags; if (chanflags == ni->ni_chan->ic_flags) goto done; /* * If HT /or/ VHT flags have changed then check both. * We need to start by picking a HT channel anyway. */ c = NULL; chanflags = (ni->ni_chan->ic_flags &~ (IEEE80211_CHAN_HT | IEEE80211_CHAN_VHT)) | htflags; /* XXX not right for ht40- */ c = ieee80211_find_channel(ic, ni->ni_chan->ic_freq, chanflags); if (c == NULL && (htflags & IEEE80211_CHAN_HT40)) { /* * No HT40 channel entry in our table; fall back * to HT20 operation. This should not happen. */ c = findhtchan(ic, ni->ni_chan, IEEE80211_CHAN_HT20); #if 0 IEEE80211_NOTE(ni->ni_vap, IEEE80211_MSG_ASSOC | IEEE80211_MSG_11N, ni, "no HT40 channel (freq %u), falling back to HT20", ni->ni_chan->ic_freq); #endif /* XXX stat */ } /* Nothing found - leave it alone; move onto VHT */ if (c == NULL) c = ni->ni_chan; /* * If it's non-HT, then bail out now. */ if (! IEEE80211_IS_CHAN_HT(c)) { IEEE80211_NOTE(ni->ni_vap, IEEE80211_MSG_ASSOC | IEEE80211_MSG_11N, ni, "not HT; skipping VHT check (%u/0x%x)", c->ic_freq, c->ic_flags); goto done; } /* * Next step - look at the current VHT flags and determine * if we need to upgrade. Mask out the VHT and HT flags since * the vhtflags field will already have the correct HT * flags to use. 
*/ if (IEEE80211_CONF_VHT(ic) && ni->ni_vhtcap != 0 && vhtflags != 0) { chanflags = (c->ic_flags &~ (IEEE80211_CHAN_HT | IEEE80211_CHAN_VHT)) | vhtflags; IEEE80211_NOTE(ni->ni_vap, IEEE80211_MSG_ASSOC | IEEE80211_MSG_11N, ni, "%s: VHT; chanwidth=0x%02x; vhtflags=0x%08x", __func__, ni->ni_vht_chanwidth, vhtflags); IEEE80211_NOTE(ni->ni_vap, IEEE80211_MSG_ASSOC | IEEE80211_MSG_11N, ni, "%s: VHT; trying lookup for %d/0x%08x", __func__, c->ic_freq, chanflags); c = ieee80211_find_channel(ic, c->ic_freq, chanflags); } /* Finally, if it's changed */ if (c != NULL && c != ni->ni_chan) { IEEE80211_NOTE(ni->ni_vap, IEEE80211_MSG_ASSOC | IEEE80211_MSG_11N, ni, "switch station to %s%d channel %u/0x%x", IEEE80211_IS_CHAN_VHT(c) ? "VHT" : "HT", IEEE80211_IS_CHAN_VHT80(c) ? 80 : (IEEE80211_IS_CHAN_HT40(c) ? 40 : 20), c->ic_freq, c->ic_flags); ni->ni_chan = c; ret = 1; } /* NB: caller responsible for forcing any channel change */ done: /* update node's (11n) tx channel width */ ni->ni_chw = IEEE80211_IS_CHAN_HT40(ni->ni_chan)? 40 : 20; return (ret); } /* * Update 11n MIMO PS state according to received htcap. */ static __inline int htcap_update_mimo_ps(struct ieee80211_node *ni) { uint16_t oflags = ni->ni_flags; switch (ni->ni_htcap & IEEE80211_HTCAP_SMPS) { case IEEE80211_HTCAP_SMPS_DYNAMIC: ni->ni_flags |= IEEE80211_NODE_MIMO_PS; ni->ni_flags |= IEEE80211_NODE_MIMO_RTS; break; case IEEE80211_HTCAP_SMPS_ENA: ni->ni_flags |= IEEE80211_NODE_MIMO_PS; ni->ni_flags &= ~IEEE80211_NODE_MIMO_RTS; break; case IEEE80211_HTCAP_SMPS_OFF: default: /* disable on rx of reserved value */ ni->ni_flags &= ~IEEE80211_NODE_MIMO_PS; ni->ni_flags &= ~IEEE80211_NODE_MIMO_RTS; break; } return (oflags ^ ni->ni_flags); } /* * Update short GI state according to received htcap * and local settings. */ static __inline void htcap_update_shortgi(struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; ni->ni_flags &= ~(IEEE80211_NODE_SGI20|IEEE80211_NODE_SGI40); if ((ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20) && (vap->iv_flags_ht & IEEE80211_FHT_SHORTGI20)) ni->ni_flags |= IEEE80211_NODE_SGI20; if ((ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40) && (vap->iv_flags_ht & IEEE80211_FHT_SHORTGI40)) ni->ni_flags |= IEEE80211_NODE_SGI40; } /* * Update LDPC state according to received htcap * and local settings. */ static __inline void htcap_update_ldpc(struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; if ((ni->ni_htcap & IEEE80211_HTCAP_LDPC) && (vap->iv_flags_ht & IEEE80211_FHT_LDPC_TX)) ni->ni_flags |= IEEE80211_NODE_LDPC; } /* * Parse and update HT-related state extracted from * the HT cap and info ie's. * * This is called from the STA management path and * the ieee80211_node_join() path. It will take into * account the IEs discovered during scanning and * adjust things accordingly. */ void ieee80211_ht_updateparams(struct ieee80211_node *ni, const uint8_t *htcapie, const uint8_t *htinfoie) { struct ieee80211vap *vap = ni->ni_vap; const struct ieee80211_ie_htinfo *htinfo; ieee80211_parse_htcap(ni, htcapie); if (vap->iv_htcaps & IEEE80211_HTC_SMPS) htcap_update_mimo_ps(ni); htcap_update_shortgi(ni); htcap_update_ldpc(ni); if (htinfoie[0] == IEEE80211_ELEMID_VENDOR) htinfoie += 4; htinfo = (const struct ieee80211_ie_htinfo *) htinfoie; htinfo_parse(ni, htinfo); /* * Defer the node channel change; we need to now * update VHT parameters before we do it. 
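/*
 * htcap_update_mimo_ps() above maps the peer's SM power save field onto
 * a pair of node flags: static SMPS means the peer keeps one receive
 * chain active, dynamic SMPS additionally asks us to precede MIMO frames
 * with RTS.  A standalone sketch of that mapping (hypothetical constants,
 * not part of this file):
 */
#if 0	/* illustrative only, not compiled */
enum ex_smps { EX_SMPS_STATIC, EX_SMPS_DYNAMIC, EX_SMPS_OFF };

static void
example_smps_to_flags(enum ex_smps smps, int *mimo_ps, int *mimo_rts)
{
	switch (smps) {
	case EX_SMPS_DYNAMIC:
		*mimo_ps = 1;
		*mimo_rts = 1;
		break;
	case EX_SMPS_STATIC:
		*mimo_ps = 1;
		*mimo_rts = 0;
		break;
	default:		/* SMPS disabled (or reserved value) */
		*mimo_ps = 0;
		*mimo_rts = 0;
		break;
	}
}
#endif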
*/ if ((htinfo->hi_byte1 & IEEE80211_HTINFO_RIFSMODE_PERM) && (vap->iv_flags_ht & IEEE80211_FHT_RIFS)) ni->ni_flags |= IEEE80211_NODE_RIFS; else ni->ni_flags &= ~IEEE80211_NODE_RIFS; } static uint32_t ieee80211_vht_get_vhtflags(struct ieee80211_node *ni, uint32_t htflags) { struct ieee80211vap *vap = ni->ni_vap; uint32_t vhtflags = 0; vhtflags = 0; if (ni->ni_flags & IEEE80211_NODE_VHT && vap->iv_flags_vht & IEEE80211_FVHT_VHT) { if ((ni->ni_vht_chanwidth == IEEE80211_VHT_CHANWIDTH_160MHZ) && /* XXX 2 means "160MHz and 80+80MHz", 1 means "160MHz" */ (_IEEE80211_MASKSHIFT(vap->iv_vhtcaps, IEEE80211_VHTCAP_SUPP_CHAN_WIDTH_MASK) >= 1) && (vap->iv_flags_vht & IEEE80211_FVHT_USEVHT160)) { vhtflags = IEEE80211_CHAN_VHT160; /* Mirror the HT40 flags */ if (htflags == IEEE80211_CHAN_HT40U) { vhtflags |= IEEE80211_CHAN_HT40U; } else if (htflags == IEEE80211_CHAN_HT40D) { vhtflags |= IEEE80211_CHAN_HT40D; } } else if ((ni->ni_vht_chanwidth == IEEE80211_VHT_CHANWIDTH_80P80MHZ) && /* XXX 2 means "160MHz and 80+80MHz" */ (_IEEE80211_MASKSHIFT(vap->iv_vhtcaps, IEEE80211_VHTCAP_SUPP_CHAN_WIDTH_MASK) == 2) && (vap->iv_flags_vht & IEEE80211_FVHT_USEVHT80P80)) { vhtflags = IEEE80211_CHAN_VHT80P80; /* Mirror the HT40 flags */ if (htflags == IEEE80211_CHAN_HT40U) { vhtflags |= IEEE80211_CHAN_HT40U; } else if (htflags == IEEE80211_CHAN_HT40D) { vhtflags |= IEEE80211_CHAN_HT40D; } } else if ((ni->ni_vht_chanwidth == IEEE80211_VHT_CHANWIDTH_80MHZ) && (vap->iv_flags_vht & IEEE80211_FVHT_USEVHT80)) { vhtflags = IEEE80211_CHAN_VHT80; /* Mirror the HT40 flags */ if (htflags == IEEE80211_CHAN_HT40U) { vhtflags |= IEEE80211_CHAN_HT40U; } else if (htflags == IEEE80211_CHAN_HT40D) { vhtflags |= IEEE80211_CHAN_HT40D; } } else if (ni->ni_vht_chanwidth == IEEE80211_VHT_CHANWIDTH_USE_HT) { /* Mirror the HT40 flags */ /* * XXX TODO: if ht40 is disabled, but vht40 isn't * disabled then this logic will get very, very sad. * It's quite possible the only sane thing to do is * to not have vht40 as an option, and just obey * 'ht40' as that flag. */ if ((htflags == IEEE80211_CHAN_HT40U) && (vap->iv_flags_vht & IEEE80211_FVHT_USEVHT40)) { vhtflags = IEEE80211_CHAN_VHT40U | IEEE80211_CHAN_HT40U; } else if (htflags == IEEE80211_CHAN_HT40D && (vap->iv_flags_vht & IEEE80211_FVHT_USEVHT40)) { vhtflags = IEEE80211_CHAN_VHT40D | IEEE80211_CHAN_HT40D; } else if (htflags == IEEE80211_CHAN_HT20) { vhtflags = IEEE80211_CHAN_VHT20 | IEEE80211_CHAN_HT20; } } else { vhtflags = IEEE80211_CHAN_VHT20; } } return (vhtflags); } /* * Final part of updating the HT parameters. * * This is called from the STA management path and * the ieee80211_node_join() path. It will take into * account the IEs discovered during scanning and * adjust things accordingly. * * This is done after a call to ieee80211_ht_updateparams() * because it (and the upcoming VHT version of updateparams) * needs to ensure everything is parsed before htinfo_update_chw() * is called - which will change the channel config for the * node for us. */ int ieee80211_ht_updateparams_final(struct ieee80211_node *ni, const uint8_t *htcapie, const uint8_t *htinfoie) { struct ieee80211vap *vap = ni->ni_vap; const struct ieee80211_ie_htinfo *htinfo; int htflags, vhtflags; int ret = 0; htinfo = (const struct ieee80211_ie_htinfo *) htinfoie; htflags = (vap->iv_flags_ht & IEEE80211_FHT_HT) ? 
IEEE80211_CHAN_HT20 : 0; /* NB: honor operating mode constraint */ if ((htinfo->hi_byte1 & IEEE80211_HTINFO_TXWIDTH_2040) && (vap->iv_flags_ht & IEEE80211_FHT_USEHT40)) { if (ni->ni_ht2ndchan == IEEE80211_HTINFO_2NDCHAN_ABOVE) htflags = IEEE80211_CHAN_HT40U; else if (ni->ni_ht2ndchan == IEEE80211_HTINFO_2NDCHAN_BELOW) htflags = IEEE80211_CHAN_HT40D; } /* * VHT flags - do much the same; check whether VHT is available * and if so, what our ideal channel use would be based on our * capabilities and the (pre-parsed) VHT info IE. */ vhtflags = ieee80211_vht_get_vhtflags(ni, htflags); if (htinfo_update_chw(ni, htflags, vhtflags)) ret = 1; return (ret); } /* * Parse and update HT-related state extracted from the HT cap ie * for a station joining an HT BSS. * * This is called from the hostap path for each station. */ void ieee80211_ht_updatehtcap(struct ieee80211_node *ni, const uint8_t *htcapie) { struct ieee80211vap *vap = ni->ni_vap; ieee80211_parse_htcap(ni, htcapie); if (vap->iv_htcaps & IEEE80211_HTC_SMPS) htcap_update_mimo_ps(ni); htcap_update_shortgi(ni); htcap_update_ldpc(ni); } /* * Called once HT and VHT capabilities are parsed in hostap mode - * this will adjust the channel configuration of the given node * based on the configuration and capabilities. */ void ieee80211_ht_updatehtcap_final(struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; int htflags; int vhtflags; /* NB: honor operating mode constraint */ /* XXX 40 MHz intolerant */ htflags = (vap->iv_flags_ht & IEEE80211_FHT_HT) ? IEEE80211_CHAN_HT20 : 0; if ((ni->ni_htcap & IEEE80211_HTCAP_CHWIDTH40) && (vap->iv_flags_ht & IEEE80211_FHT_USEHT40)) { if (IEEE80211_IS_CHAN_HT40U(vap->iv_bss->ni_chan)) htflags = IEEE80211_CHAN_HT40U; else if (IEEE80211_IS_CHAN_HT40D(vap->iv_bss->ni_chan)) htflags = IEEE80211_CHAN_HT40D; } /* * VHT flags - do much the same; check whether VHT is available * and if so, what our ideal channel use would be based on our * capabilities and the (pre-parsed) VHT info IE. */ vhtflags = ieee80211_vht_get_vhtflags(ni, htflags); (void) htinfo_update_chw(ni, htflags, vhtflags); } /* * Install received HT rate set by parsing the HT cap ie. */ int ieee80211_setup_htrates(struct ieee80211_node *ni, const uint8_t *ie, int flags) { struct ieee80211com *ic = ni->ni_ic; struct ieee80211vap *vap = ni->ni_vap; const struct ieee80211_ie_htcap *htcap; struct ieee80211_htrateset *rs; int i, maxequalmcs, maxunequalmcs; maxequalmcs = ic->ic_txstream * 8 - 1; maxunequalmcs = 0; if (ic->ic_htcaps & IEEE80211_HTC_TXUNEQUAL) { if (ic->ic_txstream >= 2) maxunequalmcs = 38; if (ic->ic_txstream >= 3) maxunequalmcs = 52; if (ic->ic_txstream >= 4) maxunequalmcs = 76; } rs = &ni->ni_htrates; memset(rs, 0, sizeof(*rs)); if (ie != NULL) { if (ie[0] == IEEE80211_ELEMID_VENDOR) ie += 4; htcap = (const struct ieee80211_ie_htcap *) ie; for (i = 0; i < IEEE80211_HTRATE_MAXSIZE; i++) { if (isclr(htcap->hc_mcsset, i)) continue; if (rs->rs_nrates == IEEE80211_HTRATE_MAXSIZE) { IEEE80211_NOTE(vap, IEEE80211_MSG_XRATE | IEEE80211_MSG_11N, ni, "WARNING, HT rate set too large; only " "using %u rates", IEEE80211_HTRATE_MAXSIZE); vap->iv_stats.is_rx_rstoobig++; break; } if (i <= 31 && i > maxequalmcs) continue; if (i == 32 && (ic->ic_htcaps & IEEE80211_HTC_TXMCS32) == 0) continue; if (i > 32 && i > maxunequalmcs) continue; rs->rs_rates[rs->rs_nrates++] = i; } } return ieee80211_fix_rate(ni, (struct ieee80211_rateset *) rs, flags); } /* * Mark rates in a node's HT rate set as basic according * to the information in the supplied HT info ie. 
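/*
 * ieee80211_setup_htrates() above intersects the peer's MCS set with
 * what we can transmit: equal-modulation MCS 0..(8*txstream - 1),
 * MCS 32 only when 40 MHz duplicate is supported, and the
 * unequal-modulation ranges 33-38 / 39-52 / 53-76 only with 2/3/4 TX
 * streams.  A standalone sketch of the "highest equal-modulation MCS"
 * part of that filter (hypothetical helper, not part of this file):
 */
#if 0	/* illustrative only, not compiled */
static int
example_max_equal_mcs(int txstream)
{
	/* 1 stream -> MCS 7, 2 -> 15, 3 -> 23, 4 -> 31 */
	return (txstream * 8 - 1);
}
#endif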
*/ void ieee80211_setup_basic_htrates(struct ieee80211_node *ni, const uint8_t *ie) { const struct ieee80211_ie_htinfo *htinfo; struct ieee80211_htrateset *rs; int i, j; if (ie[0] == IEEE80211_ELEMID_VENDOR) ie += 4; htinfo = (const struct ieee80211_ie_htinfo *) ie; rs = &ni->ni_htrates; if (rs->rs_nrates == 0) { IEEE80211_NOTE(ni->ni_vap, IEEE80211_MSG_XRATE | IEEE80211_MSG_11N, ni, "%s", "WARNING, empty HT rate set"); return; } for (i = 0; i < IEEE80211_HTRATE_MAXSIZE; i++) { if (isclr(htinfo->hi_basicmcsset, i)) continue; for (j = 0; j < rs->rs_nrates; j++) if ((rs->rs_rates[j] & IEEE80211_RATE_VAL) == i) rs->rs_rates[j] |= IEEE80211_RATE_BASIC; } } static void ampdu_tx_setup(struct ieee80211_tx_ampdu *tap) { callout_init(&tap->txa_timer, 1); tap->txa_flags |= IEEE80211_AGGR_SETUP; tap->txa_lastsample = ticks; } static void ampdu_tx_stop(struct ieee80211_tx_ampdu *tap) { struct ieee80211_node *ni = tap->txa_ni; struct ieee80211com *ic = ni->ni_ic; IEEE80211_NOTE(tap->txa_ni->ni_vap, IEEE80211_MSG_11N, tap->txa_ni, "%s: called", __func__); KASSERT(tap->txa_flags & IEEE80211_AGGR_SETUP, ("txa_flags 0x%x tid %d ac %d", tap->txa_flags, tap->txa_tid, TID_TO_WME_AC(tap->txa_tid))); /* * Stop BA stream if setup so driver has a chance * to reclaim any resources it might have allocated. */ ic->ic_addba_stop(ni, tap); /* * Stop any pending BAR transmit. */ bar_stop_timer(tap); /* * Reset packet estimate. */ ieee80211_txampdu_init_pps(tap); /* NB: clearing NAK means we may re-send ADDBA */ tap->txa_flags &= ~(IEEE80211_AGGR_SETUP | IEEE80211_AGGR_NAK); } /* * ADDBA response timeout. * * If software aggregation and per-TID queue management was done here, * that queue would be unpaused after the ADDBA timeout occurs. */ static void addba_timeout(void *arg) { struct ieee80211_tx_ampdu *tap = arg; struct ieee80211_node *ni = tap->txa_ni; struct ieee80211com *ic = ni->ni_ic; /* XXX ? */ tap->txa_flags &= ~IEEE80211_AGGR_XCHGPEND; tap->txa_attempts++; ic->ic_addba_response_timeout(ni, tap); } static void addba_start_timeout(struct ieee80211_tx_ampdu *tap) { /* XXX use CALLOUT_PENDING instead? */ callout_reset(&tap->txa_timer, ieee80211_addba_timeout, addba_timeout, tap); tap->txa_flags |= IEEE80211_AGGR_XCHGPEND; tap->txa_nextrequest = ticks + ieee80211_addba_timeout; } static void addba_stop_timeout(struct ieee80211_tx_ampdu *tap) { /* XXX use CALLOUT_PENDING instead? */ if (tap->txa_flags & IEEE80211_AGGR_XCHGPEND) { callout_stop(&tap->txa_timer); tap->txa_flags &= ~IEEE80211_AGGR_XCHGPEND; } } static void null_addba_response_timeout(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap) { } /* * Default method for requesting A-MPDU tx aggregation. * We setup the specified state block and start a timer * to wait for an ADDBA response frame. */ static int ieee80211_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, int dialogtoken, int baparamset, int batimeout) { int bufsiz; /* XXX locking */ tap->txa_token = dialogtoken; tap->txa_flags |= IEEE80211_AGGR_IMMEDIATE; bufsiz = _IEEE80211_MASKSHIFT(baparamset, IEEE80211_BAPS_BUFSIZ); tap->txa_wnd = (bufsiz == 0) ? IEEE80211_AGGR_BAWMAX : min(bufsiz, IEEE80211_AGGR_BAWMAX); addba_start_timeout(tap); return 1; } /* * Called by drivers that wish to request an ADDBA session be * setup. This brings it up and starts the request timer. 
*/ int ieee80211_ampdu_tx_request_ext(struct ieee80211_node *ni, int tid) { struct ieee80211_tx_ampdu *tap; if (tid < 0 || tid > 15) return (0); tap = &ni->ni_tx_ampdu[tid]; /* XXX locking */ if ((tap->txa_flags & IEEE80211_AGGR_SETUP) == 0) { /* do deferred setup of state */ ampdu_tx_setup(tap); } /* XXX hack for not doing proper locking */ tap->txa_flags &= ~IEEE80211_AGGR_NAK; addba_start_timeout(tap); return (1); } /* * Called by drivers that have marked a session as active. */ int ieee80211_ampdu_tx_request_active_ext(struct ieee80211_node *ni, int tid, int status) { struct ieee80211_tx_ampdu *tap; if (tid < 0 || tid > 15) return (0); tap = &ni->ni_tx_ampdu[tid]; /* XXX locking */ addba_stop_timeout(tap); if (status == 1) { tap->txa_flags |= IEEE80211_AGGR_RUNNING; tap->txa_attempts = 0; } else { /* mark tid so we don't try again */ tap->txa_flags |= IEEE80211_AGGR_NAK; } return (1); } /* * Default method for processing an A-MPDU tx aggregation * response. We shutdown any pending timer and update the * state block according to the reply. */ static int ieee80211_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, int status, int baparamset, int batimeout) { struct ieee80211vap *vap = ni->ni_vap; int bufsiz; /* XXX locking */ addba_stop_timeout(tap); if (status == IEEE80211_STATUS_SUCCESS) { bufsiz = _IEEE80211_MASKSHIFT(baparamset, IEEE80211_BAPS_BUFSIZ); /* XXX override our request? */ tap->txa_wnd = (bufsiz == 0) ? IEEE80211_AGGR_BAWMAX : min(bufsiz, IEEE80211_AGGR_BAWMAX); #ifdef __notyet__ tid = _IEEE80211_MASKSHIFT(baparamset, IEEE80211_BAPS_TID); #endif tap->txa_flags |= IEEE80211_AGGR_RUNNING; tap->txa_attempts = 0; /* TODO: this should be a vap flag */ if ((vap->iv_htcaps & IEEE80211_HTC_TX_AMSDU_AMPDU) && (ni->ni_flags & IEEE80211_NODE_AMSDU_TX) && (_IEEE80211_MASKSHIFT(baparamset, IEEE80211_BAPS_AMSDU))) tap->txa_flags |= IEEE80211_AGGR_AMSDU; else tap->txa_flags &= ~IEEE80211_AGGR_AMSDU; } else { /* mark tid so we don't try again */ tap->txa_flags |= IEEE80211_AGGR_NAK; } return 1; } /* * Default method for stopping A-MPDU tx aggregation. * Any timer is cleared and we drain any pending frames. */ static void ieee80211_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap) { /* XXX locking */ addba_stop_timeout(tap); if (tap->txa_flags & IEEE80211_AGGR_RUNNING) { /* XXX clear aggregation queue */ tap->txa_flags &= ~(IEEE80211_AGGR_RUNNING | IEEE80211_AGGR_AMSDU); } tap->txa_attempts = 0; } /* * Process a received action frame using the default aggregation * policy. We intercept ADDBA-related frames and use them to * update our aggregation state. All other frames are passed up * for processing by ieee80211_recv_action. 
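/*
 * Both the ADDBA request and response paths above clamp the negotiated
 * BA window: a buffer size of zero means "use the maximum", anything
 * else is limited to IEEE80211_AGGR_BAWMAX.  A standalone sketch of that
 * clamp (hypothetical helper, assuming the usual maximum of 64; not part
 * of this file):
 */
#if 0	/* illustrative only, not compiled */
#define EX_AGGR_BAWMAX	64

static int
example_ba_window(int bufsiz)
{
	/* e.g. 0 -> 64, 32 -> 32, 256 -> 64 */
	if (bufsiz == 0 || bufsiz > EX_AGGR_BAWMAX)
		return (EX_AGGR_BAWMAX);
	return (bufsiz);
}
#endif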
*/ static int ht_recv_action_ba_addba_request(struct ieee80211_node *ni, const struct ieee80211_frame *wh, const uint8_t *frm, const uint8_t *efrm) { struct ieee80211com *ic = ni->ni_ic; struct ieee80211vap *vap = ni->ni_vap; struct ieee80211_rx_ampdu *rap; uint8_t dialogtoken; uint16_t baparamset, batimeout, baseqctl; uint16_t args[5]; int tid; dialogtoken = frm[2]; baparamset = le16dec(frm+3); batimeout = le16dec(frm+5); baseqctl = le16dec(frm+7); tid = _IEEE80211_MASKSHIFT(baparamset, IEEE80211_BAPS_TID); IEEE80211_NOTE(vap, IEEE80211_MSG_ACTION | IEEE80211_MSG_11N, ni, "recv ADDBA request: dialogtoken %u baparamset 0x%x " "(tid %d bufsiz %d) batimeout %d baseqctl %d:%d amsdu %d", dialogtoken, baparamset, tid, _IEEE80211_MASKSHIFT(baparamset, IEEE80211_BAPS_BUFSIZ), batimeout, _IEEE80211_MASKSHIFT(baseqctl, IEEE80211_BASEQ_START), _IEEE80211_MASKSHIFT(baseqctl, IEEE80211_BASEQ_FRAG), _IEEE80211_MASKSHIFT(baparamset, IEEE80211_BAPS_AMSDU)); rap = &ni->ni_rx_ampdu[tid]; /* Send ADDBA response */ args[0] = dialogtoken; /* * NB: We ack only if the sta associated with HT and * the ap is configured to do AMPDU rx (the latter * violates the 11n spec and is mostly for testing). */ if ((ni->ni_flags & IEEE80211_NODE_AMPDU_RX) && (vap->iv_flags_ht & IEEE80211_FHT_AMPDU_RX)) { /* XXX TODO: handle ampdu_rx_start failure */ ic->ic_ampdu_rx_start(ni, rap, baparamset, batimeout, baseqctl); args[1] = IEEE80211_STATUS_SUCCESS; } else { IEEE80211_NOTE(vap, IEEE80211_MSG_ACTION | IEEE80211_MSG_11N, ni, "reject ADDBA request: %s", ni->ni_flags & IEEE80211_NODE_AMPDU_RX ? "administratively disabled" : "not negotiated for station"); vap->iv_stats.is_addba_reject++; args[1] = IEEE80211_STATUS_UNSPECIFIED; } /* XXX honor rap flags? */ args[2] = IEEE80211_BAPS_POLICY_IMMEDIATE | _IEEE80211_SHIFTMASK(tid, IEEE80211_BAPS_TID) | _IEEE80211_SHIFTMASK(rap->rxa_wnd, IEEE80211_BAPS_BUFSIZ) ; /* * TODO: we're out of iv_flags_ht fields; once * this is extended we should make this configurable. */ if ((baparamset & IEEE80211_BAPS_AMSDU) && (ni->ni_flags & IEEE80211_NODE_AMSDU_RX) && (vap->iv_htcaps & IEEE80211_HTC_RX_AMSDU_AMPDU)) args[2] |= IEEE80211_BAPS_AMSDU; args[3] = 0; args[4] = 0; ic->ic_send_action(ni, IEEE80211_ACTION_CAT_BA, IEEE80211_ACTION_BA_ADDBA_RESPONSE, args); return 0; } static int ht_recv_action_ba_addba_response(struct ieee80211_node *ni, const struct ieee80211_frame *wh, const uint8_t *frm, const uint8_t *efrm) { struct ieee80211com *ic = ni->ni_ic; struct ieee80211vap *vap = ni->ni_vap; struct ieee80211_tx_ampdu *tap; uint8_t dialogtoken, policy; uint16_t baparamset, batimeout, code; int tid; #ifdef IEEE80211_DEBUG int amsdu, bufsiz; #endif dialogtoken = frm[2]; code = le16dec(frm+3); baparamset = le16dec(frm+5); tid = _IEEE80211_MASKSHIFT(baparamset, IEEE80211_BAPS_TID); #ifdef IEEE80211_DEBUG bufsiz = _IEEE80211_MASKSHIFT(baparamset, IEEE80211_BAPS_BUFSIZ); amsdu = !! 
_IEEE80211_MASKSHIFT(baparamset, IEEE80211_BAPS_AMSDU); #endif policy = _IEEE80211_MASKSHIFT(baparamset, IEEE80211_BAPS_POLICY); batimeout = le16dec(frm+7); tap = &ni->ni_tx_ampdu[tid]; if ((tap->txa_flags & IEEE80211_AGGR_XCHGPEND) == 0) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ACTION | IEEE80211_MSG_11N, ni->ni_macaddr, "ADDBA response", "no pending ADDBA, tid %d dialogtoken %u " "code %d", tid, dialogtoken, code); vap->iv_stats.is_addba_norequest++; return 0; } if (dialogtoken != tap->txa_token) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ACTION | IEEE80211_MSG_11N, ni->ni_macaddr, "ADDBA response", "dialogtoken mismatch: waiting for %d, " "received %d, tid %d code %d", tap->txa_token, dialogtoken, tid, code); vap->iv_stats.is_addba_badtoken++; return 0; } /* NB: assumes IEEE80211_AGGR_IMMEDIATE is 1 */ if (policy != (tap->txa_flags & IEEE80211_AGGR_IMMEDIATE)) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ACTION | IEEE80211_MSG_11N, ni->ni_macaddr, "ADDBA response", "policy mismatch: expecting %s, " "received %s, tid %d code %d", tap->txa_flags & IEEE80211_AGGR_IMMEDIATE, policy, tid, code); vap->iv_stats.is_addba_badpolicy++; return 0; } #if 0 /* XXX we take MIN in ieee80211_addba_response */ if (bufsiz > IEEE80211_AGGR_BAWMAX) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ACTION | IEEE80211_MSG_11N, ni->ni_macaddr, "ADDBA response", "BA window too large: max %d, " "received %d, tid %d code %d", bufsiz, IEEE80211_AGGR_BAWMAX, tid, code); vap->iv_stats.is_addba_badbawinsize++; return 0; } #endif IEEE80211_NOTE(vap, IEEE80211_MSG_ACTION | IEEE80211_MSG_11N, ni, "recv ADDBA response: dialogtoken %u code %d " "baparamset 0x%x (tid %d bufsiz %d amsdu %d) batimeout %d", dialogtoken, code, baparamset, tid, bufsiz, amsdu, batimeout); ic->ic_addba_response(ni, tap, code, baparamset, batimeout); return 0; } static int ht_recv_action_ba_delba(struct ieee80211_node *ni, const struct ieee80211_frame *wh, const uint8_t *frm, const uint8_t *efrm) { struct ieee80211com *ic = ni->ni_ic; struct ieee80211_rx_ampdu *rap; struct ieee80211_tx_ampdu *tap; uint16_t baparamset; #ifdef IEEE80211_DEBUG uint16_t code; #endif int tid; baparamset = le16dec(frm+2); #ifdef IEEE80211_DEBUG code = le16dec(frm+4); #endif tid = _IEEE80211_MASKSHIFT(baparamset, IEEE80211_DELBAPS_TID); IEEE80211_NOTE(ni->ni_vap, IEEE80211_MSG_ACTION | IEEE80211_MSG_11N, ni, "recv DELBA: baparamset 0x%x (tid %d initiator %d) " "code %d", baparamset, tid, _IEEE80211_MASKSHIFT(baparamset, IEEE80211_DELBAPS_INIT), code); if ((baparamset & IEEE80211_DELBAPS_INIT) == 0) { tap = &ni->ni_tx_ampdu[tid]; ic->ic_addba_stop(ni, tap); } else { rap = &ni->ni_rx_ampdu[tid]; ic->ic_ampdu_rx_stop(ni, rap); } return 0; } static int ht_recv_action_ht_txchwidth(struct ieee80211_node *ni, const struct ieee80211_frame *wh, const uint8_t *frm, const uint8_t *efrm) { int chw; chw = (frm[2] == IEEE80211_A_HT_TXCHWIDTH_2040) ? 40 : 20; IEEE80211_NOTE(ni->ni_vap, IEEE80211_MSG_ACTION | IEEE80211_MSG_11N, ni, "%s: HT txchwidth, width %d%s", __func__, chw, ni->ni_chw != chw ? "*" : ""); if (chw != ni->ni_chw) { /* XXX does this need to change the ht40 station count? 
*/ ni->ni_chw = chw; /* XXX notify on change */ } return 0; } static int ht_recv_action_ht_mimopwrsave(struct ieee80211_node *ni, const struct ieee80211_frame *wh, const uint8_t *frm, const uint8_t *efrm) { const struct ieee80211_action_ht_mimopowersave *mps = (const struct ieee80211_action_ht_mimopowersave *) frm; /* XXX check iv_htcaps */ if (mps->am_control & IEEE80211_A_HT_MIMOPWRSAVE_ENA) ni->ni_flags |= IEEE80211_NODE_MIMO_PS; else ni->ni_flags &= ~IEEE80211_NODE_MIMO_PS; if (mps->am_control & IEEE80211_A_HT_MIMOPWRSAVE_MODE) ni->ni_flags |= IEEE80211_NODE_MIMO_RTS; else ni->ni_flags &= ~IEEE80211_NODE_MIMO_RTS; /* XXX notify on change */ IEEE80211_NOTE(ni->ni_vap, IEEE80211_MSG_ACTION | IEEE80211_MSG_11N, ni, "%s: HT MIMO PS (%s%s)", __func__, (ni->ni_flags & IEEE80211_NODE_MIMO_PS) ? "on" : "off", (ni->ni_flags & IEEE80211_NODE_MIMO_RTS) ? "+rts" : "" ); return 0; } /* * Transmit processing. */ /* * Check if A-MPDU should be requested/enabled for a stream. * We require a traffic rate above a per-AC threshold and we * also handle backoff from previous failed attempts. * * Drivers may override this method to bring in information * such as link state conditions in making the decision. */ static int ieee80211_ampdu_enable(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap) { struct ieee80211vap *vap = ni->ni_vap; if (tap->txa_avgpps < vap->iv_ampdu_mintraffic[TID_TO_WME_AC(tap->txa_tid)]) return 0; /* XXX check rssi? */ if (tap->txa_attempts >= ieee80211_addba_maxtries && ieee80211_time_after(ticks, tap->txa_nextrequest)) { /* * Don't retry too often; txa_nextrequest is set * to the minimum interval we'll retry after * ieee80211_addba_maxtries failed attempts are made. */ return 0; } IEEE80211_NOTE(vap, IEEE80211_MSG_11N, ni, "enable AMPDU on tid %d (%s), avgpps %d pkts %d attempt %d", tap->txa_tid, ieee80211_wme_acnames[TID_TO_WME_AC(tap->txa_tid)], tap->txa_avgpps, tap->txa_pkts, tap->txa_attempts); return 1; } /* * Request A-MPDU tx aggregation. Setup local state and * issue an ADDBA request. BA use will only happen after * the other end replies with ADDBA response. */ int ieee80211_ampdu_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap) { struct ieee80211com *ic = ni->ni_ic; uint16_t args[5]; int tid, dialogtoken; static int tokens = 0; /* XXX */ /* XXX locking */ if ((tap->txa_flags & IEEE80211_AGGR_SETUP) == 0) { /* do deferred setup of state */ ampdu_tx_setup(tap); } /* XXX hack for not doing proper locking */ tap->txa_flags &= ~IEEE80211_AGGR_NAK; dialogtoken = (tokens+1) % 63; /* XXX */ tid = tap->txa_tid; /* * XXX TODO: This is racy with any other parallel TX going on. 
:( */ tap->txa_start = ni->ni_txseqs[tid]; args[0] = dialogtoken; args[1] = 0; /* NB: status code not used */ args[2] = IEEE80211_BAPS_POLICY_IMMEDIATE | _IEEE80211_SHIFTMASK(tid, IEEE80211_BAPS_TID) | _IEEE80211_SHIFTMASK(IEEE80211_AGGR_BAWMAX, IEEE80211_BAPS_BUFSIZ) ; /* XXX TODO: this should be a flag, not iv_htcaps */ if ((ni->ni_flags & IEEE80211_NODE_AMSDU_TX) && (ni->ni_vap->iv_htcaps & IEEE80211_HTC_TX_AMSDU_AMPDU)) args[2] |= IEEE80211_BAPS_AMSDU; args[3] = 0; /* batimeout */ /* NB: do first so there's no race against reply */ if (!ic->ic_addba_request(ni, tap, dialogtoken, args[2], args[3])) { /* unable to setup state, don't make request */ IEEE80211_NOTE(ni->ni_vap, IEEE80211_MSG_11N, ni, "%s: could not setup BA stream for TID %d AC %d", __func__, tap->txa_tid, TID_TO_WME_AC(tap->txa_tid)); /* defer next try so we don't slam the driver with requests */ tap->txa_attempts = ieee80211_addba_maxtries; /* NB: check in case driver wants to override */ if (tap->txa_nextrequest <= ticks) tap->txa_nextrequest = ticks + ieee80211_addba_backoff; return 0; } tokens = dialogtoken; /* allocate token */ /* NB: after calling ic_addba_request so driver can set txa_start */ args[4] = _IEEE80211_SHIFTMASK(tap->txa_start, IEEE80211_BASEQ_START) | _IEEE80211_SHIFTMASK(0, IEEE80211_BASEQ_FRAG) ; return ic->ic_send_action(ni, IEEE80211_ACTION_CAT_BA, IEEE80211_ACTION_BA_ADDBA_REQUEST, args); } /* * Terminate an AMPDU tx stream. State is reclaimed * and the peer notified with a DelBA Action frame. */ void ieee80211_ampdu_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, int reason) { struct ieee80211com *ic = ni->ni_ic; struct ieee80211vap *vap = ni->ni_vap; uint16_t args[4]; /* XXX locking */ tap->txa_flags &= ~IEEE80211_AGGR_BARPEND; if (IEEE80211_AMPDU_RUNNING(tap)) { IEEE80211_NOTE(vap, IEEE80211_MSG_ACTION | IEEE80211_MSG_11N, ni, "%s: stop BA stream for TID %d (reason: %d (%s))", __func__, tap->txa_tid, reason, ieee80211_reason_to_string(reason)); vap->iv_stats.is_ampdu_stop++; ic->ic_addba_stop(ni, tap); args[0] = tap->txa_tid; args[1] = IEEE80211_DELBAPS_INIT; args[2] = reason; /* XXX reason code */ ic->ic_send_action(ni, IEEE80211_ACTION_CAT_BA, IEEE80211_ACTION_BA_DELBA, args); } else { IEEE80211_NOTE(vap, IEEE80211_MSG_ACTION | IEEE80211_MSG_11N, ni, "%s: BA stream for TID %d not running " "(reason: %d (%s))", __func__, tap->txa_tid, reason, ieee80211_reason_to_string(reason)); vap->iv_stats.is_ampdu_stop_failed++; } } /* XXX */ static void bar_start_timer(struct ieee80211_tx_ampdu *tap); static void bar_timeout(void *arg) { struct ieee80211_tx_ampdu *tap = arg; struct ieee80211_node *ni = tap->txa_ni; KASSERT((tap->txa_flags & IEEE80211_AGGR_XCHGPEND) == 0, ("bar/addba collision, flags 0x%x", tap->txa_flags)); IEEE80211_NOTE(ni->ni_vap, IEEE80211_MSG_11N, ni, "%s: tid %u flags 0x%x attempts %d", __func__, tap->txa_tid, tap->txa_flags, tap->txa_attempts); /* guard against race with bar_tx_complete */ if ((tap->txa_flags & IEEE80211_AGGR_BARPEND) == 0) return; /* XXX ? */ if (tap->txa_attempts >= ieee80211_bar_maxtries) { struct ieee80211com *ic = ni->ni_ic; ni->ni_vap->iv_stats.is_ampdu_bar_tx_fail++; /* * If (at least) the last BAR TX timeout was due to * an ieee80211_send_bar() failures, then we need * to make sure we notify the driver that a BAR * TX did occur and fail. This gives the driver * a chance to undo any queue pause that may * have occurred. 
*/ ic->ic_bar_response(ni, tap, 1); ieee80211_ampdu_stop(ni, tap, IEEE80211_REASON_TIMEOUT); } else { ni->ni_vap->iv_stats.is_ampdu_bar_tx_retry++; if (ieee80211_send_bar(ni, tap, tap->txa_seqpending) != 0) { IEEE80211_NOTE(ni->ni_vap, IEEE80211_MSG_11N, ni, "%s: failed to TX, starting timer\n", __func__); /* * If ieee80211_send_bar() fails here, the * timer may have stopped and/or the pending * flag may be clear. Because of this, * fake the BARPEND and reset the timer. * A retransmission attempt will then occur * during the next timeout. */ /* XXX locking */ tap->txa_flags |= IEEE80211_AGGR_BARPEND; bar_start_timer(tap); } } } static void bar_start_timer(struct ieee80211_tx_ampdu *tap) { IEEE80211_NOTE(tap->txa_ni->ni_vap, IEEE80211_MSG_11N, tap->txa_ni, "%s: called", __func__); callout_reset(&tap->txa_timer, ieee80211_bar_timeout, bar_timeout, tap); } static void bar_stop_timer(struct ieee80211_tx_ampdu *tap) { IEEE80211_NOTE(tap->txa_ni->ni_vap, IEEE80211_MSG_11N, tap->txa_ni, "%s: called", __func__); callout_stop(&tap->txa_timer); } static void bar_tx_complete(struct ieee80211_node *ni, void *arg, int status) { struct ieee80211_tx_ampdu *tap = arg; IEEE80211_NOTE(ni->ni_vap, IEEE80211_MSG_11N, ni, "%s: tid %u flags 0x%x pending %d status %d", __func__, tap->txa_tid, tap->txa_flags, callout_pending(&tap->txa_timer), status); ni->ni_vap->iv_stats.is_ampdu_bar_tx++; /* XXX locking */ if ((tap->txa_flags & IEEE80211_AGGR_BARPEND) && callout_pending(&tap->txa_timer)) { struct ieee80211com *ic = ni->ni_ic; if (status == 0) /* ACK'd */ bar_stop_timer(tap); ic->ic_bar_response(ni, tap, status); /* NB: just let timer expire so we pace requests */ } } static void ieee80211_bar_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, int status) { IEEE80211_NOTE(tap->txa_ni->ni_vap, IEEE80211_MSG_11N, tap->txa_ni, "%s: called", __func__); if (status == 0) { /* got ACK */ IEEE80211_NOTE(ni->ni_vap, IEEE80211_MSG_11N, ni, "BAR moves BA win <%u:%u> (%u frames) txseq %u tid %u", tap->txa_start, IEEE80211_SEQ_ADD(tap->txa_start, tap->txa_wnd-1), tap->txa_qframes, tap->txa_seqpending, tap->txa_tid); /* NB: timer already stopped in bar_tx_complete */ tap->txa_start = tap->txa_seqpending; tap->txa_flags &= ~IEEE80211_AGGR_BARPEND; } } /* * Transmit a BAR frame to the specified node. The * BAR contents are drawn from the supplied aggregation * state associated with the node. * * NB: we only handle immediate ACK w/ compressed bitmap. 
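 */

/*
 * [Illustration only, not part of this change.]  Standalone sketch of the
 * two 16-bit fields ieee80211_send_bar() below fills in: the BAR Control
 * field and the Block Ack Starting Sequence Control.  The EX_* names are
 * local to the example; bit positions follow the 802.11 BlockAckReq
 * layout, and both fields are emitted little-endian (htole16() in the
 * real code).
 */
#include <stdint.h>

#define	EX_BAR_NOACK		0x0001	/* BAR ack policy: 1 = no ack */
#define	EX_BAR_MULTITID		0x0002	/* multi-TID variant */
#define	EX_BAR_COMP		0x0004	/* compressed bitmap variant */
#define	EX_BAR_TID		0xf000	/* TID_INFO */
#define	EX_BAR_TID_S		12
#define	EX_BAR_SEQ_START_S	4	/* seq# sits above the 4-bit fragment# */

static inline void
ex_bar_fields(int tid, uint16_t seq_start, uint16_t *barctl, uint16_t *barseqctl)
{
	/* immediate (normal) ack, compressed bitmap, single TID */
	*barctl = EX_BAR_COMP |
	    (((uint16_t)(tid << EX_BAR_TID_S)) & EX_BAR_TID);
	/* fragment number 0, BA window starting sequence number */
	*barseqctl = (uint16_t)(seq_start << EX_BAR_SEQ_START_S);
}

/*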
*/ int ieee80211_send_bar(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, ieee80211_seq seq) { #define senderr(_x, _v) do { vap->iv_stats._v++; ret = _x; goto bad; } while (0) struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct ieee80211_frame_bar *bar; struct mbuf *m; uint16_t barctl, barseqctl; uint8_t *frm; int tid, ret; IEEE80211_NOTE(tap->txa_ni->ni_vap, IEEE80211_MSG_11N, tap->txa_ni, "%s: called", __func__); if ((tap->txa_flags & IEEE80211_AGGR_RUNNING) == 0) { /* no ADDBA response, should not happen */ /* XXX stat+msg */ return EINVAL; } /* XXX locking */ bar_stop_timer(tap); ieee80211_ref_node(ni); m = ieee80211_getmgtframe(&frm, ic->ic_headroom, sizeof(*bar)); if (m == NULL) senderr(ENOMEM, is_tx_nobuf); if (!ieee80211_add_callback(m, bar_tx_complete, tap)) { m_freem(m); senderr(ENOMEM, is_tx_nobuf); /* XXX */ /* NOTREACHED */ } bar = mtod(m, struct ieee80211_frame_bar *); bar->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_CTL | IEEE80211_FC0_SUBTYPE_BAR; bar->i_fc[1] = 0; IEEE80211_ADDR_COPY(bar->i_ra, ni->ni_macaddr); IEEE80211_ADDR_COPY(bar->i_ta, vap->iv_myaddr); tid = tap->txa_tid; barctl = (tap->txa_flags & IEEE80211_AGGR_IMMEDIATE ? 0 : IEEE80211_BAR_NOACK) | IEEE80211_BAR_COMP | _IEEE80211_SHIFTMASK(tid, IEEE80211_BAR_TID) ; barseqctl = _IEEE80211_SHIFTMASK(seq, IEEE80211_BAR_SEQ_START); /* NB: known to have proper alignment */ bar->i_ctl = htole16(barctl); bar->i_seq = htole16(barseqctl); m->m_pkthdr.len = m->m_len = sizeof(struct ieee80211_frame_bar); M_WME_SETAC(m, WME_AC_VO); IEEE80211_NODE_STAT(ni, tx_mgmt); /* XXX tx_ctl? */ /* XXX locking */ /* init/bump attempts counter */ if ((tap->txa_flags & IEEE80211_AGGR_BARPEND) == 0) tap->txa_attempts = 1; else tap->txa_attempts++; tap->txa_seqpending = seq; tap->txa_flags |= IEEE80211_AGGR_BARPEND; IEEE80211_NOTE(vap, IEEE80211_MSG_DEBUG | IEEE80211_MSG_11N, ni, "send BAR: tid %u ctl 0x%x start %u (attempt %d)", tid, barctl, seq, tap->txa_attempts); /* * ic_raw_xmit will free the node reference * regardless of queue/TX success or failure. */ IEEE80211_TX_LOCK(ic); ret = ieee80211_raw_output(vap, ni, m, NULL); IEEE80211_TX_UNLOCK(ic); if (ret != 0) { IEEE80211_NOTE(vap, IEEE80211_MSG_DEBUG | IEEE80211_MSG_11N, ni, "send BAR: failed: (ret = %d)\n", ret); /* xmit failed, clear state flag */ tap->txa_flags &= ~IEEE80211_AGGR_BARPEND; vap->iv_stats.is_ampdu_bar_tx_fail++; return ret; } /* XXX hack against tx complete happening before timer is started */ if (tap->txa_flags & IEEE80211_AGGR_BARPEND) bar_start_timer(tap); return 0; bad: IEEE80211_NOTE(tap->txa_ni->ni_vap, IEEE80211_MSG_11N, tap->txa_ni, "%s: bad! ret=%d", __func__, ret); vap->iv_stats.is_ampdu_bar_tx_fail++; ieee80211_free_node(ni); return ret; #undef senderr } static int ht_action_output(struct ieee80211_node *ni, struct mbuf *m) { struct ieee80211_bpf_params params; memset(¶ms, 0, sizeof(params)); params.ibp_pri = WME_AC_VO; params.ibp_rate0 = ni->ni_txparms->mgmtrate; /* NB: we know all frames are unicast */ params.ibp_try0 = ni->ni_txparms->maxretry; params.ibp_power = ni->ni_txpower; return ieee80211_mgmt_output(ni, m, IEEE80211_FC0_SUBTYPE_ACTION, ¶ms); } #define ADDSHORT(frm, v) do { \ frm[0] = (v) & 0xff; \ frm[1] = (v) >> 8; \ frm += 2; \ } while (0) /* * Send an action management frame. The arguments are stuff * into a frame without inspection; the caller is assumed to * prepare them carefully (e.g. based on the aggregation state). 
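 */

/*
 * [Illustration only, not part of this change.]  Standalone sketch of the
 * frame body that ht_send_action_ba_addba() below emits for an ADDBA
 * request: category, action, dialog token, then three little-endian
 * 16-bit fields.  The category/action codes (3/0) are the Block Ack
 * category and ADDBA-request action defined by the standard; an ADDBA
 * response instead carries a 16-bit status code after the dialog token
 * and omits the sequence control.  Everything else here is local to the
 * example.
 */
#include <stddef.h>
#include <stdint.h>

/* append a 16-bit value least-significant byte first, like ADDSHORT() */
static inline uint8_t *
ex_add_le16(uint8_t *frm, uint16_t v)
{
	frm[0] = v & 0xff;
	frm[1] = v >> 8;
	return (frm + 2);
}

static size_t
ex_addba_request_body(uint8_t buf[9], uint8_t dialogtoken,
    uint16_t baparamset, uint16_t batimeout, uint16_t baseqctl)
{
	uint8_t *p = buf;

	*p++ = 3;			/* category: block ack */
	*p++ = 0;			/* action: ADDBA request */
	*p++ = dialogtoken;
	p = ex_add_le16(p, baparamset);
	p = ex_add_le16(p, batimeout);
	p = ex_add_le16(p, baseqctl);
	return ((size_t)(p - buf));	/* 9 octets */
}

/*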
*/ static int ht_send_action_ba_addba(struct ieee80211_node *ni, int category, int action, void *arg0) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; uint16_t *args = arg0; struct mbuf *m; uint8_t *frm; IEEE80211_NOTE(vap, IEEE80211_MSG_ACTION | IEEE80211_MSG_11N, ni, "send ADDBA %s: dialogtoken %d status %d " "baparamset 0x%x (tid %d amsdu %d) batimeout 0x%x baseqctl 0x%x", (action == IEEE80211_ACTION_BA_ADDBA_REQUEST) ? "request" : "response", args[0], args[1], args[2], _IEEE80211_MASKSHIFT(args[2], IEEE80211_BAPS_TID), _IEEE80211_MASKSHIFT(args[2], IEEE80211_BAPS_AMSDU), args[3], args[4]); IEEE80211_DPRINTF(vap, IEEE80211_MSG_NODE, "ieee80211_ref_node (%s:%u) %p<%s> refcnt %d\n", __func__, __LINE__, ni, ether_sprintf(ni->ni_macaddr), ieee80211_node_refcnt(ni)+1); ieee80211_ref_node(ni); m = ieee80211_getmgtframe(&frm, ic->ic_headroom + sizeof(struct ieee80211_frame), sizeof(uint16_t) /* action+category */ /* XXX may action payload */ + sizeof(struct ieee80211_action_ba_addbaresponse) ); if (m != NULL) { *frm++ = category; *frm++ = action; *frm++ = args[0]; /* dialog token */ if (action == IEEE80211_ACTION_BA_ADDBA_RESPONSE) ADDSHORT(frm, args[1]); /* status code */ ADDSHORT(frm, args[2]); /* baparamset */ ADDSHORT(frm, args[3]); /* batimeout */ if (action == IEEE80211_ACTION_BA_ADDBA_REQUEST) ADDSHORT(frm, args[4]); /* baseqctl */ m->m_pkthdr.len = m->m_len = frm - mtod(m, uint8_t *); return ht_action_output(ni, m); } else { vap->iv_stats.is_tx_nobuf++; ieee80211_free_node(ni); return ENOMEM; } } static int ht_send_action_ba_delba(struct ieee80211_node *ni, int category, int action, void *arg0) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; uint16_t *args = arg0; struct mbuf *m; uint16_t baparamset; uint8_t *frm; baparamset = _IEEE80211_SHIFTMASK(args[0], IEEE80211_DELBAPS_TID) | args[1] ; IEEE80211_NOTE(vap, IEEE80211_MSG_ACTION | IEEE80211_MSG_11N, ni, "send DELBA action: tid %d, initiator %d reason %d (%s)", args[0], args[1], args[2], ieee80211_reason_to_string(args[2])); IEEE80211_DPRINTF(vap, IEEE80211_MSG_NODE, "ieee80211_ref_node (%s:%u) %p<%s> refcnt %d\n", __func__, __LINE__, ni, ether_sprintf(ni->ni_macaddr), ieee80211_node_refcnt(ni)+1); ieee80211_ref_node(ni); m = ieee80211_getmgtframe(&frm, ic->ic_headroom + sizeof(struct ieee80211_frame), sizeof(uint16_t) /* action+category */ /* XXX may action payload */ + sizeof(struct ieee80211_action_ba_addbaresponse) ); if (m != NULL) { *frm++ = category; *frm++ = action; ADDSHORT(frm, baparamset); ADDSHORT(frm, args[2]); /* reason code */ m->m_pkthdr.len = m->m_len = frm - mtod(m, uint8_t *); return ht_action_output(ni, m); } else { vap->iv_stats.is_tx_nobuf++; ieee80211_free_node(ni); return ENOMEM; } } static int ht_send_action_ht_txchwidth(struct ieee80211_node *ni, int category, int action, void *arg0) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct mbuf *m; uint8_t *frm; IEEE80211_NOTE(vap, IEEE80211_MSG_ACTION | IEEE80211_MSG_11N, ni, "send HT txchwidth: width %d", IEEE80211_IS_CHAN_HT40(ni->ni_chan) ? 
40 : 20); IEEE80211_DPRINTF(vap, IEEE80211_MSG_NODE, "ieee80211_ref_node (%s:%u) %p<%s> refcnt %d\n", __func__, __LINE__, ni, ether_sprintf(ni->ni_macaddr), ieee80211_node_refcnt(ni)+1); ieee80211_ref_node(ni); m = ieee80211_getmgtframe(&frm, ic->ic_headroom + sizeof(struct ieee80211_frame), sizeof(uint16_t) /* action+category */ /* XXX may action payload */ + sizeof(struct ieee80211_action_ba_addbaresponse) ); if (m != NULL) { *frm++ = category; *frm++ = action; *frm++ = IEEE80211_IS_CHAN_HT40(ni->ni_chan) ? IEEE80211_A_HT_TXCHWIDTH_2040 : IEEE80211_A_HT_TXCHWIDTH_20; m->m_pkthdr.len = m->m_len = frm - mtod(m, uint8_t *); return ht_action_output(ni, m); } else { vap->iv_stats.is_tx_nobuf++; ieee80211_free_node(ni); return ENOMEM; } } #undef ADDSHORT /* * Construct the MCS bit mask for inclusion in an HT capabilities * information element. */ static void ieee80211_set_mcsset(struct ieee80211com *ic, uint8_t *frm) { int i; uint8_t txparams; KASSERT((ic->ic_rxstream > 0 && ic->ic_rxstream <= 4), ("ic_rxstream %d out of range", ic->ic_rxstream)); KASSERT((ic->ic_txstream > 0 && ic->ic_txstream <= 4), ("ic_txstream %d out of range", ic->ic_txstream)); for (i = 0; i < ic->ic_rxstream * 8; i++) setbit(frm, i); if ((ic->ic_htcaps & IEEE80211_HTCAP_CHWIDTH40) && (ic->ic_htcaps & IEEE80211_HTC_RXMCS32)) setbit(frm, 32); if (ic->ic_htcaps & IEEE80211_HTC_RXUNEQUAL) { if (ic->ic_rxstream >= 2) { for (i = 33; i <= 38; i++) setbit(frm, i); } if (ic->ic_rxstream >= 3) { for (i = 39; i <= 52; i++) setbit(frm, i); } if (ic->ic_txstream >= 4) { for (i = 53; i <= 76; i++) setbit(frm, i); } } if (ic->ic_rxstream != ic->ic_txstream) { txparams = 0x1; /* TX MCS set defined */ txparams |= 0x2; /* TX RX MCS not equal */ txparams |= (ic->ic_txstream - 1) << 2; /* num TX streams */ if (ic->ic_htcaps & IEEE80211_HTC_TXUNEQUAL) txparams |= 0x16; /* TX unequal modulation sup */ } else txparams = 0; frm[12] = txparams; } /* * Add body of an HTCAP information element. */ static uint8_t * ieee80211_add_htcap_body(uint8_t *frm, struct ieee80211_node *ni) { #define ADDSHORT(frm, v) do { \ frm[0] = (v) & 0xff; \ frm[1] = (v) >> 8; \ frm += 2; \ } while (0) struct ieee80211com *ic = ni->ni_ic; struct ieee80211vap *vap = ni->ni_vap; uint16_t caps, extcaps; int rxmax, density; /* HT capabilities */ caps = vap->iv_htcaps & 0xffff; /* * Note channel width depends on whether we are operating as * a sta or not. When operating as a sta we are generating * a request based on our desired configuration. Otherwise * we are operational and the channel attributes identify * how we've been setup (which might be different if a fixed * channel is specified). */ if (vap->iv_opmode == IEEE80211_M_STA) { /* override 20/40 use based on config */ if (vap->iv_flags_ht & IEEE80211_FHT_USEHT40) caps |= IEEE80211_HTCAP_CHWIDTH40; else caps &= ~IEEE80211_HTCAP_CHWIDTH40; /* Start by using the advertised settings */ rxmax = _IEEE80211_MASKSHIFT(ni->ni_htparam, IEEE80211_HTCAP_MAXRXAMPDU); density = _IEEE80211_MASKSHIFT(ni->ni_htparam, IEEE80211_HTCAP_MPDUDENSITY); IEEE80211_DPRINTF(vap, IEEE80211_MSG_11N, "%s: advertised rxmax=%d, density=%d, vap rxmax=%d, density=%d\n", __func__, rxmax, density, vap->iv_ampdu_rxmax, vap->iv_ampdu_density); /* Cap at VAP rxmax */ if (rxmax > vap->iv_ampdu_rxmax) rxmax = vap->iv_ampdu_rxmax; /* * If the VAP ampdu density value greater, use that. * * (Larger density value == larger minimum gap between A-MPDU * subframes.) 
*/ if (vap->iv_ampdu_density > density) density = vap->iv_ampdu_density; /* * NB: Hardware might support HT40 on some but not all * channels. We can't determine this earlier because only * after association the channel is upgraded to HT based * on the negotiated capabilities. */ if (ni->ni_chan != IEEE80211_CHAN_ANYC && findhtchan(ic, ni->ni_chan, IEEE80211_CHAN_HT40U) == NULL && findhtchan(ic, ni->ni_chan, IEEE80211_CHAN_HT40D) == NULL) caps &= ~IEEE80211_HTCAP_CHWIDTH40; } else { /* override 20/40 use based on current channel */ if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) caps |= IEEE80211_HTCAP_CHWIDTH40; else caps &= ~IEEE80211_HTCAP_CHWIDTH40; /* XXX TODO should it start by using advertised settings? */ rxmax = vap->iv_ampdu_rxmax; density = vap->iv_ampdu_density; } /* adjust short GI based on channel and config */ if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI20) == 0) caps &= ~IEEE80211_HTCAP_SHORTGI20; if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI40) == 0 || (caps & IEEE80211_HTCAP_CHWIDTH40) == 0) caps &= ~IEEE80211_HTCAP_SHORTGI40; /* adjust STBC based on receive capabilities */ if ((vap->iv_flags_ht & IEEE80211_FHT_STBC_RX) == 0) caps &= ~IEEE80211_HTCAP_RXSTBC; /* adjust LDPC based on receive capabilites */ if ((vap->iv_flags_ht & IEEE80211_FHT_LDPC_RX) == 0) caps &= ~IEEE80211_HTCAP_LDPC; ADDSHORT(frm, caps); /* HT parameters */ *frm = _IEEE80211_SHIFTMASK(rxmax, IEEE80211_HTCAP_MAXRXAMPDU) | _IEEE80211_SHIFTMASK(density, IEEE80211_HTCAP_MPDUDENSITY) ; frm++; /* pre-zero remainder of ie */ memset(frm, 0, sizeof(struct ieee80211_ie_htcap) - __offsetof(struct ieee80211_ie_htcap, hc_mcsset)); /* supported MCS set */ /* * XXX: For sta mode the rate set should be restricted based * on the AP's capabilities, but ni_htrates isn't setup when * we're called to form an AssocReq frame so for now we're * restricted to the device capabilities. */ ieee80211_set_mcsset(ni->ni_ic, frm); frm += __offsetof(struct ieee80211_ie_htcap, hc_extcap) - __offsetof(struct ieee80211_ie_htcap, hc_mcsset); /* HT extended capabilities */ extcaps = vap->iv_htextcaps & 0xffff; ADDSHORT(frm, extcaps); frm += sizeof(struct ieee80211_ie_htcap) - __offsetof(struct ieee80211_ie_htcap, hc_txbf); return frm; #undef ADDSHORT } /* * Add 802.11n HT capabilities information element */ uint8_t * ieee80211_add_htcap(uint8_t *frm, struct ieee80211_node *ni) { frm[0] = IEEE80211_ELEMID_HTCAP; frm[1] = sizeof(struct ieee80211_ie_htcap) - 2; return ieee80211_add_htcap_body(frm + 2, ni); } /* * Non-associated probe request - add HT capabilities based on * the current channel configuration. */ static uint8_t * ieee80211_add_htcap_body_ch(uint8_t *frm, struct ieee80211vap *vap, struct ieee80211_channel *c) { #define ADDSHORT(frm, v) do { \ frm[0] = (v) & 0xff; \ frm[1] = (v) >> 8; \ frm += 2; \ } while (0) struct ieee80211com *ic = vap->iv_ic; uint16_t caps, extcaps; int rxmax, density; /* HT capabilities */ caps = vap->iv_htcaps & 0xffff; /* * We don't use this in STA mode; only in IBSS mode. * So in IBSS mode we base our HTCAP flags on the * given channel. 
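 */

/*
 * [Illustration only, not part of this change.]  Standalone sketch of the
 * single "A-MPDU parameters" byte that the rxmax/density values computed
 * here are packed into: a 2-bit maximum A-MPDU length exponent and a
 * 3-bit minimum MPDU start spacing (density) code.  The EX_* names are
 * local to the example and mirror IEEE80211_HTCAP_MAXRXAMPDU and
 * IEEE80211_HTCAP_MPDUDENSITY.
 */
#include <stdint.h>

#define	EX_HTCAP_MAXRXAMPDU	0x03	/* max length = 2^(13 + exp) - 1 octets */
#define	EX_HTCAP_MPDUDENSITY	0x1c	/* 0, 1/4, 1/2, 1, 2, 4, 8 or 16 us */
#define	EX_HTCAP_MPDUDENSITY_S	2

static inline uint8_t
ex_ampdu_params(unsigned rxmax_exp, unsigned density)
{
	return ((rxmax_exp & EX_HTCAP_MAXRXAMPDU) |
	    ((density << EX_HTCAP_MPDUDENSITY_S) & EX_HTCAP_MPDUDENSITY));
}
/* e.g. ex_ampdu_params(3, 5) advertises 65535-octet A-MPDUs with 4 us spacing */

/*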
*/ /* override 20/40 use based on current channel */ if (IEEE80211_IS_CHAN_HT40(c)) caps |= IEEE80211_HTCAP_CHWIDTH40; else caps &= ~IEEE80211_HTCAP_CHWIDTH40; /* Use the currently configured values */ rxmax = vap->iv_ampdu_rxmax; density = vap->iv_ampdu_density; /* adjust short GI based on channel and config */ if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI20) == 0) caps &= ~IEEE80211_HTCAP_SHORTGI20; if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI40) == 0 || (caps & IEEE80211_HTCAP_CHWIDTH40) == 0) caps &= ~IEEE80211_HTCAP_SHORTGI40; ADDSHORT(frm, caps); /* HT parameters */ *frm = _IEEE80211_SHIFTMASK(rxmax, IEEE80211_HTCAP_MAXRXAMPDU) | _IEEE80211_SHIFTMASK(density, IEEE80211_HTCAP_MPDUDENSITY) ; frm++; /* pre-zero remainder of ie */ memset(frm, 0, sizeof(struct ieee80211_ie_htcap) - __offsetof(struct ieee80211_ie_htcap, hc_mcsset)); /* supported MCS set */ /* * XXX: For sta mode the rate set should be restricted based * on the AP's capabilities, but ni_htrates isn't setup when * we're called to form an AssocReq frame so for now we're * restricted to the device capabilities. */ ieee80211_set_mcsset(ic, frm); frm += __offsetof(struct ieee80211_ie_htcap, hc_extcap) - __offsetof(struct ieee80211_ie_htcap, hc_mcsset); /* HT extended capabilities */ extcaps = vap->iv_htextcaps & 0xffff; ADDSHORT(frm, extcaps); frm += sizeof(struct ieee80211_ie_htcap) - __offsetof(struct ieee80211_ie_htcap, hc_txbf); return frm; #undef ADDSHORT } /* * Add 802.11n HT capabilities information element */ uint8_t * ieee80211_add_htcap_ch(uint8_t *frm, struct ieee80211vap *vap, struct ieee80211_channel *c) { frm[0] = IEEE80211_ELEMID_HTCAP; frm[1] = sizeof(struct ieee80211_ie_htcap) - 2; return ieee80211_add_htcap_body_ch(frm + 2, vap, c); } /* * Add Broadcom OUI wrapped standard HTCAP ie; this is * used for compatibility w/ pre-draft implementations. */ uint8_t * ieee80211_add_htcap_vendor(uint8_t *frm, struct ieee80211_node *ni) { frm[0] = IEEE80211_ELEMID_VENDOR; frm[1] = 4 + sizeof(struct ieee80211_ie_htcap) - 2; frm[2] = (BCM_OUI >> 0) & 0xff; frm[3] = (BCM_OUI >> 8) & 0xff; frm[4] = (BCM_OUI >> 16) & 0xff; frm[5] = BCM_OUI_HTCAP; return ieee80211_add_htcap_body(frm + 6, ni); } /* * Construct the MCS bit mask of basic rates * for inclusion in an HT information element. */ static void ieee80211_set_basic_htrates(uint8_t *frm, const struct ieee80211_htrateset *rs) { int i; for (i = 0; i < rs->rs_nrates; i++) { int r = rs->rs_rates[i] & IEEE80211_RATE_VAL; if ((rs->rs_rates[i] & IEEE80211_RATE_BASIC) && r < IEEE80211_HTRATE_MAXSIZE) { /* NB: this assumes a particular implementation */ setbit(frm, r); } } } /* * Update the HTINFO ie for a beacon frame. 
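 */

/*
 * [Illustration only, not part of this change.]  Standalone sketch of the
 * bitmap manipulation used by ieee80211_set_mcsset() and
 * ieee80211_set_basic_htrates() above: the supported-MCS field is a bit
 * array in which MCS 0..7, 8..15, ... correspond to one, two, ... spatial
 * streams, so a device can simply set the first 8 * nstreams bits.
 * EX_SETBIT() mirrors the tree's setbit() macro; names are local to the
 * example.
 */
#include <stdint.h>
#include <string.h>

#define	EX_SETBIT(map, i)	((map)[(i) / 8] |= (uint8_t)(1 << ((i) % 8)))

static void
ex_set_rx_mcs(uint8_t mcsmap[10], int nstreams)	/* 77-bit Rx MCS bitmask */
{
	int i;

	memset(mcsmap, 0, 10);
	for (i = 0; i < nstreams * 8; i++)
		EX_SETBIT(mcsmap, i);
}

/*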
*/ void ieee80211_ht_update_beacon(struct ieee80211vap *vap, struct ieee80211_beacon_offsets *bo) { #define PROTMODE (IEEE80211_HTINFO_OPMODE|IEEE80211_HTINFO_NONHT_PRESENT) struct ieee80211_node *ni; const struct ieee80211_channel *bsschan; struct ieee80211com *ic = vap->iv_ic; struct ieee80211_ie_htinfo *ht = (struct ieee80211_ie_htinfo *) bo->bo_htinfo; ni = ieee80211_ref_node(vap->iv_bss); bsschan = ni->ni_chan; /* XXX only update on channel change */ ht->hi_ctrlchannel = ieee80211_chan2ieee(ic, bsschan); if (vap->iv_flags_ht & IEEE80211_FHT_RIFS) ht->hi_byte1 = IEEE80211_HTINFO_RIFSMODE_PERM; else ht->hi_byte1 = IEEE80211_HTINFO_RIFSMODE_PROH; if (IEEE80211_IS_CHAN_HT40U(bsschan)) ht->hi_byte1 |= IEEE80211_HTINFO_2NDCHAN_ABOVE; else if (IEEE80211_IS_CHAN_HT40D(bsschan)) ht->hi_byte1 |= IEEE80211_HTINFO_2NDCHAN_BELOW; else ht->hi_byte1 |= IEEE80211_HTINFO_2NDCHAN_NONE; if (IEEE80211_IS_CHAN_HT40(bsschan)) ht->hi_byte1 |= IEEE80211_HTINFO_TXWIDTH_2040; /* protection mode */ /* * XXX TODO: this uses the global flag, not the per-VAP flag. * Eventually (once the protection modes are done per-channel * rather than per-VAP) we can flip this over to be per-VAP but * using the channel protection mode. */ ht->hi_byte2 = (ht->hi_byte2 &~ PROTMODE) | ic->ic_curhtprotmode; ieee80211_free_node(ni); /* XXX propagate to vendor ie's */ #undef PROTMODE } /* * Add body of an HTINFO information element. * * NB: We don't use struct ieee80211_ie_htinfo because we can * be called to fillin both a standard ie and a compat ie that * has a vendor OUI at the front. */ static uint8_t * ieee80211_add_htinfo_body(uint8_t *frm, struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; /* pre-zero remainder of ie */ memset(frm, 0, sizeof(struct ieee80211_ie_htinfo) - 2); /* primary/control channel center */ *frm++ = ieee80211_chan2ieee(ic, ni->ni_chan); if (vap->iv_flags_ht & IEEE80211_FHT_RIFS) frm[0] = IEEE80211_HTINFO_RIFSMODE_PERM; else frm[0] = IEEE80211_HTINFO_RIFSMODE_PROH; if (IEEE80211_IS_CHAN_HT40U(ni->ni_chan)) frm[0] |= IEEE80211_HTINFO_2NDCHAN_ABOVE; else if (IEEE80211_IS_CHAN_HT40D(ni->ni_chan)) frm[0] |= IEEE80211_HTINFO_2NDCHAN_BELOW; else frm[0] |= IEEE80211_HTINFO_2NDCHAN_NONE; if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) frm[0] |= IEEE80211_HTINFO_TXWIDTH_2040; /* * Add current protection mode. Unlike for beacons, * this will respect the per-VAP flags. */ frm[1] = vap->iv_curhtprotmode; frm += 5; /* basic MCS set */ ieee80211_set_basic_htrates(frm, &ni->ni_htrates); frm += sizeof(struct ieee80211_ie_htinfo) - __offsetof(struct ieee80211_ie_htinfo, hi_basicmcsset); return frm; } /* * Add 802.11n HT information element. */ uint8_t * ieee80211_add_htinfo(uint8_t *frm, struct ieee80211_node *ni) { frm[0] = IEEE80211_ELEMID_HTINFO; frm[1] = sizeof(struct ieee80211_ie_htinfo) - 2; return ieee80211_add_htinfo_body(frm + 2, ni); } /* * Add Broadcom OUI wrapped standard HTINFO ie; this is * used for compatibility w/ pre-draft implementations. 
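 */

/*
 * [Illustration only, not part of this change.]  Standalone sketch of the
 * vendor-specific wrapping used by ieee80211_add_htinfo_vendor() below
 * (and ieee80211_add_htcap_vendor() earlier): the standard IE body is
 * prefixed with element id 221, a length, the 3-byte OUI and a one-byte
 * vendor subtype.  The helper and its names are local to the example.
 */
#include <stdint.h>
#include <string.h>

static uint8_t *
ex_add_vendor_ie(uint8_t *frm, const uint8_t oui[3], uint8_t subtype,
    const uint8_t *body, uint8_t bodylen)
{
	*frm++ = 221;			/* vendor-specific element id */
	*frm++ = 4 + bodylen;		/* OUI + subtype + body */
	memcpy(frm, oui, 3);
	frm += 3;
	*frm++ = subtype;
	memcpy(frm, body, bodylen);
	return (frm + bodylen);
}

/*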
*/ uint8_t * ieee80211_add_htinfo_vendor(uint8_t *frm, struct ieee80211_node *ni) { frm[0] = IEEE80211_ELEMID_VENDOR; frm[1] = 4 + sizeof(struct ieee80211_ie_htinfo) - 2; frm[2] = (BCM_OUI >> 0) & 0xff; frm[3] = (BCM_OUI >> 8) & 0xff; frm[4] = (BCM_OUI >> 16) & 0xff; frm[5] = BCM_OUI_HTINFO; return ieee80211_add_htinfo_body(frm + 6, ni); } diff --git a/sys/net80211/ieee80211_hwmp.c b/sys/net80211/ieee80211_hwmp.c index de07d501513b..77ba218ccd50 100644 --- a/sys/net80211/ieee80211_hwmp.c +++ b/sys/net80211/ieee80211_hwmp.c @@ -1,2089 +1,2089 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2009 The FreeBSD Foundation * * This software was developed by Rui Paulo under sponsorship from the * FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #ifdef __FreeBSD__ __FBSDID("$FreeBSD$"); #endif /* * IEEE 802.11s Hybrid Wireless Mesh Protocol, HWMP. * * Based on March 2009, D3.0 802.11s draft spec. 
*/ #include "opt_inet.h" #include "opt_wlan.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static void hwmp_vattach(struct ieee80211vap *); static void hwmp_vdetach(struct ieee80211vap *); static int hwmp_newstate(struct ieee80211vap *, enum ieee80211_state, int); static int hwmp_send_action(struct ieee80211vap *, const uint8_t [IEEE80211_ADDR_LEN], uint8_t *, size_t); static uint8_t * hwmp_add_meshpreq(uint8_t *, const struct ieee80211_meshpreq_ie *); static uint8_t * hwmp_add_meshprep(uint8_t *, const struct ieee80211_meshprep_ie *); static uint8_t * hwmp_add_meshperr(uint8_t *, const struct ieee80211_meshperr_ie *); static uint8_t * hwmp_add_meshrann(uint8_t *, const struct ieee80211_meshrann_ie *); static void hwmp_rootmode_setup(struct ieee80211vap *); static void hwmp_rootmode_cb(void *); static void hwmp_rootmode_rann_cb(void *); static void hwmp_recv_preq(struct ieee80211vap *, struct ieee80211_node *, const struct ieee80211_frame *, const struct ieee80211_meshpreq_ie *); static int hwmp_send_preq(struct ieee80211vap *, const uint8_t [IEEE80211_ADDR_LEN], struct ieee80211_meshpreq_ie *, struct timeval *, struct timeval *); static void hwmp_recv_prep(struct ieee80211vap *, struct ieee80211_node *, const struct ieee80211_frame *, const struct ieee80211_meshprep_ie *); static int hwmp_send_prep(struct ieee80211vap *, const uint8_t [IEEE80211_ADDR_LEN], struct ieee80211_meshprep_ie *); static void hwmp_recv_perr(struct ieee80211vap *, struct ieee80211_node *, const struct ieee80211_frame *, const struct ieee80211_meshperr_ie *); static int hwmp_send_perr(struct ieee80211vap *, const uint8_t [IEEE80211_ADDR_LEN], struct ieee80211_meshperr_ie *); static void hwmp_senderror(struct ieee80211vap *, const uint8_t [IEEE80211_ADDR_LEN], struct ieee80211_mesh_route *, int); static void hwmp_recv_rann(struct ieee80211vap *, struct ieee80211_node *, const struct ieee80211_frame *, const struct ieee80211_meshrann_ie *); static int hwmp_send_rann(struct ieee80211vap *, const uint8_t [IEEE80211_ADDR_LEN], struct ieee80211_meshrann_ie *); static struct ieee80211_node * hwmp_discover(struct ieee80211vap *, const uint8_t [IEEE80211_ADDR_LEN], struct mbuf *); static void hwmp_peerdown(struct ieee80211_node *); static struct timeval ieee80211_hwmp_preqminint = { 0, 100000 }; static struct timeval ieee80211_hwmp_perrminint = { 0, 100000 }; /* NB: the Target Address set in a Proactive PREQ is the broadcast address. */ static const uint8_t broadcastaddr[IEEE80211_ADDR_LEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; typedef uint32_t ieee80211_hwmp_seq; #define HWMP_SEQ_LT(a, b) ((int32_t)((a)-(b)) < 0) #define HWMP_SEQ_LEQ(a, b) ((int32_t)((a)-(b)) <= 0) #define HWMP_SEQ_EQ(a, b) ((int32_t)((a)-(b)) == 0) #define HWMP_SEQ_GT(a, b) ((int32_t)((a)-(b)) > 0) #define HWMP_SEQ_MAX(a, b) (a > b ? a : b) /* * Private extension of ieee80211_mesh_route. */ struct ieee80211_hwmp_route { ieee80211_hwmp_seq hr_seq; /* last HWMP seq seen from dst*/ ieee80211_hwmp_seq hr_preqid; /* last PREQ ID seen from dst */ ieee80211_hwmp_seq hr_origseq; /* seq. no. 
on our latest PREQ*/ struct timeval hr_lastpreq; /* last time we sent a PREQ */ struct timeval hr_lastrootconf; /* last sent PREQ root conf */ int hr_preqretries; /* number of discoveries */ int hr_lastdiscovery; /* last discovery in ticks */ }; struct ieee80211_hwmp_state { ieee80211_hwmp_seq hs_seq; /* next seq to be used */ ieee80211_hwmp_seq hs_preqid; /* next PREQ ID to be used */ int hs_rootmode; /* proactive HWMP */ struct timeval hs_lastperr; /* last time we sent a PERR */ struct callout hs_roottimer; uint8_t hs_maxhops; /* max hop count */ }; static SYSCTL_NODE(_net_wlan, OID_AUTO, hwmp, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "IEEE 802.11s HWMP parameters"); static int ieee80211_hwmp_targetonly = 0; SYSCTL_INT(_net_wlan_hwmp, OID_AUTO, targetonly, CTLFLAG_RW, &ieee80211_hwmp_targetonly, 0, "Set TO bit on generated PREQs"); static int ieee80211_hwmp_pathtimeout = -1; SYSCTL_PROC(_net_wlan_hwmp, OID_AUTO, pathlifetime, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, &ieee80211_hwmp_pathtimeout, 0, ieee80211_sysctl_msecs_ticks, "I", "path entry lifetime (ms)"); static int ieee80211_hwmp_maxpreq_retries = -1; SYSCTL_PROC(_net_wlan_hwmp, OID_AUTO, maxpreq_retries, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, &ieee80211_hwmp_maxpreq_retries, 0, ieee80211_sysctl_msecs_ticks, "I", "maximum number of preq retries"); static int ieee80211_hwmp_net_diameter_traversaltime = -1; SYSCTL_PROC(_net_wlan_hwmp, OID_AUTO, net_diameter_traversal_time, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, &ieee80211_hwmp_net_diameter_traversaltime, 0, ieee80211_sysctl_msecs_ticks, "I", "estimate traversal time across the MBSS (ms)"); static int ieee80211_hwmp_roottimeout = -1; SYSCTL_PROC(_net_wlan_hwmp, OID_AUTO, roottimeout, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, &ieee80211_hwmp_roottimeout, 0, ieee80211_sysctl_msecs_ticks, "I", "root PREQ timeout (ms)"); static int ieee80211_hwmp_rootint = -1; SYSCTL_PROC(_net_wlan_hwmp, OID_AUTO, rootint, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, &ieee80211_hwmp_rootint, 0, ieee80211_sysctl_msecs_ticks, "I", "root interval (ms)"); static int ieee80211_hwmp_rannint = -1; SYSCTL_PROC(_net_wlan_hwmp, OID_AUTO, rannint, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, &ieee80211_hwmp_rannint, 0, ieee80211_sysctl_msecs_ticks, "I", "root announcement interval (ms)"); static struct timeval ieee80211_hwmp_rootconfint = { 0, 0 }; static int ieee80211_hwmp_rootconfint_internal = -1; SYSCTL_PROC(_net_wlan_hwmp, OID_AUTO, rootconfint, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, &ieee80211_hwmp_rootconfint_internal, 0, ieee80211_sysctl_msecs_ticks, "I", "root confirmation interval (ms) (read-only)"); #define IEEE80211_HWMP_DEFAULT_MAXHOPS 31 static ieee80211_recv_action_func hwmp_recv_action_meshpath; static struct ieee80211_mesh_proto_path mesh_proto_hwmp = { .mpp_descr = "HWMP", .mpp_ie = IEEE80211_MESHCONF_PATH_HWMP, .mpp_discover = hwmp_discover, .mpp_peerdown = hwmp_peerdown, .mpp_senderror = hwmp_senderror, .mpp_vattach = hwmp_vattach, .mpp_vdetach = hwmp_vdetach, .mpp_newstate = hwmp_newstate, .mpp_privlen = sizeof(struct ieee80211_hwmp_route), }; SYSCTL_PROC(_net_wlan_hwmp, OID_AUTO, inact, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &mesh_proto_hwmp.mpp_inact, 0, ieee80211_sysctl_msecs_ticks, "I", "mesh route inactivity timeout (ms)"); static void ieee80211_hwmp_init(void) { /* Default values as per amendment */ ieee80211_hwmp_pathtimeout = msecs_to_ticks(5*1000); ieee80211_hwmp_roottimeout = msecs_to_ticks(5*1000); ieee80211_hwmp_rootint = msecs_to_ticks(2*1000); ieee80211_hwmp_rannint = 
msecs_to_ticks(1*1000); ieee80211_hwmp_rootconfint_internal = msecs_to_ticks(2*1000); ieee80211_hwmp_maxpreq_retries = 3; /* * (TU): A measurement of time equal to 1024 μs, * 500 TU is 512 ms. */ ieee80211_hwmp_net_diameter_traversaltime = msecs_to_ticks(512); /* * NB: I dont know how to make SYSCTL_PROC that calls ms to ticks * and return a struct timeval... */ ieee80211_hwmp_rootconfint.tv_usec = ieee80211_hwmp_rootconfint_internal * 1000; /* * Register action frame handler. */ ieee80211_recv_action_register(IEEE80211_ACTION_CAT_MESH, IEEE80211_ACTION_MESH_HWMP, hwmp_recv_action_meshpath); /* NB: default is 5 secs per spec */ mesh_proto_hwmp.mpp_inact = msecs_to_ticks(5*1000); /* * Register HWMP. */ ieee80211_mesh_register_proto_path(&mesh_proto_hwmp); } SYSINIT(wlan_hwmp, SI_SUB_DRIVERS, SI_ORDER_SECOND, ieee80211_hwmp_init, NULL); static void hwmp_vattach(struct ieee80211vap *vap) { struct ieee80211_hwmp_state *hs; KASSERT(vap->iv_opmode == IEEE80211_M_MBSS, ("not a mesh vap, opmode %d", vap->iv_opmode)); hs = IEEE80211_MALLOC(sizeof(struct ieee80211_hwmp_state), M_80211_VAP, IEEE80211_M_NOWAIT | IEEE80211_M_ZERO); if (hs == NULL) { printf("%s: couldn't alloc HWMP state\n", __func__); return; } hs->hs_maxhops = IEEE80211_HWMP_DEFAULT_MAXHOPS; callout_init(&hs->hs_roottimer, 1); vap->iv_hwmp = hs; } static void hwmp_vdetach(struct ieee80211vap *vap) { struct ieee80211_hwmp_state *hs = vap->iv_hwmp; callout_drain(&hs->hs_roottimer); IEEE80211_FREE(vap->iv_hwmp, M_80211_VAP); vap->iv_hwmp = NULL; } static int hwmp_newstate(struct ieee80211vap *vap, enum ieee80211_state ostate, int arg) { enum ieee80211_state nstate = vap->iv_state; struct ieee80211_hwmp_state *hs = vap->iv_hwmp; IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE, "%s: %s -> %s (%d)\n", __func__, ieee80211_state_name[ostate], ieee80211_state_name[nstate], arg); if (nstate != IEEE80211_S_RUN && ostate == IEEE80211_S_RUN) callout_drain(&hs->hs_roottimer); if (nstate == IEEE80211_S_RUN) hwmp_rootmode_setup(vap); return 0; } /* * Verify the length of an HWMP PREQ and return the number * of destinations >= 1, if verification fails -1 is returned. */ static int verify_mesh_preq_len(struct ieee80211vap *vap, const struct ieee80211_frame *wh, const uint8_t *iefrm) { int alloc_sz = -1; int ndest = -1; if (iefrm[2] & IEEE80211_MESHPREQ_FLAGS_AE) { /* Originator External Address present */ alloc_sz = IEEE80211_MESHPREQ_BASE_SZ_AE; ndest = iefrm[IEEE80211_MESHPREQ_TCNT_OFFSET_AE]; } else { /* w/o Originator External Address */ alloc_sz = IEEE80211_MESHPREQ_BASE_SZ; ndest = iefrm[IEEE80211_MESHPREQ_TCNT_OFFSET]; } alloc_sz += ndest * IEEE80211_MESHPREQ_TRGT_SZ; if(iefrm[1] != (alloc_sz)) { IEEE80211_DISCARD(vap, IEEE80211_MSG_ACTION | IEEE80211_MSG_HWMP, wh, NULL, "PREQ (AE=%s) with wrong len", iefrm[2] & IEEE80211_MESHPREQ_FLAGS_AE ? "1" : "0"); return (-1); } return ndest; } /* * Verify the length of an HWMP PREP and returns 1 on success, * otherwise -1. */ static int verify_mesh_prep_len(struct ieee80211vap *vap, const struct ieee80211_frame *wh, const uint8_t *iefrm) { int alloc_sz = -1; if (iefrm[2] & IEEE80211_MESHPREP_FLAGS_AE) { if (iefrm[1] == IEEE80211_MESHPREP_BASE_SZ_AE) alloc_sz = IEEE80211_MESHPREP_BASE_SZ_AE; } else if (iefrm[1] == IEEE80211_MESHPREP_BASE_SZ) alloc_sz = IEEE80211_MESHPREP_BASE_SZ; if(alloc_sz < 0) { IEEE80211_DISCARD(vap, IEEE80211_MSG_ACTION | IEEE80211_MSG_HWMP, wh, NULL, "PREP (AE=%s) with wrong len", iefrm[2] & IEEE80211_MESHPREP_FLAGS_AE ? 
"1" : "0"); return (-1); } return (1); } /* * Verify the length of an HWMP PERR and return the number * of destinations >= 1, if verification fails -1 is returned. */ static int verify_mesh_perr_len(struct ieee80211vap *vap, const struct ieee80211_frame *wh, const uint8_t *iefrm) { int alloc_sz = -1; const uint8_t *iefrm_t = iefrm; uint8_t ndest = iefrm_t[IEEE80211_MESHPERR_NDEST_OFFSET]; int i; if(ndest > IEEE80211_MESHPERR_MAXDEST) { IEEE80211_DISCARD(vap, IEEE80211_MSG_ACTION | IEEE80211_MSG_HWMP, wh, NULL, "PERR with wrong number of destionat (>19), %u", ndest); return (-1); } iefrm_t += IEEE80211_MESHPERR_NDEST_OFFSET + 1; /* flag is next field */ /* We need to check each destination flag to know size */ for(i = 0; ini_vap; struct ieee80211_meshpreq_ie *preq; struct ieee80211_meshprep_ie *prep; struct ieee80211_meshperr_ie *perr; struct ieee80211_meshrann_ie rann; const uint8_t *iefrm = frm + 2; /* action + code */ const uint8_t *iefrm_t = iefrm; /* temporary pointer */ int ndest = -1; int found = 0; while (efrm - iefrm > 1) { IEEE80211_VERIFY_LENGTH(efrm - iefrm, iefrm[1] + 2, return 0); switch (*iefrm) { case IEEE80211_ELEMID_MESHPREQ: { int i = 0; iefrm_t = iefrm; ndest = verify_mesh_preq_len(vap, wh, iefrm_t); if (ndest < 0) { vap->iv_stats.is_rx_mgtdiscard++; break; } preq = IEEE80211_MALLOC(sizeof(*preq) + (ndest - 1) * sizeof(*preq->preq_targets), M_80211_MESH_PREQ, IEEE80211_M_NOWAIT | IEEE80211_M_ZERO); KASSERT(preq != NULL, ("preq == NULL")); preq->preq_ie = *iefrm_t++; preq->preq_len = *iefrm_t++; preq->preq_flags = *iefrm_t++; preq->preq_hopcount = *iefrm_t++; preq->preq_ttl = *iefrm_t++; preq->preq_id = le32dec(iefrm_t); iefrm_t += 4; IEEE80211_ADDR_COPY(preq->preq_origaddr, iefrm_t); iefrm_t += 6; preq->preq_origseq = le32dec(iefrm_t); iefrm_t += 4; /* NB: may have Originator Proxied Address */ if (preq->preq_flags & IEEE80211_MESHPREQ_FLAGS_AE) { IEEE80211_ADDR_COPY( preq->preq_orig_ext_addr, iefrm_t); iefrm_t += 6; } preq->preq_lifetime = le32dec(iefrm_t); iefrm_t += 4; preq->preq_metric = le32dec(iefrm_t); iefrm_t += 4; preq->preq_tcount = *iefrm_t++; for (i = 0; i < preq->preq_tcount; i++) { preq->preq_targets[i].target_flags = *iefrm_t++; IEEE80211_ADDR_COPY( preq->preq_targets[i].target_addr, iefrm_t); iefrm_t += 6; preq->preq_targets[i].target_seq = le32dec(iefrm_t); iefrm_t += 4; } hwmp_recv_preq(vap, ni, wh, preq); IEEE80211_FREE(preq, M_80211_MESH_PREQ); found++; break; } case IEEE80211_ELEMID_MESHPREP: { iefrm_t = iefrm; ndest = verify_mesh_prep_len(vap, wh, iefrm_t); if (ndest < 0) { vap->iv_stats.is_rx_mgtdiscard++; break; } prep = IEEE80211_MALLOC(sizeof(*prep), M_80211_MESH_PREP, IEEE80211_M_NOWAIT | IEEE80211_M_ZERO); KASSERT(prep != NULL, ("prep == NULL")); prep->prep_ie = *iefrm_t++; prep->prep_len = *iefrm_t++; prep->prep_flags = *iefrm_t++; prep->prep_hopcount = *iefrm_t++; prep->prep_ttl = *iefrm_t++; IEEE80211_ADDR_COPY(prep->prep_targetaddr, iefrm_t); iefrm_t += 6; prep->prep_targetseq = le32dec(iefrm_t); iefrm_t += 4; /* NB: May have Target Proxied Address */ if (prep->prep_flags & IEEE80211_MESHPREP_FLAGS_AE) { IEEE80211_ADDR_COPY( prep->prep_target_ext_addr, iefrm_t); iefrm_t += 6; } prep->prep_lifetime = le32dec(iefrm_t); iefrm_t += 4; prep->prep_metric = le32dec(iefrm_t); iefrm_t += 4; IEEE80211_ADDR_COPY(prep->prep_origaddr, iefrm_t); iefrm_t += 6; prep->prep_origseq = le32dec(iefrm_t); iefrm_t += 4; hwmp_recv_prep(vap, ni, wh, prep); IEEE80211_FREE(prep, M_80211_MESH_PREP); found++; break; } case IEEE80211_ELEMID_MESHPERR: { int i = 0; 
iefrm_t = iefrm; ndest = verify_mesh_perr_len(vap, wh, iefrm_t); if (ndest < 0) { vap->iv_stats.is_rx_mgtdiscard++; break; } perr = IEEE80211_MALLOC(sizeof(*perr) + (ndest - 1) * sizeof(*perr->perr_dests), M_80211_MESH_PERR, IEEE80211_M_NOWAIT | IEEE80211_M_ZERO); KASSERT(perr != NULL, ("perr == NULL")); perr->perr_ie = *iefrm_t++; perr->perr_len = *iefrm_t++; perr->perr_ttl = *iefrm_t++; perr->perr_ndests = *iefrm_t++; for (i = 0; iperr_ndests; i++) { perr->perr_dests[i].dest_flags = *iefrm_t++; IEEE80211_ADDR_COPY( perr->perr_dests[i].dest_addr, iefrm_t); iefrm_t += 6; perr->perr_dests[i].dest_seq = le32dec(iefrm_t); iefrm_t += 4; /* NB: May have Target Proxied Address */ if (perr->perr_dests[i].dest_flags & IEEE80211_MESHPERR_FLAGS_AE) { IEEE80211_ADDR_COPY( perr->perr_dests[i].dest_ext_addr, iefrm_t); iefrm_t += 6; } perr->perr_dests[i].dest_rcode = le16dec(iefrm_t); iefrm_t += 2; } hwmp_recv_perr(vap, ni, wh, perr); IEEE80211_FREE(perr, M_80211_MESH_PERR); found++; break; } case IEEE80211_ELEMID_MESHRANN: { const struct ieee80211_meshrann_ie *mrann = (const struct ieee80211_meshrann_ie *) iefrm; if (mrann->rann_len != sizeof(struct ieee80211_meshrann_ie) - 2) { IEEE80211_DISCARD(vap, IEEE80211_MSG_ACTION | IEEE80211_MSG_HWMP, wh, NULL, "%s", "RAN with wrong len"); vap->iv_stats.is_rx_mgtdiscard++; return 1; } memcpy(&rann, mrann, sizeof(rann)); rann.rann_seq = le32dec(&mrann->rann_seq); rann.rann_interval = le32dec(&mrann->rann_interval); rann.rann_metric = le32dec(&mrann->rann_metric); hwmp_recv_rann(vap, ni, wh, &rann); found++; break; } } iefrm += iefrm[1] + 2; } if (!found) { IEEE80211_DISCARD(vap, IEEE80211_MSG_ACTION | IEEE80211_MSG_HWMP, wh, NULL, "%s", "PATH SEL action without IE"); vap->iv_stats.is_rx_mgtdiscard++; } return 0; } static int hwmp_send_action(struct ieee80211vap *vap, const uint8_t da[IEEE80211_ADDR_LEN], uint8_t *ie, size_t len) { struct ieee80211_node *ni; struct ieee80211com *ic; struct ieee80211_bpf_params params; struct mbuf *m; uint8_t *frm; int ret; if (IEEE80211_IS_MULTICAST(da)) { ni = ieee80211_ref_node(vap->iv_bss); #ifdef IEEE80211_DEBUG_REFCNT IEEE80211_DPRINTF(vap, IEEE80211_MSG_NODE, "ieee80211_ref_node (%s:%u) %p<%s> refcnt %d\n", __func__, __LINE__, ni, ether_sprintf(ni->ni_macaddr), ieee80211_node_refcnt(ni)+1); #endif ieee80211_ref_node(ni); } else ni = ieee80211_mesh_find_txnode(vap, da); if (vap->iv_state == IEEE80211_S_CAC) { IEEE80211_NOTE(vap, IEEE80211_MSG_OUTPUT, ni, "block %s frame in CAC state", "HWMP action"); vap->iv_stats.is_tx_badstate++; return EIO; /* XXX */ } KASSERT(ni != NULL, ("null node")); ic = ni->ni_ic; m = ieee80211_getmgtframe(&frm, ic->ic_headroom + sizeof(struct ieee80211_frame), sizeof(struct ieee80211_action) + len ); if (m == NULL) { ieee80211_free_node(ni); vap->iv_stats.is_tx_nobuf++; return ENOMEM; } *frm++ = IEEE80211_ACTION_CAT_MESH; *frm++ = IEEE80211_ACTION_MESH_HWMP; switch (*ie) { case IEEE80211_ELEMID_MESHPREQ: frm = hwmp_add_meshpreq(frm, (struct ieee80211_meshpreq_ie *)ie); break; case IEEE80211_ELEMID_MESHPREP: frm = hwmp_add_meshprep(frm, (struct ieee80211_meshprep_ie *)ie); break; case IEEE80211_ELEMID_MESHPERR: frm = hwmp_add_meshperr(frm, (struct ieee80211_meshperr_ie *)ie); break; case IEEE80211_ELEMID_MESHRANN: frm = hwmp_add_meshrann(frm, (struct ieee80211_meshrann_ie *)ie); break; } m->m_pkthdr.len = m->m_len = frm - mtod(m, uint8_t *); - M_PREPEND(m, sizeof(struct ieee80211_frame), M_NOWAIT); + M_PREPEND(m, sizeof(struct ieee80211_frame), IEEE80211_M_NOWAIT); if (m == NULL) { 
ieee80211_free_node(ni); vap->iv_stats.is_tx_nobuf++; return ENOMEM; } IEEE80211_TX_LOCK(ic); ieee80211_send_setup(ni, m, IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_ACTION, IEEE80211_NONQOS_TID, vap->iv_myaddr, da, vap->iv_myaddr); m->m_flags |= M_ENCAP; /* mark encapsulated */ IEEE80211_NODE_STAT(ni, tx_mgmt); memset(¶ms, 0, sizeof(params)); params.ibp_pri = WME_AC_VO; params.ibp_rate0 = ni->ni_txparms->mgmtrate; if (IEEE80211_IS_MULTICAST(da)) params.ibp_try0 = 1; else params.ibp_try0 = ni->ni_txparms->maxretry; params.ibp_power = ni->ni_txpower; ret = ieee80211_raw_output(vap, ni, m, ¶ms); IEEE80211_TX_UNLOCK(ic); return (ret); } #define ADDSHORT(frm, v) do { \ le16enc(frm, v); \ frm += 2; \ } while (0) #define ADDWORD(frm, v) do { \ le32enc(frm, v); \ frm += 4; \ } while (0) /* * Add a Mesh Path Request IE to a frame. */ #define PREQ_TFLAGS(n) preq->preq_targets[n].target_flags #define PREQ_TADDR(n) preq->preq_targets[n].target_addr #define PREQ_TSEQ(n) preq->preq_targets[n].target_seq static uint8_t * hwmp_add_meshpreq(uint8_t *frm, const struct ieee80211_meshpreq_ie *preq) { int i; *frm++ = IEEE80211_ELEMID_MESHPREQ; *frm++ = preq->preq_len; /* len already calculated */ *frm++ = preq->preq_flags; *frm++ = preq->preq_hopcount; *frm++ = preq->preq_ttl; ADDWORD(frm, preq->preq_id); IEEE80211_ADDR_COPY(frm, preq->preq_origaddr); frm += 6; ADDWORD(frm, preq->preq_origseq); if (preq->preq_flags & IEEE80211_MESHPREQ_FLAGS_AE) { IEEE80211_ADDR_COPY(frm, preq->preq_orig_ext_addr); frm += 6; } ADDWORD(frm, preq->preq_lifetime); ADDWORD(frm, preq->preq_metric); *frm++ = preq->preq_tcount; for (i = 0; i < preq->preq_tcount; i++) { *frm++ = PREQ_TFLAGS(i); IEEE80211_ADDR_COPY(frm, PREQ_TADDR(i)); frm += 6; ADDWORD(frm, PREQ_TSEQ(i)); } return frm; } #undef PREQ_TFLAGS #undef PREQ_TADDR #undef PREQ_TSEQ /* * Add a Mesh Path Reply IE to a frame. */ static uint8_t * hwmp_add_meshprep(uint8_t *frm, const struct ieee80211_meshprep_ie *prep) { *frm++ = IEEE80211_ELEMID_MESHPREP; *frm++ = prep->prep_len; /* len already calculated */ *frm++ = prep->prep_flags; *frm++ = prep->prep_hopcount; *frm++ = prep->prep_ttl; IEEE80211_ADDR_COPY(frm, prep->prep_targetaddr); frm += 6; ADDWORD(frm, prep->prep_targetseq); if (prep->prep_flags & IEEE80211_MESHPREP_FLAGS_AE) { IEEE80211_ADDR_COPY(frm, prep->prep_target_ext_addr); frm += 6; } ADDWORD(frm, prep->prep_lifetime); ADDWORD(frm, prep->prep_metric); IEEE80211_ADDR_COPY(frm, prep->prep_origaddr); frm += 6; ADDWORD(frm, prep->prep_origseq); return frm; } /* * Add a Mesh Path Error IE to a frame. */ #define PERR_DFLAGS(n) perr->perr_dests[n].dest_flags #define PERR_DADDR(n) perr->perr_dests[n].dest_addr #define PERR_DSEQ(n) perr->perr_dests[n].dest_seq #define PERR_EXTADDR(n) perr->perr_dests[n].dest_ext_addr #define PERR_DRCODE(n) perr->perr_dests[n].dest_rcode static uint8_t * hwmp_add_meshperr(uint8_t *frm, const struct ieee80211_meshperr_ie *perr) { int i; *frm++ = IEEE80211_ELEMID_MESHPERR; *frm++ = perr->perr_len; /* len already calculated */ *frm++ = perr->perr_ttl; *frm++ = perr->perr_ndests; for (i = 0; i < perr->perr_ndests; i++) { *frm++ = PERR_DFLAGS(i); IEEE80211_ADDR_COPY(frm, PERR_DADDR(i)); frm += 6; ADDWORD(frm, PERR_DSEQ(i)); if (PERR_DFLAGS(i) & IEEE80211_MESHPERR_FLAGS_AE) { IEEE80211_ADDR_COPY(frm, PERR_EXTADDR(i)); frm += 6; } ADDSHORT(frm, PERR_DRCODE(i)); } return frm; } #undef PERR_DFLAGS #undef PERR_DADDR #undef PERR_DSEQ #undef PERR_EXTADDR #undef PERR_DRCODE /* * Add a Root Annoucement IE to a frame. 
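 */

/*
 * [Illustration only, not part of this change.]  Standalone sketch of the
 * length bookkeeping behind hwmp_add_meshpreq() above and
 * verify_mesh_preq_len() earlier: a PREQ body is a fixed header (flags,
 * hop count, TTL, 32-bit path discovery id, originator address and
 * sequence number, lifetime, metric, target count), grows by six octets
 * when the AE flag adds the originator external address, and carries an
 * 11-octet record per target.  The EX_* sizes are derived from that
 * layout; the tree's IEEE80211_MESHPREQ_* constants are authoritative.
 */
#define	EX_MESHPREQ_BASE_SZ	26	/* 1+1+1+4+6+4+4+4+1 */
#define	EX_MESHPREQ_BASE_SZ_AE	(EX_MESHPREQ_BASE_SZ + 6)
#define	EX_MESHPREQ_TRGT_SZ	11	/* flags + address + sequence number */

static inline int
ex_preq_len(int ae, int ntargets)
{
	return ((ae ? EX_MESHPREQ_BASE_SZ_AE : EX_MESHPREQ_BASE_SZ) +
	    ntargets * EX_MESHPREQ_TRGT_SZ);
}

/*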
*/ static uint8_t * hwmp_add_meshrann(uint8_t *frm, const struct ieee80211_meshrann_ie *rann) { *frm++ = IEEE80211_ELEMID_MESHRANN; *frm++ = rann->rann_len; *frm++ = rann->rann_flags; *frm++ = rann->rann_hopcount; *frm++ = rann->rann_ttl; IEEE80211_ADDR_COPY(frm, rann->rann_addr); frm += 6; ADDWORD(frm, rann->rann_seq); ADDWORD(frm, rann->rann_interval); ADDWORD(frm, rann->rann_metric); return frm; } static void hwmp_rootmode_setup(struct ieee80211vap *vap) { struct ieee80211_hwmp_state *hs = vap->iv_hwmp; struct ieee80211_mesh_state *ms = vap->iv_mesh; switch (hs->hs_rootmode) { case IEEE80211_HWMP_ROOTMODE_DISABLED: callout_drain(&hs->hs_roottimer); ms->ms_flags &= ~IEEE80211_MESHFLAGS_ROOT; break; case IEEE80211_HWMP_ROOTMODE_NORMAL: case IEEE80211_HWMP_ROOTMODE_PROACTIVE: callout_reset(&hs->hs_roottimer, ieee80211_hwmp_rootint, hwmp_rootmode_cb, vap); ms->ms_flags |= IEEE80211_MESHFLAGS_ROOT; break; case IEEE80211_HWMP_ROOTMODE_RANN: callout_reset(&hs->hs_roottimer, ieee80211_hwmp_rannint, hwmp_rootmode_rann_cb, vap); ms->ms_flags |= IEEE80211_MESHFLAGS_ROOT; break; } } /* * Send a broadcast Path Request to find all nodes on the mesh. We are * called when the vap is configured as a HWMP root node. */ #define PREQ_TFLAGS(n) preq.preq_targets[n].target_flags #define PREQ_TADDR(n) preq.preq_targets[n].target_addr #define PREQ_TSEQ(n) preq.preq_targets[n].target_seq static void hwmp_rootmode_cb(void *arg) { struct ieee80211vap *vap = (struct ieee80211vap *)arg; struct ieee80211_hwmp_state *hs = vap->iv_hwmp; struct ieee80211_mesh_state *ms = vap->iv_mesh; struct ieee80211_meshpreq_ie preq; IEEE80211_NOTE(vap, IEEE80211_MSG_HWMP, vap->iv_bss, "%s", "send broadcast PREQ"); preq.preq_flags = 0; if (ms->ms_flags & IEEE80211_MESHFLAGS_GATE) preq.preq_flags |= IEEE80211_MESHPREQ_FLAGS_GATE; if (hs->hs_rootmode == IEEE80211_HWMP_ROOTMODE_PROACTIVE) preq.preq_flags |= IEEE80211_MESHPREQ_FLAGS_PP; preq.preq_hopcount = 0; preq.preq_ttl = ms->ms_ttl; preq.preq_id = ++hs->hs_preqid; IEEE80211_ADDR_COPY(preq.preq_origaddr, vap->iv_myaddr); preq.preq_origseq = ++hs->hs_seq; preq.preq_lifetime = ticks_to_msecs(ieee80211_hwmp_roottimeout); preq.preq_metric = IEEE80211_MESHLMETRIC_INITIALVAL; preq.preq_tcount = 1; IEEE80211_ADDR_COPY(PREQ_TADDR(0), broadcastaddr); PREQ_TFLAGS(0) = IEEE80211_MESHPREQ_TFLAGS_TO | IEEE80211_MESHPREQ_TFLAGS_USN; PREQ_TSEQ(0) = 0; vap->iv_stats.is_hwmp_rootreqs++; /* NB: we enforce rate check ourself */ hwmp_send_preq(vap, broadcastaddr, &preq, NULL, NULL); hwmp_rootmode_setup(vap); } #undef PREQ_TFLAGS #undef PREQ_TADDR #undef PREQ_TSEQ /* * Send a Root Annoucement (RANN) to find all the nodes on the mesh. We are * called when the vap is configured as a HWMP RANN root node. 
*/ static void hwmp_rootmode_rann_cb(void *arg) { struct ieee80211vap *vap = (struct ieee80211vap *)arg; struct ieee80211_hwmp_state *hs = vap->iv_hwmp; struct ieee80211_mesh_state *ms = vap->iv_mesh; struct ieee80211_meshrann_ie rann; IEEE80211_NOTE(vap, IEEE80211_MSG_HWMP, vap->iv_bss, "%s", "send broadcast RANN"); rann.rann_flags = 0; if (ms->ms_flags & IEEE80211_MESHFLAGS_GATE) rann.rann_flags |= IEEE80211_MESHFLAGS_GATE; rann.rann_hopcount = 0; rann.rann_ttl = ms->ms_ttl; IEEE80211_ADDR_COPY(rann.rann_addr, vap->iv_myaddr); rann.rann_seq = ++hs->hs_seq; rann.rann_interval = ieee80211_hwmp_rannint; rann.rann_metric = IEEE80211_MESHLMETRIC_INITIALVAL; vap->iv_stats.is_hwmp_rootrann++; hwmp_send_rann(vap, broadcastaddr, &rann); hwmp_rootmode_setup(vap); } /* * Update forwarding information to TA if metric improves. */ static void hwmp_update_transmitter(struct ieee80211vap *vap, struct ieee80211_node *ni, const char *hwmp_frame) { struct ieee80211_mesh_state *ms = vap->iv_mesh; struct ieee80211_mesh_route *rttran = NULL; /* Transmitter */ int metric = 0; rttran = ieee80211_mesh_rt_find(vap, ni->ni_macaddr); if (rttran == NULL) { rttran = ieee80211_mesh_rt_add(vap, ni->ni_macaddr); if (rttran == NULL) { IEEE80211_NOTE(vap, IEEE80211_MSG_HWMP, ni, "unable to add path to transmitter %6D of %s", ni->ni_macaddr, ":", hwmp_frame); vap->iv_stats.is_mesh_rtaddfailed++; return; } } metric = ms->ms_pmetric->mpm_metric(ni); if (!(rttran->rt_flags & IEEE80211_MESHRT_FLAGS_VALID) || rttran->rt_metric > metric) { IEEE80211_NOTE(vap, IEEE80211_MSG_HWMP, ni, "%s path to transmitter %6D of %s, metric %d:%d", rttran->rt_flags & IEEE80211_MESHRT_FLAGS_VALID ? "prefer" : "update", ni->ni_macaddr, ":", hwmp_frame, rttran->rt_metric, metric); IEEE80211_ADDR_COPY(rttran->rt_nexthop, ni->ni_macaddr); rttran->rt_metric = metric; rttran->rt_nhops = 1; ieee80211_mesh_rt_update(rttran, ms->ms_ppath->mpp_inact); rttran->rt_flags = IEEE80211_MESHRT_FLAGS_VALID; } } #define PREQ_TFLAGS(n) preq->preq_targets[n].target_flags #define PREQ_TADDR(n) preq->preq_targets[n].target_addr #define PREQ_TSEQ(n) preq->preq_targets[n].target_seq static void hwmp_recv_preq(struct ieee80211vap *vap, struct ieee80211_node *ni, const struct ieee80211_frame *wh, const struct ieee80211_meshpreq_ie *preq) { struct ieee80211_mesh_state *ms = vap->iv_mesh; struct ieee80211_mesh_route *rtorig = NULL; struct ieee80211_mesh_route *rtorig_ext = NULL; struct ieee80211_mesh_route *rttarg = NULL; struct ieee80211_hwmp_route *hrorig = NULL; struct ieee80211_hwmp_route *hrtarg = NULL; struct ieee80211_hwmp_state *hs = vap->iv_hwmp; ieee80211_hwmp_seq preqid; /* last seen preqid for orig */ uint32_t metric = 0; /* * Ignore PREQs from us. Could happen because someone forward it * back to us. */ if (IEEE80211_ADDR_EQ(vap->iv_myaddr, preq->preq_origaddr)) return; IEEE80211_NOTE(vap, IEEE80211_MSG_HWMP, ni, "received PREQ, orig %6D, targ(0) %6D", preq->preq_origaddr, ":", PREQ_TADDR(0), ":"); /* * Acceptance criteria: (if the PREQ is not for us or not broadcast, * or an external mac address not proxied by us), * AND forwarding is disabled, discard this PREQ. 
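 */

/*
 * [Illustration only, not part of this change.]  Standalone demo of the
 * modulo-2^32 comparison that the HWMP_SEQ_*() macros perform on HWMP
 * sequence numbers (used heavily in hwmp_recv_preq() below): the
 * subtraction is interpreted as signed so a counter that has wrapped
 * still compares as "newer".
 */
#include <stdint.h>
#include <stdio.h>

static inline int
ex_seq_lt(uint32_t a, uint32_t b)
{
	return ((int32_t)(a - b) < 0);
}

int
main(void)
{
	/* prints "1 0": 0xffffffff is older than 1 across the wrap */
	printf("%d %d\n", ex_seq_lt(0xffffffffU, 1), ex_seq_lt(1, 0xffffffffU));
	return (0);
}

/*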
*/ rttarg = ieee80211_mesh_rt_find(vap, PREQ_TADDR(0)); if (!(ms->ms_flags & IEEE80211_MESHFLAGS_FWD) && (!IEEE80211_ADDR_EQ(vap->iv_myaddr, PREQ_TADDR(0)) || !IEEE80211_IS_MULTICAST(PREQ_TADDR(0)) || (rttarg != NULL && rttarg->rt_flags & IEEE80211_MESHRT_FLAGS_PROXY && IEEE80211_ADDR_EQ(vap->iv_myaddr, rttarg->rt_mesh_gate)))) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_HWMP, preq->preq_origaddr, NULL, "%s", "not accepting PREQ"); return; } /* * Acceptance criteria: if unicast addressed * AND no valid forwarding for Target of PREQ, discard this PREQ. */ if(rttarg != NULL) hrtarg = IEEE80211_MESH_ROUTE_PRIV(rttarg, struct ieee80211_hwmp_route); /* Address mode: ucast */ if(preq->preq_flags & IEEE80211_MESHPREQ_FLAGS_AM && rttarg == NULL && !IEEE80211_ADDR_EQ(vap->iv_myaddr, PREQ_TADDR(0))) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_HWMP, preq->preq_origaddr, NULL, "unicast addressed PREQ of unknown target %6D", PREQ_TADDR(0), ":"); return; } /* PREQ ACCEPTED */ rtorig = ieee80211_mesh_rt_find(vap, preq->preq_origaddr); if (rtorig == NULL) { rtorig = ieee80211_mesh_rt_add(vap, preq->preq_origaddr); if (rtorig == NULL) { IEEE80211_NOTE(vap, IEEE80211_MSG_HWMP, ni, "unable to add orig path to %6D", preq->preq_origaddr, ":"); vap->iv_stats.is_mesh_rtaddfailed++; return; } IEEE80211_NOTE(vap, IEEE80211_MSG_HWMP, ni, "adding originator %6D", preq->preq_origaddr, ":"); } hrorig = IEEE80211_MESH_ROUTE_PRIV(rtorig, struct ieee80211_hwmp_route); /* record last seen preqid */ preqid = hrorig->hr_preqid; hrorig->hr_preqid = HWMP_SEQ_MAX(hrorig->hr_preqid, preq->preq_id); /* Data creation and update of forwarding information * according to Table 11C-8 for originator mesh STA. */ metric = preq->preq_metric + ms->ms_pmetric->mpm_metric(ni); if (HWMP_SEQ_GT(preq->preq_origseq, hrorig->hr_seq) || (HWMP_SEQ_EQ(preq->preq_origseq, hrorig->hr_seq) && metric < rtorig->rt_metric)) { hrorig->hr_seq = preq->preq_origseq; IEEE80211_ADDR_COPY(rtorig->rt_nexthop, wh->i_addr2); rtorig->rt_metric = metric; rtorig->rt_nhops = preq->preq_hopcount + 1; ieee80211_mesh_rt_update(rtorig, preq->preq_lifetime); /* Path to orig is valid now. * NB: we know it can't be Proxy, and if it is GATE * it will be marked below. */ rtorig->rt_flags = IEEE80211_MESHRT_FLAGS_VALID; } else if ((hrtarg != NULL && !HWMP_SEQ_EQ(hrtarg->hr_seq, PREQ_TSEQ(0))) || (rtorig->rt_flags & IEEE80211_MESHRT_FLAGS_VALID && preqid >= preq->preq_id)) { IEEE80211_NOTE(vap, IEEE80211_MSG_HWMP, ni, "discard PREQ from %6D, old seqno %u <= %u," " or old preqid %u < %u", preq->preq_origaddr, ":", preq->preq_origseq, hrorig->hr_seq, preq->preq_id, preqid); return; } /* Update forwarding information to TA if metric improves. */ hwmp_update_transmitter(vap, ni, "PREQ"); /* * Check if the PREQ is addressed to us. * or a Proxy currently gated by us. 
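 * If it is, we reply with a PREP: our own HWMP sequence number is first
 * advanced to max(current, the PREQ's target sequence number) + 1, and,
 * when the matched target is an external address proxied by us, the
 * reply carries the AE (external address) fields taken from the proxy
 * route entry before being sent back towards the originator.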
*/ if (IEEE80211_ADDR_EQ(vap->iv_myaddr, PREQ_TADDR(0)) || (ms->ms_flags & IEEE80211_MESHFLAGS_GATE && rttarg != NULL && IEEE80211_ADDR_EQ(vap->iv_myaddr, rttarg->rt_mesh_gate) && rttarg->rt_flags & IEEE80211_MESHRT_FLAGS_PROXY && rttarg->rt_flags & IEEE80211_MESHRT_FLAGS_VALID)) { struct ieee80211_meshprep_ie prep; /* * When we are the target we shall update our own HWMP seq * number with max of (current and preq->seq) + 1 */ hs->hs_seq = HWMP_SEQ_MAX(hs->hs_seq, PREQ_TSEQ(0)) + 1; prep.prep_flags = 0; prep.prep_hopcount = 0; prep.prep_metric = IEEE80211_MESHLMETRIC_INITIALVAL; IEEE80211_ADDR_COPY(prep.prep_targetaddr, vap->iv_myaddr); if (rttarg != NULL && /* if NULL it means we are the target */ rttarg->rt_flags & IEEE80211_MESHRT_FLAGS_PROXY) { IEEE80211_NOTE(vap, IEEE80211_MSG_HWMP, ni, "reply for proxy %6D", rttarg->rt_dest, ":"); prep.prep_flags |= IEEE80211_MESHPREP_FLAGS_AE; IEEE80211_ADDR_COPY(prep.prep_target_ext_addr, rttarg->rt_dest); /* update proxy seqno to HWMP seqno */ rttarg->rt_ext_seq = hs->hs_seq; prep.prep_hopcount = rttarg->rt_nhops; prep.prep_metric = rttarg->rt_metric; IEEE80211_ADDR_COPY(prep.prep_targetaddr, rttarg->rt_mesh_gate); } /* * Build and send a PREP frame. */ prep.prep_ttl = ms->ms_ttl; prep.prep_targetseq = hs->hs_seq; prep.prep_lifetime = preq->preq_lifetime; IEEE80211_ADDR_COPY(prep.prep_origaddr, preq->preq_origaddr); prep.prep_origseq = preq->preq_origseq; IEEE80211_NOTE(vap, IEEE80211_MSG_HWMP, ni, "reply to %6D", preq->preq_origaddr, ":"); hwmp_send_prep(vap, wh->i_addr2, &prep); return; } /* we may update our proxy information for the orig external */ else if (preq->preq_flags & IEEE80211_MESHPREQ_FLAGS_AE) { rtorig_ext = ieee80211_mesh_rt_find(vap, preq->preq_orig_ext_addr); if (rtorig_ext == NULL) { rtorig_ext = ieee80211_mesh_rt_add(vap, preq->preq_orig_ext_addr); if (rtorig_ext == NULL) { IEEE80211_NOTE(vap, IEEE80211_MSG_HWMP, ni, "unable to add orig ext proxy to %6D", preq->preq_orig_ext_addr, ":"); vap->iv_stats.is_mesh_rtaddfailed++; return; } IEEE80211_ADDR_COPY(rtorig_ext->rt_mesh_gate, preq->preq_origaddr); } rtorig_ext->rt_ext_seq = preq->preq_origseq; ieee80211_mesh_rt_update(rtorig_ext, preq->preq_lifetime); } /* * Proactive PREQ: reply with a proactive PREP to the * root STA if requested. */ if (IEEE80211_ADDR_EQ(PREQ_TADDR(0), broadcastaddr) && (PREQ_TFLAGS(0) & IEEE80211_MESHPREQ_TFLAGS_TO)) { IEEE80211_NOTE(vap, IEEE80211_MSG_HWMP, ni, "root mesh station @ %6D", preq->preq_origaddr, ":"); /* Check if root is a mesh gate, mark it */ if (preq->preq_flags & IEEE80211_MESHPREQ_FLAGS_GATE) { struct ieee80211_mesh_gate_route *gr; rtorig->rt_flags |= IEEE80211_MESHRT_FLAGS_GATE; gr = ieee80211_mesh_mark_gate(vap, preq->preq_origaddr, rtorig); gr->gr_lastseq = 0; /* NOT GANN */ } /* * Reply with a PREP if we don't have a path to the root * or if the root sent us a proactive PREQ. */ if ((rtorig->rt_flags & IEEE80211_MESHRT_FLAGS_VALID) == 0 || (preq->preq_flags & IEEE80211_MESHPREQ_FLAGS_PP)) { struct ieee80211_meshprep_ie prep; prep.prep_flags = 0; prep.prep_hopcount = 0; prep.prep_ttl = ms->ms_ttl; IEEE80211_ADDR_COPY(prep.prep_origaddr, preq->preq_origaddr); prep.prep_origseq = preq->preq_origseq; prep.prep_lifetime = preq->preq_lifetime; prep.prep_metric = IEEE80211_MESHLMETRIC_INITIALVAL; IEEE80211_ADDR_COPY(prep.prep_targetaddr, vap->iv_myaddr); prep.prep_targetseq = ++hs->hs_seq; hwmp_send_prep(vap, rtorig->rt_nexthop, &prep); } } /* * Forwarding and Intermediate reply for PREQs with 1 target. 
*/ if ((preq->preq_tcount == 1) && (preq->preq_ttl > 1) && (ms->ms_flags & IEEE80211_MESHFLAGS_FWD)) { struct ieee80211_meshpreq_ie ppreq; /* propagated PREQ */ memcpy(&ppreq, preq, sizeof(ppreq)); /* * We have a valid route to this node. * NB: if target is proxy dont reply. */ if (rttarg != NULL && rttarg->rt_flags & IEEE80211_MESHRT_FLAGS_VALID && !(rttarg->rt_flags & IEEE80211_MESHRT_FLAGS_PROXY)) { /* * Check if we can send an intermediate Path Reply, * i.e., Target Only bit is not set and target is not * the MAC broadcast address. */ if (!(PREQ_TFLAGS(0) & IEEE80211_MESHPREQ_TFLAGS_TO) && !IEEE80211_ADDR_EQ(PREQ_TADDR(0), broadcastaddr)) { struct ieee80211_meshprep_ie prep; IEEE80211_NOTE(vap, IEEE80211_MSG_HWMP, ni, "intermediate reply for PREQ from %6D", preq->preq_origaddr, ":"); prep.prep_flags = 0; prep.prep_hopcount = rttarg->rt_nhops; prep.prep_ttl = ms->ms_ttl; IEEE80211_ADDR_COPY(&prep.prep_targetaddr, PREQ_TADDR(0)); prep.prep_targetseq = hrtarg->hr_seq; prep.prep_lifetime = preq->preq_lifetime; prep.prep_metric =rttarg->rt_metric; IEEE80211_ADDR_COPY(&prep.prep_origaddr, preq->preq_origaddr); prep.prep_origseq = hrorig->hr_seq; hwmp_send_prep(vap, rtorig->rt_nexthop, &prep); /* * Set TO and unset RF bits because we have * sent a PREP. */ ppreq.preq_targets[0].target_flags |= IEEE80211_MESHPREQ_TFLAGS_TO; } } IEEE80211_NOTE(vap, IEEE80211_MSG_HWMP, ni, "forward PREQ from %6D", preq->preq_origaddr, ":"); ppreq.preq_hopcount += 1; ppreq.preq_ttl -= 1; ppreq.preq_metric += ms->ms_pmetric->mpm_metric(ni); /* don't do PREQ ratecheck when we propagate */ hwmp_send_preq(vap, broadcastaddr, &ppreq, NULL, NULL); } } #undef PREQ_TFLAGS #undef PREQ_TADDR #undef PREQ_TSEQ static int hwmp_send_preq(struct ieee80211vap *vap, const uint8_t da[IEEE80211_ADDR_LEN], struct ieee80211_meshpreq_ie *preq, struct timeval *last, struct timeval *minint) { /* * Enforce PREQ interval. * NB: Proactive ROOT PREQs rate is handled by cb task. */ if (last != NULL && minint != NULL) { if (ratecheck(last, minint) == 0) return EALREADY; /* XXX: we should postpone */ getmicrouptime(last); } /* * mesh preq action frame format * [6] da * [6] sa * [6] addr3 = sa * [1] action * [1] category * [tlv] mesh path request */ preq->preq_ie = IEEE80211_ELEMID_MESHPREQ; preq->preq_len = (preq->preq_flags & IEEE80211_MESHPREQ_FLAGS_AE ? IEEE80211_MESHPREQ_BASE_SZ_AE : IEEE80211_MESHPREQ_BASE_SZ) + preq->preq_tcount * IEEE80211_MESHPREQ_TRGT_SZ; return hwmp_send_action(vap, da, (uint8_t *)preq, preq->preq_len+2); } static void hwmp_recv_prep(struct ieee80211vap *vap, struct ieee80211_node *ni, const struct ieee80211_frame *wh, const struct ieee80211_meshprep_ie *prep) { #define IS_PROXY(rt) (rt->rt_flags & IEEE80211_MESHRT_FLAGS_PROXY) #define PROXIED_BY_US(rt) \ (IEEE80211_ADDR_EQ(vap->iv_myaddr, rt->rt_mesh_gate)) struct ieee80211_mesh_state *ms = vap->iv_mesh; struct ieee80211_hwmp_state *hs = vap->iv_hwmp; struct ieee80211_mesh_route *rt = NULL; struct ieee80211_mesh_route *rtorig = NULL; struct ieee80211_mesh_route *rtext = NULL; struct ieee80211_hwmp_route *hr; struct ieee80211com *ic = vap->iv_ic; struct mbuf *m, *next; uint32_t metric = 0; const uint8_t *addr; IEEE80211_NOTE(vap, IEEE80211_MSG_HWMP, ni, "received PREP, orig %6D, targ %6D", prep->prep_origaddr, ":", prep->prep_targetaddr, ":"); /* * Acceptance criteria: (If the corresponding PREP was not generated * by us OR not generated by an external mac that is not proxied by us) * AND forwarding is disabled, discard this PREP. 
*/ rtorig = ieee80211_mesh_rt_find(vap, prep->prep_origaddr); if ((!IEEE80211_ADDR_EQ(vap->iv_myaddr, prep->prep_origaddr) || (rtorig != NULL && IS_PROXY(rtorig) && !PROXIED_BY_US(rtorig))) && !(ms->ms_flags & IEEE80211_MESHFLAGS_FWD)){ IEEE80211_NOTE(vap, IEEE80211_MSG_HWMP, ni, "discard PREP, orig(%6D) not proxied or generated by us", prep->prep_origaddr, ":"); return; } /* PREP ACCEPTED */ /* * If accepted shall create or update the active forwarding information * it maintains for the target mesh STA of the PREP (according to the * rules defined in 13.10.8.4). If the conditions for creating or * updating the forwarding information have not been met in those * rules, no further steps are applied to the PREP. */ rt = ieee80211_mesh_rt_find(vap, prep->prep_targetaddr); if (rt == NULL) { rt = ieee80211_mesh_rt_add(vap, prep->prep_targetaddr); if (rt == NULL) { IEEE80211_NOTE(vap, IEEE80211_MSG_HWMP, ni, "unable to add PREP path to %6D", prep->prep_targetaddr, ":"); vap->iv_stats.is_mesh_rtaddfailed++; return; } IEEE80211_NOTE(vap, IEEE80211_MSG_HWMP, ni, "adding target %6D", prep->prep_targetaddr, ":"); } hr = IEEE80211_MESH_ROUTE_PRIV(rt, struct ieee80211_hwmp_route); /* update path metric */ metric = prep->prep_metric + ms->ms_pmetric->mpm_metric(ni); if ((rt->rt_flags & IEEE80211_MESHRT_FLAGS_VALID)) { if (HWMP_SEQ_LT(prep->prep_targetseq, hr->hr_seq)) { IEEE80211_NOTE(vap, IEEE80211_MSG_HWMP, ni, "discard PREP from %6D, old seq no %u < %u", prep->prep_targetaddr, ":", prep->prep_targetseq, hr->hr_seq); return; } else if (HWMP_SEQ_LEQ(prep->prep_targetseq, hr->hr_seq) && metric > rt->rt_metric) { IEEE80211_NOTE(vap, IEEE80211_MSG_HWMP, ni, "discard PREP from %6D, new metric %u > %u", prep->prep_targetaddr, ":", metric, rt->rt_metric); return; } } IEEE80211_NOTE(vap, IEEE80211_MSG_HWMP, ni, "%s path to %6D, hopcount %d:%d metric %d:%d", rt->rt_flags & IEEE80211_MESHRT_FLAGS_VALID ? "prefer" : "update", prep->prep_targetaddr, ":", rt->rt_nhops, prep->prep_hopcount + 1, rt->rt_metric, metric); hr->hr_seq = prep->prep_targetseq; hr->hr_preqretries = 0; IEEE80211_ADDR_COPY(rt->rt_nexthop, ni->ni_macaddr); rt->rt_metric = metric; rt->rt_nhops = prep->prep_hopcount + 1; ieee80211_mesh_rt_update(rt, prep->prep_lifetime); if (rt->rt_flags & IEEE80211_MESHRT_FLAGS_DISCOVER) { /* discovery complete */ rt->rt_flags &= ~IEEE80211_MESHRT_FLAGS_DISCOVER; } rt->rt_flags |= IEEE80211_MESHRT_FLAGS_VALID; /* mark valid */ /* Update forwarding information to TA if metric improves */ hwmp_update_transmitter(vap, ni, "PREP"); /* * If it's NOT for us, propagate the PREP */ if (!IEEE80211_ADDR_EQ(vap->iv_myaddr, prep->prep_origaddr) && prep->prep_ttl > 1 && prep->prep_hopcount < hs->hs_maxhops) { struct ieee80211_meshprep_ie pprep; /* propagated PREP */ /* * NB: We should already have setup the path to orig * mesh STA when we propagated PREQ to target mesh STA, * no PREP is generated without a corresponding PREQ. * XXX: for now just ignore. */ if (rtorig == NULL) { IEEE80211_NOTE(vap, IEEE80211_MSG_HWMP, ni, "received PREP for an unknown orig(%6D)", prep->prep_origaddr, ":"); return; } IEEE80211_NOTE(vap, IEEE80211_MSG_HWMP, ni, "propagate PREP from %6D", prep->prep_targetaddr, ":"); memcpy(&pprep, prep, sizeof(pprep)); pprep.prep_hopcount += 1; pprep.prep_ttl -= 1; pprep.prep_metric += ms->ms_pmetric->mpm_metric(ni); hwmp_send_prep(vap, rtorig->rt_nexthop, &pprep); /* precursor list for the Target Mesh STA Address is updated */ } /* * Check if we received a PREP w/ AE and store target external address. 
* We may store target external address if recevied PREP w/ AE * and we are not final destination */ if (prep->prep_flags & IEEE80211_MESHPREP_FLAGS_AE) { rtext = ieee80211_mesh_rt_find(vap, prep->prep_target_ext_addr); if (rtext == NULL) { rtext = ieee80211_mesh_rt_add(vap, prep->prep_target_ext_addr); if (rtext == NULL) { IEEE80211_NOTE(vap, IEEE80211_MSG_HWMP, ni, "unable to add PREP path to proxy %6D", prep->prep_targetaddr, ":"); vap->iv_stats.is_mesh_rtaddfailed++; return; } } IEEE80211_NOTE(vap, IEEE80211_MSG_HWMP, ni, "%s path to %6D, hopcount %d:%d metric %d:%d", rtext->rt_flags & IEEE80211_MESHRT_FLAGS_VALID ? "prefer" : "update", prep->prep_target_ext_addr, ":", rtext->rt_nhops, prep->prep_hopcount + 1, rtext->rt_metric, metric); rtext->rt_flags = IEEE80211_MESHRT_FLAGS_PROXY | IEEE80211_MESHRT_FLAGS_VALID; IEEE80211_ADDR_COPY(rtext->rt_dest, prep->prep_target_ext_addr); IEEE80211_ADDR_COPY(rtext->rt_mesh_gate, prep->prep_targetaddr); IEEE80211_ADDR_COPY(rtext->rt_nexthop, wh->i_addr2); rtext->rt_metric = metric; rtext->rt_lifetime = prep->prep_lifetime; rtext->rt_nhops = prep->prep_hopcount + 1; rtext->rt_ext_seq = prep->prep_origseq; /* new proxy seq */ /* * XXX: proxy entries have no HWMP priv data, * nullify them to be sure? */ } /* * Check for frames queued awaiting path discovery. * XXX probably can tell exactly and avoid remove call * NB: hash may have false matches, if so they will get * stuck back on the stageq because there won't be * a path. */ addr = prep->prep_flags & IEEE80211_MESHPREP_FLAGS_AE ? prep->prep_target_ext_addr : prep->prep_targetaddr; m = ieee80211_ageq_remove(&ic->ic_stageq, (struct ieee80211_node *)(uintptr_t) ieee80211_mac_hash(ic, addr)); /* either dest or ext_dest */ /* * All frames in the stageq here should be non-M_ENCAP; or things * will get very unhappy. */ for (; m != NULL; m = next) { next = m->m_nextpkt; m->m_nextpkt = NULL; IEEE80211_NOTE(vap, IEEE80211_MSG_HWMP, ni, "flush queued frame %p len %d", m, m->m_pkthdr.len); /* * If the mbuf has M_ENCAP set, ensure we free it. * Note that after if_transmit() is called, m is invalid. */ (void) ieee80211_vap_xmitpkt(vap, m); } #undef IS_PROXY #undef PROXIED_BY_US } static int hwmp_send_prep(struct ieee80211vap *vap, const uint8_t da[IEEE80211_ADDR_LEN], struct ieee80211_meshprep_ie *prep) { /* NB: there's no PREP minimum interval. */ /* * mesh prep action frame format * [6] da * [6] sa * [6] addr3 = sa * [1] action * [1] category * [tlv] mesh path reply */ prep->prep_ie = IEEE80211_ELEMID_MESHPREP; prep->prep_len = prep->prep_flags & IEEE80211_MESHPREP_FLAGS_AE ? 
IEEE80211_MESHPREP_BASE_SZ_AE : IEEE80211_MESHPREP_BASE_SZ; return hwmp_send_action(vap, da, (uint8_t *)prep, prep->prep_len + 2); } #define PERR_DFLAGS(n) perr.perr_dests[n].dest_flags #define PERR_DADDR(n) perr.perr_dests[n].dest_addr #define PERR_DSEQ(n) perr.perr_dests[n].dest_seq #define PERR_DRCODE(n) perr.perr_dests[n].dest_rcode static void hwmp_peerdown(struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211_mesh_state *ms = vap->iv_mesh; struct ieee80211_meshperr_ie perr; struct ieee80211_mesh_route *rt; struct ieee80211_hwmp_route *hr; rt = ieee80211_mesh_rt_find(vap, ni->ni_macaddr); if (rt == NULL) return; hr = IEEE80211_MESH_ROUTE_PRIV(rt, struct ieee80211_hwmp_route); IEEE80211_NOTE(vap, IEEE80211_MSG_HWMP, ni, "%s", "delete route entry"); perr.perr_ttl = ms->ms_ttl; perr.perr_ndests = 1; PERR_DFLAGS(0) = 0; if (hr->hr_seq == 0) PERR_DFLAGS(0) |= IEEE80211_MESHPERR_DFLAGS_USN; PERR_DFLAGS(0) |= IEEE80211_MESHPERR_DFLAGS_RC; IEEE80211_ADDR_COPY(PERR_DADDR(0), rt->rt_dest); PERR_DSEQ(0) = ++hr->hr_seq; PERR_DRCODE(0) = IEEE80211_REASON_MESH_PERR_DEST_UNREACH; /* NB: flush everything passing through peer */ ieee80211_mesh_rt_flush_peer(vap, ni->ni_macaddr); hwmp_send_perr(vap, broadcastaddr, &perr); } #undef PERR_DFLAGS #undef PERR_DADDR #undef PERR_DSEQ #undef PERR_DRCODE #define PERR_DFLAGS(n) perr->perr_dests[n].dest_flags #define PERR_DADDR(n) perr->perr_dests[n].dest_addr #define PERR_DSEQ(n) perr->perr_dests[n].dest_seq #define PERR_DEXTADDR(n) perr->perr_dests[n].dest_ext_addr static void hwmp_recv_perr(struct ieee80211vap *vap, struct ieee80211_node *ni, const struct ieee80211_frame *wh, const struct ieee80211_meshperr_ie *perr) { struct ieee80211_mesh_state *ms = vap->iv_mesh; struct ieee80211_mesh_route *rt = NULL; struct ieee80211_mesh_route *rt_ext = NULL; struct ieee80211_hwmp_route *hr; struct ieee80211_meshperr_ie *pperr = NULL; int i, j = 0, forward = 0; IEEE80211_NOTE(vap, IEEE80211_MSG_HWMP, ni, "received PERR from %6D", wh->i_addr2, ":"); /* * if forwarding is true, prepare pperr */ if (ms->ms_flags & IEEE80211_MESHFLAGS_FWD) { forward = 1; pperr = IEEE80211_MALLOC(sizeof(*perr) + 31*sizeof(*perr->perr_dests), M_80211_MESH_PERR, IEEE80211_M_NOWAIT); /* XXX: magic number, 32 err dests */ } /* * Acceptance criteria: check if we have forwarding information * stored about destination, and that nexthop == TA of this PERR. * NB: we also build a new PERR to propagate in case we should forward. */ for (i = 0; i < perr->perr_ndests; i++) { rt = ieee80211_mesh_rt_find(vap, PERR_DADDR(i)); if (rt == NULL) continue; if (!IEEE80211_ADDR_EQ(rt->rt_nexthop, wh->i_addr2)) continue; /* found and accepted a PERR ndest element, process it... 
         */
        if (forward)
            memcpy(&pperr->perr_dests[j], &perr->perr_dests[i],
                sizeof(*perr->perr_dests));
        hr = IEEE80211_MESH_ROUTE_PRIV(rt, struct ieee80211_hwmp_route);
        switch (PERR_DFLAGS(i)) {
        case (IEEE80211_REASON_MESH_PERR_NO_FI):
            if (PERR_DSEQ(i) == 0) {
                hr->hr_seq++;
                if (forward) {
                    pperr->perr_dests[j].dest_seq = hr->hr_seq;
                }
            } else {
                hr->hr_seq = PERR_DSEQ(i);
            }
            rt->rt_flags &= ~IEEE80211_MESHRT_FLAGS_VALID;
            j++;
            break;
        case (IEEE80211_REASON_MESH_PERR_DEST_UNREACH):
            if (HWMP_SEQ_GT(PERR_DSEQ(i), hr->hr_seq)) {
                hr->hr_seq = PERR_DSEQ(i);
                rt->rt_flags &= ~IEEE80211_MESHRT_FLAGS_VALID;
                j++;
            }
            break;
        case (IEEE80211_REASON_MESH_PERR_NO_PROXY):
            rt_ext = ieee80211_mesh_rt_find(vap, PERR_DEXTADDR(i));
            if (rt_ext != NULL) {
                rt_ext->rt_flags &= ~IEEE80211_MESHRT_FLAGS_VALID;
                j++;
            }
            break;
        default:
            IEEE80211_DISCARD(vap, IEEE80211_MSG_HWMP, wh, NULL,
                "PERR, unknown reason code %u\n", PERR_DFLAGS(i));
            goto done;	/* XXX: stats?? */
        }
        ieee80211_mesh_rt_flush_peer(vap, PERR_DADDR(i));
        KASSERT(j < 32, ("PERR, error ndest >= 32 (%u)", j));
    }
    if (j == 0) {
        IEEE80211_DISCARD(vap, IEEE80211_MSG_HWMP, wh, NULL, "%s",
            "PERR not accepted");
        goto done;	/* XXX: stats?? */
    }
    /*
     * Propagate the PERR if we previously found it on our routing table.
     */
    if (forward && perr->perr_ttl > 1) {
        IEEE80211_NOTE(vap, IEEE80211_MSG_HWMP, ni,
            "propagate PERR from %6D", wh->i_addr2, ":");
        pperr->perr_ndests = j;
        pperr->perr_ttl--;
        hwmp_send_perr(vap, broadcastaddr, pperr);
    }
done:
    if (pperr != NULL)
        IEEE80211_FREE(pperr, M_80211_MESH_PERR);
}
#undef	PERR_DFLAGS
#undef	PERR_DADDR
#undef	PERR_DSEQ
#undef	PERR_DEXTADDR

static int
hwmp_send_perr(struct ieee80211vap *vap,
    const uint8_t da[IEEE80211_ADDR_LEN],
    struct ieee80211_meshperr_ie *perr)
{
    struct ieee80211_hwmp_state *hs = vap->iv_hwmp;
    int i;
    uint8_t length = 0;

    /*
     * Enforce PERR interval.
     */
    if (ratecheck(&hs->hs_lastperr, &ieee80211_hwmp_perrminint) == 0)
        return EALREADY;
    getmicrouptime(&hs->hs_lastperr);

    /*
     * mesh perr action frame format
     * [6] da
     * [6] sa
     * [6] addr3 = sa
     * [1] action
     * [1] category
     * [tlv] mesh path error
     */
    perr->perr_ie = IEEE80211_ELEMID_MESHPERR;
    length = IEEE80211_MESHPERR_BASE_SZ;
    for (i = 0; i < perr->perr_ndests; i++) {
        if (perr->perr_dests[i].dest_flags & IEEE80211_MESHPERR_FLAGS_AE) {
            length += IEEE80211_MESHPERR_DEST_SZ_AE;
            continue;
        }
        length += IEEE80211_MESHPERR_DEST_SZ;
    }
    perr->perr_len = length;
    return hwmp_send_action(vap, da, (uint8_t *)perr, perr->perr_len + 2);
}

/*
 * Called from the rest of the net80211 code (mesh code for example).
 * NB: IEEE80211_REASON_MESH_PERR_DEST_UNREACH can be triggered by the fact
 * that a mesh STA is unable to forward an MSDU/MMPDU to a next-hop mesh STA.
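 * As a rough illustration (hypothetical call site, not part of this
 * change; in the tree the call normally arrives through the mesh
 * path-selection method table rather than directly):
 */
#if 0	/* illustrative sketch only, not compiled */
    /* Report that forwarding towards rt->rt_dest has failed. */
    hwmp_senderror(vap, rt->rt_dest, rt,
        IEEE80211_REASON_MESH_PERR_DEST_UNREACH);
#endif
/*
 * hwmp_senderror() itself: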
*/ #define PERR_DFLAGS(n) perr.perr_dests[n].dest_flags #define PERR_DADDR(n) perr.perr_dests[n].dest_addr #define PERR_DSEQ(n) perr.perr_dests[n].dest_seq #define PERR_DEXTADDR(n) perr.perr_dests[n].dest_ext_addr #define PERR_DRCODE(n) perr.perr_dests[n].dest_rcode static void hwmp_senderror(struct ieee80211vap *vap, const uint8_t addr[IEEE80211_ADDR_LEN], struct ieee80211_mesh_route *rt, int rcode) { struct ieee80211_mesh_state *ms = vap->iv_mesh; struct ieee80211_hwmp_route *hr = NULL; struct ieee80211_meshperr_ie perr; if (rt != NULL) hr = IEEE80211_MESH_ROUTE_PRIV(rt, struct ieee80211_hwmp_route); perr.perr_ndests = 1; perr.perr_ttl = ms->ms_ttl; PERR_DFLAGS(0) = 0; PERR_DRCODE(0) = rcode; switch (rcode) { case IEEE80211_REASON_MESH_PERR_NO_FI: IEEE80211_ADDR_COPY(PERR_DADDR(0), addr); PERR_DSEQ(0) = 0; /* reserved */ break; case IEEE80211_REASON_MESH_PERR_NO_PROXY: KASSERT(rt != NULL, ("no proxy info for sending PERR")); KASSERT(rt->rt_flags & IEEE80211_MESHRT_FLAGS_PROXY, ("route is not marked proxy")); PERR_DFLAGS(0) |= IEEE80211_MESHPERR_FLAGS_AE; IEEE80211_ADDR_COPY(PERR_DADDR(0), vap->iv_myaddr); PERR_DSEQ(0) = rt->rt_ext_seq; IEEE80211_ADDR_COPY(PERR_DEXTADDR(0), addr); break; case IEEE80211_REASON_MESH_PERR_DEST_UNREACH: KASSERT(rt != NULL, ("no route info for sending PERR")); IEEE80211_ADDR_COPY(PERR_DADDR(0), addr); PERR_DSEQ(0) = hr->hr_seq; break; default: KASSERT(0, ("unknown reason code for HWMP PERR (%u)", rcode)); } hwmp_send_perr(vap, broadcastaddr, &perr); } #undef PERR_DFLAGS #undef PEER_DADDR #undef PERR_DSEQ #undef PERR_DEXTADDR #undef PERR_DRCODE static void hwmp_recv_rann(struct ieee80211vap *vap, struct ieee80211_node *ni, const struct ieee80211_frame *wh, const struct ieee80211_meshrann_ie *rann) { struct ieee80211_mesh_state *ms = vap->iv_mesh; struct ieee80211_hwmp_state *hs = vap->iv_hwmp; struct ieee80211_mesh_route *rt = NULL; struct ieee80211_hwmp_route *hr; struct ieee80211_meshpreq_ie preq; struct ieee80211_meshrann_ie prann; if (IEEE80211_ADDR_EQ(rann->rann_addr, vap->iv_myaddr)) return; rt = ieee80211_mesh_rt_find(vap, rann->rann_addr); if (rt != NULL && rt->rt_flags & IEEE80211_MESHRT_FLAGS_VALID) { hr = IEEE80211_MESH_ROUTE_PRIV(rt, struct ieee80211_hwmp_route); /* Acceptance criteria: if RANN.seq < stored seq, discard RANN */ if (HWMP_SEQ_LT(rann->rann_seq, hr->hr_seq)) { IEEE80211_DISCARD(vap, IEEE80211_MSG_HWMP, wh, NULL, "RANN seq %u < %u", rann->rann_seq, hr->hr_seq); return; } /* Acceptance criteria: if RANN.seq == stored seq AND * RANN.metric > stored metric, discard RANN */ if (HWMP_SEQ_EQ(rann->rann_seq, hr->hr_seq) && rann->rann_metric > rt->rt_metric) { IEEE80211_DISCARD(vap, IEEE80211_MSG_HWMP, wh, NULL, "RANN metric %u > %u", rann->rann_metric, rt->rt_metric); return; } } /* RANN ACCEPTED */ ieee80211_hwmp_rannint = rann->rann_interval; /* XXX: mtx lock? 
*/ if (rt == NULL) { rt = ieee80211_mesh_rt_add(vap, rann->rann_addr); if (rt == NULL) { IEEE80211_DISCARD(vap, IEEE80211_MSG_HWMP, wh, NULL, "unable to add mac for RANN root %6D", rann->rann_addr, ":"); vap->iv_stats.is_mesh_rtaddfailed++; return; } } hr = IEEE80211_MESH_ROUTE_PRIV(rt, struct ieee80211_hwmp_route); /* Check if root is a mesh gate, mark it */ if (rann->rann_flags & IEEE80211_MESHRANN_FLAGS_GATE) { struct ieee80211_mesh_gate_route *gr; rt->rt_flags |= IEEE80211_MESHRT_FLAGS_GATE; gr = ieee80211_mesh_mark_gate(vap, rann->rann_addr, rt); gr->gr_lastseq = 0; /* NOT GANN */ } /* discovery timeout */ ieee80211_mesh_rt_update(rt, ticks_to_msecs(ieee80211_hwmp_roottimeout)); preq.preq_flags = IEEE80211_MESHPREQ_FLAGS_AM; preq.preq_hopcount = 0; preq.preq_ttl = ms->ms_ttl; preq.preq_id = 0; /* reserved */ IEEE80211_ADDR_COPY(preq.preq_origaddr, vap->iv_myaddr); preq.preq_origseq = ++hs->hs_seq; preq.preq_lifetime = ieee80211_hwmp_roottimeout; preq.preq_metric = IEEE80211_MESHLMETRIC_INITIALVAL; preq.preq_tcount = 1; preq.preq_targets[0].target_flags = IEEE80211_MESHPREQ_TFLAGS_TO; /* NB: IEEE80211_MESHPREQ_TFLAGS_USN = 0 implicitly implied */ IEEE80211_ADDR_COPY(preq.preq_targets[0].target_addr, rann->rann_addr); preq.preq_targets[0].target_seq = rann->rann_seq; /* XXX: if rootconfint have not passed, we built this preq in vain */ hwmp_send_preq(vap, wh->i_addr2, &preq, &hr->hr_lastrootconf, &ieee80211_hwmp_rootconfint); /* propagate a RANN */ if (rt->rt_flags & IEEE80211_MESHRT_FLAGS_VALID && rann->rann_ttl > 1 && ms->ms_flags & IEEE80211_MESHFLAGS_FWD) { hr->hr_seq = rann->rann_seq; memcpy(&prann, rann, sizeof(prann)); prann.rann_hopcount += 1; prann.rann_ttl -= 1; prann.rann_metric += ms->ms_pmetric->mpm_metric(ni); hwmp_send_rann(vap, broadcastaddr, &prann); } } static int hwmp_send_rann(struct ieee80211vap *vap, const uint8_t da[IEEE80211_ADDR_LEN], struct ieee80211_meshrann_ie *rann) { /* * mesh rann action frame format * [6] da * [6] sa * [6] addr3 = sa * [1] action * [1] category * [tlv] root announcement */ rann->rann_ie = IEEE80211_ELEMID_MESHRANN; rann->rann_len = IEEE80211_MESHRANN_BASE_SZ; return hwmp_send_action(vap, da, (uint8_t *)rann, rann->rann_len + 2); } #define PREQ_TFLAGS(n) preq.preq_targets[n].target_flags #define PREQ_TADDR(n) preq.preq_targets[n].target_addr #define PREQ_TSEQ(n) preq.preq_targets[n].target_seq static void hwmp_rediscover_cb(void *arg) { struct ieee80211_mesh_route *rt = arg; struct ieee80211vap *vap = rt->rt_vap; struct ieee80211_hwmp_state *hs = vap->iv_hwmp; struct ieee80211_mesh_state *ms = vap->iv_mesh; struct ieee80211_hwmp_route *hr; struct ieee80211_meshpreq_ie preq; /* Optimize: storing first preq? */ if ((rt->rt_flags & IEEE80211_MESHRT_FLAGS_VALID)) return ; /* nothing to do */ hr = IEEE80211_MESH_ROUTE_PRIV(rt, struct ieee80211_hwmp_route); if (hr->hr_preqretries >= ieee80211_hwmp_maxpreq_retries) { IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_ANY, rt->rt_dest, "%s", "max number of discovery, send queued frames to GATE"); ieee80211_mesh_forward_to_gates(vap, rt); vap->iv_stats.is_mesh_fwd_nopath++; return ; /* XXX: flush queue? */ } hr->hr_preqretries++; IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_HWMP, rt->rt_dest, "start path rediscovery , target seq %u", hr->hr_seq); /* * Try to discover the path for this node. 
* Group addressed PREQ Case A */ preq.preq_flags = 0; preq.preq_hopcount = 0; preq.preq_ttl = ms->ms_ttl; preq.preq_id = ++hs->hs_preqid; IEEE80211_ADDR_COPY(preq.preq_origaddr, vap->iv_myaddr); preq.preq_origseq = hr->hr_origseq; preq.preq_lifetime = ticks_to_msecs(ieee80211_hwmp_pathtimeout); preq.preq_metric = IEEE80211_MESHLMETRIC_INITIALVAL; preq.preq_tcount = 1; IEEE80211_ADDR_COPY(PREQ_TADDR(0), rt->rt_dest); PREQ_TFLAGS(0) = 0; if (ieee80211_hwmp_targetonly) PREQ_TFLAGS(0) |= IEEE80211_MESHPREQ_TFLAGS_TO; PREQ_TFLAGS(0) |= IEEE80211_MESHPREQ_TFLAGS_USN; PREQ_TSEQ(0) = 0; /* RESERVED when USN flag is set */ /* XXX check return value */ hwmp_send_preq(vap, broadcastaddr, &preq, &hr->hr_lastpreq, &ieee80211_hwmp_preqminint); callout_reset(&rt->rt_discovery, ieee80211_hwmp_net_diameter_traversaltime * 2, hwmp_rediscover_cb, rt); } static struct ieee80211_node * hwmp_discover(struct ieee80211vap *vap, const uint8_t dest[IEEE80211_ADDR_LEN], struct mbuf *m) { struct ieee80211_hwmp_state *hs = vap->iv_hwmp; struct ieee80211_mesh_state *ms = vap->iv_mesh; struct ieee80211_mesh_route *rt = NULL; struct ieee80211_hwmp_route *hr; struct ieee80211_meshpreq_ie preq; struct ieee80211_node *ni; int sendpreq = 0; KASSERT(vap->iv_opmode == IEEE80211_M_MBSS, ("not a mesh vap, opmode %d", vap->iv_opmode)); KASSERT(!IEEE80211_ADDR_EQ(vap->iv_myaddr, dest), ("%s: discovering self!", __func__)); ni = NULL; if (!IEEE80211_IS_MULTICAST(dest)) { rt = ieee80211_mesh_rt_find(vap, dest); if (rt == NULL) { rt = ieee80211_mesh_rt_add(vap, dest); if (rt == NULL) { IEEE80211_NOTE(vap, IEEE80211_MSG_HWMP, ni, "unable to add discovery path to %6D", dest, ":"); vap->iv_stats.is_mesh_rtaddfailed++; goto done; } } hr = IEEE80211_MESH_ROUTE_PRIV(rt, struct ieee80211_hwmp_route); if (rt->rt_flags & IEEE80211_MESHRT_FLAGS_DISCOVER) { IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_HWMP, dest, "%s", "already discovering queue frame until path found"); sendpreq = 1; goto done; } if ((rt->rt_flags & IEEE80211_MESHRT_FLAGS_VALID) == 0) { if (hr->hr_lastdiscovery != 0 && (ticks - hr->hr_lastdiscovery < (ieee80211_hwmp_net_diameter_traversaltime * 2))) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY, dest, NULL, "%s", "too frequent discovery requeust"); sendpreq = 1; goto done; } hr->hr_lastdiscovery = ticks; if (hr->hr_preqretries >= ieee80211_hwmp_maxpreq_retries) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY, dest, NULL, "%s", "no valid path , max number of discovery"); vap->iv_stats.is_mesh_fwd_nopath++; goto done; } rt->rt_flags = IEEE80211_MESHRT_FLAGS_DISCOVER; hr->hr_preqretries++; if (hr->hr_origseq == 0) hr->hr_origseq = ++hs->hs_seq; rt->rt_metric = IEEE80211_MESHLMETRIC_INITIALVAL; sendpreq = 1; IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_HWMP, dest, "start path discovery (src %s), target seq %u", m == NULL ? "" : ether_sprintf( mtod(m, struct ether_header *)->ether_shost), hr->hr_seq); /* * Try to discover the path for this node. 
* Group addressed PREQ Case A */ preq.preq_flags = 0; preq.preq_hopcount = 0; preq.preq_ttl = ms->ms_ttl; preq.preq_id = ++hs->hs_preqid; IEEE80211_ADDR_COPY(preq.preq_origaddr, vap->iv_myaddr); preq.preq_origseq = hr->hr_origseq; preq.preq_lifetime = ticks_to_msecs(ieee80211_hwmp_pathtimeout); preq.preq_metric = IEEE80211_MESHLMETRIC_INITIALVAL; preq.preq_tcount = 1; IEEE80211_ADDR_COPY(PREQ_TADDR(0), dest); PREQ_TFLAGS(0) = 0; if (ieee80211_hwmp_targetonly) PREQ_TFLAGS(0) |= IEEE80211_MESHPREQ_TFLAGS_TO; PREQ_TFLAGS(0) |= IEEE80211_MESHPREQ_TFLAGS_USN; PREQ_TSEQ(0) = 0; /* RESERVED when USN flag is set */ /* XXX check return value */ hwmp_send_preq(vap, broadcastaddr, &preq, &hr->hr_lastpreq, &ieee80211_hwmp_preqminint); callout_reset(&rt->rt_discovery, ieee80211_hwmp_net_diameter_traversaltime * 2, hwmp_rediscover_cb, rt); } if (rt->rt_flags & IEEE80211_MESHRT_FLAGS_VALID) ni = ieee80211_find_txnode(vap, rt->rt_nexthop); } else { ni = ieee80211_find_txnode(vap, dest); /* NB: if null then we leak mbuf */ KASSERT(ni != NULL, ("leak mcast frame")); return ni; } done: if (ni == NULL && m != NULL) { if (sendpreq) { struct ieee80211com *ic = vap->iv_ic; /* * Queue packet for transmit when path discovery * completes. If discovery never completes the * frame will be flushed by way of the aging timer. */ IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_HWMP, dest, "%s", "queue frame until path found"); MPASS((m->m_pkthdr.csum_flags & CSUM_SND_TAG) == 0); m->m_pkthdr.rcvif = (void *)(uintptr_t) ieee80211_mac_hash(ic, dest); /* XXX age chosen randomly */ ieee80211_ageq_append(&ic->ic_stageq, m, IEEE80211_INACT_WAIT); } else { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_HWMP, dest, NULL, "%s", "no valid path to this node"); m_freem(m); } } return ni; } #undef PREQ_TFLAGS #undef PREQ_TADDR #undef PREQ_TSEQ static int hwmp_ioctl_get80211(struct ieee80211vap *vap, struct ieee80211req *ireq) { struct ieee80211_hwmp_state *hs = vap->iv_hwmp; int error; if (vap->iv_opmode != IEEE80211_M_MBSS) return ENOSYS; error = 0; switch (ireq->i_type) { case IEEE80211_IOC_HWMP_ROOTMODE: ireq->i_val = hs->hs_rootmode; break; case IEEE80211_IOC_HWMP_MAXHOPS: ireq->i_val = hs->hs_maxhops; break; default: return ENOSYS; } return error; } IEEE80211_IOCTL_GET(hwmp, hwmp_ioctl_get80211); static int hwmp_ioctl_set80211(struct ieee80211vap *vap, struct ieee80211req *ireq) { struct ieee80211_hwmp_state *hs = vap->iv_hwmp; int error; if (vap->iv_opmode != IEEE80211_M_MBSS) return ENOSYS; error = 0; switch (ireq->i_type) { case IEEE80211_IOC_HWMP_ROOTMODE: if (ireq->i_val < 0 || ireq->i_val > 3) return EINVAL; hs->hs_rootmode = ireq->i_val; hwmp_rootmode_setup(vap); break; case IEEE80211_IOC_HWMP_MAXHOPS: if (ireq->i_val <= 0 || ireq->i_val > 255) return EINVAL; hs->hs_maxhops = ireq->i_val; break; default: return ENOSYS; } return error; } IEEE80211_IOCTL_SET(hwmp, hwmp_ioctl_set80211); diff --git a/sys/net80211/ieee80211_input.c b/sys/net80211/ieee80211_input.c index 683c8d5a06f1..b7d9c6f9457e 100644 --- a/sys/net80211/ieee80211_input.c +++ b/sys/net80211/ieee80211_input.c @@ -1,1059 +1,1059 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2001 Atsushi Onoe * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_wlan.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef IEEE80211_SUPPORT_MESH #include #endif #include #ifdef INET #include #include #endif static void ieee80211_process_mimo(struct ieee80211_node *ni, struct ieee80211_rx_stats *rx) { int i; /* Verify the required MIMO bits are set */ if ((rx->r_flags & (IEEE80211_R_C_CHAIN | IEEE80211_R_C_NF | IEEE80211_R_C_RSSI)) != (IEEE80211_R_C_CHAIN | IEEE80211_R_C_NF | IEEE80211_R_C_RSSI)) return; /* XXX This assumes the MIMO radios have both ctl and ext chains */ for (i = 0; i < MIN(rx->c_chain, IEEE80211_MAX_CHAINS); i++) { IEEE80211_RSSI_LPF(ni->ni_mimo_rssi_ctl[i], rx->c_rssi_ctl[i]); IEEE80211_RSSI_LPF(ni->ni_mimo_rssi_ext[i], rx->c_rssi_ext[i]); } /* XXX This also assumes the MIMO radios have both ctl and ext chains */ for(i = 0; i < MIN(rx->c_chain, IEEE80211_MAX_CHAINS); i++) { ni->ni_mimo_noise_ctl[i] = rx->c_nf_ctl[i]; ni->ni_mimo_noise_ext[i] = rx->c_nf_ext[i]; } ni->ni_mimo_chains = rx->c_chain; } int ieee80211_input_mimo(struct ieee80211_node *ni, struct mbuf *m) { struct ieee80211_rx_stats rxs; /* try to read stats from mbuf */ bzero(&rxs, sizeof(rxs)); if (ieee80211_get_rx_params(m, &rxs) != 0) return (-1); /* XXX should assert IEEE80211_R_NF and IEEE80211_R_RSSI are set */ ieee80211_process_mimo(ni, &rxs); //return ieee80211_input(ni, m, rx->rssi, rx->nf); return ni->ni_vap->iv_input(ni, m, &rxs, rxs.c_rssi, rxs.c_nf); } int ieee80211_input_all(struct ieee80211com *ic, struct mbuf *m, int rssi, int nf) { struct ieee80211_rx_stats rx; rx.r_flags = IEEE80211_R_NF | IEEE80211_R_RSSI; rx.c_nf = nf; rx.c_rssi = rssi; if (!ieee80211_add_rx_params(m, &rx)) return (-1); return ieee80211_input_mimo_all(ic, m); } int ieee80211_input_mimo_all(struct ieee80211com *ic, struct mbuf *m) { struct ieee80211vap *vap; int type = -1; m->m_flags |= M_BCAST; /* NB: mark for bpf tap'ing */ /* XXX locking */ TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) { struct ieee80211_node *ni; struct mbuf *mcopy; /* NB: could check for IFF_UP but this is cheaper */ if (vap->iv_state == IEEE80211_S_INIT) continue; /* * WDS vap's only receive directed traffic from the * station at the ``far end''. That traffic should * be passed through the AP vap the station is associated * to--so don't spam them with mcast frames. */ if (vap->iv_opmode == IEEE80211_M_WDS) continue; if (TAILQ_NEXT(vap, iv_next) != NULL) { /* * Packet contents are changed by ieee80211_decap * so do a deep copy of the packet. * NB: tags are copied too. 
*/ - mcopy = m_dup(m, M_NOWAIT); + mcopy = m_dup(m, IEEE80211_M_NOWAIT); if (mcopy == NULL) { /* XXX stat+msg */ continue; } } else { mcopy = m; m = NULL; } ni = ieee80211_ref_node(vap->iv_bss); type = ieee80211_input_mimo(ni, mcopy); ieee80211_free_node(ni); } if (m != NULL) /* no vaps, reclaim mbuf */ m_freem(m); return type; } /* * This function reassembles fragments. * * XXX should handle 3 concurrent reassemblies per-spec. */ struct mbuf * ieee80211_defrag(struct ieee80211_node *ni, struct mbuf *m, int hdrspace, int has_decrypted) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *); struct ieee80211_frame *lwh; uint16_t rxseq; uint8_t fragno; uint8_t more_frag = wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG; struct mbuf *mfrag; KASSERT(!IEEE80211_IS_MULTICAST(wh->i_addr1), ("multicast fragm?")); rxseq = le16toh(*(uint16_t *)wh->i_seq); fragno = rxseq & IEEE80211_SEQ_FRAG_MASK; /* Quick way out, if there's nothing to defragment */ if (!more_frag && fragno == 0 && ni->ni_rxfrag[0] == NULL) return m; /* Temporarily set flag to remember if fragment was encrypted. */ /* XXX use a non-packet altering storage for this in the future. */ if (has_decrypted) wh->i_fc[1] |= IEEE80211_FC1_PROTECTED; /* * Remove frag to insure it doesn't get reaped by timer. */ if (ni->ni_table == NULL) { /* * Should never happen. If the node is orphaned (not in * the table) then input packets should not reach here. * Otherwise, a concurrent request that yanks the table * should be blocked by other interlocking and/or by first * shutting the driver down. Regardless, be defensive * here and just bail */ /* XXX need msg+stat */ m_freem(m); return NULL; } IEEE80211_NODE_LOCK(ni->ni_table); mfrag = ni->ni_rxfrag[0]; ni->ni_rxfrag[0] = NULL; IEEE80211_NODE_UNLOCK(ni->ni_table); /* * Validate new fragment is in order and * related to the previous ones. */ if (mfrag != NULL) { uint16_t last_rxseq; lwh = mtod(mfrag, struct ieee80211_frame *); last_rxseq = le16toh(*(uint16_t *)lwh->i_seq); /* * NB: check seq # and frag together. Also check that both * fragments are plaintext or that both are encrypted. */ if (rxseq == last_rxseq+1 && IEEE80211_ADDR_EQ(wh->i_addr1, lwh->i_addr1) && IEEE80211_ADDR_EQ(wh->i_addr2, lwh->i_addr2) && !((wh->i_fc[1] ^ lwh->i_fc[1]) & IEEE80211_FC1_PROTECTED)) { /* XXX clear MORE_FRAG bit? */ /* track last seqnum and fragno */ *(uint16_t *) lwh->i_seq = *(uint16_t *) wh->i_seq; m_adj(m, hdrspace); /* strip header */ m_catpkt(mfrag, m); /* concatenate */ } else { /* * Unrelated fragment or no space for it, * clear current fragments. */ m_freem(mfrag); mfrag = NULL; } } if (mfrag == NULL) { if (fragno != 0) { /* !first fragment, discard */ vap->iv_stats.is_rx_defrag++; IEEE80211_NODE_STAT(ni, rx_defrag); m_freem(m); return NULL; } mfrag = m; } if (more_frag) { /* more to come, save */ ni->ni_rxfragstamp = ticks; ni->ni_rxfrag[0] = mfrag; mfrag = NULL; } /* Remember to clear protected flag that was temporarily set. 
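 * The PROTECTED bit was only borrowed above to remember, across
 * fragments, whether each piece had already been decrypted; it must not
 * leak into the reassembled frame handed to the rest of the input path.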
*/ if (mfrag != NULL) { wh = mtod(mfrag, struct ieee80211_frame *); wh->i_fc[1] &= ~IEEE80211_FC1_PROTECTED; } return mfrag; } void ieee80211_deliver_data(struct ieee80211vap *vap, struct ieee80211_node *ni, struct mbuf *m) { struct ether_header *eh = mtod(m, struct ether_header *); struct ifnet *ifp = vap->iv_ifp; /* clear driver/net80211 flags before passing up */ m->m_flags &= ~(M_MCAST | M_BCAST); m_clrprotoflags(m); /* NB: see hostap_deliver_data, this path doesn't handle hostap */ KASSERT(vap->iv_opmode != IEEE80211_M_HOSTAP, ("gack, hostap")); /* * Do accounting. */ if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); IEEE80211_NODE_STAT(ni, rx_data); IEEE80211_NODE_STAT_ADD(ni, rx_bytes, m->m_pkthdr.len); if (ETHER_IS_MULTICAST(eh->ether_dhost)) { if (ETHER_IS_BROADCAST(eh->ether_dhost)) m->m_flags |= M_BCAST; else m->m_flags |= M_MCAST; IEEE80211_NODE_STAT(ni, rx_mcast); } else IEEE80211_NODE_STAT(ni, rx_ucast); m->m_pkthdr.rcvif = ifp; if (ni->ni_vlan != 0) { /* attach vlan tag */ m->m_pkthdr.ether_vtag = ni->ni_vlan; m->m_flags |= M_VLANTAG; } ifp->if_input(ifp, m); } struct mbuf * ieee80211_decap(struct ieee80211vap *vap, struct mbuf *m, int hdrlen, uint8_t qos) { struct ieee80211_qosframe_addr4 wh; struct ether_header *eh; struct llc *llc; KASSERT(hdrlen <= sizeof(wh), ("hdrlen %d > max %zd", hdrlen, sizeof(wh))); if (m->m_len < hdrlen + sizeof(*llc) && (m = m_pullup(m, hdrlen + sizeof(*llc))) == NULL) { vap->iv_stats.is_rx_tooshort++; /* XXX msg */ return NULL; } memcpy(&wh, mtod(m, caddr_t), hdrlen); llc = (struct llc *)(mtod(m, caddr_t) + hdrlen); if (llc->llc_dsap == LLC_SNAP_LSAP && llc->llc_ssap == LLC_SNAP_LSAP && llc->llc_control == LLC_UI && llc->llc_snap.org_code[0] == 0 && llc->llc_snap.org_code[1] == 0 && llc->llc_snap.org_code[2] == 0 && /* NB: preserve AppleTalk frames that have a native SNAP hdr */ !(llc->llc_snap.ether_type == htons(ETHERTYPE_AARP) || llc->llc_snap.ether_type == htons(ETHERTYPE_IPX)) && /* Do not want to touch A-MSDU frames. */ !(qos & IEEE80211_QOS_AMSDU)) { m_adj(m, hdrlen + sizeof(struct llc) - sizeof(*eh)); llc = NULL; } else { m_adj(m, hdrlen - sizeof(*eh)); } eh = mtod(m, struct ether_header *); switch (wh.i_fc[1] & IEEE80211_FC1_DIR_MASK) { case IEEE80211_FC1_DIR_NODS: IEEE80211_ADDR_COPY(eh->ether_dhost, wh.i_addr1); IEEE80211_ADDR_COPY(eh->ether_shost, wh.i_addr2); break; case IEEE80211_FC1_DIR_TODS: IEEE80211_ADDR_COPY(eh->ether_dhost, wh.i_addr3); IEEE80211_ADDR_COPY(eh->ether_shost, wh.i_addr2); break; case IEEE80211_FC1_DIR_FROMDS: IEEE80211_ADDR_COPY(eh->ether_dhost, wh.i_addr1); IEEE80211_ADDR_COPY(eh->ether_shost, wh.i_addr3); break; case IEEE80211_FC1_DIR_DSTODS: IEEE80211_ADDR_COPY(eh->ether_dhost, wh.i_addr3); IEEE80211_ADDR_COPY(eh->ether_shost, wh.i_addr4); break; } #ifndef __NO_STRICT_ALIGNMENT if (!ALIGNED_POINTER(mtod(m, caddr_t) + sizeof(*eh), uint32_t)) { m = ieee80211_realign(vap, m, sizeof(*eh)); if (m == NULL) return NULL; } #endif /* !__NO_STRICT_ALIGNMENT */ if (llc != NULL) { eh = mtod(m, struct ether_header *); eh->ether_type = htons(m->m_pkthdr.len - sizeof(*eh)); } return m; } /* * Decap a frame encapsulated in a fast-frame/A-MSDU. */ struct mbuf * ieee80211_decap1(struct mbuf *m, int *framelen) { #define FF_LLC_SIZE (sizeof(struct ether_header) + sizeof(struct llc)) struct ether_header *eh; struct llc *llc; const uint8_t llc_hdr_mac[ETHER_ADDR_LEN] = { /* MAC address matching the 802.2 LLC header */ LLC_SNAP_LSAP, LLC_SNAP_LSAP, LLC_UI, 0, 0, 0 }; /* * The frame has an 802.3 header followed by an 802.2 * LLC header. 
The encapsulated frame length is in the * first header type field; save that and overwrite it * with the true type field found in the second. Then * copy the 802.3 header up to where it belongs and * adjust the mbuf contents to remove the void. */ if (m->m_len < FF_LLC_SIZE && (m = m_pullup(m, FF_LLC_SIZE)) == NULL) return NULL; eh = mtod(m, struct ether_header *); /* 802.3 header is first */ /* * Detect possible attack where a single 802.11 frame is processed * as an A-MSDU frame due to an adversary setting the A-MSDU present * bit in the 802.11 QoS header. [FragAttacks] */ if (memcmp(eh->ether_dhost, llc_hdr_mac, ETHER_ADDR_LEN) == 0) return NULL; llc = (struct llc *)&eh[1]; /* 802.2 header follows */ *framelen = ntohs(eh->ether_type) /* encap'd frame size */ + sizeof(struct ether_header) - sizeof(struct llc); eh->ether_type = llc->llc_un.type_snap.ether_type; ovbcopy(eh, mtod(m, uint8_t *) + sizeof(struct llc), sizeof(struct ether_header)); m_adj(m, sizeof(struct llc)); return m; #undef FF_LLC_SIZE } /* * Install received rate set information in the node's state block. */ int ieee80211_setup_rates(struct ieee80211_node *ni, const uint8_t *rates, const uint8_t *xrates, int flags) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211_rateset *rs = &ni->ni_rates; memset(rs, 0, sizeof(*rs)); rs->rs_nrates = rates[1]; memcpy(rs->rs_rates, rates + 2, rs->rs_nrates); if (xrates != NULL) { uint8_t nxrates; /* * Tack on 11g extended supported rate element. */ nxrates = xrates[1]; if (rs->rs_nrates + nxrates > IEEE80211_RATE_MAXSIZE) { nxrates = IEEE80211_RATE_MAXSIZE - rs->rs_nrates; IEEE80211_NOTE(vap, IEEE80211_MSG_XRATE, ni, "extended rate set too large; only using " "%u of %u rates", nxrates, xrates[1]); vap->iv_stats.is_rx_rstoobig++; } memcpy(rs->rs_rates + rs->rs_nrates, xrates+2, nxrates); rs->rs_nrates += nxrates; } return ieee80211_fix_rate(ni, rs, flags); } /* * Send a management frame error response to the specified * station. If ni is associated with the station then use * it; otherwise allocate a temporary node suitable for * transmitting the frame and then free the reference so * it will go away as soon as the frame has been transmitted. */ void ieee80211_send_error(struct ieee80211_node *ni, const uint8_t mac[IEEE80211_ADDR_LEN], int subtype, int arg) { struct ieee80211vap *vap = ni->ni_vap; int istmp; if (ni == vap->iv_bss) { if (vap->iv_state != IEEE80211_S_RUN) { /* * XXX hack until we get rid of this routine. * We can be called prior to the vap reaching * run state under certain conditions in which * case iv_bss->ni_chan will not be setup. * Check for this explicitly and and just ignore * the request. */ return; } ni = ieee80211_tmp_node(vap, mac); if (ni == NULL) { /* XXX msg */ return; } istmp = 1; } else istmp = 0; IEEE80211_SEND_MGMT(ni, subtype, arg); if (istmp) ieee80211_free_node(ni); } int ieee80211_alloc_challenge(struct ieee80211_node *ni) { if (ni->ni_challenge == NULL) ni->ni_challenge = (uint32_t *) IEEE80211_MALLOC(IEEE80211_CHALLENGE_LEN, M_80211_NODE, IEEE80211_M_NOWAIT); if (ni->ni_challenge == NULL) { IEEE80211_NOTE(ni->ni_vap, IEEE80211_MSG_DEBUG | IEEE80211_MSG_AUTH, ni, "%s", "shared key challenge alloc failed"); /* XXX statistic */ } return (ni->ni_challenge != NULL); } /* * Parse a Beacon or ProbeResponse frame and return the * useful information in an ieee80211_scanparams structure. * Status is set to 0 if no problems were found; otherwise * a bitmask of IEEE80211_BPARSE_* items is returned that * describes the problems detected. 
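 *
 * A minimal sketch of a typical caller (illustrative only, not part of
 * this change; the variable names follow the prototype below):
 */
#if 0	/* illustrative sketch only, not compiled */
    struct ieee80211_scanparams scan;

    if (ieee80211_parse_beacon(ni, m, rxchan, &scan) != 0)
        return;		/* unusable frame; reasons are in scan.status */
    /* scan.ssid, scan.rates, scan.chan, ... now point into the frame */
#endif
/*
 * ieee80211_parse_beacon() itself: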
*/ int ieee80211_parse_beacon(struct ieee80211_node *ni, struct mbuf *m, struct ieee80211_channel *rxchan, struct ieee80211_scanparams *scan) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct ieee80211_frame *wh; uint8_t *frm, *efrm; wh = mtod(m, struct ieee80211_frame *); frm = (uint8_t *)&wh[1]; efrm = mtod(m, uint8_t *) + m->m_len; scan->status = 0; /* * beacon/probe response frame format * * XXX Update from 802.11-2012 - eg where HT is * [8] time stamp * [2] beacon interval * [2] capability information * [tlv] ssid * [tlv] supported rates * [tlv] country information * [tlv] channel switch announcement (CSA) * [tlv] parameter set (FH/DS) * [tlv] erp information * [tlv] extended supported rates * [tlv] WME * [tlv] WPA or RSN * [tlv] HT capabilities * [tlv] HT information * [tlv] VHT capabilities * [tlv] VHT information * [tlv] Atheros capabilities * [tlv] Mesh ID * [tlv] Mesh Configuration */ IEEE80211_VERIFY_LENGTH(efrm - frm, 12, return (scan->status = IEEE80211_BPARSE_BADIELEN)); memset(scan, 0, sizeof(*scan)); scan->tstamp = frm; frm += 8; scan->bintval = le16toh(*(uint16_t *)frm); frm += 2; scan->capinfo = le16toh(*(uint16_t *)frm); frm += 2; scan->bchan = ieee80211_chan2ieee(ic, rxchan); scan->chan = scan->bchan; scan->ies = frm; scan->ies_len = efrm - frm; while (efrm - frm > 1) { IEEE80211_VERIFY_LENGTH(efrm - frm, frm[1] + 2, return (scan->status = IEEE80211_BPARSE_BADIELEN)); switch (*frm) { case IEEE80211_ELEMID_SSID: scan->ssid = frm; break; case IEEE80211_ELEMID_RATES: scan->rates = frm; break; case IEEE80211_ELEMID_COUNTRY: scan->country = frm; break; case IEEE80211_ELEMID_CSA: scan->csa = frm; break; case IEEE80211_ELEMID_QUIET: scan->quiet = frm; break; case IEEE80211_ELEMID_FHPARMS: if (ic->ic_phytype == IEEE80211_T_FH) { scan->fhdwell = le16dec(&frm[2]); scan->chan = IEEE80211_FH_CHAN(frm[4], frm[5]); scan->fhindex = frm[6]; } break; case IEEE80211_ELEMID_DSPARMS: /* * XXX hack this since depending on phytype * is problematic for multi-mode devices. */ if (ic->ic_phytype != IEEE80211_T_FH) scan->chan = frm[2]; break; case IEEE80211_ELEMID_TIM: /* XXX ATIM? 
*/ scan->tim = frm; scan->timoff = frm - mtod(m, uint8_t *); break; case IEEE80211_ELEMID_IBSSPARMS: case IEEE80211_ELEMID_CFPARMS: case IEEE80211_ELEMID_PWRCNSTR: case IEEE80211_ELEMID_BSSLOAD: case IEEE80211_ELEMID_APCHANREP: /* NB: avoid debugging complaints */ break; case IEEE80211_ELEMID_XRATES: scan->xrates = frm; break; case IEEE80211_ELEMID_ERP: if (frm[1] != 1) { IEEE80211_DISCARD_IE(vap, IEEE80211_MSG_ELEMID, wh, "ERP", "bad len %u", frm[1]); vap->iv_stats.is_rx_elem_toobig++; break; } scan->erp = frm[2] | 0x100; break; case IEEE80211_ELEMID_HTCAP: scan->htcap = frm; break; case IEEE80211_ELEMID_VHT_CAP: scan->vhtcap = frm; break; case IEEE80211_ELEMID_VHT_OPMODE: scan->vhtopmode = frm; break; case IEEE80211_ELEMID_RSN: scan->rsn = frm; break; case IEEE80211_ELEMID_HTINFO: scan->htinfo = frm; break; #ifdef IEEE80211_SUPPORT_MESH case IEEE80211_ELEMID_MESHID: scan->meshid = frm; break; case IEEE80211_ELEMID_MESHCONF: scan->meshconf = frm; break; #endif /* Extended capabilities; nothing handles it for now */ case IEEE80211_ELEMID_EXTCAP: break; case IEEE80211_ELEMID_VENDOR: if (iswpaoui(frm)) scan->wpa = frm; else if (iswmeparam(frm) || iswmeinfo(frm)) scan->wme = frm; #ifdef IEEE80211_SUPPORT_SUPERG else if (isatherosoui(frm)) scan->ath = frm; #endif #ifdef IEEE80211_SUPPORT_TDMA else if (istdmaoui(frm)) scan->tdma = frm; #endif else if (vap->iv_flags_ht & IEEE80211_FHT_HTCOMPAT) { /* * Accept pre-draft HT ie's if the * standard ones have not been seen. */ if (ishtcapoui(frm)) { if (scan->htcap == NULL) scan->htcap = frm; } else if (ishtinfooui(frm)) { if (scan->htinfo == NULL) scan->htcap = frm; } } break; default: IEEE80211_DISCARD_IE(vap, IEEE80211_MSG_ELEMID, wh, "unhandled", "id %u, len %u", *frm, frm[1]); vap->iv_stats.is_rx_elem_unknown++; break; } frm += frm[1] + 2; } IEEE80211_VERIFY_ELEMENT(scan->rates, IEEE80211_RATE_MAXSIZE, scan->status |= IEEE80211_BPARSE_RATES_INVALID); if (scan->rates != NULL && scan->xrates != NULL) { /* * NB: don't process XRATES if RATES is missing. This * avoids a potential null ptr deref and should be ok * as the return code will already note RATES is missing * (so callers shouldn't otherwise process the frame). */ IEEE80211_VERIFY_ELEMENT(scan->xrates, IEEE80211_RATE_MAXSIZE - scan->rates[1], scan->status |= IEEE80211_BPARSE_XRATES_INVALID); } IEEE80211_VERIFY_ELEMENT(scan->ssid, IEEE80211_NWID_LEN, scan->status |= IEEE80211_BPARSE_SSID_INVALID); if (scan->chan != scan->bchan && ic->ic_phytype != IEEE80211_T_FH) { /* * Frame was received on a channel different from the * one indicated in the DS params element id; * silently discard it. * * NB: this can happen due to signal leakage. * But we should take it for FH phy because * the rssi value should be correct even for * different hop pattern in FH. */ IEEE80211_DISCARD(vap, IEEE80211_MSG_ELEMID | IEEE80211_MSG_INPUT, wh, NULL, "for off-channel %u (bchan=%u)", scan->chan, scan->bchan); vap->iv_stats.is_rx_chanmismatch++; scan->status |= IEEE80211_BPARSE_OFFCHAN; } if (!(IEEE80211_BINTVAL_MIN <= scan->bintval && scan->bintval <= IEEE80211_BINTVAL_MAX)) { IEEE80211_DISCARD(vap, IEEE80211_MSG_ELEMID | IEEE80211_MSG_INPUT, wh, NULL, "bogus beacon interval (%d TU)", (int) scan->bintval); vap->iv_stats.is_rx_badbintval++; scan->status |= IEEE80211_BPARSE_BINTVAL_INVALID; } if (scan->country != NULL) { /* * Validate we have at least enough data to extract * the country code. 
Not sure if we should return an * error instead of discarding the IE; consider this * being lenient as we don't depend on the data for * correct operation. */ IEEE80211_VERIFY_LENGTH(scan->country[1], 3 * sizeof(uint8_t), scan->country = NULL); } if (scan->csa != NULL) { /* * Validate Channel Switch Announcement; this must * be the correct length or we toss the frame. */ IEEE80211_VERIFY_LENGTH(scan->csa[1], 3 * sizeof(uint8_t), scan->status |= IEEE80211_BPARSE_CSA_INVALID); } #ifdef IEEE80211_SUPPORT_MESH if (scan->meshid != NULL) { IEEE80211_VERIFY_ELEMENT(scan->meshid, IEEE80211_MESHID_LEN, scan->status |= IEEE80211_BPARSE_MESHID_INVALID); } #endif /* * Process HT ie's. This is complicated by our * accepting both the standard ie's and the pre-draft * vendor OUI ie's that some vendors still use/require. */ if (scan->htcap != NULL) { IEEE80211_VERIFY_LENGTH(scan->htcap[1], scan->htcap[0] == IEEE80211_ELEMID_VENDOR ? 4 + sizeof(struct ieee80211_ie_htcap)-2 : sizeof(struct ieee80211_ie_htcap)-2, scan->htcap = NULL); } if (scan->htinfo != NULL) { IEEE80211_VERIFY_LENGTH(scan->htinfo[1], scan->htinfo[0] == IEEE80211_ELEMID_VENDOR ? 4 + sizeof(struct ieee80211_ie_htinfo)-2 : sizeof(struct ieee80211_ie_htinfo)-2, scan->htinfo = NULL); } /* Process VHT IEs */ if (scan->vhtcap != NULL) { IEEE80211_VERIFY_LENGTH(scan->vhtcap[1], sizeof(struct ieee80211_ie_vhtcap) - 2, scan->vhtcap = NULL); } if (scan->vhtopmode != NULL) { IEEE80211_VERIFY_LENGTH(scan->vhtopmode[1], sizeof(struct ieee80211_ie_vht_operation) - 2, scan->vhtopmode = NULL); } return scan->status; } /* * Parse an Action frame. Return 0 on success, non-zero on failure. */ int ieee80211_parse_action(struct ieee80211_node *ni, struct mbuf *m) { struct ieee80211vap *vap = ni->ni_vap; const struct ieee80211_action *ia; struct ieee80211_frame *wh; uint8_t *frm, *efrm; /* * action frame format: * [1] category * [1] action * [tlv] parameters */ wh = mtod(m, struct ieee80211_frame *); frm = (u_int8_t *)&wh[1]; efrm = mtod(m, u_int8_t *) + m->m_len; IEEE80211_VERIFY_LENGTH(efrm - frm, sizeof(struct ieee80211_action), return EINVAL); ia = (const struct ieee80211_action *) frm; vap->iv_stats.is_rx_action++; IEEE80211_NODE_STAT(ni, rx_action); /* verify frame payloads but defer processing */ switch (ia->ia_category) { case IEEE80211_ACTION_CAT_BA: switch (ia->ia_action) { case IEEE80211_ACTION_BA_ADDBA_REQUEST: IEEE80211_VERIFY_LENGTH(efrm - frm, sizeof(struct ieee80211_action_ba_addbarequest), return EINVAL); break; case IEEE80211_ACTION_BA_ADDBA_RESPONSE: IEEE80211_VERIFY_LENGTH(efrm - frm, sizeof(struct ieee80211_action_ba_addbaresponse), return EINVAL); break; case IEEE80211_ACTION_BA_DELBA: IEEE80211_VERIFY_LENGTH(efrm - frm, sizeof(struct ieee80211_action_ba_delba), return EINVAL); break; } break; case IEEE80211_ACTION_CAT_HT: switch (ia->ia_action) { case IEEE80211_ACTION_HT_TXCHWIDTH: IEEE80211_VERIFY_LENGTH(efrm - frm, sizeof(struct ieee80211_action_ht_txchwidth), return EINVAL); break; case IEEE80211_ACTION_HT_MIMOPWRSAVE: IEEE80211_VERIFY_LENGTH(efrm - frm, sizeof(struct ieee80211_action_ht_mimopowersave), return EINVAL); break; } break; #ifdef IEEE80211_SUPPORT_MESH case IEEE80211_ACTION_CAT_MESH: switch (ia->ia_action) { case IEEE80211_ACTION_MESH_LMETRIC: /* * XXX: verification is true only if we are using * Airtime link metric (default) */ IEEE80211_VERIFY_LENGTH(efrm - frm, sizeof(struct ieee80211_meshlmetric_ie), return EINVAL); break; case IEEE80211_ACTION_MESH_HWMP: /* verify something */ break; case IEEE80211_ACTION_MESH_GANN: 
IEEE80211_VERIFY_LENGTH(efrm - frm, sizeof(struct ieee80211_meshgann_ie), return EINVAL); break; case IEEE80211_ACTION_MESH_CC: case IEEE80211_ACTION_MESH_MCCA_SREQ: case IEEE80211_ACTION_MESH_MCCA_SREP: case IEEE80211_ACTION_MESH_MCCA_AREQ: case IEEE80211_ACTION_MESH_MCCA_ADVER: case IEEE80211_ACTION_MESH_MCCA_TRDOWN: case IEEE80211_ACTION_MESH_TBTT_REQ: case IEEE80211_ACTION_MESH_TBTT_RES: /* reject these early on, not implemented */ IEEE80211_DISCARD(vap, IEEE80211_MSG_ELEMID | IEEE80211_MSG_INPUT, wh, NULL, "not implemented yet, act=0x%02X", ia->ia_action); return EINVAL; } break; case IEEE80211_ACTION_CAT_SELF_PROT: /* If TA or RA group address discard silently */ if (IEEE80211_IS_MULTICAST(wh->i_addr1) || IEEE80211_IS_MULTICAST(wh->i_addr2)) return EINVAL; /* * XXX: Should we verify complete length now or it is * to varying in sizes? */ switch (ia->ia_action) { case IEEE80211_ACTION_MESHPEERING_CONFIRM: case IEEE80211_ACTION_MESHPEERING_CLOSE: /* is not a peering candidate (yet) */ if (ni == vap->iv_bss) return EINVAL; break; } break; #endif case IEEE80211_ACTION_CAT_VHT: printf("%s: TODO: VHT handling!\n", __func__); break; } return 0; } #ifdef IEEE80211_DEBUG /* * Debugging support. */ void ieee80211_ssid_mismatch(struct ieee80211vap *vap, const char *tag, uint8_t mac[IEEE80211_ADDR_LEN], uint8_t *ssid) { printf("[%s] discard %s frame, ssid mismatch: ", ether_sprintf(mac), tag); ieee80211_print_essid(ssid + 2, ssid[1]); printf("\n"); } /* * Return the bssid of a frame. */ static const uint8_t * ieee80211_getbssid(const struct ieee80211vap *vap, const struct ieee80211_frame *wh) { if (vap->iv_opmode == IEEE80211_M_STA) return wh->i_addr2; if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) != IEEE80211_FC1_DIR_NODS) return wh->i_addr1; if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) == IEEE80211_FC0_SUBTYPE_PS_POLL) return wh->i_addr1; return wh->i_addr3; } #include void ieee80211_note(const struct ieee80211vap *vap, const char *fmt, ...) { char buf[256]; /* XXX */ va_list ap; int len; va_start(ap, fmt); len = vsnprintf(buf, sizeof(buf), fmt, ap); va_end(ap); if_printf(vap->iv_ifp, "%s", buf); /* NB: no \n */ if (len >= sizeof(buf)) printf("%s: XXX buffer too small: len = %d\n", __func__, len); } void ieee80211_note_frame(const struct ieee80211vap *vap, const struct ieee80211_frame *wh, const char *fmt, ...) { char buf[256]; /* XXX */ va_list ap; int len; va_start(ap, fmt); len = vsnprintf(buf, sizeof(buf), fmt, ap); va_end(ap); if_printf(vap->iv_ifp, "[%s] %s\n", ether_sprintf(ieee80211_getbssid(vap, wh)), buf); if (len >= sizeof(buf)) printf("%s: XXX buffer too small: len = %d\n", __func__, len); } void ieee80211_note_mac(const struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN], const char *fmt, ...) { char buf[256]; /* XXX */ va_list ap; int len; va_start(ap, fmt); len = vsnprintf(buf, sizeof(buf), fmt, ap); va_end(ap); if_printf(vap->iv_ifp, "[%s] %s\n", ether_sprintf(mac), buf); if (len >= sizeof(buf)) printf("%s: XXX buffer too small: len = %d\n", __func__, len); } void ieee80211_discard_frame(const struct ieee80211vap *vap, const struct ieee80211_frame *wh, const char *type, const char *fmt, ...) { char buf[256]; /* XXX */ va_list ap; int len; va_start(ap, fmt); len = vsnprintf(buf, sizeof(buf), fmt, ap); va_end(ap); if_printf(vap->iv_ifp, "[%s] discard %s frame, %s\n", ether_sprintf(ieee80211_getbssid(vap, wh)), type != NULL ? 
type : ieee80211_mgt_subtype_name(wh->i_fc[0]), buf); if (len >= sizeof(buf)) printf("%s: XXX buffer too small: len = %d\n", __func__, len); } void ieee80211_discard_ie(const struct ieee80211vap *vap, const struct ieee80211_frame *wh, const char *type, const char *fmt, ...) { char buf[256]; /* XXX */ va_list ap; int len; va_start(ap, fmt); len = vsnprintf(buf, sizeof(buf), fmt, ap); va_end(ap); if_printf(vap->iv_ifp, "[%s] discard%s%s information element, %s\n", ether_sprintf(ieee80211_getbssid(vap, wh)), type != NULL ? " " : "", type != NULL ? type : "", buf); if (len >= sizeof(buf)) printf("%s: XXX buffer too small: len = %d\n", __func__, len); } void ieee80211_discard_mac(const struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN], const char *type, const char *fmt, ...) { char buf[256]; /* XXX */ va_list ap; int len; va_start(ap, fmt); len = vsnprintf(buf, sizeof(buf), fmt, ap); va_end(ap); if_printf(vap->iv_ifp, "[%s] discard%s%s frame, %s\n", ether_sprintf(mac), type != NULL ? " " : "", type != NULL ? type : "", buf); if (len >= sizeof(buf)) printf("%s: XXX buffer too small: len = %d\n", __func__, len); } #endif /* IEEE80211_DEBUG */ diff --git a/sys/net80211/ieee80211_ioctl.c b/sys/net80211/ieee80211_ioctl.c index 419518eb1224..839f965f542f 100644 --- a/sys/net80211/ieee80211_ioctl.c +++ b/sys/net80211/ieee80211_ioctl.c @@ -1,3697 +1,3697 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2001 Atsushi Onoe * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); /* * IEEE 802.11 ioctl support (FreeBSD-specific) */ #include "opt_inet.h" #include "opt_wlan.h" #include #include #include #include #include #include #include #include #include #include #include #include #ifdef INET #include #include #endif #include #include #include #include #define IS_UP_AUTO(_vap) \ (IFNET_IS_UP_RUNNING((_vap)->iv_ifp) && \ (_vap)->iv_roaming == IEEE80211_ROAMING_AUTO) static const uint8_t zerobssid[IEEE80211_ADDR_LEN]; static struct ieee80211_channel *findchannel(struct ieee80211com *, int ieee, int mode); static int ieee80211_scanreq(struct ieee80211vap *, struct ieee80211_scan_req *); static int ieee80211_ioctl_getkey(u_long cmd, struct ieee80211vap *vap, struct ieee80211req *ireq) { struct ieee80211com *ic = vap->iv_ic; struct ieee80211_node *ni; struct ieee80211req_key ik; struct ieee80211_key *wk; const struct ieee80211_cipher *cip; u_int kid; int error; if (ireq->i_len != sizeof(ik)) return EINVAL; error = copyin(ireq->i_data, &ik, sizeof(ik)); if (error) return error; kid = ik.ik_keyix; if (kid == IEEE80211_KEYIX_NONE) { ni = ieee80211_find_vap_node(&ic->ic_sta, vap, ik.ik_macaddr); if (ni == NULL) return ENOENT; wk = &ni->ni_ucastkey; } else { if (kid >= IEEE80211_WEP_NKID) return EINVAL; wk = &vap->iv_nw_keys[kid]; IEEE80211_ADDR_COPY(&ik.ik_macaddr, vap->iv_bss->ni_macaddr); ni = NULL; } cip = wk->wk_cipher; ik.ik_type = cip->ic_cipher; ik.ik_keylen = wk->wk_keylen; ik.ik_flags = wk->wk_flags & (IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV); if (wk->wk_keyix == vap->iv_def_txkey) ik.ik_flags |= IEEE80211_KEY_DEFAULT; if (ieee80211_priv_check_vap_getkey(cmd, vap, NULL) == 0) { /* NB: only root can read key data */ ik.ik_keyrsc = wk->wk_keyrsc[IEEE80211_NONQOS_TID]; ik.ik_keytsc = wk->wk_keytsc; memcpy(ik.ik_keydata, wk->wk_key, wk->wk_keylen); if (cip->ic_cipher == IEEE80211_CIPHER_TKIP) { memcpy(ik.ik_keydata+wk->wk_keylen, wk->wk_key + IEEE80211_KEYBUF_SIZE, IEEE80211_MICBUF_SIZE); ik.ik_keylen += IEEE80211_MICBUF_SIZE; } } else { ik.ik_keyrsc = 0; ik.ik_keytsc = 0; memset(ik.ik_keydata, 0, sizeof(ik.ik_keydata)); } if (ni != NULL) ieee80211_free_node(ni); return copyout(&ik, ireq->i_data, sizeof(ik)); } static int ieee80211_ioctl_getchanlist(struct ieee80211vap *vap, struct ieee80211req *ireq) { struct ieee80211com *ic = vap->iv_ic; if (sizeof(ic->ic_chan_active) < ireq->i_len) ireq->i_len = sizeof(ic->ic_chan_active); return copyout(&ic->ic_chan_active, ireq->i_data, ireq->i_len); } static int ieee80211_ioctl_getchaninfo(struct ieee80211vap *vap, struct ieee80211req *ireq) { struct ieee80211com *ic = vap->iv_ic; uint32_t space; space = __offsetof(struct ieee80211req_chaninfo, ic_chans[ic->ic_nchans]); if (space > ireq->i_len) space = ireq->i_len; /* XXX assumes compatible layout */ return copyout(&ic->ic_nchans, ireq->i_data, space); } static int ieee80211_ioctl_getwpaie(struct ieee80211vap *vap, struct ieee80211req *ireq, int req) { struct ieee80211_node *ni; struct ieee80211req_wpaie2 *wpaie; int error; if (ireq->i_len < IEEE80211_ADDR_LEN) return EINVAL; wpaie = IEEE80211_MALLOC(sizeof(*wpaie), M_TEMP, IEEE80211_M_NOWAIT | IEEE80211_M_ZERO); if (wpaie == NULL) return ENOMEM; error = copyin(ireq->i_data, wpaie->wpa_macaddr, IEEE80211_ADDR_LEN); if (error != 0) goto bad; ni = ieee80211_find_vap_node(&vap->iv_ic->ic_sta, vap, wpaie->wpa_macaddr); if (ni == NULL) { error = ENOENT; goto bad; } if (ni->ni_ies.wpa_ie != NULL) { int ielen = ni->ni_ies.wpa_ie[1] + 2; if (ielen > sizeof(wpaie->wpa_ie)) ielen = sizeof(wpaie->wpa_ie); 
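		/* NB: ielen counts the 2-byte element header and was clamped above, so the copy below cannot overrun wpa_ie[]. */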
memcpy(wpaie->wpa_ie, ni->ni_ies.wpa_ie, ielen); } if (req == IEEE80211_IOC_WPAIE2) { if (ni->ni_ies.rsn_ie != NULL) { int ielen = ni->ni_ies.rsn_ie[1] + 2; if (ielen > sizeof(wpaie->rsn_ie)) ielen = sizeof(wpaie->rsn_ie); memcpy(wpaie->rsn_ie, ni->ni_ies.rsn_ie, ielen); } if (ireq->i_len > sizeof(struct ieee80211req_wpaie2)) ireq->i_len = sizeof(struct ieee80211req_wpaie2); } else { /* compatibility op, may overwrite wpa ie */ /* XXX check ic_flags? */ if (ni->ni_ies.rsn_ie != NULL) { int ielen = ni->ni_ies.rsn_ie[1] + 2; if (ielen > sizeof(wpaie->wpa_ie)) ielen = sizeof(wpaie->wpa_ie); memcpy(wpaie->wpa_ie, ni->ni_ies.rsn_ie, ielen); } if (ireq->i_len > sizeof(struct ieee80211req_wpaie)) ireq->i_len = sizeof(struct ieee80211req_wpaie); } ieee80211_free_node(ni); error = copyout(wpaie, ireq->i_data, ireq->i_len); bad: IEEE80211_FREE(wpaie, M_TEMP); return error; } static int ieee80211_ioctl_getstastats(struct ieee80211vap *vap, struct ieee80211req *ireq) { struct ieee80211_node *ni; uint8_t macaddr[IEEE80211_ADDR_LEN]; const size_t off = __offsetof(struct ieee80211req_sta_stats, is_stats); int error; if (ireq->i_len < off) return EINVAL; error = copyin(ireq->i_data, macaddr, IEEE80211_ADDR_LEN); if (error != 0) return error; ni = ieee80211_find_vap_node(&vap->iv_ic->ic_sta, vap, macaddr); if (ni == NULL) return ENOENT; if (ireq->i_len > sizeof(struct ieee80211req_sta_stats)) ireq->i_len = sizeof(struct ieee80211req_sta_stats); /* NB: copy out only the statistics */ error = copyout(&ni->ni_stats, (uint8_t *) ireq->i_data + off, ireq->i_len - off); ieee80211_free_node(ni); return error; } struct scanreq { struct ieee80211req_scan_result *sr; size_t space; }; static size_t scan_space(const struct ieee80211_scan_entry *se, int *ielen) { size_t len; *ielen = se->se_ies.len; /* * NB: ie's can be no more than 255 bytes and the max 802.11 * packet is <3Kbytes so we are sure this doesn't overflow * 16-bits; if this is a concern we can drop the ie's. 
*/ len = sizeof(struct ieee80211req_scan_result) + se->se_ssid[1] + se->se_meshid[1] + *ielen; return roundup(len, sizeof(uint32_t)); } static void get_scan_space(void *arg, const struct ieee80211_scan_entry *se) { struct scanreq *req = arg; int ielen; req->space += scan_space(se, &ielen); } static void get_scan_result(void *arg, const struct ieee80211_scan_entry *se) { struct scanreq *req = arg; struct ieee80211req_scan_result *sr; int ielen, len, nr, nxr; uint8_t *cp; len = scan_space(se, &ielen); if (len > req->space) return; sr = req->sr; KASSERT(len <= 65535 && ielen <= 65535, ("len %u ssid %u ie %u", len, se->se_ssid[1], ielen)); sr->isr_len = len; sr->isr_ie_off = sizeof(struct ieee80211req_scan_result); sr->isr_ie_len = ielen; sr->isr_freq = se->se_chan->ic_freq; sr->isr_flags = se->se_chan->ic_flags; sr->isr_rssi = se->se_rssi; sr->isr_noise = se->se_noise; sr->isr_intval = se->se_intval; sr->isr_capinfo = se->se_capinfo; sr->isr_erp = se->se_erp; IEEE80211_ADDR_COPY(sr->isr_bssid, se->se_bssid); nr = min(se->se_rates[1], IEEE80211_RATE_MAXSIZE); memcpy(sr->isr_rates, se->se_rates+2, nr); nxr = min(se->se_xrates[1], IEEE80211_RATE_MAXSIZE - nr); memcpy(sr->isr_rates+nr, se->se_xrates+2, nxr); sr->isr_nrates = nr + nxr; /* copy SSID */ sr->isr_ssid_len = se->se_ssid[1]; cp = ((uint8_t *)sr) + sr->isr_ie_off; memcpy(cp, se->se_ssid+2, sr->isr_ssid_len); /* copy mesh id */ cp += sr->isr_ssid_len; sr->isr_meshid_len = se->se_meshid[1]; memcpy(cp, se->se_meshid+2, sr->isr_meshid_len); cp += sr->isr_meshid_len; if (ielen) memcpy(cp, se->se_ies.data, ielen); req->space -= len; req->sr = (struct ieee80211req_scan_result *)(((uint8_t *)sr) + len); } static int ieee80211_ioctl_getscanresults(struct ieee80211vap *vap, struct ieee80211req *ireq) { struct scanreq req; int error; if (ireq->i_len < sizeof(struct scanreq)) return EFAULT; error = 0; req.space = 0; ieee80211_scan_iterate(vap, get_scan_space, &req); if (req.space > ireq->i_len) req.space = ireq->i_len; if (req.space > 0) { uint32_t space; void *p; space = req.space; - /* XXX M_WAITOK after driver lock released */ + /* XXX IEEE80211_M_WAITOK after driver lock released */ p = IEEE80211_MALLOC(space, M_TEMP, IEEE80211_M_NOWAIT | IEEE80211_M_ZERO); if (p == NULL) return ENOMEM; req.sr = p; ieee80211_scan_iterate(vap, get_scan_result, &req); ireq->i_len = space - req.space; error = copyout(p, ireq->i_data, ireq->i_len); IEEE80211_FREE(p, M_TEMP); } else ireq->i_len = 0; return error; } struct stainforeq { struct ieee80211req_sta_info *si; size_t space; }; static size_t sta_space(const struct ieee80211_node *ni, size_t *ielen) { *ielen = ni->ni_ies.len; return roundup(sizeof(struct ieee80211req_sta_info) + *ielen, sizeof(uint32_t)); } static void get_sta_space(void *arg, struct ieee80211_node *ni) { struct stainforeq *req = arg; size_t ielen; if (ni->ni_vap->iv_opmode == IEEE80211_M_HOSTAP && ni->ni_associd == 0) /* only associated stations */ return; req->space += sta_space(ni, &ielen); } static void get_sta_info(void *arg, struct ieee80211_node *ni) { struct stainforeq *req = arg; struct ieee80211vap *vap = ni->ni_vap; struct ieee80211req_sta_info *si; size_t ielen, len; uint8_t *cp; if (vap->iv_opmode == IEEE80211_M_HOSTAP && ni->ni_associd == 0) /* only associated stations */ return; if (ni->ni_chan == IEEE80211_CHAN_ANYC) /* XXX bogus entry */ return; len = sta_space(ni, &ielen); if (len > req->space) return; si = req->si; si->isi_len = len; si->isi_ie_off = sizeof(struct ieee80211req_sta_info); si->isi_ie_len = ielen; si->isi_freq = 
ni->ni_chan->ic_freq; si->isi_flags = ni->ni_chan->ic_flags; si->isi_state = ni->ni_flags; si->isi_authmode = ni->ni_authmode; vap->iv_ic->ic_node_getsignal(ni, &si->isi_rssi, &si->isi_noise); vap->iv_ic->ic_node_getmimoinfo(ni, &si->isi_mimo); si->isi_capinfo = ni->ni_capinfo; si->isi_erp = ni->ni_erp; IEEE80211_ADDR_COPY(si->isi_macaddr, ni->ni_macaddr); si->isi_nrates = ni->ni_rates.rs_nrates; if (si->isi_nrates > 15) si->isi_nrates = 15; memcpy(si->isi_rates, ni->ni_rates.rs_rates, si->isi_nrates); si->isi_txrate = ni->ni_txrate; if (si->isi_txrate & IEEE80211_RATE_MCS) { const struct ieee80211_mcs_rates *mcs = &ieee80211_htrates[ni->ni_txrate &~ IEEE80211_RATE_MCS]; if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) { if (ni->ni_flags & IEEE80211_NODE_SGI40) si->isi_txmbps = mcs->ht40_rate_800ns; else si->isi_txmbps = mcs->ht40_rate_400ns; } else { if (ni->ni_flags & IEEE80211_NODE_SGI20) si->isi_txmbps = mcs->ht20_rate_800ns; else si->isi_txmbps = mcs->ht20_rate_400ns; } } else si->isi_txmbps = si->isi_txrate; si->isi_associd = ni->ni_associd; si->isi_txpower = ni->ni_txpower; si->isi_vlan = ni->ni_vlan; if (ni->ni_flags & IEEE80211_NODE_QOS) { memcpy(si->isi_txseqs, ni->ni_txseqs, sizeof(ni->ni_txseqs)); memcpy(si->isi_rxseqs, ni->ni_rxseqs, sizeof(ni->ni_rxseqs)); } else { si->isi_txseqs[0] = ni->ni_txseqs[IEEE80211_NONQOS_TID]; si->isi_rxseqs[0] = ni->ni_rxseqs[IEEE80211_NONQOS_TID]; } /* NB: leave all cases in case we relax ni_associd == 0 check */ if (ieee80211_node_is_authorized(ni)) si->isi_inact = vap->iv_inact_run; else if (ni->ni_associd != 0 || (vap->iv_opmode == IEEE80211_M_WDS && (vap->iv_flags_ext & IEEE80211_FEXT_WDSLEGACY))) si->isi_inact = vap->iv_inact_auth; else si->isi_inact = vap->iv_inact_init; si->isi_inact = (si->isi_inact - ni->ni_inact) * IEEE80211_INACT_WAIT; si->isi_localid = ni->ni_mllid; si->isi_peerid = ni->ni_mlpid; si->isi_peerstate = ni->ni_mlstate; if (ielen) { cp = ((uint8_t *)si) + si->isi_ie_off; memcpy(cp, ni->ni_ies.data, ielen); } req->si = (struct ieee80211req_sta_info *)(((uint8_t *)si) + len); req->space -= len; } static int getstainfo_common(struct ieee80211vap *vap, struct ieee80211req *ireq, struct ieee80211_node *ni, size_t off) { struct ieee80211com *ic = vap->iv_ic; struct stainforeq req; size_t space; void *p; int error; error = 0; req.space = 0; if (ni == NULL) { ieee80211_iterate_nodes_vap(&ic->ic_sta, vap, get_sta_space, &req); } else get_sta_space(&req, ni); if (req.space > ireq->i_len) req.space = ireq->i_len; if (req.space > 0) { space = req.space; - /* XXX M_WAITOK after driver lock released */ + /* XXX IEEE80211_M_WAITOK after driver lock released */ p = IEEE80211_MALLOC(space, M_TEMP, IEEE80211_M_NOWAIT | IEEE80211_M_ZERO); if (p == NULL) { error = ENOMEM; goto bad; } req.si = p; if (ni == NULL) { ieee80211_iterate_nodes_vap(&ic->ic_sta, vap, get_sta_info, &req); } else get_sta_info(&req, ni); ireq->i_len = space - req.space; error = copyout(p, (uint8_t *) ireq->i_data+off, ireq->i_len); IEEE80211_FREE(p, M_TEMP); } else ireq->i_len = 0; bad: if (ni != NULL) ieee80211_free_node(ni); return error; } static int ieee80211_ioctl_getstainfo(struct ieee80211vap *vap, struct ieee80211req *ireq) { uint8_t macaddr[IEEE80211_ADDR_LEN]; const size_t off = __offsetof(struct ieee80211req_sta_req, info); struct ieee80211_node *ni; int error; if (ireq->i_len < sizeof(struct ieee80211req_sta_req)) return EFAULT; error = copyin(ireq->i_data, macaddr, IEEE80211_ADDR_LEN); if (error != 0) return error; if (IEEE80211_ADDR_EQ(macaddr, 
vap->iv_ifp->if_broadcastaddr)) { ni = NULL; } else { ni = ieee80211_find_vap_node(&vap->iv_ic->ic_sta, vap, macaddr); if (ni == NULL) return ENOENT; } return getstainfo_common(vap, ireq, ni, off); } static int ieee80211_ioctl_getstatxpow(struct ieee80211vap *vap, struct ieee80211req *ireq) { struct ieee80211_node *ni; struct ieee80211req_sta_txpow txpow; int error; if (ireq->i_len != sizeof(txpow)) return EINVAL; error = copyin(ireq->i_data, &txpow, sizeof(txpow)); if (error != 0) return error; ni = ieee80211_find_vap_node(&vap->iv_ic->ic_sta, vap, txpow.it_macaddr); if (ni == NULL) return ENOENT; txpow.it_txpow = ni->ni_txpower; error = copyout(&txpow, ireq->i_data, sizeof(txpow)); ieee80211_free_node(ni); return error; } static int ieee80211_ioctl_getwmeparam(struct ieee80211vap *vap, struct ieee80211req *ireq) { struct ieee80211com *ic = vap->iv_ic; struct ieee80211_wme_state *wme = &ic->ic_wme; struct wmeParams *wmep; int ac; if ((ic->ic_caps & IEEE80211_C_WME) == 0) return EINVAL; ac = (ireq->i_len & IEEE80211_WMEPARAM_VAL); if (ac >= WME_NUM_AC) ac = WME_AC_BE; if (ireq->i_len & IEEE80211_WMEPARAM_BSS) wmep = &wme->wme_wmeBssChanParams.cap_wmeParams[ac]; else wmep = &wme->wme_wmeChanParams.cap_wmeParams[ac]; switch (ireq->i_type) { case IEEE80211_IOC_WME_CWMIN: /* WME: CWmin */ ireq->i_val = wmep->wmep_logcwmin; break; case IEEE80211_IOC_WME_CWMAX: /* WME: CWmax */ ireq->i_val = wmep->wmep_logcwmax; break; case IEEE80211_IOC_WME_AIFS: /* WME: AIFS */ ireq->i_val = wmep->wmep_aifsn; break; case IEEE80211_IOC_WME_TXOPLIMIT: /* WME: txops limit */ ireq->i_val = wmep->wmep_txopLimit; break; case IEEE80211_IOC_WME_ACM: /* WME: ACM (bss only) */ wmep = &wme->wme_wmeBssChanParams.cap_wmeParams[ac]; ireq->i_val = wmep->wmep_acm; break; case IEEE80211_IOC_WME_ACKPOLICY: /* WME: ACK policy (!bss only)*/ wmep = &wme->wme_wmeChanParams.cap_wmeParams[ac]; ireq->i_val = !wmep->wmep_noackPolicy; break; } return 0; } static int ieee80211_ioctl_getmaccmd(struct ieee80211vap *vap, struct ieee80211req *ireq) { const struct ieee80211_aclator *acl = vap->iv_acl; return (acl == NULL ? EINVAL : acl->iac_getioctl(vap, ireq)); } static int ieee80211_ioctl_getcurchan(struct ieee80211vap *vap, struct ieee80211req *ireq) { struct ieee80211com *ic = vap->iv_ic; struct ieee80211_channel *c; if (ireq->i_len != sizeof(struct ieee80211_channel)) return EINVAL; /* * vap's may have different operating channels when HT is * in use. When in RUN state report the vap-specific channel. * Otherwise return curchan. 
*/ if (vap->iv_state == IEEE80211_S_RUN || vap->iv_state == IEEE80211_S_SLEEP) c = vap->iv_bss->ni_chan; else c = ic->ic_curchan; return copyout(c, ireq->i_data, sizeof(*c)); } static int getappie(const struct ieee80211_appie *aie, struct ieee80211req *ireq) { if (aie == NULL) return EINVAL; /* NB: truncate, caller can check length */ if (ireq->i_len > aie->ie_len) ireq->i_len = aie->ie_len; return copyout(aie->ie_data, ireq->i_data, ireq->i_len); } static int ieee80211_ioctl_getappie(struct ieee80211vap *vap, struct ieee80211req *ireq) { uint8_t fc0; fc0 = ireq->i_val & 0xff; if ((fc0 & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_MGT) return EINVAL; /* NB: could check iv_opmode and reject but hardly worth the effort */ switch (fc0 & IEEE80211_FC0_SUBTYPE_MASK) { case IEEE80211_FC0_SUBTYPE_BEACON: return getappie(vap->iv_appie_beacon, ireq); case IEEE80211_FC0_SUBTYPE_PROBE_RESP: return getappie(vap->iv_appie_proberesp, ireq); case IEEE80211_FC0_SUBTYPE_ASSOC_RESP: return getappie(vap->iv_appie_assocresp, ireq); case IEEE80211_FC0_SUBTYPE_PROBE_REQ: return getappie(vap->iv_appie_probereq, ireq); case IEEE80211_FC0_SUBTYPE_ASSOC_REQ: return getappie(vap->iv_appie_assocreq, ireq); case IEEE80211_FC0_SUBTYPE_BEACON|IEEE80211_FC0_SUBTYPE_PROBE_RESP: return getappie(vap->iv_appie_wpa, ireq); } return EINVAL; } static int ieee80211_ioctl_getregdomain(struct ieee80211vap *vap, const struct ieee80211req *ireq) { struct ieee80211com *ic = vap->iv_ic; if (ireq->i_len != sizeof(ic->ic_regdomain)) return EINVAL; return copyout(&ic->ic_regdomain, ireq->i_data, sizeof(ic->ic_regdomain)); } static int ieee80211_ioctl_getroam(struct ieee80211vap *vap, const struct ieee80211req *ireq) { size_t len = ireq->i_len; /* NB: accept short requests for backwards compat */ if (len > sizeof(vap->iv_roamparms)) len = sizeof(vap->iv_roamparms); return copyout(vap->iv_roamparms, ireq->i_data, len); } static int ieee80211_ioctl_gettxparams(struct ieee80211vap *vap, const struct ieee80211req *ireq) { size_t len = ireq->i_len; /* NB: accept short requests for backwards compat */ if (len > sizeof(vap->iv_txparms)) len = sizeof(vap->iv_txparms); return copyout(vap->iv_txparms, ireq->i_data, len); } static int ieee80211_ioctl_getdevcaps(struct ieee80211com *ic, const struct ieee80211req *ireq) { struct ieee80211_devcaps_req *dc; struct ieee80211req_chaninfo *ci; int maxchans, error; maxchans = 1 + ((ireq->i_len - sizeof(struct ieee80211_devcaps_req)) / sizeof(struct ieee80211_channel)); /* NB: require 1 so we know ic_nchans is accessible */ if (maxchans < 1) return EINVAL; /* constrain max request size, 2K channels is ~24Kbytes */ if (maxchans > 2048) maxchans = 2048; dc = (struct ieee80211_devcaps_req *) IEEE80211_MALLOC(IEEE80211_DEVCAPS_SIZE(maxchans), M_TEMP, IEEE80211_M_NOWAIT | IEEE80211_M_ZERO); if (dc == NULL) return ENOMEM; dc->dc_drivercaps = ic->ic_caps; dc->dc_cryptocaps = ic->ic_cryptocaps; dc->dc_htcaps = ic->ic_htcaps; dc->dc_vhtcaps = ic->ic_vhtcaps; ci = &dc->dc_chaninfo; ic->ic_getradiocaps(ic, maxchans, &ci->ic_nchans, ci->ic_chans); KASSERT(ci->ic_nchans <= maxchans, ("nchans %d maxchans %d", ci->ic_nchans, maxchans)); ieee80211_sort_channels(ci->ic_chans, ci->ic_nchans); error = copyout(dc, ireq->i_data, IEEE80211_DEVCAPS_SPACE(dc)); IEEE80211_FREE(dc, M_TEMP); return error; } static int ieee80211_ioctl_getstavlan(struct ieee80211vap *vap, struct ieee80211req *ireq) { struct ieee80211_node *ni; struct ieee80211req_sta_vlan vlan; int error; if (ireq->i_len != sizeof(vlan)) return EINVAL; error = 
copyin(ireq->i_data, &vlan, sizeof(vlan)); if (error != 0) return error; if (!IEEE80211_ADDR_EQ(vlan.sv_macaddr, zerobssid)) { ni = ieee80211_find_vap_node(&vap->iv_ic->ic_sta, vap, vlan.sv_macaddr); if (ni == NULL) return ENOENT; } else ni = ieee80211_ref_node(vap->iv_bss); vlan.sv_vlan = ni->ni_vlan; error = copyout(&vlan, ireq->i_data, sizeof(vlan)); ieee80211_free_node(ni); return error; } /* * Dummy ioctl get handler so the linker set is defined. */ static int dummy_ioctl_get(struct ieee80211vap *vap, struct ieee80211req *ireq) { return ENOSYS; } IEEE80211_IOCTL_GET(dummy, dummy_ioctl_get); static int ieee80211_ioctl_getdefault(struct ieee80211vap *vap, struct ieee80211req *ireq) { ieee80211_ioctl_getfunc * const *get; int error; SET_FOREACH(get, ieee80211_ioctl_getset) { error = (*get)(vap, ireq); if (error != ENOSYS) return error; } return EINVAL; } static int ieee80211_ioctl_get80211(struct ieee80211vap *vap, u_long cmd, struct ieee80211req *ireq) { struct ieee80211com *ic = vap->iv_ic; u_int kid, len; uint8_t tmpkey[IEEE80211_KEYBUF_SIZE]; char tmpssid[IEEE80211_NWID_LEN]; int error = 0; switch (ireq->i_type) { case IEEE80211_IOC_IC_NAME: len = strlen(ic->ic_name) + 1; if (len > ireq->i_len) return (EINVAL); ireq->i_len = len; error = copyout(ic->ic_name, ireq->i_data, ireq->i_len); break; case IEEE80211_IOC_SSID: switch (vap->iv_state) { case IEEE80211_S_INIT: case IEEE80211_S_SCAN: ireq->i_len = vap->iv_des_ssid[0].len; memcpy(tmpssid, vap->iv_des_ssid[0].ssid, ireq->i_len); break; default: ireq->i_len = vap->iv_bss->ni_esslen; memcpy(tmpssid, vap->iv_bss->ni_essid, ireq->i_len); break; } error = copyout(tmpssid, ireq->i_data, ireq->i_len); break; case IEEE80211_IOC_NUMSSIDS: ireq->i_val = 1; break; case IEEE80211_IOC_WEP: if ((vap->iv_flags & IEEE80211_F_PRIVACY) == 0) ireq->i_val = IEEE80211_WEP_OFF; else if (vap->iv_flags & IEEE80211_F_DROPUNENC) ireq->i_val = IEEE80211_WEP_ON; else ireq->i_val = IEEE80211_WEP_MIXED; break; case IEEE80211_IOC_WEPKEY: kid = (u_int) ireq->i_val; if (kid >= IEEE80211_WEP_NKID) return EINVAL; len = (u_int) vap->iv_nw_keys[kid].wk_keylen; /* NB: only root can read WEP keys */ if (ieee80211_priv_check_vap_getkey(cmd, vap, NULL) == 0) { bcopy(vap->iv_nw_keys[kid].wk_key, tmpkey, len); } else { bzero(tmpkey, len); } ireq->i_len = len; error = copyout(tmpkey, ireq->i_data, len); break; case IEEE80211_IOC_NUMWEPKEYS: ireq->i_val = IEEE80211_WEP_NKID; break; case IEEE80211_IOC_WEPTXKEY: ireq->i_val = vap->iv_def_txkey; break; case IEEE80211_IOC_AUTHMODE: if (vap->iv_flags & IEEE80211_F_WPA) ireq->i_val = IEEE80211_AUTH_WPA; else ireq->i_val = vap->iv_bss->ni_authmode; break; case IEEE80211_IOC_CHANNEL: ireq->i_val = ieee80211_chan2ieee(ic, ic->ic_curchan); break; case IEEE80211_IOC_POWERSAVE: if (vap->iv_flags & IEEE80211_F_PMGTON) ireq->i_val = IEEE80211_POWERSAVE_ON; else ireq->i_val = IEEE80211_POWERSAVE_OFF; break; case IEEE80211_IOC_POWERSAVESLEEP: ireq->i_val = ic->ic_lintval; break; case IEEE80211_IOC_RTSTHRESHOLD: ireq->i_val = vap->iv_rtsthreshold; break; case IEEE80211_IOC_PROTMODE: ireq->i_val = vap->iv_protmode; break; case IEEE80211_IOC_TXPOWER: /* * Tx power limit is the min of max regulatory * power, any user-set limit, and the max the * radio can do. 
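	 * NB: ic_maxregpower is kept in dBm while the other limits are in
	 * half-dBm units, hence the doubling below.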
* * TODO: methodize this */ ireq->i_val = 2*ic->ic_curchan->ic_maxregpower; if (ireq->i_val > ic->ic_txpowlimit) ireq->i_val = ic->ic_txpowlimit; if (ireq->i_val > ic->ic_curchan->ic_maxpower) ireq->i_val = ic->ic_curchan->ic_maxpower; break; case IEEE80211_IOC_WPA: switch (vap->iv_flags & IEEE80211_F_WPA) { case IEEE80211_F_WPA1: ireq->i_val = 1; break; case IEEE80211_F_WPA2: ireq->i_val = 2; break; case IEEE80211_F_WPA1 | IEEE80211_F_WPA2: ireq->i_val = 3; break; default: ireq->i_val = 0; break; } break; case IEEE80211_IOC_CHANLIST: error = ieee80211_ioctl_getchanlist(vap, ireq); break; case IEEE80211_IOC_ROAMING: ireq->i_val = vap->iv_roaming; break; case IEEE80211_IOC_PRIVACY: ireq->i_val = (vap->iv_flags & IEEE80211_F_PRIVACY) != 0; break; case IEEE80211_IOC_DROPUNENCRYPTED: ireq->i_val = (vap->iv_flags & IEEE80211_F_DROPUNENC) != 0; break; case IEEE80211_IOC_COUNTERMEASURES: ireq->i_val = (vap->iv_flags & IEEE80211_F_COUNTERM) != 0; break; case IEEE80211_IOC_WME: ireq->i_val = (vap->iv_flags & IEEE80211_F_WME) != 0; break; case IEEE80211_IOC_HIDESSID: ireq->i_val = (vap->iv_flags & IEEE80211_F_HIDESSID) != 0; break; case IEEE80211_IOC_APBRIDGE: ireq->i_val = (vap->iv_flags & IEEE80211_F_NOBRIDGE) == 0; break; case IEEE80211_IOC_WPAKEY: error = ieee80211_ioctl_getkey(cmd, vap, ireq); break; case IEEE80211_IOC_CHANINFO: error = ieee80211_ioctl_getchaninfo(vap, ireq); break; case IEEE80211_IOC_BSSID: if (ireq->i_len != IEEE80211_ADDR_LEN) return EINVAL; if (vap->iv_state == IEEE80211_S_RUN || vap->iv_state == IEEE80211_S_SLEEP) { error = copyout(vap->iv_opmode == IEEE80211_M_WDS ? vap->iv_bss->ni_macaddr : vap->iv_bss->ni_bssid, ireq->i_data, ireq->i_len); } else error = copyout(vap->iv_des_bssid, ireq->i_data, ireq->i_len); break; case IEEE80211_IOC_WPAIE: case IEEE80211_IOC_WPAIE2: error = ieee80211_ioctl_getwpaie(vap, ireq, ireq->i_type); break; case IEEE80211_IOC_SCAN_RESULTS: error = ieee80211_ioctl_getscanresults(vap, ireq); break; case IEEE80211_IOC_STA_STATS: error = ieee80211_ioctl_getstastats(vap, ireq); break; case IEEE80211_IOC_TXPOWMAX: ireq->i_val = vap->iv_bss->ni_txpower; break; case IEEE80211_IOC_STA_TXPOW: error = ieee80211_ioctl_getstatxpow(vap, ireq); break; case IEEE80211_IOC_STA_INFO: error = ieee80211_ioctl_getstainfo(vap, ireq); break; case IEEE80211_IOC_WME_CWMIN: /* WME: CWmin */ case IEEE80211_IOC_WME_CWMAX: /* WME: CWmax */ case IEEE80211_IOC_WME_AIFS: /* WME: AIFS */ case IEEE80211_IOC_WME_TXOPLIMIT: /* WME: txops limit */ case IEEE80211_IOC_WME_ACM: /* WME: ACM (bss only) */ case IEEE80211_IOC_WME_ACKPOLICY: /* WME: ACK policy (!bss only) */ error = ieee80211_ioctl_getwmeparam(vap, ireq); break; case IEEE80211_IOC_DTIM_PERIOD: ireq->i_val = vap->iv_dtim_period; break; case IEEE80211_IOC_BEACON_INTERVAL: /* NB: get from ic_bss for station mode */ ireq->i_val = vap->iv_bss->ni_intval; break; case IEEE80211_IOC_PUREG: ireq->i_val = (vap->iv_flags & IEEE80211_F_PUREG) != 0; break; case IEEE80211_IOC_QUIET: ireq->i_val = vap->iv_quiet; break; case IEEE80211_IOC_QUIET_COUNT: ireq->i_val = vap->iv_quiet_count; break; case IEEE80211_IOC_QUIET_PERIOD: ireq->i_val = vap->iv_quiet_period; break; case IEEE80211_IOC_QUIET_DUR: ireq->i_val = vap->iv_quiet_duration; break; case IEEE80211_IOC_QUIET_OFFSET: ireq->i_val = vap->iv_quiet_offset; break; case IEEE80211_IOC_BGSCAN: ireq->i_val = (vap->iv_flags & IEEE80211_F_BGSCAN) != 0; break; case IEEE80211_IOC_BGSCAN_IDLE: ireq->i_val = vap->iv_bgscanidle*hz/1000; /* ms */ break; case IEEE80211_IOC_BGSCAN_INTERVAL: ireq->i_val 
= vap->iv_bgscanintvl/hz; /* seconds */ break; case IEEE80211_IOC_SCANVALID: ireq->i_val = vap->iv_scanvalid/hz; /* seconds */ break; case IEEE80211_IOC_FRAGTHRESHOLD: ireq->i_val = vap->iv_fragthreshold; break; case IEEE80211_IOC_MACCMD: error = ieee80211_ioctl_getmaccmd(vap, ireq); break; case IEEE80211_IOC_BURST: ireq->i_val = (vap->iv_flags & IEEE80211_F_BURST) != 0; break; case IEEE80211_IOC_BMISSTHRESHOLD: ireq->i_val = vap->iv_bmissthreshold; break; case IEEE80211_IOC_CURCHAN: error = ieee80211_ioctl_getcurchan(vap, ireq); break; case IEEE80211_IOC_SHORTGI: ireq->i_val = 0; if (vap->iv_flags_ht & IEEE80211_FHT_SHORTGI20) ireq->i_val |= IEEE80211_HTCAP_SHORTGI20; if (vap->iv_flags_ht & IEEE80211_FHT_SHORTGI40) ireq->i_val |= IEEE80211_HTCAP_SHORTGI40; break; case IEEE80211_IOC_AMPDU: ireq->i_val = 0; if (vap->iv_flags_ht & IEEE80211_FHT_AMPDU_TX) ireq->i_val |= 1; if (vap->iv_flags_ht & IEEE80211_FHT_AMPDU_RX) ireq->i_val |= 2; break; case IEEE80211_IOC_AMPDU_LIMIT: /* XXX TODO: make this a per-node thing; and leave this as global */ if (vap->iv_opmode == IEEE80211_M_HOSTAP) ireq->i_val = vap->iv_ampdu_rxmax; else if (vap->iv_state == IEEE80211_S_RUN || vap->iv_state == IEEE80211_S_SLEEP) /* * XXX TODO: this isn't completely correct, as we've * negotiated the higher of the two. */ ireq->i_val = _IEEE80211_MASKSHIFT( vap->iv_bss->ni_htparam, IEEE80211_HTCAP_MAXRXAMPDU); else ireq->i_val = vap->iv_ampdu_limit; break; case IEEE80211_IOC_AMPDU_DENSITY: /* XXX TODO: make this a per-node thing; and leave this as global */ if (vap->iv_opmode == IEEE80211_M_STA && (vap->iv_state == IEEE80211_S_RUN || vap->iv_state == IEEE80211_S_SLEEP)) /* * XXX TODO: this isn't completely correct, as we've * negotiated the higher of the two. */ ireq->i_val = _IEEE80211_MASKSHIFT(vap->iv_bss->ni_htparam, IEEE80211_HTCAP_MPDUDENSITY); else ireq->i_val = vap->iv_ampdu_density; break; case IEEE80211_IOC_AMSDU: ireq->i_val = 0; if (vap->iv_flags_ht & IEEE80211_FHT_AMSDU_TX) ireq->i_val |= 1; if (vap->iv_flags_ht & IEEE80211_FHT_AMSDU_RX) ireq->i_val |= 2; break; case IEEE80211_IOC_AMSDU_LIMIT: ireq->i_val = vap->iv_amsdu_limit; /* XXX truncation? 
*/ break; case IEEE80211_IOC_PUREN: ireq->i_val = (vap->iv_flags_ht & IEEE80211_FHT_PUREN) != 0; break; case IEEE80211_IOC_DOTH: ireq->i_val = (vap->iv_flags & IEEE80211_F_DOTH) != 0; break; case IEEE80211_IOC_REGDOMAIN: error = ieee80211_ioctl_getregdomain(vap, ireq); break; case IEEE80211_IOC_ROAM: error = ieee80211_ioctl_getroam(vap, ireq); break; case IEEE80211_IOC_TXPARAMS: error = ieee80211_ioctl_gettxparams(vap, ireq); break; case IEEE80211_IOC_HTCOMPAT: ireq->i_val = (vap->iv_flags_ht & IEEE80211_FHT_HTCOMPAT) != 0; break; case IEEE80211_IOC_DWDS: ireq->i_val = (vap->iv_flags & IEEE80211_F_DWDS) != 0; break; case IEEE80211_IOC_INACTIVITY: ireq->i_val = (vap->iv_flags_ext & IEEE80211_FEXT_INACT) != 0; break; case IEEE80211_IOC_APPIE: error = ieee80211_ioctl_getappie(vap, ireq); break; case IEEE80211_IOC_WPS: ireq->i_val = (vap->iv_flags_ext & IEEE80211_FEXT_WPS) != 0; break; case IEEE80211_IOC_TSN: ireq->i_val = (vap->iv_flags_ext & IEEE80211_FEXT_TSN) != 0; break; case IEEE80211_IOC_DFS: ireq->i_val = (vap->iv_flags_ext & IEEE80211_FEXT_DFS) != 0; break; case IEEE80211_IOC_DOTD: ireq->i_val = (vap->iv_flags_ext & IEEE80211_FEXT_DOTD) != 0; break; case IEEE80211_IOC_DEVCAPS: error = ieee80211_ioctl_getdevcaps(ic, ireq); break; case IEEE80211_IOC_HTPROTMODE: ireq->i_val = vap->iv_htprotmode; break; case IEEE80211_IOC_HTCONF: if (vap->iv_flags_ht & IEEE80211_FHT_HT) { ireq->i_val = 1; if (vap->iv_flags_ht & IEEE80211_FHT_USEHT40) ireq->i_val |= 2; } else ireq->i_val = 0; break; case IEEE80211_IOC_STA_VLAN: error = ieee80211_ioctl_getstavlan(vap, ireq); break; case IEEE80211_IOC_SMPS: if (vap->iv_opmode == IEEE80211_M_STA && (vap->iv_state == IEEE80211_S_RUN || vap->iv_state == IEEE80211_S_SLEEP)) { if (vap->iv_bss->ni_flags & IEEE80211_NODE_MIMO_RTS) ireq->i_val = IEEE80211_HTCAP_SMPS_DYNAMIC; else if (vap->iv_bss->ni_flags & IEEE80211_NODE_MIMO_PS) ireq->i_val = IEEE80211_HTCAP_SMPS_ENA; else ireq->i_val = IEEE80211_HTCAP_SMPS_OFF; } else ireq->i_val = vap->iv_htcaps & IEEE80211_HTCAP_SMPS; break; case IEEE80211_IOC_RIFS: if (vap->iv_opmode == IEEE80211_M_STA && (vap->iv_state == IEEE80211_S_RUN || vap->iv_state == IEEE80211_S_SLEEP)) ireq->i_val = (vap->iv_bss->ni_flags & IEEE80211_NODE_RIFS) != 0; else ireq->i_val = (vap->iv_flags_ht & IEEE80211_FHT_RIFS) != 0; break; case IEEE80211_IOC_STBC: ireq->i_val = 0; if (vap->iv_flags_ht & IEEE80211_FHT_STBC_TX) ireq->i_val |= 1; if (vap->iv_flags_ht & IEEE80211_FHT_STBC_RX) ireq->i_val |= 2; break; case IEEE80211_IOC_LDPC: ireq->i_val = 0; if (vap->iv_flags_ht & IEEE80211_FHT_LDPC_TX) ireq->i_val |= 1; if (vap->iv_flags_ht & IEEE80211_FHT_LDPC_RX) ireq->i_val |= 2; break; case IEEE80211_IOC_UAPSD: ireq->i_val = 0; if (vap->iv_flags_ext & IEEE80211_FEXT_UAPSD) ireq->i_val = 1; break; case IEEE80211_IOC_VHTCONF: ireq->i_val = vap->iv_flags_vht & IEEE80211_FVHT_MASK; break; default: error = ieee80211_ioctl_getdefault(vap, ireq); break; } return error; } static int ieee80211_ioctl_setkey(struct ieee80211vap *vap, struct ieee80211req *ireq) { struct ieee80211req_key ik; struct ieee80211_node *ni; struct ieee80211_key *wk; uint16_t kid; int error, i; if (ireq->i_len != sizeof(ik)) return EINVAL; error = copyin(ireq->i_data, &ik, sizeof(ik)); if (error) return error; /* NB: cipher support is verified by ieee80211_crypt_newkey */ /* NB: this also checks ik->ik_keylen > sizeof(wk->wk_key) */ if (ik.ik_keylen > sizeof(ik.ik_keydata)) return E2BIG; kid = ik.ik_keyix; if (kid == IEEE80211_KEYIX_NONE) { /* XXX unicast keys currently must be tx/rx */ 
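		/*
		 * IEEE80211_KEYIX_NONE selects the per-station unicast key,
		 * located via ik_macaddr below; any other index must name one
		 * of the IEEE80211_WEP_NKID global key slots.
		 */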
if (ik.ik_flags != (IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV)) return EINVAL; if (vap->iv_opmode == IEEE80211_M_STA) { ni = ieee80211_ref_node(vap->iv_bss); if (!IEEE80211_ADDR_EQ(ik.ik_macaddr, ni->ni_bssid)) { ieee80211_free_node(ni); return EADDRNOTAVAIL; } } else { ni = ieee80211_find_vap_node(&vap->iv_ic->ic_sta, vap, ik.ik_macaddr); if (ni == NULL) return ENOENT; } wk = &ni->ni_ucastkey; } else { if (kid >= IEEE80211_WEP_NKID) return EINVAL; wk = &vap->iv_nw_keys[kid]; /* * Global slots start off w/o any assigned key index. * Force one here for consistency with IEEE80211_IOC_WEPKEY. */ if (wk->wk_keyix == IEEE80211_KEYIX_NONE) wk->wk_keyix = kid; ni = NULL; } error = 0; ieee80211_key_update_begin(vap); if (ieee80211_crypto_newkey(vap, ik.ik_type, ik.ik_flags, wk)) { wk->wk_keylen = ik.ik_keylen; /* NB: MIC presence is implied by cipher type */ if (wk->wk_keylen > IEEE80211_KEYBUF_SIZE) wk->wk_keylen = IEEE80211_KEYBUF_SIZE; for (i = 0; i < IEEE80211_TID_SIZE; i++) wk->wk_keyrsc[i] = ik.ik_keyrsc; wk->wk_keytsc = 0; /* new key, reset */ memset(wk->wk_key, 0, sizeof(wk->wk_key)); memcpy(wk->wk_key, ik.ik_keydata, ik.ik_keylen); IEEE80211_ADDR_COPY(wk->wk_macaddr, ni != NULL ? ni->ni_macaddr : ik.ik_macaddr); if (!ieee80211_crypto_setkey(vap, wk)) error = EIO; else if ((ik.ik_flags & IEEE80211_KEY_DEFAULT)) /* * Inform the driver that this is the default * transmit key. Now, ideally we'd just set * a flag in the key update that would * say "yes, we're the default key", but * that currently isn't the way the ioctl -> * key interface works. */ ieee80211_crypto_set_deftxkey(vap, kid); } else error = ENXIO; ieee80211_key_update_end(vap); if (ni != NULL) ieee80211_free_node(ni); return error; } static int ieee80211_ioctl_delkey(struct ieee80211vap *vap, struct ieee80211req *ireq) { struct ieee80211req_del_key dk; int kid, error; if (ireq->i_len != sizeof(dk)) return EINVAL; error = copyin(ireq->i_data, &dk, sizeof(dk)); if (error) return error; kid = dk.idk_keyix; /* XXX uint8_t -> uint16_t */ if (dk.idk_keyix == (uint8_t) IEEE80211_KEYIX_NONE) { struct ieee80211_node *ni; if (vap->iv_opmode == IEEE80211_M_STA) { ni = ieee80211_ref_node(vap->iv_bss); if (!IEEE80211_ADDR_EQ(dk.idk_macaddr, ni->ni_bssid)) { ieee80211_free_node(ni); return EADDRNOTAVAIL; } } else { ni = ieee80211_find_vap_node(&vap->iv_ic->ic_sta, vap, dk.idk_macaddr); if (ni == NULL) return ENOENT; } /* XXX error return */ ieee80211_node_delucastkey(ni); ieee80211_free_node(ni); } else { if (kid >= IEEE80211_WEP_NKID) return EINVAL; /* XXX error return */ ieee80211_crypto_delkey(vap, &vap->iv_nw_keys[kid]); } return 0; } struct mlmeop { struct ieee80211vap *vap; int op; int reason; }; static void mlmedebug(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN], int op, int reason) { #ifdef IEEE80211_DEBUG static const struct { int mask; const char *opstr; } ops[] = { { 0, "op#0" }, { IEEE80211_MSG_IOCTL | IEEE80211_MSG_STATE | IEEE80211_MSG_ASSOC, "assoc" }, { IEEE80211_MSG_IOCTL | IEEE80211_MSG_STATE | IEEE80211_MSG_ASSOC, "disassoc" }, { IEEE80211_MSG_IOCTL | IEEE80211_MSG_STATE | IEEE80211_MSG_AUTH, "deauth" }, { IEEE80211_MSG_IOCTL | IEEE80211_MSG_STATE | IEEE80211_MSG_AUTH, "authorize" }, { IEEE80211_MSG_IOCTL | IEEE80211_MSG_STATE | IEEE80211_MSG_AUTH, "unauthorize" }, }; if (op == IEEE80211_MLME_AUTH) { IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_IOCTL | IEEE80211_MSG_STATE | IEEE80211_MSG_AUTH, mac, "station authenticate %s via MLME (reason: %d (%s))", reason == IEEE80211_STATUS_SUCCESS ? 
"ACCEPT" : "REJECT", reason, ieee80211_reason_to_string(reason)); } else if (!(IEEE80211_MLME_ASSOC <= op && op <= IEEE80211_MLME_AUTH)) { IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_ANY, mac, "unknown MLME request %d (reason: %d (%s))", op, reason, ieee80211_reason_to_string(reason)); } else if (reason == IEEE80211_STATUS_SUCCESS) { IEEE80211_NOTE_MAC(vap, ops[op].mask, mac, "station %s via MLME", ops[op].opstr); } else { IEEE80211_NOTE_MAC(vap, ops[op].mask, mac, "station %s via MLME (reason: %d (%s))", ops[op].opstr, reason, ieee80211_reason_to_string(reason)); } #endif /* IEEE80211_DEBUG */ } static void domlme(void *arg, struct ieee80211_node *ni) { struct mlmeop *mop = arg; struct ieee80211vap *vap = ni->ni_vap; if (vap != mop->vap) return; /* * NB: if ni_associd is zero then the node is already cleaned * up and we don't need to do this (we're safely holding a * reference but should otherwise not modify it's state). */ if (ni->ni_associd == 0) return; mlmedebug(vap, ni->ni_macaddr, mop->op, mop->reason); if (mop->op == IEEE80211_MLME_DEAUTH) { IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_DEAUTH, mop->reason); } else { IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_DISASSOC, mop->reason); } ieee80211_node_leave(ni); } static int setmlme_dropsta(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN], struct mlmeop *mlmeop) { struct ieee80211_node_table *nt = &vap->iv_ic->ic_sta; struct ieee80211_node *ni; int error = 0; /* NB: the broadcast address means do 'em all */ if (!IEEE80211_ADDR_EQ(mac, vap->iv_ifp->if_broadcastaddr)) { IEEE80211_NODE_LOCK(nt); ni = ieee80211_find_node_locked(nt, mac); IEEE80211_NODE_UNLOCK(nt); /* * Don't do the node update inside the node * table lock. This unfortunately causes LORs * with drivers and their TX paths. */ if (ni != NULL) { domlme(mlmeop, ni); ieee80211_free_node(ni); } else error = ENOENT; } else { ieee80211_iterate_nodes(nt, domlme, mlmeop); } return error; } static int setmlme_common(struct ieee80211vap *vap, int op, const uint8_t mac[IEEE80211_ADDR_LEN], int reason) { struct ieee80211com *ic = vap->iv_ic; struct ieee80211_node_table *nt = &ic->ic_sta; struct ieee80211_node *ni; struct mlmeop mlmeop; int error; error = 0; switch (op) { case IEEE80211_MLME_DISASSOC: case IEEE80211_MLME_DEAUTH: switch (vap->iv_opmode) { case IEEE80211_M_STA: mlmedebug(vap, vap->iv_bss->ni_macaddr, op, reason); /* XXX not quite right */ ieee80211_new_state(vap, IEEE80211_S_INIT, reason); break; case IEEE80211_M_HOSTAP: mlmeop.vap = vap; mlmeop.op = op; mlmeop.reason = reason; error = setmlme_dropsta(vap, mac, &mlmeop); break; case IEEE80211_M_WDS: /* XXX user app should send raw frame? */ if (op != IEEE80211_MLME_DEAUTH) { error = EINVAL; break; } #if 0 /* XXX accept any address, simplifies user code */ if (!IEEE80211_ADDR_EQ(mac, vap->iv_bss->ni_macaddr)) { error = EINVAL; break; } #endif mlmedebug(vap, vap->iv_bss->ni_macaddr, op, reason); ni = ieee80211_ref_node(vap->iv_bss); IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_DEAUTH, reason); ieee80211_free_node(ni); break; case IEEE80211_M_MBSS: IEEE80211_NODE_LOCK(nt); ni = ieee80211_find_node_locked(nt, mac); /* * Don't do the node update inside the node * table lock. This unfortunately causes LORs * with drivers and their TX paths. 
*/ IEEE80211_NODE_UNLOCK(nt); if (ni != NULL) { ieee80211_node_leave(ni); ieee80211_free_node(ni); } else { error = ENOENT; } break; default: error = EINVAL; break; } break; case IEEE80211_MLME_AUTHORIZE: case IEEE80211_MLME_UNAUTHORIZE: if (vap->iv_opmode != IEEE80211_M_HOSTAP && vap->iv_opmode != IEEE80211_M_WDS) { error = EINVAL; break; } IEEE80211_NODE_LOCK(nt); ni = ieee80211_find_vap_node_locked(nt, vap, mac); /* * Don't do the node update inside the node * table lock. This unfortunately causes LORs * with drivers and their TX paths. */ IEEE80211_NODE_UNLOCK(nt); if (ni != NULL) { mlmedebug(vap, mac, op, reason); if (op == IEEE80211_MLME_AUTHORIZE) ieee80211_node_authorize(ni); else ieee80211_node_unauthorize(ni); ieee80211_free_node(ni); } else error = ENOENT; break; case IEEE80211_MLME_AUTH: if (vap->iv_opmode != IEEE80211_M_HOSTAP) { error = EINVAL; break; } IEEE80211_NODE_LOCK(nt); ni = ieee80211_find_vap_node_locked(nt, vap, mac); /* * Don't do the node update inside the node * table lock. This unfortunately causes LORs * with drivers and their TX paths. */ IEEE80211_NODE_UNLOCK(nt); if (ni != NULL) { mlmedebug(vap, mac, op, reason); if (reason == IEEE80211_STATUS_SUCCESS) { IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_AUTH, 2); /* * For shared key auth, just continue the * exchange. Otherwise when 802.1x is not in * use mark the port authorized at this point * so traffic can flow. */ if (ni->ni_authmode != IEEE80211_AUTH_8021X && ni->ni_challenge == NULL) ieee80211_node_authorize(ni); } else { vap->iv_stats.is_rx_acl++; ieee80211_send_error(ni, ni->ni_macaddr, IEEE80211_FC0_SUBTYPE_AUTH, 2|(reason<<16)); ieee80211_node_leave(ni); } ieee80211_free_node(ni); } else error = ENOENT; break; default: error = EINVAL; break; } return error; } struct scanlookup { const uint8_t *mac; int esslen; const uint8_t *essid; const struct ieee80211_scan_entry *se; }; /* * Match mac address and any ssid. */ static void mlmelookup(void *arg, const struct ieee80211_scan_entry *se) { struct scanlookup *look = arg; if (!IEEE80211_ADDR_EQ(look->mac, se->se_macaddr)) return; if (look->esslen != 0) { if (se->se_ssid[1] != look->esslen) return; if (memcmp(look->essid, se->se_ssid+2, look->esslen)) return; } look->se = se; } static int setmlme_assoc_sta(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN], int ssid_len, const uint8_t ssid[IEEE80211_NWID_LEN]) { struct scanlookup lookup; KASSERT(vap->iv_opmode == IEEE80211_M_STA, ("expected opmode STA not %s", ieee80211_opmode_name[vap->iv_opmode])); /* NB: this is racey if roaming is !manual */ lookup.se = NULL; lookup.mac = mac; lookup.esslen = ssid_len; lookup.essid = ssid; ieee80211_scan_iterate(vap, mlmelookup, &lookup); if (lookup.se == NULL) return ENOENT; mlmedebug(vap, mac, IEEE80211_MLME_ASSOC, 0); if (!ieee80211_sta_join(vap, lookup.se->se_chan, lookup.se)) return EIO; /* XXX unique but could be better */ return 0; } static int setmlme_assoc_adhoc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN], int ssid_len, const uint8_t ssid[IEEE80211_NWID_LEN]) { struct ieee80211_scan_req *sr; int error; KASSERT(vap->iv_opmode == IEEE80211_M_IBSS || vap->iv_opmode == IEEE80211_M_AHDEMO, ("expected opmode IBSS or AHDEMO not %s", ieee80211_opmode_name[vap->iv_opmode])); if (ssid_len == 0 || ssid_len > IEEE80211_NWID_LEN) return EINVAL; sr = IEEE80211_MALLOC(sizeof(*sr), M_TEMP, IEEE80211_M_NOWAIT | IEEE80211_M_ZERO); if (sr == NULL) return ENOMEM; /* NB: IEEE80211_IOC_SSID call missing for ap_scan=2. 
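	 * Populate the desired SSID here so the adhoc scan/join below has it.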
*/ memset(vap->iv_des_ssid[0].ssid, 0, IEEE80211_NWID_LEN); vap->iv_des_ssid[0].len = ssid_len; memcpy(vap->iv_des_ssid[0].ssid, ssid, ssid_len); vap->iv_des_nssid = 1; sr->sr_flags = IEEE80211_IOC_SCAN_ACTIVE | IEEE80211_IOC_SCAN_ONCE; sr->sr_duration = IEEE80211_IOC_SCAN_FOREVER; memcpy(sr->sr_ssid[0].ssid, ssid, ssid_len); sr->sr_ssid[0].len = ssid_len; sr->sr_nssid = 1; error = ieee80211_scanreq(vap, sr); IEEE80211_FREE(sr, M_TEMP); return error; } static int ieee80211_ioctl_setmlme(struct ieee80211vap *vap, struct ieee80211req *ireq) { struct ieee80211req_mlme mlme; int error; if (ireq->i_len != sizeof(mlme)) return EINVAL; error = copyin(ireq->i_data, &mlme, sizeof(mlme)); if (error) return error; if (vap->iv_opmode == IEEE80211_M_STA && mlme.im_op == IEEE80211_MLME_ASSOC) return setmlme_assoc_sta(vap, mlme.im_macaddr, vap->iv_des_ssid[0].len, vap->iv_des_ssid[0].ssid); else if ((vap->iv_opmode == IEEE80211_M_IBSS || vap->iv_opmode == IEEE80211_M_AHDEMO) && mlme.im_op == IEEE80211_MLME_ASSOC) return setmlme_assoc_adhoc(vap, mlme.im_macaddr, mlme.im_ssid_len, mlme.im_ssid); else return setmlme_common(vap, mlme.im_op, mlme.im_macaddr, mlme.im_reason); } static int ieee80211_ioctl_macmac(struct ieee80211vap *vap, struct ieee80211req *ireq) { uint8_t mac[IEEE80211_ADDR_LEN]; const struct ieee80211_aclator *acl = vap->iv_acl; int error; if (ireq->i_len != sizeof(mac)) return EINVAL; error = copyin(ireq->i_data, mac, ireq->i_len); if (error) return error; if (acl == NULL) { acl = ieee80211_aclator_get("mac"); if (acl == NULL || !acl->iac_attach(vap)) return EINVAL; vap->iv_acl = acl; } if (ireq->i_type == IEEE80211_IOC_ADDMAC) acl->iac_add(vap, mac); else acl->iac_remove(vap, mac); return 0; } static int ieee80211_ioctl_setmaccmd(struct ieee80211vap *vap, struct ieee80211req *ireq) { const struct ieee80211_aclator *acl = vap->iv_acl; switch (ireq->i_val) { case IEEE80211_MACCMD_POLICY_OPEN: case IEEE80211_MACCMD_POLICY_ALLOW: case IEEE80211_MACCMD_POLICY_DENY: case IEEE80211_MACCMD_POLICY_RADIUS: if (acl == NULL) { acl = ieee80211_aclator_get("mac"); if (acl == NULL || !acl->iac_attach(vap)) return EINVAL; vap->iv_acl = acl; } acl->iac_setpolicy(vap, ireq->i_val); break; case IEEE80211_MACCMD_FLUSH: if (acl != NULL) acl->iac_flush(vap); /* NB: silently ignore when not in use */ break; case IEEE80211_MACCMD_DETACH: if (acl != NULL) { vap->iv_acl = NULL; acl->iac_detach(vap); } break; default: if (acl == NULL) return EINVAL; else return acl->iac_setioctl(vap, ireq); } return 0; } static int ieee80211_ioctl_setchanlist(struct ieee80211vap *vap, struct ieee80211req *ireq) { struct ieee80211com *ic = vap->iv_ic; uint8_t *chanlist, *list; int i, nchan, maxchan, error; if (ireq->i_len > sizeof(ic->ic_chan_active)) ireq->i_len = sizeof(ic->ic_chan_active); list = IEEE80211_MALLOC(ireq->i_len + IEEE80211_CHAN_BYTES, M_TEMP, IEEE80211_M_NOWAIT | IEEE80211_M_ZERO); if (list == NULL) return ENOMEM; error = copyin(ireq->i_data, list, ireq->i_len); if (error) { IEEE80211_FREE(list, M_TEMP); return error; } nchan = 0; chanlist = list + ireq->i_len; /* NB: zero'd already */ maxchan = ireq->i_len * NBBY; for (i = 0; i < ic->ic_nchans; i++) { const struct ieee80211_channel *c = &ic->ic_channels[i]; /* * Calculate the intersection of the user list and the * available channels so users can do things like specify * 1-255 to get all available channels. 
*/ if (c->ic_ieee < maxchan && isset(list, c->ic_ieee)) { setbit(chanlist, c->ic_ieee); nchan++; } } if (nchan == 0) { IEEE80211_FREE(list, M_TEMP); return EINVAL; } if (ic->ic_bsschan != IEEE80211_CHAN_ANYC && /* XXX */ isclr(chanlist, ic->ic_bsschan->ic_ieee)) ic->ic_bsschan = IEEE80211_CHAN_ANYC; memcpy(ic->ic_chan_active, chanlist, IEEE80211_CHAN_BYTES); ieee80211_scan_flush(vap); IEEE80211_FREE(list, M_TEMP); return ENETRESET; } static int ieee80211_ioctl_setstastats(struct ieee80211vap *vap, struct ieee80211req *ireq) { struct ieee80211_node *ni; uint8_t macaddr[IEEE80211_ADDR_LEN]; int error; /* * NB: we could copyin ieee80211req_sta_stats so apps * could make selective changes but that's overkill; * just clear all stats for now. */ if (ireq->i_len < IEEE80211_ADDR_LEN) return EINVAL; error = copyin(ireq->i_data, macaddr, IEEE80211_ADDR_LEN); if (error != 0) return error; ni = ieee80211_find_vap_node(&vap->iv_ic->ic_sta, vap, macaddr); if (ni == NULL) return ENOENT; /* XXX require ni_vap == vap? */ memset(&ni->ni_stats, 0, sizeof(ni->ni_stats)); ieee80211_free_node(ni); return 0; } static int ieee80211_ioctl_setstatxpow(struct ieee80211vap *vap, struct ieee80211req *ireq) { struct ieee80211_node *ni; struct ieee80211req_sta_txpow txpow; int error; if (ireq->i_len != sizeof(txpow)) return EINVAL; error = copyin(ireq->i_data, &txpow, sizeof(txpow)); if (error != 0) return error; ni = ieee80211_find_vap_node(&vap->iv_ic->ic_sta, vap, txpow.it_macaddr); if (ni == NULL) return ENOENT; ni->ni_txpower = txpow.it_txpow; ieee80211_free_node(ni); return error; } static int ieee80211_ioctl_setwmeparam(struct ieee80211vap *vap, struct ieee80211req *ireq) { struct ieee80211com *ic = vap->iv_ic; struct ieee80211_wme_state *wme = &ic->ic_wme; struct wmeParams *wmep, *chanp; int isbss, ac, aggrmode; if ((ic->ic_caps & IEEE80211_C_WME) == 0) return EOPNOTSUPP; isbss = (ireq->i_len & IEEE80211_WMEPARAM_BSS); ac = (ireq->i_len & IEEE80211_WMEPARAM_VAL); aggrmode = (wme->wme_flags & WME_F_AGGRMODE); if (ac >= WME_NUM_AC) ac = WME_AC_BE; if (isbss) { chanp = &wme->wme_bssChanParams.cap_wmeParams[ac]; wmep = &wme->wme_wmeBssChanParams.cap_wmeParams[ac]; } else { chanp = &wme->wme_chanParams.cap_wmeParams[ac]; wmep = &wme->wme_wmeChanParams.cap_wmeParams[ac]; } switch (ireq->i_type) { case IEEE80211_IOC_WME_CWMIN: /* WME: CWmin */ wmep->wmep_logcwmin = ireq->i_val; if (!isbss || !aggrmode) chanp->wmep_logcwmin = ireq->i_val; break; case IEEE80211_IOC_WME_CWMAX: /* WME: CWmax */ wmep->wmep_logcwmax = ireq->i_val; if (!isbss || !aggrmode) chanp->wmep_logcwmax = ireq->i_val; break; case IEEE80211_IOC_WME_AIFS: /* WME: AIFS */ wmep->wmep_aifsn = ireq->i_val; if (!isbss || !aggrmode) chanp->wmep_aifsn = ireq->i_val; break; case IEEE80211_IOC_WME_TXOPLIMIT: /* WME: txops limit */ wmep->wmep_txopLimit = ireq->i_val; if (!isbss || !aggrmode) chanp->wmep_txopLimit = ireq->i_val; break; case IEEE80211_IOC_WME_ACM: /* WME: ACM (bss only) */ wmep->wmep_acm = ireq->i_val; if (!aggrmode) chanp->wmep_acm = ireq->i_val; break; case IEEE80211_IOC_WME_ACKPOLICY: /* WME: ACK policy (!bss only)*/ wmep->wmep_noackPolicy = chanp->wmep_noackPolicy = (ireq->i_val) == 0; break; } ieee80211_wme_updateparams(vap); return 0; } static int find11gchannel(struct ieee80211com *ic, int start, int freq) { const struct ieee80211_channel *c; int i; for (i = start+1; i < ic->ic_nchans; i++) { c = &ic->ic_channels[i]; if (c->ic_freq == freq && IEEE80211_IS_CHAN_ANYG(c)) return 1; } /* NB: should not be needed but in case things are mis-sorted 
*/ for (i = 0; i < start; i++) { c = &ic->ic_channels[i]; if (c->ic_freq == freq && IEEE80211_IS_CHAN_ANYG(c)) return 1; } return 0; } static struct ieee80211_channel * findchannel(struct ieee80211com *ic, int ieee, int mode) { static const u_int chanflags[IEEE80211_MODE_MAX] = { [IEEE80211_MODE_AUTO] = 0, [IEEE80211_MODE_11A] = IEEE80211_CHAN_A, [IEEE80211_MODE_11B] = IEEE80211_CHAN_B, [IEEE80211_MODE_11G] = IEEE80211_CHAN_G, [IEEE80211_MODE_FH] = IEEE80211_CHAN_FHSS, [IEEE80211_MODE_TURBO_A] = IEEE80211_CHAN_108A, [IEEE80211_MODE_TURBO_G] = IEEE80211_CHAN_108G, [IEEE80211_MODE_STURBO_A] = IEEE80211_CHAN_STURBO, [IEEE80211_MODE_HALF] = IEEE80211_CHAN_HALF, [IEEE80211_MODE_QUARTER] = IEEE80211_CHAN_QUARTER, /* NB: handled specially below */ [IEEE80211_MODE_11NA] = IEEE80211_CHAN_A, [IEEE80211_MODE_11NG] = IEEE80211_CHAN_G, [IEEE80211_MODE_VHT_5GHZ] = IEEE80211_CHAN_A, [IEEE80211_MODE_VHT_2GHZ] = IEEE80211_CHAN_G, }; u_int modeflags; int i; modeflags = chanflags[mode]; for (i = 0; i < ic->ic_nchans; i++) { struct ieee80211_channel *c = &ic->ic_channels[i]; if (c->ic_ieee != ieee) continue; if (mode == IEEE80211_MODE_AUTO) { /* ignore turbo channels for autoselect */ if (IEEE80211_IS_CHAN_TURBO(c)) continue; /* * XXX special-case 11b/g channels so we * always select the g channel if both * are present. * XXX prefer HT to non-HT? */ if (!IEEE80211_IS_CHAN_B(c) || !find11gchannel(ic, i, c->ic_freq)) return c; } else { /* must check VHT specifically */ if ((mode == IEEE80211_MODE_VHT_5GHZ || mode == IEEE80211_MODE_VHT_2GHZ) && !IEEE80211_IS_CHAN_VHT(c)) continue; /* * Must check HT specially - only match on HT, * not HT+VHT channels */ if ((mode == IEEE80211_MODE_11NA || mode == IEEE80211_MODE_11NG) && !IEEE80211_IS_CHAN_HT(c)) continue; if ((mode == IEEE80211_MODE_11NA || mode == IEEE80211_MODE_11NG) && IEEE80211_IS_CHAN_VHT(c)) continue; /* Check that the modeflags above match */ if ((c->ic_flags & modeflags) == modeflags) return c; } } return NULL; } /* * Check the specified against any desired mode (aka netband). * This is only used (presently) when operating in hostap mode * to enforce consistency. */ static int check_mode_consistency(const struct ieee80211_channel *c, int mode) { KASSERT(c != IEEE80211_CHAN_ANYC, ("oops, no channel")); switch (mode) { case IEEE80211_MODE_11B: return (IEEE80211_IS_CHAN_B(c)); case IEEE80211_MODE_11G: return (IEEE80211_IS_CHAN_ANYG(c) && !IEEE80211_IS_CHAN_HT(c)); case IEEE80211_MODE_11A: return (IEEE80211_IS_CHAN_A(c) && !IEEE80211_IS_CHAN_HT(c)); case IEEE80211_MODE_STURBO_A: return (IEEE80211_IS_CHAN_STURBO(c)); case IEEE80211_MODE_11NA: return (IEEE80211_IS_CHAN_HTA(c)); case IEEE80211_MODE_11NG: return (IEEE80211_IS_CHAN_HTG(c)); } return 1; } /* * Common code to set the current channel. If the device * is up and running this may result in an immediate channel * change or a kick of the state machine. */ static int setcurchan(struct ieee80211vap *vap, struct ieee80211_channel *c) { struct ieee80211com *ic = vap->iv_ic; int error; if (c != IEEE80211_CHAN_ANYC) { if (IEEE80211_IS_CHAN_RADAR(c)) return EBUSY; /* XXX better code? 
*/ if (vap->iv_opmode == IEEE80211_M_HOSTAP) { if (IEEE80211_IS_CHAN_NOHOSTAP(c)) return EINVAL; if (!check_mode_consistency(c, vap->iv_des_mode)) return EINVAL; } else if (vap->iv_opmode == IEEE80211_M_IBSS) { if (IEEE80211_IS_CHAN_NOADHOC(c)) return EINVAL; } if ((vap->iv_state == IEEE80211_S_RUN || vap->iv_state == IEEE80211_S_SLEEP) && vap->iv_bss->ni_chan == c) return 0; /* NB: nothing to do */ } vap->iv_des_chan = c; error = 0; if (vap->iv_opmode == IEEE80211_M_MONITOR && vap->iv_des_chan != IEEE80211_CHAN_ANYC) { /* * Monitor mode can switch directly. */ if (IFNET_IS_UP_RUNNING(vap->iv_ifp)) { /* XXX need state machine for other vap's to follow */ ieee80211_setcurchan(ic, vap->iv_des_chan); vap->iv_bss->ni_chan = ic->ic_curchan; } else { ic->ic_curchan = vap->iv_des_chan; ic->ic_rt = ieee80211_get_ratetable(ic->ic_curchan); } } else { /* * Need to go through the state machine in case we * need to reassociate or the like. The state machine * will pickup the desired channel and avoid scanning. */ if (IS_UP_AUTO(vap)) ieee80211_new_state(vap, IEEE80211_S_SCAN, 0); else if (vap->iv_des_chan != IEEE80211_CHAN_ANYC) { /* * When not up+running and a real channel has * been specified fix the current channel so * there is immediate feedback; e.g. via ifconfig. */ ic->ic_curchan = vap->iv_des_chan; ic->ic_rt = ieee80211_get_ratetable(ic->ic_curchan); } } return error; } /* * Old api for setting the current channel; this is * deprecated because channel numbers are ambiguous. */ static int ieee80211_ioctl_setchannel(struct ieee80211vap *vap, const struct ieee80211req *ireq) { struct ieee80211com *ic = vap->iv_ic; struct ieee80211_channel *c; /* XXX 0xffff overflows 16-bit signed */ if (ireq->i_val == 0 || ireq->i_val == (int16_t) IEEE80211_CHAN_ANY) { c = IEEE80211_CHAN_ANYC; } else { struct ieee80211_channel *c2; c = findchannel(ic, ireq->i_val, vap->iv_des_mode); if (c == NULL) { c = findchannel(ic, ireq->i_val, IEEE80211_MODE_AUTO); if (c == NULL) return EINVAL; } /* * Fine tune channel selection based on desired mode: * if 11b is requested, find the 11b version of any * 11g channel returned, * if static turbo, find the turbo version of any * 11a channel return, * if 11na is requested, find the ht version of any * 11a channel returned, * if 11ng is requested, find the ht version of any * 11g channel returned, * if 11ac is requested, find the 11ac version * of any 11a/11na channel returned, * (TBD) 11acg (2GHz VHT) * otherwise we should be ok with what we've got. 
*/ switch (vap->iv_des_mode) { case IEEE80211_MODE_11B: if (IEEE80211_IS_CHAN_ANYG(c)) { c2 = findchannel(ic, ireq->i_val, IEEE80211_MODE_11B); /* NB: should not happen, =>'s 11g w/o 11b */ if (c2 != NULL) c = c2; } break; case IEEE80211_MODE_TURBO_A: if (IEEE80211_IS_CHAN_A(c)) { c2 = findchannel(ic, ireq->i_val, IEEE80211_MODE_TURBO_A); if (c2 != NULL) c = c2; } break; case IEEE80211_MODE_11NA: if (IEEE80211_IS_CHAN_A(c)) { c2 = findchannel(ic, ireq->i_val, IEEE80211_MODE_11NA); if (c2 != NULL) c = c2; } break; case IEEE80211_MODE_11NG: if (IEEE80211_IS_CHAN_ANYG(c)) { c2 = findchannel(ic, ireq->i_val, IEEE80211_MODE_11NG); if (c2 != NULL) c = c2; } break; case IEEE80211_MODE_VHT_2GHZ: printf("%s: TBD\n", __func__); break; case IEEE80211_MODE_VHT_5GHZ: if (IEEE80211_IS_CHAN_A(c)) { c2 = findchannel(ic, ireq->i_val, IEEE80211_MODE_VHT_5GHZ); if (c2 != NULL) c = c2; } break; default: /* NB: no static turboG */ break; } } return setcurchan(vap, c); } /* * New/current api for setting the current channel; a complete * channel description is provide so there is no ambiguity in * identifying the channel. */ static int ieee80211_ioctl_setcurchan(struct ieee80211vap *vap, const struct ieee80211req *ireq) { struct ieee80211com *ic = vap->iv_ic; struct ieee80211_channel chan, *c; int error; if (ireq->i_len != sizeof(chan)) return EINVAL; error = copyin(ireq->i_data, &chan, sizeof(chan)); if (error != 0) return error; /* XXX 0xffff overflows 16-bit signed */ if (chan.ic_freq == 0 || chan.ic_freq == IEEE80211_CHAN_ANY) { c = IEEE80211_CHAN_ANYC; } else { c = ieee80211_find_channel(ic, chan.ic_freq, chan.ic_flags); if (c == NULL) return EINVAL; } return setcurchan(vap, c); } static int ieee80211_ioctl_setregdomain(struct ieee80211vap *vap, const struct ieee80211req *ireq) { struct ieee80211_regdomain_req *reg; int nchans, error; nchans = 1 + ((ireq->i_len - sizeof(struct ieee80211_regdomain_req)) / sizeof(struct ieee80211_channel)); if (!(1 <= nchans && nchans <= IEEE80211_CHAN_MAX)) { IEEE80211_DPRINTF(vap, IEEE80211_MSG_IOCTL, "%s: bad # chans, i_len %d nchans %d\n", __func__, ireq->i_len, nchans); return EINVAL; } reg = (struct ieee80211_regdomain_req *) IEEE80211_MALLOC(IEEE80211_REGDOMAIN_SIZE(nchans), M_TEMP, IEEE80211_M_NOWAIT | IEEE80211_M_ZERO); if (reg == NULL) { IEEE80211_DPRINTF(vap, IEEE80211_MSG_IOCTL, "%s: no memory, nchans %d\n", __func__, nchans); return ENOMEM; } error = copyin(ireq->i_data, reg, IEEE80211_REGDOMAIN_SIZE(nchans)); if (error == 0) { /* NB: validate inline channel count against storage size */ if (reg->chaninfo.ic_nchans != nchans) { IEEE80211_DPRINTF(vap, IEEE80211_MSG_IOCTL, "%s: chan cnt mismatch, %d != %d\n", __func__, reg->chaninfo.ic_nchans, nchans); error = EINVAL; } else error = ieee80211_setregdomain(vap, reg); } IEEE80211_FREE(reg, M_TEMP); return (error == 0 ? 
ENETRESET : error); } static int checkrate(const struct ieee80211_rateset *rs, int rate) { int i; if (rate == IEEE80211_FIXED_RATE_NONE) return 1; for (i = 0; i < rs->rs_nrates; i++) if ((rs->rs_rates[i] & IEEE80211_RATE_VAL) == rate) return 1; return 0; } static int checkmcs(const struct ieee80211_htrateset *rs, int mcs) { int rate_val = IEEE80211_RV(mcs); int i; if (mcs == IEEE80211_FIXED_RATE_NONE) return 1; if ((mcs & IEEE80211_RATE_MCS) == 0) /* MCS always have 0x80 set */ return 0; for (i = 0; i < rs->rs_nrates; i++) if (IEEE80211_RV(rs->rs_rates[i]) == rate_val) return 1; return 0; } static int ieee80211_ioctl_setroam(struct ieee80211vap *vap, const struct ieee80211req *ireq) { struct ieee80211com *ic = vap->iv_ic; struct ieee80211_roamparams_req *parms; struct ieee80211_roamparam *src, *dst; const struct ieee80211_htrateset *rs_ht; const struct ieee80211_rateset *rs; int changed, error, mode, is11n, nmodes; if (ireq->i_len != sizeof(vap->iv_roamparms)) return EINVAL; parms = IEEE80211_MALLOC(sizeof(*parms), M_TEMP, IEEE80211_M_NOWAIT | IEEE80211_M_ZERO); if (parms == NULL) return ENOMEM; error = copyin(ireq->i_data, parms, ireq->i_len); if (error != 0) goto fail; changed = 0; nmodes = IEEE80211_MODE_MAX; /* validate parameters and check if anything changed */ for (mode = IEEE80211_MODE_11A; mode < nmodes; mode++) { if (isclr(ic->ic_modecaps, mode)) continue; src = &parms->params[mode]; dst = &vap->iv_roamparms[mode]; rs = &ic->ic_sup_rates[mode]; /* NB: 11n maps to legacy */ rs_ht = &ic->ic_sup_htrates; is11n = (mode == IEEE80211_MODE_11NA || mode == IEEE80211_MODE_11NG); /* XXX TODO: 11ac */ if (src->rate != dst->rate) { if (!checkrate(rs, src->rate) && (!is11n || !checkmcs(rs_ht, src->rate))) { error = EINVAL; goto fail; } changed++; } if (src->rssi != dst->rssi) changed++; } if (changed) { /* * Copy new parameters in place and notify the * driver so it can push state to the device. */ /* XXX locking? */ for (mode = IEEE80211_MODE_11A; mode < nmodes; mode++) { if (isset(ic->ic_modecaps, mode)) vap->iv_roamparms[mode] = parms->params[mode]; } if (vap->iv_roaming == IEEE80211_ROAMING_DEVICE) error = ERESTART; } fail: IEEE80211_FREE(parms, M_TEMP); return error; } static int ieee80211_ioctl_settxparams(struct ieee80211vap *vap, const struct ieee80211req *ireq) { struct ieee80211com *ic = vap->iv_ic; struct ieee80211_txparams_req parms; /* XXX stack use? 
*/ struct ieee80211_txparam *src, *dst; const struct ieee80211_htrateset *rs_ht; const struct ieee80211_rateset *rs; int error, mode, changed, is11n, nmodes; /* NB: accept short requests for backwards compat */ if (ireq->i_len > sizeof(parms)) return EINVAL; error = copyin(ireq->i_data, &parms, ireq->i_len); if (error != 0) return error; nmodes = ireq->i_len / sizeof(struct ieee80211_txparam); changed = 0; /* validate parameters and check if anything changed */ for (mode = IEEE80211_MODE_11A; mode < nmodes; mode++) { if (isclr(ic->ic_modecaps, mode)) continue; src = &parms.params[mode]; dst = &vap->iv_txparms[mode]; rs = &ic->ic_sup_rates[mode]; /* NB: 11n maps to legacy */ rs_ht = &ic->ic_sup_htrates; is11n = (mode == IEEE80211_MODE_11NA || mode == IEEE80211_MODE_11NG); if (src->ucastrate != dst->ucastrate) { if (!checkrate(rs, src->ucastrate) && (!is11n || !checkmcs(rs_ht, src->ucastrate))) return EINVAL; changed++; } if (src->mcastrate != dst->mcastrate) { if (!checkrate(rs, src->mcastrate) && (!is11n || !checkmcs(rs_ht, src->mcastrate))) return EINVAL; changed++; } if (src->mgmtrate != dst->mgmtrate) { if (!checkrate(rs, src->mgmtrate) && (!is11n || !checkmcs(rs_ht, src->mgmtrate))) return EINVAL; changed++; } if (src->maxretry != dst->maxretry) /* NB: no bounds */ changed++; } if (changed) { /* * Copy new parameters in place and notify the * driver so it can push state to the device. */ for (mode = IEEE80211_MODE_11A; mode < nmodes; mode++) { if (isset(ic->ic_modecaps, mode)) vap->iv_txparms[mode] = parms.params[mode]; } /* XXX could be more intelligent, e.g. don't reset if setting not being used */ return ENETRESET; } return 0; } /* * Application Information Element support. */ static int setappie(struct ieee80211_appie **aie, const struct ieee80211req *ireq) { struct ieee80211_appie *app = *aie; struct ieee80211_appie *napp; int error; if (ireq->i_len == 0) { /* delete any existing ie */ if (app != NULL) { *aie = NULL; /* XXX racey */ IEEE80211_FREE(app, M_80211_NODE_IE); } return 0; } if (!(2 <= ireq->i_len && ireq->i_len <= IEEE80211_MAX_APPIE)) return EINVAL; /* * Allocate a new appie structure and copy in the user data. * When done swap in the new structure. Note that we do not * guard against users holding a ref to the old structure; * this must be handled outside this code. 
* * XXX bad bad bad */ napp = (struct ieee80211_appie *) IEEE80211_MALLOC( sizeof(struct ieee80211_appie) + ireq->i_len, M_80211_NODE_IE, IEEE80211_M_NOWAIT); if (napp == NULL) return ENOMEM; /* XXX holding ic lock */ error = copyin(ireq->i_data, napp->ie_data, ireq->i_len); if (error) { IEEE80211_FREE(napp, M_80211_NODE_IE); return error; } napp->ie_len = ireq->i_len; *aie = napp; if (app != NULL) IEEE80211_FREE(app, M_80211_NODE_IE); return 0; } static void setwparsnie(struct ieee80211vap *vap, uint8_t *ie, int space) { /* validate data is present as best we can */ if (space == 0 || 2+ie[1] > space) return; if (ie[0] == IEEE80211_ELEMID_VENDOR) vap->iv_wpa_ie = ie; else if (ie[0] == IEEE80211_ELEMID_RSN) vap->iv_rsn_ie = ie; } static int ieee80211_ioctl_setappie_locked(struct ieee80211vap *vap, const struct ieee80211req *ireq, int fc0) { int error; IEEE80211_LOCK_ASSERT(vap->iv_ic); switch (fc0 & IEEE80211_FC0_SUBTYPE_MASK) { case IEEE80211_FC0_SUBTYPE_BEACON: if (vap->iv_opmode != IEEE80211_M_HOSTAP && vap->iv_opmode != IEEE80211_M_IBSS) { error = EINVAL; break; } error = setappie(&vap->iv_appie_beacon, ireq); if (error == 0) ieee80211_beacon_notify(vap, IEEE80211_BEACON_APPIE); break; case IEEE80211_FC0_SUBTYPE_PROBE_RESP: error = setappie(&vap->iv_appie_proberesp, ireq); break; case IEEE80211_FC0_SUBTYPE_ASSOC_RESP: if (vap->iv_opmode == IEEE80211_M_HOSTAP) error = setappie(&vap->iv_appie_assocresp, ireq); else error = EINVAL; break; case IEEE80211_FC0_SUBTYPE_PROBE_REQ: error = setappie(&vap->iv_appie_probereq, ireq); break; case IEEE80211_FC0_SUBTYPE_ASSOC_REQ: if (vap->iv_opmode == IEEE80211_M_STA) error = setappie(&vap->iv_appie_assocreq, ireq); else error = EINVAL; break; case (IEEE80211_APPIE_WPA & IEEE80211_FC0_SUBTYPE_MASK): error = setappie(&vap->iv_appie_wpa, ireq); if (error == 0) { /* * Must split single blob of data into separate * WPA and RSN ie's because they go in different * locations in the mgt frames. * XXX use IEEE80211_IOC_WPA2 so user code does split */ vap->iv_wpa_ie = NULL; vap->iv_rsn_ie = NULL; if (vap->iv_appie_wpa != NULL) { struct ieee80211_appie *appie = vap->iv_appie_wpa; uint8_t *data = appie->ie_data; /* XXX ie length validate is painful, cheat */ setwparsnie(vap, data, appie->ie_len); setwparsnie(vap, data + 2 + data[1], appie->ie_len - (2 + data[1])); } if (vap->iv_opmode == IEEE80211_M_HOSTAP || vap->iv_opmode == IEEE80211_M_IBSS) { /* * Must rebuild beacon frame as the update * mechanism doesn't handle WPA/RSN ie's. * Could extend it but it doesn't normally * change; this is just to deal with hostapd * plumbing the ie after the interface is up. 
*/ error = ENETRESET; } } break; default: error = EINVAL; break; } return error; } static int ieee80211_ioctl_setappie(struct ieee80211vap *vap, const struct ieee80211req *ireq) { struct ieee80211com *ic = vap->iv_ic; int error; uint8_t fc0; fc0 = ireq->i_val & 0xff; if ((fc0 & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_MGT) return EINVAL; /* NB: could check iv_opmode and reject but hardly worth the effort */ IEEE80211_LOCK(ic); error = ieee80211_ioctl_setappie_locked(vap, ireq, fc0); IEEE80211_UNLOCK(ic); return error; } static int ieee80211_ioctl_chanswitch(struct ieee80211vap *vap, struct ieee80211req *ireq) { struct ieee80211com *ic = vap->iv_ic; struct ieee80211_chanswitch_req csr; struct ieee80211_channel *c; int error; if (ireq->i_len != sizeof(csr)) return EINVAL; error = copyin(ireq->i_data, &csr, sizeof(csr)); if (error != 0) return error; /* XXX adhoc mode not supported */ if (vap->iv_opmode != IEEE80211_M_HOSTAP || (vap->iv_flags & IEEE80211_F_DOTH) == 0) return EOPNOTSUPP; c = ieee80211_find_channel(ic, csr.csa_chan.ic_freq, csr.csa_chan.ic_flags); if (c == NULL) return ENOENT; IEEE80211_LOCK(ic); if ((ic->ic_flags & IEEE80211_F_CSAPENDING) == 0) ieee80211_csa_startswitch(ic, c, csr.csa_mode, csr.csa_count); else if (csr.csa_count == 0) ieee80211_csa_cancelswitch(ic); else error = EBUSY; IEEE80211_UNLOCK(ic); return error; } static int ieee80211_scanreq(struct ieee80211vap *vap, struct ieee80211_scan_req *sr) { #define IEEE80211_IOC_SCAN_FLAGS \ (IEEE80211_IOC_SCAN_NOPICK | IEEE80211_IOC_SCAN_ACTIVE | \ IEEE80211_IOC_SCAN_PICK1ST | IEEE80211_IOC_SCAN_BGSCAN | \ IEEE80211_IOC_SCAN_ONCE | IEEE80211_IOC_SCAN_NOBCAST | \ IEEE80211_IOC_SCAN_NOJOIN | IEEE80211_IOC_SCAN_FLUSH | \ IEEE80211_IOC_SCAN_CHECK) struct ieee80211com *ic = vap->iv_ic; int error, i; /* convert duration */ if (sr->sr_duration == IEEE80211_IOC_SCAN_FOREVER) sr->sr_duration = IEEE80211_SCAN_FOREVER; else { if (sr->sr_duration < IEEE80211_IOC_SCAN_DURATION_MIN || sr->sr_duration > IEEE80211_IOC_SCAN_DURATION_MAX) return EINVAL; sr->sr_duration = msecs_to_ticks(sr->sr_duration); } /* convert min/max channel dwell */ if (sr->sr_mindwell != 0) sr->sr_mindwell = msecs_to_ticks(sr->sr_mindwell); if (sr->sr_maxdwell != 0) sr->sr_maxdwell = msecs_to_ticks(sr->sr_maxdwell); /* NB: silently reduce ssid count to what is supported */ if (sr->sr_nssid > IEEE80211_SCAN_MAX_SSID) sr->sr_nssid = IEEE80211_SCAN_MAX_SSID; for (i = 0; i < sr->sr_nssid; i++) if (sr->sr_ssid[i].len > IEEE80211_NWID_LEN) return EINVAL; /* cleanse flags just in case, could reject if invalid flags */ sr->sr_flags &= IEEE80211_IOC_SCAN_FLAGS; /* * Add an implicit NOPICK if the vap is not marked UP. This * allows applications to scan without joining a bss (or picking * a channel and setting up a bss) and without forcing manual * roaming mode--you just need to mark the parent device UP. */ if ((vap->iv_ifp->if_flags & IFF_UP) == 0) sr->sr_flags |= IEEE80211_IOC_SCAN_NOPICK; IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN, "%s: flags 0x%x%s duration 0x%x mindwell %u maxdwell %u nssid %d\n", __func__, sr->sr_flags, (vap->iv_ifp->if_flags & IFF_UP) == 0 ? " (!IFF_UP)" : "", sr->sr_duration, sr->sr_mindwell, sr->sr_maxdwell, sr->sr_nssid); /* * If we are in INIT state then the driver has never had a chance * to setup hardware state to do a scan; we must use the state * machine to get us up to the SCAN state but once we reach SCAN * state we then want to use the supplied params. 
Stash the * parameters in the vap and mark IEEE80211_FEXT_SCANREQ; the * state machines will recognize this and use the stashed params * to issue the scan request. * * Otherwise just invoke the scan machinery directly. */ IEEE80211_LOCK(ic); if (ic->ic_nrunning == 0) { IEEE80211_UNLOCK(ic); return ENXIO; } if (vap->iv_state == IEEE80211_S_INIT) { /* NB: clobbers previous settings */ vap->iv_scanreq_flags = sr->sr_flags; vap->iv_scanreq_duration = sr->sr_duration; vap->iv_scanreq_nssid = sr->sr_nssid; for (i = 0; i < sr->sr_nssid; i++) { vap->iv_scanreq_ssid[i].len = sr->sr_ssid[i].len; memcpy(vap->iv_scanreq_ssid[i].ssid, sr->sr_ssid[i].ssid, sr->sr_ssid[i].len); } vap->iv_flags_ext |= IEEE80211_FEXT_SCANREQ; IEEE80211_UNLOCK(ic); ieee80211_new_state(vap, IEEE80211_S_SCAN, 0); } else { vap->iv_flags_ext &= ~IEEE80211_FEXT_SCANREQ; IEEE80211_UNLOCK(ic); if (sr->sr_flags & IEEE80211_IOC_SCAN_CHECK) { error = ieee80211_check_scan(vap, sr->sr_flags, sr->sr_duration, sr->sr_mindwell, sr->sr_maxdwell, sr->sr_nssid, /* NB: cheat, we assume structures are compatible */ (const struct ieee80211_scan_ssid *) &sr->sr_ssid[0]); } else { error = ieee80211_start_scan(vap, sr->sr_flags, sr->sr_duration, sr->sr_mindwell, sr->sr_maxdwell, sr->sr_nssid, /* NB: cheat, we assume structures are compatible */ (const struct ieee80211_scan_ssid *) &sr->sr_ssid[0]); } if (error == 0) return EINPROGRESS; } return 0; #undef IEEE80211_IOC_SCAN_FLAGS } static int ieee80211_ioctl_scanreq(struct ieee80211vap *vap, struct ieee80211req *ireq) { struct ieee80211_scan_req *sr; int error; if (ireq->i_len != sizeof(*sr)) return EINVAL; sr = IEEE80211_MALLOC(sizeof(*sr), M_TEMP, IEEE80211_M_NOWAIT | IEEE80211_M_ZERO); if (sr == NULL) return ENOMEM; error = copyin(ireq->i_data, sr, sizeof(*sr)); if (error != 0) goto bad; error = ieee80211_scanreq(vap, sr); bad: IEEE80211_FREE(sr, M_TEMP); return error; } static int ieee80211_ioctl_setstavlan(struct ieee80211vap *vap, struct ieee80211req *ireq) { struct ieee80211_node *ni; struct ieee80211req_sta_vlan vlan; int error; if (ireq->i_len != sizeof(vlan)) return EINVAL; error = copyin(ireq->i_data, &vlan, sizeof(vlan)); if (error != 0) return error; if (!IEEE80211_ADDR_EQ(vlan.sv_macaddr, zerobssid)) { ni = ieee80211_find_vap_node(&vap->iv_ic->ic_sta, vap, vlan.sv_macaddr); if (ni == NULL) return ENOENT; } else ni = ieee80211_ref_node(vap->iv_bss); ni->ni_vlan = vlan.sv_vlan; ieee80211_free_node(ni); return error; } static int isvap11g(const struct ieee80211vap *vap) { const struct ieee80211_node *bss = vap->iv_bss; return bss->ni_chan != IEEE80211_CHAN_ANYC && IEEE80211_IS_CHAN_ANYG(bss->ni_chan); } static int isvapht(const struct ieee80211vap *vap) { const struct ieee80211_node *bss = vap->iv_bss; return bss->ni_chan != IEEE80211_CHAN_ANYC && IEEE80211_IS_CHAN_HT(bss->ni_chan); } /* * Dummy ioctl set handler so the linker set is defined. 
*/ static int dummy_ioctl_set(struct ieee80211vap *vap, struct ieee80211req *ireq) { return ENOSYS; } IEEE80211_IOCTL_SET(dummy, dummy_ioctl_set); static int ieee80211_ioctl_setdefault(struct ieee80211vap *vap, struct ieee80211req *ireq) { ieee80211_ioctl_setfunc * const *set; int error; SET_FOREACH(set, ieee80211_ioctl_setset) { error = (*set)(vap, ireq); if (error != ENOSYS) return error; } return EINVAL; } static int ieee80211_ioctl_set80211(struct ieee80211vap *vap, u_long cmd, struct ieee80211req *ireq) { struct ieee80211com *ic = vap->iv_ic; int error; const struct ieee80211_authenticator *auth; uint8_t tmpkey[IEEE80211_KEYBUF_SIZE]; char tmpssid[IEEE80211_NWID_LEN]; uint8_t tmpbssid[IEEE80211_ADDR_LEN]; struct ieee80211_key *k; u_int kid; uint32_t flags; error = 0; switch (ireq->i_type) { case IEEE80211_IOC_SSID: if (ireq->i_val != 0 || ireq->i_len > IEEE80211_NWID_LEN) return EINVAL; error = copyin(ireq->i_data, tmpssid, ireq->i_len); if (error) break; memset(vap->iv_des_ssid[0].ssid, 0, IEEE80211_NWID_LEN); vap->iv_des_ssid[0].len = ireq->i_len; memcpy(vap->iv_des_ssid[0].ssid, tmpssid, ireq->i_len); vap->iv_des_nssid = (ireq->i_len > 0); error = ENETRESET; break; case IEEE80211_IOC_WEP: switch (ireq->i_val) { case IEEE80211_WEP_OFF: vap->iv_flags &= ~IEEE80211_F_PRIVACY; vap->iv_flags &= ~IEEE80211_F_DROPUNENC; break; case IEEE80211_WEP_ON: vap->iv_flags |= IEEE80211_F_PRIVACY; vap->iv_flags |= IEEE80211_F_DROPUNENC; break; case IEEE80211_WEP_MIXED: vap->iv_flags |= IEEE80211_F_PRIVACY; vap->iv_flags &= ~IEEE80211_F_DROPUNENC; break; } error = ENETRESET; break; case IEEE80211_IOC_WEPKEY: kid = (u_int) ireq->i_val; if (kid >= IEEE80211_WEP_NKID) return EINVAL; k = &vap->iv_nw_keys[kid]; if (ireq->i_len == 0) { /* zero-len =>'s delete any existing key */ (void) ieee80211_crypto_delkey(vap, k); break; } if (ireq->i_len > sizeof(tmpkey)) return EINVAL; memset(tmpkey, 0, sizeof(tmpkey)); error = copyin(ireq->i_data, tmpkey, ireq->i_len); if (error) break; ieee80211_key_update_begin(vap); k->wk_keyix = kid; /* NB: force fixed key id */ if (ieee80211_crypto_newkey(vap, IEEE80211_CIPHER_WEP, IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV, k)) { k->wk_keylen = ireq->i_len; memcpy(k->wk_key, tmpkey, sizeof(tmpkey)); IEEE80211_ADDR_COPY(k->wk_macaddr, vap->iv_myaddr); if (!ieee80211_crypto_setkey(vap, k)) error = EINVAL; } else error = EINVAL; ieee80211_key_update_end(vap); break; case IEEE80211_IOC_WEPTXKEY: kid = (u_int) ireq->i_val; if (kid >= IEEE80211_WEP_NKID && (uint16_t) kid != IEEE80211_KEYIX_NONE) return EINVAL; /* * Firmware devices may need to be told about an explicit * key index here, versus just inferring it from the * key set / change. Since we may also need to pause * things like transmit before the key is updated, * give the driver a chance to flush things by tying * into key update begin/end. 
*/ ieee80211_key_update_begin(vap); ieee80211_crypto_set_deftxkey(vap, kid); ieee80211_key_update_end(vap); break; case IEEE80211_IOC_AUTHMODE: switch (ireq->i_val) { case IEEE80211_AUTH_WPA: case IEEE80211_AUTH_8021X: /* 802.1x */ case IEEE80211_AUTH_OPEN: /* open */ case IEEE80211_AUTH_SHARED: /* shared-key */ case IEEE80211_AUTH_AUTO: /* auto */ auth = ieee80211_authenticator_get(ireq->i_val); if (auth == NULL) return EINVAL; break; default: return EINVAL; } switch (ireq->i_val) { case IEEE80211_AUTH_WPA: /* WPA w/ 802.1x */ vap->iv_flags |= IEEE80211_F_PRIVACY; ireq->i_val = IEEE80211_AUTH_8021X; break; case IEEE80211_AUTH_OPEN: /* open */ vap->iv_flags &= ~(IEEE80211_F_WPA|IEEE80211_F_PRIVACY); break; case IEEE80211_AUTH_SHARED: /* shared-key */ case IEEE80211_AUTH_8021X: /* 802.1x */ vap->iv_flags &= ~IEEE80211_F_WPA; /* both require a key so mark the PRIVACY capability */ vap->iv_flags |= IEEE80211_F_PRIVACY; break; case IEEE80211_AUTH_AUTO: /* auto */ vap->iv_flags &= ~IEEE80211_F_WPA; /* XXX PRIVACY handling? */ /* XXX what's the right way to do this? */ break; } /* NB: authenticator attach/detach happens on state change */ vap->iv_bss->ni_authmode = ireq->i_val; /* XXX mixed/mode/usage? */ vap->iv_auth = auth; error = ENETRESET; break; case IEEE80211_IOC_CHANNEL: error = ieee80211_ioctl_setchannel(vap, ireq); break; case IEEE80211_IOC_POWERSAVE: switch (ireq->i_val) { case IEEE80211_POWERSAVE_OFF: if (vap->iv_flags & IEEE80211_F_PMGTON) { ieee80211_syncflag(vap, -IEEE80211_F_PMGTON); error = ERESTART; } break; case IEEE80211_POWERSAVE_ON: if ((vap->iv_caps & IEEE80211_C_PMGT) == 0) error = EOPNOTSUPP; else if ((vap->iv_flags & IEEE80211_F_PMGTON) == 0) { ieee80211_syncflag(vap, IEEE80211_F_PMGTON); error = ERESTART; } break; default: error = EINVAL; break; } break; case IEEE80211_IOC_POWERSAVESLEEP: if (ireq->i_val < 0) return EINVAL; ic->ic_lintval = ireq->i_val; error = ERESTART; break; case IEEE80211_IOC_RTSTHRESHOLD: if (!(IEEE80211_RTS_MIN <= ireq->i_val && ireq->i_val <= IEEE80211_RTS_MAX)) return EINVAL; vap->iv_rtsthreshold = ireq->i_val; error = ERESTART; break; case IEEE80211_IOC_PROTMODE: if (ireq->i_val > IEEE80211_PROT_RTSCTS) return EINVAL; vap->iv_protmode = (enum ieee80211_protmode)ireq->i_val; /* NB: if not operating in 11g this can wait */ if (ic->ic_bsschan != IEEE80211_CHAN_ANYC && IEEE80211_IS_CHAN_ANYG(ic->ic_bsschan)) error = ERESTART; /* driver callback for protection mode update */ ieee80211_vap_update_erp_protmode(vap); break; case IEEE80211_IOC_TXPOWER: if ((ic->ic_caps & IEEE80211_C_TXPMGT) == 0) return EOPNOTSUPP; if (!(IEEE80211_TXPOWER_MIN <= ireq->i_val && ireq->i_val <= IEEE80211_TXPOWER_MAX)) return EINVAL; ic->ic_txpowlimit = ireq->i_val; error = ERESTART; break; case IEEE80211_IOC_ROAMING: if (!(IEEE80211_ROAMING_DEVICE <= ireq->i_val && ireq->i_val <= IEEE80211_ROAMING_MANUAL)) return EINVAL; vap->iv_roaming = (enum ieee80211_roamingmode)ireq->i_val; /* XXXX reset? */ break; case IEEE80211_IOC_PRIVACY: if (ireq->i_val) { /* XXX check for key state? */ vap->iv_flags |= IEEE80211_F_PRIVACY; } else vap->iv_flags &= ~IEEE80211_F_PRIVACY; /* XXX ERESTART? */ break; case IEEE80211_IOC_DROPUNENCRYPTED: if (ireq->i_val) vap->iv_flags |= IEEE80211_F_DROPUNENC; else vap->iv_flags &= ~IEEE80211_F_DROPUNENC; /* XXX ERESTART? 
*/ break; case IEEE80211_IOC_WPAKEY: error = ieee80211_ioctl_setkey(vap, ireq); break; case IEEE80211_IOC_DELKEY: error = ieee80211_ioctl_delkey(vap, ireq); break; case IEEE80211_IOC_MLME: error = ieee80211_ioctl_setmlme(vap, ireq); break; case IEEE80211_IOC_COUNTERMEASURES: if (ireq->i_val) { if ((vap->iv_flags & IEEE80211_F_WPA) == 0) return EOPNOTSUPP; vap->iv_flags |= IEEE80211_F_COUNTERM; } else vap->iv_flags &= ~IEEE80211_F_COUNTERM; /* XXX ERESTART? */ break; case IEEE80211_IOC_WPA: if (ireq->i_val > 3) return EINVAL; /* XXX verify ciphers available */ flags = vap->iv_flags & ~IEEE80211_F_WPA; switch (ireq->i_val) { case 0: /* wpa_supplicant calls this to clear the WPA config */ break; case 1: if (!(vap->iv_caps & IEEE80211_C_WPA1)) return EOPNOTSUPP; flags |= IEEE80211_F_WPA1; break; case 2: if (!(vap->iv_caps & IEEE80211_C_WPA2)) return EOPNOTSUPP; flags |= IEEE80211_F_WPA2; break; case 3: if ((vap->iv_caps & IEEE80211_C_WPA) != IEEE80211_C_WPA) return EOPNOTSUPP; flags |= IEEE80211_F_WPA1 | IEEE80211_F_WPA2; break; default: /* Can't set any -> error */ return EOPNOTSUPP; } vap->iv_flags = flags; error = ERESTART; /* NB: can change beacon frame */ break; case IEEE80211_IOC_WME: if (ireq->i_val) { if ((vap->iv_caps & IEEE80211_C_WME) == 0) return EOPNOTSUPP; ieee80211_syncflag(vap, IEEE80211_F_WME); } else ieee80211_syncflag(vap, -IEEE80211_F_WME); error = ERESTART; /* NB: can change beacon frame */ break; case IEEE80211_IOC_HIDESSID: if (ireq->i_val) vap->iv_flags |= IEEE80211_F_HIDESSID; else vap->iv_flags &= ~IEEE80211_F_HIDESSID; error = ERESTART; /* XXX ENETRESET? */ break; case IEEE80211_IOC_APBRIDGE: if (ireq->i_val == 0) vap->iv_flags |= IEEE80211_F_NOBRIDGE; else vap->iv_flags &= ~IEEE80211_F_NOBRIDGE; break; case IEEE80211_IOC_BSSID: if (ireq->i_len != sizeof(tmpbssid)) return EINVAL; error = copyin(ireq->i_data, tmpbssid, ireq->i_len); if (error) break; IEEE80211_ADDR_COPY(vap->iv_des_bssid, tmpbssid); if (IEEE80211_ADDR_EQ(vap->iv_des_bssid, zerobssid)) vap->iv_flags &= ~IEEE80211_F_DESBSSID; else vap->iv_flags |= IEEE80211_F_DESBSSID; error = ENETRESET; break; case IEEE80211_IOC_CHANLIST: error = ieee80211_ioctl_setchanlist(vap, ireq); break; #define OLD_IEEE80211_IOC_SCAN_REQ 23 #ifdef OLD_IEEE80211_IOC_SCAN_REQ case OLD_IEEE80211_IOC_SCAN_REQ: IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN, "%s: active scan request\n", __func__); /* * If we are in INIT state then the driver has never * had a chance to setup hardware state to do a scan; * use the state machine to get us up the SCAN state. * Otherwise just invoke the scan machinery to start * a one-time scan. 
*/ if (vap->iv_state == IEEE80211_S_INIT) ieee80211_new_state(vap, IEEE80211_S_SCAN, 0); else (void) ieee80211_start_scan(vap, IEEE80211_SCAN_ACTIVE | IEEE80211_SCAN_NOPICK | IEEE80211_SCAN_ONCE, IEEE80211_SCAN_FOREVER, 0, 0, /* XXX use ioctl params */ vap->iv_des_nssid, vap->iv_des_ssid); break; #endif /* OLD_IEEE80211_IOC_SCAN_REQ */ case IEEE80211_IOC_SCAN_REQ: error = ieee80211_ioctl_scanreq(vap, ireq); break; case IEEE80211_IOC_SCAN_CANCEL: IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN, "%s: cancel scan\n", __func__); ieee80211_cancel_scan(vap); break; case IEEE80211_IOC_HTCONF: if (ireq->i_val & 1) ieee80211_syncflag_ht(vap, IEEE80211_FHT_HT); else ieee80211_syncflag_ht(vap, -IEEE80211_FHT_HT); if (ireq->i_val & 2) ieee80211_syncflag_ht(vap, IEEE80211_FHT_USEHT40); else ieee80211_syncflag_ht(vap, -IEEE80211_FHT_USEHT40); error = ENETRESET; break; case IEEE80211_IOC_ADDMAC: case IEEE80211_IOC_DELMAC: error = ieee80211_ioctl_macmac(vap, ireq); break; case IEEE80211_IOC_MACCMD: error = ieee80211_ioctl_setmaccmd(vap, ireq); break; case IEEE80211_IOC_STA_STATS: error = ieee80211_ioctl_setstastats(vap, ireq); break; case IEEE80211_IOC_STA_TXPOW: error = ieee80211_ioctl_setstatxpow(vap, ireq); break; case IEEE80211_IOC_WME_CWMIN: /* WME: CWmin */ case IEEE80211_IOC_WME_CWMAX: /* WME: CWmax */ case IEEE80211_IOC_WME_AIFS: /* WME: AIFS */ case IEEE80211_IOC_WME_TXOPLIMIT: /* WME: txops limit */ case IEEE80211_IOC_WME_ACM: /* WME: ACM (bss only) */ case IEEE80211_IOC_WME_ACKPOLICY: /* WME: ACK policy (!bss only) */ error = ieee80211_ioctl_setwmeparam(vap, ireq); break; case IEEE80211_IOC_DTIM_PERIOD: if (vap->iv_opmode != IEEE80211_M_HOSTAP && vap->iv_opmode != IEEE80211_M_MBSS && vap->iv_opmode != IEEE80211_M_IBSS) return EINVAL; if (IEEE80211_DTIM_MIN <= ireq->i_val && ireq->i_val <= IEEE80211_DTIM_MAX) { vap->iv_dtim_period = ireq->i_val; error = ENETRESET; /* requires restart */ } else error = EINVAL; break; case IEEE80211_IOC_BEACON_INTERVAL: if (vap->iv_opmode != IEEE80211_M_HOSTAP && vap->iv_opmode != IEEE80211_M_MBSS && vap->iv_opmode != IEEE80211_M_IBSS) return EINVAL; if (IEEE80211_BINTVAL_MIN <= ireq->i_val && ireq->i_val <= IEEE80211_BINTVAL_MAX) { ic->ic_bintval = ireq->i_val; error = ENETRESET; /* requires restart */ } else error = EINVAL; break; case IEEE80211_IOC_PUREG: if (ireq->i_val) vap->iv_flags |= IEEE80211_F_PUREG; else vap->iv_flags &= ~IEEE80211_F_PUREG; /* NB: reset only if we're operating on an 11g channel */ if (isvap11g(vap)) error = ENETRESET; break; case IEEE80211_IOC_QUIET: vap->iv_quiet= ireq->i_val; break; case IEEE80211_IOC_QUIET_COUNT: vap->iv_quiet_count=ireq->i_val; break; case IEEE80211_IOC_QUIET_PERIOD: vap->iv_quiet_period=ireq->i_val; break; case IEEE80211_IOC_QUIET_OFFSET: vap->iv_quiet_offset=ireq->i_val; break; case IEEE80211_IOC_QUIET_DUR: if(ireq->i_val < vap->iv_bss->ni_intval) vap->iv_quiet_duration = ireq->i_val; else error = EINVAL; break; case IEEE80211_IOC_BGSCAN: if (ireq->i_val) { if ((vap->iv_caps & IEEE80211_C_BGSCAN) == 0) return EOPNOTSUPP; vap->iv_flags |= IEEE80211_F_BGSCAN; } else vap->iv_flags &= ~IEEE80211_F_BGSCAN; break; case IEEE80211_IOC_BGSCAN_IDLE: if (ireq->i_val >= IEEE80211_BGSCAN_IDLE_MIN) vap->iv_bgscanidle = ireq->i_val*hz/1000; else error = EINVAL; break; case IEEE80211_IOC_BGSCAN_INTERVAL: if (ireq->i_val >= IEEE80211_BGSCAN_INTVAL_MIN) vap->iv_bgscanintvl = ireq->i_val*hz; else error = EINVAL; break; case IEEE80211_IOC_SCANVALID: if (ireq->i_val >= IEEE80211_SCAN_VALID_MIN) vap->iv_scanvalid = ireq->i_val*hz; else error 
= EINVAL; break; case IEEE80211_IOC_FRAGTHRESHOLD: if ((vap->iv_caps & IEEE80211_C_TXFRAG) == 0 && ireq->i_val != IEEE80211_FRAG_MAX) return EOPNOTSUPP; if (!(IEEE80211_FRAG_MIN <= ireq->i_val && ireq->i_val <= IEEE80211_FRAG_MAX)) return EINVAL; vap->iv_fragthreshold = ireq->i_val; error = ERESTART; break; case IEEE80211_IOC_BURST: if (ireq->i_val) { if ((vap->iv_caps & IEEE80211_C_BURST) == 0) return EOPNOTSUPP; ieee80211_syncflag(vap, IEEE80211_F_BURST); } else ieee80211_syncflag(vap, -IEEE80211_F_BURST); error = ERESTART; break; case IEEE80211_IOC_BMISSTHRESHOLD: if (!(IEEE80211_HWBMISS_MIN <= ireq->i_val && ireq->i_val <= IEEE80211_HWBMISS_MAX)) return EINVAL; vap->iv_bmissthreshold = ireq->i_val; error = ERESTART; break; case IEEE80211_IOC_CURCHAN: error = ieee80211_ioctl_setcurchan(vap, ireq); break; case IEEE80211_IOC_SHORTGI: if (ireq->i_val) { #define IEEE80211_HTCAP_SHORTGI \ (IEEE80211_HTCAP_SHORTGI20 | IEEE80211_HTCAP_SHORTGI40) if (((ireq->i_val ^ vap->iv_htcaps) & IEEE80211_HTCAP_SHORTGI) != 0) return EINVAL; if (ireq->i_val & IEEE80211_HTCAP_SHORTGI20) vap->iv_flags_ht |= IEEE80211_FHT_SHORTGI20; if (ireq->i_val & IEEE80211_HTCAP_SHORTGI40) vap->iv_flags_ht |= IEEE80211_FHT_SHORTGI40; #undef IEEE80211_HTCAP_SHORTGI } else vap->iv_flags_ht &= ~(IEEE80211_FHT_SHORTGI20 | IEEE80211_FHT_SHORTGI40); error = ERESTART; break; case IEEE80211_IOC_AMPDU: if (ireq->i_val && (vap->iv_htcaps & IEEE80211_HTC_AMPDU) == 0) return EINVAL; if (ireq->i_val & 1) vap->iv_flags_ht |= IEEE80211_FHT_AMPDU_TX; else vap->iv_flags_ht &= ~IEEE80211_FHT_AMPDU_TX; if (ireq->i_val & 2) vap->iv_flags_ht |= IEEE80211_FHT_AMPDU_RX; else vap->iv_flags_ht &= ~IEEE80211_FHT_AMPDU_RX; /* NB: reset only if we're operating on an 11n channel */ if (isvapht(vap)) error = ERESTART; break; case IEEE80211_IOC_AMPDU_LIMIT: /* XXX TODO: figure out ampdu_limit versus ampdu_rxmax */ if (!(IEEE80211_HTCAP_MAXRXAMPDU_8K <= ireq->i_val && ireq->i_val <= IEEE80211_HTCAP_MAXRXAMPDU_64K)) return EINVAL; if (vap->iv_opmode == IEEE80211_M_HOSTAP) vap->iv_ampdu_rxmax = ireq->i_val; else vap->iv_ampdu_limit = ireq->i_val; error = ERESTART; break; case IEEE80211_IOC_AMPDU_DENSITY: if (!(IEEE80211_HTCAP_MPDUDENSITY_NA <= ireq->i_val && ireq->i_val <= IEEE80211_HTCAP_MPDUDENSITY_16)) return EINVAL; vap->iv_ampdu_density = ireq->i_val; error = ERESTART; break; case IEEE80211_IOC_AMSDU: if (ireq->i_val && (vap->iv_htcaps & IEEE80211_HTC_AMSDU) == 0) return EINVAL; if (ireq->i_val & 1) vap->iv_flags_ht |= IEEE80211_FHT_AMSDU_TX; else vap->iv_flags_ht &= ~IEEE80211_FHT_AMSDU_TX; if (ireq->i_val & 2) vap->iv_flags_ht |= IEEE80211_FHT_AMSDU_RX; else vap->iv_flags_ht &= ~IEEE80211_FHT_AMSDU_RX; /* NB: reset only if we're operating on an 11n channel */ if (isvapht(vap)) error = ERESTART; break; case IEEE80211_IOC_AMSDU_LIMIT: /* XXX validate */ vap->iv_amsdu_limit = ireq->i_val; /* XXX truncation? 
*/ break; case IEEE80211_IOC_PUREN: if (ireq->i_val) { if ((vap->iv_flags_ht & IEEE80211_FHT_HT) == 0) return EINVAL; vap->iv_flags_ht |= IEEE80211_FHT_PUREN; } else vap->iv_flags_ht &= ~IEEE80211_FHT_PUREN; /* NB: reset only if we're operating on an 11n channel */ if (isvapht(vap)) error = ERESTART; break; case IEEE80211_IOC_DOTH: if (ireq->i_val) { #if 0 /* XXX no capability */ if ((vap->iv_caps & IEEE80211_C_DOTH) == 0) return EOPNOTSUPP; #endif vap->iv_flags |= IEEE80211_F_DOTH; } else vap->iv_flags &= ~IEEE80211_F_DOTH; error = ENETRESET; break; case IEEE80211_IOC_REGDOMAIN: error = ieee80211_ioctl_setregdomain(vap, ireq); break; case IEEE80211_IOC_ROAM: error = ieee80211_ioctl_setroam(vap, ireq); break; case IEEE80211_IOC_TXPARAMS: error = ieee80211_ioctl_settxparams(vap, ireq); break; case IEEE80211_IOC_HTCOMPAT: if (ireq->i_val) { if ((vap->iv_flags_ht & IEEE80211_FHT_HT) == 0) return EOPNOTSUPP; vap->iv_flags_ht |= IEEE80211_FHT_HTCOMPAT; } else vap->iv_flags_ht &= ~IEEE80211_FHT_HTCOMPAT; /* NB: reset only if we're operating on an 11n channel */ if (isvapht(vap)) error = ERESTART; break; case IEEE80211_IOC_DWDS: if (ireq->i_val) { /* NB: DWDS only makes sense for WDS-capable devices */ if ((ic->ic_caps & IEEE80211_C_WDS) == 0) return EOPNOTSUPP; /* NB: DWDS is used only with ap+sta vaps */ if (vap->iv_opmode != IEEE80211_M_HOSTAP && vap->iv_opmode != IEEE80211_M_STA) return EINVAL; vap->iv_flags |= IEEE80211_F_DWDS; if (vap->iv_opmode == IEEE80211_M_STA) vap->iv_flags_ext |= IEEE80211_FEXT_4ADDR; } else { vap->iv_flags &= ~IEEE80211_F_DWDS; if (vap->iv_opmode == IEEE80211_M_STA) vap->iv_flags_ext &= ~IEEE80211_FEXT_4ADDR; } break; case IEEE80211_IOC_INACTIVITY: if (ireq->i_val) vap->iv_flags_ext |= IEEE80211_FEXT_INACT; else vap->iv_flags_ext &= ~IEEE80211_FEXT_INACT; break; case IEEE80211_IOC_APPIE: error = ieee80211_ioctl_setappie(vap, ireq); break; case IEEE80211_IOC_WPS: if (ireq->i_val) { if ((vap->iv_caps & IEEE80211_C_WPA) == 0) return EOPNOTSUPP; vap->iv_flags_ext |= IEEE80211_FEXT_WPS; } else vap->iv_flags_ext &= ~IEEE80211_FEXT_WPS; break; case IEEE80211_IOC_TSN: if (ireq->i_val) { if ((vap->iv_caps & IEEE80211_C_WPA) == 0) return EOPNOTSUPP; vap->iv_flags_ext |= IEEE80211_FEXT_TSN; } else vap->iv_flags_ext &= ~IEEE80211_FEXT_TSN; break; case IEEE80211_IOC_CHANSWITCH: error = ieee80211_ioctl_chanswitch(vap, ireq); break; case IEEE80211_IOC_DFS: if (ireq->i_val) { if ((vap->iv_caps & IEEE80211_C_DFS) == 0) return EOPNOTSUPP; /* NB: DFS requires 11h support */ if ((vap->iv_flags & IEEE80211_F_DOTH) == 0) return EINVAL; vap->iv_flags_ext |= IEEE80211_FEXT_DFS; } else vap->iv_flags_ext &= ~IEEE80211_FEXT_DFS; break; case IEEE80211_IOC_DOTD: if (ireq->i_val) vap->iv_flags_ext |= IEEE80211_FEXT_DOTD; else vap->iv_flags_ext &= ~IEEE80211_FEXT_DOTD; if (vap->iv_opmode == IEEE80211_M_STA) error = ENETRESET; break; case IEEE80211_IOC_HTPROTMODE: if (ireq->i_val > IEEE80211_PROT_RTSCTS) return EINVAL; vap->iv_htprotmode = ireq->i_val ? 
IEEE80211_PROT_RTSCTS : IEEE80211_PROT_NONE; /* NB: if not operating in 11n this can wait */ if (isvapht(vap)) error = ERESTART; /* Notify driver layer of HT protmode changes */ ieee80211_vap_update_ht_protmode(vap); break; case IEEE80211_IOC_STA_VLAN: error = ieee80211_ioctl_setstavlan(vap, ireq); break; case IEEE80211_IOC_SMPS: if ((ireq->i_val &~ IEEE80211_HTCAP_SMPS) != 0 || ireq->i_val == 0x0008) /* value of 2 is reserved */ return EINVAL; if (ireq->i_val != IEEE80211_HTCAP_SMPS_OFF && (vap->iv_htcaps & IEEE80211_HTC_SMPS) == 0) return EOPNOTSUPP; vap->iv_htcaps = (vap->iv_htcaps &~ IEEE80211_HTCAP_SMPS) | ireq->i_val; /* NB: if not operating in 11n this can wait */ if (isvapht(vap)) error = ERESTART; break; case IEEE80211_IOC_RIFS: if (ireq->i_val != 0) { if ((vap->iv_htcaps & IEEE80211_HTC_RIFS) == 0) return EOPNOTSUPP; vap->iv_flags_ht |= IEEE80211_FHT_RIFS; } else vap->iv_flags_ht &= ~IEEE80211_FHT_RIFS; /* NB: if not operating in 11n this can wait */ if (isvapht(vap)) error = ERESTART; break; case IEEE80211_IOC_STBC: /* Check if we can do STBC TX/RX before changing the setting */ if ((ireq->i_val & 1) && ((vap->iv_htcaps & IEEE80211_HTCAP_TXSTBC) == 0)) return EOPNOTSUPP; if ((ireq->i_val & 2) && ((vap->iv_htcaps & IEEE80211_HTCAP_RXSTBC) == 0)) return EOPNOTSUPP; /* TX */ if (ireq->i_val & 1) vap->iv_flags_ht |= IEEE80211_FHT_STBC_TX; else vap->iv_flags_ht &= ~IEEE80211_FHT_STBC_TX; /* RX */ if (ireq->i_val & 2) vap->iv_flags_ht |= IEEE80211_FHT_STBC_RX; else vap->iv_flags_ht &= ~IEEE80211_FHT_STBC_RX; /* NB: reset only if we're operating on an 11n channel */ if (isvapht(vap)) error = ERESTART; break; case IEEE80211_IOC_LDPC: /* Check if we can do LDPC TX/RX before changing the setting */ if ((ireq->i_val & 1) && (vap->iv_htcaps & IEEE80211_HTC_TXLDPC) == 0) return EOPNOTSUPP; if ((ireq->i_val & 2) && (vap->iv_htcaps & IEEE80211_HTCAP_LDPC) == 0) return EOPNOTSUPP; /* TX */ if (ireq->i_val & 1) vap->iv_flags_ht |= IEEE80211_FHT_LDPC_TX; else vap->iv_flags_ht &= ~IEEE80211_FHT_LDPC_TX; /* RX */ if (ireq->i_val & 2) vap->iv_flags_ht |= IEEE80211_FHT_LDPC_RX; else vap->iv_flags_ht &= ~IEEE80211_FHT_LDPC_RX; /* NB: reset only if we're operating on an 11n channel */ if (isvapht(vap)) error = ERESTART; break; case IEEE80211_IOC_UAPSD: if ((vap->iv_caps & IEEE80211_C_UAPSD) == 0) return EOPNOTSUPP; if (ireq->i_val == 0) vap->iv_flags_ext &= ~IEEE80211_FEXT_UAPSD; else if (ireq->i_val == 1) vap->iv_flags_ext |= IEEE80211_FEXT_UAPSD; else return EINVAL; break; /* VHT */ case IEEE80211_IOC_VHTCONF: if (ireq->i_val & IEEE80211_FVHT_VHT) ieee80211_syncflag_vht(vap, IEEE80211_FVHT_VHT); else ieee80211_syncflag_vht(vap, -IEEE80211_FVHT_VHT); if (ireq->i_val & IEEE80211_FVHT_USEVHT40) ieee80211_syncflag_vht(vap, IEEE80211_FVHT_USEVHT40); else ieee80211_syncflag_vht(vap, -IEEE80211_FVHT_USEVHT40); if (ireq->i_val & IEEE80211_FVHT_USEVHT80) ieee80211_syncflag_vht(vap, IEEE80211_FVHT_USEVHT80); else ieee80211_syncflag_vht(vap, -IEEE80211_FVHT_USEVHT80); if (ireq->i_val & IEEE80211_FVHT_USEVHT160) ieee80211_syncflag_vht(vap, IEEE80211_FVHT_USEVHT160); else ieee80211_syncflag_vht(vap, -IEEE80211_FVHT_USEVHT160); if (ireq->i_val & IEEE80211_FVHT_USEVHT80P80) ieee80211_syncflag_vht(vap, IEEE80211_FVHT_USEVHT80P80); else ieee80211_syncflag_vht(vap, -IEEE80211_FVHT_USEVHT80P80); error = ENETRESET; break; default: error = ieee80211_ioctl_setdefault(vap, ireq); break; } /* * The convention is that ENETRESET means an operation * requires a complete re-initialization of the device (e.g. 
* changing something that affects the association state). * ERESTART means the request may be handled with only a * reload of the hardware state. We hand ERESTART requests * to the iv_reset callback so the driver can decide. If * a device does not fillin iv_reset then it defaults to one * that returns ENETRESET. Otherwise a driver may return * ENETRESET (in which case a full reset will be done) or * 0 to mean there's no need to do anything (e.g. when the * change has no effect on the driver/device). */ if (error == ERESTART) error = IFNET_IS_UP_RUNNING(vap->iv_ifp) ? vap->iv_reset(vap, ireq->i_type) : 0; if (error == ENETRESET) { /* XXX need to re-think AUTO handling */ if (IS_UP_AUTO(vap)) ieee80211_init(vap); error = 0; } return error; } int ieee80211_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct ieee80211vap *vap = ifp->if_softc; struct ieee80211com *ic = vap->iv_ic; int error = 0, wait = 0, ic_used; struct ifreq *ifr; struct ifaddr *ifa; /* XXX */ ic_used = (cmd != SIOCSIFMTU && cmd != SIOCG80211STATS); if (ic_used && (error = ieee80211_com_vincref(vap)) != 0) return (error); switch (cmd) { case SIOCSIFFLAGS: IEEE80211_LOCK(ic); if ((ifp->if_flags ^ vap->iv_ifflags) & IFF_PROMISC) { /* * Enable promiscuous mode when: * 1. Interface is not a member of bridge, or * 2. Requested by user, or * 3. In monitor (or adhoc-demo) mode. */ if (ifp->if_bridge == NULL || (ifp->if_flags & IFF_PPROMISC) != 0 || vap->iv_opmode == IEEE80211_M_MONITOR || (vap->iv_opmode == IEEE80211_M_AHDEMO && (vap->iv_caps & IEEE80211_C_TDMA) == 0)) { ieee80211_promisc(vap, ifp->if_flags & IFF_PROMISC); vap->iv_ifflags ^= IFF_PROMISC; } } if ((ifp->if_flags ^ vap->iv_ifflags) & IFF_ALLMULTI) { ieee80211_allmulti(vap, ifp->if_flags & IFF_ALLMULTI); vap->iv_ifflags ^= IFF_ALLMULTI; } if (ifp->if_flags & IFF_UP) { /* * Bring ourself up unless we're already operational. * If we're the first vap and the parent is not up * then it will automatically be brought up as a * side-effect of bringing ourself up. */ if (vap->iv_state == IEEE80211_S_INIT) { if (ic->ic_nrunning == 0) wait = 1; ieee80211_start_locked(vap); } } else if (ifp->if_drv_flags & IFF_DRV_RUNNING) { /* * Stop ourself. If we are the last vap to be * marked down the parent will also be taken down. */ if (ic->ic_nrunning == 1) wait = 1; ieee80211_stop_locked(vap); } IEEE80211_UNLOCK(ic); /* Wait for parent ioctl handler if it was queued */ if (wait) { struct epoch_tracker et; ieee80211_waitfor_parent(ic); /* * Check if the MAC address was changed * via SIOCSIFLLADDR ioctl. * * NB: device may be detached during initialization; * use if_ioctl for existence check. 
*/ NET_EPOCH_ENTER(et); if (ifp->if_ioctl == ieee80211_ioctl && (ifp->if_flags & IFF_UP) == 0 && !IEEE80211_ADDR_EQ(vap->iv_myaddr, IF_LLADDR(ifp))) IEEE80211_ADDR_COPY(vap->iv_myaddr, IF_LLADDR(ifp)); NET_EPOCH_EXIT(et); } break; case SIOCADDMULTI: case SIOCDELMULTI: ieee80211_runtask(ic, &ic->ic_mcast_task); break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: ifr = (struct ifreq *)data; error = ifmedia_ioctl(ifp, ifr, &vap->iv_media, cmd); break; case SIOCG80211: error = ieee80211_ioctl_get80211(vap, cmd, (struct ieee80211req *) data); break; case SIOCS80211: error = ieee80211_priv_check_vap_manage(cmd, vap, ifp); if (error == 0) error = ieee80211_ioctl_set80211(vap, cmd, (struct ieee80211req *) data); break; case SIOCG80211STATS: ifr = (struct ifreq *)data; copyout(&vap->iv_stats, ifr_data_get_ptr(ifr), sizeof (vap->iv_stats)); break; case SIOCSIFMTU: ifr = (struct ifreq *)data; if (!(IEEE80211_MTU_MIN <= ifr->ifr_mtu && ifr->ifr_mtu <= IEEE80211_MTU_MAX)) error = EINVAL; else ifp->if_mtu = ifr->ifr_mtu; break; case SIOCSIFADDR: /* * XXX Handle this directly so we can suppress if_init calls. * XXX This should be done in ether_ioctl but for the moment * XXX there are too many other parts of the system that * XXX set IFF_UP and so suppress if_init being called when * XXX it should be. */ ifa = (struct ifaddr *) data; switch (ifa->ifa_addr->sa_family) { #ifdef INET case AF_INET: if ((ifp->if_flags & IFF_UP) == 0) { ifp->if_flags |= IFF_UP; ifp->if_init(ifp->if_softc); } arp_ifinit(ifp, ifa); break; #endif default: if ((ifp->if_flags & IFF_UP) == 0) { ifp->if_flags |= IFF_UP; ifp->if_init(ifp->if_softc); } break; } break; case SIOCSIFLLADDR: error = ieee80211_priv_check_vap_setmac(cmd, vap, ifp); if (error == 0) break; /* Fallthrough */ default: /* * Pass unknown ioctls first to the driver, and if it * returns ENOTTY, then to the generic Ethernet handler. */ if (ic->ic_ioctl != NULL && (error = ic->ic_ioctl(ic, cmd, data)) != ENOTTY) break; error = ether_ioctl(ifp, cmd, data); break; } if (ic_used) ieee80211_com_vdecref(vap); return (error); } diff --git a/sys/net80211/ieee80211_mesh.c b/sys/net80211/ieee80211_mesh.c index 6d1f5a99fda5..79ec5c9b7de8 100644 --- a/sys/net80211/ieee80211_mesh.c +++ b/sys/net80211/ieee80211_mesh.c @@ -1,3616 +1,3616 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2009 The FreeBSD Foundation * * This software was developed by Rui Paulo under sponsorship from the * FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #ifdef __FreeBSD__ __FBSDID("$FreeBSD$"); #endif /* * IEEE 802.11s Mesh Point (MBSS) support. * * Based on March 2009, D3.0 802.11s draft spec. */ #include "opt_inet.h" #include "opt_wlan.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef IEEE80211_SUPPORT_SUPERG #include #endif #include #include static void mesh_rt_flush_invalid(struct ieee80211vap *); static int mesh_select_proto_path(struct ieee80211vap *, const char *); static int mesh_select_proto_metric(struct ieee80211vap *, const char *); static void mesh_vattach(struct ieee80211vap *); static int mesh_newstate(struct ieee80211vap *, enum ieee80211_state, int); static void mesh_rt_cleanup_cb(void *); static void mesh_gatemode_setup(struct ieee80211vap *); static void mesh_gatemode_cb(void *); static void mesh_linkchange(struct ieee80211_node *, enum ieee80211_mesh_mlstate); static void mesh_checkid(void *, struct ieee80211_node *); static uint32_t mesh_generateid(struct ieee80211vap *); static int mesh_checkpseq(struct ieee80211vap *, const uint8_t [IEEE80211_ADDR_LEN], uint32_t); static void mesh_transmit_to_gate(struct ieee80211vap *, struct mbuf *, struct ieee80211_mesh_route *); static void mesh_forward(struct ieee80211vap *, struct mbuf *, const struct ieee80211_meshcntl *); static int mesh_input(struct ieee80211_node *, struct mbuf *, const struct ieee80211_rx_stats *rxs, int, int); static void mesh_recv_mgmt(struct ieee80211_node *, struct mbuf *, int, const struct ieee80211_rx_stats *rxs, int, int); static void mesh_recv_ctl(struct ieee80211_node *, struct mbuf *, int); static void mesh_peer_timeout_setup(struct ieee80211_node *); static void mesh_peer_timeout_backoff(struct ieee80211_node *); static void mesh_peer_timeout_cb(void *); static __inline void mesh_peer_timeout_stop(struct ieee80211_node *); static int mesh_verify_meshid(struct ieee80211vap *, const uint8_t *); static int mesh_verify_meshconf(struct ieee80211vap *, const uint8_t *); static int mesh_verify_meshpeer(struct ieee80211vap *, uint8_t, const uint8_t *); uint32_t mesh_airtime_calc(struct ieee80211_node *); /* * Timeout values come from the specification and are in milliseconds. 
*/ static SYSCTL_NODE(_net_wlan, OID_AUTO, mesh, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "IEEE 802.11s parameters"); static int ieee80211_mesh_gateint = -1; SYSCTL_PROC(_net_wlan_mesh, OID_AUTO, gateint, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &ieee80211_mesh_gateint, 0, ieee80211_sysctl_msecs_ticks, "I", "mesh gate interval (ms)"); static int ieee80211_mesh_retrytimeout = -1; SYSCTL_PROC(_net_wlan_mesh, OID_AUTO, retrytimeout, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &ieee80211_mesh_retrytimeout, 0, ieee80211_sysctl_msecs_ticks, "I", "Retry timeout (msec)"); static int ieee80211_mesh_holdingtimeout = -1; SYSCTL_PROC(_net_wlan_mesh, OID_AUTO, holdingtimeout, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &ieee80211_mesh_holdingtimeout, 0, ieee80211_sysctl_msecs_ticks, "I", "Holding state timeout (msec)"); static int ieee80211_mesh_confirmtimeout = -1; SYSCTL_PROC(_net_wlan_mesh, OID_AUTO, confirmtimeout, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &ieee80211_mesh_confirmtimeout, 0, ieee80211_sysctl_msecs_ticks, "I", "Confirm state timeout (msec)"); static int ieee80211_mesh_backofftimeout = -1; SYSCTL_PROC(_net_wlan_mesh, OID_AUTO, backofftimeout, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &ieee80211_mesh_backofftimeout, 0, ieee80211_sysctl_msecs_ticks, "I", "Backoff timeout (msec). This throttles peering attempts when " "not receiving an answer or being rejected by a neighbor"); static int ieee80211_mesh_maxretries = 2; SYSCTL_INT(_net_wlan_mesh, OID_AUTO, maxretries, CTLFLAG_RW, &ieee80211_mesh_maxretries, 0, "Maximum retries during peer link establishment"); static int ieee80211_mesh_maxholding = 2; SYSCTL_INT(_net_wlan_mesh, OID_AUTO, maxholding, CTLFLAG_RW, &ieee80211_mesh_maxholding, 0, "Maximum times we are allowed to transition to HOLDING state before " "backing off during peer link establishment"); static const uint8_t broadcastaddr[IEEE80211_ADDR_LEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; static ieee80211_recv_action_func mesh_recv_action_meshpeering_open; static ieee80211_recv_action_func mesh_recv_action_meshpeering_confirm; static ieee80211_recv_action_func mesh_recv_action_meshpeering_close; static ieee80211_recv_action_func mesh_recv_action_meshlmetric; static ieee80211_recv_action_func mesh_recv_action_meshgate; static ieee80211_send_action_func mesh_send_action_meshpeering_open; static ieee80211_send_action_func mesh_send_action_meshpeering_confirm; static ieee80211_send_action_func mesh_send_action_meshpeering_close; static ieee80211_send_action_func mesh_send_action_meshlmetric; static ieee80211_send_action_func mesh_send_action_meshgate; static const struct ieee80211_mesh_proto_metric mesh_metric_airtime = { .mpm_descr = "AIRTIME", .mpm_ie = IEEE80211_MESHCONF_METRIC_AIRTIME, .mpm_metric = mesh_airtime_calc, }; static struct ieee80211_mesh_proto_path mesh_proto_paths[4]; static struct ieee80211_mesh_proto_metric mesh_proto_metrics[4]; MALLOC_DEFINE(M_80211_MESH_PREQ, "80211preq", "802.11 MESH Path Request frame"); MALLOC_DEFINE(M_80211_MESH_PREP, "80211prep", "802.11 MESH Path Reply frame"); MALLOC_DEFINE(M_80211_MESH_PERR, "80211perr", "802.11 MESH Path Error frame"); /* The longer of the two lifetimes should be stored as the new lifetime */ #define MESH_ROUTE_LIFETIME_MAX(a, b) (a > b ? a : b) MALLOC_DEFINE(M_80211_MESH_RT, "80211mesh_rt", "802.11s routing table"); MALLOC_DEFINE(M_80211_MESH_GT_RT, "80211mesh_gt", "802.11s known gates table"); /* * Helper functions to manipulate the Mesh routing table.
*/ static struct ieee80211_mesh_route * mesh_rt_find_locked(struct ieee80211_mesh_state *ms, const uint8_t dest[IEEE80211_ADDR_LEN]) { struct ieee80211_mesh_route *rt; MESH_RT_LOCK_ASSERT(ms); TAILQ_FOREACH(rt, &ms->ms_routes, rt_next) { if (IEEE80211_ADDR_EQ(dest, rt->rt_dest)) return rt; } return NULL; } static struct ieee80211_mesh_route * mesh_rt_add_locked(struct ieee80211vap *vap, const uint8_t dest[IEEE80211_ADDR_LEN]) { struct ieee80211_mesh_state *ms = vap->iv_mesh; struct ieee80211_mesh_route *rt; KASSERT(!IEEE80211_ADDR_EQ(broadcastaddr, dest), ("%s: adding broadcast to the routing table", __func__)); MESH_RT_LOCK_ASSERT(ms); rt = IEEE80211_MALLOC(ALIGN(sizeof(struct ieee80211_mesh_route)) + ms->ms_ppath->mpp_privlen, M_80211_MESH_RT, IEEE80211_M_NOWAIT | IEEE80211_M_ZERO); if (rt != NULL) { rt->rt_vap = vap; IEEE80211_ADDR_COPY(rt->rt_dest, dest); rt->rt_priv = (void *)ALIGN(&rt[1]); MESH_RT_ENTRY_LOCK_INIT(rt, "MBSS_RT"); callout_init(&rt->rt_discovery, 1); rt->rt_updtime = ticks; /* create time */ TAILQ_INSERT_TAIL(&ms->ms_routes, rt, rt_next); } return rt; } struct ieee80211_mesh_route * ieee80211_mesh_rt_find(struct ieee80211vap *vap, const uint8_t dest[IEEE80211_ADDR_LEN]) { struct ieee80211_mesh_state *ms = vap->iv_mesh; struct ieee80211_mesh_route *rt; MESH_RT_LOCK(ms); rt = mesh_rt_find_locked(ms, dest); MESH_RT_UNLOCK(ms); return rt; } struct ieee80211_mesh_route * ieee80211_mesh_rt_add(struct ieee80211vap *vap, const uint8_t dest[IEEE80211_ADDR_LEN]) { struct ieee80211_mesh_state *ms = vap->iv_mesh; struct ieee80211_mesh_route *rt; KASSERT(ieee80211_mesh_rt_find(vap, dest) == NULL, ("%s: duplicate entry in the routing table", __func__)); KASSERT(!IEEE80211_ADDR_EQ(vap->iv_myaddr, dest), ("%s: adding self to the routing table", __func__)); MESH_RT_LOCK(ms); rt = mesh_rt_add_locked(vap, dest); MESH_RT_UNLOCK(ms); return rt; } /* * Update the route lifetime and return the updated lifetime. * If new_lifetime is zero and the route has timed out, it will be invalidated. * new_lifetime is in msec */ int ieee80211_mesh_rt_update(struct ieee80211_mesh_route *rt, int new_lifetime) { int timesince, now; uint32_t lifetime = 0; KASSERT(rt != NULL, ("route is NULL")); now = ticks; MESH_RT_ENTRY_LOCK(rt); /* don't clobber a proxy entry gated by us */ if (rt->rt_flags & IEEE80211_MESHRT_FLAGS_PROXY && rt->rt_nhops == 0) { MESH_RT_ENTRY_UNLOCK(rt); return rt->rt_lifetime; } timesince = ticks_to_msecs(now - rt->rt_updtime); rt->rt_updtime = now; if (timesince >= rt->rt_lifetime) { if (new_lifetime != 0) { rt->rt_lifetime = new_lifetime; } else { rt->rt_flags &= ~IEEE80211_MESHRT_FLAGS_VALID; rt->rt_lifetime = 0; } } else { /* update what is left of lifetime */ rt->rt_lifetime = rt->rt_lifetime - timesince; rt->rt_lifetime = MESH_ROUTE_LIFETIME_MAX( new_lifetime, rt->rt_lifetime); } lifetime = rt->rt_lifetime; MESH_RT_ENTRY_UNLOCK(rt); return lifetime; } /* * Add a proxy route (as needed) for the specified destination.
*/ void ieee80211_mesh_proxy_check(struct ieee80211vap *vap, const uint8_t dest[IEEE80211_ADDR_LEN]) { struct ieee80211_mesh_state *ms = vap->iv_mesh; struct ieee80211_mesh_route *rt; MESH_RT_LOCK(ms); rt = mesh_rt_find_locked(ms, dest); if (rt == NULL) { rt = mesh_rt_add_locked(vap, dest); if (rt == NULL) { IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_MESH, dest, "%s", "unable to add proxy entry"); vap->iv_stats.is_mesh_rtaddfailed++; } else { IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_MESH, dest, "%s", "add proxy entry"); IEEE80211_ADDR_COPY(rt->rt_mesh_gate, vap->iv_myaddr); IEEE80211_ADDR_COPY(rt->rt_nexthop, vap->iv_myaddr); rt->rt_flags |= IEEE80211_MESHRT_FLAGS_VALID | IEEE80211_MESHRT_FLAGS_PROXY; } } else if ((rt->rt_flags & IEEE80211_MESHRT_FLAGS_VALID) == 0) { KASSERT(rt->rt_flags & IEEE80211_MESHRT_FLAGS_PROXY, ("no proxy flag for poxy entry")); struct ieee80211com *ic = vap->iv_ic; /* * Fix existing entry created by received frames from * stations that have some memory of dest. We also * flush any frames held on the staging queue; delivering * them is too much trouble right now. */ IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_MESH, dest, "%s", "fix proxy entry"); IEEE80211_ADDR_COPY(rt->rt_nexthop, vap->iv_myaddr); rt->rt_flags |= IEEE80211_MESHRT_FLAGS_VALID | IEEE80211_MESHRT_FLAGS_PROXY; /* XXX belongs in hwmp */ ieee80211_ageq_drain_node(&ic->ic_stageq, (void *)(uintptr_t) ieee80211_mac_hash(ic, dest)); /* XXX stat? */ } MESH_RT_UNLOCK(ms); } static __inline void mesh_rt_del(struct ieee80211_mesh_state *ms, struct ieee80211_mesh_route *rt) { TAILQ_REMOVE(&ms->ms_routes, rt, rt_next); /* * Grab the lock before destroying it, to be sure no one else * is holding the route. */ MESH_RT_ENTRY_LOCK(rt); callout_drain(&rt->rt_discovery); MESH_RT_ENTRY_LOCK_DESTROY(rt); IEEE80211_FREE(rt, M_80211_MESH_RT); } void ieee80211_mesh_rt_del(struct ieee80211vap *vap, const uint8_t dest[IEEE80211_ADDR_LEN]) { struct ieee80211_mesh_state *ms = vap->iv_mesh; struct ieee80211_mesh_route *rt, *next; MESH_RT_LOCK(ms); TAILQ_FOREACH_SAFE(rt, &ms->ms_routes, rt_next, next) { if (IEEE80211_ADDR_EQ(rt->rt_dest, dest)) { if (rt->rt_flags & IEEE80211_MESHRT_FLAGS_PROXY) { ms->ms_ppath->mpp_senderror(vap, dest, rt, IEEE80211_REASON_MESH_PERR_NO_PROXY); } else { ms->ms_ppath->mpp_senderror(vap, dest, rt, IEEE80211_REASON_MESH_PERR_DEST_UNREACH); } mesh_rt_del(ms, rt); MESH_RT_UNLOCK(ms); return; } } MESH_RT_UNLOCK(ms); } void ieee80211_mesh_rt_flush(struct ieee80211vap *vap) { struct ieee80211_mesh_state *ms = vap->iv_mesh; struct ieee80211_mesh_route *rt, *next; if (ms == NULL) return; MESH_RT_LOCK(ms); TAILQ_FOREACH_SAFE(rt, &ms->ms_routes, rt_next, next) mesh_rt_del(ms, rt); MESH_RT_UNLOCK(ms); } void ieee80211_mesh_rt_flush_peer(struct ieee80211vap *vap, const uint8_t peer[IEEE80211_ADDR_LEN]) { struct ieee80211_mesh_state *ms = vap->iv_mesh; struct ieee80211_mesh_route *rt, *next; MESH_RT_LOCK(ms); TAILQ_FOREACH_SAFE(rt, &ms->ms_routes, rt_next, next) { if (IEEE80211_ADDR_EQ(rt->rt_nexthop, peer)) mesh_rt_del(ms, rt); } MESH_RT_UNLOCK(ms); } /* * Flush expired routing entries, i.e. those in invalid state for * some time. 
*/ static void mesh_rt_flush_invalid(struct ieee80211vap *vap) { struct ieee80211_mesh_state *ms = vap->iv_mesh; struct ieee80211_mesh_route *rt, *next; if (ms == NULL) return; MESH_RT_LOCK(ms); TAILQ_FOREACH_SAFE(rt, &ms->ms_routes, rt_next, next) { /* Discover paths will be deleted by their own callout */ if (rt->rt_flags & IEEE80211_MESHRT_FLAGS_DISCOVER) continue; ieee80211_mesh_rt_update(rt, 0); if ((rt->rt_flags & IEEE80211_MESHRT_FLAGS_VALID) == 0) mesh_rt_del(ms, rt); } MESH_RT_UNLOCK(ms); } int ieee80211_mesh_register_proto_path(const struct ieee80211_mesh_proto_path *mpp) { int i, firstempty = -1; for (i = 0; i < nitems(mesh_proto_paths); i++) { if (strncmp(mpp->mpp_descr, mesh_proto_paths[i].mpp_descr, IEEE80211_MESH_PROTO_DSZ) == 0) return EEXIST; if (!mesh_proto_paths[i].mpp_active && firstempty == -1) firstempty = i; } if (firstempty < 0) return ENOSPC; memcpy(&mesh_proto_paths[firstempty], mpp, sizeof(*mpp)); mesh_proto_paths[firstempty].mpp_active = 1; return 0; } int ieee80211_mesh_register_proto_metric(const struct ieee80211_mesh_proto_metric *mpm) { int i, firstempty = -1; for (i = 0; i < nitems(mesh_proto_metrics); i++) { if (strncmp(mpm->mpm_descr, mesh_proto_metrics[i].mpm_descr, IEEE80211_MESH_PROTO_DSZ) == 0) return EEXIST; if (!mesh_proto_metrics[i].mpm_active && firstempty == -1) firstempty = i; } if (firstempty < 0) return ENOSPC; memcpy(&mesh_proto_metrics[firstempty], mpm, sizeof(*mpm)); mesh_proto_metrics[firstempty].mpm_active = 1; return 0; } static int mesh_select_proto_path(struct ieee80211vap *vap, const char *name) { struct ieee80211_mesh_state *ms = vap->iv_mesh; int i; for (i = 0; i < nitems(mesh_proto_paths); i++) { if (strcasecmp(mesh_proto_paths[i].mpp_descr, name) == 0) { ms->ms_ppath = &mesh_proto_paths[i]; return 0; } } return ENOENT; } static int mesh_select_proto_metric(struct ieee80211vap *vap, const char *name) { struct ieee80211_mesh_state *ms = vap->iv_mesh; int i; for (i = 0; i < nitems(mesh_proto_metrics); i++) { if (strcasecmp(mesh_proto_metrics[i].mpm_descr, name) == 0) { ms->ms_pmetric = &mesh_proto_metrics[i]; return 0; } } return ENOENT; } static void mesh_gatemode_setup(struct ieee80211vap *vap) { struct ieee80211_mesh_state *ms = vap->iv_mesh; /* * NB: When a mesh gate is running as a ROOT it shall * not send out periodic GANNs but instead mark the * mesh gate flag for the corresponding proactive PREQ * and RANN frames. */ if (ms->ms_flags & IEEE80211_MESHFLAGS_ROOT || (ms->ms_flags & IEEE80211_MESHFLAGS_GATE) == 0) { callout_drain(&ms->ms_gatetimer); return ; } callout_reset(&ms->ms_gatetimer, ieee80211_mesh_gateint, mesh_gatemode_cb, vap); } static void mesh_gatemode_cb(void *arg) { struct ieee80211vap *vap = (struct ieee80211vap *)arg; struct ieee80211_mesh_state *ms = vap->iv_mesh; struct ieee80211_meshgann_ie gann; gann.gann_flags = 0; /* Reserved */ gann.gann_hopcount = 0; gann.gann_ttl = ms->ms_ttl; IEEE80211_ADDR_COPY(gann.gann_addr, vap->iv_myaddr); gann.gann_seq = ms->ms_gateseq++; gann.gann_interval = ieee80211_mesh_gateint; IEEE80211_NOTE(vap, IEEE80211_MSG_MESH, vap->iv_bss, "send broadcast GANN (seq %u)", gann.gann_seq); ieee80211_send_action(vap->iv_bss, IEEE80211_ACTION_CAT_MESH, IEEE80211_ACTION_MESH_GANN, &gann); mesh_gatemode_setup(vap); } static void ieee80211_mesh_init(void) { memset(mesh_proto_paths, 0, sizeof(mesh_proto_paths)); memset(mesh_proto_metrics, 0, sizeof(mesh_proto_metrics)); /* * Setup mesh parameters that depends on the clock frequency. 
*/ ieee80211_mesh_gateint = msecs_to_ticks(10000); ieee80211_mesh_retrytimeout = msecs_to_ticks(40); ieee80211_mesh_holdingtimeout = msecs_to_ticks(40); ieee80211_mesh_confirmtimeout = msecs_to_ticks(40); ieee80211_mesh_backofftimeout = msecs_to_ticks(5000); /* * Register action frame handlers. */ ieee80211_recv_action_register(IEEE80211_ACTION_CAT_SELF_PROT, IEEE80211_ACTION_MESHPEERING_OPEN, mesh_recv_action_meshpeering_open); ieee80211_recv_action_register(IEEE80211_ACTION_CAT_SELF_PROT, IEEE80211_ACTION_MESHPEERING_CONFIRM, mesh_recv_action_meshpeering_confirm); ieee80211_recv_action_register(IEEE80211_ACTION_CAT_SELF_PROT, IEEE80211_ACTION_MESHPEERING_CLOSE, mesh_recv_action_meshpeering_close); ieee80211_recv_action_register(IEEE80211_ACTION_CAT_MESH, IEEE80211_ACTION_MESH_LMETRIC, mesh_recv_action_meshlmetric); ieee80211_recv_action_register(IEEE80211_ACTION_CAT_MESH, IEEE80211_ACTION_MESH_GANN, mesh_recv_action_meshgate); ieee80211_send_action_register(IEEE80211_ACTION_CAT_SELF_PROT, IEEE80211_ACTION_MESHPEERING_OPEN, mesh_send_action_meshpeering_open); ieee80211_send_action_register(IEEE80211_ACTION_CAT_SELF_PROT, IEEE80211_ACTION_MESHPEERING_CONFIRM, mesh_send_action_meshpeering_confirm); ieee80211_send_action_register(IEEE80211_ACTION_CAT_SELF_PROT, IEEE80211_ACTION_MESHPEERING_CLOSE, mesh_send_action_meshpeering_close); ieee80211_send_action_register(IEEE80211_ACTION_CAT_MESH, IEEE80211_ACTION_MESH_LMETRIC, mesh_send_action_meshlmetric); ieee80211_send_action_register(IEEE80211_ACTION_CAT_MESH, IEEE80211_ACTION_MESH_GANN, mesh_send_action_meshgate); /* * Register Airtime Link Metric. */ ieee80211_mesh_register_proto_metric(&mesh_metric_airtime); } SYSINIT(wlan_mesh, SI_SUB_DRIVERS, SI_ORDER_FIRST, ieee80211_mesh_init, NULL); void ieee80211_mesh_attach(struct ieee80211com *ic) { ic->ic_vattach[IEEE80211_M_MBSS] = mesh_vattach; } void ieee80211_mesh_detach(struct ieee80211com *ic) { } static void mesh_vdetach_peers(void *arg, struct ieee80211_node *ni) { struct ieee80211com *ic = ni->ni_ic; uint16_t args[3]; if (ni->ni_mlstate == IEEE80211_NODE_MESH_ESTABLISHED) { args[0] = ni->ni_mlpid; args[1] = ni->ni_mllid; args[2] = IEEE80211_REASON_PEER_LINK_CANCELED; ieee80211_send_action(ni, IEEE80211_ACTION_CAT_SELF_PROT, IEEE80211_ACTION_MESHPEERING_CLOSE, args); } callout_drain(&ni->ni_mltimer); /* XXX belongs in hwmp */ ieee80211_ageq_drain_node(&ic->ic_stageq, (void *)(uintptr_t) ieee80211_mac_hash(ic, ni->ni_macaddr)); } static void mesh_vdetach(struct ieee80211vap *vap) { struct ieee80211_mesh_state *ms = vap->iv_mesh; callout_drain(&ms->ms_cleantimer); ieee80211_iterate_nodes(&vap->iv_ic->ic_sta, mesh_vdetach_peers, NULL); ieee80211_mesh_rt_flush(vap); MESH_RT_LOCK_DESTROY(ms); ms->ms_ppath->mpp_vdetach(vap); IEEE80211_FREE(vap->iv_mesh, M_80211_VAP); vap->iv_mesh = NULL; } static void mesh_vattach(struct ieee80211vap *vap) { struct ieee80211_mesh_state *ms; vap->iv_newstate = mesh_newstate; vap->iv_input = mesh_input; vap->iv_opdetach = mesh_vdetach; vap->iv_recv_mgmt = mesh_recv_mgmt; vap->iv_recv_ctl = mesh_recv_ctl; ms = IEEE80211_MALLOC(sizeof(struct ieee80211_mesh_state), M_80211_VAP, IEEE80211_M_NOWAIT | IEEE80211_M_ZERO); if (ms == NULL) { printf("%s: couldn't alloc MBSS state\n", __func__); return; } vap->iv_mesh = ms; ms->ms_seq = 0; ms->ms_flags = (IEEE80211_MESHFLAGS_AP | IEEE80211_MESHFLAGS_FWD); ms->ms_ttl = IEEE80211_MESH_DEFAULT_TTL; TAILQ_INIT(&ms->ms_known_gates); TAILQ_INIT(&ms->ms_routes); MESH_RT_LOCK_INIT(ms, "MBSS"); callout_init(&ms->ms_cleantimer, 1); 
callout_init(&ms->ms_gatetimer, 1); ms->ms_gateseq = 0; mesh_select_proto_metric(vap, "AIRTIME"); KASSERT(ms->ms_pmetric, ("ms_pmetric == NULL")); mesh_select_proto_path(vap, "HWMP"); KASSERT(ms->ms_ppath, ("ms_ppath == NULL")); ms->ms_ppath->mpp_vattach(vap); } /* * IEEE80211_M_MBSS vap state machine handler. */ static int mesh_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct ieee80211_mesh_state *ms = vap->iv_mesh; struct ieee80211com *ic = vap->iv_ic; struct ieee80211_node *ni; enum ieee80211_state ostate; IEEE80211_LOCK_ASSERT(ic); ostate = vap->iv_state; IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE, "%s: %s -> %s (%d)\n", __func__, ieee80211_state_name[ostate], ieee80211_state_name[nstate], arg); vap->iv_state = nstate; /* state transition */ if (ostate != IEEE80211_S_SCAN) ieee80211_cancel_scan(vap); /* background scan */ ni = vap->iv_bss; /* NB: no reference held */ if (nstate != IEEE80211_S_RUN && ostate == IEEE80211_S_RUN) { callout_drain(&ms->ms_cleantimer); callout_drain(&ms->ms_gatetimer); } switch (nstate) { case IEEE80211_S_INIT: switch (ostate) { case IEEE80211_S_SCAN: ieee80211_cancel_scan(vap); break; case IEEE80211_S_CAC: ieee80211_dfs_cac_stop(vap); break; case IEEE80211_S_RUN: ieee80211_iterate_nodes(&ic->ic_sta, mesh_vdetach_peers, NULL); break; default: break; } if (ostate != IEEE80211_S_INIT) { /* NB: optimize INIT -> INIT case */ ieee80211_reset_bss(vap); ieee80211_mesh_rt_flush(vap); } break; case IEEE80211_S_SCAN: switch (ostate) { case IEEE80211_S_INIT: if (vap->iv_des_chan != IEEE80211_CHAN_ANYC && !IEEE80211_IS_CHAN_RADAR(vap->iv_des_chan) && ms->ms_idlen != 0) { /* * Already have a channel and a mesh ID; bypass * the scan and startup immediately. */ ieee80211_create_ibss(vap, vap->iv_des_chan); break; } /* * Initiate a scan. We can come here as a result * of an IEEE80211_IOC_SCAN_REQ too in which case * the vap will be marked with IEEE80211_FEXT_SCANREQ * and the scan request parameters will be present * in iv_scanreq. Otherwise we do the default. */ if (vap->iv_flags_ext & IEEE80211_FEXT_SCANREQ) { ieee80211_check_scan(vap, vap->iv_scanreq_flags, vap->iv_scanreq_duration, vap->iv_scanreq_mindwell, vap->iv_scanreq_maxdwell, vap->iv_scanreq_nssid, vap->iv_scanreq_ssid); vap->iv_flags_ext &= ~IEEE80211_FEXT_SCANREQ; } else ieee80211_check_scan_current(vap); break; default: break; } break; case IEEE80211_S_CAC: /* * Start CAC on a DFS channel. We come here when starting * a bss on a DFS channel (see ieee80211_create_ibss). */ ieee80211_dfs_cac_start(vap); break; case IEEE80211_S_RUN: switch (ostate) { case IEEE80211_S_INIT: /* * Already have a channel; bypass the * scan and startup immediately. * Note that ieee80211_create_ibss will call * back to do a RUN->RUN state change. */ ieee80211_create_ibss(vap, ieee80211_ht_adjust_channel(ic, ic->ic_curchan, vap->iv_flags_ht)); /* NB: iv_bss is changed on return */ break; case IEEE80211_S_CAC: /* * NB: This is the normal state change when CAC * expires and no radar was detected; no need to * clear the CAC timer as it's already expired. */ /* fall thru... */ case IEEE80211_S_CSA: #if 0 /* * Shorten inactivity timer of associated stations * to weed out sta's that don't follow a CSA. */ ieee80211_iterate_nodes(&ic->ic_sta, sta_csa, vap); #endif /* * Update bss node channel to reflect where * we landed after CSA. 
*/ ieee80211_node_set_chan(ni, ieee80211_ht_adjust_channel(ic, ic->ic_curchan, ieee80211_htchanflags(ni->ni_chan))); /* XXX bypass debug msgs */ break; case IEEE80211_S_SCAN: case IEEE80211_S_RUN: #ifdef IEEE80211_DEBUG if (ieee80211_msg_debug(vap)) { ieee80211_note(vap, "synchronized with %s meshid ", ether_sprintf(ni->ni_meshid)); ieee80211_print_essid(ni->ni_meshid, ni->ni_meshidlen); /* XXX MCS/HT */ printf(" channel %d\n", ieee80211_chan2ieee(ic, ic->ic_curchan)); } #endif break; default: break; } ieee80211_node_authorize(ni); callout_reset(&ms->ms_cleantimer, ms->ms_ppath->mpp_inact, mesh_rt_cleanup_cb, vap); mesh_gatemode_setup(vap); break; default: break; } /* NB: ostate not nstate */ ms->ms_ppath->mpp_newstate(vap, ostate, arg); return 0; } static void mesh_rt_cleanup_cb(void *arg) { struct ieee80211vap *vap = arg; struct ieee80211_mesh_state *ms = vap->iv_mesh; mesh_rt_flush_invalid(vap); callout_reset(&ms->ms_cleantimer, ms->ms_ppath->mpp_inact, mesh_rt_cleanup_cb, vap); } /* * Mark a mesh STA as gate and return a pointer to it. * If this is first time, we create a new gate route. * Always update the path route to this mesh gate. */ struct ieee80211_mesh_gate_route * ieee80211_mesh_mark_gate(struct ieee80211vap *vap, const uint8_t *addr, struct ieee80211_mesh_route *rt) { struct ieee80211_mesh_state *ms = vap->iv_mesh; struct ieee80211_mesh_gate_route *gr = NULL, *next; int found = 0; MESH_RT_LOCK(ms); TAILQ_FOREACH_SAFE(gr, &ms->ms_known_gates, gr_next, next) { if (IEEE80211_ADDR_EQ(gr->gr_addr, addr)) { found = 1; break; } } if (!found) { /* New mesh gate add it to known table. */ IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_MESH, addr, "%s", "stored new gate information from pro-PREQ."); gr = IEEE80211_MALLOC(ALIGN(sizeof(struct ieee80211_mesh_gate_route)), M_80211_MESH_GT_RT, IEEE80211_M_NOWAIT | IEEE80211_M_ZERO); IEEE80211_ADDR_COPY(gr->gr_addr, addr); TAILQ_INSERT_TAIL(&ms->ms_known_gates, gr, gr_next); } gr->gr_route = rt; /* TODO: link from path route to gate route */ MESH_RT_UNLOCK(ms); return gr; } /* * Helper function to note the Mesh Peer Link FSM change. 
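 * Tracks the count of ESTABLISHED neighbors and schedules a Mesh Configuration element update in the beacon whenever that count changes.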
*/ static void mesh_linkchange(struct ieee80211_node *ni, enum ieee80211_mesh_mlstate state) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211_mesh_state *ms = vap->iv_mesh; #ifdef IEEE80211_DEBUG static const char *meshlinkstates[] = { [IEEE80211_NODE_MESH_IDLE] = "IDLE", [IEEE80211_NODE_MESH_OPENSNT] = "OPEN SENT", [IEEE80211_NODE_MESH_OPENRCV] = "OPEN RECEIVED", [IEEE80211_NODE_MESH_CONFIRMRCV] = "CONFIRM RECEIVED", [IEEE80211_NODE_MESH_ESTABLISHED] = "ESTABLISHED", [IEEE80211_NODE_MESH_HOLDING] = "HOLDING" }; #endif IEEE80211_NOTE(vap, IEEE80211_MSG_MESH, ni, "peer link: %s -> %s", meshlinkstates[ni->ni_mlstate], meshlinkstates[state]); /* track neighbor count */ if (state == IEEE80211_NODE_MESH_ESTABLISHED && ni->ni_mlstate != IEEE80211_NODE_MESH_ESTABLISHED) { KASSERT(ms->ms_neighbors < 65535, ("neighbor count overflow")); ms->ms_neighbors++; ieee80211_beacon_notify(vap, IEEE80211_BEACON_MESHCONF); } else if (ni->ni_mlstate == IEEE80211_NODE_MESH_ESTABLISHED && state != IEEE80211_NODE_MESH_ESTABLISHED) { KASSERT(ms->ms_neighbors > 0, ("neighbor count 0")); ms->ms_neighbors--; ieee80211_beacon_notify(vap, IEEE80211_BEACON_MESHCONF); } ni->ni_mlstate = state; switch (state) { case IEEE80211_NODE_MESH_HOLDING: ms->ms_ppath->mpp_peerdown(ni); break; case IEEE80211_NODE_MESH_ESTABLISHED: ieee80211_mesh_discover(vap, ni->ni_macaddr, NULL); break; default: break; } } /* * Helper function to generate a unique local ID required for mesh * peer establishment. */ static void mesh_checkid(void *arg, struct ieee80211_node *ni) { uint16_t *r = arg; if (*r == ni->ni_mllid) *(uint16_t *)arg = 0; } static uint32_t mesh_generateid(struct ieee80211vap *vap) { int maxiter = 4; uint16_t r; do { net80211_get_random_bytes(&r, 2); ieee80211_iterate_nodes(&vap->iv_ic->ic_sta, mesh_checkid, &r); maxiter--; } while (r == 0 && maxiter > 0); return r; } /* * Verifies if we already received this packet by checking its * sequence number. * Returns 0 if the frame is to be accepted, 1 otherwise. */ static int mesh_checkpseq(struct ieee80211vap *vap, const uint8_t source[IEEE80211_ADDR_LEN], uint32_t seq) { struct ieee80211_mesh_route *rt; rt = ieee80211_mesh_rt_find(vap, source); if (rt == NULL) { rt = ieee80211_mesh_rt_add(vap, source); if (rt == NULL) { IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_MESH, source, "%s", "add mcast route failed"); vap->iv_stats.is_mesh_rtaddfailed++; return 1; } IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_MESH, source, "add mcast route, mesh seqno %d", seq); rt->rt_lastmseq = seq; return 0; } if (IEEE80211_MESH_SEQ_GEQ(rt->rt_lastmseq, seq)) { return 1; } else { rt->rt_lastmseq = seq; return 0; } } /* * Iterate the routing table and locate the next hop. 
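 * For proxy entries the lookup is done twice: first the proxy route, then the route to its mesh gate, and the frame is handed to the gate's next hop.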
*/ struct ieee80211_node * ieee80211_mesh_find_txnode(struct ieee80211vap *vap, const uint8_t dest[IEEE80211_ADDR_LEN]) { struct ieee80211_mesh_route *rt; rt = ieee80211_mesh_rt_find(vap, dest); if (rt == NULL) return NULL; if ((rt->rt_flags & IEEE80211_MESHRT_FLAGS_VALID) == 0) { IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_MESH, dest, "%s: !valid, flags 0x%x", __func__, rt->rt_flags); /* XXX stat */ return NULL; } if (rt->rt_flags & IEEE80211_MESHRT_FLAGS_PROXY) { rt = ieee80211_mesh_rt_find(vap, rt->rt_mesh_gate); if (rt == NULL) return NULL; if ((rt->rt_flags & IEEE80211_MESHRT_FLAGS_VALID) == 0) { IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_MESH, dest, "%s: meshgate !valid, flags 0x%x", __func__, rt->rt_flags); /* XXX stat */ return NULL; } } return ieee80211_find_txnode(vap, rt->rt_nexthop); } static void mesh_transmit_to_gate(struct ieee80211vap *vap, struct mbuf *m, struct ieee80211_mesh_route *rt_gate) { struct ifnet *ifp = vap->iv_ifp; struct ieee80211_node *ni; IEEE80211_TX_UNLOCK_ASSERT(vap->iv_ic); ni = ieee80211_mesh_find_txnode(vap, rt_gate->rt_dest); if (ni == NULL) { if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); m_freem(m); return; } /* * Send through the VAP packet transmit path. * This consumes the node ref grabbed above and * the mbuf, regardless of whether there's a problem * or not. */ (void) ieee80211_vap_pkt_send_dest(vap, m, ni); } /* * Forward the queued frames to known valid mesh gates. * Assume the destination to be outside the MBSS (i.e. a proxy entry). * If no valid mesh gates are known, silently discard the queued frames. * After transmitting frames to all known valid mesh gates, this route * will be marked invalid, and a new path discovery will happen in the hopes * that (at least) one of the mesh gates has a new proxy entry for us to use. */ void ieee80211_mesh_forward_to_gates(struct ieee80211vap *vap, struct ieee80211_mesh_route *rt_dest) { struct ieee80211com *ic = vap->iv_ic; struct ieee80211_mesh_state *ms = vap->iv_mesh; struct ieee80211_mesh_route *rt_gate; struct ieee80211_mesh_gate_route *gr = NULL, *gr_next; struct mbuf *m, *mcopy, *next; IEEE80211_TX_UNLOCK_ASSERT(ic); KASSERT( rt_dest->rt_flags == IEEE80211_MESHRT_FLAGS_DISCOVER, ("Route is not marked with IEEE80211_MESHRT_FLAGS_DISCOVER")); /* XXX: send to more than one valid mesh gate */ MESH_RT_LOCK(ms); m = ieee80211_ageq_remove(&ic->ic_stageq, (struct ieee80211_node *)(uintptr_t) ieee80211_mac_hash(ic, rt_dest->rt_dest)); TAILQ_FOREACH_SAFE(gr, &ms->ms_known_gates, gr_next, gr_next) { rt_gate = gr->gr_route; if (rt_gate == NULL) { IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_HWMP, rt_dest->rt_dest, "mesh gate with no path %6D", gr->gr_addr, ":"); continue; } if ((rt_gate->rt_flags & IEEE80211_MESHRT_FLAGS_VALID) == 0) continue; KASSERT(rt_gate->rt_flags & IEEE80211_MESHRT_FLAGS_GATE, ("route not marked as a mesh gate")); KASSERT((rt_gate->rt_flags & IEEE80211_MESHRT_FLAGS_PROXY) == 0, ("found mesh gate that is also marked proxy")); /* * Convert the route to a proxy route gated by the current * mesh gate; this is needed so encap can build the data * frame with the correct addresses. */ rt_dest->rt_flags = IEEE80211_MESHRT_FLAGS_PROXY | IEEE80211_MESHRT_FLAGS_VALID; rt_dest->rt_ext_seq = 1; /* random value */ IEEE80211_ADDR_COPY(rt_dest->rt_mesh_gate, rt_gate->rt_dest); IEEE80211_ADDR_COPY(rt_dest->rt_nexthop, rt_gate->rt_nexthop); rt_dest->rt_metric = rt_gate->rt_metric; rt_dest->rt_nhops = rt_gate->rt_nhops; ieee80211_mesh_rt_update(rt_dest, ms->ms_ppath->mpp_inact); MESH_RT_UNLOCK(ms); /* XXX: lock??
*/ - mcopy = m_dup(m, M_NOWAIT); + mcopy = m_dup(m, IEEE80211_M_NOWAIT); for (; mcopy != NULL; mcopy = next) { next = mcopy->m_nextpkt; mcopy->m_nextpkt = NULL; IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_HWMP, rt_dest->rt_dest, "flush queued frame %p len %d", mcopy, mcopy->m_pkthdr.len); mesh_transmit_to_gate(vap, mcopy, rt_gate); } MESH_RT_LOCK(ms); } rt_dest->rt_flags = 0; /* Mark invalid */ m_freem(m); MESH_RT_UNLOCK(ms); } /* * Forward the specified frame. * Decrement the TTL and set TA to our MAC address. */ static void mesh_forward(struct ieee80211vap *vap, struct mbuf *m, const struct ieee80211_meshcntl *mc) { struct ieee80211com *ic = vap->iv_ic; struct ieee80211_mesh_state *ms = vap->iv_mesh; struct ifnet *ifp = vap->iv_ifp; const struct ieee80211_frame *wh = mtod(m, const struct ieee80211_frame *); struct mbuf *mcopy; struct ieee80211_meshcntl *mccopy; struct ieee80211_frame *whcopy; struct ieee80211_node *ni; int err; /* This is called from the RX path - don't hold this lock */ IEEE80211_TX_UNLOCK_ASSERT(ic); /* * mesh ttl of 1 means we are the last one receiving it, * according to amendment we decrement and then check if * 0, if so we dont forward. */ if (mc->mc_ttl < 1) { IEEE80211_NOTE_FRAME(vap, IEEE80211_MSG_MESH, wh, "%s", "frame not fwd'd, ttl 1"); vap->iv_stats.is_mesh_fwd_ttl++; return; } if (!(ms->ms_flags & IEEE80211_MESHFLAGS_FWD)) { IEEE80211_NOTE_FRAME(vap, IEEE80211_MSG_MESH, wh, "%s", "frame not fwd'd, fwding disabled"); vap->iv_stats.is_mesh_fwd_disabled++; return; } - mcopy = m_dup(m, M_NOWAIT); + mcopy = m_dup(m, IEEE80211_M_NOWAIT); if (mcopy == NULL) { IEEE80211_NOTE_FRAME(vap, IEEE80211_MSG_MESH, wh, "%s", "frame not fwd'd, cannot dup"); vap->iv_stats.is_mesh_fwd_nobuf++; if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); return; } mcopy = m_pullup(mcopy, ieee80211_hdrspace(ic, wh) + sizeof(struct ieee80211_meshcntl)); if (mcopy == NULL) { IEEE80211_NOTE_FRAME(vap, IEEE80211_MSG_MESH, wh, "%s", "frame not fwd'd, too short"); vap->iv_stats.is_mesh_fwd_tooshort++; if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); m_freem(mcopy); return; } whcopy = mtod(mcopy, struct ieee80211_frame *); mccopy = (struct ieee80211_meshcntl *) (mtod(mcopy, uint8_t *) + ieee80211_hdrspace(ic, wh)); /* XXX clear other bits? */ whcopy->i_fc[1] &= ~IEEE80211_FC1_RETRY; IEEE80211_ADDR_COPY(whcopy->i_addr2, vap->iv_myaddr); if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { ni = ieee80211_ref_node(vap->iv_bss); mcopy->m_flags |= M_MCAST; } else { ni = ieee80211_mesh_find_txnode(vap, whcopy->i_addr3); if (ni == NULL) { /* * [Optional] any of the following three actions: * o silently discard * o trigger a path discovery * o inform TA that meshDA is unknown. */ IEEE80211_NOTE_FRAME(vap, IEEE80211_MSG_MESH, wh, "%s", "frame not fwd'd, no path"); ms->ms_ppath->mpp_senderror(vap, whcopy->i_addr3, NULL, IEEE80211_REASON_MESH_PERR_NO_FI); vap->iv_stats.is_mesh_fwd_nopath++; m_freem(mcopy); return; } IEEE80211_ADDR_COPY(whcopy->i_addr1, ni->ni_macaddr); } KASSERT(mccopy->mc_ttl > 0, ("%s called with wrong ttl", __func__)); mccopy->mc_ttl--; /* XXX calculate priority so drivers can find the tx queue */ M_WME_SETAC(mcopy, WME_AC_BE); /* XXX do we know m_nextpkt is NULL? */ MPASS((mcopy->m_pkthdr.csum_flags & CSUM_SND_TAG) == 0); mcopy->m_pkthdr.rcvif = (void *) ni; /* * XXX this bypasses all of the VAP TX handling; it passes frames * directly to the parent interface. * * Because of this, there's no TX lock being held as there's no * encaps state being used. 
* * Doing a direct parent transmit may not be the correct thing * to do here; we'll have to re-think this soon. */ IEEE80211_TX_LOCK(ic); err = ieee80211_parent_xmitpkt(ic, mcopy); IEEE80211_TX_UNLOCK(ic); if (!err) if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); } static struct mbuf * mesh_decap(struct ieee80211vap *vap, struct mbuf *m, int hdrlen, int meshdrlen) { #define WHDIR(wh) ((wh)->i_fc[1] & IEEE80211_FC1_DIR_MASK) #define MC01(mc) ((const struct ieee80211_meshcntl_ae01 *)mc) uint8_t b[sizeof(struct ieee80211_qosframe_addr4) + sizeof(struct ieee80211_meshcntl_ae10)]; const struct ieee80211_qosframe_addr4 *wh; const struct ieee80211_meshcntl_ae10 *mc; struct ether_header *eh; struct llc *llc; int ae; if (m->m_len < hdrlen + sizeof(*llc) && (m = m_pullup(m, hdrlen + sizeof(*llc))) == NULL) { IEEE80211_DPRINTF(vap, IEEE80211_MSG_ANY, "discard data frame: %s", "m_pullup failed"); vap->iv_stats.is_rx_tooshort++; return NULL; } memcpy(b, mtod(m, caddr_t), hdrlen); wh = (const struct ieee80211_qosframe_addr4 *)&b[0]; mc = (const struct ieee80211_meshcntl_ae10 *)&b[hdrlen - meshdrlen]; KASSERT(WHDIR(wh) == IEEE80211_FC1_DIR_FROMDS || WHDIR(wh) == IEEE80211_FC1_DIR_DSTODS, ("bogus dir, fc 0x%x:0x%x", wh->i_fc[0], wh->i_fc[1])); llc = (struct llc *)(mtod(m, caddr_t) + hdrlen); if (llc->llc_dsap == LLC_SNAP_LSAP && llc->llc_ssap == LLC_SNAP_LSAP && llc->llc_control == LLC_UI && llc->llc_snap.org_code[0] == 0 && llc->llc_snap.org_code[1] == 0 && llc->llc_snap.org_code[2] == 0 && /* NB: preserve AppleTalk frames that have a native SNAP hdr */ !(llc->llc_snap.ether_type == htons(ETHERTYPE_AARP) || llc->llc_snap.ether_type == htons(ETHERTYPE_IPX))) { m_adj(m, hdrlen + sizeof(struct llc) - sizeof(*eh)); llc = NULL; } else { m_adj(m, hdrlen - sizeof(*eh)); } eh = mtod(m, struct ether_header *); ae = mc->mc_flags & IEEE80211_MESH_AE_MASK; if (WHDIR(wh) == IEEE80211_FC1_DIR_FROMDS) { IEEE80211_ADDR_COPY(eh->ether_dhost, wh->i_addr1); if (ae == IEEE80211_MESH_AE_00) { IEEE80211_ADDR_COPY(eh->ether_shost, wh->i_addr3); } else if (ae == IEEE80211_MESH_AE_01) { IEEE80211_ADDR_COPY(eh->ether_shost, MC01(mc)->mc_addr4); } else { IEEE80211_DISCARD(vap, IEEE80211_MSG_ANY, (const struct ieee80211_frame *)wh, NULL, "bad AE %d", ae); vap->iv_stats.is_mesh_badae++; m_freem(m); return NULL; } } else { if (ae == IEEE80211_MESH_AE_00) { IEEE80211_ADDR_COPY(eh->ether_dhost, wh->i_addr3); IEEE80211_ADDR_COPY(eh->ether_shost, wh->i_addr4); } else if (ae == IEEE80211_MESH_AE_10) { IEEE80211_ADDR_COPY(eh->ether_dhost, mc->mc_addr5); IEEE80211_ADDR_COPY(eh->ether_shost, mc->mc_addr6); } else { IEEE80211_DISCARD(vap, IEEE80211_MSG_ANY, (const struct ieee80211_frame *)wh, NULL, "bad AE %d", ae); vap->iv_stats.is_mesh_badae++; m_freem(m); return NULL; } } #ifndef __NO_STRICT_ALIGNMENT if (!ALIGNED_POINTER(mtod(m, caddr_t) + sizeof(*eh), uint32_t)) { m = ieee80211_realign(vap, m, sizeof(*eh)); if (m == NULL) return NULL; } #endif /* !__NO_STRICT_ALIGNMENT */ if (llc != NULL) { eh = mtod(m, struct ether_header *); eh->ether_type = htons(m->m_pkthdr.len - sizeof(*eh)); } return m; #undef WDIR #undef MC01 } /* * Return non-zero if the unicast mesh data frame should be processed * locally. Frames that are not proxy'd have our address, otherwise * we need to consult the routing table to look for a proxy entry. 
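 * AE 00 frames carry the mesh DA in addr3 and are ours if addr3 is our MAC; AE 10 frames carry the proxied destination in Mesh Control address 5, for which we look up a proxy route to ourselves.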
*/ static __inline int mesh_isucastforme(struct ieee80211vap *vap, const struct ieee80211_frame *wh, const struct ieee80211_meshcntl *mc) { int ae = mc->mc_flags & 3; KASSERT((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS, ("bad dir 0x%x:0x%x", wh->i_fc[0], wh->i_fc[1])); KASSERT(ae == IEEE80211_MESH_AE_00 || ae == IEEE80211_MESH_AE_10, ("bad AE %d", ae)); if (ae == IEEE80211_MESH_AE_10) { /* ucast w/ proxy */ const struct ieee80211_meshcntl_ae10 *mc10 = (const struct ieee80211_meshcntl_ae10 *) mc; struct ieee80211_mesh_route *rt = ieee80211_mesh_rt_find(vap, mc10->mc_addr5); /* check for proxy route to ourself */ return (rt != NULL && (rt->rt_flags & IEEE80211_MESHRT_FLAGS_PROXY)); } else /* ucast w/o proxy */ return IEEE80211_ADDR_EQ(wh->i_addr3, vap->iv_myaddr); } /* * Verifies transmitter, updates lifetime, precursor list and forwards data. * > 0 means we have forwarded data and no need to process locally * == 0 means we want to process locally (and we may have forwarded data * < 0 means there was an error and data should be discarded */ static int mesh_recv_indiv_data_to_fwrd(struct ieee80211vap *vap, struct mbuf *m, struct ieee80211_frame *wh, const struct ieee80211_meshcntl *mc) { struct ieee80211_qosframe_addr4 *qwh; struct ieee80211_mesh_state *ms = vap->iv_mesh; struct ieee80211_mesh_route *rt_meshda, *rt_meshsa; /* This is called from the RX path - don't hold this lock */ IEEE80211_TX_UNLOCK_ASSERT(vap->iv_ic); qwh = (struct ieee80211_qosframe_addr4 *)wh; /* * TODO: * o verify addr2 is a legitimate transmitter * o lifetime of precursor of addr3 (addr2) is max(init, curr) * o lifetime of precursor of addr4 (nexthop) is max(init, curr) */ /* set lifetime of addr3 (meshDA) to initial value */ rt_meshda = ieee80211_mesh_rt_find(vap, qwh->i_addr3); if (rt_meshda == NULL) { IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_MESH, qwh->i_addr2, "no route to meshDA(%6D)", qwh->i_addr3, ":"); /* * [Optional] any of the following three actions: * o silently discard [X] * o trigger a path discovery [ ] * o inform TA that meshDA is unknown. [ ] */ /* XXX: stats */ return (-1); } ieee80211_mesh_rt_update(rt_meshda, ticks_to_msecs( ms->ms_ppath->mpp_inact)); /* set lifetime of addr4 (meshSA) to initial value */ rt_meshsa = ieee80211_mesh_rt_find(vap, qwh->i_addr4); KASSERT(rt_meshsa != NULL, ("no route")); ieee80211_mesh_rt_update(rt_meshsa, ticks_to_msecs( ms->ms_ppath->mpp_inact)); mesh_forward(vap, m, mc); return (1); /* dont process locally */ } /* * Verifies transmitter, updates lifetime, precursor list and process data * locally, if data is proxy with AE = 10 it could mean data should go * on another mesh path or data should be forwarded to the DS. 
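 * (AE = 10 indicates the Mesh Control field carries the proxied external DA/SA in addresses 5 and 6; address 5 selects between local delivery, re-forwarding on another mesh path, or forwarding to the DS.)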
 * * > 0 means we have forwarded data and no need to process locally * == 0 means we want to process locally (and we may have forwarded data) * < 0 means there was an error and data should be discarded */ static int mesh_recv_indiv_data_to_me(struct ieee80211vap *vap, struct mbuf *m, struct ieee80211_frame *wh, const struct ieee80211_meshcntl *mc) { struct ieee80211_qosframe_addr4 *qwh; const struct ieee80211_meshcntl_ae10 *mc10; struct ieee80211_mesh_state *ms = vap->iv_mesh; struct ieee80211_mesh_route *rt; int ae; /* This is called from the RX path - don't hold this lock */ IEEE80211_TX_UNLOCK_ASSERT(vap->iv_ic); qwh = (struct ieee80211_qosframe_addr4 *)wh; mc10 = (const struct ieee80211_meshcntl_ae10 *)mc; /* * TODO: * o verify addr2 is a legitimate transmitter * o lifetime of precursor entry is max(init, curr) */ /* set lifetime of addr4 (meshSA) to initial value */ rt = ieee80211_mesh_rt_find(vap, qwh->i_addr4); KASSERT(rt != NULL, ("no route")); ieee80211_mesh_rt_update(rt, ticks_to_msecs(ms->ms_ppath->mpp_inact)); rt = NULL; ae = mc10->mc_flags & IEEE80211_MESH_AE_MASK; KASSERT(ae == IEEE80211_MESH_AE_00 || ae == IEEE80211_MESH_AE_10, ("bad AE %d", ae)); if (ae == IEEE80211_MESH_AE_10) { if (IEEE80211_ADDR_EQ(mc10->mc_addr5, qwh->i_addr3)) { return (0); /* process locally */ } rt = ieee80211_mesh_rt_find(vap, mc10->mc_addr5); if (rt != NULL && (rt->rt_flags & IEEE80211_MESHRT_FLAGS_VALID) && (rt->rt_flags & IEEE80211_MESHRT_FLAGS_PROXY) == 0) { /* * Forward on another mesh-path, according to * amendment as specified in 9.32.4.1 */ IEEE80211_ADDR_COPY(qwh->i_addr3, mc10->mc_addr5); mesh_forward(vap, m, (const struct ieee80211_meshcntl *)mc10); return (1); /* don't process locally */ } /* * All other cases: forward of MSDUs from the MBSS to DS indiv. * addressed according to 13.11.3.2. */ IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_OUTPUT, qwh->i_addr2, "forward frame to DS, SA(%6D) DA(%6D)", mc10->mc_addr6, ":", mc10->mc_addr5, ":"); } return (0); /* process locally */ } /* * Try to forward the group addressed data on to other mesh STAs, and * also to the DS. * * > 0 means we have forwarded data and no need to process locally * == 0 means we want to process locally (and we may have forwarded data) * < 0 means there was an error and data should be discarded */ static int mesh_recv_group_data(struct ieee80211vap *vap, struct mbuf *m, struct ieee80211_frame *wh, const struct ieee80211_meshcntl *mc) { #define MC01(mc) ((const struct ieee80211_meshcntl_ae01 *)mc) struct ieee80211_mesh_state *ms = vap->iv_mesh; /* This is called from the RX path - don't hold this lock */ IEEE80211_TX_UNLOCK_ASSERT(vap->iv_ic); mesh_forward(vap, m, mc); if (mc->mc_ttl > 0) { if (mc->mc_flags & IEEE80211_MESH_AE_01) { /* * Forward of MSDUs from the MBSS to DS group addressed * (according to 13.11.3.2) * This happens by delivering the packet, and a bridge * will send it on another port member.
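 * The frame itself has already been handed to mesh_forward() above; returning 0 lets the caller deliver it locally so the bridge can push it to the DS.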
*/ if (ms->ms_flags & IEEE80211_MESHFLAGS_GATE && ms->ms_flags & IEEE80211_MESHFLAGS_FWD) { IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_MESH, MC01(mc)->mc_addr4, "%s", "forward from MBSS to the DS"); } } } return (0); /* process locally */ #undef MC01 } static int mesh_input(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_rx_stats *rxs, int rssi, int nf) { #define HAS_SEQ(type) ((type & 0x4) == 0) #define MC01(mc) ((const struct ieee80211_meshcntl_ae01 *)mc) struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct ifnet *ifp = vap->iv_ifp; struct ieee80211_frame *wh; const struct ieee80211_meshcntl *mc; int hdrspace, meshdrlen, need_tap, error; uint8_t dir, type, subtype, ae; uint32_t seq; const uint8_t *addr; uint8_t qos[2]; KASSERT(ni != NULL, ("null node")); ni->ni_inact = ni->ni_inact_reload; need_tap = 1; /* mbuf need to be tapped. */ type = -1; /* undefined */ /* This is called from the RX path - don't hold this lock */ IEEE80211_TX_UNLOCK_ASSERT(ic); if (m->m_pkthdr.len < sizeof(struct ieee80211_frame_min)) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY, ni->ni_macaddr, NULL, "too short (1): len %u", m->m_pkthdr.len); vap->iv_stats.is_rx_tooshort++; goto out; } /* * Bit of a cheat here, we use a pointer for a 3-address * frame format but don't reference fields past outside * ieee80211_frame_min w/o first validating the data is * present. */ wh = mtod(m, struct ieee80211_frame *); if ((wh->i_fc[0] & IEEE80211_FC0_VERSION_MASK) != IEEE80211_FC0_VERSION_0) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY, ni->ni_macaddr, NULL, "wrong version %x", wh->i_fc[0]); vap->iv_stats.is_rx_badversion++; goto err; } dir = wh->i_fc[1] & IEEE80211_FC1_DIR_MASK; type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; if ((ic->ic_flags & IEEE80211_F_SCAN) == 0) { IEEE80211_RSSI_LPF(ni->ni_avgrssi, rssi); ni->ni_noise = nf; if (HAS_SEQ(type)) { uint8_t tid = ieee80211_gettid(wh); if (IEEE80211_QOS_HAS_SEQ(wh) && TID_TO_WME_AC(tid) >= WME_AC_VI) ic->ic_wme.wme_hipri_traffic++; if (! ieee80211_check_rxseq(ni, wh, wh->i_addr1, rxs)) goto out; } } #ifdef IEEE80211_DEBUG /* * It's easier, but too expensive, to simulate different mesh * topologies by consulting the ACL policy very early, so do this * only under DEBUG. * * NB: this check is also done upon peering link initiation. */ if (vap->iv_acl != NULL && !vap->iv_acl->iac_check(vap, wh)) { IEEE80211_DISCARD(vap, IEEE80211_MSG_ACL, wh, NULL, "%s", "disallowed by ACL"); vap->iv_stats.is_rx_acl++; goto out; } #endif switch (type) { case IEEE80211_FC0_TYPE_DATA: if (ni == vap->iv_bss) goto out; if (ni->ni_mlstate != IEEE80211_NODE_MESH_ESTABLISHED) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_MESH, ni->ni_macaddr, NULL, "peer link not yet established (%d)", ni->ni_mlstate); vap->iv_stats.is_mesh_nolink++; goto out; } if (dir != IEEE80211_FC1_DIR_FROMDS && dir != IEEE80211_FC1_DIR_DSTODS) { IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, "data", "incorrect dir 0x%x", dir); vap->iv_stats.is_rx_wrongdir++; goto err; } /* All Mesh data frames are QoS subtype */ if (!HAS_SEQ(type)) { IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, "data", "incorrect subtype 0x%x", subtype); vap->iv_stats.is_rx_badsubtype++; goto err; } /* * Next up, any fragmentation. * XXX: we defrag before we even try to forward, * Mesh Control field is not present in sub-sequent * fragmented frames. This is in contrast to Draft 4.0. 
*/ hdrspace = ieee80211_hdrspace(ic, wh); if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { m = ieee80211_defrag(ni, m, hdrspace, 0); if (m == NULL) { /* Fragment dropped or frame not complete yet */ goto out; } } wh = mtod(m, struct ieee80211_frame *); /* NB: after defrag */ /* * Now we have a complete Mesh Data frame. */ /* * Only fromDStoDS data frames use 4 address qos frames * as specified in amendment. Otherwise addr4 is located * in the Mesh Control field and a 3 address qos frame * is used. */ *(uint16_t *)qos = *(uint16_t *)ieee80211_getqos(wh); /* * NB: The mesh STA sets the Mesh Control Present * subfield to 1 in the Mesh Data frame containing * an unfragmented MSDU, an A-MSDU, or the first * fragment of an MSDU. * After defrag it should always be present. */ if (!(qos[1] & IEEE80211_QOS_MC)) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_MESH, ni->ni_macaddr, NULL, "%s", "Mesh control field not present"); vap->iv_stats.is_rx_elem_missing++; /* XXX: kinda */ goto err; } /* pull up enough to get to the mesh control */ if (m->m_len < hdrspace + sizeof(struct ieee80211_meshcntl) && (m = m_pullup(m, hdrspace + sizeof(struct ieee80211_meshcntl))) == NULL) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY, ni->ni_macaddr, NULL, "data too short: expecting %u", hdrspace); vap->iv_stats.is_rx_tooshort++; goto out; /* XXX */ } /* * Now calculate the full extent of the headers. Note * mesh_decap will pull up anything we didn't get * above when it strips the 802.11 headers. */ mc = (const struct ieee80211_meshcntl *) (mtod(m, const uint8_t *) + hdrspace); ae = mc->mc_flags & IEEE80211_MESH_AE_MASK; meshdrlen = sizeof(struct ieee80211_meshcntl) + ae * IEEE80211_ADDR_LEN; hdrspace += meshdrlen; /* pull complete hdrspace = ieee80211_hdrspace + meshcontrol */ if ((meshdrlen > sizeof(struct ieee80211_meshcntl)) && (m->m_len < hdrspace) && ((m = m_pullup(m, hdrspace)) == NULL)) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY, ni->ni_macaddr, NULL, "data too short: expecting %u", hdrspace); vap->iv_stats.is_rx_tooshort++; goto out; /* XXX */ } /* XXX: are we sure there is no reallocating after m_pullup? */ seq = le32dec(mc->mc_seq); if (IEEE80211_IS_MULTICAST(wh->i_addr1)) addr = wh->i_addr3; else if (ae == IEEE80211_MESH_AE_01) addr = MC01(mc)->mc_addr4; else addr = ((struct ieee80211_qosframe_addr4 *)wh)->i_addr4; if (IEEE80211_ADDR_EQ(vap->iv_myaddr, addr)) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT, addr, "data", "%s", "not to me"); vap->iv_stats.is_rx_wrongbss++; /* XXX kinda */ goto out; } if (mesh_checkpseq(vap, addr, seq) != 0) { vap->iv_stats.is_rx_dup++; goto out; } /* This code "routes" the frame to the right control path */ if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { if (IEEE80211_ADDR_EQ(vap->iv_myaddr, wh->i_addr3)) error = mesh_recv_indiv_data_to_me(vap, m, wh, mc); else if (IEEE80211_IS_MULTICAST(wh->i_addr3)) error = mesh_recv_group_data(vap, m, wh, mc); else error = mesh_recv_indiv_data_to_fwrd(vap, m, wh, mc); } else error = mesh_recv_group_data(vap, m, wh, mc); if (error < 0) goto err; else if (error > 0) goto out; if (ieee80211_radiotap_active_vap(vap)) ieee80211_radiotap_rx(vap, m); need_tap = 0; /* * Finally, strip the 802.11 header. 
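 * mesh_decap() pulls off the 802.11 and Mesh Control headers and rebuilds an Ethernet header from the appropriate address fields.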
*/ m = mesh_decap(vap, m, hdrspace, meshdrlen); if (m == NULL) { /* XXX mask bit to check for both */ /* don't count Null data frames as errors */ if (subtype == IEEE80211_FC0_SUBTYPE_NODATA || subtype == IEEE80211_FC0_SUBTYPE_QOS_NULL) goto out; IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT, ni->ni_macaddr, "data", "%s", "decap error"); vap->iv_stats.is_rx_decap++; IEEE80211_NODE_STAT(ni, rx_decap); goto err; } if (qos[0] & IEEE80211_QOS_AMSDU) { m = ieee80211_decap_amsdu(ni, m); if (m == NULL) return IEEE80211_FC0_TYPE_DATA; } ieee80211_deliver_data(vap, ni, m); return type; case IEEE80211_FC0_TYPE_MGT: vap->iv_stats.is_rx_mgmt++; IEEE80211_NODE_STAT(ni, rx_mgmt); if (dir != IEEE80211_FC1_DIR_NODS) { IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, "mgt", "incorrect dir 0x%x", dir); vap->iv_stats.is_rx_wrongdir++; goto err; } if (m->m_pkthdr.len < sizeof(struct ieee80211_frame)) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY, ni->ni_macaddr, "mgt", "too short: len %u", m->m_pkthdr.len); vap->iv_stats.is_rx_tooshort++; goto out; } #ifdef IEEE80211_DEBUG if ((ieee80211_msg_debug(vap) && (vap->iv_ic->ic_flags & IEEE80211_F_SCAN)) || ieee80211_msg_dumppkts(vap)) { if_printf(ifp, "received %s from %s rssi %d\n", ieee80211_mgt_subtype_name(subtype), ether_sprintf(wh->i_addr2), rssi); } #endif if (IEEE80211_IS_PROTECTED(wh)) { IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, NULL, "%s", "WEP set but not permitted"); vap->iv_stats.is_rx_mgtdiscard++; /* XXX */ goto out; } vap->iv_recv_mgmt(ni, m, subtype, rxs, rssi, nf); goto out; case IEEE80211_FC0_TYPE_CTL: vap->iv_stats.is_rx_ctl++; IEEE80211_NODE_STAT(ni, rx_ctrl); goto out; default: IEEE80211_DISCARD(vap, IEEE80211_MSG_ANY, wh, "bad", "frame type 0x%x", type); /* should not come here */ break; } err: if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); out: if (m != NULL) { if (need_tap && ieee80211_radiotap_active_vap(vap)) ieee80211_radiotap_rx(vap, m); m_freem(m); } return type; #undef HAS_SEQ #undef MC01 } static void mesh_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m0, int subtype, const struct ieee80211_rx_stats *rxs, int rssi, int nf) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211_mesh_state *ms = vap->iv_mesh; struct ieee80211com *ic = ni->ni_ic; struct ieee80211_channel *rxchan = ic->ic_curchan; struct ieee80211_frame *wh; struct ieee80211_mesh_route *rt; uint8_t *frm, *efrm; wh = mtod(m0, struct ieee80211_frame *); frm = (uint8_t *)&wh[1]; efrm = mtod(m0, uint8_t *) + m0->m_len; switch (subtype) { case IEEE80211_FC0_SUBTYPE_PROBE_RESP: case IEEE80211_FC0_SUBTYPE_BEACON: { struct ieee80211_scanparams scan; struct ieee80211_channel *c; /* * We process beacon/probe response * frames to discover neighbors. */ if (rxs != NULL) { c = ieee80211_lookup_channel_rxstatus(vap, rxs); if (c != NULL) rxchan = c; } if (ieee80211_parse_beacon(ni, m0, rxchan, &scan) != 0) return; /* * Count frame now that we know it's to be processed. */ if (subtype == IEEE80211_FC0_SUBTYPE_BEACON) { vap->iv_stats.is_rx_beacon++; /* XXX remove */ IEEE80211_NODE_STAT(ni, rx_beacons); } else IEEE80211_NODE_STAT(ni, rx_proberesp); /* * If scanning, just pass information to the scan module. */ if (ic->ic_flags & IEEE80211_F_SCAN) { if (ic->ic_flags_ext & IEEE80211_FEXT_PROBECHAN) { /* * Actively scanning a channel marked passive; * send a probe request now that we know there * is 802.11 traffic present. 
* * XXX check if the beacon we recv'd gives * us what we need and suppress the probe req */ ieee80211_probe_curchan(vap, 1); ic->ic_flags_ext &= ~IEEE80211_FEXT_PROBECHAN; } ieee80211_add_scan(vap, rxchan, &scan, wh, subtype, rssi, nf); return; } /* The rest of this code assumes we are running */ if (vap->iv_state != IEEE80211_S_RUN) return; /* * Ignore non-mesh STAs. */ if ((scan.capinfo & (IEEE80211_CAPINFO_ESS|IEEE80211_CAPINFO_IBSS)) || scan.meshid == NULL || scan.meshconf == NULL) { IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, "beacon", "%s", "not a mesh sta"); vap->iv_stats.is_mesh_wrongmesh++; return; } /* * Ignore STAs for other mesh networks. */ if (memcmp(scan.meshid+2, ms->ms_id, ms->ms_idlen) != 0 || mesh_verify_meshconf(vap, scan.meshconf)) { IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, "beacon", "%s", "not for our mesh"); vap->iv_stats.is_mesh_wrongmesh++; return; } /* * Peer only based on the current ACL policy. */ if (vap->iv_acl != NULL && !vap->iv_acl->iac_check(vap, wh)) { IEEE80211_DISCARD(vap, IEEE80211_MSG_ACL, wh, NULL, "%s", "disallowed by ACL"); vap->iv_stats.is_rx_acl++; return; } /* * Do neighbor discovery. */ if (!IEEE80211_ADDR_EQ(wh->i_addr2, ni->ni_macaddr)) { /* * Create a new entry in the neighbor table. */ ni = ieee80211_add_neighbor(vap, wh, &scan); } /* * Automatically peer with discovered nodes if possible. */ if (ni != vap->iv_bss && (ms->ms_flags & IEEE80211_MESHFLAGS_AP)) { switch (ni->ni_mlstate) { case IEEE80211_NODE_MESH_IDLE: { uint16_t args[1]; /* Wait for backoff callout to reset counter */ if (ni->ni_mlhcnt >= ieee80211_mesh_maxholding) return; ni->ni_mlpid = mesh_generateid(vap); if (ni->ni_mlpid == 0) return; mesh_linkchange(ni, IEEE80211_NODE_MESH_OPENSNT); args[0] = ni->ni_mlpid; ieee80211_send_action(ni, IEEE80211_ACTION_CAT_SELF_PROT, IEEE80211_ACTION_MESHPEERING_OPEN, args); ni->ni_mlrcnt = 0; mesh_peer_timeout_setup(ni); break; } case IEEE80211_NODE_MESH_ESTABLISHED: { /* * Valid beacon from a peer mesh STA * bump TA lifetime */ rt = ieee80211_mesh_rt_find(vap, wh->i_addr2); if(rt != NULL) { ieee80211_mesh_rt_update(rt, ticks_to_msecs( ms->ms_ppath->mpp_inact)); } break; } default: break; /* ignore */ } } break; } case IEEE80211_FC0_SUBTYPE_PROBE_REQ: { uint8_t *ssid, *meshid, *rates, *xrates; if (vap->iv_state != IEEE80211_S_RUN) { IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, NULL, "wrong state %s", ieee80211_state_name[vap->iv_state]); vap->iv_stats.is_rx_mgtdiscard++; return; } if (IEEE80211_IS_MULTICAST(wh->i_addr2)) { /* frame must be directed */ IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, NULL, "%s", "not unicast"); vap->iv_stats.is_rx_mgtdiscard++; /* XXX stat */ return; } /* * prreq frame format * [tlv] ssid * [tlv] supported rates * [tlv] extended supported rates * [tlv] mesh id */ ssid = meshid = rates = xrates = NULL; while (efrm - frm > 1) { IEEE80211_VERIFY_LENGTH(efrm - frm, frm[1] + 2, return); switch (*frm) { case IEEE80211_ELEMID_SSID: ssid = frm; break; case IEEE80211_ELEMID_RATES: rates = frm; break; case IEEE80211_ELEMID_XRATES: xrates = frm; break; case IEEE80211_ELEMID_MESHID: meshid = frm; break; } frm += frm[1] + 2; } IEEE80211_VERIFY_ELEMENT(ssid, IEEE80211_NWID_LEN, return); IEEE80211_VERIFY_ELEMENT(rates, IEEE80211_RATE_MAXSIZE, return); if (xrates != NULL) IEEE80211_VERIFY_ELEMENT(xrates, IEEE80211_RATE_MAXSIZE - rates[1], return); if (meshid != NULL) { IEEE80211_VERIFY_ELEMENT(meshid, IEEE80211_MESHID_LEN, return); /* NB: meshid, not ssid */ IEEE80211_VERIFY_SSID(vap->iv_bss, meshid, return); } 
/* XXX find a better class or define it's own */ IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_INPUT, wh->i_addr2, "%s", "recv probe req"); /* * Some legacy 11b clients cannot hack a complete * probe response frame. When the request includes * only a bare-bones rate set, communicate this to * the transmit side. */ ieee80211_send_proberesp(vap, wh->i_addr2, 0); break; } case IEEE80211_FC0_SUBTYPE_ACTION: case IEEE80211_FC0_SUBTYPE_ACTION_NOACK: if (ni == vap->iv_bss) { IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, NULL, "%s", "unknown node"); vap->iv_stats.is_rx_mgtdiscard++; } else if (!IEEE80211_ADDR_EQ(vap->iv_myaddr, wh->i_addr1) && !IEEE80211_IS_MULTICAST(wh->i_addr1)) { IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, NULL, "%s", "not for us"); vap->iv_stats.is_rx_mgtdiscard++; } else if (vap->iv_state != IEEE80211_S_RUN) { IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, NULL, "wrong state %s", ieee80211_state_name[vap->iv_state]); vap->iv_stats.is_rx_mgtdiscard++; } else { if (ieee80211_parse_action(ni, m0) == 0) (void)ic->ic_recv_action(ni, wh, frm, efrm); } break; case IEEE80211_FC0_SUBTYPE_ASSOC_REQ: case IEEE80211_FC0_SUBTYPE_ASSOC_RESP: case IEEE80211_FC0_SUBTYPE_REASSOC_REQ: case IEEE80211_FC0_SUBTYPE_REASSOC_RESP: case IEEE80211_FC0_SUBTYPE_TIMING_ADV: case IEEE80211_FC0_SUBTYPE_ATIM: case IEEE80211_FC0_SUBTYPE_DISASSOC: case IEEE80211_FC0_SUBTYPE_AUTH: case IEEE80211_FC0_SUBTYPE_DEAUTH: IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, NULL, "%s", "not handled"); vap->iv_stats.is_rx_mgtdiscard++; break; default: IEEE80211_DISCARD(vap, IEEE80211_MSG_ANY, wh, "mgt", "subtype 0x%x not handled", subtype); vap->iv_stats.is_rx_badsubtype++; break; } } static void mesh_recv_ctl(struct ieee80211_node *ni, struct mbuf *m, int subtype) { switch (subtype) { case IEEE80211_FC0_SUBTYPE_BAR: ieee80211_recv_bar(ni, m); break; } } /* * Parse meshpeering action ie's for MPM frames */ static const struct ieee80211_meshpeer_ie * mesh_parse_meshpeering_action(struct ieee80211_node *ni, const struct ieee80211_frame *wh, /* XXX for VERIFY_LENGTH */ const uint8_t *frm, const uint8_t *efrm, struct ieee80211_meshpeer_ie *mp, uint8_t subtype) { struct ieee80211vap *vap = ni->ni_vap; const struct ieee80211_meshpeer_ie *mpie; uint16_t args[3]; const uint8_t *meshid, *meshconf; uint8_t sendclose = 0; /* 1 = MPM frame rejected, close will be sent */ meshid = meshconf = NULL; while (efrm - frm > 1) { IEEE80211_VERIFY_LENGTH(efrm - frm, frm[1] + 2, return NULL); switch (*frm) { case IEEE80211_ELEMID_MESHID: meshid = frm; break; case IEEE80211_ELEMID_MESHCONF: meshconf = frm; break; case IEEE80211_ELEMID_MESHPEER: mpie = (const struct ieee80211_meshpeer_ie *) frm; memset(mp, 0, sizeof(*mp)); mp->peer_len = mpie->peer_len; mp->peer_proto = le16dec(&mpie->peer_proto); mp->peer_llinkid = le16dec(&mpie->peer_llinkid); switch (subtype) { case IEEE80211_ACTION_MESHPEERING_CONFIRM: mp->peer_linkid = le16dec(&mpie->peer_linkid); break; case IEEE80211_ACTION_MESHPEERING_CLOSE: /* NB: peer link ID is optional */ if (mpie->peer_len == (IEEE80211_MPM_BASE_SZ + 2)) { mp->peer_linkid = 0; mp->peer_rcode = le16dec(&mpie->peer_linkid); } else { mp->peer_linkid = le16dec(&mpie->peer_linkid); mp->peer_rcode = le16dec(&mpie->peer_rcode); } break; } break; } frm += frm[1] + 2; } /* * Verify the contents of the frame. * If it fails validation, close the peer link. 
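 * A CLOSE is generated only from the OPENSNT, OPENRCV and CONFIRMRCV states; in IDLE, ESTABLISHED and HOLDING the offending frame is simply discarded.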
*/ if (mesh_verify_meshpeer(vap, subtype, (const uint8_t *)mp)) { sendclose = 1; IEEE80211_DISCARD(vap, IEEE80211_MSG_ACTION | IEEE80211_MSG_MESH, wh, NULL, "%s", "MPM validation failed"); } /* If the meshid is not the same, reject the frame regardless of its type. */ if (sendclose == 0 && mesh_verify_meshid(vap, meshid)) { sendclose = 1; IEEE80211_DISCARD(vap, IEEE80211_MSG_ACTION | IEEE80211_MSG_MESH, wh, NULL, "%s", "not for our mesh"); if (subtype == IEEE80211_ACTION_MESHPEERING_CLOSE) { /* * The standard is not clear about this; if we don't ignore it * there will be an endless loop of nodes sending * CLOSE frames to each other with the wrong meshid. * Discard it and the timers will bring the FSM to the IDLE state. */ return NULL; } } /* * Close frames are accepted if meshid is the same. * Verify the other two types. */ if (sendclose == 0 && subtype != IEEE80211_ACTION_MESHPEERING_CLOSE && mesh_verify_meshconf(vap, meshconf)) { sendclose = 1; IEEE80211_DISCARD(vap, IEEE80211_MSG_ACTION | IEEE80211_MSG_MESH, wh, NULL, "%s", "configuration mismatch"); } if (sendclose) { vap->iv_stats.is_rx_mgtdiscard++; switch (ni->ni_mlstate) { case IEEE80211_NODE_MESH_IDLE: case IEEE80211_NODE_MESH_ESTABLISHED: case IEEE80211_NODE_MESH_HOLDING: /* ignore */ break; case IEEE80211_NODE_MESH_OPENSNT: case IEEE80211_NODE_MESH_OPENRCV: case IEEE80211_NODE_MESH_CONFIRMRCV: args[0] = ni->ni_mlpid; args[1] = ni->ni_mllid; /* Reason codes for rejection */ switch (subtype) { case IEEE80211_ACTION_MESHPEERING_OPEN: args[2] = IEEE80211_REASON_MESH_CPVIOLATION; break; case IEEE80211_ACTION_MESHPEERING_CONFIRM: args[2] = IEEE80211_REASON_MESH_INCONS_PARAMS; break; } ieee80211_send_action(ni, IEEE80211_ACTION_CAT_SELF_PROT, IEEE80211_ACTION_MESHPEERING_CLOSE, args); mesh_linkchange(ni, IEEE80211_NODE_MESH_HOLDING); mesh_peer_timeout_setup(ni); break; } return NULL; } return (const struct ieee80211_meshpeer_ie *) mp; } static int mesh_recv_action_meshpeering_open(struct ieee80211_node *ni, const struct ieee80211_frame *wh, const uint8_t *frm, const uint8_t *efrm) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211_mesh_state *ms = vap->iv_mesh; struct ieee80211_meshpeer_ie ie; const struct ieee80211_meshpeer_ie *meshpeer; uint16_t args[3]; /* +2+2 for action + code + capabilities */ meshpeer = mesh_parse_meshpeering_action(ni, wh, frm+2+2, efrm, &ie, IEEE80211_ACTION_MESHPEERING_OPEN); if (meshpeer == NULL) { return 0; } /* XXX move up */ IEEE80211_NOTE(vap, IEEE80211_MSG_ACTION | IEEE80211_MSG_MESH, ni, "recv PEER OPEN, lid 0x%x", meshpeer->peer_llinkid); switch (ni->ni_mlstate) { case IEEE80211_NODE_MESH_IDLE: /* Reject open request if we have reached our maximum neighbor count */ if (ms->ms_neighbors >= IEEE80211_MESH_MAX_NEIGHBORS) { args[0] = meshpeer->peer_llinkid; args[1] = 0; args[2] = IEEE80211_REASON_MESH_MAX_PEERS; ieee80211_send_action(ni, IEEE80211_ACTION_CAT_SELF_PROT, IEEE80211_ACTION_MESHPEERING_CLOSE, args); /* stay in IDLE state */ return (0); } /* Open frame accepted */ mesh_linkchange(ni, IEEE80211_NODE_MESH_OPENRCV); ni->ni_mllid = meshpeer->peer_llinkid; ni->ni_mlpid = mesh_generateid(vap); if (ni->ni_mlpid == 0) return 0; /* XXX */ args[0] = ni->ni_mlpid; /* Announce we're open too... */ ieee80211_send_action(ni, IEEE80211_ACTION_CAT_SELF_PROT, IEEE80211_ACTION_MESHPEERING_OPEN, args); /* ...and confirm the link.
*/ args[0] = ni->ni_mlpid; args[1] = ni->ni_mllid; ieee80211_send_action(ni, IEEE80211_ACTION_CAT_SELF_PROT, IEEE80211_ACTION_MESHPEERING_CONFIRM, args); mesh_peer_timeout_setup(ni); break; case IEEE80211_NODE_MESH_OPENRCV: /* Wrong Link ID */ if (ni->ni_mllid != meshpeer->peer_llinkid) { args[0] = ni->ni_mllid; args[1] = ni->ni_mlpid; args[2] = IEEE80211_REASON_PEER_LINK_CANCELED; ieee80211_send_action(ni, IEEE80211_ACTION_CAT_SELF_PROT, IEEE80211_ACTION_MESHPEERING_CLOSE, args); mesh_linkchange(ni, IEEE80211_NODE_MESH_HOLDING); mesh_peer_timeout_setup(ni); break; } /* Duplicate open, confirm again. */ args[0] = ni->ni_mlpid; args[1] = ni->ni_mllid; ieee80211_send_action(ni, IEEE80211_ACTION_CAT_SELF_PROT, IEEE80211_ACTION_MESHPEERING_CONFIRM, args); break; case IEEE80211_NODE_MESH_OPENSNT: ni->ni_mllid = meshpeer->peer_llinkid; mesh_linkchange(ni, IEEE80211_NODE_MESH_OPENRCV); args[0] = ni->ni_mlpid; args[1] = ni->ni_mllid; ieee80211_send_action(ni, IEEE80211_ACTION_CAT_SELF_PROT, IEEE80211_ACTION_MESHPEERING_CONFIRM, args); /* NB: don't setup/clear any timeout */ break; case IEEE80211_NODE_MESH_CONFIRMRCV: if (ni->ni_mlpid != meshpeer->peer_linkid || ni->ni_mllid != meshpeer->peer_llinkid) { args[0] = ni->ni_mlpid; args[1] = ni->ni_mllid; args[2] = IEEE80211_REASON_PEER_LINK_CANCELED; ieee80211_send_action(ni, IEEE80211_ACTION_CAT_SELF_PROT, IEEE80211_ACTION_MESHPEERING_CLOSE, args); mesh_linkchange(ni, IEEE80211_NODE_MESH_HOLDING); mesh_peer_timeout_setup(ni); break; } mesh_linkchange(ni, IEEE80211_NODE_MESH_ESTABLISHED); ni->ni_mllid = meshpeer->peer_llinkid; args[0] = ni->ni_mlpid; args[1] = ni->ni_mllid; ieee80211_send_action(ni, IEEE80211_ACTION_CAT_SELF_PROT, IEEE80211_ACTION_MESHPEERING_CONFIRM, args); mesh_peer_timeout_stop(ni); break; case IEEE80211_NODE_MESH_ESTABLISHED: if (ni->ni_mllid != meshpeer->peer_llinkid) { args[0] = ni->ni_mllid; args[1] = ni->ni_mlpid; args[2] = IEEE80211_REASON_PEER_LINK_CANCELED; ieee80211_send_action(ni, IEEE80211_ACTION_CAT_SELF_PROT, IEEE80211_ACTION_MESHPEERING_CLOSE, args); mesh_linkchange(ni, IEEE80211_NODE_MESH_HOLDING); mesh_peer_timeout_setup(ni); break; } args[0] = ni->ni_mlpid; args[1] = ni->ni_mllid; ieee80211_send_action(ni, IEEE80211_ACTION_CAT_SELF_PROT, IEEE80211_ACTION_MESHPEERING_CONFIRM, args); break; case IEEE80211_NODE_MESH_HOLDING: args[0] = ni->ni_mlpid; args[1] = meshpeer->peer_llinkid; /* Standard not clear about what the reaason code should be */ args[2] = IEEE80211_REASON_PEER_LINK_CANCELED; ieee80211_send_action(ni, IEEE80211_ACTION_CAT_SELF_PROT, IEEE80211_ACTION_MESHPEERING_CLOSE, args); break; } return 0; } static int mesh_recv_action_meshpeering_confirm(struct ieee80211_node *ni, const struct ieee80211_frame *wh, const uint8_t *frm, const uint8_t *efrm) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211_meshpeer_ie ie; const struct ieee80211_meshpeer_ie *meshpeer; uint16_t args[3]; /* +2+2+2+2 for action + code + capabilites + status code + AID */ meshpeer = mesh_parse_meshpeering_action(ni, wh, frm+2+2+2+2, efrm, &ie, IEEE80211_ACTION_MESHPEERING_CONFIRM); if (meshpeer == NULL) { return 0; } IEEE80211_NOTE(vap, IEEE80211_MSG_ACTION | IEEE80211_MSG_MESH, ni, "recv PEER CONFIRM, local id 0x%x, peer id 0x%x", meshpeer->peer_llinkid, meshpeer->peer_linkid); switch (ni->ni_mlstate) { case IEEE80211_NODE_MESH_OPENRCV: mesh_linkchange(ni, IEEE80211_NODE_MESH_ESTABLISHED); mesh_peer_timeout_stop(ni); break; case IEEE80211_NODE_MESH_OPENSNT: mesh_linkchange(ni, IEEE80211_NODE_MESH_CONFIRMRCV); 
mesh_peer_timeout_setup(ni); break; case IEEE80211_NODE_MESH_HOLDING: args[0] = ni->ni_mlpid; args[1] = meshpeer->peer_llinkid; /* Standard not clear about what the reaason code should be */ args[2] = IEEE80211_REASON_PEER_LINK_CANCELED; ieee80211_send_action(ni, IEEE80211_ACTION_CAT_SELF_PROT, IEEE80211_ACTION_MESHPEERING_CLOSE, args); break; case IEEE80211_NODE_MESH_CONFIRMRCV: if (ni->ni_mllid != meshpeer->peer_llinkid) { args[0] = ni->ni_mlpid; args[1] = ni->ni_mllid; args[2] = IEEE80211_REASON_PEER_LINK_CANCELED; ieee80211_send_action(ni, IEEE80211_ACTION_CAT_SELF_PROT, IEEE80211_ACTION_MESHPEERING_CLOSE, args); mesh_linkchange(ni, IEEE80211_NODE_MESH_HOLDING); mesh_peer_timeout_setup(ni); } break; default: IEEE80211_DISCARD(vap, IEEE80211_MSG_ACTION | IEEE80211_MSG_MESH, wh, NULL, "received confirm in invalid state %d", ni->ni_mlstate); vap->iv_stats.is_rx_mgtdiscard++; break; } return 0; } static int mesh_recv_action_meshpeering_close(struct ieee80211_node *ni, const struct ieee80211_frame *wh, const uint8_t *frm, const uint8_t *efrm) { struct ieee80211_meshpeer_ie ie; const struct ieee80211_meshpeer_ie *meshpeer; uint16_t args[3]; /* +2 for action + code */ meshpeer = mesh_parse_meshpeering_action(ni, wh, frm+2, efrm, &ie, IEEE80211_ACTION_MESHPEERING_CLOSE); if (meshpeer == NULL) { return 0; } /* * XXX: check reason code, for example we could receive * IEEE80211_REASON_MESH_MAX_PEERS then we should not attempt * to peer again. */ IEEE80211_NOTE(ni->ni_vap, IEEE80211_MSG_ACTION | IEEE80211_MSG_MESH, ni, "%s", "recv PEER CLOSE"); switch (ni->ni_mlstate) { case IEEE80211_NODE_MESH_IDLE: /* ignore */ break; case IEEE80211_NODE_MESH_OPENRCV: case IEEE80211_NODE_MESH_OPENSNT: case IEEE80211_NODE_MESH_CONFIRMRCV: case IEEE80211_NODE_MESH_ESTABLISHED: args[0] = ni->ni_mlpid; args[1] = ni->ni_mllid; args[2] = IEEE80211_REASON_MESH_CLOSE_RCVD; ieee80211_send_action(ni, IEEE80211_ACTION_CAT_SELF_PROT, IEEE80211_ACTION_MESHPEERING_CLOSE, args); mesh_linkchange(ni, IEEE80211_NODE_MESH_HOLDING); mesh_peer_timeout_setup(ni); break; case IEEE80211_NODE_MESH_HOLDING: mesh_linkchange(ni, IEEE80211_NODE_MESH_IDLE); mesh_peer_timeout_stop(ni); break; } return 0; } /* * Link Metric handling. */ static int mesh_recv_action_meshlmetric(struct ieee80211_node *ni, const struct ieee80211_frame *wh, const uint8_t *frm, const uint8_t *efrm) { const struct ieee80211_meshlmetric_ie *ie = (const struct ieee80211_meshlmetric_ie *) (frm+2); /* action + code */ struct ieee80211_meshlmetric_ie lm_rep; if (ie->lm_flags & IEEE80211_MESH_LMETRIC_FLAGS_REQ) { lm_rep.lm_flags = 0; lm_rep.lm_metric = mesh_airtime_calc(ni); ieee80211_send_action(ni, IEEE80211_ACTION_CAT_MESH, IEEE80211_ACTION_MESH_LMETRIC, &lm_rep); } /* XXX: else do nothing for now */ return 0; } /* * Parse meshgate action ie's for GANN frames. * Returns -1 if parsing fails, otherwise 0. 
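 * Multi-byte GANN fields (sequence number, interval) are little-endian on the wire and are converted to host order here.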
*/ static int mesh_parse_meshgate_action(struct ieee80211_node *ni, const struct ieee80211_frame *wh, /* XXX for VERIFY_LENGTH */ struct ieee80211_meshgann_ie *ie, const uint8_t *frm, const uint8_t *efrm) { struct ieee80211vap *vap = ni->ni_vap; const struct ieee80211_meshgann_ie *gannie; while (efrm - frm > 1) { IEEE80211_VERIFY_LENGTH(efrm - frm, frm[1] + 2, return -1); switch (*frm) { case IEEE80211_ELEMID_MESHGANN: gannie = (const struct ieee80211_meshgann_ie *) frm; memset(ie, 0, sizeof(*ie)); ie->gann_ie = gannie->gann_ie; ie->gann_len = gannie->gann_len; ie->gann_flags = gannie->gann_flags; ie->gann_hopcount = gannie->gann_hopcount; ie->gann_ttl = gannie->gann_ttl; IEEE80211_ADDR_COPY(ie->gann_addr, gannie->gann_addr); ie->gann_seq = le32dec(&gannie->gann_seq); ie->gann_interval = le16dec(&gannie->gann_interval); break; } frm += frm[1] + 2; } return 0; } /* * Mesh Gate Announcement handling. */ static int mesh_recv_action_meshgate(struct ieee80211_node *ni, const struct ieee80211_frame *wh, const uint8_t *frm, const uint8_t *efrm) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211_mesh_state *ms = vap->iv_mesh; struct ieee80211_mesh_gate_route *gr, *next; struct ieee80211_mesh_route *rt_gate; struct ieee80211_meshgann_ie pgann; struct ieee80211_meshgann_ie ie; int found = 0; /* +2 for action + code */ if (mesh_parse_meshgate_action(ni, wh, &ie, frm+2, efrm) != 0) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_MESH, ni->ni_macaddr, NULL, "%s", "GANN parsing failed"); vap->iv_stats.is_rx_mgtdiscard++; return (0); } if (IEEE80211_ADDR_EQ(vap->iv_myaddr, ie.gann_addr)) return 0; IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_MESH, ni->ni_macaddr, "received GANN, meshgate: %6D (seq %u)", ie.gann_addr, ":", ie.gann_seq); if (ms == NULL) return (0); MESH_RT_LOCK(ms); TAILQ_FOREACH_SAFE(gr, &ms->ms_known_gates, gr_next, next) { if (!IEEE80211_ADDR_EQ(gr->gr_addr, ie.gann_addr)) continue; if (ie.gann_seq <= gr->gr_lastseq) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_MESH, ni->ni_macaddr, NULL, "GANN old seqno %u <= %u", ie.gann_seq, gr->gr_lastseq); MESH_RT_UNLOCK(ms); return (0); } /* corresponding mesh gate found & GANN accepted */ found = 1; break; } if (found == 0) { /* this GANN is from a new mesh Gate add it to known table. 
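 * XXX: the new entry is allocated with IEEE80211_M_NOWAIT and, as in ieee80211_mesh_mark_gate(), the result is not checked for NULL before use.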
*/ IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_MESH, ie.gann_addr, "stored new GANN information, seq %u.", ie.gann_seq); gr = IEEE80211_MALLOC(ALIGN(sizeof(struct ieee80211_mesh_gate_route)), M_80211_MESH_GT_RT, IEEE80211_M_NOWAIT | IEEE80211_M_ZERO); IEEE80211_ADDR_COPY(gr->gr_addr, ie.gann_addr); TAILQ_INSERT_TAIL(&ms->ms_known_gates, gr, gr_next); } gr->gr_lastseq = ie.gann_seq; /* check if we have a path to this gate */ rt_gate = mesh_rt_find_locked(ms, gr->gr_addr); if (rt_gate != NULL && rt_gate->rt_flags & IEEE80211_MESHRT_FLAGS_VALID) { gr->gr_route = rt_gate; rt_gate->rt_flags |= IEEE80211_MESHRT_FLAGS_GATE; } MESH_RT_UNLOCK(ms); /* propagate only if decremented ttl >= 1 && forwarding is enabled */ if ((ie.gann_ttl - 1) < 1 && !(ms->ms_flags & IEEE80211_MESHFLAGS_FWD)) return 0; pgann.gann_flags = ie.gann_flags; /* Reserved */ pgann.gann_hopcount = ie.gann_hopcount + 1; pgann.gann_ttl = ie.gann_ttl - 1; IEEE80211_ADDR_COPY(pgann.gann_addr, ie.gann_addr); pgann.gann_seq = ie.gann_seq; pgann.gann_interval = ie.gann_interval; IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_MESH, ie.gann_addr, "%s", "propagate GANN"); ieee80211_send_action(vap->iv_bss, IEEE80211_ACTION_CAT_MESH, IEEE80211_ACTION_MESH_GANN, &pgann); return 0; } static int mesh_send_action(struct ieee80211_node *ni, const uint8_t sa[IEEE80211_ADDR_LEN], const uint8_t da[IEEE80211_ADDR_LEN], struct mbuf *m) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct ieee80211_bpf_params params; int ret; KASSERT(ni != NULL, ("null node")); if (vap->iv_state == IEEE80211_S_CAC) { IEEE80211_NOTE(vap, IEEE80211_MSG_OUTPUT, ni, "block %s frame in CAC state", "Mesh action"); vap->iv_stats.is_tx_badstate++; ieee80211_free_node(ni); m_freem(m); return EIO; /* XXX */ } - M_PREPEND(m, sizeof(struct ieee80211_frame), M_NOWAIT); + M_PREPEND(m, sizeof(struct ieee80211_frame), IEEE80211_M_NOWAIT); if (m == NULL) { ieee80211_free_node(ni); return ENOMEM; } IEEE80211_TX_LOCK(ic); ieee80211_send_setup(ni, m, IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_ACTION, IEEE80211_NONQOS_TID, sa, da, sa); m->m_flags |= M_ENCAP; /* mark encapsulated */ memset(&params, 0, sizeof(params)); params.ibp_pri = WME_AC_VO; params.ibp_rate0 = ni->ni_txparms->mgmtrate; if (IEEE80211_IS_MULTICAST(da)) params.ibp_try0 = 1; else params.ibp_try0 = ni->ni_txparms->maxretry; params.ibp_power = ni->ni_txpower; IEEE80211_NODE_STAT(ni, tx_mgmt); ret = ieee80211_raw_output(vap, ni, m, &params); IEEE80211_TX_UNLOCK(ic); return (ret); } #define ADDSHORT(frm, v) do { \ frm[0] = (v) & 0xff; \ frm[1] = (v) >> 8; \ frm += 2; \ } while (0) #define ADDWORD(frm, v) do { \ frm[0] = (v) & 0xff; \ frm[1] = ((v) >> 8) & 0xff; \ frm[2] = ((v) >> 16) & 0xff; \ frm[3] = ((v) >> 24) & 0xff; \ frm += 4; \ } while (0) static int mesh_send_action_meshpeering_open(struct ieee80211_node *ni, int category, int action, void *args0) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; uint16_t *args = args0; const struct ieee80211_rateset *rs; struct mbuf *m; uint8_t *frm; IEEE80211_NOTE(vap, IEEE80211_MSG_ACTION | IEEE80211_MSG_MESH, ni, "send PEER OPEN action: localid 0x%x", args[0]); IEEE80211_DPRINTF(vap, IEEE80211_MSG_NODE, "ieee80211_ref_node (%s:%u) %p<%s> refcnt %d\n", __func__, __LINE__, ni, ether_sprintf(ni->ni_macaddr), ieee80211_node_refcnt(ni)+1); ieee80211_ref_node(ni); m = ieee80211_getmgtframe(&frm, ic->ic_headroom + sizeof(struct ieee80211_frame), sizeof(uint16_t) /* action+category */ + sizeof(uint16_t) /* capabilities */ + 2 + IEEE80211_RATE_SIZE +
2 + (IEEE80211_RATE_MAXSIZE - IEEE80211_RATE_SIZE) + 2 + IEEE80211_MESHID_LEN + sizeof(struct ieee80211_meshconf_ie) + sizeof(struct ieee80211_meshpeer_ie) ); if (m != NULL) { /* * mesh peer open action frame format: * [1] category * [1] action * [2] capabilities * [tlv] rates * [tlv] xrates * [tlv] mesh id * [tlv] mesh conf * [tlv] mesh peer link mgmt */ *frm++ = category; *frm++ = action; ADDSHORT(frm, ieee80211_getcapinfo(vap, ni->ni_chan)); rs = ieee80211_get_suprates(ic, ic->ic_curchan); frm = ieee80211_add_rates(frm, rs); frm = ieee80211_add_xrates(frm, rs); frm = ieee80211_add_meshid(frm, vap); frm = ieee80211_add_meshconf(frm, vap); frm = ieee80211_add_meshpeer(frm, IEEE80211_ACTION_MESHPEERING_OPEN, args[0], 0, 0); m->m_pkthdr.len = m->m_len = frm - mtod(m, uint8_t *); return mesh_send_action(ni, vap->iv_myaddr, ni->ni_macaddr, m); } else { vap->iv_stats.is_tx_nobuf++; ieee80211_free_node(ni); return ENOMEM; } } static int mesh_send_action_meshpeering_confirm(struct ieee80211_node *ni, int category, int action, void *args0) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; uint16_t *args = args0; const struct ieee80211_rateset *rs; struct mbuf *m; uint8_t *frm; IEEE80211_NOTE(vap, IEEE80211_MSG_ACTION | IEEE80211_MSG_MESH, ni, "send PEER CONFIRM action: localid 0x%x, peerid 0x%x", args[0], args[1]); IEEE80211_DPRINTF(vap, IEEE80211_MSG_NODE, "ieee80211_ref_node (%s:%u) %p<%s> refcnt %d\n", __func__, __LINE__, ni, ether_sprintf(ni->ni_macaddr), ieee80211_node_refcnt(ni)+1); ieee80211_ref_node(ni); m = ieee80211_getmgtframe(&frm, ic->ic_headroom + sizeof(struct ieee80211_frame), sizeof(uint16_t) /* action+category */ + sizeof(uint16_t) /* capabilites */ + sizeof(uint16_t) /* status code */ + sizeof(uint16_t) /* AID */ + 2 + IEEE80211_RATE_SIZE + 2 + (IEEE80211_RATE_MAXSIZE - IEEE80211_RATE_SIZE) + 2 + IEEE80211_MESHID_LEN + sizeof(struct ieee80211_meshconf_ie) + sizeof(struct ieee80211_meshpeer_ie) ); if (m != NULL) { /* * mesh peer confirm action frame format: * [1] category * [1] action * [2] capabilities * [2] status code * [2] association id (peer ID) * [tlv] rates * [tlv] xrates * [tlv] mesh id * [tlv] mesh conf * [tlv] mesh peer link mgmt */ *frm++ = category; *frm++ = action; ADDSHORT(frm, ieee80211_getcapinfo(vap, ni->ni_chan)); ADDSHORT(frm, 0); /* status code */ ADDSHORT(frm, args[1]); /* AID */ rs = ieee80211_get_suprates(ic, ic->ic_curchan); frm = ieee80211_add_rates(frm, rs); frm = ieee80211_add_xrates(frm, rs); frm = ieee80211_add_meshid(frm, vap); frm = ieee80211_add_meshconf(frm, vap); frm = ieee80211_add_meshpeer(frm, IEEE80211_ACTION_MESHPEERING_CONFIRM, args[0], args[1], 0); m->m_pkthdr.len = m->m_len = frm - mtod(m, uint8_t *); return mesh_send_action(ni, vap->iv_myaddr, ni->ni_macaddr, m); } else { vap->iv_stats.is_tx_nobuf++; ieee80211_free_node(ni); return ENOMEM; } } static int mesh_send_action_meshpeering_close(struct ieee80211_node *ni, int category, int action, void *args0) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; uint16_t *args = args0; struct mbuf *m; uint8_t *frm; IEEE80211_NOTE(vap, IEEE80211_MSG_ACTION | IEEE80211_MSG_MESH, ni, "send PEER CLOSE action: localid 0x%x, peerid 0x%x reason %d (%s)", args[0], args[1], args[2], ieee80211_reason_to_string(args[2])); IEEE80211_DPRINTF(vap, IEEE80211_MSG_NODE, "ieee80211_ref_node (%s:%u) %p<%s> refcnt %d\n", __func__, __LINE__, ni, ether_sprintf(ni->ni_macaddr), ieee80211_node_refcnt(ni)+1); ieee80211_ref_node(ni); m = ieee80211_getmgtframe(&frm, 
ic->ic_headroom + sizeof(struct ieee80211_frame), sizeof(uint16_t) /* action+category */ + sizeof(uint16_t) /* reason code */ + 2 + IEEE80211_MESHID_LEN + sizeof(struct ieee80211_meshpeer_ie) ); if (m != NULL) { /* * mesh peer close action frame format: * [1] category * [1] action * [tlv] mesh id * [tlv] mesh peer link mgmt */ *frm++ = category; *frm++ = action; frm = ieee80211_add_meshid(frm, vap); frm = ieee80211_add_meshpeer(frm, IEEE80211_ACTION_MESHPEERING_CLOSE, args[0], args[1], args[2]); m->m_pkthdr.len = m->m_len = frm - mtod(m, uint8_t *); return mesh_send_action(ni, vap->iv_myaddr, ni->ni_macaddr, m); } else { vap->iv_stats.is_tx_nobuf++; ieee80211_free_node(ni); return ENOMEM; } } static int mesh_send_action_meshlmetric(struct ieee80211_node *ni, int category, int action, void *arg0) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct ieee80211_meshlmetric_ie *ie = arg0; struct mbuf *m; uint8_t *frm; if (ie->lm_flags & IEEE80211_MESH_LMETRIC_FLAGS_REQ) { IEEE80211_NOTE(vap, IEEE80211_MSG_ACTION | IEEE80211_MSG_MESH, ni, "%s", "send LINK METRIC REQUEST action"); } else { IEEE80211_NOTE(vap, IEEE80211_MSG_ACTION | IEEE80211_MSG_MESH, ni, "send LINK METRIC REPLY action: metric 0x%x", ie->lm_metric); } IEEE80211_DPRINTF(vap, IEEE80211_MSG_NODE, "ieee80211_ref_node (%s:%u) %p<%s> refcnt %d\n", __func__, __LINE__, ni, ether_sprintf(ni->ni_macaddr), ieee80211_node_refcnt(ni)+1); ieee80211_ref_node(ni); m = ieee80211_getmgtframe(&frm, ic->ic_headroom + sizeof(struct ieee80211_frame), sizeof(uint16_t) + /* action+category */ sizeof(struct ieee80211_meshlmetric_ie) ); if (m != NULL) { /* * mesh link metric * [1] category * [1] action * [tlv] mesh link metric */ *frm++ = category; *frm++ = action; frm = ieee80211_add_meshlmetric(frm, ie->lm_flags, ie->lm_metric); m->m_pkthdr.len = m->m_len = frm - mtod(m, uint8_t *); return mesh_send_action(ni, vap->iv_myaddr, ni->ni_macaddr, m); } else { vap->iv_stats.is_tx_nobuf++; ieee80211_free_node(ni); return ENOMEM; } } static int mesh_send_action_meshgate(struct ieee80211_node *ni, int category, int action, void *arg0) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct ieee80211_meshgann_ie *ie = arg0; struct mbuf *m; uint8_t *frm; IEEE80211_DPRINTF(vap, IEEE80211_MSG_NODE, "ieee80211_ref_node (%s:%u) %p<%s> refcnt %d\n", __func__, __LINE__, ni, ether_sprintf(ni->ni_macaddr), ieee80211_node_refcnt(ni)+1); ieee80211_ref_node(ni); m = ieee80211_getmgtframe(&frm, ic->ic_headroom + sizeof(struct ieee80211_frame), sizeof(uint16_t) + /* action+category */ IEEE80211_MESHGANN_BASE_SZ ); if (m != NULL) { /* * mesh gate announcement * [1] category * [1] action * [tlv] mesh gate announcement */ *frm++ = category; *frm++ = action; frm = ieee80211_add_meshgate(frm, ie); m->m_pkthdr.len = m->m_len = frm - mtod(m, uint8_t *); return mesh_send_action(ni, vap->iv_myaddr, broadcastaddr, m); } else { vap->iv_stats.is_tx_nobuf++; ieee80211_free_node(ni); return ENOMEM; } } static void mesh_peer_timeout_setup(struct ieee80211_node *ni) { switch (ni->ni_mlstate) { case IEEE80211_NODE_MESH_HOLDING: ni->ni_mltval = ieee80211_mesh_holdingtimeout; break; case IEEE80211_NODE_MESH_CONFIRMRCV: ni->ni_mltval = ieee80211_mesh_confirmtimeout; break; case IEEE80211_NODE_MESH_IDLE: ni->ni_mltval = 0; break; default: ni->ni_mltval = ieee80211_mesh_retrytimeout; break; } if (ni->ni_mltval) callout_reset(&ni->ni_mltimer, ni->ni_mltval, mesh_peer_timeout_cb, ni); } /* * Same as above but backs off the timer statistically by ~50%.
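 *
 * A worked example (illustrative numbers only, not from the spec): if
 * ni_mltval is currently 40 ticks, then r % ni_mltval below is uniform
 * over [0, 39], so the new timeout lands in [40, 79] ticks, an average
 * increase of roughly 50%.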
*/ static void mesh_peer_timeout_backoff(struct ieee80211_node *ni) { uint32_t r; r = arc4random(); ni->ni_mltval += r % ni->ni_mltval; callout_reset(&ni->ni_mltimer, ni->ni_mltval, mesh_peer_timeout_cb, ni); } static __inline void mesh_peer_timeout_stop(struct ieee80211_node *ni) { callout_drain(&ni->ni_mltimer); } static void mesh_peer_backoff_cb(void *arg) { struct ieee80211_node *ni = (struct ieee80211_node *)arg; /* After backoff timeout, try to peer automatically again. */ ni->ni_mlhcnt = 0; } /* * Mesh Peer Link Management FSM timeout handling. */ static void mesh_peer_timeout_cb(void *arg) { struct ieee80211_node *ni = (struct ieee80211_node *)arg; uint16_t args[3]; IEEE80211_NOTE(ni->ni_vap, IEEE80211_MSG_MESH, ni, "mesh link timeout, state %d, retry counter %d", ni->ni_mlstate, ni->ni_mlrcnt); switch (ni->ni_mlstate) { case IEEE80211_NODE_MESH_IDLE: case IEEE80211_NODE_MESH_ESTABLISHED: break; case IEEE80211_NODE_MESH_OPENSNT: case IEEE80211_NODE_MESH_OPENRCV: if (ni->ni_mlrcnt == ieee80211_mesh_maxretries) { args[0] = ni->ni_mlpid; args[2] = IEEE80211_REASON_MESH_MAX_RETRIES; ieee80211_send_action(ni, IEEE80211_ACTION_CAT_SELF_PROT, IEEE80211_ACTION_MESHPEERING_CLOSE, args); ni->ni_mlrcnt = 0; mesh_linkchange(ni, IEEE80211_NODE_MESH_HOLDING); mesh_peer_timeout_setup(ni); } else { args[0] = ni->ni_mlpid; ieee80211_send_action(ni, IEEE80211_ACTION_CAT_SELF_PROT, IEEE80211_ACTION_MESHPEERING_OPEN, args); ni->ni_mlrcnt++; mesh_peer_timeout_backoff(ni); } break; case IEEE80211_NODE_MESH_CONFIRMRCV: args[0] = ni->ni_mlpid; args[2] = IEEE80211_REASON_MESH_CONFIRM_TIMEOUT; ieee80211_send_action(ni, IEEE80211_ACTION_CAT_SELF_PROT, IEEE80211_ACTION_MESHPEERING_CLOSE, args); mesh_linkchange(ni, IEEE80211_NODE_MESH_HOLDING); mesh_peer_timeout_setup(ni); break; case IEEE80211_NODE_MESH_HOLDING: ni->ni_mlhcnt++; if (ni->ni_mlhcnt >= ieee80211_mesh_maxholding) callout_reset(&ni->ni_mlhtimer, ieee80211_mesh_backofftimeout, mesh_peer_backoff_cb, ni); mesh_linkchange(ni, IEEE80211_NODE_MESH_IDLE); break; } } static int mesh_verify_meshid(struct ieee80211vap *vap, const uint8_t *ie) { struct ieee80211_mesh_state *ms = vap->iv_mesh; if (ie == NULL || ie[1] != ms->ms_idlen) return 1; return memcmp(ms->ms_id, ie + 2, ms->ms_idlen); } /* * Check if we are using the same algorithms for this mesh. 
*/ static int mesh_verify_meshconf(struct ieee80211vap *vap, const uint8_t *ie) { const struct ieee80211_meshconf_ie *meshconf = (const struct ieee80211_meshconf_ie *) ie; const struct ieee80211_mesh_state *ms = vap->iv_mesh; if (meshconf == NULL) return 1; if (meshconf->conf_pselid != ms->ms_ppath->mpp_ie) { IEEE80211_DPRINTF(vap, IEEE80211_MSG_MESH, "unknown path selection algorithm: 0x%x\n", meshconf->conf_pselid); return 1; } if (meshconf->conf_pmetid != ms->ms_pmetric->mpm_ie) { IEEE80211_DPRINTF(vap, IEEE80211_MSG_MESH, "unknown path metric algorithm: 0x%x\n", meshconf->conf_pmetid); return 1; } if (meshconf->conf_ccid != 0) { IEEE80211_DPRINTF(vap, IEEE80211_MSG_MESH, "unknown congestion control algorithm: 0x%x\n", meshconf->conf_ccid); return 1; } if (meshconf->conf_syncid != IEEE80211_MESHCONF_SYNC_NEIGHOFF) { IEEE80211_DPRINTF(vap, IEEE80211_MSG_MESH, "unknown sync algorithm: 0x%x\n", meshconf->conf_syncid); return 1; } if (meshconf->conf_authid != 0) { IEEE80211_DPRINTF(vap, IEEE80211_MSG_MESH, "unknown auth auth algorithm: 0x%x\n", meshconf->conf_pselid); return 1; } /* Not accepting peers */ if (!(meshconf->conf_cap & IEEE80211_MESHCONF_CAP_AP)) { IEEE80211_DPRINTF(vap, IEEE80211_MSG_MESH, "not accepting peers: 0x%x\n", meshconf->conf_cap); return 1; } return 0; } static int mesh_verify_meshpeer(struct ieee80211vap *vap, uint8_t subtype, const uint8_t *ie) { const struct ieee80211_meshpeer_ie *meshpeer = (const struct ieee80211_meshpeer_ie *) ie; if (meshpeer == NULL || meshpeer->peer_len < IEEE80211_MPM_BASE_SZ || meshpeer->peer_len > IEEE80211_MPM_MAX_SZ) return 1; if (meshpeer->peer_proto != IEEE80211_MPPID_MPM) { IEEE80211_DPRINTF(vap, IEEE80211_MSG_ACTION | IEEE80211_MSG_MESH, "Only MPM protocol is supported (proto: 0x%02X)", meshpeer->peer_proto); return 1; } switch (subtype) { case IEEE80211_ACTION_MESHPEERING_OPEN: if (meshpeer->peer_len != IEEE80211_MPM_BASE_SZ) return 1; break; case IEEE80211_ACTION_MESHPEERING_CONFIRM: if (meshpeer->peer_len != IEEE80211_MPM_BASE_SZ + 2) return 1; break; case IEEE80211_ACTION_MESHPEERING_CLOSE: if (meshpeer->peer_len < IEEE80211_MPM_BASE_SZ + 2) return 1; if (meshpeer->peer_len == (IEEE80211_MPM_BASE_SZ + 2) && meshpeer->peer_linkid != 0) return 1; if (meshpeer->peer_rcode == 0) return 1; break; } return 0; } /* * Add a Mesh ID IE to a frame. */ uint8_t * ieee80211_add_meshid(uint8_t *frm, struct ieee80211vap *vap) { struct ieee80211_mesh_state *ms = vap->iv_mesh; KASSERT(vap->iv_opmode == IEEE80211_M_MBSS, ("not a mbss vap")); *frm++ = IEEE80211_ELEMID_MESHID; *frm++ = ms->ms_idlen; memcpy(frm, ms->ms_id, ms->ms_idlen); return frm + ms->ms_idlen; } /* * Add a Mesh Configuration IE to a frame. * For now just use HWMP routing, Airtime link metric, Null Congestion * Signaling, Null Sync Protocol and Null Authentication. */ uint8_t * ieee80211_add_meshconf(uint8_t *frm, struct ieee80211vap *vap) { const struct ieee80211_mesh_state *ms = vap->iv_mesh; uint16_t caps; KASSERT(vap->iv_opmode == IEEE80211_M_MBSS, ("not a MBSS vap")); *frm++ = IEEE80211_ELEMID_MESHCONF; *frm++ = IEEE80211_MESH_CONF_SZ; *frm++ = ms->ms_ppath->mpp_ie; /* path selection */ *frm++ = ms->ms_pmetric->mpm_ie; /* link metric */ *frm++ = IEEE80211_MESHCONF_CC_DISABLED; *frm++ = IEEE80211_MESHCONF_SYNC_NEIGHOFF; *frm++ = IEEE80211_MESHCONF_AUTH_DISABLED; /* NB: set the number of neighbors before the rest */ *frm = (ms->ms_neighbors > IEEE80211_MESH_MAX_NEIGHBORS ? 
IEEE80211_MESH_MAX_NEIGHBORS : ms->ms_neighbors) << 1; if (ms->ms_flags & IEEE80211_MESHFLAGS_GATE) *frm |= IEEE80211_MESHCONF_FORM_GATE; frm += 1; caps = 0; if (ms->ms_flags & IEEE80211_MESHFLAGS_AP) caps |= IEEE80211_MESHCONF_CAP_AP; if (ms->ms_flags & IEEE80211_MESHFLAGS_FWD) caps |= IEEE80211_MESHCONF_CAP_FWRD; *frm++ = caps; return frm; } /* * Add a Mesh Peer Management IE to a frame. */ uint8_t * ieee80211_add_meshpeer(uint8_t *frm, uint8_t subtype, uint16_t localid, uint16_t peerid, uint16_t reason) { KASSERT(localid != 0, ("localid == 0")); *frm++ = IEEE80211_ELEMID_MESHPEER; switch (subtype) { case IEEE80211_ACTION_MESHPEERING_OPEN: *frm++ = IEEE80211_MPM_BASE_SZ; /* length */ ADDSHORT(frm, IEEE80211_MPPID_MPM); /* proto */ ADDSHORT(frm, localid); /* local ID */ break; case IEEE80211_ACTION_MESHPEERING_CONFIRM: KASSERT(peerid != 0, ("sending peer confirm without peer id")); *frm++ = IEEE80211_MPM_BASE_SZ + 2; /* length */ ADDSHORT(frm, IEEE80211_MPPID_MPM); /* proto */ ADDSHORT(frm, localid); /* local ID */ ADDSHORT(frm, peerid); /* peer ID */ break; case IEEE80211_ACTION_MESHPEERING_CLOSE: if (peerid) *frm++ = IEEE80211_MPM_MAX_SZ; /* length */ else *frm++ = IEEE80211_MPM_BASE_SZ + 2; /* length */ ADDSHORT(frm, IEEE80211_MPPID_MPM); /* proto */ ADDSHORT(frm, localid); /* local ID */ if (peerid) ADDSHORT(frm, peerid); /* peer ID */ ADDSHORT(frm, reason); break; } return frm; } /* * Compute an Airtime Link Metric for the link with this node. * * Based on Draft 3.0 spec (11B.10, p.149). */ /* * Max 802.11s overhead. */ #define IEEE80211_MESH_MAXOVERHEAD \ (sizeof(struct ieee80211_qosframe_addr4) \ + sizeof(struct ieee80211_meshcntl_ae10) \ + sizeof(struct llc) \ + IEEE80211_ADDR_LEN \ + IEEE80211_WEP_IVLEN \ + IEEE80211_WEP_KIDLEN \ + IEEE80211_WEP_CRCLEN \ + IEEE80211_WEP_MICLEN \ + IEEE80211_CRC_LEN) uint32_t mesh_airtime_calc(struct ieee80211_node *ni) { #define M_BITS 8 #define S_FACTOR (2 * M_BITS) struct ieee80211com *ic = ni->ni_ic; struct ifnet *ifp = ni->ni_vap->iv_ifp; const static int nbits = 8192 << M_BITS; uint32_t overhead, rate, errrate; uint64_t res; /* Time to transmit a frame */ rate = ni->ni_txrate; overhead = ieee80211_compute_duration(ic->ic_rt, ifp->if_mtu + IEEE80211_MESH_MAXOVERHEAD, rate, 0) << M_BITS; /* Error rate in percentage */ /* XXX assuming small failures are ok */ errrate = (((ifp->if_get_counter(ifp, IFCOUNTER_OERRORS) + ifp->if_get_counter(ifp, IFCOUNTER_IERRORS)) / 100) << M_BITS) / 100; res = (overhead + (nbits / rate)) * ((1 << S_FACTOR) / ((1 << M_BITS) - errrate)); return (uint32_t)(res >> S_FACTOR); #undef M_BITS #undef S_FACTOR } /* * Add a Mesh Link Metric report IE to a frame. */ uint8_t * ieee80211_add_meshlmetric(uint8_t *frm, uint8_t flags, uint32_t metric) { *frm++ = IEEE80211_ELEMID_MESHLINK; *frm++ = 5; *frm++ = flags; ADDWORD(frm, metric); return frm; } /* * Add a Mesh Gate Announcement IE to a frame. */ uint8_t * ieee80211_add_meshgate(uint8_t *frm, struct ieee80211_meshgann_ie *ie) { *frm++ = IEEE80211_ELEMID_MESHGANN; /* ie */ *frm++ = IEEE80211_MESHGANN_BASE_SZ; /* len */ *frm++ = ie->gann_flags; *frm++ = ie->gann_hopcount; *frm++ = ie->gann_ttl; IEEE80211_ADDR_COPY(frm, ie->gann_addr); frm += 6; ADDWORD(frm, ie->gann_seq); ADDSHORT(frm, ie->gann_interval); return frm; } #undef ADDSHORT #undef ADDWORD /* * Initialize any mesh-specific node state. 
*/ void ieee80211_mesh_node_init(struct ieee80211vap *vap, struct ieee80211_node *ni) { ni->ni_flags |= IEEE80211_NODE_QOS; callout_init(&ni->ni_mltimer, 1); callout_init(&ni->ni_mlhtimer, 1); } /* * Cleanup any mesh-specific node state. */ void ieee80211_mesh_node_cleanup(struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211_mesh_state *ms = vap->iv_mesh; callout_drain(&ni->ni_mltimer); callout_drain(&ni->ni_mlhtimer); /* NB: short-circuit callbacks after mesh_vdetach */ if (vap->iv_mesh != NULL) ms->ms_ppath->mpp_peerdown(ni); } void ieee80211_parse_meshid(struct ieee80211_node *ni, const uint8_t *ie) { ni->ni_meshidlen = ie[1]; memcpy(ni->ni_meshid, ie + 2, ie[1]); } /* * Setup mesh-specific node state on neighbor discovery. */ void ieee80211_mesh_init_neighbor(struct ieee80211_node *ni, const struct ieee80211_frame *wh, const struct ieee80211_scanparams *sp) { ieee80211_parse_meshid(ni, sp->meshid); } void ieee80211_mesh_update_beacon(struct ieee80211vap *vap, struct ieee80211_beacon_offsets *bo) { KASSERT(vap->iv_opmode == IEEE80211_M_MBSS, ("not a MBSS vap")); if (isset(bo->bo_flags, IEEE80211_BEACON_MESHCONF)) { (void)ieee80211_add_meshconf(bo->bo_meshconf, vap); clrbit(bo->bo_flags, IEEE80211_BEACON_MESHCONF); } } static int mesh_ioctl_get80211(struct ieee80211vap *vap, struct ieee80211req *ireq) { struct ieee80211_mesh_state *ms = vap->iv_mesh; uint8_t tmpmeshid[IEEE80211_NWID_LEN]; struct ieee80211_mesh_route *rt; struct ieee80211req_mesh_route *imr; size_t len, off; uint8_t *p; int error; if (vap->iv_opmode != IEEE80211_M_MBSS) return ENOSYS; error = 0; switch (ireq->i_type) { case IEEE80211_IOC_MESH_ID: ireq->i_len = ms->ms_idlen; memcpy(tmpmeshid, ms->ms_id, ireq->i_len); error = copyout(tmpmeshid, ireq->i_data, ireq->i_len); break; case IEEE80211_IOC_MESH_AP: ireq->i_val = (ms->ms_flags & IEEE80211_MESHFLAGS_AP) != 0; break; case IEEE80211_IOC_MESH_FWRD: ireq->i_val = (ms->ms_flags & IEEE80211_MESHFLAGS_FWD) != 0; break; case IEEE80211_IOC_MESH_GATE: ireq->i_val = (ms->ms_flags & IEEE80211_MESHFLAGS_GATE) != 0; break; case IEEE80211_IOC_MESH_TTL: ireq->i_val = ms->ms_ttl; break; case IEEE80211_IOC_MESH_RTCMD: switch (ireq->i_val) { case IEEE80211_MESH_RTCMD_LIST: len = 0; MESH_RT_LOCK(ms); TAILQ_FOREACH(rt, &ms->ms_routes, rt_next) { len += sizeof(*imr); } MESH_RT_UNLOCK(ms); if (len > ireq->i_len || ireq->i_len < sizeof(*imr)) { ireq->i_len = len; return ENOMEM; } ireq->i_len = len; /* XXX M_WAIT? 
*/ p = IEEE80211_MALLOC(len, M_TEMP, IEEE80211_M_NOWAIT | IEEE80211_M_ZERO); if (p == NULL) return ENOMEM; off = 0; MESH_RT_LOCK(ms); TAILQ_FOREACH(rt, &ms->ms_routes, rt_next) { if (off >= len) break; imr = (struct ieee80211req_mesh_route *) (p + off); IEEE80211_ADDR_COPY(imr->imr_dest, rt->rt_dest); IEEE80211_ADDR_COPY(imr->imr_nexthop, rt->rt_nexthop); imr->imr_metric = rt->rt_metric; imr->imr_nhops = rt->rt_nhops; imr->imr_lifetime = ieee80211_mesh_rt_update(rt, 0); imr->imr_lastmseq = rt->rt_lastmseq; imr->imr_flags = rt->rt_flags; /* last */ off += sizeof(*imr); } MESH_RT_UNLOCK(ms); error = copyout(p, (uint8_t *)ireq->i_data, ireq->i_len); IEEE80211_FREE(p, M_TEMP); break; case IEEE80211_MESH_RTCMD_FLUSH: case IEEE80211_MESH_RTCMD_ADD: case IEEE80211_MESH_RTCMD_DELETE: return EINVAL; default: return ENOSYS; } break; case IEEE80211_IOC_MESH_PR_METRIC: len = strlen(ms->ms_pmetric->mpm_descr); if (ireq->i_len < len) return EINVAL; ireq->i_len = len; error = copyout(ms->ms_pmetric->mpm_descr, (uint8_t *)ireq->i_data, len); break; case IEEE80211_IOC_MESH_PR_PATH: len = strlen(ms->ms_ppath->mpp_descr); if (ireq->i_len < len) return EINVAL; ireq->i_len = len; error = copyout(ms->ms_ppath->mpp_descr, (uint8_t *)ireq->i_data, len); break; default: return ENOSYS; } return error; } IEEE80211_IOCTL_GET(mesh, mesh_ioctl_get80211); static int mesh_ioctl_set80211(struct ieee80211vap *vap, struct ieee80211req *ireq) { struct ieee80211_mesh_state *ms = vap->iv_mesh; uint8_t tmpmeshid[IEEE80211_NWID_LEN]; uint8_t tmpaddr[IEEE80211_ADDR_LEN]; char tmpproto[IEEE80211_MESH_PROTO_DSZ]; int error; if (vap->iv_opmode != IEEE80211_M_MBSS) return ENOSYS; error = 0; switch (ireq->i_type) { case IEEE80211_IOC_MESH_ID: if (ireq->i_val != 0 || ireq->i_len > IEEE80211_MESHID_LEN) return EINVAL; error = copyin(ireq->i_data, tmpmeshid, ireq->i_len); if (error != 0) break; memset(ms->ms_id, 0, IEEE80211_NWID_LEN); ms->ms_idlen = ireq->i_len; memcpy(ms->ms_id, tmpmeshid, ireq->i_len); error = ENETRESET; break; case IEEE80211_IOC_MESH_AP: if (ireq->i_val) ms->ms_flags |= IEEE80211_MESHFLAGS_AP; else ms->ms_flags &= ~IEEE80211_MESHFLAGS_AP; error = ENETRESET; break; case IEEE80211_IOC_MESH_FWRD: if (ireq->i_val) ms->ms_flags |= IEEE80211_MESHFLAGS_FWD; else ms->ms_flags &= ~IEEE80211_MESHFLAGS_FWD; mesh_gatemode_setup(vap); break; case IEEE80211_IOC_MESH_GATE: if (ireq->i_val) ms->ms_flags |= IEEE80211_MESHFLAGS_GATE; else ms->ms_flags &= ~IEEE80211_MESHFLAGS_GATE; break; case IEEE80211_IOC_MESH_TTL: ms->ms_ttl = (uint8_t) ireq->i_val; break; case IEEE80211_IOC_MESH_RTCMD: switch (ireq->i_val) { case IEEE80211_MESH_RTCMD_LIST: return EINVAL; case IEEE80211_MESH_RTCMD_FLUSH: ieee80211_mesh_rt_flush(vap); break; case IEEE80211_MESH_RTCMD_ADD: error = copyin(ireq->i_data, tmpaddr, IEEE80211_ADDR_LEN); if (error != 0) break; if (IEEE80211_ADDR_EQ(vap->iv_myaddr, tmpaddr) || IEEE80211_ADDR_EQ(broadcastaddr, tmpaddr)) return EINVAL; ieee80211_mesh_discover(vap, tmpaddr, NULL); break; case IEEE80211_MESH_RTCMD_DELETE: error = copyin(ireq->i_data, tmpaddr, IEEE80211_ADDR_LEN); if (error != 0) break; ieee80211_mesh_rt_del(vap, tmpaddr); break; default: return ENOSYS; } break; case IEEE80211_IOC_MESH_PR_METRIC: error = copyin(ireq->i_data, tmpproto, sizeof(tmpproto)); if (error == 0) { error = mesh_select_proto_metric(vap, tmpproto); if (error == 0) error = ENETRESET; } break; case IEEE80211_IOC_MESH_PR_PATH: error = copyin(ireq->i_data, tmpproto, sizeof(tmpproto)); if (error == 0) { error = mesh_select_proto_path(vap, 
tmpproto); if (error == 0) error = ENETRESET; } break; default: return ENOSYS; } return error; } IEEE80211_IOCTL_SET(mesh, mesh_ioctl_set80211); diff --git a/sys/net80211/ieee80211_output.c b/sys/net80211/ieee80211_output.c index 17904672a72b..1be27380b01e 100644 --- a/sys/net80211/ieee80211_output.c +++ b/sys/net80211/ieee80211_output.c @@ -1,4190 +1,4191 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2001 Atsushi Onoe * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_inet.h" #include "opt_inet6.h" #include "opt_wlan.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef IEEE80211_SUPPORT_SUPERG #include #endif #ifdef IEEE80211_SUPPORT_TDMA #include #endif #include #include #include #if defined(INET) || defined(INET6) #include #endif #ifdef INET #include #include #include #endif #ifdef INET6 #include #endif #include #define ETHER_HEADER_COPY(dst, src) \ memcpy(dst, src, sizeof(struct ether_header)) static int ieee80211_fragment(struct ieee80211vap *, struct mbuf *, u_int hdrsize, u_int ciphdrsize, u_int mtu); static void ieee80211_tx_mgt_cb(struct ieee80211_node *, void *, int); #ifdef IEEE80211_DEBUG /* * Decide if an outbound management frame should be * printed when debugging is enabled. This filters some * of the less interesting frames that come frequently * (e.g. beacons). */ static __inline int doprint(struct ieee80211vap *vap, int subtype) { switch (subtype) { case IEEE80211_FC0_SUBTYPE_PROBE_RESP: return (vap->iv_opmode == IEEE80211_M_IBSS); } return 1; } #endif /* * Transmit a frame to the given destination on the given VAP. * * It's up to the caller to figure out the details of who this * is going to and resolving the node. * * This routine takes care of queuing it for power save, * A-MPDU state stuff, fast-frames state stuff, encapsulation * if required, then passing it up to the driver layer. * * This routine (for now) consumes the mbuf and frees the node * reference; it ideally will return a TX status which reflects * whether the mbuf was consumed or not, so the caller can * free the mbuf (if appropriate) and the node reference (again, * if appropriate.) 
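 *
 * A minimal caller sketch (illustrative only; it mirrors what
 * ieee80211_start_pkt below already does):
 *
 *	ni = ieee80211_find_txnode(vap, eh->ether_dhost);
 *	if (ni != NULL)
 *		(void) ieee80211_vap_pkt_send_dest(vap, m, ni);
 *
 * As noted above, the current implementation consumes the mbuf and the
 * node reference either way, so the caller must not touch them again.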
*/ int ieee80211_vap_pkt_send_dest(struct ieee80211vap *vap, struct mbuf *m, struct ieee80211_node *ni) { struct ieee80211com *ic = vap->iv_ic; struct ifnet *ifp = vap->iv_ifp; int mcast; int do_ampdu = 0; #ifdef IEEE80211_SUPPORT_SUPERG int do_amsdu = 0; int do_ampdu_amsdu = 0; int no_ampdu = 1; /* Will be set to 0 if ampdu is active */ int do_ff = 0; #endif if ((ni->ni_flags & IEEE80211_NODE_PWR_MGT) && (m->m_flags & M_PWR_SAV) == 0) { /* * Station in power save mode; pass the frame * to the 802.11 layer and continue. We'll get * the frame back when the time is right. * XXX lose WDS vap linkage? */ if (ieee80211_pwrsave(ni, m) != 0) if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); ieee80211_free_node(ni); /* * We queued it fine, so tell the upper layer * that we consumed it. */ return (0); } /* calculate priority so drivers can find the tx queue */ if (ieee80211_classify(ni, m)) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_OUTPUT, ni->ni_macaddr, NULL, "%s", "classification failure"); vap->iv_stats.is_tx_classify++; if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); m_freem(m); ieee80211_free_node(ni); /* XXX better status? */ return (0); } /* * Stash the node pointer. Note that we do this after * any call to ieee80211_dwds_mcast because that code * uses any existing value for rcvif to identify the * interface it (might have been) received on. */ MPASS((m->m_pkthdr.csum_flags & CSUM_SND_TAG) == 0); m->m_pkthdr.rcvif = (void *)ni; mcast = (m->m_flags & (M_MCAST | M_BCAST)) ? 1: 0; BPF_MTAP(ifp, m); /* 802.3 tx */ /* * Figure out if we can do A-MPDU, A-MSDU or FF. * * A-MPDU depends upon vap/node config. * A-MSDU depends upon vap/node config. * FF depends upon vap config, IE and whether * it's 11abg (and not 11n/11ac/etc.) * * Note that these flags indiciate whether we can do * it at all, rather than the situation (eg traffic type.) */ do_ampdu = ((ni->ni_flags & IEEE80211_NODE_AMPDU_TX) && (vap->iv_flags_ht & IEEE80211_FHT_AMPDU_TX)); #ifdef IEEE80211_SUPPORT_SUPERG do_amsdu = ((ni->ni_flags & IEEE80211_NODE_AMSDU_TX) && (vap->iv_flags_ht & IEEE80211_FHT_AMSDU_TX)); do_ff = ((ni->ni_flags & IEEE80211_NODE_HT) == 0) && ((ni->ni_flags & IEEE80211_NODE_VHT) == 0) && (IEEE80211_ATH_CAP(vap, ni, IEEE80211_NODE_FF)); #endif /* * Check if A-MPDU tx aggregation is setup or if we * should try to enable it. The sta must be associated * with HT and A-MPDU enabled for use. When the policy * routine decides we should enable A-MPDU we issue an * ADDBA request and wait for a reply. The frame being * encapsulated will go out w/o using A-MPDU, or possibly * it might be collected by the driver and held/retransmit. * The default ic_ampdu_enable routine handles staggering * ADDBA requests in case the receiver NAK's us or we are * otherwise unable to establish a BA stream. * * Don't treat group-addressed frames as candidates for aggregation; * net80211 doesn't support 802.11aa-2012 and so group addressed * frames will always have sequence numbers allocated from the NON_QOS * TID. */ if (do_ampdu) { if ((m->m_flags & M_EAPOL) == 0 && (! mcast)) { int tid = WME_AC_TO_TID(M_WME_GETAC(m)); struct ieee80211_tx_ampdu *tap = &ni->ni_tx_ampdu[tid]; ieee80211_txampdu_count_packet(tap); if (IEEE80211_AMPDU_RUNNING(tap)) { /* * Operational, mark frame for aggregation. * * XXX do tx aggregation here */ m->m_flags |= M_AMPDU_MPDU; } else if (!IEEE80211_AMPDU_REQUESTED(tap) && ic->ic_ampdu_enable(ni, tap)) { /* * Not negotiated yet, request service. */ ieee80211_ampdu_request(ni, tap); /* XXX hold frame for reply? 
*/ } /* * Now update the no-ampdu flag. A-MPDU may have been * started or administratively disabled above; so now we * know whether we're running yet or not. * * This will let us know whether we should be doing A-MSDU * at this point. We only do A-MSDU if we're either not * doing A-MPDU, or A-MPDU is NACKed, or A-MPDU + A-MSDU * is available. * * Whilst here, update the amsdu-ampdu flag. The above may * have also set or cleared the amsdu-in-ampdu txa_flags * combination so we can correctly do A-MPDU + A-MSDU. */ #ifdef IEEE80211_SUPPORT_SUPERG no_ampdu = (! IEEE80211_AMPDU_RUNNING(tap) || (IEEE80211_AMPDU_NACKED(tap))); do_ampdu_amsdu = IEEE80211_AMPDU_RUNNING_AMSDU(tap); #endif } } #ifdef IEEE80211_SUPPORT_SUPERG /* * Check for AMSDU/FF; queue for aggregation * * Note: we don't bother trying to do fast frames or * A-MSDU encapsulation for 802.3 drivers. Now, we * likely could do it for FF (because it's a magic * atheros tunnel LLC type) but I don't think we're going * to really need to. For A-MSDU we'd have to set the * A-MSDU QoS bit in the wifi header, so we just plain * can't do it. */ if (__predict_true((vap->iv_caps & IEEE80211_C_8023ENCAP) == 0)) { if ((! mcast) && (do_ampdu_amsdu || (no_ampdu && do_amsdu)) && ieee80211_amsdu_tx_ok(ni)) { m = ieee80211_amsdu_check(ni, m); if (m == NULL) { /* NB: any ni ref held on stageq */ IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG, "%s: amsdu_check queued frame\n", __func__); return (0); } } else if ((! mcast) && do_ff) { m = ieee80211_ff_check(ni, m); if (m == NULL) { /* NB: any ni ref held on stageq */ IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG, "%s: ff_check queued frame\n", __func__); return (0); } } } #endif /* IEEE80211_SUPPORT_SUPERG */ /* * Grab the TX lock - serialise the TX process from this * point (where TX state is being checked/modified) * through to driver queue. */ IEEE80211_TX_LOCK(ic); /* * XXX make the encap and transmit code a separate function * so things like the FF (and later A-MSDU) path can just call * it for flushed frames. */ if (__predict_true((vap->iv_caps & IEEE80211_C_8023ENCAP) == 0)) { /* * Encapsulate the packet in prep for transmission. */ m = ieee80211_encap(vap, ni, m); if (m == NULL) { /* NB: stat+msg handled in ieee80211_encap */ IEEE80211_TX_UNLOCK(ic); ieee80211_free_node(ni); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); return (ENOBUFS); } } (void) ieee80211_parent_xmitpkt(ic, m); /* * Unlock at this point - no need to hold it across * ieee80211_free_node() (ie, the comlock) */ IEEE80211_TX_UNLOCK(ic); ic->ic_lastdata = ticks; return (0); } /* * Send the given mbuf through the given vap. * * This consumes the mbuf regardless of whether the transmit * was successful or not. * * This does none of the initial checks that ieee80211_start() * does (eg CAC timeout, interface wakeup) - the caller must * do this first. */ static int ieee80211_start_pkt(struct ieee80211vap *vap, struct mbuf *m) { #define IS_DWDS(vap) \ (vap->iv_opmode == IEEE80211_M_WDS && \ (vap->iv_flags_ext & IEEE80211_FEXT_WDSLEGACY) == 0) struct ieee80211com *ic = vap->iv_ic; struct ifnet *ifp = vap->iv_ifp; struct ieee80211_node *ni; struct ether_header *eh; /* * Cancel any background scan. */ if (ic->ic_flags & IEEE80211_F_SCAN) ieee80211_cancel_anyscan(vap); /* * Find the node for the destination so we can do * things like power save and fast frames aggregation. * * NB: past this point various code assumes the first * mbuf has the 802.3 header present (and contiguous). 
*/ ni = NULL; if (m->m_len < sizeof(struct ether_header) && (m = m_pullup(m, sizeof(struct ether_header))) == NULL) { IEEE80211_DPRINTF(vap, IEEE80211_MSG_OUTPUT, "discard frame, %s\n", "m_pullup failed"); vap->iv_stats.is_tx_nobuf++; /* XXX */ if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); return (ENOBUFS); } eh = mtod(m, struct ether_header *); if (ETHER_IS_MULTICAST(eh->ether_dhost)) { if (IS_DWDS(vap)) { /* * Only unicast frames from the above go out * DWDS vaps; multicast frames are handled by * dispatching the frame as it comes through * the AP vap (see below). */ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_WDS, eh->ether_dhost, "mcast", "%s", "on DWDS"); vap->iv_stats.is_dwds_mcast++; m_freem(m); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); /* XXX better status? */ return (ENOBUFS); } if (vap->iv_opmode == IEEE80211_M_HOSTAP) { /* * Spam DWDS vap's w/ multicast traffic. */ /* XXX only if dwds in use? */ ieee80211_dwds_mcast(vap, m); } } #ifdef IEEE80211_SUPPORT_MESH if (vap->iv_opmode != IEEE80211_M_MBSS) { #endif ni = ieee80211_find_txnode(vap, eh->ether_dhost); if (ni == NULL) { /* NB: ieee80211_find_txnode does stat+msg */ if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); m_freem(m); /* XXX better status? */ return (ENOBUFS); } if (ni->ni_associd == 0 && (ni->ni_flags & IEEE80211_NODE_ASSOCID)) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_OUTPUT, eh->ether_dhost, NULL, "sta not associated (type 0x%04x)", htons(eh->ether_type)); vap->iv_stats.is_tx_notassoc++; if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); m_freem(m); ieee80211_free_node(ni); /* XXX better status? */ return (ENOBUFS); } #ifdef IEEE80211_SUPPORT_MESH } else { if (!IEEE80211_ADDR_EQ(eh->ether_shost, vap->iv_myaddr)) { /* * Proxy station only if configured. */ if (!ieee80211_mesh_isproxyena(vap)) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_OUTPUT | IEEE80211_MSG_MESH, eh->ether_dhost, NULL, "%s", "proxy not enabled"); vap->iv_stats.is_mesh_notproxy++; if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); m_freem(m); /* XXX better status? */ return (ENOBUFS); } IEEE80211_DPRINTF(vap, IEEE80211_MSG_OUTPUT, "forward frame from DS SA(%6D), DA(%6D)\n", eh->ether_shost, ":", eh->ether_dhost, ":"); ieee80211_mesh_proxy_check(vap, eh->ether_shost); } ni = ieee80211_mesh_discover(vap, eh->ether_dhost, m); if (ni == NULL) { /* * NB: ieee80211_mesh_discover holds/disposes * frame (e.g. queueing on path discovery). */ if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); /* XXX better status? */ return (ENOBUFS); } } #endif /* * We've resolved the sender, so attempt to transmit it. */ if (vap->iv_state == IEEE80211_S_SLEEP) { /* * In power save; queue frame and then wakeup device * for transmit. */ ic->ic_lastdata = ticks; if (ieee80211_pwrsave(ni, m) != 0) if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); ieee80211_free_node(ni); ieee80211_new_state(vap, IEEE80211_S_RUN, 0); return (0); } if (ieee80211_vap_pkt_send_dest(vap, m, ni) != 0) return (ENOBUFS); return (0); #undef IS_DWDS } /* * Start method for vap's. All packets from the stack come * through here. We handle common processing of the packets * before dispatching them to the underlying device. * * if_transmit() requires that the mbuf be consumed by this call * regardless of the return condition. */ int ieee80211_vap_transmit(struct ifnet *ifp, struct mbuf *m) { struct ieee80211vap *vap = ifp->if_softc; struct ieee80211com *ic = vap->iv_ic; /* * No data frames go out unless we're running. * Note in particular this covers CAC and CSA * states (though maybe we should check muting * for CSA). 
*/ if (vap->iv_state != IEEE80211_S_RUN && vap->iv_state != IEEE80211_S_SLEEP) { IEEE80211_LOCK(ic); /* re-check under the com lock to avoid races */ if (vap->iv_state != IEEE80211_S_RUN && vap->iv_state != IEEE80211_S_SLEEP) { IEEE80211_DPRINTF(vap, IEEE80211_MSG_OUTPUT, "%s: ignore queue, in %s state\n", __func__, ieee80211_state_name[vap->iv_state]); vap->iv_stats.is_tx_badstate++; IEEE80211_UNLOCK(ic); ifp->if_drv_flags |= IFF_DRV_OACTIVE; m_freem(m); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); return (ENETDOWN); } IEEE80211_UNLOCK(ic); } /* * Sanitize mbuf flags for net80211 use. We cannot * clear M_PWR_SAV or M_MORE_DATA because these may * be set for frames that are re-submitted from the * power save queue. * * NB: This must be done before ieee80211_classify as * it marks EAPOL in frames with M_EAPOL. */ m->m_flags &= ~(M_80211_TX - M_PWR_SAV - M_MORE_DATA); /* * Bump to the packet transmission path. * The mbuf will be consumed here. */ return (ieee80211_start_pkt(vap, m)); } void ieee80211_vap_qflush(struct ifnet *ifp) { /* Empty for now */ } /* * 802.11 raw output routine. * * XXX TODO: this (and other send routines) should correctly * XXX keep the pwr mgmt bit set if it decides to call into the * XXX driver to send a frame whilst the state is SLEEP. * * Otherwise the peer may decide that we're awake and flood us * with traffic we are still too asleep to receive! */ int ieee80211_raw_output(struct ieee80211vap *vap, struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params) { struct ieee80211com *ic = vap->iv_ic; int error; /* * Set node - the caller has taken a reference, so ensure * that the mbuf has the same node value that * it would if it were going via the normal path. */ MPASS((m->m_pkthdr.csum_flags & CSUM_SND_TAG) == 0); m->m_pkthdr.rcvif = (void *)ni; /* * Attempt to add bpf transmit parameters. * * For now it's ok to fail; the raw_xmit api still takes * them as an option. * * Later on when ic_raw_xmit() has params removed, * they'll have to be added - so fail the transmit if * they can't be. */ if (params) (void) ieee80211_add_xmit_params(m, params); error = ic->ic_raw_xmit(ni, m, params); if (error) { if_inc_counter(vap->iv_ifp, IFCOUNTER_OERRORS, 1); ieee80211_free_node(ni); } return (error); } static int ieee80211_validate_frame(struct mbuf *m, const struct ieee80211_bpf_params *params) { struct ieee80211_frame *wh; int type; if (m->m_pkthdr.len < sizeof(struct ieee80211_frame_ack)) return (EINVAL); wh = mtod(m, struct ieee80211_frame *); if ((wh->i_fc[0] & IEEE80211_FC0_VERSION_MASK) != IEEE80211_FC0_VERSION_0) return (EINVAL); type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; if (type != IEEE80211_FC0_TYPE_DATA) { if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) != IEEE80211_FC1_DIR_NODS) return (EINVAL); if (type != IEEE80211_FC0_TYPE_MGT && (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) != 0) return (EINVAL); /* XXX skip other field checks? */ } if ((params && (params->ibp_flags & IEEE80211_BPF_CRYPTO) != 0) || (IEEE80211_IS_PROTECTED(wh))) { int subtype; subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; /* * See IEEE Std 802.11-2012, * 8.2.4.1.9 'Protected Frame field' */ /* XXX no support for robust management frames yet. 
*/ if (!(type == IEEE80211_FC0_TYPE_DATA || (type == IEEE80211_FC0_TYPE_MGT && subtype == IEEE80211_FC0_SUBTYPE_AUTH))) return (EINVAL); wh->i_fc[1] |= IEEE80211_FC1_PROTECTED; } if (m->m_pkthdr.len < ieee80211_anyhdrsize(wh)) return (EINVAL); return (0); } static int ieee80211_validate_rate(struct ieee80211_node *ni, uint8_t rate) { struct ieee80211com *ic = ni->ni_ic; if (IEEE80211_IS_HT_RATE(rate)) { if ((ic->ic_htcaps & IEEE80211_HTC_HT) == 0) return (EINVAL); rate = IEEE80211_RV(rate); if (rate <= 31) { if (rate > ic->ic_txstream * 8 - 1) return (EINVAL); return (0); } if (rate == 32) { if ((ic->ic_htcaps & IEEE80211_HTC_TXMCS32) == 0) return (EINVAL); return (0); } if ((ic->ic_htcaps & IEEE80211_HTC_TXUNEQUAL) == 0) return (EINVAL); switch (ic->ic_txstream) { case 0: case 1: return (EINVAL); case 2: if (rate > 38) return (EINVAL); return (0); case 3: if (rate > 52) return (EINVAL); return (0); case 4: default: if (rate > 76) return (EINVAL); return (0); } } if (!ieee80211_isratevalid(ic->ic_rt, rate)) return (EINVAL); return (0); } static int ieee80211_sanitize_rates(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params) { int error; if (!params) return (0); /* nothing to do */ /* NB: most drivers assume that ibp_rate0 is set (!= 0). */ if (params->ibp_rate0 != 0) { error = ieee80211_validate_rate(ni, params->ibp_rate0); if (error != 0) return (error); } else { /* XXX pre-setup some default (e.g., mgmt / mcast) rate */ /* XXX __DECONST? */ (void) m; } if (params->ibp_rate1 != 0 && (error = ieee80211_validate_rate(ni, params->ibp_rate1)) != 0) return (error); if (params->ibp_rate2 != 0 && (error = ieee80211_validate_rate(ni, params->ibp_rate2)) != 0) return (error); if (params->ibp_rate3 != 0 && (error = ieee80211_validate_rate(ni, params->ibp_rate3)) != 0) return (error); return (0); } /* * 802.11 output routine. This is (currently) used only to * connect bpf write calls to the 802.11 layer for injecting * raw 802.11 frames. */ int ieee80211_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst, struct route *ro) { #define senderr(e) do { error = (e); goto bad;} while (0) const struct ieee80211_bpf_params *params = NULL; struct ieee80211_node *ni = NULL; struct ieee80211vap *vap; struct ieee80211_frame *wh; struct ieee80211com *ic = NULL; int error; int ret; if (ifp->if_drv_flags & IFF_DRV_OACTIVE) { /* * Short-circuit requests if the vap is marked OACTIVE * as this can happen because a packet came down through * ieee80211_start before the vap entered RUN state in * which case it's ok to just drop the frame. This * should not be necessary but callers of if_output don't * check OACTIVE. */ senderr(ENETDOWN); } vap = ifp->if_softc; ic = vap->iv_ic; /* * Hand to the 802.3 code if not tagged as * a raw 802.11 frame. */ if (dst->sa_family != AF_IEEE80211) return vap->iv_output(ifp, m, dst, ro); #ifdef MAC error = mac_ifnet_check_transmit(ifp, m); if (error) senderr(error); #endif if (ifp->if_flags & IFF_MONITOR) senderr(ENETDOWN); if (!IFNET_IS_UP_RUNNING(ifp)) senderr(ENETDOWN); if (vap->iv_state == IEEE80211_S_CAC) { IEEE80211_DPRINTF(vap, IEEE80211_MSG_OUTPUT | IEEE80211_MSG_DOTH, "block %s frame in CAC state\n", "raw data"); vap->iv_stats.is_tx_badstate++; senderr(EIO); /* XXX */ } else if (vap->iv_state == IEEE80211_S_SCAN) senderr(EIO); /* XXX bypass bridge, pfil, carp, etc. */ /* * NB: DLT_IEEE802_11_RADIO identifies the parameters are * present by setting the sa_len field of the sockaddr (yes, * this is a hack). 
* NB: we assume sa_data is suitably aligned to cast. */ if (dst->sa_len != 0) params = (const struct ieee80211_bpf_params *)dst->sa_data; error = ieee80211_validate_frame(m, params); if (error != 0) senderr(error); wh = mtod(m, struct ieee80211_frame *); /* locate destination node */ switch (wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) { case IEEE80211_FC1_DIR_NODS: case IEEE80211_FC1_DIR_FROMDS: ni = ieee80211_find_txnode(vap, wh->i_addr1); break; case IEEE80211_FC1_DIR_TODS: case IEEE80211_FC1_DIR_DSTODS: ni = ieee80211_find_txnode(vap, wh->i_addr3); break; default: senderr(EDOOFUS); } if (ni == NULL) { /* * Permit packets w/ bpf params through regardless * (see below about sa_len). */ if (dst->sa_len == 0) senderr(EHOSTUNREACH); ni = ieee80211_ref_node(vap->iv_bss); } /* * Sanitize mbuf for net80211 flags leaked from above. * * NB: This must be done before ieee80211_classify as * it marks EAPOL in frames with M_EAPOL. */ m->m_flags &= ~M_80211_TX; m->m_flags |= M_ENCAP; /* mark encapsulated */ if (IEEE80211_IS_DATA(wh)) { /* calculate priority so drivers can find the tx queue */ if (ieee80211_classify(ni, m)) senderr(EIO); /* XXX */ /* NB: ieee80211_encap does not include 802.11 header */ IEEE80211_NODE_STAT_ADD(ni, tx_bytes, m->m_pkthdr.len - ieee80211_hdrsize(wh)); } else M_WME_SETAC(m, WME_AC_BE); error = ieee80211_sanitize_rates(ni, m, params); if (error != 0) senderr(error); IEEE80211_NODE_STAT(ni, tx_data); if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { IEEE80211_NODE_STAT(ni, tx_mcast); m->m_flags |= M_MCAST; } else IEEE80211_NODE_STAT(ni, tx_ucast); IEEE80211_TX_LOCK(ic); ret = ieee80211_raw_output(vap, ni, m, params); IEEE80211_TX_UNLOCK(ic); return (ret); bad: if (m != NULL) m_freem(m); if (ni != NULL) ieee80211_free_node(ni); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); return error; #undef senderr } /* * Set the direction field and address fields of an outgoing * frame. Note this should be called early on in constructing * a frame as it sets i_fc[1]; other bits can then be or'd in. 
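 *
 * A typical call sequence (a sketch modelled on mesh_send_action and
 * ieee80211_mgmt_output elsewhere in this change, not a new API):
 *
 *	M_PREPEND(m, sizeof(struct ieee80211_frame), IEEE80211_M_NOWAIT);
 *	...
 *	IEEE80211_TX_LOCK(ic);
 *	ieee80211_send_setup(ni, m,
 *	    IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_ACTION,
 *	    IEEE80211_NONQOS_TID, sa, da, sa);
 *	wh = mtod(m, struct ieee80211_frame *);
 *	wh->i_fc[1] |= IEEE80211_FC1_PROTECTED;
 *
 * after which any further i_fc[1] bits may be or'd in as needed.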
*/ void ieee80211_send_setup( struct ieee80211_node *ni, struct mbuf *m, int type, int tid, const uint8_t sa[IEEE80211_ADDR_LEN], const uint8_t da[IEEE80211_ADDR_LEN], const uint8_t bssid[IEEE80211_ADDR_LEN]) { #define WH4(wh) ((struct ieee80211_frame_addr4 *)wh) struct ieee80211vap *vap = ni->ni_vap; struct ieee80211_tx_ampdu *tap; struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *); ieee80211_seq seqno; IEEE80211_TX_LOCK_ASSERT(ni->ni_ic); wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | type; if ((type & IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_DATA) { switch (vap->iv_opmode) { case IEEE80211_M_STA: wh->i_fc[1] = IEEE80211_FC1_DIR_TODS; IEEE80211_ADDR_COPY(wh->i_addr1, bssid); IEEE80211_ADDR_COPY(wh->i_addr2, sa); IEEE80211_ADDR_COPY(wh->i_addr3, da); break; case IEEE80211_M_IBSS: case IEEE80211_M_AHDEMO: wh->i_fc[1] = IEEE80211_FC1_DIR_NODS; IEEE80211_ADDR_COPY(wh->i_addr1, da); IEEE80211_ADDR_COPY(wh->i_addr2, sa); IEEE80211_ADDR_COPY(wh->i_addr3, bssid); break; case IEEE80211_M_HOSTAP: wh->i_fc[1] = IEEE80211_FC1_DIR_FROMDS; IEEE80211_ADDR_COPY(wh->i_addr1, da); IEEE80211_ADDR_COPY(wh->i_addr2, bssid); IEEE80211_ADDR_COPY(wh->i_addr3, sa); break; case IEEE80211_M_WDS: wh->i_fc[1] = IEEE80211_FC1_DIR_DSTODS; IEEE80211_ADDR_COPY(wh->i_addr1, da); IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr); IEEE80211_ADDR_COPY(wh->i_addr3, da); IEEE80211_ADDR_COPY(WH4(wh)->i_addr4, sa); break; case IEEE80211_M_MBSS: #ifdef IEEE80211_SUPPORT_MESH if (IEEE80211_IS_MULTICAST(da)) { wh->i_fc[1] = IEEE80211_FC1_DIR_FROMDS; /* XXX next hop */ IEEE80211_ADDR_COPY(wh->i_addr1, da); IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr); } else { wh->i_fc[1] = IEEE80211_FC1_DIR_DSTODS; IEEE80211_ADDR_COPY(wh->i_addr1, da); IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr); IEEE80211_ADDR_COPY(wh->i_addr3, da); IEEE80211_ADDR_COPY(WH4(wh)->i_addr4, sa); } #endif break; case IEEE80211_M_MONITOR: /* NB: to quiet compiler */ break; } } else { wh->i_fc[1] = IEEE80211_FC1_DIR_NODS; IEEE80211_ADDR_COPY(wh->i_addr1, da); IEEE80211_ADDR_COPY(wh->i_addr2, sa); #ifdef IEEE80211_SUPPORT_MESH if (vap->iv_opmode == IEEE80211_M_MBSS) IEEE80211_ADDR_COPY(wh->i_addr3, sa); else #endif IEEE80211_ADDR_COPY(wh->i_addr3, bssid); } *(uint16_t *)&wh->i_dur[0] = 0; /* * XXX TODO: this is what the TX lock is for. * Here we're incrementing sequence numbers, and they * need to be in lock-step with what the driver is doing * both in TX ordering and crypto encap (IV increment.) * * If the driver does seqno itself, then we can skip * assigning sequence numbers here, and we can avoid * requiring the TX lock. */ tap = &ni->ni_tx_ampdu[tid]; if (tid != IEEE80211_NONQOS_TID && IEEE80211_AMPDU_RUNNING(tap)) { m->m_flags |= M_AMPDU_MPDU; /* NB: zero out i_seq field (for s/w encryption etc) */ *(uint16_t *)&wh->i_seq[0] = 0; } else { if (IEEE80211_HAS_SEQ(type & IEEE80211_FC0_TYPE_MASK, type & IEEE80211_FC0_SUBTYPE_MASK)) /* * 802.11-2012 9.3.2.10 - QoS multicast frames * come out of a different seqno space. */ if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { seqno = ni->ni_txseqs[IEEE80211_NONQOS_TID]++; } else { seqno = ni->ni_txseqs[tid]++; } else seqno = 0; *(uint16_t *)&wh->i_seq[0] = htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT); M_SEQNO_SET(m, seqno); } if (IEEE80211_IS_MULTICAST(wh->i_addr1)) m->m_flags |= M_MCAST; #undef WH4 } /* * Send a management frame to the specified node. The node pointer * must have a reference as the pointer will be passed to the driver * and potentially held for a long time. 
If the frame is successfully * dispatched to the driver, then it is responsible for freeing the * reference (and potentially free'ing up any associated storage); * otherwise deal with reclaiming any reference (on error). */ int ieee80211_mgmt_output(struct ieee80211_node *ni, struct mbuf *m, int type, struct ieee80211_bpf_params *params) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct ieee80211_frame *wh; int ret; KASSERT(ni != NULL, ("null node")); if (vap->iv_state == IEEE80211_S_CAC) { IEEE80211_NOTE(vap, IEEE80211_MSG_OUTPUT | IEEE80211_MSG_DOTH, ni, "block %s frame in CAC state", ieee80211_mgt_subtype_name(type)); vap->iv_stats.is_tx_badstate++; ieee80211_free_node(ni); m_freem(m); return EIO; /* XXX */ } - M_PREPEND(m, sizeof(struct ieee80211_frame), M_NOWAIT); + M_PREPEND(m, sizeof(struct ieee80211_frame), IEEE80211_M_NOWAIT); if (m == NULL) { ieee80211_free_node(ni); return ENOMEM; } IEEE80211_TX_LOCK(ic); wh = mtod(m, struct ieee80211_frame *); ieee80211_send_setup(ni, m, IEEE80211_FC0_TYPE_MGT | type, IEEE80211_NONQOS_TID, vap->iv_myaddr, ni->ni_macaddr, ni->ni_bssid); if (params->ibp_flags & IEEE80211_BPF_CRYPTO) { IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_AUTH, wh->i_addr1, "encrypting frame (%s)", __func__); wh->i_fc[1] |= IEEE80211_FC1_PROTECTED; } m->m_flags |= M_ENCAP; /* mark encapsulated */ KASSERT(type != IEEE80211_FC0_SUBTYPE_PROBE_RESP, ("probe response?")); M_WME_SETAC(m, params->ibp_pri); #ifdef IEEE80211_DEBUG /* avoid printing too many frames */ if ((ieee80211_msg_debug(vap) && doprint(vap, type)) || ieee80211_msg_dumppkts(vap)) { ieee80211_note(vap, "[%s] send %s on channel %u\n", ether_sprintf(wh->i_addr1), ieee80211_mgt_subtype_name(type), ieee80211_chan2ieee(ic, ic->ic_curchan)); } #endif IEEE80211_NODE_STAT(ni, tx_mgmt); ret = ieee80211_raw_output(vap, ni, m, params); IEEE80211_TX_UNLOCK(ic); return (ret); } static void ieee80211_nulldata_transmitted(struct ieee80211_node *ni, void *arg, int status) { struct ieee80211vap *vap = ni->ni_vap; wakeup(vap); } /* * Send a null data frame to the specified node. If the station * is setup for QoS then a QoS Null Data frame is constructed. * If this is a WDS station then a 4-address frame is constructed. * * NB: the caller is assumed to have setup a node reference * for use; this is necessary to deal with a race condition * when probing for inactive stations. Like ieee80211_mgmt_output * we must cleanup any node reference on error; however we * can safely just unref it as we know it will never be the * last reference to the node. 
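 *
 * Typical probe usage is simply (a sketch of the common idiom, with the
 * extra reference taken by the caller as described above):
 *
 *	ieee80211_send_nulldata(ieee80211_ref_node(ni));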
*/ int ieee80211_send_nulldata(struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct mbuf *m; struct ieee80211_frame *wh; int hdrlen; uint8_t *frm; int ret; if (vap->iv_state == IEEE80211_S_CAC) { IEEE80211_NOTE(vap, IEEE80211_MSG_OUTPUT | IEEE80211_MSG_DOTH, ni, "block %s frame in CAC state", "null data"); ieee80211_unref_node(&ni); vap->iv_stats.is_tx_badstate++; return EIO; /* XXX */ } if (ni->ni_flags & (IEEE80211_NODE_QOS|IEEE80211_NODE_HT)) hdrlen = sizeof(struct ieee80211_qosframe); else hdrlen = sizeof(struct ieee80211_frame); /* NB: only WDS vap's get 4-address frames */ if (vap->iv_opmode == IEEE80211_M_WDS) hdrlen += IEEE80211_ADDR_LEN; if (ic->ic_flags & IEEE80211_F_DATAPAD) hdrlen = roundup(hdrlen, sizeof(uint32_t)); m = ieee80211_getmgtframe(&frm, ic->ic_headroom + hdrlen, 0); if (m == NULL) { /* XXX debug msg */ ieee80211_unref_node(&ni); vap->iv_stats.is_tx_nobuf++; return ENOMEM; } KASSERT(M_LEADINGSPACE(m) >= hdrlen, ("leading space %zd", M_LEADINGSPACE(m))); - M_PREPEND(m, hdrlen, M_NOWAIT); + M_PREPEND(m, hdrlen, IEEE80211_M_NOWAIT); if (m == NULL) { /* NB: cannot happen */ ieee80211_free_node(ni); return ENOMEM; } IEEE80211_TX_LOCK(ic); wh = mtod(m, struct ieee80211_frame *); /* NB: a little lie */ if (ni->ni_flags & IEEE80211_NODE_QOS) { const int tid = WME_AC_TO_TID(WME_AC_BE); uint8_t *qos; ieee80211_send_setup(ni, m, IEEE80211_FC0_TYPE_DATA | IEEE80211_FC0_SUBTYPE_QOS_NULL, tid, vap->iv_myaddr, ni->ni_macaddr, ni->ni_bssid); if (vap->iv_opmode == IEEE80211_M_WDS) qos = ((struct ieee80211_qosframe_addr4 *) wh)->i_qos; else qos = ((struct ieee80211_qosframe *) wh)->i_qos; qos[0] = tid & IEEE80211_QOS_TID; if (ic->ic_wme.wme_wmeChanParams.cap_wmeParams[WME_AC_BE].wmep_noackPolicy) qos[0] |= IEEE80211_QOS_ACKPOLICY_NOACK; qos[1] = 0; } else { ieee80211_send_setup(ni, m, IEEE80211_FC0_TYPE_DATA | IEEE80211_FC0_SUBTYPE_NODATA, IEEE80211_NONQOS_TID, vap->iv_myaddr, ni->ni_macaddr, ni->ni_bssid); } if (vap->iv_opmode != IEEE80211_M_WDS) { /* NB: power management bit is never sent by an AP */ if ((ni->ni_flags & IEEE80211_NODE_PWR_MGT) && vap->iv_opmode != IEEE80211_M_HOSTAP) wh->i_fc[1] |= IEEE80211_FC1_PWR_MGT; } if ((ic->ic_flags & IEEE80211_F_SCAN) && (ni->ni_flags & IEEE80211_NODE_PWR_MGT)) { ieee80211_add_callback(m, ieee80211_nulldata_transmitted, NULL); } m->m_len = m->m_pkthdr.len = hdrlen; m->m_flags |= M_ENCAP; /* mark encapsulated */ M_WME_SETAC(m, WME_AC_BE); IEEE80211_NODE_STAT(ni, tx_data); IEEE80211_NOTE(vap, IEEE80211_MSG_DEBUG | IEEE80211_MSG_DUMPPKTS, ni, "send %snull data frame on channel %u, pwr mgt %s", ni->ni_flags & IEEE80211_NODE_QOS ? "QoS " : "", ieee80211_chan2ieee(ic, ic->ic_curchan), wh->i_fc[1] & IEEE80211_FC1_PWR_MGT ? "ena" : "dis"); ret = ieee80211_raw_output(vap, ni, m, NULL); IEEE80211_TX_UNLOCK(ic); return (ret); } /* * Assign priority to a frame based on any vlan tag assigned * to the station and/or any Diffserv setting in an IP header. * Finally, if an ACM policy is setup (in station mode) it's * applied. 
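 *
 * A worked example (illustrative): an IPv4 packet carrying TOS 0xb8
 * (DSCP EF) gives 0xb8 >> 5 == 5 below, and TID_TO_WME_AC(5) selects
 * WME_AC_VI, before any vlan-priority comparison or ACM downgrade is
 * applied.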
*/ int ieee80211_classify(struct ieee80211_node *ni, struct mbuf *m) { const struct ether_header *eh = NULL; uint16_t ether_type; int v_wme_ac, d_wme_ac, ac; if (__predict_false(m->m_flags & M_ENCAP)) { struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *); struct llc *llc; int hdrlen, subtype; subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; if (subtype & IEEE80211_FC0_SUBTYPE_NODATA) { ac = WME_AC_BE; goto done; } hdrlen = ieee80211_hdrsize(wh); if (m->m_pkthdr.len < hdrlen + sizeof(*llc)) return 1; llc = (struct llc *)mtodo(m, hdrlen); if (llc->llc_dsap != LLC_SNAP_LSAP || llc->llc_ssap != LLC_SNAP_LSAP || llc->llc_control != LLC_UI || llc->llc_snap.org_code[0] != 0 || llc->llc_snap.org_code[1] != 0 || llc->llc_snap.org_code[2] != 0) return 1; ether_type = llc->llc_snap.ether_type; } else { eh = mtod(m, struct ether_header *); ether_type = eh->ether_type; } /* * Always promote PAE/EAPOL frames to high priority. */ if (ether_type == htons(ETHERTYPE_PAE)) { /* NB: mark so others don't need to check header */ m->m_flags |= M_EAPOL; ac = WME_AC_VO; goto done; } /* * Non-qos traffic goes to BE. */ if ((ni->ni_flags & IEEE80211_NODE_QOS) == 0) { ac = WME_AC_BE; goto done; } /* * If node has a vlan tag then all traffic * to it must have a matching tag. */ v_wme_ac = 0; if (ni->ni_vlan != 0) { if ((m->m_flags & M_VLANTAG) == 0) { IEEE80211_NODE_STAT(ni, tx_novlantag); return 1; } if (EVL_VLANOFTAG(m->m_pkthdr.ether_vtag) != EVL_VLANOFTAG(ni->ni_vlan)) { IEEE80211_NODE_STAT(ni, tx_vlanmismatch); return 1; } /* map vlan priority to AC */ v_wme_ac = TID_TO_WME_AC(EVL_PRIOFTAG(ni->ni_vlan)); } /* XXX m_copydata may be too slow for fast path */ #ifdef INET if (eh && eh->ether_type == htons(ETHERTYPE_IP)) { uint8_t tos; /* * IP frame, map the DSCP bits from the TOS field. */ /* NB: ip header may not be in first mbuf */ m_copydata(m, sizeof(struct ether_header) + offsetof(struct ip, ip_tos), sizeof(tos), &tos); tos >>= 5; /* NB: ECN + low 3 bits of DSCP */ d_wme_ac = TID_TO_WME_AC(tos); } else { #endif /* INET */ #ifdef INET6 if (eh && eh->ether_type == htons(ETHERTYPE_IPV6)) { uint32_t flow; uint8_t tos; /* * IPv6 frame, map the DSCP bits from the traffic class field. */ m_copydata(m, sizeof(struct ether_header) + offsetof(struct ip6_hdr, ip6_flow), sizeof(flow), (caddr_t) &flow); tos = (uint8_t)(ntohl(flow) >> 20); tos >>= 5; /* NB: ECN + low 3 bits of DSCP */ d_wme_ac = TID_TO_WME_AC(tos); } else { #endif /* INET6 */ d_wme_ac = WME_AC_BE; #ifdef INET6 } #endif #ifdef INET } #endif /* * Use highest priority AC. */ if (v_wme_ac > d_wme_ac) ac = v_wme_ac; else ac = d_wme_ac; /* * Apply ACM policy. */ if (ni->ni_vap->iv_opmode == IEEE80211_M_STA) { static const int acmap[4] = { WME_AC_BK, /* WME_AC_BE */ WME_AC_BK, /* WME_AC_BK */ WME_AC_BE, /* WME_AC_VI */ WME_AC_VI, /* WME_AC_VO */ }; struct ieee80211com *ic = ni->ni_ic; while (ac != WME_AC_BK && ic->ic_wme.wme_wmeBssChanParams.cap_wmeParams[ac].wmep_acm) ac = acmap[ac]; } done: M_WME_SETAC(m, ac); return 0; } /* * Insure there is sufficient contiguous space to encapsulate the * 802.11 data frame. If room isn't already there, arrange for it. * Drivers and cipher modules assume we have done the necessary work * and fail rudely if they don't find the space they need. 
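 *
 * For scale (illustrative): stripping the Ethernet header while adding
 * an LLC header recovers sizeof(struct ether_header) - sizeof(struct
 * llc) = 14 - 8 = 6 bytes, so the leading-space check below only asks
 * for needed_space minus those 6 bytes.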
*/ struct mbuf * ieee80211_mbuf_adjust(struct ieee80211vap *vap, int hdrsize, struct ieee80211_key *key, struct mbuf *m) { #define TO_BE_RECLAIMED (sizeof(struct ether_header) - sizeof(struct llc)) int needed_space = vap->iv_ic->ic_headroom + hdrsize; if (key != NULL) { /* XXX belongs in crypto code? */ needed_space += key->wk_cipher->ic_header; /* XXX frags */ /* * When crypto is being done in the host we must insure * the data are writable for the cipher routines; clone * a writable mbuf chain. * XXX handle SWMIC specially */ if (key->wk_flags & (IEEE80211_KEY_SWENCRYPT|IEEE80211_KEY_SWENMIC)) { - m = m_unshare(m, M_NOWAIT); + m = m_unshare(m, IEEE80211_M_NOWAIT); if (m == NULL) { IEEE80211_DPRINTF(vap, IEEE80211_MSG_OUTPUT, "%s: cannot get writable mbuf\n", __func__); vap->iv_stats.is_tx_nobuf++; /* XXX new stat */ return NULL; } } } /* * We know we are called just before stripping an Ethernet * header and prepending an LLC header. This means we know * there will be * sizeof(struct ether_header) - sizeof(struct llc) * bytes recovered to which we need additional space for the * 802.11 header and any crypto header. */ /* XXX check trailing space and copy instead? */ if (M_LEADINGSPACE(m) < needed_space - TO_BE_RECLAIMED) { - struct mbuf *n = m_gethdr(M_NOWAIT, m->m_type); + struct mbuf *n = m_gethdr(IEEE80211_M_NOWAIT, m->m_type); if (n == NULL) { IEEE80211_DPRINTF(vap, IEEE80211_MSG_OUTPUT, "%s: cannot expand storage\n", __func__); vap->iv_stats.is_tx_nobuf++; m_freem(m); return NULL; } KASSERT(needed_space <= MHLEN, ("not enough room, need %u got %d\n", needed_space, MHLEN)); /* * Setup new mbuf to have leading space to prepend the * 802.11 header and any crypto header bits that are * required (the latter are added when the driver calls * back to ieee80211_crypto_encap to do crypto encapsulation). */ /* NB: must be first 'cuz it clobbers m_data */ m_move_pkthdr(n, m); n->m_len = 0; /* NB: m_gethdr does not set */ n->m_data += needed_space; /* * Pull up Ethernet header to create the expected layout. * We could use m_pullup but that's overkill (i.e. we don't * need the actual data) and it cannot fail so do it inline * for speed. */ /* NB: struct ether_header is known to be contiguous */ n->m_len += sizeof(struct ether_header); m->m_len -= sizeof(struct ether_header); m->m_data += sizeof(struct ether_header); /* * Replace the head of the chain. */ n->m_next = m; m = n; } return m; #undef TO_BE_RECLAIMED } /* * Return the transmit key to use in sending a unicast frame. * If a unicast key is set we use that. When no unicast key is set * we fall back to the default transmit key. */ static __inline struct ieee80211_key * ieee80211_crypto_getucastkey(struct ieee80211vap *vap, struct ieee80211_node *ni) { if (IEEE80211_KEY_UNDEFINED(&ni->ni_ucastkey)) { if (vap->iv_def_txkey == IEEE80211_KEYIX_NONE || IEEE80211_KEY_UNDEFINED(&vap->iv_nw_keys[vap->iv_def_txkey])) return NULL; return &vap->iv_nw_keys[vap->iv_def_txkey]; } else { return &ni->ni_ucastkey; } } /* * Return the transmit key to use in sending a multicast frame. * Multicast traffic always uses the group key which is installed as * the default tx key. */ static __inline struct ieee80211_key * ieee80211_crypto_getmcastkey(struct ieee80211vap *vap, struct ieee80211_node *ni) { if (vap->iv_def_txkey == IEEE80211_KEYIX_NONE || IEEE80211_KEY_UNDEFINED(&vap->iv_nw_keys[vap->iv_def_txkey])) return NULL; return &vap->iv_nw_keys[vap->iv_def_txkey]; } /* * Encapsulate an outbound data frame. The mbuf chain is updated. 
* If an error is encountered NULL is returned. The caller is required * to provide a node reference and pullup the ethernet header in the * first mbuf. * * NB: Packet is assumed to be processed by ieee80211_classify which * marked EAPOL frames w/ M_EAPOL. */ struct mbuf * ieee80211_encap(struct ieee80211vap *vap, struct ieee80211_node *ni, struct mbuf *m) { #define WH4(wh) ((struct ieee80211_frame_addr4 *)(wh)) #define MC01(mc) ((struct ieee80211_meshcntl_ae01 *)mc) struct ieee80211com *ic = ni->ni_ic; #ifdef IEEE80211_SUPPORT_MESH struct ieee80211_mesh_state *ms = vap->iv_mesh; struct ieee80211_meshcntl_ae10 *mc; struct ieee80211_mesh_route *rt = NULL; int dir = -1; #endif struct ether_header eh; struct ieee80211_frame *wh; struct ieee80211_key *key; struct llc *llc; int hdrsize, hdrspace, datalen, addqos, txfrag, is4addr, is_mcast; ieee80211_seq seqno; int meshhdrsize, meshae; uint8_t *qos; int is_amsdu = 0; IEEE80211_TX_LOCK_ASSERT(ic); is_mcast = !! (m->m_flags & (M_MCAST | M_BCAST)); /* * Copy existing Ethernet header to a safe place. The * rest of the code assumes it's ok to strip it when * reorganizing state for the final encapsulation. */ KASSERT(m->m_len >= sizeof(eh), ("no ethernet header!")); ETHER_HEADER_COPY(&eh, mtod(m, caddr_t)); /* * Insure space for additional headers. First identify * transmit key to use in calculating any buffer adjustments * required. This is also used below to do privacy * encapsulation work. Then calculate the 802.11 header * size and any padding required by the driver. * * Note key may be NULL if we fall back to the default * transmit key and that is not set. In that case the * buffer may not be expanded as needed by the cipher * routines, but they will/should discard it. */ if (vap->iv_flags & IEEE80211_F_PRIVACY) { if (vap->iv_opmode == IEEE80211_M_STA || !IEEE80211_IS_MULTICAST(eh.ether_dhost) || (vap->iv_opmode == IEEE80211_M_WDS && (vap->iv_flags_ext & IEEE80211_FEXT_WDSLEGACY))) { key = ieee80211_crypto_getucastkey(vap, ni); } else if ((vap->iv_opmode == IEEE80211_M_WDS) && (! (vap->iv_flags_ext & IEEE80211_FEXT_WDSLEGACY))) { /* * Use ucastkey for DWDS transmit nodes, multicast * or otherwise. * * This is required to ensure that multicast frames * from a DWDS AP to a DWDS STA is encrypted with * a key that can actually work. * * There's no default key for multicast traffic * on a DWDS WDS VAP node (note NOT the DWDS enabled * AP VAP, the dynamically created per-STA WDS node) * so encap fails and transmit fails. */ key = ieee80211_crypto_getucastkey(vap, ni); } else { key = ieee80211_crypto_getmcastkey(vap, ni); } if (key == NULL && (m->m_flags & M_EAPOL) == 0) { IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_CRYPTO, eh.ether_dhost, "no default transmit key (%s) deftxkey %u", __func__, vap->iv_def_txkey); vap->iv_stats.is_tx_nodefkey++; goto bad; } } else key = NULL; /* * XXX Some ap's don't handle QoS-encapsulated EAPOL * frames so suppress use. This may be an issue if other * ap's require all data frames to be QoS-encapsulated * once negotiated in which case we'll need to make this * configurable. * * Don't send multicast QoS frames. * Technically multicast frames can be QoS if all stations in the * BSS are also QoS. * * NB: mesh data frames are QoS, including multicast frames. 
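 *
 * Net effect of the addqos computation just below: QoS-encapsulate
 * when the frame is unicast to a QoS/HT peer or the vap is a mesh
 * (MBSS) vap, and never for EAPOL frames.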
*/ addqos = (((is_mcast == 0) && (ni->ni_flags & (IEEE80211_NODE_QOS|IEEE80211_NODE_HT))) || (vap->iv_opmode == IEEE80211_M_MBSS)) && (m->m_flags & M_EAPOL) == 0; if (addqos) hdrsize = sizeof(struct ieee80211_qosframe); else hdrsize = sizeof(struct ieee80211_frame); #ifdef IEEE80211_SUPPORT_MESH if (vap->iv_opmode == IEEE80211_M_MBSS) { /* * Mesh data frames are encapsulated according to the * rules of Section 11B.8.5 (p.139 of D3.0 spec). * o Group Addressed data (aka multicast) originating * at the local sta are sent w/ 3-address format and * address extension mode 00 * o Individually Addressed data (aka unicast) originating * at the local sta are sent w/ 4-address format and * address extension mode 00 * o Group Addressed data forwarded from a non-mesh sta are * sent w/ 3-address format and address extension mode 01 * o Individually Address data from another sta are sent * w/ 4-address format and address extension mode 10 */ is4addr = 0; /* NB: don't use, disable */ if (!IEEE80211_IS_MULTICAST(eh.ether_dhost)) { rt = ieee80211_mesh_rt_find(vap, eh.ether_dhost); KASSERT(rt != NULL, ("route is NULL")); dir = IEEE80211_FC1_DIR_DSTODS; hdrsize += IEEE80211_ADDR_LEN; if (rt->rt_flags & IEEE80211_MESHRT_FLAGS_PROXY) { if (IEEE80211_ADDR_EQ(rt->rt_mesh_gate, vap->iv_myaddr)) { IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_MESH, eh.ether_dhost, "%s", "trying to send to ourself"); goto bad; } meshae = IEEE80211_MESH_AE_10; meshhdrsize = sizeof(struct ieee80211_meshcntl_ae10); } else { meshae = IEEE80211_MESH_AE_00; meshhdrsize = sizeof(struct ieee80211_meshcntl); } } else { dir = IEEE80211_FC1_DIR_FROMDS; if (!IEEE80211_ADDR_EQ(eh.ether_shost, vap->iv_myaddr)) { /* proxy group */ meshae = IEEE80211_MESH_AE_01; meshhdrsize = sizeof(struct ieee80211_meshcntl_ae01); } else { /* group */ meshae = IEEE80211_MESH_AE_00; meshhdrsize = sizeof(struct ieee80211_meshcntl); } } } else { #endif /* * 4-address frames need to be generated for: * o packets sent through a WDS vap (IEEE80211_M_WDS) * o packets sent through a vap marked for relaying * (e.g. a station operating with dynamic WDS) */ is4addr = vap->iv_opmode == IEEE80211_M_WDS || ((vap->iv_flags_ext & IEEE80211_FEXT_4ADDR) && !IEEE80211_ADDR_EQ(eh.ether_shost, vap->iv_myaddr)); if (is4addr) hdrsize += IEEE80211_ADDR_LEN; meshhdrsize = meshae = 0; #ifdef IEEE80211_SUPPORT_MESH } #endif /* * Honor driver DATAPAD requirement. */ if (ic->ic_flags & IEEE80211_F_DATAPAD) hdrspace = roundup(hdrsize, sizeof(uint32_t)); else hdrspace = hdrsize; if (__predict_true((m->m_flags & M_FF) == 0)) { /* * Normal frame. */ m = ieee80211_mbuf_adjust(vap, hdrspace + meshhdrsize, key, m); if (m == NULL) { /* NB: ieee80211_mbuf_adjust handles msgs+statistics */ goto bad; } /* NB: this could be optimized 'cuz of ieee80211_mbuf_adjust */ m_adj(m, sizeof(struct ether_header) - sizeof(struct llc)); llc = mtod(m, struct llc *); llc->llc_dsap = llc->llc_ssap = LLC_SNAP_LSAP; llc->llc_control = LLC_UI; llc->llc_snap.org_code[0] = 0; llc->llc_snap.org_code[1] = 0; llc->llc_snap.org_code[2] = 0; llc->llc_snap.ether_type = eh.ether_type; } else { #ifdef IEEE80211_SUPPORT_SUPERG /* * Aggregated frame. Check if it's for AMSDU or FF. * * XXX TODO: IEEE80211_NODE_AMSDU* isn't implemented * anywhere for some reason. But, since 11n requires * AMSDU RX, we can just assume "11n" == "AMSDU". 
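 *
 * So the M_FF path below selects ieee80211_amsdu_encap() when the
 * peer is A-MSDU transmit capable (ieee80211_amsdu_tx_ok()) and the
 * Atheros fast-frame path ieee80211_ff_encap() otherwise; both get
 * the same hdrspace + meshhdrsize reservation and tx key.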
*/ IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG, "%s: called; M_FF\n", __func__); if (ieee80211_amsdu_tx_ok(ni)) { m = ieee80211_amsdu_encap(vap, m, hdrspace + meshhdrsize, key); is_amsdu = 1; } else { m = ieee80211_ff_encap(vap, m, hdrspace + meshhdrsize, key); } if (m == NULL) #endif goto bad; } datalen = m->m_pkthdr.len; /* NB: w/o 802.11 header */ - M_PREPEND(m, hdrspace + meshhdrsize, M_NOWAIT); + M_PREPEND(m, hdrspace + meshhdrsize, IEEE80211_M_NOWAIT); if (m == NULL) { vap->iv_stats.is_tx_nobuf++; goto bad; } wh = mtod(m, struct ieee80211_frame *); wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_DATA; *(uint16_t *)wh->i_dur = 0; qos = NULL; /* NB: quiet compiler */ if (is4addr) { wh->i_fc[1] = IEEE80211_FC1_DIR_DSTODS; IEEE80211_ADDR_COPY(wh->i_addr1, ni->ni_macaddr); IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr); IEEE80211_ADDR_COPY(wh->i_addr3, eh.ether_dhost); IEEE80211_ADDR_COPY(WH4(wh)->i_addr4, eh.ether_shost); } else switch (vap->iv_opmode) { case IEEE80211_M_STA: wh->i_fc[1] = IEEE80211_FC1_DIR_TODS; IEEE80211_ADDR_COPY(wh->i_addr1, ni->ni_bssid); IEEE80211_ADDR_COPY(wh->i_addr2, eh.ether_shost); IEEE80211_ADDR_COPY(wh->i_addr3, eh.ether_dhost); break; case IEEE80211_M_IBSS: case IEEE80211_M_AHDEMO: wh->i_fc[1] = IEEE80211_FC1_DIR_NODS; IEEE80211_ADDR_COPY(wh->i_addr1, eh.ether_dhost); IEEE80211_ADDR_COPY(wh->i_addr2, eh.ether_shost); /* * NB: always use the bssid from iv_bss as the * neighbor's may be stale after an ibss merge */ IEEE80211_ADDR_COPY(wh->i_addr3, vap->iv_bss->ni_bssid); break; case IEEE80211_M_HOSTAP: wh->i_fc[1] = IEEE80211_FC1_DIR_FROMDS; IEEE80211_ADDR_COPY(wh->i_addr1, eh.ether_dhost); IEEE80211_ADDR_COPY(wh->i_addr2, ni->ni_bssid); IEEE80211_ADDR_COPY(wh->i_addr3, eh.ether_shost); break; #ifdef IEEE80211_SUPPORT_MESH case IEEE80211_M_MBSS: /* NB: offset by hdrspace to deal with DATAPAD */ mc = (struct ieee80211_meshcntl_ae10 *) (mtod(m, uint8_t *) + hdrspace); wh->i_fc[1] = dir; switch (meshae) { case IEEE80211_MESH_AE_00: /* no proxy */ mc->mc_flags = 0; if (dir == IEEE80211_FC1_DIR_DSTODS) { /* ucast */ IEEE80211_ADDR_COPY(wh->i_addr1, ni->ni_macaddr); IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr); IEEE80211_ADDR_COPY(wh->i_addr3, eh.ether_dhost); IEEE80211_ADDR_COPY(WH4(wh)->i_addr4, eh.ether_shost); qos =((struct ieee80211_qosframe_addr4 *) wh)->i_qos; } else if (dir == IEEE80211_FC1_DIR_FROMDS) { /* mcast */ IEEE80211_ADDR_COPY(wh->i_addr1, eh.ether_dhost); IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr); IEEE80211_ADDR_COPY(wh->i_addr3, eh.ether_shost); qos = ((struct ieee80211_qosframe *) wh)->i_qos; } break; case IEEE80211_MESH_AE_01: /* mcast, proxy */ wh->i_fc[1] = IEEE80211_FC1_DIR_FROMDS; IEEE80211_ADDR_COPY(wh->i_addr1, eh.ether_dhost); IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr); IEEE80211_ADDR_COPY(wh->i_addr3, vap->iv_myaddr); mc->mc_flags = 1; IEEE80211_ADDR_COPY(MC01(mc)->mc_addr4, eh.ether_shost); qos = ((struct ieee80211_qosframe *) wh)->i_qos; break; case IEEE80211_MESH_AE_10: /* ucast, proxy */ KASSERT(rt != NULL, ("route is NULL")); IEEE80211_ADDR_COPY(wh->i_addr1, rt->rt_nexthop); IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr); IEEE80211_ADDR_COPY(wh->i_addr3, rt->rt_mesh_gate); IEEE80211_ADDR_COPY(WH4(wh)->i_addr4, vap->iv_myaddr); mc->mc_flags = IEEE80211_MESH_AE_10; IEEE80211_ADDR_COPY(mc->mc_addr5, eh.ether_dhost); IEEE80211_ADDR_COPY(mc->mc_addr6, eh.ether_shost); qos = ((struct ieee80211_qosframe_addr4 *) wh)->i_qos; break; default: KASSERT(0, ("meshae %d", meshae)); break; } mc->mc_ttl = ms->ms_ttl; 
ms->ms_seq++; le32enc(mc->mc_seq, ms->ms_seq); break; #endif case IEEE80211_M_WDS: /* NB: is4addr should always be true */ default: goto bad; } if (m->m_flags & M_MORE_DATA) wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA; if (addqos) { int ac, tid; if (is4addr) { qos = ((struct ieee80211_qosframe_addr4 *) wh)->i_qos; /* NB: mesh case handled earlier */ } else if (vap->iv_opmode != IEEE80211_M_MBSS) qos = ((struct ieee80211_qosframe *) wh)->i_qos; ac = M_WME_GETAC(m); /* map from access class/queue to 11e header priorty value */ tid = WME_AC_TO_TID(ac); qos[0] = tid & IEEE80211_QOS_TID; if (ic->ic_wme.wme_wmeChanParams.cap_wmeParams[ac].wmep_noackPolicy) qos[0] |= IEEE80211_QOS_ACKPOLICY_NOACK; #ifdef IEEE80211_SUPPORT_MESH if (vap->iv_opmode == IEEE80211_M_MBSS) qos[1] = IEEE80211_QOS_MC; else #endif qos[1] = 0; wh->i_fc[0] |= IEEE80211_FC0_SUBTYPE_QOS; /* * If this is an A-MSDU then ensure we set the * relevant field. */ if (is_amsdu) qos[0] |= IEEE80211_QOS_AMSDU; /* * XXX TODO TX lock is needed for atomic updates of sequence * numbers. If the driver does it, then don't do it here; * and we don't need the TX lock held. */ if ((m->m_flags & M_AMPDU_MPDU) == 0) { /* * 802.11-2012 9.3.2.10 - * * If this is a multicast frame then we need * to ensure that the sequence number comes from * a separate seqno space and not the TID space. * * Otherwise multicast frames may actually cause * holes in the TX blockack window space and * upset various things. */ if (IEEE80211_IS_MULTICAST(wh->i_addr1)) seqno = ni->ni_txseqs[IEEE80211_NONQOS_TID]++; else seqno = ni->ni_txseqs[tid]++; /* * NB: don't assign a sequence # to potential * aggregates; we expect this happens at the * point the frame comes off any aggregation q * as otherwise we may introduce holes in the * BA sequence space and/or make window accouting * more difficult. * * XXX may want to control this with a driver * capability; this may also change when we pull * aggregation up into net80211 */ *(uint16_t *)wh->i_seq = htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT); M_SEQNO_SET(m, seqno); } else { /* NB: zero out i_seq field (for s/w encryption etc) */ *(uint16_t *)wh->i_seq = 0; } } else { /* * XXX TODO TX lock is needed for atomic updates of sequence * numbers. If the driver does it, then don't do it here; * and we don't need the TX lock held. */ seqno = ni->ni_txseqs[IEEE80211_NONQOS_TID]++; *(uint16_t *)wh->i_seq = htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT); M_SEQNO_SET(m, seqno); /* * XXX TODO: we shouldn't allow EAPOL, etc that would * be forced to be non-QoS traffic to be A-MSDU encapsulated. */ if (is_amsdu) printf("%s: XXX ERROR: is_amsdu set; not QoS!\n", __func__); } /* * Check if xmit fragmentation is required. * * If the hardware does fragmentation offload, then don't bother * doing it here. */ if (IEEE80211_CONF_FRAG_OFFLOAD(ic)) txfrag = 0; else txfrag = (m->m_pkthdr.len > vap->iv_fragthreshold && !IEEE80211_IS_MULTICAST(wh->i_addr1) && (vap->iv_caps & IEEE80211_C_TXFRAG) && (m->m_flags & (M_FF | M_AMPDU_MPDU)) == 0); if (key != NULL) { /* * IEEE 802.1X: send EAPOL frames always in the clear. * WPA/WPA2: encrypt EAPOL keys when pairwise keys are set. */ if ((m->m_flags & M_EAPOL) == 0 || ((vap->iv_flags & IEEE80211_F_WPA) && (vap->iv_opmode == IEEE80211_M_STA ? 
!IEEE80211_KEY_UNDEFINED(key) : !IEEE80211_KEY_UNDEFINED(&ni->ni_ucastkey)))) { wh->i_fc[1] |= IEEE80211_FC1_PROTECTED; if (!ieee80211_crypto_enmic(vap, key, m, txfrag)) { IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_OUTPUT, eh.ether_dhost, "%s", "enmic failed, discard frame"); vap->iv_stats.is_crypto_enmicfail++; goto bad; } } } if (txfrag && !ieee80211_fragment(vap, m, hdrsize, key != NULL ? key->wk_cipher->ic_header : 0, vap->iv_fragthreshold)) goto bad; m->m_flags |= M_ENCAP; /* mark encapsulated */ IEEE80211_NODE_STAT(ni, tx_data); if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { IEEE80211_NODE_STAT(ni, tx_mcast); m->m_flags |= M_MCAST; } else IEEE80211_NODE_STAT(ni, tx_ucast); IEEE80211_NODE_STAT_ADD(ni, tx_bytes, datalen); return m; bad: if (m != NULL) m_freem(m); return NULL; #undef WH4 #undef MC01 } void ieee80211_free_mbuf(struct mbuf *m) { struct mbuf *next; if (m == NULL) return; do { next = m->m_nextpkt; m->m_nextpkt = NULL; m_freem(m); } while ((m = next) != NULL); } /* * Fragment the frame according to the specified mtu. * The size of the 802.11 header (w/o padding) is provided * so we don't need to recalculate it. We create a new * mbuf for each fragment and chain it through m_nextpkt; * we might be able to optimize this by reusing the original * packet's mbufs but that is significantly more complicated. */ static int ieee80211_fragment(struct ieee80211vap *vap, struct mbuf *m0, u_int hdrsize, u_int ciphdrsize, u_int mtu) { struct ieee80211com *ic = vap->iv_ic; struct ieee80211_frame *wh, *whf; struct mbuf *m, *prev; u_int totalhdrsize, fragno, fragsize, off, remainder, payload; u_int hdrspace; KASSERT(m0->m_nextpkt == NULL, ("mbuf already chained?")); KASSERT(m0->m_pkthdr.len > mtu, ("pktlen %u mtu %u", m0->m_pkthdr.len, mtu)); /* * Honor driver DATAPAD requirement. */ if (ic->ic_flags & IEEE80211_F_DATAPAD) hdrspace = roundup(hdrsize, sizeof(uint32_t)); else hdrspace = hdrsize; wh = mtod(m0, struct ieee80211_frame *); /* NB: mark the first frag; it will be propagated below */ wh->i_fc[1] |= IEEE80211_FC1_MORE_FRAG; totalhdrsize = hdrspace + ciphdrsize; fragno = 1; off = mtu - ciphdrsize; remainder = m0->m_pkthdr.len - off; prev = m0; do { fragsize = MIN(totalhdrsize + remainder, mtu); - m = m_get2(fragsize, M_NOWAIT, MT_DATA, M_PKTHDR); + m = m_get2(fragsize, IEEE80211_M_NOWAIT, MT_DATA, M_PKTHDR); if (m == NULL) goto bad; /* leave room to prepend any cipher header */ m_align(m, fragsize - ciphdrsize); /* * Form the header in the fragment. Note that since * we mark the first fragment with the MORE_FRAG bit * it automatically is propagated to each fragment; we * need only clear it on the last fragment (done below). * NB: frag 1+ dont have Mesh Control field present. 
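 *
 * Worked example with illustrative numbers: a 600 byte encapsulated
 * frame with a 24 byte header, no cipher header, and a 256 byte
 * fragment threshold keeps the first 256 bytes in the original mbuf
 * and builds two additional fragments here of 256 and 136 bytes
 * (232 + 232 + 112 bytes of payload); only the last has MORE_FRAG
 * cleared below.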
*/ whf = mtod(m, struct ieee80211_frame *); memcpy(whf, wh, hdrsize); #ifdef IEEE80211_SUPPORT_MESH if (vap->iv_opmode == IEEE80211_M_MBSS) ieee80211_getqos(wh)[1] &= ~IEEE80211_QOS_MC; #endif *(uint16_t *)&whf->i_seq[0] |= htole16( (fragno & IEEE80211_SEQ_FRAG_MASK) << IEEE80211_SEQ_FRAG_SHIFT); fragno++; payload = fragsize - totalhdrsize; /* NB: destination is known to be contiguous */ m_copydata(m0, off, payload, mtod(m, uint8_t *) + hdrspace); m->m_len = hdrspace + payload; m->m_pkthdr.len = hdrspace + payload; m->m_flags |= M_FRAG; /* chain up the fragment */ prev->m_nextpkt = m; prev = m; /* deduct fragment just formed */ remainder -= payload; off += payload; } while (remainder != 0); /* set the last fragment */ m->m_flags |= M_LASTFRAG; whf->i_fc[1] &= ~IEEE80211_FC1_MORE_FRAG; /* strip first mbuf now that everything has been copied */ m_adj(m0, -(m0->m_pkthdr.len - (mtu - ciphdrsize))); m0->m_flags |= M_FIRSTFRAG | M_FRAG; vap->iv_stats.is_tx_fragframes++; vap->iv_stats.is_tx_frags += fragno-1; return 1; bad: /* reclaim fragments but leave original frame for caller to free */ ieee80211_free_mbuf(m0->m_nextpkt); m0->m_nextpkt = NULL; return 0; } /* * Add a supported rates element id to a frame. */ uint8_t * ieee80211_add_rates(uint8_t *frm, const struct ieee80211_rateset *rs) { int nrates; *frm++ = IEEE80211_ELEMID_RATES; nrates = rs->rs_nrates; if (nrates > IEEE80211_RATE_SIZE) nrates = IEEE80211_RATE_SIZE; *frm++ = nrates; memcpy(frm, rs->rs_rates, nrates); return frm + nrates; } /* * Add an extended supported rates element id to a frame. */ uint8_t * ieee80211_add_xrates(uint8_t *frm, const struct ieee80211_rateset *rs) { /* * Add an extended supported rates element if operating in 11g mode. */ if (rs->rs_nrates > IEEE80211_RATE_SIZE) { int nrates = rs->rs_nrates - IEEE80211_RATE_SIZE; *frm++ = IEEE80211_ELEMID_XRATES; *frm++ = nrates; memcpy(frm, rs->rs_rates + IEEE80211_RATE_SIZE, nrates); frm += nrates; } return frm; } /* * Add an ssid element to a frame. */ uint8_t * ieee80211_add_ssid(uint8_t *frm, const uint8_t *ssid, u_int len) { *frm++ = IEEE80211_ELEMID_SSID; *frm++ = len; memcpy(frm, ssid, len); return frm + len; } /* * Add an erp element to a frame. */ static uint8_t * ieee80211_add_erp(uint8_t *frm, struct ieee80211vap *vap) { struct ieee80211com *ic = vap->iv_ic; uint8_t erp; *frm++ = IEEE80211_ELEMID_ERP; *frm++ = 1; erp = 0; /* * TODO: This uses the global flags for now because * the per-VAP flags are fine for per-VAP, but don't * take into account which VAPs share the same channel * and which are on different channels. * * ERP and HT/VHT protection mode is a function of * how many stations are on a channel, not specifically * the VAP or global. But, until we grow that status, * the global flag will have to do. */ if (ic->ic_flags_ext & IEEE80211_FEXT_NONERP_PR) erp |= IEEE80211_ERP_NON_ERP_PRESENT; /* * TODO: same as above; these should be based not * on the vap or ic flags, but instead on a combination * of per-VAP and channels. */ if (ic->ic_flags & IEEE80211_F_USEPROT) erp |= IEEE80211_ERP_USE_PROTECTION; if (ic->ic_flags & IEEE80211_F_USEBARKER) erp |= IEEE80211_ERP_LONG_PREAMBLE; *frm++ = erp; return frm; } /* * Add a CFParams element to a frame. 
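 * The element emitted below is fixed: a length of 6 covering a CFP
 * count of 0, a CFP period of 2, and zero CFP MaxDuration and
 * CFP DurRemaining values (both in TU).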
*/ static uint8_t * ieee80211_add_cfparms(uint8_t *frm, struct ieee80211com *ic) { #define ADDSHORT(frm, v) do { \ le16enc(frm, v); \ frm += 2; \ } while (0) *frm++ = IEEE80211_ELEMID_CFPARMS; *frm++ = 6; *frm++ = 0; /* CFP count */ *frm++ = 2; /* CFP period */ ADDSHORT(frm, 0); /* CFP MaxDuration (TU) */ ADDSHORT(frm, 0); /* CFP CurRemaining (TU) */ return frm; #undef ADDSHORT } static __inline uint8_t * add_appie(uint8_t *frm, const struct ieee80211_appie *ie) { memcpy(frm, ie->ie_data, ie->ie_len); return frm + ie->ie_len; } static __inline uint8_t * add_ie(uint8_t *frm, const uint8_t *ie) { memcpy(frm, ie, 2 + ie[1]); return frm + 2 + ie[1]; } #define WME_OUI_BYTES 0x00, 0x50, 0xf2 /* * Add a WME information element to a frame. */ uint8_t * ieee80211_add_wme_info(uint8_t *frm, struct ieee80211_wme_state *wme, struct ieee80211_node *ni) { static const uint8_t oui[4] = { WME_OUI_BYTES, WME_OUI_TYPE }; struct ieee80211vap *vap = ni->ni_vap; *frm++ = IEEE80211_ELEMID_VENDOR; *frm++ = sizeof(struct ieee80211_wme_info) - 2; memcpy(frm, oui, sizeof(oui)); frm += sizeof(oui); *frm++ = WME_INFO_OUI_SUBTYPE; *frm++ = WME_VERSION; /* QoS info field depends upon operating mode */ switch (vap->iv_opmode) { case IEEE80211_M_HOSTAP: *frm = wme->wme_bssChanParams.cap_info; if (vap->iv_flags_ext & IEEE80211_FEXT_UAPSD) *frm |= WME_CAPINFO_UAPSD_EN; frm++; break; case IEEE80211_M_STA: /* * NB: UAPSD drivers must set this up in their * VAP creation method. */ *frm++ = vap->iv_uapsdinfo; break; default: *frm++ = 0; break; } return frm; } /* * Add a WME parameters element to a frame. */ static uint8_t * ieee80211_add_wme_param(uint8_t *frm, struct ieee80211_wme_state *wme, int uapsd_enable) { #define ADDSHORT(frm, v) do { \ le16enc(frm, v); \ frm += 2; \ } while (0) /* NB: this works 'cuz a param has an info at the front */ static const struct ieee80211_wme_info param = { .wme_id = IEEE80211_ELEMID_VENDOR, .wme_len = sizeof(struct ieee80211_wme_param) - 2, .wme_oui = { WME_OUI_BYTES }, .wme_type = WME_OUI_TYPE, .wme_subtype = WME_PARAM_OUI_SUBTYPE, .wme_version = WME_VERSION, }; int i; memcpy(frm, ¶m, sizeof(param)); frm += __offsetof(struct ieee80211_wme_info, wme_info); *frm = wme->wme_bssChanParams.cap_info; /* AC info */ if (uapsd_enable) *frm |= WME_CAPINFO_UAPSD_EN; frm++; *frm++ = 0; /* reserved field */ /* XXX TODO - U-APSD bits - SP, flags below */ for (i = 0; i < WME_NUM_AC; i++) { const struct wmeParams *ac = &wme->wme_bssChanParams.cap_wmeParams[i]; *frm++ = _IEEE80211_SHIFTMASK(i, WME_PARAM_ACI) | _IEEE80211_SHIFTMASK(ac->wmep_acm, WME_PARAM_ACM) | _IEEE80211_SHIFTMASK(ac->wmep_aifsn, WME_PARAM_AIFSN) ; *frm++ = _IEEE80211_SHIFTMASK(ac->wmep_logcwmax, WME_PARAM_LOGCWMAX) | _IEEE80211_SHIFTMASK(ac->wmep_logcwmin, WME_PARAM_LOGCWMIN) ; ADDSHORT(frm, ac->wmep_txopLimit); } return frm; #undef ADDSHORT } #undef WME_OUI_BYTES /* * Add an 11h Power Constraint element to a frame. */ static uint8_t * ieee80211_add_powerconstraint(uint8_t *frm, struct ieee80211vap *vap) { const struct ieee80211_channel *c = vap->iv_bss->ni_chan; /* XXX per-vap tx power limit? */ int8_t limit = vap->iv_ic->ic_txpowlimit / 2; frm[0] = IEEE80211_ELEMID_PWRCNSTR; frm[1] = 1; frm[2] = c->ic_maxregpower > limit ? c->ic_maxregpower - limit : 0; return frm + 3; } /* * Add an 11h Power Capability element to a frame. 
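 * The two payload octets are copied directly from the current
 * channel: ic_minpower followed by ic_maxpower.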
*/ static uint8_t * ieee80211_add_powercapability(uint8_t *frm, const struct ieee80211_channel *c) { frm[0] = IEEE80211_ELEMID_PWRCAP; frm[1] = 2; frm[2] = c->ic_minpower; frm[3] = c->ic_maxpower; return frm + 4; } /* * Add an 11h Supported Channels element to a frame. */ static uint8_t * ieee80211_add_supportedchannels(uint8_t *frm, struct ieee80211com *ic) { static const int ielen = 26; frm[0] = IEEE80211_ELEMID_SUPPCHAN; frm[1] = ielen; /* XXX not correct */ memcpy(frm+2, ic->ic_chan_avail, ielen); return frm + 2 + ielen; } /* * Add an 11h Quiet time element to a frame. */ static uint8_t * ieee80211_add_quiet(uint8_t *frm, struct ieee80211vap *vap, int update) { struct ieee80211_quiet_ie *quiet = (struct ieee80211_quiet_ie *) frm; quiet->quiet_ie = IEEE80211_ELEMID_QUIET; quiet->len = 6; /* * Only update every beacon interval - otherwise probe responses * would update the quiet count value. */ if (update) { if (vap->iv_quiet_count_value == 1) vap->iv_quiet_count_value = vap->iv_quiet_count; else if (vap->iv_quiet_count_value > 1) vap->iv_quiet_count_value--; } if (vap->iv_quiet_count_value == 0) { /* value 0 is reserved as per 802.11h standerd */ vap->iv_quiet_count_value = 1; } quiet->tbttcount = vap->iv_quiet_count_value; quiet->period = vap->iv_quiet_period; quiet->duration = htole16(vap->iv_quiet_duration); quiet->offset = htole16(vap->iv_quiet_offset); return frm + sizeof(*quiet); } /* * Add an 11h Channel Switch Announcement element to a frame. * Note that we use the per-vap CSA count to adjust the global * counter so we can use this routine to form probe response * frames and get the current count. */ static uint8_t * ieee80211_add_csa(uint8_t *frm, struct ieee80211vap *vap) { struct ieee80211com *ic = vap->iv_ic; struct ieee80211_csa_ie *csa = (struct ieee80211_csa_ie *) frm; csa->csa_ie = IEEE80211_ELEMID_CSA; csa->csa_len = 3; csa->csa_mode = 1; /* XXX force quiet on channel */ csa->csa_newchan = ieee80211_chan2ieee(ic, ic->ic_csa_newchan); csa->csa_count = ic->ic_csa_count - vap->iv_csa_count; return frm + sizeof(*csa); } /* * Add an 11h country information element to a frame. */ static uint8_t * ieee80211_add_countryie(uint8_t *frm, struct ieee80211com *ic) { if (ic->ic_countryie == NULL || ic->ic_countryie_chan != ic->ic_bsschan) { /* * Handle lazy construction of ie. This is done on * first use and after a channel change that requires * re-calculation. */ if (ic->ic_countryie != NULL) IEEE80211_FREE(ic->ic_countryie, M_80211_NODE_IE); ic->ic_countryie = ieee80211_alloc_countryie(ic); if (ic->ic_countryie == NULL) return frm; ic->ic_countryie_chan = ic->ic_bsschan; } return add_appie(frm, ic->ic_countryie); } uint8_t * ieee80211_add_wpa(uint8_t *frm, const struct ieee80211vap *vap) { if (vap->iv_flags & IEEE80211_F_WPA1 && vap->iv_wpa_ie != NULL) return (add_ie(frm, vap->iv_wpa_ie)); else { /* XXX else complain? */ return (frm); } } uint8_t * ieee80211_add_rsn(uint8_t *frm, const struct ieee80211vap *vap) { if (vap->iv_flags & IEEE80211_F_WPA2 && vap->iv_rsn_ie != NULL) return (add_ie(frm, vap->iv_rsn_ie)); else { /* XXX else complain? */ return (frm); } } uint8_t * ieee80211_add_qos(uint8_t *frm, const struct ieee80211_node *ni) { if (ni->ni_flags & IEEE80211_NODE_QOS) { *frm++ = IEEE80211_ELEMID_QOS; *frm++ = 1; *frm++ = 0; } return (frm); } /* * ieee80211_send_probereq(): send a probe request frame with the specified ssid * and any optional information element data; some helper functions as FW based * HW scans need some of that information passed too. 
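 *
 * A minimal sketch of the firmware-scan usage (hypothetical driver
 * code, shown only to illustrate the contract; fw_scan_with_ies and
 * sc are placeholders):
 *
 *	uint8_t *ies = NULL;
 *	uint32_t ieslen = 0;
 *
 *	error = ieee80211_probereq_ie(vap, ic, &ies, &ieslen, NULL, -1,
 *	    true);
 *	if (error == 0) {
 *		fw_scan_with_ies(sc, ies, ieslen);
 *		IEEE80211_FREE(ies, M_80211_VAP);
 *	}
 *
 * Passing an ssidlen of -1 suppresses the SSID element (the firmware
 * usually inserts its own) and alloc=true returns a buffer the caller
 * owns and must free.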
*/ static uint32_t ieee80211_probereq_ie_len(struct ieee80211vap *vap, struct ieee80211com *ic) { const struct ieee80211_rateset *rs; rs = ieee80211_get_suprates(ic, ic->ic_curchan); /* * prreq frame format * [tlv] ssid * [tlv] supported rates * [tlv] extended supported rates (if needed) * [tlv] HT cap (optional) * [tlv] VHT cap (optional) * [tlv] WPA (optional) * [tlv] user-specified ie's */ return ( 2 + IEEE80211_NWID_LEN + 2 + IEEE80211_RATE_SIZE + ((rs->rs_nrates > IEEE80211_RATE_SIZE) ? 2 + (rs->rs_nrates - IEEE80211_RATE_SIZE) : 0) + (((vap->iv_opmode == IEEE80211_M_IBSS) && (vap->iv_flags_ht & IEEE80211_FHT_HT)) ? sizeof(struct ieee80211_ie_htcap) : 0) #ifdef notyet + sizeof(struct ieee80211_ie_htinfo) /* XXX not needed? */ + sizeof(struct ieee80211_ie_vhtcap) #endif + ((vap->iv_flags & IEEE80211_F_WPA1 && vap->iv_wpa_ie != NULL) ? vap->iv_wpa_ie[1] : 0) + (vap->iv_appie_probereq != NULL ? vap->iv_appie_probereq->ie_len : 0) ); } int ieee80211_probereq_ie(struct ieee80211vap *vap, struct ieee80211com *ic, uint8_t **frmp, uint32_t *frmlen, const uint8_t *ssid, size_t ssidlen, bool alloc) { const struct ieee80211_rateset *rs; uint8_t *frm; uint32_t len; if (!alloc && (frmp == NULL || frmlen == NULL)) return (EINVAL); len = ieee80211_probereq_ie_len(vap, ic); if (!alloc && len > *frmlen) return (ENOBUFS); /* For HW scans we usually do not pass in the SSID as IE. */ if (ssidlen == -1) len -= (2 + IEEE80211_NWID_LEN); if (alloc) { - frm = malloc(len, M_80211_VAP, M_WAITOK | M_ZERO); + frm = IEEE80211_MALLOC(len, M_80211_VAP, + IEEE80211_M_WAITOK | IEEE80211_M_ZERO); *frmp = frm; *frmlen = len; } else frm = *frmp; if (ssidlen != -1) frm = ieee80211_add_ssid(frm, ssid, ssidlen); rs = ieee80211_get_suprates(ic, ic->ic_curchan); frm = ieee80211_add_rates(frm, rs); frm = ieee80211_add_xrates(frm, rs); /* * Note: we can't use bss; we don't have one yet. * * So, we should announce our capabilities * in this channel mode (2g/5g), not the * channel details itself. */ if ((vap->iv_opmode == IEEE80211_M_IBSS) && (vap->iv_flags_ht & IEEE80211_FHT_HT)) { struct ieee80211_channel *c; /* * Get the HT channel that we should try upgrading to. * If we can do 40MHz then this'll upgrade it appropriately. */ c = ieee80211_ht_adjust_channel(ic, ic->ic_curchan, vap->iv_flags_ht); frm = ieee80211_add_htcap_ch(frm, vap, c); } /* * XXX TODO: need to figure out what/how to update the * VHT channel. 
*/ #ifdef notyet if (vap->iv_flags_vht & IEEE80211_FVHT_VHT) { struct ieee80211_channel *c; c = ieee80211_ht_adjust_channel(ic, ic->ic_curchan, vap->iv_flags_ht); c = ieee80211_vht_adjust_channel(ic, c, vap->iv_flags_vht); frm = ieee80211_add_vhtcap_ch(frm, vap, c); } #endif frm = ieee80211_add_wpa(frm, vap); if (vap->iv_appie_probereq != NULL) frm = add_appie(frm, vap->iv_appie_probereq); if (!alloc) { *frmp = frm; *frmlen = len; } return (0); } int ieee80211_send_probereq(struct ieee80211_node *ni, const uint8_t sa[IEEE80211_ADDR_LEN], const uint8_t da[IEEE80211_ADDR_LEN], const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t *ssid, size_t ssidlen) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct ieee80211_node *bss; const struct ieee80211_txparam *tp; struct ieee80211_bpf_params params; struct mbuf *m; uint8_t *frm; uint32_t frmlen; int ret; bss = ieee80211_ref_node(vap->iv_bss); if (vap->iv_state == IEEE80211_S_CAC) { IEEE80211_NOTE(vap, IEEE80211_MSG_OUTPUT, ni, "block %s frame in CAC state", "probe request"); vap->iv_stats.is_tx_badstate++; ieee80211_free_node(bss); return EIO; /* XXX */ } /* * Hold a reference on the node so it doesn't go away until after * the xmit is complete all the way in the driver. On error we * will remove our reference. */ IEEE80211_DPRINTF(vap, IEEE80211_MSG_NODE, "ieee80211_ref_node (%s:%u) %p<%s> refcnt %d\n", __func__, __LINE__, ni, ether_sprintf(ni->ni_macaddr), ieee80211_node_refcnt(ni)+1); ieee80211_ref_node(ni); /* See comments above for entire frame format. */ frmlen = ieee80211_probereq_ie_len(vap, ic); m = ieee80211_getmgtframe(&frm, ic->ic_headroom + sizeof(struct ieee80211_frame), frmlen); if (m == NULL) { vap->iv_stats.is_tx_nobuf++; ieee80211_free_node(ni); ieee80211_free_node(bss); return ENOMEM; } ret = ieee80211_probereq_ie(vap, ic, &frm, &frmlen, ssid, ssidlen, false); KASSERT(ret == 0, ("%s: ieee80211_probereq_ie failed: %d\n", __func__, ret)); m->m_pkthdr.len = m->m_len = frm - mtod(m, uint8_t *); KASSERT(M_LEADINGSPACE(m) >= sizeof(struct ieee80211_frame), ("leading space %zd", M_LEADINGSPACE(m))); - M_PREPEND(m, sizeof(struct ieee80211_frame), M_NOWAIT); + M_PREPEND(m, sizeof(struct ieee80211_frame), IEEE80211_M_NOWAIT); if (m == NULL) { /* NB: cannot happen */ ieee80211_free_node(ni); ieee80211_free_node(bss); return ENOMEM; } IEEE80211_TX_LOCK(ic); ieee80211_send_setup(ni, m, IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_PROBE_REQ, IEEE80211_NONQOS_TID, sa, da, bssid); /* XXX power management? */ m->m_flags |= M_ENCAP; /* mark encapsulated */ M_WME_SETAC(m, WME_AC_BE); IEEE80211_NODE_STAT(ni, tx_probereq); IEEE80211_NODE_STAT(ni, tx_mgmt); IEEE80211_DPRINTF(vap, IEEE80211_MSG_DEBUG | IEEE80211_MSG_DUMPPKTS, "send probe req on channel %u bssid %s sa %6D da %6D ssid \"%.*s\"\n", ieee80211_chan2ieee(ic, ic->ic_curchan), ether_sprintf(bssid), sa, ":", da, ":", ssidlen, ssid); memset(¶ms, 0, sizeof(params)); params.ibp_pri = M_WME_GETAC(m); tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)]; params.ibp_rate0 = tp->mgmtrate; if (IEEE80211_IS_MULTICAST(da)) { params.ibp_flags |= IEEE80211_BPF_NOACK; params.ibp_try0 = 1; } else params.ibp_try0 = tp->maxretry; params.ibp_power = ni->ni_txpower; ret = ieee80211_raw_output(vap, ni, m, ¶ms); IEEE80211_TX_UNLOCK(ic); ieee80211_free_node(bss); return (ret); } /* * Calculate capability information for mgt frames. 
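 * For example (illustrative): a hostap vap with privacy enabled on a
 * 2GHz channel using short preamble and short slot time advertises
 * ESS | PRIVACY | SHORT_PREAMBLE | SHORT_SLOTTIME (0x0431); on a
 * 5GHz channel the short preamble bit is omitted and, with 11h
 * enabled, SPECTRUM_MGMT is set.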
*/ uint16_t ieee80211_getcapinfo(struct ieee80211vap *vap, struct ieee80211_channel *chan) { uint16_t capinfo; KASSERT(vap->iv_opmode != IEEE80211_M_STA, ("station mode")); if (vap->iv_opmode == IEEE80211_M_HOSTAP) capinfo = IEEE80211_CAPINFO_ESS; else if (vap->iv_opmode == IEEE80211_M_IBSS) capinfo = IEEE80211_CAPINFO_IBSS; else capinfo = 0; if (vap->iv_flags & IEEE80211_F_PRIVACY) capinfo |= IEEE80211_CAPINFO_PRIVACY; if ((vap->iv_flags & IEEE80211_F_SHPREAMBLE) && IEEE80211_IS_CHAN_2GHZ(chan)) capinfo |= IEEE80211_CAPINFO_SHORT_PREAMBLE; if (vap->iv_flags & IEEE80211_F_SHSLOT) capinfo |= IEEE80211_CAPINFO_SHORT_SLOTTIME; if (IEEE80211_IS_CHAN_5GHZ(chan) && (vap->iv_flags & IEEE80211_F_DOTH)) capinfo |= IEEE80211_CAPINFO_SPECTRUM_MGMT; return capinfo; } /* * Send a management frame. The node is for the destination (or ic_bss * when in station mode). Nodes other than ic_bss have their reference * count bumped to reflect our use for an indeterminant time. */ int ieee80211_send_mgmt(struct ieee80211_node *ni, int type, int arg) { #define HTFLAGS (IEEE80211_NODE_HT | IEEE80211_NODE_HTCOMPAT) #define senderr(_x, _v) do { vap->iv_stats._v++; ret = _x; goto bad; } while (0) struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct ieee80211_node *bss = vap->iv_bss; struct ieee80211_bpf_params params; struct mbuf *m; uint8_t *frm; uint16_t capinfo; int has_challenge, is_shared_key, ret, status; KASSERT(ni != NULL, ("null node")); /* * Hold a reference on the node so it doesn't go away until after * the xmit is complete all the way in the driver. On error we * will remove our reference. */ IEEE80211_DPRINTF(vap, IEEE80211_MSG_NODE, "ieee80211_ref_node (%s:%u) %p<%s> refcnt %d\n", __func__, __LINE__, ni, ether_sprintf(ni->ni_macaddr), ieee80211_node_refcnt(ni)+1); ieee80211_ref_node(ni); memset(¶ms, 0, sizeof(params)); switch (type) { case IEEE80211_FC0_SUBTYPE_AUTH: status = arg >> 16; arg &= 0xffff; has_challenge = ((arg == IEEE80211_AUTH_SHARED_CHALLENGE || arg == IEEE80211_AUTH_SHARED_RESPONSE) && ni->ni_challenge != NULL); /* * Deduce whether we're doing open authentication or * shared key authentication. We do the latter if * we're in the middle of a shared key authentication * handshake or if we're initiating an authentication * request and configured to use shared key. */ is_shared_key = has_challenge || arg >= IEEE80211_AUTH_SHARED_RESPONSE || (arg == IEEE80211_AUTH_SHARED_REQUEST && bss->ni_authmode == IEEE80211_AUTH_SHARED); m = ieee80211_getmgtframe(&frm, ic->ic_headroom + sizeof(struct ieee80211_frame), 3 * sizeof(uint16_t) + (has_challenge && status == IEEE80211_STATUS_SUCCESS ? sizeof(uint16_t)+IEEE80211_CHALLENGE_LEN : 0)); if (m == NULL) senderr(ENOMEM, is_tx_nobuf); ((uint16_t *)frm)[0] = (is_shared_key) ? 
htole16(IEEE80211_AUTH_ALG_SHARED) : htole16(IEEE80211_AUTH_ALG_OPEN); ((uint16_t *)frm)[1] = htole16(arg); /* sequence number */ ((uint16_t *)frm)[2] = htole16(status);/* status */ if (has_challenge && status == IEEE80211_STATUS_SUCCESS) { ((uint16_t *)frm)[3] = htole16((IEEE80211_CHALLENGE_LEN << 8) | IEEE80211_ELEMID_CHALLENGE); memcpy(&((uint16_t *)frm)[4], ni->ni_challenge, IEEE80211_CHALLENGE_LEN); m->m_pkthdr.len = m->m_len = 4 * sizeof(uint16_t) + IEEE80211_CHALLENGE_LEN; if (arg == IEEE80211_AUTH_SHARED_RESPONSE) { IEEE80211_NOTE(vap, IEEE80211_MSG_AUTH, ni, "request encrypt frame (%s)", __func__); /* mark frame for encryption */ params.ibp_flags |= IEEE80211_BPF_CRYPTO; } } else m->m_pkthdr.len = m->m_len = 3 * sizeof(uint16_t); /* XXX not right for shared key */ if (status == IEEE80211_STATUS_SUCCESS) IEEE80211_NODE_STAT(ni, tx_auth); else IEEE80211_NODE_STAT(ni, tx_auth_fail); if (vap->iv_opmode == IEEE80211_M_STA) ieee80211_add_callback(m, ieee80211_tx_mgt_cb, (void *) vap->iv_state); break; case IEEE80211_FC0_SUBTYPE_DEAUTH: IEEE80211_NOTE(vap, IEEE80211_MSG_AUTH, ni, "send station deauthenticate (reason: %d (%s))", arg, ieee80211_reason_to_string(arg)); m = ieee80211_getmgtframe(&frm, ic->ic_headroom + sizeof(struct ieee80211_frame), sizeof(uint16_t)); if (m == NULL) senderr(ENOMEM, is_tx_nobuf); *(uint16_t *)frm = htole16(arg); /* reason */ m->m_pkthdr.len = m->m_len = sizeof(uint16_t); IEEE80211_NODE_STAT(ni, tx_deauth); IEEE80211_NODE_STAT_SET(ni, tx_deauth_code, arg); ieee80211_node_unauthorize(ni); /* port closed */ break; case IEEE80211_FC0_SUBTYPE_ASSOC_REQ: case IEEE80211_FC0_SUBTYPE_REASSOC_REQ: /* * asreq frame format * [2] capability information * [2] listen interval * [6*] current AP address (reassoc only) * [tlv] ssid * [tlv] supported rates * [tlv] extended supported rates * [4] power capability (optional) * [28] supported channels (optional) * [tlv] HT capabilities * [tlv] VHT capabilities * [tlv] WME (optional) * [tlv] Vendor OUI HT capabilities (optional) * [tlv] Atheros capabilities (if negotiated) * [tlv] AppIE's (optional) */ m = ieee80211_getmgtframe(&frm, ic->ic_headroom + sizeof(struct ieee80211_frame), sizeof(uint16_t) + sizeof(uint16_t) + IEEE80211_ADDR_LEN + 2 + IEEE80211_NWID_LEN + 2 + IEEE80211_RATE_SIZE + 2 + (IEEE80211_RATE_MAXSIZE - IEEE80211_RATE_SIZE) + 4 + 2 + 26 + sizeof(struct ieee80211_wme_info) + sizeof(struct ieee80211_ie_htcap) + sizeof(struct ieee80211_ie_vhtcap) + 4 + sizeof(struct ieee80211_ie_htcap) #ifdef IEEE80211_SUPPORT_SUPERG + sizeof(struct ieee80211_ath_ie) #endif + (vap->iv_appie_wpa != NULL ? vap->iv_appie_wpa->ie_len : 0) + (vap->iv_appie_assocreq != NULL ? vap->iv_appie_assocreq->ie_len : 0) ); if (m == NULL) senderr(ENOMEM, is_tx_nobuf); KASSERT(vap->iv_opmode == IEEE80211_M_STA, ("wrong mode %u", vap->iv_opmode)); capinfo = IEEE80211_CAPINFO_ESS; if (vap->iv_flags & IEEE80211_F_PRIVACY) capinfo |= IEEE80211_CAPINFO_PRIVACY; /* * NB: Some 11a AP's reject the request when * short preamble is set. 
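 *
 * The listen interval written a few lines below is expressed in
 * beacon intervals, howmany(ic_lintval, ni_intval): e.g. (purely
 * illustrative) a 500 TU listen interval against 100 TU beacons is
 * advertised as 5.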
*/ if ((vap->iv_flags & IEEE80211_F_SHPREAMBLE) && IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) capinfo |= IEEE80211_CAPINFO_SHORT_PREAMBLE; if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan) && (ic->ic_caps & IEEE80211_C_SHSLOT)) capinfo |= IEEE80211_CAPINFO_SHORT_SLOTTIME; if ((ni->ni_capinfo & IEEE80211_CAPINFO_SPECTRUM_MGMT) && (vap->iv_flags & IEEE80211_F_DOTH)) capinfo |= IEEE80211_CAPINFO_SPECTRUM_MGMT; *(uint16_t *)frm = htole16(capinfo); frm += 2; KASSERT(bss->ni_intval != 0, ("beacon interval is zero!")); *(uint16_t *)frm = htole16(howmany(ic->ic_lintval, bss->ni_intval)); frm += 2; if (type == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) { IEEE80211_ADDR_COPY(frm, bss->ni_bssid); frm += IEEE80211_ADDR_LEN; } frm = ieee80211_add_ssid(frm, ni->ni_essid, ni->ni_esslen); frm = ieee80211_add_rates(frm, &ni->ni_rates); frm = ieee80211_add_rsn(frm, vap); frm = ieee80211_add_xrates(frm, &ni->ni_rates); if (capinfo & IEEE80211_CAPINFO_SPECTRUM_MGMT) { frm = ieee80211_add_powercapability(frm, ic->ic_curchan); frm = ieee80211_add_supportedchannels(frm, ic); } /* * Check the channel - we may be using an 11n NIC with an * 11n capable station, but we're configured to be an 11b * channel. */ if ((vap->iv_flags_ht & IEEE80211_FHT_HT) && IEEE80211_IS_CHAN_HT(ni->ni_chan) && ni->ni_ies.htcap_ie != NULL && ni->ni_ies.htcap_ie[0] == IEEE80211_ELEMID_HTCAP) { frm = ieee80211_add_htcap(frm, ni); } if ((vap->iv_flags_vht & IEEE80211_FVHT_VHT) && IEEE80211_IS_CHAN_VHT(ni->ni_chan) && ni->ni_ies.vhtcap_ie != NULL && ni->ni_ies.vhtcap_ie[0] == IEEE80211_ELEMID_VHT_CAP) { frm = ieee80211_add_vhtcap(frm, ni); } frm = ieee80211_add_wpa(frm, vap); if ((ic->ic_flags & IEEE80211_F_WME) && ni->ni_ies.wme_ie != NULL) frm = ieee80211_add_wme_info(frm, &ic->ic_wme, ni); /* * Same deal - only send HT info if we're on an 11n * capable channel. */ if ((vap->iv_flags_ht & IEEE80211_FHT_HT) && IEEE80211_IS_CHAN_HT(ni->ni_chan) && ni->ni_ies.htcap_ie != NULL && ni->ni_ies.htcap_ie[0] == IEEE80211_ELEMID_VENDOR) { frm = ieee80211_add_htcap_vendor(frm, ni); } #ifdef IEEE80211_SUPPORT_SUPERG if (IEEE80211_ATH_CAP(vap, ni, IEEE80211_F_ATHEROS)) { frm = ieee80211_add_ath(frm, IEEE80211_ATH_CAP(vap, ni, IEEE80211_F_ATHEROS), ((vap->iv_flags & IEEE80211_F_WPA) == 0 && ni->ni_authmode != IEEE80211_AUTH_8021X) ? 
vap->iv_def_txkey : IEEE80211_KEYIX_NONE); } #endif /* IEEE80211_SUPPORT_SUPERG */ if (vap->iv_appie_assocreq != NULL) frm = add_appie(frm, vap->iv_appie_assocreq); m->m_pkthdr.len = m->m_len = frm - mtod(m, uint8_t *); ieee80211_add_callback(m, ieee80211_tx_mgt_cb, (void *) vap->iv_state); break; case IEEE80211_FC0_SUBTYPE_ASSOC_RESP: case IEEE80211_FC0_SUBTYPE_REASSOC_RESP: /* * asresp frame format * [2] capability information * [2] status * [2] association ID * [tlv] supported rates * [tlv] extended supported rates * [tlv] HT capabilities (standard, if STA enabled) * [tlv] HT information (standard, if STA enabled) * [tlv] VHT capabilities (standard, if STA enabled) * [tlv] VHT information (standard, if STA enabled) * [tlv] WME (if configured and STA enabled) * [tlv] HT capabilities (vendor OUI, if STA enabled) * [tlv] HT information (vendor OUI, if STA enabled) * [tlv] Atheros capabilities (if STA enabled) * [tlv] AppIE's (optional) */ m = ieee80211_getmgtframe(&frm, ic->ic_headroom + sizeof(struct ieee80211_frame), sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint16_t) + 2 + IEEE80211_RATE_SIZE + 2 + (IEEE80211_RATE_MAXSIZE - IEEE80211_RATE_SIZE) + sizeof(struct ieee80211_ie_htcap) + 4 + sizeof(struct ieee80211_ie_htinfo) + 4 + sizeof(struct ieee80211_ie_vhtcap) + sizeof(struct ieee80211_ie_vht_operation) + sizeof(struct ieee80211_wme_param) #ifdef IEEE80211_SUPPORT_SUPERG + sizeof(struct ieee80211_ath_ie) #endif + (vap->iv_appie_assocresp != NULL ? vap->iv_appie_assocresp->ie_len : 0) ); if (m == NULL) senderr(ENOMEM, is_tx_nobuf); capinfo = ieee80211_getcapinfo(vap, bss->ni_chan); *(uint16_t *)frm = htole16(capinfo); frm += 2; *(uint16_t *)frm = htole16(arg); /* status */ frm += 2; if (arg == IEEE80211_STATUS_SUCCESS) { *(uint16_t *)frm = htole16(ni->ni_associd); IEEE80211_NODE_STAT(ni, tx_assoc); } else IEEE80211_NODE_STAT(ni, tx_assoc_fail); frm += 2; frm = ieee80211_add_rates(frm, &ni->ni_rates); frm = ieee80211_add_xrates(frm, &ni->ni_rates); /* NB: respond according to what we received */ if ((ni->ni_flags & HTFLAGS) == IEEE80211_NODE_HT) { frm = ieee80211_add_htcap(frm, ni); frm = ieee80211_add_htinfo(frm, ni); } if ((vap->iv_flags & IEEE80211_F_WME) && ni->ni_ies.wme_ie != NULL) frm = ieee80211_add_wme_param(frm, &ic->ic_wme, !! (vap->iv_flags_ext & IEEE80211_FEXT_UAPSD)); if ((ni->ni_flags & HTFLAGS) == HTFLAGS) { frm = ieee80211_add_htcap_vendor(frm, ni); frm = ieee80211_add_htinfo_vendor(frm, ni); } if (ni->ni_flags & IEEE80211_NODE_VHT) { frm = ieee80211_add_vhtcap(frm, ni); frm = ieee80211_add_vhtinfo(frm, ni); } #ifdef IEEE80211_SUPPORT_SUPERG if (IEEE80211_ATH_CAP(vap, ni, IEEE80211_F_ATHEROS)) frm = ieee80211_add_ath(frm, IEEE80211_ATH_CAP(vap, ni, IEEE80211_F_ATHEROS), ((vap->iv_flags & IEEE80211_F_WPA) == 0 && ni->ni_authmode != IEEE80211_AUTH_8021X) ? 
vap->iv_def_txkey : IEEE80211_KEYIX_NONE); #endif /* IEEE80211_SUPPORT_SUPERG */ if (vap->iv_appie_assocresp != NULL) frm = add_appie(frm, vap->iv_appie_assocresp); m->m_pkthdr.len = m->m_len = frm - mtod(m, uint8_t *); break; case IEEE80211_FC0_SUBTYPE_DISASSOC: IEEE80211_NOTE(vap, IEEE80211_MSG_ASSOC, ni, "send station disassociate (reason: %d (%s))", arg, ieee80211_reason_to_string(arg)); m = ieee80211_getmgtframe(&frm, ic->ic_headroom + sizeof(struct ieee80211_frame), sizeof(uint16_t)); if (m == NULL) senderr(ENOMEM, is_tx_nobuf); *(uint16_t *)frm = htole16(arg); /* reason */ m->m_pkthdr.len = m->m_len = sizeof(uint16_t); IEEE80211_NODE_STAT(ni, tx_disassoc); IEEE80211_NODE_STAT_SET(ni, tx_disassoc_code, arg); break; default: IEEE80211_NOTE(vap, IEEE80211_MSG_ANY, ni, "invalid mgmt frame type %u", type); senderr(EINVAL, is_tx_unknownmgt); /* NOTREACHED */ } /* NB: force non-ProbeResp frames to the highest queue */ params.ibp_pri = WME_AC_VO; params.ibp_rate0 = bss->ni_txparms->mgmtrate; /* NB: we know all frames are unicast */ params.ibp_try0 = bss->ni_txparms->maxretry; params.ibp_power = bss->ni_txpower; return ieee80211_mgmt_output(ni, m, type, ¶ms); bad: ieee80211_free_node(ni); return ret; #undef senderr #undef HTFLAGS } /* * Return an mbuf with a probe response frame in it. * Space is left to prepend and 802.11 header at the * front but it's left to the caller to fill in. */ struct mbuf * ieee80211_alloc_proberesp(struct ieee80211_node *bss, int legacy) { struct ieee80211vap *vap = bss->ni_vap; struct ieee80211com *ic = bss->ni_ic; const struct ieee80211_rateset *rs; struct mbuf *m; uint16_t capinfo; uint8_t *frm; /* * probe response frame format * [8] time stamp * [2] beacon interval * [2] cabability information * [tlv] ssid * [tlv] supported rates * [tlv] parameter set (FH/DS) * [tlv] parameter set (IBSS) * [tlv] country (optional) * [3] power control (optional) * [5] channel switch announcement (CSA) (optional) * [tlv] extended rate phy (ERP) * [tlv] extended supported rates * [tlv] RSN (optional) * [tlv] HT capabilities * [tlv] HT information * [tlv] VHT capabilities * [tlv] VHT information * [tlv] WPA (optional) * [tlv] WME (optional) * [tlv] Vendor OUI HT capabilities (optional) * [tlv] Vendor OUI HT information (optional) * [tlv] Atheros capabilities * [tlv] AppIE's (optional) * [tlv] Mesh ID (MBSS) * [tlv] Mesh Conf (MBSS) */ m = ieee80211_getmgtframe(&frm, ic->ic_headroom + sizeof(struct ieee80211_frame), 8 + sizeof(uint16_t) + sizeof(uint16_t) + 2 + IEEE80211_NWID_LEN + 2 + IEEE80211_RATE_SIZE + 7 /* max(7,3) */ + IEEE80211_COUNTRY_MAX_SIZE + 3 + sizeof(struct ieee80211_csa_ie) + sizeof(struct ieee80211_quiet_ie) + 3 + 2 + (IEEE80211_RATE_MAXSIZE - IEEE80211_RATE_SIZE) + sizeof(struct ieee80211_ie_wpa) + sizeof(struct ieee80211_ie_htcap) + sizeof(struct ieee80211_ie_htinfo) + sizeof(struct ieee80211_ie_wpa) + sizeof(struct ieee80211_wme_param) + 4 + sizeof(struct ieee80211_ie_htcap) + 4 + sizeof(struct ieee80211_ie_htinfo) + sizeof(struct ieee80211_ie_vhtcap) + sizeof(struct ieee80211_ie_vht_operation) #ifdef IEEE80211_SUPPORT_SUPERG + sizeof(struct ieee80211_ath_ie) #endif #ifdef IEEE80211_SUPPORT_MESH + 2 + IEEE80211_MESHID_LEN + sizeof(struct ieee80211_meshconf_ie) #endif + (vap->iv_appie_proberesp != NULL ? 
vap->iv_appie_proberesp->ie_len : 0) ); if (m == NULL) { vap->iv_stats.is_tx_nobuf++; return NULL; } memset(frm, 0, 8); /* timestamp should be filled later */ frm += 8; *(uint16_t *)frm = htole16(bss->ni_intval); frm += 2; capinfo = ieee80211_getcapinfo(vap, bss->ni_chan); *(uint16_t *)frm = htole16(capinfo); frm += 2; frm = ieee80211_add_ssid(frm, bss->ni_essid, bss->ni_esslen); rs = ieee80211_get_suprates(ic, bss->ni_chan); frm = ieee80211_add_rates(frm, rs); if (IEEE80211_IS_CHAN_FHSS(bss->ni_chan)) { *frm++ = IEEE80211_ELEMID_FHPARMS; *frm++ = 5; *frm++ = bss->ni_fhdwell & 0x00ff; *frm++ = (bss->ni_fhdwell >> 8) & 0x00ff; *frm++ = IEEE80211_FH_CHANSET( ieee80211_chan2ieee(ic, bss->ni_chan)); *frm++ = IEEE80211_FH_CHANPAT( ieee80211_chan2ieee(ic, bss->ni_chan)); *frm++ = bss->ni_fhindex; } else { *frm++ = IEEE80211_ELEMID_DSPARMS; *frm++ = 1; *frm++ = ieee80211_chan2ieee(ic, bss->ni_chan); } if (vap->iv_opmode == IEEE80211_M_IBSS) { *frm++ = IEEE80211_ELEMID_IBSSPARMS; *frm++ = 2; *frm++ = 0; *frm++ = 0; /* TODO: ATIM window */ } if ((vap->iv_flags & IEEE80211_F_DOTH) || (vap->iv_flags_ext & IEEE80211_FEXT_DOTD)) frm = ieee80211_add_countryie(frm, ic); if (vap->iv_flags & IEEE80211_F_DOTH) { if (IEEE80211_IS_CHAN_5GHZ(bss->ni_chan)) frm = ieee80211_add_powerconstraint(frm, vap); if (ic->ic_flags & IEEE80211_F_CSAPENDING) frm = ieee80211_add_csa(frm, vap); } if (vap->iv_flags & IEEE80211_F_DOTH) { if (IEEE80211_IS_CHAN_DFS(ic->ic_bsschan) && (vap->iv_flags_ext & IEEE80211_FEXT_DFS)) { if (vap->iv_quiet) frm = ieee80211_add_quiet(frm, vap, 0); } } if (IEEE80211_IS_CHAN_ANYG(bss->ni_chan)) frm = ieee80211_add_erp(frm, vap); frm = ieee80211_add_xrates(frm, rs); frm = ieee80211_add_rsn(frm, vap); /* * NB: legacy 11b clients do not get certain ie's. * The caller identifies such clients by passing * a token in legacy to us. Could expand this to be * any legacy client for stuff like HT ie's. */ if (IEEE80211_IS_CHAN_HT(bss->ni_chan) && legacy != IEEE80211_SEND_LEGACY_11B) { frm = ieee80211_add_htcap(frm, bss); frm = ieee80211_add_htinfo(frm, bss); } if (IEEE80211_IS_CHAN_VHT(bss->ni_chan) && legacy != IEEE80211_SEND_LEGACY_11B) { frm = ieee80211_add_vhtcap(frm, bss); frm = ieee80211_add_vhtinfo(frm, bss); } frm = ieee80211_add_wpa(frm, vap); if (vap->iv_flags & IEEE80211_F_WME) frm = ieee80211_add_wme_param(frm, &ic->ic_wme, !! (vap->iv_flags_ext & IEEE80211_FEXT_UAPSD)); if (IEEE80211_IS_CHAN_HT(bss->ni_chan) && (vap->iv_flags_ht & IEEE80211_FHT_HTCOMPAT) && legacy != IEEE80211_SEND_LEGACY_11B) { frm = ieee80211_add_htcap_vendor(frm, bss); frm = ieee80211_add_htinfo_vendor(frm, bss); } #ifdef IEEE80211_SUPPORT_SUPERG if ((vap->iv_flags & IEEE80211_F_ATHEROS) && legacy != IEEE80211_SEND_LEGACY_11B) frm = ieee80211_add_athcaps(frm, bss); #endif if (vap->iv_appie_proberesp != NULL) frm = add_appie(frm, vap->iv_appie_proberesp); #ifdef IEEE80211_SUPPORT_MESH if (vap->iv_opmode == IEEE80211_M_MBSS) { frm = ieee80211_add_meshid(frm, vap); frm = ieee80211_add_meshconf(frm, vap); } #endif m->m_pkthdr.len = m->m_len = frm - mtod(m, uint8_t *); return m; } /* * Send a probe response frame to the specified mac address. * This does not go through the normal mgt frame api so we * can specify the destination address and re-use the bss node * for the sta reference. 
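 *
 * Typical (illustrative) use is from the probe request input path,
 * answering the requesting station directly; legacy_client below is
 * a placeholder for whatever 11b-client test the caller applies:
 *
 *	ieee80211_send_proberesp(vap, wh->i_addr2,
 *	    legacy_client ? IEEE80211_SEND_LEGACY_11B : 0);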
*/ int ieee80211_send_proberesp(struct ieee80211vap *vap, const uint8_t da[IEEE80211_ADDR_LEN], int legacy) { struct ieee80211_node *bss = vap->iv_bss; struct ieee80211com *ic = vap->iv_ic; struct mbuf *m; int ret; if (vap->iv_state == IEEE80211_S_CAC) { IEEE80211_NOTE(vap, IEEE80211_MSG_OUTPUT, bss, "block %s frame in CAC state", "probe response"); vap->iv_stats.is_tx_badstate++; return EIO; /* XXX */ } /* * Hold a reference on the node so it doesn't go away until after * the xmit is complete all the way in the driver. On error we * will remove our reference. */ IEEE80211_DPRINTF(vap, IEEE80211_MSG_NODE, "ieee80211_ref_node (%s:%u) %p<%s> refcnt %d\n", __func__, __LINE__, bss, ether_sprintf(bss->ni_macaddr), ieee80211_node_refcnt(bss)+1); ieee80211_ref_node(bss); m = ieee80211_alloc_proberesp(bss, legacy); if (m == NULL) { ieee80211_free_node(bss); return ENOMEM; } - M_PREPEND(m, sizeof(struct ieee80211_frame), M_NOWAIT); + M_PREPEND(m, sizeof(struct ieee80211_frame), IEEE80211_M_NOWAIT); KASSERT(m != NULL, ("no room for header")); IEEE80211_TX_LOCK(ic); ieee80211_send_setup(bss, m, IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_PROBE_RESP, IEEE80211_NONQOS_TID, vap->iv_myaddr, da, bss->ni_bssid); /* XXX power management? */ m->m_flags |= M_ENCAP; /* mark encapsulated */ M_WME_SETAC(m, WME_AC_BE); IEEE80211_DPRINTF(vap, IEEE80211_MSG_DEBUG | IEEE80211_MSG_DUMPPKTS, "send probe resp on channel %u to %s%s\n", ieee80211_chan2ieee(ic, ic->ic_curchan), ether_sprintf(da), legacy ? " " : ""); IEEE80211_NODE_STAT(bss, tx_mgmt); ret = ieee80211_raw_output(vap, bss, m, NULL); IEEE80211_TX_UNLOCK(ic); return (ret); } /* * Allocate and build a RTS (Request To Send) control frame. */ struct mbuf * ieee80211_alloc_rts(struct ieee80211com *ic, const uint8_t ra[IEEE80211_ADDR_LEN], const uint8_t ta[IEEE80211_ADDR_LEN], uint16_t dur) { struct ieee80211_frame_rts *rts; struct mbuf *m; /* XXX honor ic_headroom */ - m = m_gethdr(M_NOWAIT, MT_DATA); + m = m_gethdr(IEEE80211_M_NOWAIT, MT_DATA); if (m != NULL) { rts = mtod(m, struct ieee80211_frame_rts *); rts->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_CTL | IEEE80211_FC0_SUBTYPE_RTS; rts->i_fc[1] = IEEE80211_FC1_DIR_NODS; *(u_int16_t *)rts->i_dur = htole16(dur); IEEE80211_ADDR_COPY(rts->i_ra, ra); IEEE80211_ADDR_COPY(rts->i_ta, ta); m->m_pkthdr.len = m->m_len = sizeof(struct ieee80211_frame_rts); } return m; } /* * Allocate and build a CTS (Clear To Send) control frame. */ struct mbuf * ieee80211_alloc_cts(struct ieee80211com *ic, const uint8_t ra[IEEE80211_ADDR_LEN], uint16_t dur) { struct ieee80211_frame_cts *cts; struct mbuf *m; /* XXX honor ic_headroom */ - m = m_gethdr(M_NOWAIT, MT_DATA); + m = m_gethdr(IEEE80211_M_NOWAIT, MT_DATA); if (m != NULL) { cts = mtod(m, struct ieee80211_frame_cts *); cts->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_CTL | IEEE80211_FC0_SUBTYPE_CTS; cts->i_fc[1] = IEEE80211_FC1_DIR_NODS; *(u_int16_t *)cts->i_dur = htole16(dur); IEEE80211_ADDR_COPY(cts->i_ra, ra); m->m_pkthdr.len = m->m_len = sizeof(struct ieee80211_frame_cts); } return m; } /* * Wrapper for CTS/RTS frame allocation. 
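 * The duration programmed into the protection frame below covers the
 * protected frame plus one ACK at the given rate, with one extra
 * ACK-sized slot (the CTS) added when full RTS/CTS protection is
 * requested; callers are expected to queue the returned mbuf ahead
 * of the data frame it protects.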
*/ struct mbuf * ieee80211_alloc_prot(struct ieee80211_node *ni, const struct mbuf *m, uint8_t rate, int prot) { struct ieee80211com *ic = ni->ni_ic; struct ieee80211vap *vap = ni->ni_vap; const struct ieee80211_frame *wh; struct mbuf *mprot; uint16_t dur; int pktlen, isshort; KASSERT(prot == IEEE80211_PROT_RTSCTS || prot == IEEE80211_PROT_CTSONLY, ("wrong protection type %d", prot)); wh = mtod(m, const struct ieee80211_frame *); pktlen = m->m_pkthdr.len + IEEE80211_CRC_LEN; isshort = (vap->iv_flags & IEEE80211_F_SHPREAMBLE) != 0; dur = ieee80211_compute_duration(ic->ic_rt, pktlen, rate, isshort) + ieee80211_ack_duration(ic->ic_rt, rate, isshort); if (prot == IEEE80211_PROT_RTSCTS) { /* NB: CTS is the same size as an ACK */ dur += ieee80211_ack_duration(ic->ic_rt, rate, isshort); mprot = ieee80211_alloc_rts(ic, wh->i_addr1, wh->i_addr2, dur); } else mprot = ieee80211_alloc_cts(ic, vap->iv_myaddr, dur); return (mprot); } static void ieee80211_tx_mgt_timeout(void *arg) { struct ieee80211vap *vap = arg; IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE | IEEE80211_MSG_DEBUG, "vap %p mode %s state %s flags %#x & %#x\n", vap, ieee80211_opmode_name[vap->iv_opmode], ieee80211_state_name[vap->iv_state], vap->iv_ic->ic_flags, IEEE80211_F_SCAN); IEEE80211_LOCK(vap->iv_ic); if (vap->iv_state != IEEE80211_S_INIT && (vap->iv_ic->ic_flags & IEEE80211_F_SCAN) == 0) { /* * NB: it's safe to specify a timeout as the reason here; * it'll only be used in the right state. */ ieee80211_new_state_locked(vap, IEEE80211_S_SCAN, IEEE80211_SCAN_FAIL_TIMEOUT); } IEEE80211_UNLOCK(vap->iv_ic); } /* * This is the callback set on net80211-sourced transmitted * authentication request frames. * * This does a couple of things: * * + If the frame transmitted was a success, it schedules a future * event which will transition the interface to scan. * If a state transition _then_ occurs before that event occurs, * said state transition will cancel this callout. * * + If the frame transmit was a failure, it immediately schedules * the transition back to scan. */ static void ieee80211_tx_mgt_cb(struct ieee80211_node *ni, void *arg, int status) { struct ieee80211vap *vap = ni->ni_vap; enum ieee80211_state ostate = (enum ieee80211_state)(uintptr_t)arg; /* * Frame transmit completed; arrange timer callback. If * transmit was successfully we wait for response. Otherwise * we arrange an immediate callback instead of doing the * callback directly since we don't know what state the driver * is in (e.g. what locks it is holding). This work should * not be too time-critical and not happen too often so the * added overhead is acceptable. * * XXX what happens if !acked but response shows up before callback? */ if (vap->iv_state == ostate) { IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE | IEEE80211_MSG_DEBUG, "ni %p mode %s state %s arg %p status %d\n", ni, ieee80211_opmode_name[vap->iv_opmode], ieee80211_state_name[vap->iv_state], arg, status); callout_reset(&vap->iv_mgtsend, status == 0 ? IEEE80211_TRANS_WAIT*hz : 0, ieee80211_tx_mgt_timeout, vap); } } static void ieee80211_beacon_construct(struct mbuf *m, uint8_t *frm, struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211_beacon_offsets *bo = &vap->iv_bcn_off; struct ieee80211com *ic = ni->ni_ic; struct ieee80211_rateset *rs = &ni->ni_rates; uint16_t capinfo; /* * beacon frame format * * TODO: update to 802.11-2012; a lot of stuff has changed; * vendor extensions should be at the end, etc. 
* * [8] time stamp * [2] beacon interval * [2] cabability information * [tlv] ssid * [tlv] supported rates * [3] parameter set (DS) * [8] CF parameter set (optional) * [tlv] parameter set (IBSS/TIM) * [tlv] country (optional) * [3] power control (optional) * [5] channel switch announcement (CSA) (optional) * XXX TODO: Quiet * XXX TODO: IBSS DFS * XXX TODO: TPC report * [tlv] extended rate phy (ERP) * [tlv] extended supported rates * [tlv] RSN parameters * XXX TODO: BSSLOAD * (XXX EDCA parameter set, QoS capability?) * XXX TODO: AP channel report * * [tlv] HT capabilities * [tlv] HT information * XXX TODO: 20/40 BSS coexistence * Mesh: * XXX TODO: Meshid * XXX TODO: mesh config * XXX TODO: mesh awake window * XXX TODO: beacon timing (mesh, etc) * XXX TODO: MCCAOP Advertisement Overview * XXX TODO: MCCAOP Advertisement * XXX TODO: Mesh channel switch parameters * VHT: * XXX TODO: VHT capabilities * XXX TODO: VHT operation * XXX TODO: VHT transmit power envelope * XXX TODO: channel switch wrapper element * XXX TODO: extended BSS load element * * XXX Vendor-specific OIDs (e.g. Atheros) * [tlv] WPA parameters * [tlv] WME parameters * [tlv] Vendor OUI HT capabilities (optional) * [tlv] Vendor OUI HT information (optional) * [tlv] Atheros capabilities (optional) * [tlv] TDMA parameters (optional) * [tlv] Mesh ID (MBSS) * [tlv] Mesh Conf (MBSS) * [tlv] application data (optional) */ memset(bo, 0, sizeof(*bo)); memset(frm, 0, 8); /* XXX timestamp is set by hardware/driver */ frm += 8; *(uint16_t *)frm = htole16(ni->ni_intval); frm += 2; capinfo = ieee80211_getcapinfo(vap, ni->ni_chan); bo->bo_caps = (uint16_t *)frm; *(uint16_t *)frm = htole16(capinfo); frm += 2; *frm++ = IEEE80211_ELEMID_SSID; if ((vap->iv_flags & IEEE80211_F_HIDESSID) == 0) { *frm++ = ni->ni_esslen; memcpy(frm, ni->ni_essid, ni->ni_esslen); frm += ni->ni_esslen; } else *frm++ = 0; frm = ieee80211_add_rates(frm, rs); if (!IEEE80211_IS_CHAN_FHSS(ni->ni_chan)) { *frm++ = IEEE80211_ELEMID_DSPARMS; *frm++ = 1; *frm++ = ieee80211_chan2ieee(ic, ni->ni_chan); } if (ic->ic_flags & IEEE80211_F_PCF) { bo->bo_cfp = frm; frm = ieee80211_add_cfparms(frm, ic); } bo->bo_tim = frm; if (vap->iv_opmode == IEEE80211_M_IBSS) { *frm++ = IEEE80211_ELEMID_IBSSPARMS; *frm++ = 2; *frm++ = 0; *frm++ = 0; /* TODO: ATIM window */ bo->bo_tim_len = 0; } else if (vap->iv_opmode == IEEE80211_M_HOSTAP || vap->iv_opmode == IEEE80211_M_MBSS) { /* TIM IE is the same for Mesh and Hostap */ struct ieee80211_tim_ie *tie = (struct ieee80211_tim_ie *) frm; tie->tim_ie = IEEE80211_ELEMID_TIM; tie->tim_len = 4; /* length */ tie->tim_count = 0; /* DTIM count */ tie->tim_period = vap->iv_dtim_period; /* DTIM period */ tie->tim_bitctl = 0; /* bitmap control */ tie->tim_bitmap[0] = 0; /* Partial Virtual Bitmap */ frm += sizeof(struct ieee80211_tim_ie); bo->bo_tim_len = 1; } bo->bo_tim_trailer = frm; if ((vap->iv_flags & IEEE80211_F_DOTH) || (vap->iv_flags_ext & IEEE80211_FEXT_DOTD)) frm = ieee80211_add_countryie(frm, ic); if (vap->iv_flags & IEEE80211_F_DOTH) { if (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) frm = ieee80211_add_powerconstraint(frm, vap); bo->bo_csa = frm; if (ic->ic_flags & IEEE80211_F_CSAPENDING) frm = ieee80211_add_csa(frm, vap); } else bo->bo_csa = frm; bo->bo_quiet = NULL; if (vap->iv_flags & IEEE80211_F_DOTH) { if (IEEE80211_IS_CHAN_DFS(ic->ic_bsschan) && (vap->iv_flags_ext & IEEE80211_FEXT_DFS) && (vap->iv_quiet == 1)) { /* * We only insert the quiet IE offset if * the quiet IE is enabled. 
Otherwise don't * put it here or we'll just overwrite * some other beacon contents. */ if (vap->iv_quiet) { bo->bo_quiet = frm; frm = ieee80211_add_quiet(frm,vap, 0); } } } if (IEEE80211_IS_CHAN_ANYG(ni->ni_chan)) { bo->bo_erp = frm; frm = ieee80211_add_erp(frm, vap); } frm = ieee80211_add_xrates(frm, rs); frm = ieee80211_add_rsn(frm, vap); if (IEEE80211_IS_CHAN_HT(ni->ni_chan)) { frm = ieee80211_add_htcap(frm, ni); bo->bo_htinfo = frm; frm = ieee80211_add_htinfo(frm, ni); } if (IEEE80211_IS_CHAN_VHT(ni->ni_chan)) { frm = ieee80211_add_vhtcap(frm, ni); bo->bo_vhtinfo = frm; frm = ieee80211_add_vhtinfo(frm, ni); /* Transmit power envelope */ /* Channel switch wrapper element */ /* Extended bss load element */ } frm = ieee80211_add_wpa(frm, vap); if (vap->iv_flags & IEEE80211_F_WME) { bo->bo_wme = frm; frm = ieee80211_add_wme_param(frm, &ic->ic_wme, !! (vap->iv_flags_ext & IEEE80211_FEXT_UAPSD)); } if (IEEE80211_IS_CHAN_HT(ni->ni_chan) && (vap->iv_flags_ht & IEEE80211_FHT_HTCOMPAT)) { frm = ieee80211_add_htcap_vendor(frm, ni); frm = ieee80211_add_htinfo_vendor(frm, ni); } #ifdef IEEE80211_SUPPORT_SUPERG if (vap->iv_flags & IEEE80211_F_ATHEROS) { bo->bo_ath = frm; frm = ieee80211_add_athcaps(frm, ni); } #endif #ifdef IEEE80211_SUPPORT_TDMA if (vap->iv_caps & IEEE80211_C_TDMA) { bo->bo_tdma = frm; frm = ieee80211_add_tdma(frm, vap); } #endif if (vap->iv_appie_beacon != NULL) { bo->bo_appie = frm; bo->bo_appie_len = vap->iv_appie_beacon->ie_len; frm = add_appie(frm, vap->iv_appie_beacon); } /* XXX TODO: move meshid/meshconf up to before vendor extensions? */ #ifdef IEEE80211_SUPPORT_MESH if (vap->iv_opmode == IEEE80211_M_MBSS) { frm = ieee80211_add_meshid(frm, vap); bo->bo_meshconf = frm; frm = ieee80211_add_meshconf(frm, vap); } #endif bo->bo_tim_trailer_len = frm - bo->bo_tim_trailer; bo->bo_csa_trailer_len = frm - bo->bo_csa; m->m_pkthdr.len = m->m_len = frm - mtod(m, uint8_t *); } /* * Allocate a beacon frame and fillin the appropriate bits. */ struct mbuf * ieee80211_beacon_alloc(struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct ifnet *ifp = vap->iv_ifp; struct ieee80211_frame *wh; struct mbuf *m; int pktlen; uint8_t *frm; /* * Update the "We're putting the quiet IE in the beacon" state. */ if (vap->iv_quiet == 1) vap->iv_flags_ext |= IEEE80211_FEXT_QUIET_IE; else if (vap->iv_quiet == 0) vap->iv_flags_ext &= ~IEEE80211_FEXT_QUIET_IE; /* * beacon frame format * * Note: This needs updating for 802.11-2012. * * [8] time stamp * [2] beacon interval * [2] cabability information * [tlv] ssid * [tlv] supported rates * [3] parameter set (DS) * [8] CF parameter set (optional) * [tlv] parameter set (IBSS/TIM) * [tlv] country (optional) * [3] power control (optional) * [5] channel switch announcement (CSA) (optional) * [tlv] extended rate phy (ERP) * [tlv] extended supported rates * [tlv] RSN parameters * [tlv] HT capabilities * [tlv] HT information * [tlv] VHT capabilities * [tlv] VHT operation * [tlv] Vendor OUI HT capabilities (optional) * [tlv] Vendor OUI HT information (optional) * XXX Vendor-specific OIDs (e.g. Atheros) * [tlv] WPA parameters * [tlv] WME parameters * [tlv] TDMA parameters (optional) * [tlv] Mesh ID (MBSS) * [tlv] Mesh Conf (MBSS) * [tlv] application data (optional) * NB: we allocate the max space required for the TIM bitmap. * XXX how big is this? 
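 * (Illustrative answer, hedged: a TIM element is at most 256 octets,
 *  2 octets of element header, 3 fixed octets for DTIM count, DTIM
 *  period and bitmap control, and a 1..251 octet partial virtual
 *  bitmap; at 8 AIDs per octet that covers association IDs up to 2007.)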
*/ pktlen = 8 /* time stamp */ + sizeof(uint16_t) /* beacon interval */ + sizeof(uint16_t) /* capabilities */ + 2 + ni->ni_esslen /* ssid */ + 2 + IEEE80211_RATE_SIZE /* supported rates */ + 2 + 1 /* DS parameters */ + 2 + 6 /* CF parameters */ + 2 + 4 + vap->iv_tim_len /* DTIM/IBSSPARMS */ + IEEE80211_COUNTRY_MAX_SIZE /* country */ + 2 + 1 /* power control */ + sizeof(struct ieee80211_csa_ie) /* CSA */ + sizeof(struct ieee80211_quiet_ie) /* Quiet */ + 2 + 1 /* ERP */ + 2 + (IEEE80211_RATE_MAXSIZE - IEEE80211_RATE_SIZE) + (vap->iv_caps & IEEE80211_C_WPA ? /* WPA 1+2 */ 2*sizeof(struct ieee80211_ie_wpa) : 0) /* XXX conditional? */ + 4+2*sizeof(struct ieee80211_ie_htcap)/* HT caps */ + 4+2*sizeof(struct ieee80211_ie_htinfo)/* HT info */ + sizeof(struct ieee80211_ie_vhtcap)/* VHT caps */ + sizeof(struct ieee80211_ie_vht_operation)/* VHT info */ + (vap->iv_caps & IEEE80211_C_WME ? /* WME */ sizeof(struct ieee80211_wme_param) : 0) #ifdef IEEE80211_SUPPORT_SUPERG + sizeof(struct ieee80211_ath_ie) /* ATH */ #endif #ifdef IEEE80211_SUPPORT_TDMA + (vap->iv_caps & IEEE80211_C_TDMA ? /* TDMA */ sizeof(struct ieee80211_tdma_param) : 0) #endif #ifdef IEEE80211_SUPPORT_MESH + 2 + ni->ni_meshidlen + sizeof(struct ieee80211_meshconf_ie) #endif + IEEE80211_MAX_APPIE ; m = ieee80211_getmgtframe(&frm, ic->ic_headroom + sizeof(struct ieee80211_frame), pktlen); if (m == NULL) { IEEE80211_DPRINTF(vap, IEEE80211_MSG_ANY, "%s: cannot get buf; size %u\n", __func__, pktlen); vap->iv_stats.is_tx_nobuf++; return NULL; } ieee80211_beacon_construct(m, frm, ni); - M_PREPEND(m, sizeof(struct ieee80211_frame), M_NOWAIT); + M_PREPEND(m, sizeof(struct ieee80211_frame), IEEE80211_M_NOWAIT); KASSERT(m != NULL, ("no space for 802.11 header?")); wh = mtod(m, struct ieee80211_frame *); wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_BEACON; wh->i_fc[1] = IEEE80211_FC1_DIR_NODS; *(uint16_t *)wh->i_dur = 0; IEEE80211_ADDR_COPY(wh->i_addr1, ifp->if_broadcastaddr); IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr); IEEE80211_ADDR_COPY(wh->i_addr3, ni->ni_bssid); *(uint16_t *)wh->i_seq = 0; return m; } /* * Update the dynamic parts of a beacon frame based on the current state. */ int ieee80211_beacon_update(struct ieee80211_node *ni, struct mbuf *m, int mcast) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211_beacon_offsets *bo = &vap->iv_bcn_off; struct ieee80211com *ic = ni->ni_ic; int len_changed = 0; uint16_t capinfo; struct ieee80211_frame *wh; ieee80211_seq seqno; IEEE80211_LOCK(ic); /* * Handle 11h channel change when we've reached the count. * We must recalculate the beacon frame contents to account * for the new channel. Note we do this only for the first * vap that reaches this point; subsequent vaps just update * their beacon state to reflect the recalculated channel. */ if (isset(bo->bo_flags, IEEE80211_BEACON_CSA) && vap->iv_csa_count == ic->ic_csa_count) { vap->iv_csa_count = 0; /* * Effect channel change before reconstructing the beacon * frame contents as many places reference ni_chan. */ if (ic->ic_csa_newchan != NULL) ieee80211_csa_completeswitch(ic); /* * NB: ieee80211_beacon_construct clears all pending * updates in bo_flags so we don't need to explicitly * clear IEEE80211_BEACON_CSA. */ ieee80211_beacon_construct(m, mtod(m, uint8_t*) + sizeof(struct ieee80211_frame), ni); /* XXX do WME aggressive mode processing? */ IEEE80211_UNLOCK(ic); return 1; /* just assume length changed */ } /* * Handle the quiet time element being added and removed. 
* Again, for now we just cheat and reconstruct the whole * beacon - that way the gap is provided as appropriate. * * So, track whether we have already added the IE versus * whether we want to be adding the IE. */ if ((vap->iv_flags_ext & IEEE80211_FEXT_QUIET_IE) && (vap->iv_quiet == 0)) { /* * Quiet time beacon IE enabled, but it's disabled; * recalc */ vap->iv_flags_ext &= ~IEEE80211_FEXT_QUIET_IE; ieee80211_beacon_construct(m, mtod(m, uint8_t*) + sizeof(struct ieee80211_frame), ni); /* XXX do WME aggressive mode processing? */ IEEE80211_UNLOCK(ic); return 1; /* just assume length changed */ } if (((vap->iv_flags_ext & IEEE80211_FEXT_QUIET_IE) == 0) && (vap->iv_quiet == 1)) { /* * Quiet time beacon IE disabled, but it's now enabled; * recalc */ vap->iv_flags_ext |= IEEE80211_FEXT_QUIET_IE; ieee80211_beacon_construct(m, mtod(m, uint8_t*) + sizeof(struct ieee80211_frame), ni); /* XXX do WME aggressive mode processing? */ IEEE80211_UNLOCK(ic); return 1; /* just assume length changed */ } wh = mtod(m, struct ieee80211_frame *); /* * XXX TODO Strictly speaking this should be incremented with the TX * lock held so as to serialise access to the non-qos TID sequence * number space. * * If the driver identifies it does its own TX seqno management then * we can skip this (and still not do the TX seqno.) */ seqno = ni->ni_txseqs[IEEE80211_NONQOS_TID]++; *(uint16_t *)&wh->i_seq[0] = htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT); M_SEQNO_SET(m, seqno); /* XXX faster to recalculate entirely or just changes? */ capinfo = ieee80211_getcapinfo(vap, ni->ni_chan); *bo->bo_caps = htole16(capinfo); if (vap->iv_flags & IEEE80211_F_WME) { struct ieee80211_wme_state *wme = &ic->ic_wme; /* * Check for aggressive mode change. When there is * significant high priority traffic in the BSS * throttle back BE traffic by using conservative * parameters. Otherwise BE uses aggressive params * to optimize performance of legacy/non-QoS traffic. */ if (wme->wme_flags & WME_F_AGGRMODE) { if (wme->wme_hipri_traffic > wme->wme_hipri_switch_thresh) { IEEE80211_DPRINTF(vap, IEEE80211_MSG_WME, "%s: traffic %u, disable aggressive mode\n", __func__, wme->wme_hipri_traffic); wme->wme_flags &= ~WME_F_AGGRMODE; ieee80211_wme_updateparams_locked(vap); wme->wme_hipri_traffic = wme->wme_hipri_switch_hysteresis; } else wme->wme_hipri_traffic = 0; } else { if (wme->wme_hipri_traffic <= wme->wme_hipri_switch_thresh) { IEEE80211_DPRINTF(vap, IEEE80211_MSG_WME, "%s: traffic %u, enable aggressive mode\n", __func__, wme->wme_hipri_traffic); wme->wme_flags |= WME_F_AGGRMODE; ieee80211_wme_updateparams_locked(vap); wme->wme_hipri_traffic = 0; } else wme->wme_hipri_traffic = wme->wme_hipri_switch_hysteresis; } if (isset(bo->bo_flags, IEEE80211_BEACON_WME)) { (void) ieee80211_add_wme_param(bo->bo_wme, wme, vap->iv_flags_ext & IEEE80211_FEXT_UAPSD); clrbit(bo->bo_flags, IEEE80211_BEACON_WME); } } if (isset(bo->bo_flags, IEEE80211_BEACON_HTINFO)) { ieee80211_ht_update_beacon(vap, bo); clrbit(bo->bo_flags, IEEE80211_BEACON_HTINFO); } #ifdef IEEE80211_SUPPORT_TDMA if (vap->iv_caps & IEEE80211_C_TDMA) { /* * NB: the beacon is potentially updated every TBTT. 
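 * (NB, illustrative: TBTT is the target beacon transmission time; with
 *  a typical beacon interval of 100 TU, 1 TU being 1024us, this update
 *  path can run on the order of ten times a second per beaconing vap.)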
*/ ieee80211_tdma_update_beacon(vap, bo); } #endif #ifdef IEEE80211_SUPPORT_MESH if (vap->iv_opmode == IEEE80211_M_MBSS) ieee80211_mesh_update_beacon(vap, bo); #endif if (vap->iv_opmode == IEEE80211_M_HOSTAP || vap->iv_opmode == IEEE80211_M_MBSS) { /* NB: no IBSS support*/ struct ieee80211_tim_ie *tie = (struct ieee80211_tim_ie *) bo->bo_tim; if (isset(bo->bo_flags, IEEE80211_BEACON_TIM)) { u_int timlen, timoff, i; /* * ATIM/DTIM needs updating. If it fits in the * current space allocated then just copy in the * new bits. Otherwise we need to move any trailing * data to make room. Note that we know there is * contiguous space because ieee80211_beacon_allocate * insures there is space in the mbuf to write a * maximal-size virtual bitmap (based on iv_max_aid). */ /* * Calculate the bitmap size and offset, copy any * trailer out of the way, and then copy in the * new bitmap and update the information element. * Note that the tim bitmap must contain at least * one byte and any offset must be even. */ if (vap->iv_ps_pending != 0) { timoff = 128; /* impossibly large */ for (i = 0; i < vap->iv_tim_len; i++) if (vap->iv_tim_bitmap[i]) { timoff = i &~ 1; break; } KASSERT(timoff != 128, ("tim bitmap empty!")); for (i = vap->iv_tim_len-1; i >= timoff; i--) if (vap->iv_tim_bitmap[i]) break; timlen = 1 + (i - timoff); } else { timoff = 0; timlen = 1; } /* * TODO: validate this! */ if (timlen != bo->bo_tim_len) { /* copy up/down trailer */ int adjust = tie->tim_bitmap+timlen - bo->bo_tim_trailer; ovbcopy(bo->bo_tim_trailer, bo->bo_tim_trailer+adjust, bo->bo_tim_trailer_len); bo->bo_tim_trailer += adjust; bo->bo_erp += adjust; bo->bo_htinfo += adjust; bo->bo_vhtinfo += adjust; #ifdef IEEE80211_SUPPORT_SUPERG bo->bo_ath += adjust; #endif #ifdef IEEE80211_SUPPORT_TDMA bo->bo_tdma += adjust; #endif #ifdef IEEE80211_SUPPORT_MESH bo->bo_meshconf += adjust; #endif bo->bo_appie += adjust; bo->bo_wme += adjust; bo->bo_csa += adjust; bo->bo_quiet += adjust; bo->bo_tim_len = timlen; /* update information element */ tie->tim_len = 3 + timlen; tie->tim_bitctl = timoff; len_changed = 1; } memcpy(tie->tim_bitmap, vap->iv_tim_bitmap + timoff, bo->bo_tim_len); clrbit(bo->bo_flags, IEEE80211_BEACON_TIM); IEEE80211_DPRINTF(vap, IEEE80211_MSG_POWER, "%s: TIM updated, pending %u, off %u, len %u\n", __func__, vap->iv_ps_pending, timoff, timlen); } /* count down DTIM period */ if (tie->tim_count == 0) tie->tim_count = tie->tim_period - 1; else tie->tim_count--; /* update state for buffered multicast frames on DTIM */ if (mcast && tie->tim_count == 0) tie->tim_bitctl |= 1; else tie->tim_bitctl &= ~1; if (isset(bo->bo_flags, IEEE80211_BEACON_CSA)) { struct ieee80211_csa_ie *csa = (struct ieee80211_csa_ie *) bo->bo_csa; /* * Insert or update CSA ie. If we're just starting * to count down to the channel switch then we need * to insert the CSA ie. Otherwise we just need to * drop the count. The actual change happens above * when the vap's count reaches the target count. 
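 * (Illustrative layout, hedged: the CSA element written here is five
 *  octets, element ID 37, length 3, then channel switch mode, new
 *  channel number and channel switch count; only the count changes on
 *  later beacons until the switch is executed.)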
*/ if (vap->iv_csa_count == 0) { memmove(&csa[1], csa, bo->bo_csa_trailer_len); bo->bo_erp += sizeof(*csa); bo->bo_htinfo += sizeof(*csa); bo->bo_vhtinfo += sizeof(*csa); bo->bo_wme += sizeof(*csa); #ifdef IEEE80211_SUPPORT_SUPERG bo->bo_ath += sizeof(*csa); #endif #ifdef IEEE80211_SUPPORT_TDMA bo->bo_tdma += sizeof(*csa); #endif #ifdef IEEE80211_SUPPORT_MESH bo->bo_meshconf += sizeof(*csa); #endif bo->bo_appie += sizeof(*csa); bo->bo_csa_trailer_len += sizeof(*csa); bo->bo_quiet += sizeof(*csa); bo->bo_tim_trailer_len += sizeof(*csa); m->m_len += sizeof(*csa); m->m_pkthdr.len += sizeof(*csa); ieee80211_add_csa(bo->bo_csa, vap); } else csa->csa_count--; vap->iv_csa_count++; /* NB: don't clear IEEE80211_BEACON_CSA */ } /* * Only add the quiet time IE if we've enabled it * as appropriate. */ if (IEEE80211_IS_CHAN_DFS(ic->ic_bsschan) && (vap->iv_flags_ext & IEEE80211_FEXT_DFS)) { if (vap->iv_quiet && (vap->iv_flags_ext & IEEE80211_FEXT_QUIET_IE)) { ieee80211_add_quiet(bo->bo_quiet, vap, 1); } } if (isset(bo->bo_flags, IEEE80211_BEACON_ERP)) { /* * ERP element needs updating. */ (void) ieee80211_add_erp(bo->bo_erp, vap); clrbit(bo->bo_flags, IEEE80211_BEACON_ERP); } #ifdef IEEE80211_SUPPORT_SUPERG if (isset(bo->bo_flags, IEEE80211_BEACON_ATH)) { ieee80211_add_athcaps(bo->bo_ath, ni); clrbit(bo->bo_flags, IEEE80211_BEACON_ATH); } #endif } if (isset(bo->bo_flags, IEEE80211_BEACON_APPIE)) { const struct ieee80211_appie *aie = vap->iv_appie_beacon; int aielen; uint8_t *frm; aielen = 0; if (aie != NULL) aielen += aie->ie_len; if (aielen != bo->bo_appie_len) { /* copy up/down trailer */ int adjust = aielen - bo->bo_appie_len; ovbcopy(bo->bo_tim_trailer, bo->bo_tim_trailer+adjust, bo->bo_tim_trailer_len); bo->bo_tim_trailer += adjust; bo->bo_appie += adjust; bo->bo_appie_len = aielen; len_changed = 1; } frm = bo->bo_appie; if (aie != NULL) frm = add_appie(frm, aie); clrbit(bo->bo_flags, IEEE80211_BEACON_APPIE); } IEEE80211_UNLOCK(ic); return len_changed; } /* * Do Ethernet-LLC encapsulation for each payload in a fast frame * tunnel encapsulation. The frame is assumed to have an Ethernet * header at the front that must be stripped before prepending the * LLC followed by the Ethernet header passed in (with an Ethernet * type that specifies the payload size). */ struct mbuf * ieee80211_ff_encap1(struct ieee80211vap *vap, struct mbuf *m, const struct ether_header *eh) { struct llc *llc; uint16_t payload; /* XXX optimize by combining m_adj+M_PREPEND */ m_adj(m, sizeof(struct ether_header) - sizeof(struct llc)); llc = mtod(m, struct llc *); llc->llc_dsap = llc->llc_ssap = LLC_SNAP_LSAP; llc->llc_control = LLC_UI; llc->llc_snap.org_code[0] = 0; llc->llc_snap.org_code[1] = 0; llc->llc_snap.org_code[2] = 0; llc->llc_snap.ether_type = eh->ether_type; payload = m->m_pkthdr.len; /* NB: w/o Ethernet header */ - M_PREPEND(m, sizeof(struct ether_header), M_NOWAIT); + M_PREPEND(m, sizeof(struct ether_header), IEEE80211_M_NOWAIT); if (m == NULL) { /* XXX cannot happen */ IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG, "%s: no space for ether_header\n", __func__); vap->iv_stats.is_tx_nobuf++; return NULL; } ETHER_HEADER_COPY(mtod(m, void *), eh); mtod(m, struct ether_header *)->ether_type = htons(payload); return m; } /* * Complete an mbuf transmission. * * For now, this simply processes a completed frame after the * driver has completed it's transmission and/or retransmission. * It assumes the frame is an 802.11 encapsulated frame. 
* * Later on it will grow to become the exit path for a given frame * from the driver and, depending upon how it's been encapsulated * and already transmitted, it may end up doing A-MPDU retransmission, * power save requeuing, etc. * * In order for the above to work, the driver entry point to this * must not hold any driver locks. Thus, the driver needs to delay * any actual mbuf completion until it can release said locks. * * This frees the mbuf and if the mbuf has a node reference, * the node reference will be freed. */ void ieee80211_tx_complete(struct ieee80211_node *ni, struct mbuf *m, int status) { if (ni != NULL) { struct ifnet *ifp = ni->ni_vap->iv_ifp; if (status == 0) { if_inc_counter(ifp, IFCOUNTER_OBYTES, m->m_pkthdr.len); if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); if (m->m_flags & M_MCAST) if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1); } else if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); if (m->m_flags & M_TXCB) { IEEE80211_DPRINTF(ni->ni_vap, IEEE80211_MSG_STATE | IEEE80211_MSG_DEBUG, "ni %p vap %p mode %s state %s m %p status %d\n", ni, ni->ni_vap, ieee80211_opmode_name[ni->ni_vap->iv_opmode], ieee80211_state_name[ni->ni_vap->iv_state], m, status); ieee80211_process_callback(ni, m, status); } ieee80211_free_node(ni); } m_freem(m); } diff --git a/sys/net80211/ieee80211_superg.c b/sys/net80211/ieee80211_superg.c index 4ac7813d2811..5da97577165b 100644 --- a/sys/net80211/ieee80211_superg.c +++ b/sys/net80211/ieee80211_superg.c @@ -1,1065 +1,1065 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_wlan.h" #ifdef IEEE80211_SUPPORT_SUPERG #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * Atheros fast-frame encapsulation format. 
* FF max payload: * 802.2 + FFHDR + HPAD + 802.3 + 802.2 + 1500 + SPAD + 802.3 + 802.2 + 1500: * 8 + 4 + 4 + 14 + 8 + 1500 + 6 + 14 + 8 + 1500 * = 3066 */ /* fast frame header is 32-bits */ #define ATH_FF_PROTO 0x0000003f /* protocol */ #define ATH_FF_PROTO_S 0 #define ATH_FF_FTYPE 0x000000c0 /* frame type */ #define ATH_FF_FTYPE_S 6 #define ATH_FF_HLEN32 0x00000300 /* optional hdr length */ #define ATH_FF_HLEN32_S 8 #define ATH_FF_SEQNUM 0x001ffc00 /* sequence number */ #define ATH_FF_SEQNUM_S 10 #define ATH_FF_OFFSET 0xffe00000 /* offset to 2nd payload */ #define ATH_FF_OFFSET_S 21 #define ATH_FF_MAX_HDR_PAD 4 #define ATH_FF_MAX_SEP_PAD 6 #define ATH_FF_MAX_HDR 30 #define ATH_FF_PROTO_L2TUNNEL 0 /* L2 tunnel protocol */ #define ATH_FF_ETH_TYPE 0x88bd /* Ether type for encapsulated frames */ #define ATH_FF_SNAP_ORGCODE_0 0x00 #define ATH_FF_SNAP_ORGCODE_1 0x03 #define ATH_FF_SNAP_ORGCODE_2 0x7f #define ATH_FF_TXQMIN 2 /* min txq depth for staging */ #define ATH_FF_TXQMAX 50 /* maximum # of queued frames allowed */ #define ATH_FF_STAGEMAX 5 /* max waiting period for staged frame*/ #define ETHER_HEADER_COPY(dst, src) \ memcpy(dst, src, sizeof(struct ether_header)) static int ieee80211_ffppsmin = 2; /* pps threshold for ff aggregation */ SYSCTL_INT(_net_wlan, OID_AUTO, ffppsmin, CTLFLAG_RW, &ieee80211_ffppsmin, 0, "min packet rate before fast-frame staging"); static int ieee80211_ffagemax = -1; /* max time frames held on stage q */ SYSCTL_PROC(_net_wlan, OID_AUTO, ffagemax, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &ieee80211_ffagemax, 0, ieee80211_sysctl_msecs_ticks, "I", "max hold time for fast-frame staging (ms)"); static void ff_age_all(void *arg, int npending) { struct ieee80211com *ic = arg; /* XXX cache timer value somewhere (racy) */ ieee80211_ff_age_all(ic, ieee80211_ffagemax + 1); } void ieee80211_superg_attach(struct ieee80211com *ic) { struct ieee80211_superg *sg; IEEE80211_FF_LOCK_INIT(ic, ic->ic_name); sg = (struct ieee80211_superg *) IEEE80211_MALLOC( sizeof(struct ieee80211_superg), M_80211_VAP, IEEE80211_M_NOWAIT | IEEE80211_M_ZERO); if (sg == NULL) { printf("%s: cannot allocate SuperG state block\n", __func__); return; } TIMEOUT_TASK_INIT(ic->ic_tq, &sg->ff_qtimer, 0, ff_age_all, ic); ic->ic_superg = sg; /* * Default to not being so aggressive for FF/AMSDU * aging, otherwise we may hold a frame around * for way too long before we expire it out. */ ieee80211_ffagemax = msecs_to_ticks(2); } void ieee80211_superg_detach(struct ieee80211com *ic) { if (ic->ic_superg != NULL) { struct timeout_task *qtask = &ic->ic_superg->ff_qtimer; while (taskqueue_cancel_timeout(ic->ic_tq, qtask, NULL) != 0) taskqueue_drain_timeout(ic->ic_tq, qtask); IEEE80211_FREE(ic->ic_superg, M_80211_VAP); ic->ic_superg = NULL; } IEEE80211_FF_LOCK_DESTROY(ic); } void ieee80211_superg_vattach(struct ieee80211vap *vap) { struct ieee80211com *ic = vap->iv_ic; if (ic->ic_superg == NULL) /* NB: can't do fast-frames w/o state */ vap->iv_caps &= ~IEEE80211_C_FF; if (vap->iv_caps & IEEE80211_C_FF) vap->iv_flags |= IEEE80211_F_FF; /* NB: we only implement sta mode */ if (vap->iv_opmode == IEEE80211_M_STA && (vap->iv_caps & IEEE80211_C_TURBOP)) vap->iv_flags |= IEEE80211_F_TURBOP; } void ieee80211_superg_vdetach(struct ieee80211vap *vap) { } #define ATH_OUI_BYTES 0x00, 0x03, 0x7f /* * Add a WME information element to a frame. 
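 * (NB: despite the wording above, this emits the Atheros vendor-specific
 *  capability element, not WME: OUI 00:03:7f plus type/subtype/version,
 *  one capability octet and a two octet little-endian default key index,
 *  encoded as 0x7fff when no default transmit key is advertised.)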
*/ uint8_t * ieee80211_add_ath(uint8_t *frm, uint8_t caps, ieee80211_keyix defkeyix) { static const struct ieee80211_ath_ie info = { .ath_id = IEEE80211_ELEMID_VENDOR, .ath_len = sizeof(struct ieee80211_ath_ie) - 2, .ath_oui = { ATH_OUI_BYTES }, .ath_oui_type = ATH_OUI_TYPE, .ath_oui_subtype= ATH_OUI_SUBTYPE, .ath_version = ATH_OUI_VERSION, }; struct ieee80211_ath_ie *ath = (struct ieee80211_ath_ie *) frm; memcpy(frm, &info, sizeof(info)); ath->ath_capability = caps; if (defkeyix != IEEE80211_KEYIX_NONE) { ath->ath_defkeyix[0] = (defkeyix & 0xff); ath->ath_defkeyix[1] = ((defkeyix >> 8) & 0xff); } else { ath->ath_defkeyix[0] = 0xff; ath->ath_defkeyix[1] = 0x7f; } return frm + sizeof(info); } #undef ATH_OUI_BYTES uint8_t * ieee80211_add_athcaps(uint8_t *frm, const struct ieee80211_node *bss) { const struct ieee80211vap *vap = bss->ni_vap; return ieee80211_add_ath(frm, vap->iv_flags & IEEE80211_F_ATHEROS, ((vap->iv_flags & IEEE80211_F_WPA) == 0 && bss->ni_authmode != IEEE80211_AUTH_8021X) ? vap->iv_def_txkey : IEEE80211_KEYIX_NONE); } void ieee80211_parse_ath(struct ieee80211_node *ni, uint8_t *ie) { const struct ieee80211_ath_ie *ath = (const struct ieee80211_ath_ie *) ie; ni->ni_ath_flags = ath->ath_capability; ni->ni_ath_defkeyix = le16dec(&ath->ath_defkeyix); } int ieee80211_parse_athparams(struct ieee80211_node *ni, uint8_t *frm, const struct ieee80211_frame *wh) { struct ieee80211vap *vap = ni->ni_vap; const struct ieee80211_ath_ie *ath; u_int len = frm[1]; int capschanged; uint16_t defkeyix; if (len < sizeof(struct ieee80211_ath_ie)-2) { IEEE80211_DISCARD_IE(vap, IEEE80211_MSG_ELEMID | IEEE80211_MSG_SUPERG, wh, "Atheros", "too short, len %u", len); return -1; } ath = (const struct ieee80211_ath_ie *)frm; capschanged = (ni->ni_ath_flags != ath->ath_capability); defkeyix = le16dec(ath->ath_defkeyix); if (capschanged || defkeyix != ni->ni_ath_defkeyix) { ni->ni_ath_flags = ath->ath_capability; ni->ni_ath_defkeyix = defkeyix; IEEE80211_NOTE(vap, IEEE80211_MSG_SUPERG, ni, "ath ie change: new caps 0x%x defkeyix 0x%x", ni->ni_ath_flags, ni->ni_ath_defkeyix); } if (IEEE80211_ATH_CAP(vap, ni, ATHEROS_CAP_TURBO_PRIME)) { uint16_t curflags, newflags; /* * Check for turbo mode switch. Calculate flags * for the new mode and effect the switch. */ newflags = curflags = vap->iv_ic->ic_bsschan->ic_flags; /* NB: BOOST is not in ic_flags, so get it from the ie */ if (ath->ath_capability & ATHEROS_CAP_BOOST) newflags |= IEEE80211_CHAN_TURBO; else newflags &= ~IEEE80211_CHAN_TURBO; if (newflags != curflags) ieee80211_dturbo_switch(vap, newflags); } return capschanged; } /* * Decap the encapsulated frame pair and dispatch the first * for delivery. The second frame is returned for delivery * via the normal path. */ struct mbuf * ieee80211_ff_decap(struct ieee80211_node *ni, struct mbuf *m) { #define FF_LLC_SIZE (sizeof(struct ether_header) + sizeof(struct llc)) struct ieee80211vap *vap = ni->ni_vap; struct llc *llc; uint32_t ath; struct mbuf *n; int framelen; /* NB: we assume caller does this check for us */ KASSERT(IEEE80211_ATH_CAP(vap, ni, IEEE80211_NODE_FF), ("ff not negotiated")); /* * Check for fast-frame tunnel encapsulation. 
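 * (Illustrative summary of the checks below, hedged: the frame must be
 *  long enough to hold two tunneled payloads, carry an LLC/SNAP header
 *  with ether type ATH_FF_ETH_TYPE (0x88bd), and declare the L2 tunnel
 *  protocol in its 32-bit fast-frame header before it is split in two.)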
*/ if (m->m_pkthdr.len < 3*FF_LLC_SIZE) return m; if (m->m_len < FF_LLC_SIZE && (m = m_pullup(m, FF_LLC_SIZE)) == NULL) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY, ni->ni_macaddr, "fast-frame", "%s", "m_pullup(llc) failed"); vap->iv_stats.is_rx_tooshort++; return NULL; } llc = (struct llc *)(mtod(m, uint8_t *) + sizeof(struct ether_header)); if (llc->llc_snap.ether_type != htons(ATH_FF_ETH_TYPE)) return m; m_adj(m, FF_LLC_SIZE); m_copydata(m, 0, sizeof(uint32_t), (caddr_t) &ath); if (_IEEE80211_MASKSHIFT(ath, ATH_FF_PROTO) != ATH_FF_PROTO_L2TUNNEL) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY, ni->ni_macaddr, "fast-frame", "unsupport tunnel protocol, header 0x%x", ath); vap->iv_stats.is_ff_badhdr++; m_freem(m); return NULL; } /* NB: skip header and alignment padding */ m_adj(m, roundup(sizeof(uint32_t) - 2, 4) + 2); vap->iv_stats.is_ff_decap++; /* * Decap the first frame, bust it apart from the * second and deliver; then decap the second frame * and return it to the caller for normal delivery. */ m = ieee80211_decap1(m, &framelen); if (m == NULL) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY, ni->ni_macaddr, "fast-frame", "%s", "first decap failed"); vap->iv_stats.is_ff_tooshort++; return NULL; } - n = m_split(m, framelen, M_NOWAIT); + n = m_split(m, framelen, IEEE80211_M_NOWAIT); if (n == NULL) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY, ni->ni_macaddr, "fast-frame", "%s", "unable to split encapsulated frames"); vap->iv_stats.is_ff_split++; m_freem(m); /* NB: must reclaim */ return NULL; } /* XXX not right for WDS */ vap->iv_deliver_data(vap, ni, m); /* 1st of pair */ /* * Decap second frame. */ m_adj(n, roundup2(framelen, 4) - framelen); /* padding */ n = ieee80211_decap1(n, &framelen); if (n == NULL) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY, ni->ni_macaddr, "fast-frame", "%s", "second decap failed"); vap->iv_stats.is_ff_tooshort++; } /* XXX verify framelen against mbuf contents */ return n; /* 2nd delivered by caller */ #undef FF_LLC_SIZE } /* * Fast frame encapsulation. There must be two packets * chained with m_nextpkt. We do header adjustment for * each, add the tunnel encapsulation, and then concatenate * the mbuf chains to form a single frame for transmission. */ struct mbuf * ieee80211_ff_encap(struct ieee80211vap *vap, struct mbuf *m1, int hdrspace, struct ieee80211_key *key) { struct mbuf *m2; struct ether_header eh1, eh2; struct llc *llc; struct mbuf *m; int pad; m2 = m1->m_nextpkt; if (m2 == NULL) { IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG, "%s: only one frame\n", __func__); goto bad; } m1->m_nextpkt = NULL; /* * Adjust to include 802.11 header requirement. */ KASSERT(m1->m_len >= sizeof(eh1), ("no ethernet header!")); ETHER_HEADER_COPY(&eh1, mtod(m1, caddr_t)); m1 = ieee80211_mbuf_adjust(vap, hdrspace, key, m1); if (m1 == NULL) { printf("%s: failed initial mbuf_adjust\n", __func__); /* NB: ieee80211_mbuf_adjust handles msgs+statistics */ m_freem(m2); goto bad; } /* * Copy second frame's Ethernet header out of line * and adjust for possible padding in case there isn't room * at the end of first frame. */ KASSERT(m2->m_len >= sizeof(eh2), ("no ethernet header!")); ETHER_HEADER_COPY(&eh2, mtod(m2, caddr_t)); m2 = ieee80211_mbuf_adjust(vap, 4, NULL, m2); if (m2 == NULL) { /* NB: ieee80211_mbuf_adjust handles msgs+statistics */ printf("%s: failed second \n", __func__); goto bad; } /* * Now do tunnel encapsulation. First, each * frame gets a standard encapsulation. 
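 * (Illustrative resulting layout, hedged, matching the format summary
 *  at the top of this file: LLC/SNAP with the Atheros org code, a
 *  32-bit fast-frame header plus 2 pad octets, the first 802.3+LLC
 *  payload padded to a 4 byte boundary, then the second 802.3+LLC
 *  payload.)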
*/ m1 = ieee80211_ff_encap1(vap, m1, &eh1); if (m1 == NULL) goto bad; m2 = ieee80211_ff_encap1(vap, m2, &eh2); if (m2 == NULL) goto bad; /* * Pad leading frame to a 4-byte boundary. If there * is space at the end of the first frame, put it * there; otherwise prepend to the front of the second * frame. We know doing the second will always work * because we reserve space above. We prefer appending * as this typically has better DMA alignment properties. */ for (m = m1; m->m_next != NULL; m = m->m_next) ; pad = roundup2(m1->m_pkthdr.len, 4) - m1->m_pkthdr.len; if (pad) { if (M_TRAILINGSPACE(m) < pad) { /* prepend to second */ m2->m_data -= pad; m2->m_len += pad; m2->m_pkthdr.len += pad; } else { /* append to first */ m->m_len += pad; m1->m_pkthdr.len += pad; } } /* * A-MSDU's are just appended; the "I'm A-MSDU!" bit is in the * QoS header. * * XXX optimize by prepending together */ m->m_next = m2; /* NB: last mbuf from above */ m1->m_pkthdr.len += m2->m_pkthdr.len; - M_PREPEND(m1, sizeof(uint32_t)+2, M_NOWAIT); + M_PREPEND(m1, sizeof(uint32_t)+2, IEEE80211_M_NOWAIT); if (m1 == NULL) { /* XXX cannot happen */ IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG, "%s: no space for tunnel header\n", __func__); vap->iv_stats.is_tx_nobuf++; return NULL; } memset(mtod(m1, void *), 0, sizeof(uint32_t)+2); - M_PREPEND(m1, sizeof(struct llc), M_NOWAIT); + M_PREPEND(m1, sizeof(struct llc), IEEE80211_M_NOWAIT); if (m1 == NULL) { /* XXX cannot happen */ IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG, "%s: no space for llc header\n", __func__); vap->iv_stats.is_tx_nobuf++; return NULL; } llc = mtod(m1, struct llc *); llc->llc_dsap = llc->llc_ssap = LLC_SNAP_LSAP; llc->llc_control = LLC_UI; llc->llc_snap.org_code[0] = ATH_FF_SNAP_ORGCODE_0; llc->llc_snap.org_code[1] = ATH_FF_SNAP_ORGCODE_1; llc->llc_snap.org_code[2] = ATH_FF_SNAP_ORGCODE_2; llc->llc_snap.ether_type = htons(ATH_FF_ETH_TYPE); vap->iv_stats.is_ff_encap++; return m1; bad: vap->iv_stats.is_ff_encapfail++; if (m1 != NULL) m_freem(m1); if (m2 != NULL) m_freem(m2); return NULL; } /* * A-MSDU encapsulation. * * This assumes just two frames for now, since we're borrowing the * same queuing code and infrastructure as fast-frames. * * There must be two packets chained with m_nextpkt. * We do header adjustment for each, and then concatenate the mbuf chains * to form a single frame for transmission. */ struct mbuf * ieee80211_amsdu_encap(struct ieee80211vap *vap, struct mbuf *m1, int hdrspace, struct ieee80211_key *key) { struct mbuf *m2; struct ether_header eh1, eh2; struct mbuf *m; int pad; m2 = m1->m_nextpkt; if (m2 == NULL) { IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG, "%s: only one frame\n", __func__); goto bad; } m1->m_nextpkt = NULL; /* * Include A-MSDU header in adjusting header layout. */ KASSERT(m1->m_len >= sizeof(eh1), ("no ethernet header!")); ETHER_HEADER_COPY(&eh1, mtod(m1, caddr_t)); m1 = ieee80211_mbuf_adjust(vap, hdrspace + sizeof(struct llc) + sizeof(uint32_t) + sizeof(struct ether_header), key, m1); if (m1 == NULL) { /* NB: ieee80211_mbuf_adjust handles msgs+statistics */ m_freem(m2); goto bad; } /* * Copy second frame's Ethernet header out of line * and adjust for encapsulation headers. Note that * we make room for padding in case there isn't room * at the end of first frame. */ KASSERT(m2->m_len >= sizeof(eh2), ("no ethernet header!")); ETHER_HEADER_COPY(&eh2, mtod(m2, caddr_t)); m2 = ieee80211_mbuf_adjust(vap, 4, NULL, m2); if (m2 == NULL) { /* NB: ieee80211_mbuf_adjust handles msgs+statistics */ goto bad; } /* * Now do tunnel encapsulation. 
First, each * frame gets a standard encapsulation. */ m1 = ieee80211_ff_encap1(vap, m1, &eh1); if (m1 == NULL) goto bad; m2 = ieee80211_ff_encap1(vap, m2, &eh2); if (m2 == NULL) goto bad; /* * Pad leading frame to a 4-byte boundary. If there * is space at the end of the first frame, put it * there; otherwise prepend to the front of the second * frame. We know doing the second will always work * because we reserve space above. We prefer appending * as this typically has better DMA alignment properties. */ for (m = m1; m->m_next != NULL; m = m->m_next) ; pad = roundup2(m1->m_pkthdr.len, 4) - m1->m_pkthdr.len; if (pad) { if (M_TRAILINGSPACE(m) < pad) { /* prepend to second */ m2->m_data -= pad; m2->m_len += pad; m2->m_pkthdr.len += pad; } else { /* append to first */ m->m_len += pad; m1->m_pkthdr.len += pad; } } /* * Now, stick 'em together. */ m->m_next = m2; /* NB: last mbuf from above */ m1->m_pkthdr.len += m2->m_pkthdr.len; vap->iv_stats.is_amsdu_encap++; return m1; bad: vap->iv_stats.is_amsdu_encapfail++; if (m1 != NULL) m_freem(m1); if (m2 != NULL) m_freem(m2); return NULL; } static void ff_transmit(struct ieee80211_node *ni, struct mbuf *m) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; IEEE80211_TX_LOCK_ASSERT(ic); /* encap and xmit */ m = ieee80211_encap(vap, ni, m); if (m != NULL) (void) ieee80211_parent_xmitpkt(ic, m); else ieee80211_free_node(ni); } /* * Flush frames to device; note we re-use the linked list * the frames were stored on and use the sentinel (unchanged) * which may be non-NULL. */ static void ff_flush(struct mbuf *head, struct mbuf *last) { struct mbuf *m, *next; struct ieee80211_node *ni; struct ieee80211vap *vap; for (m = head; m != last; m = next) { next = m->m_nextpkt; m->m_nextpkt = NULL; ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; vap = ni->ni_vap; IEEE80211_NOTE(vap, IEEE80211_MSG_SUPERG, ni, "%s: flush frame, age %u", __func__, M_AGE_GET(m)); vap->iv_stats.is_ff_flush++; ff_transmit(ni, m); } } /* * Age frames on the staging queue. 
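 * (NB, illustrative: ieee80211_superg_attach sets the staging timeout
 *  to about 2ms, so a frame parked here waiting for a partner is aged
 *  out and flushed to the driver almost immediately rather than being
 *  held for a long time.)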
*/ void ieee80211_ff_age(struct ieee80211com *ic, struct ieee80211_stageq *sq, int quanta) { struct mbuf *m, *head; struct ieee80211_node *ni; IEEE80211_FF_LOCK(ic); if (sq->depth == 0) { IEEE80211_FF_UNLOCK(ic); return; /* nothing to do */ } KASSERT(sq->head != NULL, ("stageq empty")); head = sq->head; while ((m = sq->head) != NULL && M_AGE_GET(m) < quanta) { int tid = WME_AC_TO_TID(M_WME_GETAC(m)); /* clear staging ref to frame */ ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; KASSERT(ni->ni_tx_superg[tid] == m, ("staging queue empty")); ni->ni_tx_superg[tid] = NULL; sq->head = m->m_nextpkt; sq->depth--; } if (m == NULL) sq->tail = NULL; else M_AGE_SUB(m, quanta); IEEE80211_FF_UNLOCK(ic); IEEE80211_TX_LOCK(ic); ff_flush(head, m); IEEE80211_TX_UNLOCK(ic); } static void stageq_add(struct ieee80211com *ic, struct ieee80211_stageq *sq, struct mbuf *m) { int age = ieee80211_ffagemax; IEEE80211_FF_LOCK_ASSERT(ic); if (sq->tail != NULL) { sq->tail->m_nextpkt = m; age -= M_AGE_GET(sq->head); } else { sq->head = m; struct timeout_task *qtask = &ic->ic_superg->ff_qtimer; taskqueue_enqueue_timeout(ic->ic_tq, qtask, age); } KASSERT(age >= 0, ("age %d", age)); M_AGE_SET(m, age); m->m_nextpkt = NULL; sq->tail = m; sq->depth++; } static void stageq_remove(struct ieee80211com *ic, struct ieee80211_stageq *sq, struct mbuf *mstaged) { struct mbuf *m, *mprev; IEEE80211_FF_LOCK_ASSERT(ic); mprev = NULL; for (m = sq->head; m != NULL; m = m->m_nextpkt) { if (m == mstaged) { if (mprev == NULL) sq->head = m->m_nextpkt; else mprev->m_nextpkt = m->m_nextpkt; if (sq->tail == m) sq->tail = mprev; sq->depth--; return; } mprev = m; } printf("%s: packet not found\n", __func__); } static uint32_t ff_approx_txtime(struct ieee80211_node *ni, const struct mbuf *m1, const struct mbuf *m2) { struct ieee80211com *ic = ni->ni_ic; struct ieee80211vap *vap = ni->ni_vap; uint32_t framelen; uint32_t frame_time; /* * Approximate the frame length to be transmitted. A swag to add * the following maximal values to the skb payload: * - 32: 802.11 encap + CRC * - 24: encryption overhead (if wep bit) * - 4 + 6: fast-frame header and padding * - 16: 2 LLC FF tunnel headers * - 14: 1 802.3 FF tunnel header (mbuf already accounts for 2nd) */ framelen = m1->m_pkthdr.len + 32 + ATH_FF_MAX_HDR_PAD + ATH_FF_MAX_SEP_PAD + ATH_FF_MAX_HDR; if (vap->iv_flags & IEEE80211_F_PRIVACY) framelen += 24; if (m2 != NULL) framelen += m2->m_pkthdr.len; /* * For now, we assume non-shortgi, 20MHz, just because I want to * at least test 802.11n. */ if (ni->ni_txrate & IEEE80211_RATE_MCS) frame_time = ieee80211_compute_duration_ht(framelen, ni->ni_txrate, IEEE80211_HT_RC_2_STREAMS(ni->ni_txrate), 0, /* isht40 */ 0); /* isshortgi */ else frame_time = ieee80211_compute_duration(ic->ic_rt, framelen, ni->ni_txrate, 0); return (frame_time); } /* * Check if the supplied frame can be partnered with an existing * or pending frame. Return a reference to any frame that should be * sent on return; otherwise return NULL. */ struct mbuf * ieee80211_ff_check(struct ieee80211_node *ni, struct mbuf *m) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct ieee80211_superg *sg = ic->ic_superg; const int pri = M_WME_GETAC(m); struct ieee80211_stageq *sq; struct ieee80211_tx_ampdu *tap; struct mbuf *mstaged; uint32_t txtime, limit; IEEE80211_TX_UNLOCK_ASSERT(ic); IEEE80211_LOCK(ic); limit = IEEE80211_TXOP_TO_US( ic->ic_wme.wme_chanParams.cap_wmeParams[pri].wmep_txopLimit); IEEE80211_UNLOCK(ic); /* * Check if the supplied frame can be aggregated. 
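 * (Illustrative summary, hedged: multicast frames in non-station modes,
 *  flows below ieee80211_ffppsmin packets/sec with nothing staged, and
 *  pairs whose estimated airtime would exceed the AC's TXOP limit are
 *  all handed back for normal, unaggregated transmission.)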
* * NB: we allow EAPOL frames to be aggregated with other ucast traffic. * Do 802.1x EAPOL frames proceed in the clear? Then they couldn't * be aggregated with other types of frames when encryption is on? */ IEEE80211_FF_LOCK(ic); tap = &ni->ni_tx_ampdu[WME_AC_TO_TID(pri)]; mstaged = ni->ni_tx_superg[WME_AC_TO_TID(pri)]; /* XXX NOTE: reusing packet counter state from A-MPDU */ /* * XXX NOTE: this means we're double-counting; it should just * be done in ieee80211_output.c once for both superg and A-MPDU. */ ieee80211_txampdu_count_packet(tap); /* * When not in station mode never aggregate a multicast * frame; this insures, for example, that a combined frame * does not require multiple encryption keys. */ if (vap->iv_opmode != IEEE80211_M_STA && ETHER_IS_MULTICAST(mtod(m, struct ether_header *)->ether_dhost)) { /* XXX flush staged frame? */ IEEE80211_FF_UNLOCK(ic); return m; } /* * If there is no frame to combine with and the pps is * too low; then do not attempt to aggregate this frame. */ if (mstaged == NULL && ieee80211_txampdu_getpps(tap) < ieee80211_ffppsmin) { IEEE80211_FF_UNLOCK(ic); return m; } sq = &sg->ff_stageq[pri]; /* * Check the txop limit to insure the aggregate fits. */ if (limit != 0 && (txtime = ff_approx_txtime(ni, m, mstaged)) > limit) { /* * Aggregate too long, return to the caller for direct * transmission. In addition, flush any pending frame * before sending this one. */ IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG, "%s: txtime %u exceeds txop limit %u\n", __func__, txtime, limit); ni->ni_tx_superg[WME_AC_TO_TID(pri)] = NULL; if (mstaged != NULL) stageq_remove(ic, sq, mstaged); IEEE80211_FF_UNLOCK(ic); if (mstaged != NULL) { IEEE80211_TX_LOCK(ic); IEEE80211_NOTE(vap, IEEE80211_MSG_SUPERG, ni, "%s: flush staged frame", __func__); /* encap and xmit */ ff_transmit(ni, mstaged); IEEE80211_TX_UNLOCK(ic); } return m; /* NB: original frame */ } /* * An aggregation candidate. If there's a frame to partner * with then combine and return for processing. Otherwise * save this frame and wait for a partner to show up (or * the frame to be flushed). Note that staged frames also * hold their node reference. */ if (mstaged != NULL) { ni->ni_tx_superg[WME_AC_TO_TID(pri)] = NULL; stageq_remove(ic, sq, mstaged); IEEE80211_FF_UNLOCK(ic); IEEE80211_NOTE(vap, IEEE80211_MSG_SUPERG, ni, "%s: aggregate fast-frame", __func__); /* * Release the node reference; we only need * the one already in mstaged. */ KASSERT(mstaged->m_pkthdr.rcvif == (void *)ni, ("rcvif %p ni %p", mstaged->m_pkthdr.rcvif, ni)); ieee80211_free_node(ni); m->m_nextpkt = NULL; mstaged->m_nextpkt = m; mstaged->m_flags |= M_FF; /* NB: mark for encap work */ } else { KASSERT(ni->ni_tx_superg[WME_AC_TO_TID(pri)] == NULL, ("ni_tx_superg[]: %p", ni->ni_tx_superg[WME_AC_TO_TID(pri)])); ni->ni_tx_superg[WME_AC_TO_TID(pri)] = m; stageq_add(ic, sq, m); IEEE80211_FF_UNLOCK(ic); IEEE80211_NOTE(vap, IEEE80211_MSG_SUPERG, ni, "%s: stage frame, %u queued", __func__, sq->depth); /* NB: mstaged is NULL */ } return mstaged; } struct mbuf * ieee80211_amsdu_check(struct ieee80211_node *ni, struct mbuf *m) { /* * XXX TODO: actually enforce the node support * and HTCAP requirements for the maximum A-MSDU * size. */ /* First: software A-MSDU transmit? */ if (! ieee80211_amsdu_tx_ok(ni)) return (m); /* Next - EAPOL? 
Nope, don't aggregate; we don't QoS encap them */ if (m->m_flags & (M_EAPOL | M_MCAST | M_BCAST)) return (m); /* Next - needs to be a data frame, non-broadcast, etc */ if (ETHER_IS_MULTICAST(mtod(m, struct ether_header *)->ether_dhost)) return (m); return (ieee80211_ff_check(ni, m)); } void ieee80211_ff_node_init(struct ieee80211_node *ni) { /* * Clean FF state on re-associate. This handles the case * where a station leaves w/o notifying us and then returns * before node is reaped for inactivity. */ ieee80211_ff_node_cleanup(ni); } void ieee80211_ff_node_cleanup(struct ieee80211_node *ni) { struct ieee80211com *ic = ni->ni_ic; struct ieee80211_superg *sg = ic->ic_superg; struct mbuf *m, *next_m, *head; int tid; IEEE80211_FF_LOCK(ic); head = NULL; for (tid = 0; tid < WME_NUM_TID; tid++) { int ac = TID_TO_WME_AC(tid); /* * XXX Initialise the packet counter. * * This may be double-work for 11n stations; * but without it we never setup things. */ ieee80211_txampdu_init_pps(&ni->ni_tx_ampdu[tid]); m = ni->ni_tx_superg[tid]; if (m != NULL) { ni->ni_tx_superg[tid] = NULL; stageq_remove(ic, &sg->ff_stageq[ac], m); m->m_nextpkt = head; head = m; } } IEEE80211_FF_UNLOCK(ic); /* * Free mbufs, taking care to not dereference the mbuf after * we free it (hence grabbing m_nextpkt before we free it.) */ m = head; while (m != NULL) { next_m = m->m_nextpkt; m_freem(m); ieee80211_free_node(ni); m = next_m; } } /* * Switch between turbo and non-turbo operating modes. * Use the specified channel flags to locate the new * channel, update 802.11 state, and then call back into * the driver to effect the change. */ void ieee80211_dturbo_switch(struct ieee80211vap *vap, int newflags) { struct ieee80211com *ic = vap->iv_ic; struct ieee80211_channel *chan; chan = ieee80211_find_channel(ic, ic->ic_bsschan->ic_freq, newflags); if (chan == NULL) { /* XXX should not happen */ IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG, "%s: no channel with freq %u flags 0x%x\n", __func__, ic->ic_bsschan->ic_freq, newflags); return; } IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG, "%s: %s -> %s (freq %u flags 0x%x)\n", __func__, ieee80211_phymode_name[ieee80211_chan2mode(ic->ic_bsschan)], ieee80211_phymode_name[ieee80211_chan2mode(chan)], chan->ic_freq, chan->ic_flags); ic->ic_bsschan = chan; ic->ic_prevchan = ic->ic_curchan; ic->ic_curchan = chan; ic->ic_rt = ieee80211_get_ratetable(chan); ic->ic_set_channel(ic); ieee80211_radiotap_chan_change(ic); /* NB: do not need to reset ERP state 'cuz we're in sta mode */ } /* * Return the current ``state'' of an Atheros capbility. * If associated in station mode report the negotiated * setting. Otherwise report the current setting. 
*/ static int getathcap(struct ieee80211vap *vap, int cap) { if (vap->iv_opmode == IEEE80211_M_STA && vap->iv_state == IEEE80211_S_RUN) return IEEE80211_ATH_CAP(vap, vap->iv_bss, cap) != 0; else return (vap->iv_flags & cap) != 0; } static int superg_ioctl_get80211(struct ieee80211vap *vap, struct ieee80211req *ireq) { switch (ireq->i_type) { case IEEE80211_IOC_FF: ireq->i_val = getathcap(vap, IEEE80211_F_FF); break; case IEEE80211_IOC_TURBOP: ireq->i_val = getathcap(vap, IEEE80211_F_TURBOP); break; default: return ENOSYS; } return 0; } IEEE80211_IOCTL_GET(superg, superg_ioctl_get80211); static int superg_ioctl_set80211(struct ieee80211vap *vap, struct ieee80211req *ireq) { switch (ireq->i_type) { case IEEE80211_IOC_FF: if (ireq->i_val) { if ((vap->iv_caps & IEEE80211_C_FF) == 0) return EOPNOTSUPP; vap->iv_flags |= IEEE80211_F_FF; } else vap->iv_flags &= ~IEEE80211_F_FF; return ENETRESET; case IEEE80211_IOC_TURBOP: if (ireq->i_val) { if ((vap->iv_caps & IEEE80211_C_TURBOP) == 0) return EOPNOTSUPP; vap->iv_flags |= IEEE80211_F_TURBOP; } else vap->iv_flags &= ~IEEE80211_F_TURBOP; return ENETRESET; default: return ENOSYS; } } IEEE80211_IOCTL_SET(superg, superg_ioctl_set80211); #endif /* IEEE80211_SUPPORT_SUPERG */ diff --git a/sys/net80211/ieee80211_wds.c b/sys/net80211/ieee80211_wds.c index debe89436d3d..bb85b430b5b3 100644 --- a/sys/net80211/ieee80211_wds.c +++ b/sys/net80211/ieee80211_wds.c @@ -1,804 +1,804 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2007-2008 Sam Leffler, Errno Consulting * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include #ifdef __FreeBSD__ __FBSDID("$FreeBSD$"); #endif /* * IEEE 802.11 WDS mode support. 
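 * (Illustrative background, hedged: WDS links carry 4-address data
 *  frames with both FromDS and ToDS set, so receiver, transmitter,
 *  destination and source addresses can all differ; "legacy" WDS below
 *  creates a static peer node, while dynamic DWDS reuses the peer's
 *  existing association on the companion hostap vap.)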
*/ #include "opt_inet.h" #include "opt_wlan.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef IEEE80211_SUPPORT_SUPERG #include #endif static void wds_vattach(struct ieee80211vap *); static int wds_newstate(struct ieee80211vap *, enum ieee80211_state, int); static int wds_input(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_rx_stats *rxs, int, int); static void wds_recv_mgmt(struct ieee80211_node *, struct mbuf *, int subtype, const struct ieee80211_rx_stats *, int, int); void ieee80211_wds_attach(struct ieee80211com *ic) { ic->ic_vattach[IEEE80211_M_WDS] = wds_vattach; } void ieee80211_wds_detach(struct ieee80211com *ic) { } static void wds_vdetach(struct ieee80211vap *vap) { if (vap->iv_bss != NULL) { /* XXX locking? */ if (vap->iv_bss->ni_wdsvap == vap) vap->iv_bss->ni_wdsvap = NULL; } } static void wds_vattach(struct ieee80211vap *vap) { vap->iv_newstate = wds_newstate; vap->iv_input = wds_input; vap->iv_recv_mgmt = wds_recv_mgmt; vap->iv_opdetach = wds_vdetach; } static void wds_flush(struct ieee80211_node *ni) { struct ieee80211com *ic = ni->ni_ic; struct mbuf *m, *next; int8_t rssi, nf; m = ieee80211_ageq_remove(&ic->ic_stageq, (void *)(uintptr_t) ieee80211_mac_hash(ic, ni->ni_macaddr)); if (m == NULL) return; IEEE80211_NOTE(ni->ni_vap, IEEE80211_MSG_WDS, ni, "%s", "flush wds queue"); ic->ic_node_getsignal(ni, &rssi, &nf); for (; m != NULL; m = next) { next = m->m_nextpkt; m->m_nextpkt = NULL; ieee80211_input(ni, m, rssi, nf); } } static int ieee80211_create_wds(struct ieee80211vap *vap, struct ieee80211_channel *chan) { struct ieee80211com *ic = vap->iv_ic; struct ieee80211_node_table *nt = &ic->ic_sta; struct ieee80211_node *ni, *obss; IEEE80211_DPRINTF(vap, IEEE80211_MSG_WDS, "%s: creating link to %s on channel %u\n", __func__, ether_sprintf(vap->iv_des_bssid), ieee80211_chan2ieee(ic, chan)); /* NB: vap create must specify the bssid for the link */ KASSERT(vap->iv_flags & IEEE80211_F_DESBSSID, ("no bssid")); /* NB: we should only be called on RUN transition */ KASSERT(vap->iv_state == IEEE80211_S_RUN, ("!RUN state")); if ((vap->iv_flags_ext & IEEE80211_FEXT_WDSLEGACY) == 0) { /* * Dynamic/non-legacy WDS. Reference the associated * station specified by the desired bssid setup at vap * create. Point ni_wdsvap at the WDS vap so 4-address * frames received through the associated AP vap will * be dispatched upward (e.g. to a bridge) as though * they arrived on the WDS vap. */ IEEE80211_NODE_LOCK(nt); obss = NULL; ni = ieee80211_find_node_locked(&ic->ic_sta, vap->iv_des_bssid); if (ni == NULL) { /* * Node went away before we could hookup. This * should be ok; no traffic will flow and a leave * event will be dispatched that should cause * the vap to be destroyed. */ IEEE80211_DPRINTF(vap, IEEE80211_MSG_WDS, "%s: station %s went away\n", __func__, ether_sprintf(vap->iv_des_bssid)); /* XXX stat? */ } else if (ni->ni_wdsvap != NULL) { /* * Node already setup with a WDS vap; we cannot * allow multiple references so disallow. If * ni_wdsvap points at us that's ok; we should * do nothing anyway. */ /* XXX printf instead? */ IEEE80211_DPRINTF(vap, IEEE80211_MSG_WDS, "%s: station %s in use with %s\n", __func__, ether_sprintf(vap->iv_des_bssid), ni->ni_wdsvap->iv_ifp->if_xname); /* XXX stat? */ } else { /* * Committed to new node, setup state. 
*/ obss = vap->iv_update_bss(vap, ni); ni->ni_wdsvap = vap; } IEEE80211_NODE_UNLOCK(nt); if (obss != NULL) { /* NB: deferred to avoid recursive lock */ ieee80211_free_node(obss); } } else { /* * Legacy WDS vap setup. */ /* * The far end does not associate so we just create * create a new node and install it as the vap's * bss node. We must simulate an association and * authorize the port for traffic to flow. * XXX check if node already in sta table? */ ni = ieee80211_node_create_wds(vap, vap->iv_des_bssid, chan); if (ni != NULL) { obss = vap->iv_update_bss(vap, ieee80211_ref_node(ni)); ni->ni_flags |= IEEE80211_NODE_AREF; if (obss != NULL) ieee80211_free_node(obss); /* give driver a chance to setup state like ni_txrate */ if (ic->ic_newassoc != NULL) ic->ic_newassoc(ni, 1); /* tell the authenticator about new station */ if (vap->iv_auth->ia_node_join != NULL) vap->iv_auth->ia_node_join(ni); if (ni->ni_authmode != IEEE80211_AUTH_8021X) ieee80211_node_authorize(ni); ieee80211_notify_node_join(ni, 1 /*newassoc*/); /* XXX inject l2uf frame */ } } /* * Flush any pending frames now that were setup. */ if (ni != NULL) wds_flush(ni); return (ni == NULL ? ENOENT : 0); } /* * Propagate multicast frames of an ap vap to all DWDS links. * The caller is assumed to have verified this frame is multicast. */ void ieee80211_dwds_mcast(struct ieee80211vap *vap0, struct mbuf *m) { struct ieee80211com *ic = vap0->iv_ic; const struct ether_header *eh = mtod(m, const struct ether_header *); struct ieee80211_node *ni; struct ieee80211vap *vap; struct ifnet *ifp; struct mbuf *mcopy; int err; KASSERT(ETHER_IS_MULTICAST(eh->ether_dhost), ("%s not mcast", ether_sprintf(eh->ether_dhost))); /* XXX locking */ TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) { /* only DWDS vaps are interesting */ if (vap->iv_opmode != IEEE80211_M_WDS || (vap->iv_flags_ext & IEEE80211_FEXT_WDSLEGACY)) continue; /* if it came in this interface, don't send it back out */ ifp = vap->iv_ifp; if (ifp == m->m_pkthdr.rcvif) continue; /* * Duplicate the frame and send it. */ - mcopy = m_copypacket(m, M_NOWAIT); + mcopy = m_copypacket(m, IEEE80211_M_NOWAIT); if (mcopy == NULL) { if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); /* XXX stat + msg */ continue; } ni = ieee80211_find_txnode(vap, eh->ether_dhost); if (ni == NULL) { /* NB: ieee80211_find_txnode does stat+msg */ if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); m_freem(mcopy); continue; } /* calculate priority so drivers can find the tx queue */ if (ieee80211_classify(ni, mcopy)) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_OUTPUT | IEEE80211_MSG_WDS, eh->ether_dhost, NULL, "%s", "classification failure"); vap->iv_stats.is_tx_classify++; if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); m_freem(mcopy); ieee80211_free_node(ni); continue; } BPF_MTAP(ifp, m); /* 802.3 tx */ /* * Encapsulate the packet in prep for transmission. */ IEEE80211_TX_LOCK(ic); mcopy = ieee80211_encap(vap, ni, mcopy); if (mcopy == NULL) { /* NB: stat+msg handled in ieee80211_encap */ IEEE80211_TX_UNLOCK(ic); ieee80211_free_node(ni); continue; } mcopy->m_flags |= M_MCAST; MPASS((mcopy->m_pkthdr.csum_flags & CSUM_SND_TAG) == 0); mcopy->m_pkthdr.rcvif = (void *) ni; err = ieee80211_parent_xmitpkt(ic, mcopy); IEEE80211_TX_UNLOCK(ic); if (!err) { if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1); if_inc_counter(ifp, IFCOUNTER_OBYTES, m->m_pkthdr.len); } } } /* * Handle DWDS discovery on receipt of a 4-address frame in * ap mode. 
/*
 * Handle DWDS discovery on receipt of a 4-address frame in
 * ap mode.  Queue the frame and post an event for someone
 * to plumb the necessary WDS vap for this station.  Frames
 * received prior to the vap being set running will then be
 * reprocessed as if they were just received.
 */
void
ieee80211_dwds_discover(struct ieee80211_node *ni, struct mbuf *m)
{
	struct ieee80211com *ic = ni->ni_ic;

	/*
	 * Save the frame with an aging interval 4 times
	 * the listen interval specified by the station.
	 * Frames that sit around too long are reclaimed
	 * using this information.
	 * XXX handle overflow?
	 * XXX per/vap beacon interval?
	 */
	MPASS((m->m_pkthdr.csum_flags & CSUM_SND_TAG) == 0);
	m->m_pkthdr.rcvif = (void *)(uintptr_t)
	    ieee80211_mac_hash(ic, ni->ni_macaddr);
	(void) ieee80211_ageq_append(&ic->ic_stageq, m,
	    ((ni->ni_intval * ic->ic_lintval) << 2) / 1024);
	ieee80211_notify_wds_discover(ni);
}

/*
 * IEEE80211_M_WDS vap state machine handler.
 */
static int
wds_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct ieee80211com *ic = vap->iv_ic;
	enum ieee80211_state ostate;
	int error;

	IEEE80211_LOCK_ASSERT(ic);

	ostate = vap->iv_state;
	IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE, "%s: %s -> %s\n", __func__,
	    ieee80211_state_name[ostate], ieee80211_state_name[nstate]);
	vap->iv_state = nstate;			/* state transition */
	callout_stop(&vap->iv_mgtsend);		/* XXX callout_drain */
	if (ostate != IEEE80211_S_SCAN)
		ieee80211_cancel_scan(vap);	/* background scan */
	error = 0;
	switch (nstate) {
	case IEEE80211_S_INIT:
		switch (ostate) {
		case IEEE80211_S_SCAN:
			ieee80211_cancel_scan(vap);
			break;
		default:
			break;
		}
		if (ostate != IEEE80211_S_INIT) {
			/* NB: optimize INIT -> INIT case */
			ieee80211_reset_bss(vap);
		}
		break;
	case IEEE80211_S_SCAN:
		switch (ostate) {
		case IEEE80211_S_INIT:
			ieee80211_check_scan_current(vap);
			break;
		default:
			break;
		}
		break;
	case IEEE80211_S_RUN:
		if (ostate == IEEE80211_S_INIT) {
			/*
			 * Already have a channel; bypass the scan
			 * and startup immediately.
			 */
			error = ieee80211_create_wds(vap, ic->ic_curchan);
		}
		break;
	default:
		break;
	}
	return error;
}

/*
 * Process a received frame.  The node associated with the sender
 * should be supplied.  If nothing was found in the node table then
 * the caller is assumed to supply a reference to iv_bss instead.
 * The RSSI and a timestamp are also supplied.  The RSSI data is used
 * during AP scanning to select an AP to associate with; it can have
 * any units so long as values have consistent units and higher values
 * mean ``better signal''.  The receive timestamp is currently not used
 * by the 802.11 layer.
 */
static int
wds_input(struct ieee80211_node *ni, struct mbuf *m,
    const struct ieee80211_rx_stats *rxs, int rssi, int nf)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct ieee80211com *ic = ni->ni_ic;
	struct ifnet *ifp = vap->iv_ifp;
	struct ieee80211_frame *wh;
	struct ieee80211_key *key;
	struct ether_header *eh;
	int hdrspace, need_tap = 1;	/* mbuf needs to be tapped. */
	uint8_t dir, type, subtype, qos;
	int is_hw_decrypted = 0;
	int has_decrypted = 0;

	/*
	 * Some devices do hardware decryption all the way through
	 * to pretending the frame wasn't encrypted in the first place.
	 * So, tag it appropriately so it isn't discarded inappropriately.
	 */
	if ((rxs != NULL) && (rxs->c_pktflags & IEEE80211_RX_F_DECRYPTED))
		is_hw_decrypted = 1;

	if (m->m_flags & M_AMPDU_MPDU) {
		/*
		 * Fastpath for A-MPDU reorder q resubmission.  Frames
		 * w/ M_AMPDU_MPDU marked have already passed through
		 * here but were received out of order and been held on
		 * the reorder queue.
When resubmitted they are marked * with the M_AMPDU_MPDU flag and we can bypass most of * the normal processing. */ wh = mtod(m, struct ieee80211_frame *); type = IEEE80211_FC0_TYPE_DATA; dir = wh->i_fc[1] & IEEE80211_FC1_DIR_MASK; subtype = IEEE80211_FC0_SUBTYPE_QOS; hdrspace = ieee80211_hdrspace(ic, wh); /* XXX optimize? */ goto resubmit_ampdu; } KASSERT(ni != NULL, ("null node")); type = -1; /* undefined */ if (m->m_pkthdr.len < sizeof(struct ieee80211_frame_min)) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY, ni->ni_macaddr, NULL, "too short (1): len %u", m->m_pkthdr.len); vap->iv_stats.is_rx_tooshort++; goto out; } /* * Bit of a cheat here, we use a pointer for a 3-address * frame format but don't reference fields past outside * ieee80211_frame_min w/o first validating the data is * present. */ wh = mtod(m, struct ieee80211_frame *); if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) ni->ni_inact = ni->ni_inact_reload; if ((wh->i_fc[0] & IEEE80211_FC0_VERSION_MASK) != IEEE80211_FC0_VERSION_0) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY, ni->ni_macaddr, NULL, "wrong version, fc %02x:%02x", wh->i_fc[0], wh->i_fc[1]); vap->iv_stats.is_rx_badversion++; goto err; } dir = wh->i_fc[1] & IEEE80211_FC1_DIR_MASK; type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; /* NB: WDS vap's do not scan */ if (m->m_pkthdr.len < sizeof(struct ieee80211_frame_addr4)) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY, ni->ni_macaddr, NULL, "too short (3): len %u", m->m_pkthdr.len); vap->iv_stats.is_rx_tooshort++; goto out; } /* NB: the TA is implicitly verified by finding the wds peer node */ if (!IEEE80211_ADDR_EQ(wh->i_addr1, vap->iv_myaddr) && !IEEE80211_ADDR_EQ(wh->i_addr1, ifp->if_broadcastaddr)) { /* not interested in */ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT, wh->i_addr1, NULL, "%s", "not to bss"); vap->iv_stats.is_rx_wrongbss++; goto out; } IEEE80211_RSSI_LPF(ni->ni_avgrssi, rssi); ni->ni_noise = nf; if (IEEE80211_HAS_SEQ(type, subtype)) { uint8_t tid = ieee80211_gettid(wh); if (IEEE80211_QOS_HAS_SEQ(wh) && TID_TO_WME_AC(tid) >= WME_AC_VI) ic->ic_wme.wme_hipri_traffic++; if (! ieee80211_check_rxseq(ni, wh, wh->i_addr1, rxs)) goto out; } switch (type) { case IEEE80211_FC0_TYPE_DATA: hdrspace = ieee80211_hdrspace(ic, wh); if (m->m_len < hdrspace && (m = m_pullup(m, hdrspace)) == NULL) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY, ni->ni_macaddr, NULL, "data too short: expecting %u", hdrspace); vap->iv_stats.is_rx_tooshort++; goto out; /* XXX */ } if (dir != IEEE80211_FC1_DIR_DSTODS) { IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, "data", "incorrect dir 0x%x", dir); vap->iv_stats.is_rx_wrongdir++; goto out; } /* * Only legacy WDS traffic should take this path. */ if ((vap->iv_flags_ext & IEEE80211_FEXT_WDSLEGACY) == 0) { IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, "data", "%s", "not legacy wds"); vap->iv_stats.is_rx_wrongdir++;/*XXX*/ goto out; } /* * Handle A-MPDU re-ordering. If the frame is to be * processed directly then ieee80211_ampdu_reorder * will return 0; otherwise it has consumed the mbuf * and we should do nothing more with it. */ if ((m->m_flags & M_AMPDU) && ieee80211_ampdu_reorder(ni, m, rxs) != 0) { m = NULL; goto out; } resubmit_ampdu: /* * Handle privacy requirements. Note that we * must not be preempted from here until after * we (potentially) call ieee80211_crypto_demic; * otherwise we may violate assumptions in the * crypto cipher modules used to do delayed update * of replay sequence numbers. 
*/ if (is_hw_decrypted || IEEE80211_IS_PROTECTED(wh)) { if ((vap->iv_flags & IEEE80211_F_PRIVACY) == 0) { /* * Discard encrypted frames when privacy is off. */ IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, "WEP", "%s", "PRIVACY off"); vap->iv_stats.is_rx_noprivacy++; IEEE80211_NODE_STAT(ni, rx_noprivacy); goto out; } if (ieee80211_crypto_decap(ni, m, hdrspace, &key) == 0) { /* NB: stats+msgs handled in crypto_decap */ IEEE80211_NODE_STAT(ni, rx_wepfail); goto out; } wh = mtod(m, struct ieee80211_frame *); wh->i_fc[1] &= ~IEEE80211_FC1_PROTECTED; has_decrypted = 1; } else { /* XXX M_WEP and IEEE80211_F_PRIVACY */ key = NULL; } /* * Save QoS bits for use below--before we strip the header. */ if (subtype == IEEE80211_FC0_SUBTYPE_QOS) qos = ieee80211_getqos(wh)[0]; else qos = 0; /* * Next up, any fragmentation. */ if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { m = ieee80211_defrag(ni, m, hdrspace, has_decrypted); if (m == NULL) { /* Fragment dropped or frame not complete yet */ goto out; } } wh = NULL; /* no longer valid, catch any uses */ /* * Next strip any MSDU crypto bits. */ if (!ieee80211_crypto_demic(vap, key, m, 0)) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT, ni->ni_macaddr, "data", "%s", "demic error"); vap->iv_stats.is_rx_demicfail++; IEEE80211_NODE_STAT(ni, rx_demicfail); goto out; } /* copy to listener after decrypt */ if (ieee80211_radiotap_active_vap(vap)) ieee80211_radiotap_rx(vap, m); need_tap = 0; /* * Finally, strip the 802.11 header. */ m = ieee80211_decap(vap, m, hdrspace, qos); if (m == NULL) { /* XXX mask bit to check for both */ /* don't count Null data frames as errors */ if (subtype == IEEE80211_FC0_SUBTYPE_NODATA || subtype == IEEE80211_FC0_SUBTYPE_QOS_NULL) goto out; IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT, ni->ni_macaddr, "data", "%s", "decap error"); vap->iv_stats.is_rx_decap++; IEEE80211_NODE_STAT(ni, rx_decap); goto err; } if (!(qos & IEEE80211_QOS_AMSDU)) eh = mtod(m, struct ether_header *); else eh = NULL; if (!ieee80211_node_is_authorized(ni)) { /* * Deny any non-PAE frames received prior to * authorization. For open/shared-key * authentication the port is mark authorized * after authentication completes. For 802.1x * the port is not marked authorized by the * authenticator until the handshake has completed. */ if (eh == NULL || eh->ether_type != htons(ETHERTYPE_PAE)) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT, ni->ni_macaddr, "data", "unauthorized or " "unknown port: ether type 0x%x len %u", eh == NULL ? -1 : eh->ether_type, m->m_pkthdr.len); vap->iv_stats.is_rx_unauth++; IEEE80211_NODE_STAT(ni, rx_unauth); goto err; } } else { /* * When denying unencrypted frames, discard * any non-PAE frames received without encryption. */ if ((vap->iv_flags & IEEE80211_F_DROPUNENC) && ((has_decrypted == 0) && (m->m_flags & M_WEP) == 0) && (is_hw_decrypted == 0) && (eh == NULL || eh->ether_type != htons(ETHERTYPE_PAE))) { /* * Drop unencrypted frames. */ vap->iv_stats.is_rx_unencrypted++; IEEE80211_NODE_STAT(ni, rx_unencrypted); goto out; } } /* XXX require HT? 
*/ if (qos & IEEE80211_QOS_AMSDU) { m = ieee80211_decap_amsdu(ni, m); if (m == NULL) return IEEE80211_FC0_TYPE_DATA; } else { #ifdef IEEE80211_SUPPORT_SUPERG m = ieee80211_decap_fastframe(vap, ni, m); if (m == NULL) return IEEE80211_FC0_TYPE_DATA; #endif } ieee80211_deliver_data(vap, ni, m); return IEEE80211_FC0_TYPE_DATA; case IEEE80211_FC0_TYPE_MGT: vap->iv_stats.is_rx_mgmt++; IEEE80211_NODE_STAT(ni, rx_mgmt); if (dir != IEEE80211_FC1_DIR_NODS) { IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, "data", "incorrect dir 0x%x", dir); vap->iv_stats.is_rx_wrongdir++; goto err; } if (m->m_pkthdr.len < sizeof(struct ieee80211_frame)) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY, ni->ni_macaddr, "mgt", "too short: len %u", m->m_pkthdr.len); vap->iv_stats.is_rx_tooshort++; goto out; } #ifdef IEEE80211_DEBUG if (ieee80211_msg_debug(vap) || ieee80211_msg_dumppkts(vap)) { if_printf(ifp, "received %s from %s rssi %d\n", ieee80211_mgt_subtype_name(subtype), ether_sprintf(wh->i_addr2), rssi); } #endif if (IEEE80211_IS_PROTECTED(wh)) { IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, NULL, "%s", "WEP set but not permitted"); vap->iv_stats.is_rx_mgtdiscard++; /* XXX */ goto out; } vap->iv_recv_mgmt(ni, m, subtype, rxs, rssi, nf); goto out; case IEEE80211_FC0_TYPE_CTL: vap->iv_stats.is_rx_ctl++; IEEE80211_NODE_STAT(ni, rx_ctrl); goto out; default: IEEE80211_DISCARD(vap, IEEE80211_MSG_ANY, wh, "bad", "frame type 0x%x", type); /* should not come here */ break; } err: if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); out: if (m != NULL) { if (need_tap && ieee80211_radiotap_active_vap(vap)) ieee80211_radiotap_rx(vap, m); m_freem(m); } return type; } static void wds_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m0, int subtype, const struct ieee80211_rx_stats *rxs, int rssi, int nf) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct ieee80211_frame *wh; u_int8_t *frm, *efrm; wh = mtod(m0, struct ieee80211_frame *); frm = (u_int8_t *)&wh[1]; efrm = mtod(m0, u_int8_t *) + m0->m_len; switch (subtype) { case IEEE80211_FC0_SUBTYPE_ACTION: case IEEE80211_FC0_SUBTYPE_ACTION_NOACK: if (ni == vap->iv_bss) { IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, NULL, "%s", "unknown node"); vap->iv_stats.is_rx_mgtdiscard++; } else if (!IEEE80211_ADDR_EQ(vap->iv_myaddr, wh->i_addr1)) { /* NB: not interested in multicast frames. */ IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, NULL, "%s", "not for us"); vap->iv_stats.is_rx_mgtdiscard++; } else if (vap->iv_state != IEEE80211_S_RUN) { IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, NULL, "wrong state %s", ieee80211_state_name[vap->iv_state]); vap->iv_stats.is_rx_mgtdiscard++; } else { if (ieee80211_parse_action(ni, m0) == 0) (void)ic->ic_recv_action(ni, wh, frm, efrm); } break; case IEEE80211_FC0_SUBTYPE_ASSOC_REQ: case IEEE80211_FC0_SUBTYPE_ASSOC_RESP: case IEEE80211_FC0_SUBTYPE_REASSOC_REQ: case IEEE80211_FC0_SUBTYPE_REASSOC_RESP: case IEEE80211_FC0_SUBTYPE_PROBE_REQ: case IEEE80211_FC0_SUBTYPE_PROBE_RESP: case IEEE80211_FC0_SUBTYPE_TIMING_ADV: case IEEE80211_FC0_SUBTYPE_BEACON: case IEEE80211_FC0_SUBTYPE_ATIM: case IEEE80211_FC0_SUBTYPE_DISASSOC: case IEEE80211_FC0_SUBTYPE_AUTH: case IEEE80211_FC0_SUBTYPE_DEAUTH: IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, NULL, "%s", "not handled"); vap->iv_stats.is_rx_mgtdiscard++; break; default: IEEE80211_DISCARD(vap, IEEE80211_MSG_ANY, wh, "mgt", "subtype 0x%x not handled", subtype); vap->iv_stats.is_rx_badsubtype++; break; } }
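ieee80211_dwds_discover() above stages the frame with an age of ((ni_intval * ic_lintval) << 2) / 1024: the shift by two is "times four", and the divide by 1024 scales the TU-based product back down. The standalone sketch below just reproduces that arithmetic; the sample values are hypothetical, and the units of the resulting age depend on ieee80211_ageq's conventions, which are not visible in this hunk.

/*
 * Illustrative arithmetic only: the aging-interval computation used
 * when staging a DWDS discovery frame.
 */
#include <stdio.h>

int
main(void)
{
	int ni_intval = 100;	/* beacon interval, TU (1 TU = 1024 us) */
	int ic_lintval = 10;	/* listen interval, per this hunk's usage */
	int age;

	/* << 2 is "times four"; / 1024 scales the TU-based product down. */
	age = ((ni_intval * ic_lintval) << 2) / 1024;
	printf("age = %d\n", age);	/* (100 * 10 * 4) / 1024 = 3 */
	return (0);
}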
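wds_input() above derives dir, type and subtype from the two Frame Control bytes with the usual 802.11 masks. Here is a self-contained sketch with those mask values written out as local constants; the IEEE80211_FC0_*/FC1_* names belong to net80211, while the numeric values follow the standard 802.11 frame-control layout.

/*
 * Illustrative sketch only: extract version/type/subtype/direction
 * from the two 802.11 Frame Control bytes.
 */
#include <stdint.h>
#include <stdio.h>

#define FC0_VERSION_MASK	0x03
#define FC0_TYPE_MASK		0x0c	/* 0 mgmt, 1 ctl, 2 data */
#define FC0_SUBTYPE_MASK	0xf0
#define FC1_DIR_MASK		0x03	/* NODS/TODS/FROMDS/DSTODS */

int
main(void)
{
	/* QoS data, DS-to-DS: the 4-address case WDS cares about. */
	uint8_t fc0 = 0x88, fc1 = 0x03;

	printf("version 0x%x type 0x%x subtype 0x%x dir 0x%x\n",
	    fc0 & FC0_VERSION_MASK,
	    fc0 & FC0_TYPE_MASK,
	    fc0 & FC0_SUBTYPE_MASK,
	    fc1 & FC1_DIR_MASK);
	return (0);
}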
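Before a node is authorized, wds_input() above lets only PAE (802.1X EAPOL, ethertype 0x888e) frames through; everything else is dropped and counted. A sketch of that gate as a standalone predicate follows; frame_ok() and its arguments are hypothetical, not part of net80211.

/*
 * Illustrative sketch only: admit EAPOL frames before the port is
 * authorized, and everything only afterwards.
 */
#include <arpa/inet.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ETHERTYPE_PAE	0x888e		/* 802.1X EAPOL */

static bool
frame_ok(bool port_authorized, uint16_t ether_type_net)
{
	if (port_authorized)
		return (true);
	/* Unauthorized port: only the 802.1X handshake may pass. */
	return (ntohs(ether_type_net) == ETHERTYPE_PAE);
}

int
main(void)
{
	printf("EAPOL before auth: %d\n", frame_ok(false, htons(ETHERTYPE_PAE)));
	printf("IPv4 before auth:  %d\n", frame_ok(false, htons(0x0800)));
	printf("IPv4 after auth:   %d\n", frame_ok(true, htons(0x0800)));
	return (0);
}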