Index: head/sys/dev/bwn/if_bwn.c
===================================================================
--- head/sys/dev/bwn/if_bwn.c	(revision 330687)
+++ head/sys/dev/bwn/if_bwn.c	(revision 330688)
@@ -1,7779 +1,7753 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2009-2010 Weongyo Jeong
 * Copyright (c) 2016 Landon Fuller
 * Copyright (c) 2017 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Landon Fuller
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * The Broadcom Wireless LAN controller driver.
 */

#include "opt_bwn.h"
#include "opt_wlan.h"

#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include

#include "bhnd_nvram_map.h"
#include "gpio_if.h"

static SYSCTL_NODE(_hw, OID_AUTO, bwn, CTLFLAG_RD, 0,
    "Broadcom driver parameters");

/*
 * Tunable & sysctl variables.
 */

#ifdef BWN_DEBUG
static int bwn_debug = 0;
SYSCTL_INT(_hw_bwn, OID_AUTO, debug, CTLFLAG_RWTUN, &bwn_debug, 0,
    "Broadcom debugging printfs");
#endif

static int bwn_bfp = 0;		/* use "Bad Frames Preemption" */
SYSCTL_INT(_hw_bwn, OID_AUTO, bfp, CTLFLAG_RW, &bwn_bfp, 0,
    "uses Bad Frames Preemption");

static int bwn_bluetooth = 1;
SYSCTL_INT(_hw_bwn, OID_AUTO, bluetooth, CTLFLAG_RW, &bwn_bluetooth, 0,
    "turns on Bluetooth Coexistence");

static int bwn_hwpctl = 0;
SYSCTL_INT(_hw_bwn, OID_AUTO, hwpctl, CTLFLAG_RW, &bwn_hwpctl, 0,
    "uses H/W power control");

static int bwn_usedma = 1;
SYSCTL_INT(_hw_bwn, OID_AUTO, usedma, CTLFLAG_RD, &bwn_usedma, 0,
    "uses DMA");
TUNABLE_INT("hw.bwn.usedma", &bwn_usedma);

static int bwn_wme = 1;
SYSCTL_INT(_hw_bwn, OID_AUTO, wme, CTLFLAG_RW, &bwn_wme, 0,
    "uses WME support");

static void bwn_attach_pre(struct bwn_softc *);
static int bwn_attach_post(struct bwn_softc *);
static int bwn_retain_bus_providers(struct bwn_softc *sc);
static void bwn_release_bus_providers(struct bwn_softc *sc);
static void bwn_sprom_bugfixes(device_t);
static int bwn_init(struct bwn_softc *);
static void bwn_parent(struct ieee80211com *);
static void bwn_start(struct bwn_softc *);
static int bwn_transmit(struct ieee80211com *, struct mbuf *);
static int bwn_attach_core(struct bwn_mac *);
static int bwn_phy_getinfo(struct bwn_mac *, int);
static int bwn_chiptest(struct bwn_mac *);
static int bwn_setup_channels(struct bwn_mac *, int, int);
static void bwn_shm_ctlword(struct bwn_mac *, uint16_t, uint16_t);
static void bwn_addchannels(struct ieee80211_channel [], int, int *,
    const struct bwn_channelinfo *, const uint8_t []);
static int bwn_raw_xmit(struct ieee80211_node *, struct mbuf *,
    const struct ieee80211_bpf_params *);
static void bwn_updateslot(struct ieee80211com *);
static void bwn_update_promisc(struct ieee80211com *);
static void bwn_wme_init(struct bwn_mac *);
static int bwn_wme_update(struct ieee80211com *);
static void bwn_wme_clear(struct bwn_softc *);
static void bwn_wme_load(struct bwn_mac *);
static void bwn_wme_loadparams(struct bwn_mac *, const struct wmeParams *,
    uint16_t);
static void bwn_scan_start(struct ieee80211com *);
static void bwn_scan_end(struct ieee80211com *);
static void bwn_set_channel(struct ieee80211com *);
static struct ieee80211vap *bwn_vap_create(struct ieee80211com *,
    const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
    const uint8_t [IEEE80211_ADDR_LEN], const uint8_t [IEEE80211_ADDR_LEN]);
static void bwn_vap_delete(struct ieee80211vap *);
static void bwn_stop(struct bwn_softc *);
static int bwn_core_forceclk(struct bwn_mac *, bool);
static int bwn_core_init(struct bwn_mac *);
static void bwn_core_start(struct bwn_mac *);
static void bwn_core_exit(struct bwn_mac *);
static void bwn_bt_disable(struct bwn_mac *);
static int bwn_chip_init(struct bwn_mac *);
static void bwn_set_txretry(struct bwn_mac *, int, int);
static void bwn_rate_init(struct bwn_mac *);
static void bwn_set_phytxctl(struct bwn_mac *);
static void bwn_spu_setdelay(struct bwn_mac *, int);
static void bwn_bt_enable(struct bwn_mac *);
static void bwn_set_macaddr(struct bwn_mac *);
static void bwn_crypt_init(struct bwn_mac *);
static void bwn_chip_exit(struct bwn_mac *);
static int bwn_fw_fillinfo(struct bwn_mac *);
static int bwn_fw_loaducode(struct bwn_mac *);
static int bwn_gpio_init(struct bwn_mac *);
static int bwn_fw_loadinitvals(struct bwn_mac *);
static int bwn_phy_init(struct bwn_mac *);
static void bwn_set_txantenna(struct bwn_mac *, int);
static void bwn_set_opmode(struct
    bwn_mac *);
static void bwn_rate_write(struct bwn_mac *, uint16_t, int);
static uint8_t bwn_plcp_getcck(const uint8_t);
static uint8_t bwn_plcp_getofdm(const uint8_t);
static void bwn_pio_init(struct bwn_mac *);
static uint16_t bwn_pio_idx2base(struct bwn_mac *, int);
static void bwn_pio_set_txqueue(struct bwn_mac *, struct bwn_pio_txqueue *,
    int);
static void bwn_pio_setupqueue_rx(struct bwn_mac *, struct bwn_pio_rxqueue *,
    int);
static void bwn_destroy_queue_tx(struct bwn_pio_txqueue *);
static uint16_t bwn_pio_read_2(struct bwn_mac *, struct bwn_pio_txqueue *,
    uint16_t);
static void bwn_pio_cancel_tx_packets(struct bwn_pio_txqueue *);
static int bwn_pio_rx(struct bwn_pio_rxqueue *);
static uint8_t bwn_pio_rxeof(struct bwn_pio_rxqueue *);
static void bwn_pio_handle_txeof(struct bwn_mac *,
    const struct bwn_txstatus *);
static uint16_t bwn_pio_rx_read_2(struct bwn_pio_rxqueue *, uint16_t);
static uint32_t bwn_pio_rx_read_4(struct bwn_pio_rxqueue *, uint16_t);
static void bwn_pio_rx_write_2(struct bwn_pio_rxqueue *, uint16_t, uint16_t);
static void bwn_pio_rx_write_4(struct bwn_pio_rxqueue *, uint16_t, uint32_t);
static int bwn_pio_tx_start(struct bwn_mac *, struct ieee80211_node *,
    struct mbuf *);
static struct bwn_pio_txqueue *bwn_pio_select(struct bwn_mac *, uint8_t);
static uint32_t bwn_pio_write_multi_4(struct bwn_mac *,
    struct bwn_pio_txqueue *, uint32_t, const void *, int);
static void bwn_pio_write_4(struct bwn_mac *, struct bwn_pio_txqueue *,
    uint16_t, uint32_t);
static uint16_t bwn_pio_write_multi_2(struct bwn_mac *,
    struct bwn_pio_txqueue *, uint16_t, const void *, int);
static uint16_t bwn_pio_write_mbuf_2(struct bwn_mac *,
    struct bwn_pio_txqueue *, uint16_t, struct mbuf *);
static struct bwn_pio_txqueue *bwn_pio_parse_cookie(struct bwn_mac *, uint16_t,
    struct bwn_pio_txpkt **);
static void bwn_dma_init(struct bwn_mac *);
static void bwn_dma_rxdirectfifo(struct bwn_mac *, int, uint8_t);
static uint16_t bwn_dma_base(int, int);
static void bwn_dma_ringfree(struct bwn_dma_ring **);
static void bwn_dma_32_getdesc(struct bwn_dma_ring *, int,
    struct bwn_dmadesc_generic **, struct bwn_dmadesc_meta **);
static void bwn_dma_32_setdesc(struct bwn_dma_ring *,
    struct bwn_dmadesc_generic *, bus_addr_t, uint16_t, int, int, int);
static void bwn_dma_32_start_transfer(struct bwn_dma_ring *, int);
static void bwn_dma_32_suspend(struct bwn_dma_ring *);
static void bwn_dma_32_resume(struct bwn_dma_ring *);
static int bwn_dma_32_get_curslot(struct bwn_dma_ring *);
static void bwn_dma_32_set_curslot(struct bwn_dma_ring *, int);
static void bwn_dma_64_getdesc(struct bwn_dma_ring *, int,
    struct bwn_dmadesc_generic **, struct bwn_dmadesc_meta **);
static void bwn_dma_64_setdesc(struct bwn_dma_ring *,
    struct bwn_dmadesc_generic *, bus_addr_t, uint16_t, int, int, int);
static void bwn_dma_64_start_transfer(struct bwn_dma_ring *, int);
static void bwn_dma_64_suspend(struct bwn_dma_ring *);
static void bwn_dma_64_resume(struct bwn_dma_ring *);
static int bwn_dma_64_get_curslot(struct bwn_dma_ring *);
static void bwn_dma_64_set_curslot(struct bwn_dma_ring *, int);
static int bwn_dma_allocringmemory(struct bwn_dma_ring *);
static void bwn_dma_setup(struct bwn_dma_ring *);
static void bwn_dma_free_ringmemory(struct bwn_dma_ring *);
static void bwn_dma_cleanup(struct bwn_dma_ring *);
static void bwn_dma_free_descbufs(struct bwn_dma_ring *);
static int bwn_dma_tx_reset(struct bwn_mac *, uint16_t, int);
static void bwn_dma_rx(struct bwn_dma_ring *);
static int bwn_dma_rx_reset(struct bwn_mac *, uint16_t, int);
static void bwn_dma_free_descbuf(struct bwn_dma_ring *,
    struct bwn_dmadesc_meta *);
static void bwn_dma_set_redzone(struct bwn_dma_ring *, struct mbuf *);
static void bwn_dma_ring_addr(void *, bus_dma_segment_t *, int, int);
static int bwn_dma_freeslot(struct bwn_dma_ring *);
static int bwn_dma_nextslot(struct bwn_dma_ring *, int);
static void bwn_dma_rxeof(struct bwn_dma_ring *, int *);
static int bwn_dma_newbuf(struct bwn_dma_ring *, struct bwn_dmadesc_generic *,
    struct bwn_dmadesc_meta *, int);
static void bwn_dma_buf_addr(void *, bus_dma_segment_t *, int, bus_size_t,
    int);
static uint8_t bwn_dma_check_redzone(struct bwn_dma_ring *, struct mbuf *);
static void bwn_ratectl_tx_complete(const struct ieee80211_node *,
    const struct bwn_txstatus *);
static void bwn_dma_handle_txeof(struct bwn_mac *,
    const struct bwn_txstatus *);
static int bwn_dma_tx_start(struct bwn_mac *, struct ieee80211_node *,
    struct mbuf *);
static int bwn_dma_getslot(struct bwn_dma_ring *);
static struct bwn_dma_ring *bwn_dma_select(struct bwn_mac *, uint8_t);
static int bwn_dma_attach(struct bwn_mac *);
static struct bwn_dma_ring *bwn_dma_ringsetup(struct bwn_mac *, int, int);
static struct bwn_dma_ring *bwn_dma_parse_cookie(struct bwn_mac *,
    const struct bwn_txstatus *, uint16_t, int *);
static void bwn_dma_free(struct bwn_mac *);
static int bwn_fw_gets(struct bwn_mac *, enum bwn_fwtype);
static int bwn_fw_get(struct bwn_mac *, enum bwn_fwtype, const char *,
    struct bwn_fwfile *);
static void bwn_release_firmware(struct bwn_mac *);
static void bwn_do_release_fw(struct bwn_fwfile *);
static uint16_t bwn_fwcaps_read(struct bwn_mac *);
static int bwn_fwinitvals_write(struct bwn_mac *,
    const struct bwn_fwinitvals *, size_t, size_t);
static uint16_t bwn_ant2phy(int);
static void bwn_mac_write_bssid(struct bwn_mac *);
static void bwn_mac_setfilter(struct bwn_mac *, uint16_t, const uint8_t *);
static void bwn_key_dowrite(struct bwn_mac *, uint8_t, uint8_t,
    const uint8_t *, size_t, const uint8_t *);
static void bwn_key_macwrite(struct bwn_mac *, uint8_t, const uint8_t *);
static void bwn_key_write(struct bwn_mac *, uint8_t, uint8_t,
    const uint8_t *);
static void bwn_phy_exit(struct bwn_mac *);
static void bwn_core_stop(struct bwn_mac *);
static int bwn_switch_band(struct bwn_softc *, struct ieee80211_channel *);
static int bwn_phy_reset(struct bwn_mac *);
static int bwn_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static void bwn_set_pretbtt(struct bwn_mac *);
static int bwn_intr(void *);
static void bwn_intrtask(void *, int);
static void bwn_restart(struct bwn_mac *, const char *);
static void bwn_intr_ucode_debug(struct bwn_mac *);
static void bwn_intr_tbtt_indication(struct bwn_mac *);
static void bwn_intr_atim_end(struct bwn_mac *);
static void bwn_intr_beacon(struct bwn_mac *);
static void bwn_intr_pmq(struct bwn_mac *);
static void bwn_intr_noise(struct bwn_mac *);
static void bwn_intr_txeof(struct bwn_mac *);
static void bwn_hwreset(void *, int);
static void bwn_handle_fwpanic(struct bwn_mac *);
static void bwn_load_beacon0(struct bwn_mac *);
static void bwn_load_beacon1(struct bwn_mac *);
static uint32_t bwn_jssi_read(struct bwn_mac *);
static void bwn_noise_gensample(struct bwn_mac *);
static void bwn_handle_txeof(struct bwn_mac *, const struct bwn_txstatus *);
static void bwn_rxeof(struct bwn_mac *, struct mbuf *, const void *);
static void bwn_phy_txpower_check(struct bwn_mac *, uint32_t);
static int bwn_tx_start(struct bwn_softc *, struct ieee80211_node *,
    struct mbuf *);
static int
bwn_tx_isfull(struct bwn_softc *, struct mbuf *); static int bwn_set_txhdr(struct bwn_mac *, struct ieee80211_node *, struct mbuf *, struct bwn_txhdr *, uint16_t); static void bwn_plcp_genhdr(struct bwn_plcp4 *, const uint16_t, const uint8_t); static uint8_t bwn_antenna_sanitize(struct bwn_mac *, uint8_t); static uint8_t bwn_get_fbrate(uint8_t); static void bwn_txpwr(void *, int); static void bwn_tasks(void *); static void bwn_task_15s(struct bwn_mac *); static void bwn_task_30s(struct bwn_mac *); static void bwn_task_60s(struct bwn_mac *); static int bwn_plcp_get_ofdmrate(struct bwn_mac *, struct bwn_plcp6 *, uint8_t); static int bwn_plcp_get_cckrate(struct bwn_mac *, struct bwn_plcp6 *); static void bwn_rx_radiotap(struct bwn_mac *, struct mbuf *, const struct bwn_rxhdr4 *, struct bwn_plcp6 *, int, int, int); static void bwn_tsf_read(struct bwn_mac *, uint64_t *); static void bwn_set_slot_time(struct bwn_mac *, uint16_t); static void bwn_watchdog(void *); static void bwn_dma_stop(struct bwn_mac *); static void bwn_pio_stop(struct bwn_mac *); static void bwn_dma_ringstop(struct bwn_dma_ring **); static int bwn_led_attach(struct bwn_mac *); static void bwn_led_newstate(struct bwn_mac *, enum ieee80211_state); static void bwn_led_event(struct bwn_mac *, int); static void bwn_led_blink_start(struct bwn_mac *, int, int); static void bwn_led_blink_next(void *); static void bwn_led_blink_end(void *); static void bwn_rfswitch(void *); static void bwn_rf_turnon(struct bwn_mac *); static void bwn_rf_turnoff(struct bwn_mac *); static void bwn_sysctl_node(struct bwn_softc *); static const struct bwn_channelinfo bwn_chantable_bg = { .channels = { { 2412, 1, 30 }, { 2417, 2, 30 }, { 2422, 3, 30 }, { 2427, 4, 30 }, { 2432, 5, 30 }, { 2437, 6, 30 }, { 2442, 7, 30 }, { 2447, 8, 30 }, { 2452, 9, 30 }, { 2457, 10, 30 }, { 2462, 11, 30 }, { 2467, 12, 30 }, { 2472, 13, 30 }, { 2484, 14, 30 } }, .nchannels = 14 }; static const struct bwn_channelinfo bwn_chantable_a = { .channels = { { 5170, 34, 30 }, { 5180, 36, 30 }, { 5190, 38, 30 }, { 5200, 40, 30 }, { 5210, 42, 30 }, { 5220, 44, 30 }, { 5230, 46, 30 }, { 5240, 48, 30 }, { 5260, 52, 30 }, { 5280, 56, 30 }, { 5300, 60, 30 }, { 5320, 64, 30 }, { 5500, 100, 30 }, { 5520, 104, 30 }, { 5540, 108, 30 }, { 5560, 112, 30 }, { 5580, 116, 30 }, { 5600, 120, 30 }, { 5620, 124, 30 }, { 5640, 128, 30 }, { 5660, 132, 30 }, { 5680, 136, 30 }, { 5700, 140, 30 }, { 5745, 149, 30 }, { 5765, 153, 30 }, { 5785, 157, 30 }, { 5805, 161, 30 }, { 5825, 165, 30 }, { 5920, 184, 30 }, { 5940, 188, 30 }, { 5960, 192, 30 }, { 5980, 196, 30 }, { 6000, 200, 30 }, { 6020, 204, 30 }, { 6040, 208, 30 }, { 6060, 212, 30 }, { 6080, 216, 30 } }, .nchannels = 37 }; #if 0 static const struct bwn_channelinfo bwn_chantable_n = { .channels = { { 5160, 32, 30 }, { 5170, 34, 30 }, { 5180, 36, 30 }, { 5190, 38, 30 }, { 5200, 40, 30 }, { 5210, 42, 30 }, { 5220, 44, 30 }, { 5230, 46, 30 }, { 5240, 48, 30 }, { 5250, 50, 30 }, { 5260, 52, 30 }, { 5270, 54, 30 }, { 5280, 56, 30 }, { 5290, 58, 30 }, { 5300, 60, 30 }, { 5310, 62, 30 }, { 5320, 64, 30 }, { 5330, 66, 30 }, { 5340, 68, 30 }, { 5350, 70, 30 }, { 5360, 72, 30 }, { 5370, 74, 30 }, { 5380, 76, 30 }, { 5390, 78, 30 }, { 5400, 80, 30 }, { 5410, 82, 30 }, { 5420, 84, 30 }, { 5430, 86, 30 }, { 5440, 88, 30 }, { 5450, 90, 30 }, { 5460, 92, 30 }, { 5470, 94, 30 }, { 5480, 96, 30 }, { 5490, 98, 30 }, { 5500, 100, 30 }, { 5510, 102, 30 }, { 5520, 104, 30 }, { 5530, 106, 30 }, { 5540, 108, 30 }, { 5550, 110, 30 }, { 5560, 112, 30 }, { 5570, 114, 30 }, { 
5580, 116, 30 }, { 5590, 118, 30 }, { 5600, 120, 30 }, { 5610, 122, 30 }, { 5620, 124, 30 }, { 5630, 126, 30 }, { 5640, 128, 30 }, { 5650, 130, 30 }, { 5660, 132, 30 }, { 5670, 134, 30 }, { 5680, 136, 30 }, { 5690, 138, 30 }, { 5700, 140, 30 }, { 5710, 142, 30 }, { 5720, 144, 30 }, { 5725, 145, 30 }, { 5730, 146, 30 }, { 5735, 147, 30 }, { 5740, 148, 30 }, { 5745, 149, 30 }, { 5750, 150, 30 }, { 5755, 151, 30 }, { 5760, 152, 30 }, { 5765, 153, 30 }, { 5770, 154, 30 }, { 5775, 155, 30 }, { 5780, 156, 30 }, { 5785, 157, 30 }, { 5790, 158, 30 }, { 5795, 159, 30 }, { 5800, 160, 30 }, { 5805, 161, 30 }, { 5810, 162, 30 }, { 5815, 163, 30 }, { 5820, 164, 30 }, { 5825, 165, 30 }, { 5830, 166, 30 }, { 5840, 168, 30 }, { 5850, 170, 30 }, { 5860, 172, 30 }, { 5870, 174, 30 }, { 5880, 176, 30 }, { 5890, 178, 30 }, { 5900, 180, 30 }, { 5910, 182, 30 }, { 5920, 184, 30 }, { 5930, 186, 30 }, { 5940, 188, 30 }, { 5950, 190, 30 }, { 5960, 192, 30 }, { 5970, 194, 30 }, { 5980, 196, 30 }, { 5990, 198, 30 }, { 6000, 200, 30 }, { 6010, 202, 30 }, { 6020, 204, 30 }, { 6030, 206, 30 }, { 6040, 208, 30 }, { 6050, 210, 30 }, { 6060, 212, 30 }, { 6070, 214, 30 }, { 6080, 216, 30 }, { 6090, 218, 30 }, { 6100, 220, 30 }, { 6110, 222, 30 }, { 6120, 224, 30 }, { 6130, 226, 30 }, { 6140, 228, 30 } }, .nchannels = 110 }; #endif #define VENDOR_LED_ACT(vendor) \ { \ .vid = PCI_VENDOR_##vendor, \ .led_act = { BWN_VENDOR_LED_ACT_##vendor } \ } static const struct { uint16_t vid; uint8_t led_act[BWN_LED_MAX]; } bwn_vendor_led_act[] = { VENDOR_LED_ACT(HP_COMPAQ), VENDOR_LED_ACT(ASUSTEK) }; static const uint8_t bwn_default_led_act[BWN_LED_MAX] = { BWN_VENDOR_LED_ACT_DEFAULT }; #undef VENDOR_LED_ACT static const char *bwn_led_vars[] = { BHND_NVAR_LEDBH0, BHND_NVAR_LEDBH1, BHND_NVAR_LEDBH2, BHND_NVAR_LEDBH3 }; static const struct { int on_dur; int off_dur; } bwn_led_duration[109] = { [0] = { 400, 100 }, [2] = { 150, 75 }, [4] = { 90, 45 }, [11] = { 66, 34 }, [12] = { 53, 26 }, [18] = { 42, 21 }, [22] = { 35, 17 }, [24] = { 32, 16 }, [36] = { 21, 10 }, [48] = { 16, 8 }, [72] = { 11, 5 }, [96] = { 9, 4 }, [108] = { 7, 3 } }; static const uint16_t bwn_wme_shm_offsets[] = { [0] = BWN_WME_BESTEFFORT, [1] = BWN_WME_BACKGROUND, [2] = BWN_WME_VOICE, [3] = BWN_WME_VIDEO, }; /* Supported D11 core revisions */ #define BWN_DEV(_hwrev) {{ \ BHND_MATCH_CORE(BHND_MFGID_BCM, BHND_COREID_D11), \ BHND_MATCH_CORE_REV(_hwrev), \ }} static const struct bhnd_device bwn_devices[] = { BWN_DEV(HWREV_RANGE(5, 16)), BWN_DEV(HWREV_EQ(23)), BHND_DEVICE_END }; /* D11 quirks when bridged via a PCI host bridge core */ static const struct bhnd_device_quirk pci_bridge_quirks[] = { BHND_CORE_QUIRK (HWREV_LTE(10), BWN_QUIRK_UCODE_SLOWCLOCK_WAR), BHND_DEVICE_QUIRK_END }; /* D11 quirks when bridged via a PCMCIA host bridge core */ static const struct bhnd_device_quirk pcmcia_bridge_quirks[] = { BHND_CORE_QUIRK (HWREV_ANY, BWN_QUIRK_NODMA), BHND_DEVICE_QUIRK_END }; /* Host bridge cores for which D11 quirk flags should be applied */ static const struct bhnd_device bridge_devices[] = { BHND_DEVICE(BCM, PCI, NULL, pci_bridge_quirks), BHND_DEVICE(BCM, PCMCIA, NULL, pcmcia_bridge_quirks), BHND_DEVICE_END }; static int bwn_probe(device_t dev) { const struct bhnd_device *id; id = bhnd_device_lookup(dev, bwn_devices, sizeof(bwn_devices[0])); if (id == NULL) return (ENXIO); bhnd_set_default_core_desc(dev); return (BUS_PROBE_DEFAULT); } static int bwn_attach(device_t dev) { struct bwn_mac *mac; struct bwn_softc *sc; device_t parent, hostb; char 
chip_name[BHND_CHIPID_MAX_NAMELEN]; int error; sc = device_get_softc(dev); sc->sc_dev = dev; #ifdef BWN_DEBUG sc->sc_debug = bwn_debug; #endif mac = NULL; /* Determine the driver quirks applicable to this device, including any * quirks specific to the bus host bridge core (if any) */ sc->sc_quirks = bhnd_device_quirks(dev, bwn_devices, sizeof(bwn_devices[0])); parent = device_get_parent(dev); if ((hostb = bhnd_bus_find_hostb_device(parent)) != NULL) { sc->sc_quirks |= bhnd_device_quirks(hostb, bridge_devices, sizeof(bridge_devices[0])); } /* DMA explicitly disabled? */ if (!bwn_usedma) sc->sc_quirks |= BWN_QUIRK_NODMA; /* Fetch our chip identification and board info */ sc->sc_cid = *bhnd_get_chipid(dev); if ((error = bhnd_read_board_info(dev, &sc->sc_board_info))) { device_printf(sc->sc_dev, "couldn't read board info\n"); return (error); } /* Allocate our D11 register block and PMU state */ sc->sc_mem_rid = 0; sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_mem_rid, RF_ACTIVE); if (sc->sc_mem_res == NULL) { device_printf(sc->sc_dev, "couldn't allocate registers\n"); return (error); } if ((error = bhnd_alloc_pmu(sc->sc_dev))) { bus_release_resource(sc->sc_dev, SYS_RES_MEMORY, sc->sc_mem_rid, sc->sc_mem_res); return (error); } /* Retain references to all required bus service providers */ if ((error = bwn_retain_bus_providers(sc))) goto fail; /* Fetch mask of available antennas */ error = bhnd_nvram_getvar_uint8(sc->sc_dev, BHND_NVAR_AA2G, &sc->sc_ant2g); if (error) { device_printf(sc->sc_dev, "error determining 2GHz antenna " "availability from NVRAM: %d\n", error); goto fail; } error = bhnd_nvram_getvar_uint8(sc->sc_dev, BHND_NVAR_AA5G, &sc->sc_ant5g); if (error) { device_printf(sc->sc_dev, "error determining 5GHz antenna " "availability from NVRAM: %d\n", error); goto fail; } if ((sc->sc_flags & BWN_FLAG_ATTACHED) == 0) { bwn_attach_pre(sc); bwn_sprom_bugfixes(dev); sc->sc_flags |= BWN_FLAG_ATTACHED; } mac = malloc(sizeof(*mac), M_DEVBUF, M_WAITOK | M_ZERO); mac->mac_sc = sc; mac->mac_status = BWN_MAC_STATUS_UNINIT; if (bwn_bfp != 0) mac->mac_flags |= BWN_MAC_FLAG_BADFRAME_PREEMP; TASK_INIT(&mac->mac_hwreset, 0, bwn_hwreset, mac); TASK_INIT(&mac->mac_intrtask, 0, bwn_intrtask, mac); TASK_INIT(&mac->mac_txpower, 0, bwn_txpwr, mac); error = bwn_attach_core(mac); if (error) goto fail; error = bwn_led_attach(mac); if (error) goto fail; bhnd_format_chip_id(chip_name, sizeof(chip_name), sc->sc_cid.chip_id); device_printf(sc->sc_dev, "WLAN (%s rev %u) " "PHY (analog %d type %d rev %d) RADIO (manuf %#x ver %#x rev %d)\n", chip_name, bhnd_get_hwrev(sc->sc_dev), mac->mac_phy.analog, mac->mac_phy.type, mac->mac_phy.rev, mac->mac_phy.rf_manuf, mac->mac_phy.rf_ver, mac->mac_phy.rf_rev); if (mac->mac_flags & BWN_MAC_FLAG_DMA) device_printf(sc->sc_dev, "DMA (%d bits)\n", mac->mac_dmatype); else device_printf(sc->sc_dev, "PIO\n"); #ifdef BWN_GPL_PHY device_printf(sc->sc_dev, "Note: compiled with BWN_GPL_PHY; includes GPLv2 code\n"); #endif mac->mac_rid_irq = 0; mac->mac_res_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &mac->mac_rid_irq, RF_ACTIVE | RF_SHAREABLE); if (mac->mac_res_irq == NULL) { device_printf(sc->sc_dev, "couldn't allocate IRQ resource\n"); error = ENXIO; goto fail; } error = bus_setup_intr(dev, mac->mac_res_irq, INTR_TYPE_NET | INTR_MPSAFE, bwn_intr, NULL, mac, &mac->mac_intrhand); if (error != 0) { device_printf(sc->sc_dev, "couldn't setup interrupt (%d)\n", error); goto fail; } TAILQ_INSERT_TAIL(&sc->sc_maclist, mac, mac_list); /* * calls attach-post routine */ if 
((sc->sc_flags & BWN_FLAG_ATTACHED) != 0) bwn_attach_post(sc); return (0); fail: if (mac != NULL && mac->mac_res_irq != NULL) { bus_release_resource(dev, SYS_RES_IRQ, mac->mac_rid_irq, mac->mac_res_irq); } free(mac, M_DEVBUF); bhnd_release_pmu(dev); bwn_release_bus_providers(sc); if (sc->sc_mem_res != NULL) { bus_release_resource(sc->sc_dev, SYS_RES_MEMORY, sc->sc_mem_rid, sc->sc_mem_res); } return (error); } static int bwn_retain_bus_providers(struct bwn_softc *sc) { struct chipc_caps *ccaps; sc->sc_chipc = bhnd_retain_provider(sc->sc_dev, BHND_SERVICE_CHIPC); if (sc->sc_chipc == NULL) { device_printf(sc->sc_dev, "ChipCommon device not found\n"); goto failed; } ccaps = BHND_CHIPC_GET_CAPS(sc->sc_chipc); sc->sc_gpio = bhnd_retain_provider(sc->sc_dev, BHND_SERVICE_GPIO); if (sc->sc_gpio == NULL) { device_printf(sc->sc_dev, "GPIO device not found\n"); goto failed; } if (ccaps->pmu) { sc->sc_pmu = bhnd_retain_provider(sc->sc_dev, BHND_SERVICE_PMU); if (sc->sc_pmu == NULL) { device_printf(sc->sc_dev, "PMU device not found\n"); goto failed; } } return (0); failed: bwn_release_bus_providers(sc); return (ENXIO); } static void bwn_release_bus_providers(struct bwn_softc *sc) { #define BWN_RELEASE_PROV(_sc, _prov, _service) do { \ if ((_sc)-> _prov != NULL) { \ bhnd_release_provider((_sc)->sc_dev, (_sc)-> _prov, \ (_service)); \ (_sc)-> _prov = NULL; \ } \ } while (0) BWN_RELEASE_PROV(sc, sc_chipc, BHND_SERVICE_CHIPC); BWN_RELEASE_PROV(sc, sc_gpio, BHND_SERVICE_GPIO); BWN_RELEASE_PROV(sc, sc_pmu, BHND_SERVICE_PMU); #undef BWN_RELEASE_PROV } static int bwn_attach_post(struct bwn_softc *sc) { struct ieee80211com *ic; const char *mac_varname; u_int core_unit; int error; ic = &sc->sc_ic; ic->ic_softc = sc; ic->ic_name = device_get_nameunit(sc->sc_dev); /* XXX not right but it's not used anywhere important */ ic->ic_phytype = IEEE80211_T_OFDM; ic->ic_opmode = IEEE80211_M_STA; ic->ic_caps = IEEE80211_C_STA /* station mode supported */ | IEEE80211_C_MONITOR /* monitor mode */ | IEEE80211_C_AHDEMO /* adhoc demo mode */ | IEEE80211_C_SHPREAMBLE /* short preamble supported */ | IEEE80211_C_SHSLOT /* short slot time supported */ | IEEE80211_C_WME /* WME/WMM supported */ | IEEE80211_C_WPA /* capable of WPA1+WPA2 */ #if 0 | IEEE80211_C_BGSCAN /* capable of bg scanning */ #endif | IEEE80211_C_TXPMGT /* capable of txpow mgt */ ; ic->ic_flags_ext |= IEEE80211_FEXT_SWBMISS; /* s/w bmiss */ /* Determine the NVRAM variable containing our MAC address */ core_unit = bhnd_get_core_unit(sc->sc_dev); mac_varname = NULL; if (sc->sc_board_info.board_srom_rev <= 2) { if (core_unit == 0) { mac_varname = BHND_NVAR_IL0MACADDR; } else if (core_unit == 1) { mac_varname = BHND_NVAR_ET1MACADDR; } } else { if (core_unit == 0) { mac_varname = BHND_NVAR_MACADDR; } } if (mac_varname == NULL) { device_printf(sc->sc_dev, "missing MAC address variable for " "D11 core %u", core_unit); return (ENXIO); } /* Read the MAC address from NVRAM */ error = bhnd_nvram_getvar_array(sc->sc_dev, mac_varname, ic->ic_macaddr, sizeof(ic->ic_macaddr), BHND_NVRAM_TYPE_UINT8_ARRAY); if (error) { device_printf(sc->sc_dev, "error reading %s: %d\n", mac_varname, error); return (error); } /* call MI attach routine. 
*/ ieee80211_ifattach(ic); ic->ic_headroom = sizeof(struct bwn_txhdr); /* override default methods */ ic->ic_raw_xmit = bwn_raw_xmit; ic->ic_updateslot = bwn_updateslot; ic->ic_update_promisc = bwn_update_promisc; ic->ic_wme.wme_update = bwn_wme_update; ic->ic_scan_start = bwn_scan_start; ic->ic_scan_end = bwn_scan_end; ic->ic_set_channel = bwn_set_channel; ic->ic_vap_create = bwn_vap_create; ic->ic_vap_delete = bwn_vap_delete; ic->ic_transmit = bwn_transmit; ic->ic_parent = bwn_parent; ieee80211_radiotap_attach(ic, &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th), BWN_TX_RADIOTAP_PRESENT, &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th), BWN_RX_RADIOTAP_PRESENT); bwn_sysctl_node(sc); if (bootverbose) ieee80211_announce(ic); return (0); } static void bwn_phy_detach(struct bwn_mac *mac) { if (mac->mac_phy.detach != NULL) mac->mac_phy.detach(mac); } static int bwn_detach(device_t dev) { struct bwn_softc *sc = device_get_softc(dev); struct bwn_mac *mac = sc->sc_curmac; struct ieee80211com *ic = &sc->sc_ic; sc->sc_flags |= BWN_FLAG_INVALID; if (device_is_attached(sc->sc_dev)) { BWN_LOCK(sc); bwn_stop(sc); BWN_UNLOCK(sc); bwn_dma_free(mac); callout_drain(&sc->sc_led_blink_ch); callout_drain(&sc->sc_rfswitch_ch); callout_drain(&sc->sc_task_ch); callout_drain(&sc->sc_watchdog_ch); bwn_phy_detach(mac); ieee80211_draintask(ic, &mac->mac_hwreset); ieee80211_draintask(ic, &mac->mac_txpower); ieee80211_ifdetach(ic); } taskqueue_drain(sc->sc_tq, &mac->mac_intrtask); taskqueue_free(sc->sc_tq); if (mac->mac_intrhand != NULL) { bus_teardown_intr(dev, mac->mac_res_irq, mac->mac_intrhand); mac->mac_intrhand = NULL; } bhnd_release_pmu(dev); bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid, sc->sc_mem_res); bus_release_resource(dev, SYS_RES_IRQ, mac->mac_rid_irq, mac->mac_res_irq); mbufq_drain(&sc->sc_snd); bwn_release_firmware(mac); BWN_LOCK_DESTROY(sc); bwn_release_bus_providers(sc); return (0); } static void bwn_attach_pre(struct bwn_softc *sc) { BWN_LOCK_INIT(sc); TAILQ_INIT(&sc->sc_maclist); callout_init_mtx(&sc->sc_rfswitch_ch, &sc->sc_mtx, 0); callout_init_mtx(&sc->sc_task_ch, &sc->sc_mtx, 0); callout_init_mtx(&sc->sc_watchdog_ch, &sc->sc_mtx, 0); mbufq_init(&sc->sc_snd, ifqmaxlen); sc->sc_tq = taskqueue_create_fast("bwn_taskq", M_NOWAIT, taskqueue_thread_enqueue, &sc->sc_tq); taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq", device_get_nameunit(sc->sc_dev)); } static void bwn_sprom_bugfixes(device_t dev) { struct bwn_softc *sc = device_get_softc(dev); #define BWN_ISDEV(_device, _subvendor, _subdevice) \ ((sc->sc_board_info.board_devid == PCI_DEVID_##_device) && \ (sc->sc_board_info.board_vendor == PCI_VENDOR_##_subvendor) && \ (sc->sc_board_info.board_type == _subdevice)) /* A subset of Apple Airport Extreme (BCM4306 rev 2) devices * were programmed with a missing PACTRL boardflag */ if (sc->sc_board_info.board_vendor == PCI_VENDOR_APPLE && sc->sc_board_info.board_type == 0x4e && sc->sc_board_info.board_rev > 0x40) sc->sc_board_info.board_flags |= BHND_BFL_PACTRL; if (BWN_ISDEV(BCM4318_D11G, ASUSTEK, 0x100f) || BWN_ISDEV(BCM4306_D11G, DELL, 0x0003) || BWN_ISDEV(BCM4306_D11G, HP, 0x12f8) || BWN_ISDEV(BCM4306_D11G, LINKSYS, 0x0013) || BWN_ISDEV(BCM4306_D11G, LINKSYS, 0x0014) || BWN_ISDEV(BCM4306_D11G, LINKSYS, 0x0015) || BWN_ISDEV(BCM4306_D11G, MOTOROLA, 0x7010)) sc->sc_board_info.board_flags &= ~BHND_BFL_BTCOEX; #undef BWN_ISDEV } static void bwn_parent(struct ieee80211com *ic) { struct bwn_softc *sc = ic->ic_softc; int startall = 0; BWN_LOCK(sc); if (ic->ic_nrunning > 0) { if ((sc->sc_flags & 
BWN_FLAG_RUNNING) == 0) { bwn_init(sc); startall = 1; } else bwn_update_promisc(ic); } else if (sc->sc_flags & BWN_FLAG_RUNNING) bwn_stop(sc); BWN_UNLOCK(sc); if (startall) ieee80211_start_all(ic); } static int bwn_transmit(struct ieee80211com *ic, struct mbuf *m) { struct bwn_softc *sc = ic->ic_softc; int error; BWN_LOCK(sc); if ((sc->sc_flags & BWN_FLAG_RUNNING) == 0) { BWN_UNLOCK(sc); return (ENXIO); } error = mbufq_enqueue(&sc->sc_snd, m); if (error) { BWN_UNLOCK(sc); return (error); } bwn_start(sc); BWN_UNLOCK(sc); return (0); } static void bwn_start(struct bwn_softc *sc) { struct bwn_mac *mac = sc->sc_curmac; struct ieee80211_frame *wh; struct ieee80211_node *ni; struct ieee80211_key *k; struct mbuf *m; BWN_ASSERT_LOCKED(sc); if ((sc->sc_flags & BWN_FLAG_RUNNING) == 0 || mac == NULL || mac->mac_status < BWN_MAC_STATUS_STARTED) return; while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) { if (bwn_tx_isfull(sc, m)) break; ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; if (ni == NULL) { device_printf(sc->sc_dev, "unexpected NULL ni\n"); m_freem(m); counter_u64_add(sc->sc_ic.ic_oerrors, 1); continue; } wh = mtod(m, struct ieee80211_frame *); if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { k = ieee80211_crypto_encap(ni, m); if (k == NULL) { if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1); ieee80211_free_node(ni); m_freem(m); continue; } } wh = NULL; /* Catch any invalid use */ if (bwn_tx_start(sc, ni, m) != 0) { if (ni != NULL) { if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1); ieee80211_free_node(ni); } continue; } sc->sc_watchdog_timer = 5; } } static int bwn_tx_isfull(struct bwn_softc *sc, struct mbuf *m) { struct bwn_dma_ring *dr; struct bwn_mac *mac = sc->sc_curmac; struct bwn_pio_txqueue *tq; int pktlen = roundup(m->m_pkthdr.len + BWN_HDRSIZE(mac), 4); BWN_ASSERT_LOCKED(sc); if (mac->mac_flags & BWN_MAC_FLAG_DMA) { dr = bwn_dma_select(mac, M_WME_GETAC(m)); if (dr->dr_stop == 1 || bwn_dma_freeslot(dr) < BWN_TX_SLOTS_PER_FRAME) { dr->dr_stop = 1; goto full; } } else { tq = bwn_pio_select(mac, M_WME_GETAC(m)); if (tq->tq_free == 0 || pktlen > tq->tq_size || pktlen > (tq->tq_size - tq->tq_used)) goto full; } return (0); full: mbufq_prepend(&sc->sc_snd, m); return (1); } static int bwn_tx_start(struct bwn_softc *sc, struct ieee80211_node *ni, struct mbuf *m) { struct bwn_mac *mac = sc->sc_curmac; int error; BWN_ASSERT_LOCKED(sc); if (m->m_pkthdr.len < IEEE80211_MIN_LEN || mac == NULL) { m_freem(m); return (ENXIO); } error = (mac->mac_flags & BWN_MAC_FLAG_DMA) ? 
bwn_dma_tx_start(mac, ni, m) : bwn_pio_tx_start(mac, ni, m); if (error) { m_freem(m); return (error); } return (0); } static int bwn_pio_tx_start(struct bwn_mac *mac, struct ieee80211_node *ni, struct mbuf *m) { struct bwn_pio_txpkt *tp; struct bwn_pio_txqueue *tq = bwn_pio_select(mac, M_WME_GETAC(m)); struct bwn_softc *sc = mac->mac_sc; struct bwn_txhdr txhdr; struct mbuf *m_new; uint32_t ctl32; int error; uint16_t ctl16; BWN_ASSERT_LOCKED(sc); /* XXX TODO send packets after DTIM */ KASSERT(!TAILQ_EMPTY(&tq->tq_pktlist), ("%s: fail", __func__)); tp = TAILQ_FIRST(&tq->tq_pktlist); tp->tp_ni = ni; tp->tp_m = m; error = bwn_set_txhdr(mac, ni, m, &txhdr, BWN_PIO_COOKIE(tq, tp)); if (error) { device_printf(sc->sc_dev, "tx fail\n"); return (error); } TAILQ_REMOVE(&tq->tq_pktlist, tp, tp_list); tq->tq_used += roundup(m->m_pkthdr.len + BWN_HDRSIZE(mac), 4); tq->tq_free--; if (bhnd_get_hwrev(sc->sc_dev) >= 8) { /* * XXX please removes m_defrag(9) */ m_new = m_defrag(m, M_NOWAIT); if (m_new == NULL) { device_printf(sc->sc_dev, "%s: can't defrag TX buffer\n", __func__); return (ENOBUFS); } if (m_new->m_next != NULL) device_printf(sc->sc_dev, "TODO: fragmented packets for PIO\n"); tp->tp_m = m_new; /* send HEADER */ ctl32 = bwn_pio_write_multi_4(mac, tq, (BWN_PIO_READ_4(mac, tq, BWN_PIO8_TXCTL) | BWN_PIO8_TXCTL_FRAMEREADY) & ~BWN_PIO8_TXCTL_EOF, (const uint8_t *)&txhdr, BWN_HDRSIZE(mac)); /* send BODY */ ctl32 = bwn_pio_write_multi_4(mac, tq, ctl32, mtod(m_new, const void *), m_new->m_pkthdr.len); bwn_pio_write_4(mac, tq, BWN_PIO_TXCTL, ctl32 | BWN_PIO8_TXCTL_EOF); } else { ctl16 = bwn_pio_write_multi_2(mac, tq, (bwn_pio_read_2(mac, tq, BWN_PIO_TXCTL) | BWN_PIO_TXCTL_FRAMEREADY) & ~BWN_PIO_TXCTL_EOF, (const uint8_t *)&txhdr, BWN_HDRSIZE(mac)); ctl16 = bwn_pio_write_mbuf_2(mac, tq, ctl16, m); BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXCTL, ctl16 | BWN_PIO_TXCTL_EOF); } return (0); } static struct bwn_pio_txqueue * bwn_pio_select(struct bwn_mac *mac, uint8_t prio) { if ((mac->mac_flags & BWN_MAC_FLAG_WME) == 0) return (&mac->mac_method.pio.wme[WME_AC_BE]); switch (prio) { case 0: return (&mac->mac_method.pio.wme[WME_AC_BE]); case 1: return (&mac->mac_method.pio.wme[WME_AC_BK]); case 2: return (&mac->mac_method.pio.wme[WME_AC_VI]); case 3: return (&mac->mac_method.pio.wme[WME_AC_VO]); } KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); return (NULL); } static int bwn_dma_tx_start(struct bwn_mac *mac, struct ieee80211_node *ni, struct mbuf *m) { #define BWN_GET_TXHDRCACHE(slot) \ &(txhdr_cache[(slot / BWN_TX_SLOTS_PER_FRAME) * BWN_HDRSIZE(mac)]) struct bwn_dma *dma = &mac->mac_method.dma; struct bwn_dma_ring *dr = bwn_dma_select(mac, M_WME_GETAC(m)); struct bwn_dmadesc_generic *desc; struct bwn_dmadesc_meta *mt; struct bwn_softc *sc = mac->mac_sc; uint8_t *txhdr_cache = (uint8_t *)dr->dr_txhdr_cache; int error, slot, backup[2] = { dr->dr_curslot, dr->dr_usedslot }; BWN_ASSERT_LOCKED(sc); KASSERT(!dr->dr_stop, ("%s:%d: fail", __func__, __LINE__)); /* XXX send after DTIM */ slot = bwn_dma_getslot(dr); dr->getdesc(dr, slot, &desc, &mt); KASSERT(mt->mt_txtype == BWN_DMADESC_METATYPE_HEADER, ("%s:%d: fail", __func__, __LINE__)); error = bwn_set_txhdr(dr->dr_mac, ni, m, (struct bwn_txhdr *)BWN_GET_TXHDRCACHE(slot), BWN_DMA_COOKIE(dr, slot)); if (error) goto fail; error = bus_dmamap_load(dr->dr_txring_dtag, mt->mt_dmap, BWN_GET_TXHDRCACHE(slot), BWN_HDRSIZE(mac), bwn_dma_ring_addr, &mt->mt_paddr, BUS_DMA_NOWAIT); if (error) { device_printf(sc->sc_dev, "%s: can't load TX buffer (1) %d\n", __func__, error); goto 
fail; } bus_dmamap_sync(dr->dr_txring_dtag, mt->mt_dmap, BUS_DMASYNC_PREWRITE); dr->setdesc(dr, desc, mt->mt_paddr, BWN_HDRSIZE(mac), 1, 0, 0); bus_dmamap_sync(dr->dr_ring_dtag, dr->dr_ring_dmap, BUS_DMASYNC_PREWRITE); slot = bwn_dma_getslot(dr); dr->getdesc(dr, slot, &desc, &mt); KASSERT(mt->mt_txtype == BWN_DMADESC_METATYPE_BODY && mt->mt_islast == 1, ("%s:%d: fail", __func__, __LINE__)); mt->mt_m = m; mt->mt_ni = ni; error = bus_dmamap_load_mbuf(dma->txbuf_dtag, mt->mt_dmap, m, bwn_dma_buf_addr, &mt->mt_paddr, BUS_DMA_NOWAIT); if (error && error != EFBIG) { device_printf(sc->sc_dev, "%s: can't load TX buffer (1) %d\n", __func__, error); goto fail; } if (error) { /* error == EFBIG */ struct mbuf *m_new; m_new = m_defrag(m, M_NOWAIT); if (m_new == NULL) { device_printf(sc->sc_dev, "%s: can't defrag TX buffer\n", __func__); error = ENOBUFS; goto fail; } else { m = m_new; } mt->mt_m = m; error = bus_dmamap_load_mbuf(dma->txbuf_dtag, mt->mt_dmap, m, bwn_dma_buf_addr, &mt->mt_paddr, BUS_DMA_NOWAIT); if (error) { device_printf(sc->sc_dev, "%s: can't load TX buffer (2) %d\n", __func__, error); goto fail; } } bus_dmamap_sync(dma->txbuf_dtag, mt->mt_dmap, BUS_DMASYNC_PREWRITE); dr->setdesc(dr, desc, mt->mt_paddr, m->m_pkthdr.len, 0, 1, 1); bus_dmamap_sync(dr->dr_ring_dtag, dr->dr_ring_dmap, BUS_DMASYNC_PREWRITE); /* XXX send after DTIM */ dr->start_transfer(dr, bwn_dma_nextslot(dr, slot)); return (0); fail: dr->dr_curslot = backup[0]; dr->dr_usedslot = backup[1]; return (error); #undef BWN_GET_TXHDRCACHE } static void bwn_watchdog(void *arg) { struct bwn_softc *sc = arg; if (sc->sc_watchdog_timer != 0 && --sc->sc_watchdog_timer == 0) { device_printf(sc->sc_dev, "device timeout\n"); counter_u64_add(sc->sc_ic.ic_oerrors, 1); } callout_schedule(&sc->sc_watchdog_ch, hz); } static int bwn_attach_core(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; int error, have_bg = 0, have_a = 0; uint16_t iost; KASSERT(bhnd_get_hwrev(sc->sc_dev) >= 5, ("unsupported revision %d", bhnd_get_hwrev(sc->sc_dev))); if ((error = bwn_core_forceclk(mac, true))) return (error); if ((error = bhnd_read_iost(sc->sc_dev, &iost))) { device_printf(sc->sc_dev, "error reading I/O status flags: " "%d\n", error); return (error); } have_a = (iost & BWN_IOST_HAVE_5GHZ) ? 1 : 0; have_bg = (iost & BWN_IOST_HAVE_2GHZ) ? 1 : 0; if (iost & BWN_IOST_DUALPHY) { have_bg = 1; have_a = 1; } #if 0 device_printf(sc->sc_dev, "%s: iost=0x%04hx, have_a=%d, have_bg=%d," " deviceid=0x%04x, siba_deviceid=0x%04x\n", __func__, iost, have_a, have_bg, sc->sc_board_info.board_devid, sc->sc_cid.chip_id); #endif /* * Guess at whether it has A-PHY or G-PHY. * This is just used for resetting the core to probe things; * we will re-guess once it's all up and working. */ error = bwn_reset_core(mac, have_bg); if (error) goto fail; /* * Determine the DMA engine type */ if (iost & BHND_IOST_DMA64) { mac->mac_dmatype = BHND_DMA_ADDR_64BIT; } else { uint32_t tmp; uint16_t base; base = bwn_dma_base(0, 0); BWN_WRITE_4(mac, base + BWN_DMA32_TXCTL, BWN_DMA32_TXADDREXT_MASK); tmp = BWN_READ_4(mac, base + BWN_DMA32_TXCTL); if (tmp & BWN_DMA32_TXADDREXT_MASK) { mac->mac_dmatype = BHND_DMA_ADDR_32BIT; } else { mac->mac_dmatype = BHND_DMA_ADDR_30BIT; } } /* * Get the PHY version. */ error = bwn_phy_getinfo(mac, have_bg); if (error) goto fail; /* * This is the whitelist of devices which we "believe" * the SPROM PHY config from. The rest are "guessed". 
*/ if (sc->sc_board_info.board_devid != PCI_DEVID_BCM4311_D11DUAL && sc->sc_board_info.board_devid != PCI_DEVID_BCM4328_D11G && sc->sc_board_info.board_devid != PCI_DEVID_BCM4318_D11DUAL && sc->sc_board_info.board_devid != PCI_DEVID_BCM4306_D11DUAL && sc->sc_board_info.board_devid != PCI_DEVID_BCM4321_D11N && sc->sc_board_info.board_devid != PCI_DEVID_BCM4322_D11N) { have_a = have_bg = 0; if (mac->mac_phy.type == BWN_PHYTYPE_A) have_a = 1; else if (mac->mac_phy.type == BWN_PHYTYPE_G || mac->mac_phy.type == BWN_PHYTYPE_N || mac->mac_phy.type == BWN_PHYTYPE_LP) have_bg = 1; else KASSERT(0 == 1, ("%s: unknown phy type (%d)", __func__, mac->mac_phy.type)); } /* * XXX The PHY-G support doesn't do 5GHz operation. */ if (mac->mac_phy.type != BWN_PHYTYPE_LP && mac->mac_phy.type != BWN_PHYTYPE_N) { device_printf(sc->sc_dev, "%s: forcing 2GHz only; no dual-band support for PHY\n", __func__); have_a = 0; have_bg = 1; } mac->mac_phy.phy_n = NULL; if (mac->mac_phy.type == BWN_PHYTYPE_G) { mac->mac_phy.attach = bwn_phy_g_attach; mac->mac_phy.detach = bwn_phy_g_detach; mac->mac_phy.prepare_hw = bwn_phy_g_prepare_hw; mac->mac_phy.init_pre = bwn_phy_g_init_pre; mac->mac_phy.init = bwn_phy_g_init; mac->mac_phy.exit = bwn_phy_g_exit; mac->mac_phy.phy_read = bwn_phy_g_read; mac->mac_phy.phy_write = bwn_phy_g_write; mac->mac_phy.rf_read = bwn_phy_g_rf_read; mac->mac_phy.rf_write = bwn_phy_g_rf_write; mac->mac_phy.use_hwpctl = bwn_phy_g_hwpctl; mac->mac_phy.rf_onoff = bwn_phy_g_rf_onoff; mac->mac_phy.switch_analog = bwn_phy_switch_analog; mac->mac_phy.switch_channel = bwn_phy_g_switch_channel; mac->mac_phy.get_default_chan = bwn_phy_g_get_default_chan; mac->mac_phy.set_antenna = bwn_phy_g_set_antenna; mac->mac_phy.set_im = bwn_phy_g_im; mac->mac_phy.recalc_txpwr = bwn_phy_g_recalc_txpwr; mac->mac_phy.set_txpwr = bwn_phy_g_set_txpwr; mac->mac_phy.task_15s = bwn_phy_g_task_15s; mac->mac_phy.task_60s = bwn_phy_g_task_60s; } else if (mac->mac_phy.type == BWN_PHYTYPE_LP) { mac->mac_phy.init_pre = bwn_phy_lp_init_pre; mac->mac_phy.init = bwn_phy_lp_init; mac->mac_phy.phy_read = bwn_phy_lp_read; mac->mac_phy.phy_write = bwn_phy_lp_write; mac->mac_phy.phy_maskset = bwn_phy_lp_maskset; mac->mac_phy.rf_read = bwn_phy_lp_rf_read; mac->mac_phy.rf_write = bwn_phy_lp_rf_write; mac->mac_phy.rf_onoff = bwn_phy_lp_rf_onoff; mac->mac_phy.switch_analog = bwn_phy_lp_switch_analog; mac->mac_phy.switch_channel = bwn_phy_lp_switch_channel; mac->mac_phy.get_default_chan = bwn_phy_lp_get_default_chan; mac->mac_phy.set_antenna = bwn_phy_lp_set_antenna; mac->mac_phy.task_60s = bwn_phy_lp_task_60s; } else if (mac->mac_phy.type == BWN_PHYTYPE_N) { mac->mac_phy.attach = bwn_phy_n_attach; mac->mac_phy.detach = bwn_phy_n_detach; mac->mac_phy.prepare_hw = bwn_phy_n_prepare_hw; mac->mac_phy.init_pre = bwn_phy_n_init_pre; mac->mac_phy.init = bwn_phy_n_init; mac->mac_phy.exit = bwn_phy_n_exit; mac->mac_phy.phy_read = bwn_phy_n_read; mac->mac_phy.phy_write = bwn_phy_n_write; mac->mac_phy.rf_read = bwn_phy_n_rf_read; mac->mac_phy.rf_write = bwn_phy_n_rf_write; mac->mac_phy.use_hwpctl = bwn_phy_n_hwpctl; mac->mac_phy.rf_onoff = bwn_phy_n_rf_onoff; mac->mac_phy.switch_analog = bwn_phy_n_switch_analog; mac->mac_phy.switch_channel = bwn_phy_n_switch_channel; mac->mac_phy.get_default_chan = bwn_phy_n_get_default_chan; mac->mac_phy.set_antenna = bwn_phy_n_set_antenna; mac->mac_phy.set_im = bwn_phy_n_im; mac->mac_phy.recalc_txpwr = bwn_phy_n_recalc_txpwr; mac->mac_phy.set_txpwr = bwn_phy_n_set_txpwr; mac->mac_phy.task_15s = bwn_phy_n_task_15s; 
mac->mac_phy.task_60s = bwn_phy_n_task_60s; } else { device_printf(sc->sc_dev, "unsupported PHY type (%d)\n", mac->mac_phy.type); error = ENXIO; goto fail; } mac->mac_phy.gmode = have_bg; if (mac->mac_phy.attach != NULL) { error = mac->mac_phy.attach(mac); if (error) { device_printf(sc->sc_dev, "failed\n"); goto fail; } } error = bwn_reset_core(mac, have_bg); if (error) goto fail; error = bwn_chiptest(mac); if (error) goto fail; error = bwn_setup_channels(mac, have_bg, have_a); if (error) { device_printf(sc->sc_dev, "failed to setup channels\n"); goto fail; } if (sc->sc_curmac == NULL) sc->sc_curmac = mac; error = bwn_dma_attach(mac); if (error != 0) { device_printf(sc->sc_dev, "failed to initialize DMA\n"); goto fail; } mac->mac_phy.switch_analog(mac, 0); fail: bhnd_suspend_hw(sc->sc_dev, 0); bwn_release_firmware(mac); return (error); } /* * Reset */ int bwn_reset_core(struct bwn_mac *mac, int g_mode) { struct bwn_softc *sc; uint32_t ctl; uint16_t ioctl, ioctl_mask; int error; sc = mac->mac_sc; DPRINTF(sc, BWN_DEBUG_RESET, "%s: g_mode=%d\n", __func__, g_mode); /* Reset core */ ioctl = (BWN_IOCTL_PHYCLOCK_ENABLE | BWN_IOCTL_PHYRESET); if (g_mode) ioctl |= BWN_IOCTL_SUPPORT_G; /* XXX N-PHY only; and hard-code to 20MHz for now */ if (mac->mac_phy.type == BWN_PHYTYPE_N) ioctl |= BWN_IOCTL_PHY_BANDWIDTH_20MHZ; if ((error = bhnd_reset_hw(sc->sc_dev, ioctl, ioctl))) { device_printf(sc->sc_dev, "core reset failed: %d", error); return (error); } DELAY(2000); /* Take PHY out of reset */ ioctl = BHND_IOCTL_CLK_FORCE; ioctl_mask = BHND_IOCTL_CLK_FORCE | BWN_IOCTL_PHYRESET | BWN_IOCTL_PHYCLOCK_ENABLE; if ((error = bhnd_write_ioctl(sc->sc_dev, ioctl, ioctl_mask))) { device_printf(sc->sc_dev, "failed to set core ioctl flags: " "%d\n", error); return (error); } DELAY(2000); ioctl = BWN_IOCTL_PHYCLOCK_ENABLE; if ((error = bhnd_write_ioctl(sc->sc_dev, ioctl, ioctl_mask))) { device_printf(sc->sc_dev, "failed to set core ioctl flags: " "%d\n", error); return (error); } DELAY(2000); if (mac->mac_phy.switch_analog != NULL) mac->mac_phy.switch_analog(mac, 1); ctl = BWN_READ_4(mac, BWN_MACCTL) & ~BWN_MACCTL_GMODE; if (g_mode) ctl |= BWN_MACCTL_GMODE; BWN_WRITE_4(mac, BWN_MACCTL, ctl | BWN_MACCTL_IHR_ON); return (0); } static int bwn_phy_getinfo(struct bwn_mac *mac, int gmode) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_softc *sc = mac->mac_sc; uint32_t tmp; /* PHY */ tmp = BWN_READ_2(mac, BWN_PHYVER); phy->gmode = gmode; phy->rf_on = 1; phy->analog = (tmp & BWN_PHYVER_ANALOG) >> 12; phy->type = (tmp & BWN_PHYVER_TYPE) >> 8; phy->rev = (tmp & BWN_PHYVER_VERSION); if ((phy->type == BWN_PHYTYPE_A && phy->rev >= 4) || (phy->type == BWN_PHYTYPE_B && phy->rev != 2 && phy->rev != 4 && phy->rev != 6 && phy->rev != 7) || (phy->type == BWN_PHYTYPE_G && phy->rev > 9) || (phy->type == BWN_PHYTYPE_N && phy->rev > 6) || (phy->type == BWN_PHYTYPE_LP && phy->rev > 2)) goto unsupphy; /* RADIO */ BWN_WRITE_2(mac, BWN_RFCTL, BWN_RFCTL_ID); tmp = BWN_READ_2(mac, BWN_RFDATALO); BWN_WRITE_2(mac, BWN_RFCTL, BWN_RFCTL_ID); tmp |= (uint32_t)BWN_READ_2(mac, BWN_RFDATAHI) << 16; phy->rf_rev = (tmp & 0xf0000000) >> 28; phy->rf_ver = (tmp & 0x0ffff000) >> 12; phy->rf_manuf = (tmp & 0x00000fff); /* * For now, just always do full init (ie, what bwn has traditionally * done) */ phy->phy_do_full_init = 1; if (phy->rf_manuf != 0x17f) /* 0x17f is broadcom */ goto unsupradio; if ((phy->type == BWN_PHYTYPE_A && (phy->rf_ver != 0x2060 || phy->rf_rev != 1 || phy->rf_manuf != 0x17f)) || (phy->type == BWN_PHYTYPE_B && (phy->rf_ver & 0xfff0) != 
0x2050) || (phy->type == BWN_PHYTYPE_G && phy->rf_ver != 0x2050) || (phy->type == BWN_PHYTYPE_N && phy->rf_ver != 0x2055 && phy->rf_ver != 0x2056) || (phy->type == BWN_PHYTYPE_LP && phy->rf_ver != 0x2062 && phy->rf_ver != 0x2063)) goto unsupradio; return (0); unsupphy: device_printf(sc->sc_dev, "unsupported PHY (type %#x, rev %#x, " "analog %#x)\n", phy->type, phy->rev, phy->analog); return (ENXIO); unsupradio: device_printf(sc->sc_dev, "unsupported radio (manuf %#x, ver %#x, " "rev %#x)\n", phy->rf_manuf, phy->rf_ver, phy->rf_rev); return (ENXIO); } static int bwn_chiptest(struct bwn_mac *mac) { #define TESTVAL0 0x55aaaa55 #define TESTVAL1 0xaa5555aa struct bwn_softc *sc = mac->mac_sc; uint32_t v, backup; BWN_LOCK(sc); backup = bwn_shm_read_4(mac, BWN_SHARED, 0); bwn_shm_write_4(mac, BWN_SHARED, 0, TESTVAL0); if (bwn_shm_read_4(mac, BWN_SHARED, 0) != TESTVAL0) goto error; bwn_shm_write_4(mac, BWN_SHARED, 0, TESTVAL1); if (bwn_shm_read_4(mac, BWN_SHARED, 0) != TESTVAL1) goto error; bwn_shm_write_4(mac, BWN_SHARED, 0, backup); if ((bhnd_get_hwrev(sc->sc_dev) >= 3) && (bhnd_get_hwrev(sc->sc_dev) <= 10)) { BWN_WRITE_2(mac, BWN_TSF_CFP_START, 0xaaaa); BWN_WRITE_4(mac, BWN_TSF_CFP_START, 0xccccbbbb); if (BWN_READ_2(mac, BWN_TSF_CFP_START_LOW) != 0xbbbb) goto error; if (BWN_READ_2(mac, BWN_TSF_CFP_START_HIGH) != 0xcccc) goto error; } BWN_WRITE_4(mac, BWN_TSF_CFP_START, 0); v = BWN_READ_4(mac, BWN_MACCTL) | BWN_MACCTL_GMODE; if (v != (BWN_MACCTL_GMODE | BWN_MACCTL_IHR_ON)) goto error; BWN_UNLOCK(sc); return (0); error: BWN_UNLOCK(sc); device_printf(sc->sc_dev, "failed to validate the chipaccess\n"); return (ENODEV); } static int bwn_setup_channels(struct bwn_mac *mac, int have_bg, int have_a) { struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; uint8_t bands[IEEE80211_MODE_BYTES]; memset(ic->ic_channels, 0, sizeof(ic->ic_channels)); ic->ic_nchans = 0; DPRINTF(sc, BWN_DEBUG_EEPROM, "%s: called; bg=%d, a=%d\n", __func__, have_bg, have_a); if (have_bg) { memset(bands, 0, sizeof(bands)); setbit(bands, IEEE80211_MODE_11B); setbit(bands, IEEE80211_MODE_11G); bwn_addchannels(ic->ic_channels, IEEE80211_CHAN_MAX, &ic->ic_nchans, &bwn_chantable_bg, bands); } if (have_a) { memset(bands, 0, sizeof(bands)); setbit(bands, IEEE80211_MODE_11A); bwn_addchannels(ic->ic_channels, IEEE80211_CHAN_MAX, &ic->ic_nchans, &bwn_chantable_a, bands); } mac->mac_phy.supports_2ghz = have_bg; mac->mac_phy.supports_5ghz = have_a; return (ic->ic_nchans == 0 ? 
ENXIO : 0); } uint32_t bwn_shm_read_4(struct bwn_mac *mac, uint16_t way, uint16_t offset) { uint32_t ret; BWN_ASSERT_LOCKED(mac->mac_sc); if (way == BWN_SHARED) { KASSERT((offset & 0x0001) == 0, ("%s:%d warn", __func__, __LINE__)); if (offset & 0x0003) { bwn_shm_ctlword(mac, way, offset >> 2); ret = BWN_READ_2(mac, BWN_SHM_DATA_UNALIGNED); ret <<= 16; bwn_shm_ctlword(mac, way, (offset >> 2) + 1); ret |= BWN_READ_2(mac, BWN_SHM_DATA); goto out; } offset >>= 2; } bwn_shm_ctlword(mac, way, offset); ret = BWN_READ_4(mac, BWN_SHM_DATA); out: return (ret); } uint16_t bwn_shm_read_2(struct bwn_mac *mac, uint16_t way, uint16_t offset) { uint16_t ret; BWN_ASSERT_LOCKED(mac->mac_sc); if (way == BWN_SHARED) { KASSERT((offset & 0x0001) == 0, ("%s:%d warn", __func__, __LINE__)); if (offset & 0x0003) { bwn_shm_ctlword(mac, way, offset >> 2); ret = BWN_READ_2(mac, BWN_SHM_DATA_UNALIGNED); goto out; } offset >>= 2; } bwn_shm_ctlword(mac, way, offset); ret = BWN_READ_2(mac, BWN_SHM_DATA); out: return (ret); } static void bwn_shm_ctlword(struct bwn_mac *mac, uint16_t way, uint16_t offset) { uint32_t control; control = way; control <<= 16; control |= offset; BWN_WRITE_4(mac, BWN_SHM_CONTROL, control); } void bwn_shm_write_4(struct bwn_mac *mac, uint16_t way, uint16_t offset, uint32_t value) { BWN_ASSERT_LOCKED(mac->mac_sc); if (way == BWN_SHARED) { KASSERT((offset & 0x0001) == 0, ("%s:%d warn", __func__, __LINE__)); if (offset & 0x0003) { bwn_shm_ctlword(mac, way, offset >> 2); BWN_WRITE_2(mac, BWN_SHM_DATA_UNALIGNED, (value >> 16) & 0xffff); bwn_shm_ctlword(mac, way, (offset >> 2) + 1); BWN_WRITE_2(mac, BWN_SHM_DATA, value & 0xffff); return; } offset >>= 2; } bwn_shm_ctlword(mac, way, offset); BWN_WRITE_4(mac, BWN_SHM_DATA, value); } void bwn_shm_write_2(struct bwn_mac *mac, uint16_t way, uint16_t offset, uint16_t value) { BWN_ASSERT_LOCKED(mac->mac_sc); if (way == BWN_SHARED) { KASSERT((offset & 0x0001) == 0, ("%s:%d warn", __func__, __LINE__)); if (offset & 0x0003) { bwn_shm_ctlword(mac, way, offset >> 2); BWN_WRITE_2(mac, BWN_SHM_DATA_UNALIGNED, value); return; } offset >>= 2; } bwn_shm_ctlword(mac, way, offset); BWN_WRITE_2(mac, BWN_SHM_DATA, value); } static void bwn_addchannels(struct ieee80211_channel chans[], int maxchans, int *nchans, const struct bwn_channelinfo *ci, const uint8_t bands[]) { int i, error; for (i = 0, error = 0; i < ci->nchannels && error == 0; i++) { const struct bwn_channel *hc = &ci->channels[i]; error = ieee80211_add_channel(chans, maxchans, nchans, hc->ieee, hc->freq, hc->maxTxPow, 0, bands); } } static int bwn_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params) { struct ieee80211com *ic = ni->ni_ic; struct bwn_softc *sc = ic->ic_softc; struct bwn_mac *mac = sc->sc_curmac; int error; if ((sc->sc_flags & BWN_FLAG_RUNNING) == 0 || mac->mac_status < BWN_MAC_STATUS_STARTED) { m_freem(m); return (ENETDOWN); } BWN_LOCK(sc); if (bwn_tx_isfull(sc, m)) { m_freem(m); BWN_UNLOCK(sc); return (ENOBUFS); } error = bwn_tx_start(sc, ni, m); if (error == 0) sc->sc_watchdog_timer = 5; BWN_UNLOCK(sc); return (error); } /* * Callback from the 802.11 layer to update the slot time * based on the current setting. We use it to notify the * firmware of ERP changes and the f/w takes care of things * like slot time and preamble. 
*/ static void bwn_updateslot(struct ieee80211com *ic) { struct bwn_softc *sc = ic->ic_softc; struct bwn_mac *mac; BWN_LOCK(sc); if (sc->sc_flags & BWN_FLAG_RUNNING) { mac = (struct bwn_mac *)sc->sc_curmac; bwn_set_slot_time(mac, IEEE80211_GET_SLOTTIME(ic)); } BWN_UNLOCK(sc); } /* * Callback from the 802.11 layer after a promiscuous mode change. * Note this interface does not check the operating mode as this * is an internal callback and we are expected to honor the current * state (e.g. this is used for setting the interface in promiscuous * mode when operating in hostap mode to do ACS). */ static void bwn_update_promisc(struct ieee80211com *ic) { struct bwn_softc *sc = ic->ic_softc; struct bwn_mac *mac = sc->sc_curmac; BWN_LOCK(sc); mac = sc->sc_curmac; if (mac != NULL && mac->mac_status >= BWN_MAC_STATUS_INITED) { if (ic->ic_promisc > 0) sc->sc_filters |= BWN_MACCTL_PROMISC; else sc->sc_filters &= ~BWN_MACCTL_PROMISC; bwn_set_opmode(mac); } BWN_UNLOCK(sc); } /* * Callback from the 802.11 layer to update WME parameters. */ static int bwn_wme_update(struct ieee80211com *ic) { struct bwn_softc *sc = ic->ic_softc; struct bwn_mac *mac = sc->sc_curmac; struct chanAccParams chp; struct wmeParams *wmep; int i; ieee80211_wme_ic_getparams(ic, &chp); BWN_LOCK(sc); mac = sc->sc_curmac; if (mac != NULL && mac->mac_status >= BWN_MAC_STATUS_INITED) { bwn_mac_suspend(mac); for (i = 0; i < N(sc->sc_wmeParams); i++) { wmep = &chp.cap_wmeParams[i]; bwn_wme_loadparams(mac, wmep, bwn_wme_shm_offsets[i]); } bwn_mac_enable(mac); } BWN_UNLOCK(sc); return (0); } static void bwn_scan_start(struct ieee80211com *ic) { struct bwn_softc *sc = ic->ic_softc; struct bwn_mac *mac; BWN_LOCK(sc); mac = sc->sc_curmac; if (mac != NULL && mac->mac_status >= BWN_MAC_STATUS_INITED) { sc->sc_filters |= BWN_MACCTL_BEACON_PROMISC; bwn_set_opmode(mac); /* disable CFP update during scan */ bwn_hf_write(mac, bwn_hf_read(mac) | BWN_HF_SKIP_CFP_UPDATE); } BWN_UNLOCK(sc); } static void bwn_scan_end(struct ieee80211com *ic) { struct bwn_softc *sc = ic->ic_softc; struct bwn_mac *mac; BWN_LOCK(sc); mac = sc->sc_curmac; if (mac != NULL && mac->mac_status >= BWN_MAC_STATUS_INITED) { sc->sc_filters &= ~BWN_MACCTL_BEACON_PROMISC; bwn_set_opmode(mac); bwn_hf_write(mac, bwn_hf_read(mac) & ~BWN_HF_SKIP_CFP_UPDATE); } BWN_UNLOCK(sc); } static void bwn_set_channel(struct ieee80211com *ic) { struct bwn_softc *sc = ic->ic_softc; struct bwn_mac *mac = sc->sc_curmac; struct bwn_phy *phy = &mac->mac_phy; int chan, error; BWN_LOCK(sc); error = bwn_switch_band(sc, ic->ic_curchan); if (error) goto fail; bwn_mac_suspend(mac); bwn_set_txretry(mac, BWN_RETRY_SHORT, BWN_RETRY_LONG); chan = ieee80211_chan2ieee(ic, ic->ic_curchan); if (chan != phy->chan) bwn_switch_channel(mac, chan); /* TX power level */ if (ic->ic_curchan->ic_maxpower != 0 && ic->ic_curchan->ic_maxpower != phy->txpower) { phy->txpower = ic->ic_curchan->ic_maxpower / 2; bwn_phy_txpower_check(mac, BWN_TXPWR_IGNORE_TIME | BWN_TXPWR_IGNORE_TSSI); } bwn_set_txantenna(mac, BWN_ANT_DEFAULT); if (phy->set_antenna) phy->set_antenna(mac, BWN_ANT_DEFAULT); if (sc->sc_rf_enabled != phy->rf_on) { if (sc->sc_rf_enabled) { bwn_rf_turnon(mac); if (!(mac->mac_flags & BWN_MAC_FLAG_RADIO_ON)) device_printf(sc->sc_dev, "please turn on the RF switch\n"); } else bwn_rf_turnoff(mac); } bwn_mac_enable(mac); fail: /* * Setup radio tap channel freq and flags */ sc->sc_tx_th.wt_chan_freq = sc->sc_rx_th.wr_chan_freq = htole16(ic->ic_curchan->ic_freq); sc->sc_tx_th.wt_chan_flags = sc->sc_rx_th.wr_chan_flags = 
htole16(ic->ic_curchan->ic_flags & 0xffff); BWN_UNLOCK(sc); } static struct ieee80211vap * bwn_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, enum ieee80211_opmode opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac[IEEE80211_ADDR_LEN]) { struct ieee80211vap *vap; struct bwn_vap *bvp; switch (opmode) { case IEEE80211_M_HOSTAP: case IEEE80211_M_MBSS: case IEEE80211_M_STA: case IEEE80211_M_WDS: case IEEE80211_M_MONITOR: case IEEE80211_M_IBSS: case IEEE80211_M_AHDEMO: break; default: return (NULL); } bvp = malloc(sizeof(struct bwn_vap), M_80211_VAP, M_WAITOK | M_ZERO); vap = &bvp->bv_vap; ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid); /* override with driver methods */ bvp->bv_newstate = vap->iv_newstate; vap->iv_newstate = bwn_newstate; /* override max aid so sta's cannot assoc when we're out of sta id's */ vap->iv_max_aid = BWN_STAID_MAX; ieee80211_ratectl_init(vap); /* complete setup */ ieee80211_vap_attach(vap, ieee80211_media_change, ieee80211_media_status, mac); return (vap); } static void bwn_vap_delete(struct ieee80211vap *vap) { struct bwn_vap *bvp = BWN_VAP(vap); ieee80211_ratectl_deinit(vap); ieee80211_vap_detach(vap); free(bvp, M_80211_VAP); } static int bwn_init(struct bwn_softc *sc) { struct bwn_mac *mac; int error; BWN_ASSERT_LOCKED(sc); DPRINTF(sc, BWN_DEBUG_RESET, "%s: called\n", __func__); bzero(sc->sc_bssid, IEEE80211_ADDR_LEN); sc->sc_flags |= BWN_FLAG_NEED_BEACON_TP; sc->sc_filters = 0; bwn_wme_clear(sc); sc->sc_beacons[0] = sc->sc_beacons[1] = 0; sc->sc_rf_enabled = 1; mac = sc->sc_curmac; if (mac->mac_status == BWN_MAC_STATUS_UNINIT) { error = bwn_core_init(mac); if (error != 0) return (error); } if (mac->mac_status == BWN_MAC_STATUS_INITED) bwn_core_start(mac); bwn_set_opmode(mac); bwn_set_pretbtt(mac); bwn_spu_setdelay(mac, 0); bwn_set_macaddr(mac); sc->sc_flags |= BWN_FLAG_RUNNING; callout_reset(&sc->sc_rfswitch_ch, hz, bwn_rfswitch, sc); callout_reset(&sc->sc_watchdog_ch, hz, bwn_watchdog, sc); return (0); } static void bwn_stop(struct bwn_softc *sc) { struct bwn_mac *mac = sc->sc_curmac; BWN_ASSERT_LOCKED(sc); DPRINTF(sc, BWN_DEBUG_RESET, "%s: called\n", __func__); if (mac->mac_status >= BWN_MAC_STATUS_INITED) { /* XXX FIXME opmode not based on VAP */ bwn_set_opmode(mac); bwn_set_macaddr(mac); } if (mac->mac_status >= BWN_MAC_STATUS_STARTED) bwn_core_stop(mac); callout_stop(&sc->sc_led_blink_ch); sc->sc_led_blinking = 0; bwn_core_exit(mac); sc->sc_rf_enabled = 0; sc->sc_flags &= ~BWN_FLAG_RUNNING; } static void bwn_wme_clear(struct bwn_softc *sc) { #define MS(_v, _f) (((_v) & _f) >> _f##_S) struct wmeParams *p; unsigned int i; KASSERT(N(bwn_wme_shm_offsets) == N(sc->sc_wmeParams), ("%s:%d: fail", __func__, __LINE__)); for (i = 0; i < N(sc->sc_wmeParams); i++) { p = &(sc->sc_wmeParams[i]); switch (bwn_wme_shm_offsets[i]) { case BWN_WME_VOICE: p->wmep_txopLimit = 0; p->wmep_aifsn = 2; /* XXX FIXME: log2(cwmin) */ p->wmep_logcwmin = MS(0x0001, WME_PARAM_LOGCWMIN); p->wmep_logcwmax = MS(0x0001, WME_PARAM_LOGCWMAX); break; case BWN_WME_VIDEO: p->wmep_txopLimit = 0; p->wmep_aifsn = 2; /* XXX FIXME: log2(cwmin) */ p->wmep_logcwmin = MS(0x0001, WME_PARAM_LOGCWMIN); p->wmep_logcwmax = MS(0x0001, WME_PARAM_LOGCWMAX); break; case BWN_WME_BESTEFFORT: p->wmep_txopLimit = 0; p->wmep_aifsn = 3; /* XXX FIXME: log2(cwmin) */ p->wmep_logcwmin = MS(0x0001, WME_PARAM_LOGCWMIN); p->wmep_logcwmax = MS(0x03ff, WME_PARAM_LOGCWMAX); break; case BWN_WME_BACKGROUND: p->wmep_txopLimit = 0; p->wmep_aifsn = 7; /* XXX FIXME: 
log2(cwmin) */ p->wmep_logcwmin = MS(0x0001, WME_PARAM_LOGCWMIN); p->wmep_logcwmax = MS(0x03ff, WME_PARAM_LOGCWMAX); break; default: KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } } } static int bwn_core_forceclk(struct bwn_mac *mac, bool force) { struct bwn_softc *sc; bhnd_clock clock; int error; sc = mac->mac_sc; /* On PMU equipped devices, we do not need to force the HT clock */ if (sc->sc_pmu != NULL) return (0); /* Issue a PMU clock request */ if (force) clock = BHND_CLOCK_HT; else clock = BHND_CLOCK_DYN; if ((error = bhnd_request_clock(sc->sc_dev, clock))) { device_printf(sc->sc_dev, "%d clock request failed: %d\n", clock, error); return (error); } return (0); } static int bwn_core_init(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; uint64_t hf; int error; KASSERT(mac->mac_status == BWN_MAC_STATUS_UNINIT, ("%s:%d: fail", __func__, __LINE__)); DPRINTF(mac->mac_sc, BWN_DEBUG_RESET, "%s: called\n", __func__); if ((error = bwn_core_forceclk(mac, true))) return (error); if (bhnd_is_hw_suspended(sc->sc_dev)) { if ((error = bwn_reset_core(mac, mac->mac_phy.gmode))) goto fail0; } mac->mac_flags &= ~BWN_MAC_FLAG_DFQVALID; mac->mac_flags |= BWN_MAC_FLAG_RADIO_ON; mac->mac_phy.hwpctl = (bwn_hwpctl) ? 1 : 0; BWN_GETTIME(mac->mac_phy.nexttime); mac->mac_phy.txerrors = BWN_TXERROR_MAX; bzero(&mac->mac_stats, sizeof(mac->mac_stats)); mac->mac_stats.link_noise = -95; mac->mac_reason_intr = 0; bzero(mac->mac_reason, sizeof(mac->mac_reason)); mac->mac_intr_mask = BWN_INTR_MASKTEMPLATE; #ifdef BWN_DEBUG if (sc->sc_debug & BWN_DEBUG_XMIT) mac->mac_intr_mask &= ~BWN_INTR_PHY_TXERR; #endif mac->mac_suspended = 1; mac->mac_task_state = 0; memset(&mac->mac_noise, 0, sizeof(mac->mac_noise)); mac->mac_phy.init_pre(mac); bwn_bt_disable(mac); if (mac->mac_phy.prepare_hw) { error = mac->mac_phy.prepare_hw(mac); if (error) goto fail0; } DPRINTF(mac->mac_sc, BWN_DEBUG_RESET, "%s: chip_init\n", __func__); error = bwn_chip_init(mac); if (error) goto fail0; bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_COREREV, bhnd_get_hwrev(sc->sc_dev)); hf = bwn_hf_read(mac); if (mac->mac_phy.type == BWN_PHYTYPE_G) { hf |= BWN_HF_GPHY_SYM_WORKAROUND; if (sc->sc_board_info.board_flags & BHND_BFL_PACTRL) hf |= BWN_HF_PAGAINBOOST_OFDM_ON; if (mac->mac_phy.rev == 1) hf |= BWN_HF_GPHY_DC_CANCELFILTER; } if (mac->mac_phy.rf_ver == 0x2050) { if (mac->mac_phy.rf_rev < 6) hf |= BWN_HF_FORCE_VCO_RECALC; if (mac->mac_phy.rf_rev == 6) hf |= BWN_HF_4318_TSSI; } if (sc->sc_board_info.board_flags & BHND_BFL_NOPLLDOWN) hf |= BWN_HF_SLOWCLOCK_REQ_OFF; if (sc->sc_quirks & BWN_QUIRK_UCODE_SLOWCLOCK_WAR) hf |= BWN_HF_PCI_SLOWCLOCK_WORKAROUND; hf &= ~BWN_HF_SKIP_CFP_UPDATE; bwn_hf_write(mac, hf); /* Tell the firmware about the MAC capabilities */ if (bhnd_get_hwrev(sc->sc_dev) >= 13) { uint32_t cap; cap = BWN_READ_4(mac, BWN_MAC_HW_CAP); DPRINTF(sc, BWN_DEBUG_RESET, "%s: hw capabilities: 0x%08x\n", __func__, cap); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_MACHW_L, cap & 0xffff); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_MACHW_H, (cap >> 16) & 0xffff); } bwn_set_txretry(mac, BWN_RETRY_SHORT, BWN_RETRY_LONG); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_SHORT_RETRY_FALLBACK, 3); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_LONG_RETRY_FALLBACK, 2); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_PROBE_RESP_MAXTIME, 1); bwn_rate_init(mac); bwn_set_phytxctl(mac); bwn_shm_write_2(mac, BWN_SCRATCH, BWN_SCRATCH_CONT_MIN, (mac->mac_phy.type == BWN_PHYTYPE_B) ? 
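	/*
	 * CWmin/CWmax-style contention parameters pushed into the ucode
	 * scratch area: the minimum appears to be 31 for the CCK-only B PHY
	 * and 15 for OFDM-capable PHYs; the maximum written below is 1023.
	 */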
0x1f : 0xf); bwn_shm_write_2(mac, BWN_SCRATCH, BWN_SCRATCH_CONT_MAX, 0x3ff); if (sc->sc_quirks & BWN_QUIRK_NODMA) bwn_pio_init(mac); else bwn_dma_init(mac); bwn_wme_init(mac); bwn_spu_setdelay(mac, 1); bwn_bt_enable(mac); DPRINTF(mac->mac_sc, BWN_DEBUG_RESET, "%s: powerup\n", __func__); if (sc->sc_board_info.board_flags & BHND_BFL_NOPLLDOWN) bwn_core_forceclk(mac, true); else bwn_core_forceclk(mac, false); bwn_set_macaddr(mac); bwn_crypt_init(mac); /* XXX LED initializatin */ mac->mac_status = BWN_MAC_STATUS_INITED; DPRINTF(mac->mac_sc, BWN_DEBUG_RESET, "%s: done\n", __func__); return (error); fail0: bhnd_suspend_hw(sc->sc_dev, 0); KASSERT(mac->mac_status == BWN_MAC_STATUS_UNINIT, ("%s:%d: fail", __func__, __LINE__)); DPRINTF(mac->mac_sc, BWN_DEBUG_RESET, "%s: fail\n", __func__); return (error); } static void bwn_core_start(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; uint32_t tmp; KASSERT(mac->mac_status == BWN_MAC_STATUS_INITED, ("%s:%d: fail", __func__, __LINE__)); if (bhnd_get_hwrev(sc->sc_dev) < 5) return; while (1) { tmp = BWN_READ_4(mac, BWN_XMITSTAT_0); if (!(tmp & 0x00000001)) break; tmp = BWN_READ_4(mac, BWN_XMITSTAT_1); } bwn_mac_enable(mac); BWN_WRITE_4(mac, BWN_INTR_MASK, mac->mac_intr_mask); callout_reset(&sc->sc_task_ch, hz * 15, bwn_tasks, mac); mac->mac_status = BWN_MAC_STATUS_STARTED; } static void bwn_core_exit(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; uint32_t macctl; BWN_ASSERT_LOCKED(mac->mac_sc); KASSERT(mac->mac_status <= BWN_MAC_STATUS_INITED, ("%s:%d: fail", __func__, __LINE__)); if (mac->mac_status != BWN_MAC_STATUS_INITED) return; mac->mac_status = BWN_MAC_STATUS_UNINIT; macctl = BWN_READ_4(mac, BWN_MACCTL); macctl &= ~BWN_MACCTL_MCODE_RUN; macctl |= BWN_MACCTL_MCODE_JMP0; BWN_WRITE_4(mac, BWN_MACCTL, macctl); bwn_dma_stop(mac); bwn_pio_stop(mac); bwn_chip_exit(mac); mac->mac_phy.switch_analog(mac, 0); bhnd_suspend_hw(sc->sc_dev, 0); } static void bwn_bt_disable(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; (void)sc; /* XXX do nothing yet */ } static int bwn_chip_init(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; struct bwn_phy *phy = &mac->mac_phy; uint32_t macctl; u_int delay; int error; macctl = BWN_MACCTL_IHR_ON | BWN_MACCTL_SHM_ON | BWN_MACCTL_STA; if (phy->gmode) macctl |= BWN_MACCTL_GMODE; BWN_WRITE_4(mac, BWN_MACCTL, macctl); error = bwn_fw_fillinfo(mac); if (error) return (error); error = bwn_fw_loaducode(mac); if (error) return (error); error = bwn_gpio_init(mac); if (error) return (error); error = bwn_fw_loadinitvals(mac); if (error) return (error); phy->switch_analog(mac, 1); error = bwn_phy_init(mac); if (error) return (error); if (phy->set_im) phy->set_im(mac, BWN_IMMODE_NONE); if (phy->set_antenna) phy->set_antenna(mac, BWN_ANT_DEFAULT); bwn_set_txantenna(mac, BWN_ANT_DEFAULT); if (phy->type == BWN_PHYTYPE_B) BWN_WRITE_2(mac, 0x005e, BWN_READ_2(mac, 0x005e) | 0x0004); BWN_WRITE_4(mac, 0x0100, 0x01000000); if (bhnd_get_hwrev(sc->sc_dev) < 5) BWN_WRITE_4(mac, 0x010c, 0x01000000); BWN_WRITE_4(mac, BWN_MACCTL, BWN_READ_4(mac, BWN_MACCTL) & ~BWN_MACCTL_STA); BWN_WRITE_4(mac, BWN_MACCTL, BWN_READ_4(mac, BWN_MACCTL) | BWN_MACCTL_STA); bwn_shm_write_2(mac, BWN_SHARED, 0x0074, 0x0000); bwn_set_opmode(mac); if (bhnd_get_hwrev(sc->sc_dev) < 3) { BWN_WRITE_2(mac, 0x060e, 0x0000); BWN_WRITE_2(mac, 0x0610, 0x8000); BWN_WRITE_2(mac, 0x0604, 0x0000); BWN_WRITE_2(mac, 0x0606, 0x0200); } else { BWN_WRITE_4(mac, 0x0188, 0x80000000); BWN_WRITE_4(mac, 0x018c, 0x02000000); } BWN_WRITE_4(mac, BWN_INTR_REASON, 
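	/*
	 * The writes that follow program the interrupt mask of each of the
	 * six DMA queues; queues 0 and 3 enable one extra source bit
	 * (0x0001dc00) compared to the others (0x0000dc00).
	 */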
0x00004000); BWN_WRITE_4(mac, BWN_DMA0_INTR_MASK, 0x0001dc00); BWN_WRITE_4(mac, BWN_DMA1_INTR_MASK, 0x0000dc00); BWN_WRITE_4(mac, BWN_DMA2_INTR_MASK, 0x0000dc00); BWN_WRITE_4(mac, BWN_DMA3_INTR_MASK, 0x0001dc00); BWN_WRITE_4(mac, BWN_DMA4_INTR_MASK, 0x0000dc00); BWN_WRITE_4(mac, BWN_DMA5_INTR_MASK, 0x0000dc00); bwn_mac_phy_clock_set(mac, true); /* Provide the HT clock transition latency to the MAC core */ error = bhnd_get_clock_latency(sc->sc_dev, BHND_CLOCK_HT, &delay); if (error) { device_printf(sc->sc_dev, "failed to fetch HT clock latency: " "%d\n", error); return (error); } if (delay > UINT16_MAX) { device_printf(sc->sc_dev, "invalid HT clock latency: %u\n", delay); return (ENXIO); } BWN_WRITE_2(mac, BWN_POWERUP_DELAY, delay); return (0); } /* read hostflags */ uint64_t bwn_hf_read(struct bwn_mac *mac) { uint64_t ret; ret = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_HFHI); ret <<= 16; ret |= bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_HFMI); ret <<= 16; ret |= bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_HFLO); return (ret); } void bwn_hf_write(struct bwn_mac *mac, uint64_t value) { bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_HFLO, (value & 0x00000000ffffull)); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_HFMI, (value & 0x0000ffff0000ull) >> 16); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_HFHI, (value & 0xffff00000000ULL) >> 32); } static void bwn_set_txretry(struct bwn_mac *mac, int s, int l) { bwn_shm_write_2(mac, BWN_SCRATCH, BWN_SCRATCH_SHORT_RETRY, MIN(s, 0xf)); bwn_shm_write_2(mac, BWN_SCRATCH, BWN_SCRATCH_LONG_RETRY, MIN(l, 0xf)); } static void bwn_rate_init(struct bwn_mac *mac) { switch (mac->mac_phy.type) { case BWN_PHYTYPE_A: case BWN_PHYTYPE_G: case BWN_PHYTYPE_LP: case BWN_PHYTYPE_N: bwn_rate_write(mac, BWN_OFDM_RATE_6MB, 1); bwn_rate_write(mac, BWN_OFDM_RATE_12MB, 1); bwn_rate_write(mac, BWN_OFDM_RATE_18MB, 1); bwn_rate_write(mac, BWN_OFDM_RATE_24MB, 1); bwn_rate_write(mac, BWN_OFDM_RATE_36MB, 1); bwn_rate_write(mac, BWN_OFDM_RATE_48MB, 1); bwn_rate_write(mac, BWN_OFDM_RATE_54MB, 1); if (mac->mac_phy.type == BWN_PHYTYPE_A) break; /* FALLTHROUGH */ case BWN_PHYTYPE_B: bwn_rate_write(mac, BWN_CCK_RATE_1MB, 0); bwn_rate_write(mac, BWN_CCK_RATE_2MB, 0); bwn_rate_write(mac, BWN_CCK_RATE_5MB, 0); bwn_rate_write(mac, BWN_CCK_RATE_11MB, 0); break; default: KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } } static void bwn_rate_write(struct bwn_mac *mac, uint16_t rate, int ofdm) { uint16_t offset; if (ofdm) { offset = 0x480; offset += (bwn_plcp_getofdm(rate) & 0x000f) * 2; } else { offset = 0x4c0; offset += (bwn_plcp_getcck(rate) & 0x000f) * 2; } bwn_shm_write_2(mac, BWN_SHARED, offset + 0x20, bwn_shm_read_2(mac, BWN_SHARED, offset)); } static uint8_t bwn_plcp_getcck(const uint8_t bitrate) { switch (bitrate) { case BWN_CCK_RATE_1MB: return (0x0a); case BWN_CCK_RATE_2MB: return (0x14); case BWN_CCK_RATE_5MB: return (0x37); case BWN_CCK_RATE_11MB: return (0x6e); } KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); return (0); } static uint8_t bwn_plcp_getofdm(const uint8_t bitrate) { switch (bitrate) { case BWN_OFDM_RATE_6MB: return (0xb); case BWN_OFDM_RATE_9MB: return (0xf); case BWN_OFDM_RATE_12MB: return (0xa); case BWN_OFDM_RATE_18MB: return (0xe); case BWN_OFDM_RATE_24MB: return (0x9); case BWN_OFDM_RATE_36MB: return (0xd); case BWN_OFDM_RATE_48MB: return (0x8); case BWN_OFDM_RATE_54MB: return (0xc); } KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); return (0); } static void bwn_set_phytxctl(struct bwn_mac *mac) { uint16_t ctl; ctl = (BWN_TX_PHY_ENC_CCK | 
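	/*
	 * Default PHY TX control word used for beacons, ACK/CTS and probe
	 * responses: CCK encoding, automatic antenna selection, and the
	 * TXPWR flag.
	 */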
BWN_TX_PHY_ANT01AUTO | BWN_TX_PHY_TXPWR); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_BEACON_PHYCTL, ctl); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_ACKCTS_PHYCTL, ctl); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_PROBE_RESP_PHYCTL, ctl); } static void bwn_pio_init(struct bwn_mac *mac) { struct bwn_pio *pio = &mac->mac_method.pio; BWN_WRITE_4(mac, BWN_MACCTL, BWN_READ_4(mac, BWN_MACCTL) & ~BWN_MACCTL_BIGENDIAN); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_RX_PADOFFSET, 0); bwn_pio_set_txqueue(mac, &pio->wme[WME_AC_BK], 0); bwn_pio_set_txqueue(mac, &pio->wme[WME_AC_BE], 1); bwn_pio_set_txqueue(mac, &pio->wme[WME_AC_VI], 2); bwn_pio_set_txqueue(mac, &pio->wme[WME_AC_VO], 3); bwn_pio_set_txqueue(mac, &pio->mcast, 4); bwn_pio_setupqueue_rx(mac, &pio->rx, 0); } static void bwn_pio_set_txqueue(struct bwn_mac *mac, struct bwn_pio_txqueue *tq, int index) { struct bwn_pio_txpkt *tp; struct bwn_softc *sc = mac->mac_sc; unsigned int i; tq->tq_base = bwn_pio_idx2base(mac, index) + BWN_PIO_TXQOFFSET(mac); tq->tq_index = index; tq->tq_free = BWN_PIO_MAX_TXPACKETS; if (bhnd_get_hwrev(sc->sc_dev) >= 8) tq->tq_size = 1920; else { tq->tq_size = bwn_pio_read_2(mac, tq, BWN_PIO_TXQBUFSIZE); tq->tq_size -= 80; } TAILQ_INIT(&tq->tq_pktlist); for (i = 0; i < N(tq->tq_pkts); i++) { tp = &(tq->tq_pkts[i]); tp->tp_index = i; tp->tp_queue = tq; TAILQ_INSERT_TAIL(&tq->tq_pktlist, tp, tp_list); } } static uint16_t bwn_pio_idx2base(struct bwn_mac *mac, int index) { struct bwn_softc *sc = mac->mac_sc; static const uint16_t bases[] = { BWN_PIO_BASE0, BWN_PIO_BASE1, BWN_PIO_BASE2, BWN_PIO_BASE3, BWN_PIO_BASE4, BWN_PIO_BASE5, BWN_PIO_BASE6, BWN_PIO_BASE7, }; static const uint16_t bases_rev11[] = { BWN_PIO11_BASE0, BWN_PIO11_BASE1, BWN_PIO11_BASE2, BWN_PIO11_BASE3, BWN_PIO11_BASE4, BWN_PIO11_BASE5, }; if (bhnd_get_hwrev(sc->sc_dev) >= 11) { if (index >= N(bases_rev11)) device_printf(sc->sc_dev, "%s: warning\n", __func__); return (bases_rev11[index]); } if (index >= N(bases)) device_printf(sc->sc_dev, "%s: warning\n", __func__); return (bases[index]); } static void bwn_pio_setupqueue_rx(struct bwn_mac *mac, struct bwn_pio_rxqueue *prq, int index) { struct bwn_softc *sc = mac->mac_sc; prq->prq_mac = mac; prq->prq_rev = bhnd_get_hwrev(sc->sc_dev); prq->prq_base = bwn_pio_idx2base(mac, index) + BWN_PIO_RXQOFFSET(mac); bwn_dma_rxdirectfifo(mac, index, 1); } static void bwn_destroy_pioqueue_tx(struct bwn_pio_txqueue *tq) { if (tq == NULL) return; bwn_pio_cancel_tx_packets(tq); } static void bwn_destroy_queue_tx(struct bwn_pio_txqueue *pio) { bwn_destroy_pioqueue_tx(pio); } static uint16_t bwn_pio_read_2(struct bwn_mac *mac, struct bwn_pio_txqueue *tq, uint16_t offset) { return (BWN_READ_2(mac, tq->tq_base + offset)); } static void bwn_dma_rxdirectfifo(struct bwn_mac *mac, int idx, uint8_t enable) { uint32_t ctl; uint16_t base; base = bwn_dma_base(mac->mac_dmatype, idx); if (mac->mac_dmatype == BHND_DMA_ADDR_64BIT) { ctl = BWN_READ_4(mac, base + BWN_DMA64_RXCTL); ctl &= ~BWN_DMA64_RXDIRECTFIFO; if (enable) ctl |= BWN_DMA64_RXDIRECTFIFO; BWN_WRITE_4(mac, base + BWN_DMA64_RXCTL, ctl); } else { ctl = BWN_READ_4(mac, base + BWN_DMA32_RXCTL); ctl &= ~BWN_DMA32_RXDIRECTFIFO; if (enable) ctl |= BWN_DMA32_RXDIRECTFIFO; BWN_WRITE_4(mac, base + BWN_DMA32_RXCTL, ctl); } } static void bwn_pio_cancel_tx_packets(struct bwn_pio_txqueue *tq) { struct bwn_pio_txpkt *tp; unsigned int i; for (i = 0; i < N(tq->tq_pkts); i++) { tp = &(tq->tq_pkts[i]); if (tp->tp_m) { m_freem(tp->tp_m); tp->tp_m = NULL; } } } static uint16_t bwn_dma_base(int type, 
int controller_idx) { static const uint16_t map64[] = { BWN_DMA64_BASE0, BWN_DMA64_BASE1, BWN_DMA64_BASE2, BWN_DMA64_BASE3, BWN_DMA64_BASE4, BWN_DMA64_BASE5, }; static const uint16_t map32[] = { BWN_DMA32_BASE0, BWN_DMA32_BASE1, BWN_DMA32_BASE2, BWN_DMA32_BASE3, BWN_DMA32_BASE4, BWN_DMA32_BASE5, }; if (type == BHND_DMA_ADDR_64BIT) { KASSERT(controller_idx >= 0 && controller_idx < N(map64), ("%s:%d: fail", __func__, __LINE__)); return (map64[controller_idx]); } KASSERT(controller_idx >= 0 && controller_idx < N(map32), ("%s:%d: fail", __func__, __LINE__)); return (map32[controller_idx]); } static void bwn_dma_init(struct bwn_mac *mac) { struct bwn_dma *dma = &mac->mac_method.dma; /* setup TX DMA channels. */ bwn_dma_setup(dma->wme[WME_AC_BK]); bwn_dma_setup(dma->wme[WME_AC_BE]); bwn_dma_setup(dma->wme[WME_AC_VI]); bwn_dma_setup(dma->wme[WME_AC_VO]); bwn_dma_setup(dma->mcast); /* setup RX DMA channel. */ bwn_dma_setup(dma->rx); } static struct bwn_dma_ring * bwn_dma_ringsetup(struct bwn_mac *mac, int controller_index, int for_tx) { struct bwn_dma *dma = &mac->mac_method.dma; struct bwn_dma_ring *dr; struct bwn_dmadesc_generic *desc; struct bwn_dmadesc_meta *mt; struct bwn_softc *sc = mac->mac_sc; int error, i; dr = malloc(sizeof(*dr), M_DEVBUF, M_NOWAIT | M_ZERO); if (dr == NULL) goto out; dr->dr_numslots = BWN_RXRING_SLOTS; if (for_tx) dr->dr_numslots = BWN_TXRING_SLOTS; dr->dr_meta = malloc(dr->dr_numslots * sizeof(struct bwn_dmadesc_meta), M_DEVBUF, M_NOWAIT | M_ZERO); if (dr->dr_meta == NULL) goto fail0; dr->dr_type = mac->mac_dmatype; dr->dr_mac = mac; dr->dr_base = bwn_dma_base(dr->dr_type, controller_index); dr->dr_index = controller_index; if (dr->dr_type == BHND_DMA_ADDR_64BIT) { dr->getdesc = bwn_dma_64_getdesc; dr->setdesc = bwn_dma_64_setdesc; dr->start_transfer = bwn_dma_64_start_transfer; dr->suspend = bwn_dma_64_suspend; dr->resume = bwn_dma_64_resume; dr->get_curslot = bwn_dma_64_get_curslot; dr->set_curslot = bwn_dma_64_set_curslot; } else { dr->getdesc = bwn_dma_32_getdesc; dr->setdesc = bwn_dma_32_setdesc; dr->start_transfer = bwn_dma_32_start_transfer; dr->suspend = bwn_dma_32_suspend; dr->resume = bwn_dma_32_resume; dr->get_curslot = bwn_dma_32_get_curslot; dr->set_curslot = bwn_dma_32_set_curslot; } if (for_tx) { dr->dr_tx = 1; dr->dr_curslot = -1; } else { if (dr->dr_index == 0) { switch (mac->mac_fw.fw_hdr_format) { case BWN_FW_HDR_351: case BWN_FW_HDR_410: dr->dr_rx_bufsize = BWN_DMA0_RX_BUFFERSIZE_FW351; dr->dr_frameoffset = BWN_DMA0_RX_FRAMEOFFSET_FW351; break; case BWN_FW_HDR_598: dr->dr_rx_bufsize = BWN_DMA0_RX_BUFFERSIZE_FW598; dr->dr_frameoffset = BWN_DMA0_RX_FRAMEOFFSET_FW598; break; } } else KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } error = bwn_dma_allocringmemory(dr); if (error) goto fail2; if (for_tx) { /* * Assumption: BWN_TXRING_SLOTS can be divided by * BWN_TX_SLOTS_PER_FRAME */ KASSERT(BWN_TXRING_SLOTS % BWN_TX_SLOTS_PER_FRAME == 0, ("%s:%d: fail", __func__, __LINE__)); dr->dr_txhdr_cache = contigmalloc( (dr->dr_numslots / BWN_TX_SLOTS_PER_FRAME) * BWN_MAXTXHDRSIZE, M_DEVBUF, M_ZERO, 0, BUS_SPACE_MAXADDR, 8, 0); if (dr->dr_txhdr_cache == NULL) { device_printf(sc->sc_dev, "can't allocate TX header DMA memory\n"); goto fail1; } /* * Create TX ring DMA stuffs */ error = bus_dma_tag_create(dma->parent_dtag, BWN_ALIGN, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BWN_HDRSIZE(mac), 1, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &dr->dr_txring_dtag); if (error) { device_printf(sc->sc_dev, "can't create TX ring DMA tag: TODO frees\n"); goto 
fail2; } for (i = 0; i < dr->dr_numslots; i += 2) { dr->getdesc(dr, i, &desc, &mt); mt->mt_txtype = BWN_DMADESC_METATYPE_HEADER; mt->mt_m = NULL; mt->mt_ni = NULL; mt->mt_islast = 0; error = bus_dmamap_create(dr->dr_txring_dtag, 0, &mt->mt_dmap); if (error) { device_printf(sc->sc_dev, "can't create RX buf DMA map\n"); goto fail2; } dr->getdesc(dr, i + 1, &desc, &mt); mt->mt_txtype = BWN_DMADESC_METATYPE_BODY; mt->mt_m = NULL; mt->mt_ni = NULL; mt->mt_islast = 1; error = bus_dmamap_create(dma->txbuf_dtag, 0, &mt->mt_dmap); if (error) { device_printf(sc->sc_dev, "can't create RX buf DMA map\n"); goto fail2; } } } else { error = bus_dmamap_create(dma->rxbuf_dtag, 0, &dr->dr_spare_dmap); if (error) { device_printf(sc->sc_dev, "can't create RX buf DMA map\n"); goto out; /* XXX wrong! */ } for (i = 0; i < dr->dr_numslots; i++) { dr->getdesc(dr, i, &desc, &mt); error = bus_dmamap_create(dma->rxbuf_dtag, 0, &mt->mt_dmap); if (error) { device_printf(sc->sc_dev, "can't create RX buf DMA map\n"); goto out; /* XXX wrong! */ } error = bwn_dma_newbuf(dr, desc, mt, 1); if (error) { device_printf(sc->sc_dev, "failed to allocate RX buf\n"); goto out; /* XXX wrong! */ } } bus_dmamap_sync(dr->dr_ring_dtag, dr->dr_ring_dmap, BUS_DMASYNC_PREWRITE); dr->dr_usedslot = dr->dr_numslots; } out: return (dr); fail2: if (dr->dr_txhdr_cache != NULL) { contigfree(dr->dr_txhdr_cache, (dr->dr_numslots / BWN_TX_SLOTS_PER_FRAME) * BWN_MAXTXHDRSIZE, M_DEVBUF); } fail1: free(dr->dr_meta, M_DEVBUF); fail0: free(dr, M_DEVBUF); return (NULL); } static void bwn_dma_ringfree(struct bwn_dma_ring **dr) { if (dr == NULL) return; bwn_dma_free_descbufs(*dr); bwn_dma_free_ringmemory(*dr); if ((*dr)->dr_txhdr_cache != NULL) { contigfree((*dr)->dr_txhdr_cache, ((*dr)->dr_numslots / BWN_TX_SLOTS_PER_FRAME) * BWN_MAXTXHDRSIZE, M_DEVBUF); } free((*dr)->dr_meta, M_DEVBUF); free(*dr, M_DEVBUF); *dr = NULL; } static void bwn_dma_32_getdesc(struct bwn_dma_ring *dr, int slot, struct bwn_dmadesc_generic **gdesc, struct bwn_dmadesc_meta **meta) { struct bwn_dmadesc32 *desc; *meta = &(dr->dr_meta[slot]); desc = dr->dr_ring_descbase; desc = &(desc[slot]); *gdesc = (struct bwn_dmadesc_generic *)desc; } static void bwn_dma_32_setdesc(struct bwn_dma_ring *dr, struct bwn_dmadesc_generic *desc, bus_addr_t dmaaddr, uint16_t bufsize, int start, int end, int irq) { struct bwn_dmadesc32 *descbase; struct bwn_dma *dma; struct bhnd_dma_translation *dt; uint32_t addr, addrext, ctl; int slot; descbase = dr->dr_ring_descbase; dma = &dr->dr_mac->mac_method.dma; dt = &dma->translation; slot = (int)(&(desc->dma.dma32) - descbase); KASSERT(slot >= 0 && slot < dr->dr_numslots, ("%s:%d: fail", __func__, __LINE__)); addr = (dmaaddr & dt->addr_mask) | dt->base_addr; addrext = ((dmaaddr & dt->addrext_mask) >> dma->addrext_shift); ctl = bufsize & BWN_DMA32_DCTL_BYTECNT; if (slot == dr->dr_numslots - 1) ctl |= BWN_DMA32_DCTL_DTABLEEND; if (start) ctl |= BWN_DMA32_DCTL_FRAMESTART; if (end) ctl |= BWN_DMA32_DCTL_FRAMEEND; if (irq) ctl |= BWN_DMA32_DCTL_IRQ; ctl |= (addrext << BWN_DMA32_DCTL_ADDREXT_SHIFT) & BWN_DMA32_DCTL_ADDREXT_MASK; desc->dma.dma32.control = htole32(ctl); desc->dma.dma32.address = htole32(addr); } static void bwn_dma_32_start_transfer(struct bwn_dma_ring *dr, int slot) { BWN_DMA_WRITE(dr, BWN_DMA32_TXINDEX, (uint32_t)(slot * sizeof(struct bwn_dmadesc32))); } static void bwn_dma_32_suspend(struct bwn_dma_ring *dr) { BWN_DMA_WRITE(dr, BWN_DMA32_TXCTL, BWN_DMA_READ(dr, BWN_DMA32_TXCTL) | BWN_DMA32_TXSUSPEND); } static void bwn_dma_32_resume(struct bwn_dma_ring 
*dr) { BWN_DMA_WRITE(dr, BWN_DMA32_TXCTL, BWN_DMA_READ(dr, BWN_DMA32_TXCTL) & ~BWN_DMA32_TXSUSPEND); } static int bwn_dma_32_get_curslot(struct bwn_dma_ring *dr) { uint32_t val; val = BWN_DMA_READ(dr, BWN_DMA32_RXSTATUS); val &= BWN_DMA32_RXDPTR; return (val / sizeof(struct bwn_dmadesc32)); } static void bwn_dma_32_set_curslot(struct bwn_dma_ring *dr, int slot) { BWN_DMA_WRITE(dr, BWN_DMA32_RXINDEX, (uint32_t) (slot * sizeof(struct bwn_dmadesc32))); } static void bwn_dma_64_getdesc(struct bwn_dma_ring *dr, int slot, struct bwn_dmadesc_generic **gdesc, struct bwn_dmadesc_meta **meta) { struct bwn_dmadesc64 *desc; *meta = &(dr->dr_meta[slot]); desc = dr->dr_ring_descbase; desc = &(desc[slot]); *gdesc = (struct bwn_dmadesc_generic *)desc; } static void bwn_dma_64_setdesc(struct bwn_dma_ring *dr, struct bwn_dmadesc_generic *desc, bus_addr_t dmaaddr, uint16_t bufsize, int start, int end, int irq) { struct bwn_dmadesc64 *descbase; struct bwn_dma *dma; struct bhnd_dma_translation *dt; bhnd_addr_t addr; uint32_t addrhi, addrlo; uint32_t addrext; uint32_t ctl0, ctl1; int slot; descbase = dr->dr_ring_descbase; dma = &dr->dr_mac->mac_method.dma; dt = &dma->translation; slot = (int)(&(desc->dma.dma64) - descbase); KASSERT(slot >= 0 && slot < dr->dr_numslots, ("%s:%d: fail", __func__, __LINE__)); addr = (dmaaddr & dt->addr_mask) | dt->base_addr; addrhi = (addr >> 32); addrlo = (addr & UINT32_MAX); addrext = ((dmaaddr & dt->addrext_mask) >> dma->addrext_shift); ctl0 = 0; if (slot == dr->dr_numslots - 1) ctl0 |= BWN_DMA64_DCTL0_DTABLEEND; if (start) ctl0 |= BWN_DMA64_DCTL0_FRAMESTART; if (end) ctl0 |= BWN_DMA64_DCTL0_FRAMEEND; if (irq) ctl0 |= BWN_DMA64_DCTL0_IRQ; ctl1 = 0; ctl1 |= bufsize & BWN_DMA64_DCTL1_BYTECNT; ctl1 |= (addrext << BWN_DMA64_DCTL1_ADDREXT_SHIFT) & BWN_DMA64_DCTL1_ADDREXT_MASK; desc->dma.dma64.control0 = htole32(ctl0); desc->dma.dma64.control1 = htole32(ctl1); desc->dma.dma64.address_low = htole32(addrlo); desc->dma.dma64.address_high = htole32(addrhi); } static void bwn_dma_64_start_transfer(struct bwn_dma_ring *dr, int slot) { BWN_DMA_WRITE(dr, BWN_DMA64_TXINDEX, (uint32_t)(slot * sizeof(struct bwn_dmadesc64))); } static void bwn_dma_64_suspend(struct bwn_dma_ring *dr) { BWN_DMA_WRITE(dr, BWN_DMA64_TXCTL, BWN_DMA_READ(dr, BWN_DMA64_TXCTL) | BWN_DMA64_TXSUSPEND); } static void bwn_dma_64_resume(struct bwn_dma_ring *dr) { BWN_DMA_WRITE(dr, BWN_DMA64_TXCTL, BWN_DMA_READ(dr, BWN_DMA64_TXCTL) & ~BWN_DMA64_TXSUSPEND); } static int bwn_dma_64_get_curslot(struct bwn_dma_ring *dr) { uint32_t val; val = BWN_DMA_READ(dr, BWN_DMA64_RXSTATUS); val &= BWN_DMA64_RXSTATDPTR; return (val / sizeof(struct bwn_dmadesc64)); } static void bwn_dma_64_set_curslot(struct bwn_dma_ring *dr, int slot) { BWN_DMA_WRITE(dr, BWN_DMA64_RXINDEX, (uint32_t)(slot * sizeof(struct bwn_dmadesc64))); } static int bwn_dma_allocringmemory(struct bwn_dma_ring *dr) { struct bwn_mac *mac = dr->dr_mac; struct bwn_dma *dma = &mac->mac_method.dma; struct bwn_softc *sc = mac->mac_sc; int error; error = bus_dma_tag_create(dma->parent_dtag, BWN_ALIGN, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BWN_DMA_RINGMEMSIZE, 1, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &dr->dr_ring_dtag); if (error) { device_printf(sc->sc_dev, "can't create TX ring DMA tag: TODO frees\n"); return (-1); } error = bus_dmamem_alloc(dr->dr_ring_dtag, &dr->dr_ring_descbase, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dr->dr_ring_dmap); if (error) { device_printf(sc->sc_dev, "can't allocate DMA mem: TODO frees\n"); return (-1); } error = 
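	/*
	 * Loading the ring map runs bwn_dma_ring_addr(), which stores the
	 * single segment's bus address in dr_ring_dmabase; bwn_dma_setup()
	 * later programs that address into the DMA engine.
	 */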
bus_dmamap_load(dr->dr_ring_dtag, dr->dr_ring_dmap, dr->dr_ring_descbase, BWN_DMA_RINGMEMSIZE, bwn_dma_ring_addr, &dr->dr_ring_dmabase, BUS_DMA_NOWAIT); if (error) { device_printf(sc->sc_dev, "can't load DMA mem: TODO free\n"); return (-1); } return (0); } static void bwn_dma_setup(struct bwn_dma_ring *dr) { struct bwn_mac *mac; struct bwn_dma *dma; struct bhnd_dma_translation *dt; bhnd_addr_t addr, paddr; uint32_t addrhi, addrlo, addrext, value; mac = dr->dr_mac; dma = &mac->mac_method.dma; dt = &dma->translation; paddr = dr->dr_ring_dmabase; addr = (paddr & dt->addr_mask) | dt->base_addr; addrhi = (addr >> 32); addrlo = (addr & UINT32_MAX); addrext = ((paddr & dt->addrext_mask) >> dma->addrext_shift); if (dr->dr_tx) { dr->dr_curslot = -1; if (dr->dr_type == BHND_DMA_ADDR_64BIT) { value = BWN_DMA64_TXENABLE; value |= BWN_DMA64_TXPARITY_DISABLE; value |= (addrext << BWN_DMA64_TXADDREXT_SHIFT) & BWN_DMA64_TXADDREXT_MASK; BWN_DMA_WRITE(dr, BWN_DMA64_TXCTL, value); BWN_DMA_WRITE(dr, BWN_DMA64_TXRINGLO, addrlo); BWN_DMA_WRITE(dr, BWN_DMA64_TXRINGHI, addrhi); } else { value = BWN_DMA32_TXENABLE; value |= BWN_DMA32_TXPARITY_DISABLE; value |= (addrext << BWN_DMA32_TXADDREXT_SHIFT) & BWN_DMA32_TXADDREXT_MASK; BWN_DMA_WRITE(dr, BWN_DMA32_TXCTL, value); BWN_DMA_WRITE(dr, BWN_DMA32_TXRING, addrlo); } return; } /* * set for RX */ dr->dr_usedslot = dr->dr_numslots; if (dr->dr_type == BHND_DMA_ADDR_64BIT) { value = (dr->dr_frameoffset << BWN_DMA64_RXFROFF_SHIFT); value |= BWN_DMA64_RXENABLE; value |= BWN_DMA64_RXPARITY_DISABLE; value |= (addrext << BWN_DMA64_RXADDREXT_SHIFT) & BWN_DMA64_RXADDREXT_MASK; BWN_DMA_WRITE(dr, BWN_DMA64_RXCTL, value); BWN_DMA_WRITE(dr, BWN_DMA64_RXRINGLO, addrlo); BWN_DMA_WRITE(dr, BWN_DMA64_RXRINGHI, addrhi); BWN_DMA_WRITE(dr, BWN_DMA64_RXINDEX, dr->dr_numslots * sizeof(struct bwn_dmadesc64)); } else { value = (dr->dr_frameoffset << BWN_DMA32_RXFROFF_SHIFT); value |= BWN_DMA32_RXENABLE; value |= BWN_DMA32_RXPARITY_DISABLE; value |= (addrext << BWN_DMA32_RXADDREXT_SHIFT) & BWN_DMA32_RXADDREXT_MASK; BWN_DMA_WRITE(dr, BWN_DMA32_RXCTL, value); BWN_DMA_WRITE(dr, BWN_DMA32_RXRING, addrlo); BWN_DMA_WRITE(dr, BWN_DMA32_RXINDEX, dr->dr_numslots * sizeof(struct bwn_dmadesc32)); } } static void bwn_dma_free_ringmemory(struct bwn_dma_ring *dr) { bus_dmamap_unload(dr->dr_ring_dtag, dr->dr_ring_dmap); bus_dmamem_free(dr->dr_ring_dtag, dr->dr_ring_descbase, dr->dr_ring_dmap); } static void bwn_dma_cleanup(struct bwn_dma_ring *dr) { if (dr->dr_tx) { bwn_dma_tx_reset(dr->dr_mac, dr->dr_base, dr->dr_type); if (dr->dr_type == BHND_DMA_ADDR_64BIT) { BWN_DMA_WRITE(dr, BWN_DMA64_TXRINGLO, 0); BWN_DMA_WRITE(dr, BWN_DMA64_TXRINGHI, 0); } else BWN_DMA_WRITE(dr, BWN_DMA32_TXRING, 0); } else { bwn_dma_rx_reset(dr->dr_mac, dr->dr_base, dr->dr_type); if (dr->dr_type == BHND_DMA_ADDR_64BIT) { BWN_DMA_WRITE(dr, BWN_DMA64_RXRINGLO, 0); BWN_DMA_WRITE(dr, BWN_DMA64_RXRINGHI, 0); } else BWN_DMA_WRITE(dr, BWN_DMA32_RXRING, 0); } } static void bwn_dma_free_descbufs(struct bwn_dma_ring *dr) { struct bwn_dmadesc_generic *desc; struct bwn_dmadesc_meta *meta; struct bwn_mac *mac = dr->dr_mac; struct bwn_dma *dma = &mac->mac_method.dma; struct bwn_softc *sc = mac->mac_sc; int i; if (!dr->dr_usedslot) return; for (i = 0; i < dr->dr_numslots; i++) { dr->getdesc(dr, i, &desc, &meta); if (meta->mt_m == NULL) { if (!dr->dr_tx) device_printf(sc->sc_dev, "%s: not TX?\n", __func__); continue; } if (dr->dr_tx) { if (meta->mt_txtype == BWN_DMADESC_METATYPE_HEADER) bus_dmamap_unload(dr->dr_txring_dtag, meta->mt_dmap); else if 
(meta->mt_txtype == BWN_DMADESC_METATYPE_BODY) bus_dmamap_unload(dma->txbuf_dtag, meta->mt_dmap); } else bus_dmamap_unload(dma->rxbuf_dtag, meta->mt_dmap); bwn_dma_free_descbuf(dr, meta); } } static int bwn_dma_tx_reset(struct bwn_mac *mac, uint16_t base, int type) { struct bwn_softc *sc = mac->mac_sc; uint32_t value; int i; uint16_t offset; for (i = 0; i < 10; i++) { offset = (type == BHND_DMA_ADDR_64BIT) ? BWN_DMA64_TXSTATUS : BWN_DMA32_TXSTATUS; value = BWN_READ_4(mac, base + offset); if (type == BHND_DMA_ADDR_64BIT) { value &= BWN_DMA64_TXSTAT; if (value == BWN_DMA64_TXSTAT_DISABLED || value == BWN_DMA64_TXSTAT_IDLEWAIT || value == BWN_DMA64_TXSTAT_STOPPED) break; } else { value &= BWN_DMA32_TXSTATE; if (value == BWN_DMA32_TXSTAT_DISABLED || value == BWN_DMA32_TXSTAT_IDLEWAIT || value == BWN_DMA32_TXSTAT_STOPPED) break; } DELAY(1000); } offset = (type == BHND_DMA_ADDR_64BIT) ? BWN_DMA64_TXCTL : BWN_DMA32_TXCTL; BWN_WRITE_4(mac, base + offset, 0); for (i = 0; i < 10; i++) { offset = (type == BHND_DMA_ADDR_64BIT) ? BWN_DMA64_TXSTATUS : BWN_DMA32_TXSTATUS; value = BWN_READ_4(mac, base + offset); if (type == BHND_DMA_ADDR_64BIT) { value &= BWN_DMA64_TXSTAT; if (value == BWN_DMA64_TXSTAT_DISABLED) { i = -1; break; } } else { value &= BWN_DMA32_TXSTATE; if (value == BWN_DMA32_TXSTAT_DISABLED) { i = -1; break; } } DELAY(1000); } if (i != -1) { device_printf(sc->sc_dev, "%s: timed out\n", __func__); return (ENODEV); } DELAY(1000); return (0); } static int bwn_dma_rx_reset(struct bwn_mac *mac, uint16_t base, int type) { struct bwn_softc *sc = mac->mac_sc; uint32_t value; int i; uint16_t offset; offset = (type == BHND_DMA_ADDR_64BIT) ? BWN_DMA64_RXCTL : BWN_DMA32_RXCTL; BWN_WRITE_4(mac, base + offset, 0); for (i = 0; i < 10; i++) { offset = (type == BHND_DMA_ADDR_64BIT) ? BWN_DMA64_RXSTATUS : BWN_DMA32_RXSTATUS; value = BWN_READ_4(mac, base + offset); if (type == BHND_DMA_ADDR_64BIT) { value &= BWN_DMA64_RXSTAT; if (value == BWN_DMA64_RXSTAT_DISABLED) { i = -1; break; } } else { value &= BWN_DMA32_RXSTATE; if (value == BWN_DMA32_RXSTAT_DISABLED) { i = -1; break; } } DELAY(1000); } if (i != -1) { device_printf(sc->sc_dev, "%s: timed out\n", __func__); return (ENODEV); } return (0); } static void bwn_dma_free_descbuf(struct bwn_dma_ring *dr, struct bwn_dmadesc_meta *meta) { if (meta->mt_m != NULL) { m_freem(meta->mt_m); meta->mt_m = NULL; } if (meta->mt_ni != NULL) { ieee80211_free_node(meta->mt_ni); meta->mt_ni = NULL; } } static void bwn_dma_set_redzone(struct bwn_dma_ring *dr, struct mbuf *m) { struct bwn_rxhdr4 *rxhdr; unsigned char *frame; rxhdr = mtod(m, struct bwn_rxhdr4 *); rxhdr->frame_len = 0; KASSERT(dr->dr_rx_bufsize >= dr->dr_frameoffset + sizeof(struct bwn_plcp6) + 2, ("%s:%d: fail", __func__, __LINE__)); frame = mtod(m, char *) + dr->dr_frameoffset; memset(frame, 0xff, sizeof(struct bwn_plcp6) + 2 /* padding */); } static uint8_t bwn_dma_check_redzone(struct bwn_dma_ring *dr, struct mbuf *m) { unsigned char *f = mtod(m, char *) + dr->dr_frameoffset; return ((f[0] & f[1] & f[2] & f[3] & f[4] & f[5] & f[6] & f[7]) == 0xff); } static void bwn_wme_init(struct bwn_mac *mac) { bwn_wme_load(mac); /* enable WME support. */ bwn_hf_write(mac, bwn_hf_read(mac) | BWN_HF_EDCF); BWN_WRITE_2(mac, BWN_IFSCTL, BWN_READ_2(mac, BWN_IFSCTL) | BWN_IFSCTL_USE_EDCF); } static void bwn_spu_setdelay(struct bwn_mac *mac, int idle) { struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; uint16_t delay; /* microsec */ delay = (mac->mac_phy.type == BWN_PHYTYPE_A) ? 
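	/*
	 * SPU wakeup delay in microseconds: 3700 for the A PHY, 1050
	 * otherwise; dropped to 500 when idle or in IBSS, and raised to at
	 * least 2400 on the 2050 radio rev 8.
	 */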
3700 : 1050; if (ic->ic_opmode == IEEE80211_M_IBSS || idle) delay = 500; if ((mac->mac_phy.rf_ver == 0x2050) && (mac->mac_phy.rf_rev == 8)) delay = max(delay, (uint16_t)2400); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_SPU_WAKEUP, delay); } static void bwn_bt_enable(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; uint64_t hf; if (bwn_bluetooth == 0) return; if ((sc->sc_board_info.board_flags & BHND_BFL_BTCOEX) == 0) return; if (mac->mac_phy.type != BWN_PHYTYPE_B && !mac->mac_phy.gmode) return; hf = bwn_hf_read(mac); if (sc->sc_board_info.board_flags & BHND_BFL_BTC2WIRE_ALTGPIO) hf |= BWN_HF_BT_COEXISTALT; else hf |= BWN_HF_BT_COEXIST; bwn_hf_write(mac, hf); } static void bwn_set_macaddr(struct bwn_mac *mac) { bwn_mac_write_bssid(mac); bwn_mac_setfilter(mac, BWN_MACFILTER_SELF, mac->mac_sc->sc_ic.ic_macaddr); } static void bwn_clear_keys(struct bwn_mac *mac) { int i; for (i = 0; i < mac->mac_max_nr_keys; i++) { KASSERT(i >= 0 && i < mac->mac_max_nr_keys, ("%s:%d: fail", __func__, __LINE__)); bwn_key_dowrite(mac, i, BWN_SEC_ALGO_NONE, NULL, BWN_SEC_KEYSIZE, NULL); if ((i <= 3) && !BWN_SEC_NEWAPI(mac)) { bwn_key_dowrite(mac, i + 4, BWN_SEC_ALGO_NONE, NULL, BWN_SEC_KEYSIZE, NULL); } mac->mac_key[i].keyconf = NULL; } } static void bwn_crypt_init(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; mac->mac_max_nr_keys = (bhnd_get_hwrev(sc->sc_dev) >= 5) ? 58 : 20; KASSERT(mac->mac_max_nr_keys <= N(mac->mac_key), ("%s:%d: fail", __func__, __LINE__)); mac->mac_ktp = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_KEY_TABLEP); mac->mac_ktp *= 2; if (bhnd_get_hwrev(sc->sc_dev) >= 5) BWN_WRITE_2(mac, BWN_RCMTA_COUNT, mac->mac_max_nr_keys - 8); bwn_clear_keys(mac); } static void bwn_chip_exit(struct bwn_mac *mac) { bwn_phy_exit(mac); } static int bwn_fw_fillinfo(struct bwn_mac *mac) { int error; error = bwn_fw_gets(mac, BWN_FWTYPE_DEFAULT); if (error == 0) return (0); error = bwn_fw_gets(mac, BWN_FWTYPE_OPENSOURCE); if (error == 0) return (0); return (error); } /** * Request that the GPIO controller tristate all pins set in @p mask, granting * the MAC core control over the pins. * * @param mac bwn MAC state. * @param pins If the bit position for a pin number is set to one, tristate the * pin. 
*/ int bwn_gpio_control(struct bwn_mac *mac, uint32_t pins) { struct bwn_softc *sc; uint32_t flags[32]; int error; sc = mac->mac_sc; /* Determine desired pin flags */ for (size_t pin = 0; pin < nitems(flags); pin++) { uint32_t pinbit = (1 << pin); if (pins & pinbit) { /* Tristate output */ flags[pin] = GPIO_PIN_OUTPUT|GPIO_PIN_TRISTATE; } else { /* Leave unmodified */ flags[pin] = 0; } } /* Configure all pins */ error = GPIO_PIN_CONFIG_32(sc->sc_gpio, 0, nitems(flags), flags); if (error) { device_printf(sc->sc_dev, "error configuring %s pin flags: " "%d\n", device_get_nameunit(sc->sc_gpio), error); return (error); } return (0); } static int bwn_gpio_init(struct bwn_mac *mac) { struct bwn_softc *sc; uint32_t pins; sc = mac->mac_sc; pins = 0xF; BWN_WRITE_4(mac, BWN_MACCTL, BWN_READ_4(mac, BWN_MACCTL) & ~BWN_MACCTL_GPOUT_MASK); BWN_WRITE_2(mac, BWN_GPIO_MASK, BWN_READ_2(mac, BWN_GPIO_MASK) | pins); if (sc->sc_board_info.board_flags & BHND_BFL_PACTRL) { /* MAC core is responsible for toggling PAREF via gpio9 */ BWN_WRITE_2(mac, BWN_GPIO_MASK, BWN_READ_2(mac, BWN_GPIO_MASK) | BHND_GPIO_BOARD_PACTRL); pins |= BHND_GPIO_BOARD_PACTRL; } return (bwn_gpio_control(mac, pins)); } static int bwn_fw_loadinitvals(struct bwn_mac *mac) { #define GETFWOFFSET(fwp, offset) \ ((const struct bwn_fwinitvals *)((const char *)fwp.fw->data + offset)) const size_t hdr_len = sizeof(struct bwn_fwhdr); const struct bwn_fwhdr *hdr; struct bwn_fw *fw = &mac->mac_fw; int error; hdr = (const struct bwn_fwhdr *)(fw->initvals.fw->data); error = bwn_fwinitvals_write(mac, GETFWOFFSET(fw->initvals, hdr_len), be32toh(hdr->size), fw->initvals.fw->datasize - hdr_len); if (error) return (error); if (fw->initvals_band.fw) { hdr = (const struct bwn_fwhdr *)(fw->initvals_band.fw->data); error = bwn_fwinitvals_write(mac, GETFWOFFSET(fw->initvals_band, hdr_len), be32toh(hdr->size), fw->initvals_band.fw->datasize - hdr_len); } return (error); #undef GETFWOFFSET } static int bwn_phy_init(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; int error; mac->mac_phy.chan = mac->mac_phy.get_default_chan(mac); mac->mac_phy.rf_onoff(mac, 1); error = mac->mac_phy.init(mac); if (error) { device_printf(sc->sc_dev, "PHY init failed\n"); goto fail0; } error = bwn_switch_channel(mac, mac->mac_phy.get_default_chan(mac)); if (error) { device_printf(sc->sc_dev, "failed to switch default channel\n"); goto fail1; } return (0); fail1: if (mac->mac_phy.exit) mac->mac_phy.exit(mac); fail0: mac->mac_phy.rf_onoff(mac, 0); return (error); } static void bwn_set_txantenna(struct bwn_mac *mac, int antenna) { uint16_t ant; uint16_t tmp; ant = bwn_ant2phy(antenna); /* For ACK/CTS */ tmp = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_ACKCTS_PHYCTL); tmp = (tmp & ~BWN_TX_PHY_ANT) | ant; bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_ACKCTS_PHYCTL, tmp); /* For Probe Resposes */ tmp = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_PROBE_RESP_PHYCTL); tmp = (tmp & ~BWN_TX_PHY_ANT) | ant; bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_PROBE_RESP_PHYCTL, tmp); } static void bwn_set_opmode(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; uint32_t ctl; uint16_t cfp_pretbtt; ctl = BWN_READ_4(mac, BWN_MACCTL); ctl &= ~(BWN_MACCTL_HOSTAP | BWN_MACCTL_PASS_CTL | BWN_MACCTL_PASS_BADPLCP | BWN_MACCTL_PASS_BADFCS | BWN_MACCTL_PROMISC | BWN_MACCTL_BEACON_PROMISC); ctl |= BWN_MACCTL_STA; if (ic->ic_opmode == IEEE80211_M_HOSTAP || ic->ic_opmode == IEEE80211_M_MBSS) ctl |= BWN_MACCTL_HOSTAP; else if (ic->ic_opmode == IEEE80211_M_IBSS) ctl &= 
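	    /* IBSS runs with neither the STA nor the HOSTAP bit set */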
~BWN_MACCTL_STA; ctl |= sc->sc_filters; if (bhnd_get_hwrev(sc->sc_dev) <= 4) ctl |= BWN_MACCTL_PROMISC; BWN_WRITE_4(mac, BWN_MACCTL, ctl); cfp_pretbtt = 2; if ((ctl & BWN_MACCTL_STA) && !(ctl & BWN_MACCTL_HOSTAP)) { if (sc->sc_cid.chip_id == BHND_CHIPID_BCM4306 && sc->sc_cid.chip_rev == 3) cfp_pretbtt = 100; else cfp_pretbtt = 50; } BWN_WRITE_2(mac, 0x612, cfp_pretbtt); } static void bwn_dma_ring_addr(void *arg, bus_dma_segment_t *seg, int nseg, int error) { if (!error) { KASSERT(nseg == 1, ("too many segments(%d)\n", nseg)); *((bus_addr_t *)arg) = seg->ds_addr; } } void bwn_dummy_transmission(struct bwn_mac *mac, int ofdm, int paon) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_softc *sc = mac->mac_sc; unsigned int i, max_loop; uint16_t value; uint32_t buffer[5] = { 0x00000000, 0x00d40000, 0x00000000, 0x01000000, 0x00000000 }; if (ofdm) { max_loop = 0x1e; buffer[0] = 0x000201cc; } else { max_loop = 0xfa; buffer[0] = 0x000b846e; } BWN_ASSERT_LOCKED(mac->mac_sc); for (i = 0; i < 5; i++) bwn_ram_write(mac, i * 4, buffer[i]); BWN_WRITE_2(mac, 0x0568, 0x0000); BWN_WRITE_2(mac, 0x07c0, (bhnd_get_hwrev(sc->sc_dev) < 11) ? 0x0000 : 0x0100); value = (ofdm ? 0x41 : 0x40); BWN_WRITE_2(mac, 0x050c, value); if (phy->type == BWN_PHYTYPE_N || phy->type == BWN_PHYTYPE_LP || phy->type == BWN_PHYTYPE_LCN) BWN_WRITE_2(mac, 0x0514, 0x1a02); BWN_WRITE_2(mac, 0x0508, 0x0000); BWN_WRITE_2(mac, 0x050a, 0x0000); BWN_WRITE_2(mac, 0x054c, 0x0000); BWN_WRITE_2(mac, 0x056a, 0x0014); BWN_WRITE_2(mac, 0x0568, 0x0826); BWN_WRITE_2(mac, 0x0500, 0x0000); /* XXX TODO: n phy pa override? */ switch (phy->type) { case BWN_PHYTYPE_N: case BWN_PHYTYPE_LCN: BWN_WRITE_2(mac, 0x0502, 0x00d0); break; case BWN_PHYTYPE_LP: BWN_WRITE_2(mac, 0x0502, 0x0050); break; default: BWN_WRITE_2(mac, 0x0502, 0x0030); break; } /* flush */ BWN_READ_2(mac, 0x0502); if (phy->rf_ver == 0x2050 && phy->rf_rev <= 0x5) BWN_RF_WRITE(mac, 0x0051, 0x0017); for (i = 0x00; i < max_loop; i++) { value = BWN_READ_2(mac, 0x050e); if (value & 0x0080) break; DELAY(10); } for (i = 0x00; i < 0x0a; i++) { value = BWN_READ_2(mac, 0x050e); if (value & 0x0400) break; DELAY(10); } for (i = 0x00; i < 0x19; i++) { value = BWN_READ_2(mac, 0x0690); if (!(value & 0x0100)) break; DELAY(10); } if (phy->rf_ver == 0x2050 && phy->rf_rev <= 0x5) BWN_RF_WRITE(mac, 0x0051, 0x0037); } void bwn_ram_write(struct bwn_mac *mac, uint16_t offset, uint32_t val) { uint32_t macctl; KASSERT(offset % 4 == 0, ("%s:%d: fail", __func__, __LINE__)); macctl = BWN_READ_4(mac, BWN_MACCTL); if (macctl & BWN_MACCTL_BIGENDIAN) printf("TODO: need swap\n"); BWN_WRITE_4(mac, BWN_RAM_CONTROL, offset); BWN_BARRIER(mac, BWN_RAM_CONTROL, 4, BUS_SPACE_BARRIER_WRITE); BWN_WRITE_4(mac, BWN_RAM_DATA, val); } void bwn_mac_suspend(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; int i; uint32_t tmp; KASSERT(mac->mac_suspended >= 0, ("%s:%d: fail", __func__, __LINE__)); DPRINTF(mac->mac_sc, BWN_DEBUG_RESET, "%s: suspended=%d\n", __func__, mac->mac_suspended); if (mac->mac_suspended == 0) { bwn_psctl(mac, BWN_PS_AWAKE); BWN_WRITE_4(mac, BWN_MACCTL, BWN_READ_4(mac, BWN_MACCTL) & ~BWN_MACCTL_ON); BWN_READ_4(mac, BWN_MACCTL); for (i = 35; i; i--) { tmp = BWN_READ_4(mac, BWN_INTR_REASON); if (tmp & BWN_INTR_MAC_SUSPENDED) goto out; DELAY(10); } for (i = 40; i; i--) { tmp = BWN_READ_4(mac, BWN_INTR_REASON); if (tmp & BWN_INTR_MAC_SUSPENDED) goto out; DELAY(1000); } device_printf(sc->sc_dev, "MAC suspend failed\n"); } out: mac->mac_suspended++; } void bwn_mac_enable(struct bwn_mac *mac) { struct bwn_softc *sc = 
mac->mac_sc; uint16_t state; DPRINTF(mac->mac_sc, BWN_DEBUG_RESET, "%s: suspended=%d\n", __func__, mac->mac_suspended); state = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_UCODESTAT); if (state != BWN_SHARED_UCODESTAT_SUSPEND && state != BWN_SHARED_UCODESTAT_SLEEP) { DPRINTF(sc, BWN_DEBUG_FW, "%s: warn: firmware state (%d)\n", __func__, state); } mac->mac_suspended--; KASSERT(mac->mac_suspended >= 0, ("%s:%d: fail", __func__, __LINE__)); if (mac->mac_suspended == 0) { BWN_WRITE_4(mac, BWN_MACCTL, BWN_READ_4(mac, BWN_MACCTL) | BWN_MACCTL_ON); BWN_WRITE_4(mac, BWN_INTR_REASON, BWN_INTR_MAC_SUSPENDED); BWN_READ_4(mac, BWN_MACCTL); BWN_READ_4(mac, BWN_INTR_REASON); bwn_psctl(mac, 0); } } void bwn_psctl(struct bwn_mac *mac, uint32_t flags) { struct bwn_softc *sc = mac->mac_sc; int i; uint16_t ucstat; KASSERT(!((flags & BWN_PS_ON) && (flags & BWN_PS_OFF)), ("%s:%d: fail", __func__, __LINE__)); KASSERT(!((flags & BWN_PS_AWAKE) && (flags & BWN_PS_ASLEEP)), ("%s:%d: fail", __func__, __LINE__)); /* XXX forcibly awake and hwps-off */ BWN_WRITE_4(mac, BWN_MACCTL, (BWN_READ_4(mac, BWN_MACCTL) | BWN_MACCTL_AWAKE) & ~BWN_MACCTL_HWPS); BWN_READ_4(mac, BWN_MACCTL); if (bhnd_get_hwrev(sc->sc_dev) >= 5) { for (i = 0; i < 100; i++) { ucstat = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_UCODESTAT); if (ucstat != BWN_SHARED_UCODESTAT_SLEEP) break; DELAY(10); } } DPRINTF(mac->mac_sc, BWN_DEBUG_RESET, "%s: ucstat=%d\n", __func__, ucstat); } static int bwn_fw_gets(struct bwn_mac *mac, enum bwn_fwtype type) { struct bwn_softc *sc = mac->mac_sc; struct bwn_fw *fw = &mac->mac_fw; const uint8_t rev = bhnd_get_hwrev(sc->sc_dev); const char *filename; uint16_t iost; int error; /* microcode */ filename = NULL; switch (rev) { case 42: if (mac->mac_phy.type == BWN_PHYTYPE_AC) filename = "ucode42"; break; case 40: if (mac->mac_phy.type == BWN_PHYTYPE_AC) filename = "ucode40"; break; case 33: if (mac->mac_phy.type == BWN_PHYTYPE_LCN40) filename = "ucode33_lcn40"; break; case 30: if (mac->mac_phy.type == BWN_PHYTYPE_N) filename = "ucode30_mimo"; break; case 29: if (mac->mac_phy.type == BWN_PHYTYPE_HT) filename = "ucode29_mimo"; break; case 26: if (mac->mac_phy.type == BWN_PHYTYPE_HT) filename = "ucode26_mimo"; break; case 28: case 25: if (mac->mac_phy.type == BWN_PHYTYPE_N) filename = "ucode25_mimo"; else if (mac->mac_phy.type == BWN_PHYTYPE_LCN) filename = "ucode25_lcn"; break; case 24: if (mac->mac_phy.type == BWN_PHYTYPE_LCN) filename = "ucode24_lcn"; break; case 23: if (mac->mac_phy.type == BWN_PHYTYPE_N) filename = "ucode16_mimo"; break; case 16: case 17: case 18: case 19: if (mac->mac_phy.type == BWN_PHYTYPE_N) filename = "ucode16_mimo"; else if (mac->mac_phy.type == BWN_PHYTYPE_LP) filename = "ucode16_lp"; break; case 15: filename = "ucode15"; break; case 14: filename = "ucode14"; break; case 13: filename = "ucode13"; break; case 12: case 11: filename = "ucode11"; break; case 10: case 9: case 8: case 7: case 6: case 5: filename = "ucode5"; break; default: device_printf(sc->sc_dev, "no ucode for rev %d\n", rev); bwn_release_firmware(mac); return (EOPNOTSUPP); } device_printf(sc->sc_dev, "ucode fw: %s\n", filename); error = bwn_fw_get(mac, type, filename, &fw->ucode); if (error) { bwn_release_firmware(mac); return (error); } /* PCM */ KASSERT(fw->no_pcmfile == 0, ("%s:%d fail", __func__, __LINE__)); if (rev >= 5 && rev <= 10) { error = bwn_fw_get(mac, type, "pcm5", &fw->pcm); if (error == ENOENT) fw->no_pcmfile = 1; else if (error) { bwn_release_firmware(mac); return (error); } } else if (rev < 11) { 
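		/* rev < 5: no PCM image exists for these cores, bail out */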
device_printf(sc->sc_dev, "no PCM for rev %d\n", rev); bwn_release_firmware(mac); return (EOPNOTSUPP); } /* initvals */ error = bhnd_read_iost(sc->sc_dev, &iost); if (error) goto fail1; switch (mac->mac_phy.type) { case BWN_PHYTYPE_A: if (rev < 5 || rev > 10) goto fail1; if (iost & BWN_IOST_HAVE_2GHZ) filename = "a0g1initvals5"; else filename = "a0g0initvals5"; break; case BWN_PHYTYPE_G: if (rev >= 5 && rev <= 10) filename = "b0g0initvals5"; else if (rev >= 13) filename = "b0g0initvals13"; else goto fail1; break; case BWN_PHYTYPE_LP: if (rev == 13) filename = "lp0initvals13"; else if (rev == 14) filename = "lp0initvals14"; else if (rev >= 15) filename = "lp0initvals15"; else goto fail1; break; case BWN_PHYTYPE_N: if (rev == 30) filename = "n16initvals30"; else if (rev == 28 || rev == 25) filename = "n0initvals25"; else if (rev == 24) filename = "n0initvals24"; else if (rev == 23) filename = "n0initvals16"; else if (rev >= 16 && rev <= 18) filename = "n0initvals16"; else if (rev >= 11 && rev <= 12) filename = "n0initvals11"; else goto fail1; break; default: goto fail1; } error = bwn_fw_get(mac, type, filename, &fw->initvals); if (error) { bwn_release_firmware(mac); return (error); } /* bandswitch initvals */ switch (mac->mac_phy.type) { case BWN_PHYTYPE_A: if (rev >= 5 && rev <= 10) { if (iost & BWN_IOST_HAVE_2GHZ) filename = "a0g1bsinitvals5"; else filename = "a0g0bsinitvals5"; } else if (rev >= 11) filename = NULL; else goto fail1; break; case BWN_PHYTYPE_G: if (rev >= 5 && rev <= 10) filename = "b0g0bsinitvals5"; else if (rev >= 11) filename = NULL; else goto fail1; break; case BWN_PHYTYPE_LP: if (rev == 13) filename = "lp0bsinitvals13"; else if (rev == 14) filename = "lp0bsinitvals14"; else if (rev >= 15) filename = "lp0bsinitvals15"; else goto fail1; break; case BWN_PHYTYPE_N: if (rev == 30) filename = "n16bsinitvals30"; else if (rev == 28 || rev == 25) filename = "n0bsinitvals25"; else if (rev == 24) filename = "n0bsinitvals24"; else if (rev == 23) filename = "n0bsinitvals16"; else if (rev >= 16 && rev <= 18) filename = "n0bsinitvals16"; else if (rev >= 11 && rev <= 12) filename = "n0bsinitvals11"; else goto fail1; break; default: device_printf(sc->sc_dev, "unknown phy (%d)\n", mac->mac_phy.type); goto fail1; } error = bwn_fw_get(mac, type, filename, &fw->initvals_band); if (error) { bwn_release_firmware(mac); return (error); } return (0); fail1: device_printf(sc->sc_dev, "no INITVALS for rev %d, phy.type %d\n", rev, mac->mac_phy.type); bwn_release_firmware(mac); return (EOPNOTSUPP); } static int bwn_fw_get(struct bwn_mac *mac, enum bwn_fwtype type, const char *name, struct bwn_fwfile *bfw) { const struct bwn_fwhdr *hdr; struct bwn_softc *sc = mac->mac_sc; const struct firmware *fw; char namebuf[64]; if (name == NULL) { bwn_do_release_fw(bfw); return (0); } if (bfw->filename != NULL) { if (bfw->type == type && (strcmp(bfw->filename, name) == 0)) return (0); bwn_do_release_fw(bfw); } snprintf(namebuf, sizeof(namebuf), "bwn%s_v4_%s%s", (type == BWN_FWTYPE_OPENSOURCE) ? "-open" : "", (mac->mac_phy.type == BWN_PHYTYPE_LP) ? 
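	    /* firmware image names look like bwn[-open]_v4_[lp_]<name> */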
"lp_" : "", name); /* XXX Sleeping on "fwload" with the non-sleepable locks held */ fw = firmware_get(namebuf); if (fw == NULL) { device_printf(sc->sc_dev, "the fw file(%s) not found\n", namebuf); return (ENOENT); } if (fw->datasize < sizeof(struct bwn_fwhdr)) goto fail; hdr = (const struct bwn_fwhdr *)(fw->data); switch (hdr->type) { case BWN_FWTYPE_UCODE: case BWN_FWTYPE_PCM: if (be32toh(hdr->size) != (fw->datasize - sizeof(struct bwn_fwhdr))) goto fail; /* FALLTHROUGH */ case BWN_FWTYPE_IV: if (hdr->ver != 1) goto fail; break; default: goto fail; } bfw->filename = name; bfw->fw = fw; bfw->type = type; return (0); fail: device_printf(sc->sc_dev, "the fw file(%s) format error\n", namebuf); if (fw != NULL) firmware_put(fw, FIRMWARE_UNLOAD); return (EPROTO); } static void bwn_release_firmware(struct bwn_mac *mac) { bwn_do_release_fw(&mac->mac_fw.ucode); bwn_do_release_fw(&mac->mac_fw.pcm); bwn_do_release_fw(&mac->mac_fw.initvals); bwn_do_release_fw(&mac->mac_fw.initvals_band); } static void bwn_do_release_fw(struct bwn_fwfile *bfw) { if (bfw->fw != NULL) firmware_put(bfw->fw, FIRMWARE_UNLOAD); bfw->fw = NULL; bfw->filename = NULL; } static int bwn_fw_loaducode(struct bwn_mac *mac) { #define GETFWOFFSET(fwp, offset) \ ((const uint32_t *)((const char *)fwp.fw->data + offset)) #define GETFWSIZE(fwp, offset) \ ((fwp.fw->datasize - offset) / sizeof(uint32_t)) struct bwn_softc *sc = mac->mac_sc; const uint32_t *data; unsigned int i; uint32_t ctl; uint16_t date, fwcaps, time; int error = 0; ctl = BWN_READ_4(mac, BWN_MACCTL); ctl |= BWN_MACCTL_MCODE_JMP0; KASSERT(!(ctl & BWN_MACCTL_MCODE_RUN), ("%s:%d: fail", __func__, __LINE__)); BWN_WRITE_4(mac, BWN_MACCTL, ctl); for (i = 0; i < 64; i++) bwn_shm_write_2(mac, BWN_SCRATCH, i, 0); for (i = 0; i < 4096; i += 2) bwn_shm_write_2(mac, BWN_SHARED, i, 0); data = GETFWOFFSET(mac->mac_fw.ucode, sizeof(struct bwn_fwhdr)); bwn_shm_ctlword(mac, BWN_UCODE | BWN_SHARED_AUTOINC, 0x0000); for (i = 0; i < GETFWSIZE(mac->mac_fw.ucode, sizeof(struct bwn_fwhdr)); i++) { BWN_WRITE_4(mac, BWN_SHM_DATA, be32toh(data[i])); DELAY(10); } if (mac->mac_fw.pcm.fw) { data = GETFWOFFSET(mac->mac_fw.pcm, sizeof(struct bwn_fwhdr)); bwn_shm_ctlword(mac, BWN_HW, 0x01ea); BWN_WRITE_4(mac, BWN_SHM_DATA, 0x00004000); bwn_shm_ctlword(mac, BWN_HW, 0x01eb); for (i = 0; i < GETFWSIZE(mac->mac_fw.pcm, sizeof(struct bwn_fwhdr)); i++) { BWN_WRITE_4(mac, BWN_SHM_DATA, be32toh(data[i])); DELAY(10); } } BWN_WRITE_4(mac, BWN_INTR_REASON, BWN_INTR_ALL); BWN_WRITE_4(mac, BWN_MACCTL, (BWN_READ_4(mac, BWN_MACCTL) & ~BWN_MACCTL_MCODE_JMP0) | BWN_MACCTL_MCODE_RUN); for (i = 0; i < 21; i++) { if (BWN_READ_4(mac, BWN_INTR_REASON) == BWN_INTR_MAC_SUSPENDED) break; if (i >= 20) { device_printf(sc->sc_dev, "ucode timeout\n"); error = ENXIO; goto error; } DELAY(50000); } BWN_READ_4(mac, BWN_INTR_REASON); mac->mac_fw.rev = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_UCODE_REV); if (mac->mac_fw.rev <= 0x128) { device_printf(sc->sc_dev, "the firmware is too old\n"); error = EOPNOTSUPP; goto error; } /* * Determine firmware header version; needed for TX/RX packet * handling. */ if (mac->mac_fw.rev >= 598) mac->mac_fw.fw_hdr_format = BWN_FW_HDR_598; else if (mac->mac_fw.rev >= 410) mac->mac_fw.fw_hdr_format = BWN_FW_HDR_410; else mac->mac_fw.fw_hdr_format = BWN_FW_HDR_351; /* * We don't support rev 598 or later; that requires * another round of changes to the TX/RX descriptor * and status layout. * * So, complain this is the case and exit out, rather * than attaching and then failing. 
*/ #if 0 if (mac->mac_fw.fw_hdr_format == BWN_FW_HDR_598) { device_printf(sc->sc_dev, "firmware is too new (>=598); not supported\n"); error = EOPNOTSUPP; goto error; } #endif mac->mac_fw.patch = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_UCODE_PATCH); date = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_UCODE_DATE); mac->mac_fw.opensource = (date == 0xffff); if (bwn_wme != 0) mac->mac_flags |= BWN_MAC_FLAG_WME; mac->mac_flags |= BWN_MAC_FLAG_HWCRYPTO; time = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_UCODE_TIME); if (mac->mac_fw.opensource == 0) { device_printf(sc->sc_dev, "firmware version (rev %u patch %u date %#x time %#x)\n", mac->mac_fw.rev, mac->mac_fw.patch, date, time); if (mac->mac_fw.no_pcmfile) device_printf(sc->sc_dev, "no HW crypto acceleration due to pcm5\n"); } else { mac->mac_fw.patch = time; fwcaps = bwn_fwcaps_read(mac); if (!(fwcaps & BWN_FWCAPS_HWCRYPTO) || mac->mac_fw.no_pcmfile) { device_printf(sc->sc_dev, "disabling HW crypto acceleration\n"); mac->mac_flags &= ~BWN_MAC_FLAG_HWCRYPTO; } if (!(fwcaps & BWN_FWCAPS_WME)) { device_printf(sc->sc_dev, "disabling WME support\n"); mac->mac_flags &= ~BWN_MAC_FLAG_WME; } } if (BWN_ISOLDFMT(mac)) device_printf(sc->sc_dev, "using old firmware image\n"); return (0); error: BWN_WRITE_4(mac, BWN_MACCTL, (BWN_READ_4(mac, BWN_MACCTL) & ~BWN_MACCTL_MCODE_RUN) | BWN_MACCTL_MCODE_JMP0); return (error); #undef GETFWSIZE #undef GETFWOFFSET } /* OpenFirmware only */ static uint16_t bwn_fwcaps_read(struct bwn_mac *mac) { KASSERT(mac->mac_fw.opensource == 1, ("%s:%d: fail", __func__, __LINE__)); return (bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_FWCAPS)); } static int bwn_fwinitvals_write(struct bwn_mac *mac, const struct bwn_fwinitvals *ivals, size_t count, size_t array_size) { #define GET_NEXTIV16(iv) \ ((const struct bwn_fwinitvals *)((const uint8_t *)(iv) + \ sizeof(uint16_t) + sizeof(uint16_t))) #define GET_NEXTIV32(iv) \ ((const struct bwn_fwinitvals *)((const uint8_t *)(iv) + \ sizeof(uint16_t) + sizeof(uint32_t))) struct bwn_softc *sc = mac->mac_sc; const struct bwn_fwinitvals *iv; uint16_t offset; size_t i; uint8_t bit32; KASSERT(sizeof(struct bwn_fwinitvals) == 6, ("%s:%d: fail", __func__, __LINE__)); iv = ivals; for (i = 0; i < count; i++) { if (array_size < sizeof(iv->offset_size)) goto fail; array_size -= sizeof(iv->offset_size); offset = be16toh(iv->offset_size); bit32 = (offset & BWN_FWINITVALS_32BIT) ? 
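		/* the high bit of offset_size selects a 32-bit write */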
		    1 : 0;
		offset &= BWN_FWINITVALS_OFFSET_MASK;
		if (offset >= 0x1000)
			goto fail;
		if (bit32) {
			if (array_size < sizeof(iv->data.d32))
				goto fail;
			array_size -= sizeof(iv->data.d32);
			BWN_WRITE_4(mac, offset, be32toh(iv->data.d32));
			iv = GET_NEXTIV32(iv);
		} else {
			if (array_size < sizeof(iv->data.d16))
				goto fail;
			array_size -= sizeof(iv->data.d16);
			BWN_WRITE_2(mac, offset, be16toh(iv->data.d16));
			iv = GET_NEXTIV16(iv);
		}
	}
	if (array_size != 0)
		goto fail;
	return (0);
fail:
	device_printf(sc->sc_dev, "initvals: invalid format\n");
	return (EPROTO);
#undef GET_NEXTIV16
#undef GET_NEXTIV32
}

int
bwn_switch_channel(struct bwn_mac *mac, int chan)
{
	struct bwn_phy *phy = &(mac->mac_phy);
	struct bwn_softc *sc = mac->mac_sc;
	struct ieee80211com *ic = &sc->sc_ic;
	uint16_t channelcookie, savedcookie;
	int error;

	if (chan == 0xffff)
		chan = phy->get_default_chan(mac);

	channelcookie = chan;
	if (IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan))
		channelcookie |= 0x100;

	savedcookie = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_CHAN);
	bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_CHAN, channelcookie);
	error = phy->switch_channel(mac, chan);
	if (error)
		goto fail;

	mac->mac_phy.chan = chan;
	DELAY(8000);
	return (0);
fail:
	device_printf(sc->sc_dev, "failed to switch channel\n");
	bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_CHAN, savedcookie);
	return (error);
}

static uint16_t
bwn_ant2phy(int antenna)
{

	switch (antenna) {
	case BWN_ANT0:
		return (BWN_TX_PHY_ANT0);
	case BWN_ANT1:
		return (BWN_TX_PHY_ANT1);
	case BWN_ANT2:
		return (BWN_TX_PHY_ANT2);
	case BWN_ANT3:
		return (BWN_TX_PHY_ANT3);
	case BWN_ANTAUTO:
		return (BWN_TX_PHY_ANT01AUTO);
	}
	KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__));
	return (0);
}

static void
bwn_wme_load(struct bwn_mac *mac)
{
	struct bwn_softc *sc = mac->mac_sc;
	int i;

	KASSERT(N(bwn_wme_shm_offsets) == N(sc->sc_wmeParams),
	    ("%s:%d: fail", __func__, __LINE__));

	bwn_mac_suspend(mac);
	for (i = 0; i < N(sc->sc_wmeParams); i++)
		bwn_wme_loadparams(mac, &(sc->sc_wmeParams[i]),
		    bwn_wme_shm_offsets[i]);
	bwn_mac_enable(mac);
}

static void
bwn_wme_loadparams(struct bwn_mac *mac, const struct wmeParams *p,
    uint16_t shm_offset)
{
#define	SM(_v, _f)	(((_v) << _f##_S) & _f)
	struct bwn_softc *sc = mac->mac_sc;
	uint16_t params[BWN_NR_WMEPARAMS];
	int slot, tmp;
	unsigned int i;

	slot = BWN_READ_2(mac, BWN_RNG) &
	    SM(p->wmep_logcwmin, WME_PARAM_LOGCWMIN);

	memset(&params, 0, sizeof(params));

	DPRINTF(sc, BWN_DEBUG_WME, "wmep_txopLimit %d wmep_logcwmin %d "
	    "wmep_logcwmax %d wmep_aifsn %d\n", p->wmep_txopLimit,
	    p->wmep_logcwmin, p->wmep_logcwmax, p->wmep_aifsn);

	params[BWN_WMEPARAM_TXOP] = p->wmep_txopLimit * 32;
	params[BWN_WMEPARAM_CWMIN] = SM(p->wmep_logcwmin, WME_PARAM_LOGCWMIN);
	params[BWN_WMEPARAM_CWMAX] = SM(p->wmep_logcwmax, WME_PARAM_LOGCWMAX);
	params[BWN_WMEPARAM_CWCUR] = SM(p->wmep_logcwmin, WME_PARAM_LOGCWMIN);
	params[BWN_WMEPARAM_AIFS] = p->wmep_aifsn;
	params[BWN_WMEPARAM_BSLOTS] = slot;
	params[BWN_WMEPARAM_REGGAP] = slot + p->wmep_aifsn;

	for (i = 0; i < N(params); i++) {
		if (i == BWN_WMEPARAM_STATUS) {
			tmp = bwn_shm_read_2(mac, BWN_SHARED,
			    shm_offset + (i * 2));
			tmp |= 0x100;
			bwn_shm_write_2(mac, BWN_SHARED, shm_offset + (i * 2),
			    tmp);
		} else {
			bwn_shm_write_2(mac, BWN_SHARED, shm_offset + (i * 2),
			    params[i]);
		}
	}
}

static void
bwn_mac_write_bssid(struct bwn_mac *mac)
{
	struct bwn_softc *sc = mac->mac_sc;
	uint32_t tmp;
	int i;
	uint8_t mac_bssid[IEEE80211_ADDR_LEN * 2];

	bwn_mac_setfilter(mac, BWN_MACFILTER_BSSID, sc->sc_bssid);
	memcpy(mac_bssid, sc->sc_ic.ic_macaddr, IEEE80211_ADDR_LEN);
	memcpy(mac_bssid + IEEE80211_ADDR_LEN,
sc->sc_bssid, IEEE80211_ADDR_LEN); for (i = 0; i < N(mac_bssid); i += sizeof(uint32_t)) { tmp = (uint32_t) (mac_bssid[i + 0]); tmp |= (uint32_t) (mac_bssid[i + 1]) << 8; tmp |= (uint32_t) (mac_bssid[i + 2]) << 16; tmp |= (uint32_t) (mac_bssid[i + 3]) << 24; bwn_ram_write(mac, 0x20 + i, tmp); } } static void bwn_mac_setfilter(struct bwn_mac *mac, uint16_t offset, const uint8_t *macaddr) { static const uint8_t zero[IEEE80211_ADDR_LEN] = { 0 }; uint16_t data; if (!mac) macaddr = zero; offset |= 0x0020; BWN_WRITE_2(mac, BWN_MACFILTER_CONTROL, offset); data = macaddr[0]; data |= macaddr[1] << 8; BWN_WRITE_2(mac, BWN_MACFILTER_DATA, data); data = macaddr[2]; data |= macaddr[3] << 8; BWN_WRITE_2(mac, BWN_MACFILTER_DATA, data); data = macaddr[4]; data |= macaddr[5] << 8; BWN_WRITE_2(mac, BWN_MACFILTER_DATA, data); } static void bwn_key_dowrite(struct bwn_mac *mac, uint8_t index, uint8_t algorithm, const uint8_t *key, size_t key_len, const uint8_t *mac_addr) { uint8_t buf[BWN_SEC_KEYSIZE] = { 0, }; uint8_t per_sta_keys_start = 8; if (BWN_SEC_NEWAPI(mac)) per_sta_keys_start = 4; KASSERT(index < mac->mac_max_nr_keys, ("%s:%d: fail", __func__, __LINE__)); KASSERT(key_len <= BWN_SEC_KEYSIZE, ("%s:%d: fail", __func__, __LINE__)); if (index >= per_sta_keys_start) bwn_key_macwrite(mac, index, NULL); if (key) memcpy(buf, key, key_len); bwn_key_write(mac, index, algorithm, buf); if (index >= per_sta_keys_start) bwn_key_macwrite(mac, index, mac_addr); mac->mac_key[index].algorithm = algorithm; } static void bwn_key_macwrite(struct bwn_mac *mac, uint8_t index, const uint8_t *addr) { struct bwn_softc *sc = mac->mac_sc; uint32_t addrtmp[2] = { 0, 0 }; uint8_t start = 8; if (BWN_SEC_NEWAPI(mac)) start = 4; KASSERT(index >= start, ("%s:%d: fail", __func__, __LINE__)); index -= start; if (addr) { addrtmp[0] = addr[0]; addrtmp[0] |= ((uint32_t) (addr[1]) << 8); addrtmp[0] |= ((uint32_t) (addr[2]) << 16); addrtmp[0] |= ((uint32_t) (addr[3]) << 24); addrtmp[1] = addr[4]; addrtmp[1] |= ((uint32_t) (addr[5]) << 8); } if (bhnd_get_hwrev(sc->sc_dev) >= 5) { bwn_shm_write_4(mac, BWN_RCMTA, (index * 2) + 0, addrtmp[0]); bwn_shm_write_2(mac, BWN_RCMTA, (index * 2) + 1, addrtmp[1]); } else { if (index >= 8) { bwn_shm_write_4(mac, BWN_SHARED, BWN_SHARED_PSM + (index * 6) + 0, addrtmp[0]); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_PSM + (index * 6) + 4, addrtmp[1]); } } } static void bwn_key_write(struct bwn_mac *mac, uint8_t index, uint8_t algorithm, const uint8_t *key) { unsigned int i; uint32_t offset; uint16_t kidx, value; kidx = BWN_SEC_KEY2FW(mac, index); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_KEYIDX_BLOCK + (kidx * 2), (kidx << 4) | algorithm); offset = mac->mac_ktp + (index * BWN_SEC_KEYSIZE); for (i = 0; i < BWN_SEC_KEYSIZE; i += 2) { value = key[i]; value |= (uint16_t)(key[i + 1]) << 8; bwn_shm_write_2(mac, BWN_SHARED, offset + i, value); } } static void bwn_phy_exit(struct bwn_mac *mac) { mac->mac_phy.rf_onoff(mac, 0); if (mac->mac_phy.exit != NULL) mac->mac_phy.exit(mac); } static void bwn_dma_free(struct bwn_mac *mac) { struct bwn_dma *dma; if ((mac->mac_flags & BWN_MAC_FLAG_DMA) == 0) return; dma = &mac->mac_method.dma; bwn_dma_ringfree(&dma->rx); bwn_dma_ringfree(&dma->wme[WME_AC_BK]); bwn_dma_ringfree(&dma->wme[WME_AC_BE]); bwn_dma_ringfree(&dma->wme[WME_AC_VI]); bwn_dma_ringfree(&dma->wme[WME_AC_VO]); bwn_dma_ringfree(&dma->mcast); } static void bwn_core_stop(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; BWN_ASSERT_LOCKED(sc); if (mac->mac_status < BWN_MAC_STATUS_STARTED) return; 
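	/*
	 * Quiesce the MAC: stop the periodic callouts, mask interrupts
	 * (the read-back flushes the register write), and suspend the
	 * microcode before dropping back to the initialized state.
	 */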
	callout_stop(&sc->sc_rfswitch_ch);
	callout_stop(&sc->sc_task_ch);
	callout_stop(&sc->sc_watchdog_ch);
	sc->sc_watchdog_timer = 0;
	BWN_WRITE_4(mac, BWN_INTR_MASK, 0);
	BWN_READ_4(mac, BWN_INTR_MASK);
	bwn_mac_suspend(mac);

	mac->mac_status = BWN_MAC_STATUS_INITED;
}

static int
bwn_switch_band(struct bwn_softc *sc, struct ieee80211_channel *chan)
{
	struct bwn_mac *up_dev = NULL;
	struct bwn_mac *down_dev;
	struct bwn_mac *mac;
	int err, status;
	uint8_t gmode;

	BWN_ASSERT_LOCKED(sc);

	TAILQ_FOREACH(mac, &sc->sc_maclist, mac_list) {
		if (IEEE80211_IS_CHAN_2GHZ(chan) &&
		    mac->mac_phy.supports_2ghz) {
			up_dev = mac;
			gmode = 1;
		} else if (IEEE80211_IS_CHAN_5GHZ(chan) &&
		    mac->mac_phy.supports_5ghz) {
			up_dev = mac;
			gmode = 0;
		} else {
			KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__));
			return (EINVAL);
		}
		if (up_dev != NULL)
			break;
	}
	if (up_dev == NULL) {
		device_printf(sc->sc_dev, "Could not find a device\n");
		return (ENODEV);
	}
	if (up_dev == sc->sc_curmac && sc->sc_curmac->mac_phy.gmode == gmode)
		return (0);

	DPRINTF(sc, BWN_DEBUG_RF | BWN_DEBUG_PHY | BWN_DEBUG_RESET,
	    "switching to %s-GHz band\n",
	    IEEE80211_IS_CHAN_2GHZ(chan) ? "2" : "5");

	down_dev = sc->sc_curmac;
	status = down_dev->mac_status;
	if (status >= BWN_MAC_STATUS_STARTED)
		bwn_core_stop(down_dev);
	if (status >= BWN_MAC_STATUS_INITED)
		bwn_core_exit(down_dev);

	if (down_dev != up_dev) {
		err = bwn_phy_reset(down_dev);
		if (err)
			goto fail;
	}

	up_dev->mac_phy.gmode = gmode;
	if (status >= BWN_MAC_STATUS_INITED) {
		err = bwn_core_init(up_dev);
		if (err) {
			device_printf(sc->sc_dev,
			    "fatal: failed to initialize for %s-GHz\n",
			    IEEE80211_IS_CHAN_2GHZ(chan) ? "2" : "5");
			goto fail;
		}
	}
	if (status >= BWN_MAC_STATUS_STARTED)
		bwn_core_start(up_dev);
	KASSERT(up_dev->mac_status == status, ("%s: fail", __func__));
	sc->sc_curmac = up_dev;
	return (0);
fail:
	sc->sc_curmac = NULL;
	return (err);
}

static void
bwn_rf_turnon(struct bwn_mac *mac)
{

	DPRINTF(mac->mac_sc, BWN_DEBUG_RESET, "%s: called\n", __func__);

	bwn_mac_suspend(mac);
	mac->mac_phy.rf_onoff(mac, 1);
	mac->mac_phy.rf_on = 1;
	bwn_mac_enable(mac);
}

static void
bwn_rf_turnoff(struct bwn_mac *mac)
{

	DPRINTF(mac->mac_sc, BWN_DEBUG_RESET, "%s: called\n", __func__);

	bwn_mac_suspend(mac);
	mac->mac_phy.rf_onoff(mac, 0);
	mac->mac_phy.rf_on = 0;
	bwn_mac_enable(mac);
}

/*
 * PHY reset.
 */
static int
bwn_phy_reset(struct bwn_mac *mac)
{
	struct bwn_softc *sc;
	uint16_t iost, mask;
	int error;

	sc = mac->mac_sc;

	iost = BWN_IOCTL_PHYRESET | BHND_IOCTL_CLK_FORCE;
	mask = iost | BWN_IOCTL_SUPPORT_G;

	if ((error = bhnd_write_ioctl(sc->sc_dev, iost, mask)))
		return (error);

	DELAY(1000);

	iost &= ~BHND_IOCTL_CLK_FORCE;

	if ((error = bhnd_write_ioctl(sc->sc_dev, iost, mask)))
		return (error);

	DELAY(1000);

	return (0);
}

static int
bwn_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct bwn_vap *bvp = BWN_VAP(vap);
	struct ieee80211com *ic = vap->iv_ic;
	enum ieee80211_state ostate = vap->iv_state;
	struct bwn_softc *sc = ic->ic_softc;
	struct bwn_mac *mac = sc->sc_curmac;
	int error;

	DPRINTF(sc, BWN_DEBUG_STATE, "%s: %s -> %s\n", __func__,
	    ieee80211_state_name[vap->iv_state],
	    ieee80211_state_name[nstate]);

	error = bvp->bv_newstate(vap, nstate, arg);
	if (error != 0)
		return (error);

	BWN_LOCK(sc);

	bwn_led_newstate(mac, nstate);

	/*
	 * Clear the BSSID when we stop a STA
	 */
	if (vap->iv_opmode == IEEE80211_M_STA) {
		if (ostate == IEEE80211_S_RUN && nstate != IEEE80211_S_RUN) {
			/*
			 * Clear out the BSSID.  If we reassociate to
			 * the same AP, this will reinitialize things
			 * correctly...
*/ if (ic->ic_opmode == IEEE80211_M_STA && (sc->sc_flags & BWN_FLAG_INVALID) == 0) { memset(sc->sc_bssid, 0, IEEE80211_ADDR_LEN); bwn_set_macaddr(mac); } } } if (vap->iv_opmode == IEEE80211_M_MONITOR || vap->iv_opmode == IEEE80211_M_AHDEMO) { /* XXX nothing to do? */ } else if (nstate == IEEE80211_S_RUN) { memcpy(sc->sc_bssid, vap->iv_bss->ni_bssid, IEEE80211_ADDR_LEN); bwn_set_opmode(mac); bwn_set_pretbtt(mac); bwn_spu_setdelay(mac, 0); bwn_set_macaddr(mac); } BWN_UNLOCK(sc); return (error); } static void bwn_set_pretbtt(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; uint16_t pretbtt; if (ic->ic_opmode == IEEE80211_M_IBSS) pretbtt = 2; else pretbtt = (mac->mac_phy.type == BWN_PHYTYPE_A) ? 120 : 250; bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_PRETBTT, pretbtt); BWN_WRITE_2(mac, BWN_TSF_CFP_PRETBTT, pretbtt); } static int bwn_intr(void *arg) { struct bwn_mac *mac = arg; struct bwn_softc *sc = mac->mac_sc; uint32_t reason; if (mac->mac_status < BWN_MAC_STATUS_STARTED || (sc->sc_flags & BWN_FLAG_INVALID)) return (FILTER_STRAY); DPRINTF(sc, BWN_DEBUG_INTR, "%s: called\n", __func__); reason = BWN_READ_4(mac, BWN_INTR_REASON); if (reason == 0xffffffff) /* shared IRQ */ return (FILTER_STRAY); reason &= mac->mac_intr_mask; if (reason == 0) return (FILTER_HANDLED); DPRINTF(sc, BWN_DEBUG_INTR, "%s: reason=0x%08x\n", __func__, reason); mac->mac_reason[0] = BWN_READ_4(mac, BWN_DMA0_REASON) & 0x0001dc00; mac->mac_reason[1] = BWN_READ_4(mac, BWN_DMA1_REASON) & 0x0000dc00; mac->mac_reason[2] = BWN_READ_4(mac, BWN_DMA2_REASON) & 0x0000dc00; mac->mac_reason[3] = BWN_READ_4(mac, BWN_DMA3_REASON) & 0x0001dc00; mac->mac_reason[4] = BWN_READ_4(mac, BWN_DMA4_REASON) & 0x0000dc00; BWN_WRITE_4(mac, BWN_INTR_REASON, reason); BWN_WRITE_4(mac, BWN_DMA0_REASON, mac->mac_reason[0]); BWN_WRITE_4(mac, BWN_DMA1_REASON, mac->mac_reason[1]); BWN_WRITE_4(mac, BWN_DMA2_REASON, mac->mac_reason[2]); BWN_WRITE_4(mac, BWN_DMA3_REASON, mac->mac_reason[3]); BWN_WRITE_4(mac, BWN_DMA4_REASON, mac->mac_reason[4]); /* Disable interrupts. 
*/ BWN_WRITE_4(mac, BWN_INTR_MASK, 0); mac->mac_reason_intr = reason; BWN_BARRIER(mac, 0, 0, BUS_SPACE_BARRIER_READ|BUS_SPACE_BARRIER_WRITE); taskqueue_enqueue(sc->sc_tq, &mac->mac_intrtask); return (FILTER_HANDLED); } static void bwn_intrtask(void *arg, int npending) { struct bwn_mac *mac = arg; struct bwn_softc *sc = mac->mac_sc; uint32_t merged = 0; int i, tx = 0, rx = 0; BWN_LOCK(sc); if (mac->mac_status < BWN_MAC_STATUS_STARTED || (sc->sc_flags & BWN_FLAG_INVALID)) { BWN_UNLOCK(sc); return; } for (i = 0; i < N(mac->mac_reason); i++) merged |= mac->mac_reason[i]; if (mac->mac_reason_intr & BWN_INTR_MAC_TXERR) device_printf(sc->sc_dev, "MAC trans error\n"); if (mac->mac_reason_intr & BWN_INTR_PHY_TXERR) { DPRINTF(sc, BWN_DEBUG_INTR, "%s: PHY trans error\n", __func__); mac->mac_phy.txerrors--; if (mac->mac_phy.txerrors == 0) { mac->mac_phy.txerrors = BWN_TXERROR_MAX; bwn_restart(mac, "PHY TX errors"); } } if (merged & (BWN_DMAINTR_FATALMASK | BWN_DMAINTR_NONFATALMASK)) { if (merged & BWN_DMAINTR_FATALMASK) { device_printf(sc->sc_dev, "Fatal DMA error: %#x %#x %#x %#x %#x %#x\n", mac->mac_reason[0], mac->mac_reason[1], mac->mac_reason[2], mac->mac_reason[3], mac->mac_reason[4], mac->mac_reason[5]); bwn_restart(mac, "DMA error"); BWN_UNLOCK(sc); return; } if (merged & BWN_DMAINTR_NONFATALMASK) { device_printf(sc->sc_dev, "DMA error: %#x %#x %#x %#x %#x %#x\n", mac->mac_reason[0], mac->mac_reason[1], mac->mac_reason[2], mac->mac_reason[3], mac->mac_reason[4], mac->mac_reason[5]); } } if (mac->mac_reason_intr & BWN_INTR_UCODE_DEBUG) bwn_intr_ucode_debug(mac); if (mac->mac_reason_intr & BWN_INTR_TBTT_INDI) bwn_intr_tbtt_indication(mac); if (mac->mac_reason_intr & BWN_INTR_ATIM_END) bwn_intr_atim_end(mac); if (mac->mac_reason_intr & BWN_INTR_BEACON) bwn_intr_beacon(mac); if (mac->mac_reason_intr & BWN_INTR_PMQ) bwn_intr_pmq(mac); if (mac->mac_reason_intr & BWN_INTR_NOISESAMPLE_OK) bwn_intr_noise(mac); if (mac->mac_flags & BWN_MAC_FLAG_DMA) { if (mac->mac_reason[0] & BWN_DMAINTR_RX_DONE) { bwn_dma_rx(mac->mac_method.dma.rx); rx = 1; } } else rx = bwn_pio_rx(&mac->mac_method.pio.rx); KASSERT(!(mac->mac_reason[1] & BWN_DMAINTR_RX_DONE), ("%s", __func__)); KASSERT(!(mac->mac_reason[2] & BWN_DMAINTR_RX_DONE), ("%s", __func__)); KASSERT(!(mac->mac_reason[3] & BWN_DMAINTR_RX_DONE), ("%s", __func__)); KASSERT(!(mac->mac_reason[4] & BWN_DMAINTR_RX_DONE), ("%s", __func__)); KASSERT(!(mac->mac_reason[5] & BWN_DMAINTR_RX_DONE), ("%s", __func__)); if (mac->mac_reason_intr & BWN_INTR_TX_OK) { bwn_intr_txeof(mac); tx = 1; } BWN_WRITE_4(mac, BWN_INTR_MASK, mac->mac_intr_mask); if (sc->sc_blink_led != NULL && sc->sc_led_blink) { int evt = BWN_LED_EVENT_NONE; if (tx && rx) { if (sc->sc_rx_rate > sc->sc_tx_rate) evt = BWN_LED_EVENT_RX; else evt = BWN_LED_EVENT_TX; } else if (tx) { evt = BWN_LED_EVENT_TX; } else if (rx) { evt = BWN_LED_EVENT_RX; } else if (rx == 0) { evt = BWN_LED_EVENT_POLL; } if (evt != BWN_LED_EVENT_NONE) bwn_led_event(mac, evt); } if (mbufq_first(&sc->sc_snd) != NULL) bwn_start(sc); BWN_BARRIER(mac, 0, 0, BUS_SPACE_BARRIER_READ|BUS_SPACE_BARRIER_WRITE); BWN_UNLOCK(sc); } static void bwn_restart(struct bwn_mac *mac, const char *msg) { struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; if (mac->mac_status < BWN_MAC_STATUS_INITED) return; device_printf(sc->sc_dev, "HW reset: %s\n", msg); ieee80211_runtask(ic, &mac->mac_hwreset); } static void bwn_intr_ucode_debug(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; uint16_t reason; if (mac->mac_fw.opensource == 0) 
return; reason = bwn_shm_read_2(mac, BWN_SCRATCH, BWN_DEBUGINTR_REASON_REG); switch (reason) { case BWN_DEBUGINTR_PANIC: bwn_handle_fwpanic(mac); break; case BWN_DEBUGINTR_DUMP_SHM: device_printf(sc->sc_dev, "BWN_DEBUGINTR_DUMP_SHM\n"); break; case BWN_DEBUGINTR_DUMP_REGS: device_printf(sc->sc_dev, "BWN_DEBUGINTR_DUMP_REGS\n"); break; case BWN_DEBUGINTR_MARKER: device_printf(sc->sc_dev, "BWN_DEBUGINTR_MARKER\n"); break; default: device_printf(sc->sc_dev, "ucode debug unknown reason: %#x\n", reason); } bwn_shm_write_2(mac, BWN_SCRATCH, BWN_DEBUGINTR_REASON_REG, BWN_DEBUGINTR_ACK); } static void bwn_intr_tbtt_indication(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; if (ic->ic_opmode != IEEE80211_M_HOSTAP) bwn_psctl(mac, 0); if (ic->ic_opmode == IEEE80211_M_IBSS) mac->mac_flags |= BWN_MAC_FLAG_DFQVALID; } static void bwn_intr_atim_end(struct bwn_mac *mac) { if (mac->mac_flags & BWN_MAC_FLAG_DFQVALID) { BWN_WRITE_4(mac, BWN_MACCMD, BWN_READ_4(mac, BWN_MACCMD) | BWN_MACCMD_DFQ_VALID); mac->mac_flags &= ~BWN_MAC_FLAG_DFQVALID; } } static void bwn_intr_beacon(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; uint32_t cmd, beacon0, beacon1; if (ic->ic_opmode == IEEE80211_M_HOSTAP || ic->ic_opmode == IEEE80211_M_MBSS) return; mac->mac_intr_mask &= ~BWN_INTR_BEACON; cmd = BWN_READ_4(mac, BWN_MACCMD); beacon0 = (cmd & BWN_MACCMD_BEACON0_VALID); beacon1 = (cmd & BWN_MACCMD_BEACON1_VALID); if (beacon0 && beacon1) { BWN_WRITE_4(mac, BWN_INTR_REASON, BWN_INTR_BEACON); mac->mac_intr_mask |= BWN_INTR_BEACON; return; } if (sc->sc_flags & BWN_FLAG_NEED_BEACON_TP) { sc->sc_flags &= ~BWN_FLAG_NEED_BEACON_TP; bwn_load_beacon0(mac); bwn_load_beacon1(mac); cmd = BWN_READ_4(mac, BWN_MACCMD); cmd |= BWN_MACCMD_BEACON0_VALID; BWN_WRITE_4(mac, BWN_MACCMD, cmd); } else { if (!beacon0) { bwn_load_beacon0(mac); cmd = BWN_READ_4(mac, BWN_MACCMD); cmd |= BWN_MACCMD_BEACON0_VALID; BWN_WRITE_4(mac, BWN_MACCMD, cmd); } else if (!beacon1) { bwn_load_beacon1(mac); cmd = BWN_READ_4(mac, BWN_MACCMD); cmd |= BWN_MACCMD_BEACON1_VALID; BWN_WRITE_4(mac, BWN_MACCMD, cmd); } } } static void bwn_intr_pmq(struct bwn_mac *mac) { uint32_t tmp; while (1) { tmp = BWN_READ_4(mac, BWN_PS_STATUS); if (!(tmp & 0x00000008)) break; } BWN_WRITE_2(mac, BWN_PS_STATUS, 0x0002); } static void bwn_intr_noise(struct bwn_mac *mac) { struct bwn_phy_g *pg = &mac->mac_phy.phy_g; uint16_t tmp; uint8_t noise[4]; uint8_t i, j; int32_t average; if (mac->mac_phy.type != BWN_PHYTYPE_G) return; KASSERT(mac->mac_noise.noi_running, ("%s: fail", __func__)); *((uint32_t *)noise) = htole32(bwn_jssi_read(mac)); if (noise[0] == 0x7f || noise[1] == 0x7f || noise[2] == 0x7f || noise[3] == 0x7f) goto new; KASSERT(mac->mac_noise.noi_nsamples < 8, ("%s:%d: fail", __func__, __LINE__)); i = mac->mac_noise.noi_nsamples; noise[0] = MIN(MAX(noise[0], 0), N(pg->pg_nrssi_lt) - 1); noise[1] = MIN(MAX(noise[1], 0), N(pg->pg_nrssi_lt) - 1); noise[2] = MIN(MAX(noise[2], 0), N(pg->pg_nrssi_lt) - 1); noise[3] = MIN(MAX(noise[3], 0), N(pg->pg_nrssi_lt) - 1); mac->mac_noise.noi_samples[i][0] = pg->pg_nrssi_lt[noise[0]]; mac->mac_noise.noi_samples[i][1] = pg->pg_nrssi_lt[noise[1]]; mac->mac_noise.noi_samples[i][2] = pg->pg_nrssi_lt[noise[2]]; mac->mac_noise.noi_samples[i][3] = pg->pg_nrssi_lt[noise[3]]; mac->mac_noise.noi_nsamples++; if (mac->mac_noise.noi_nsamples == 8) { average = 0; for (i = 0; i < 8; i++) { for (j = 0; j < 4; j++) average += mac->mac_noise.noi_samples[i][j]; } average = 
(((average / 32) * 125) + 64) / 128; tmp = (bwn_shm_read_2(mac, BWN_SHARED, 0x40c) / 128) & 0x1f; if (tmp >= 8) average += 2; else average -= 25; average -= (tmp == 8) ? 72 : 48; mac->mac_stats.link_noise = average; mac->mac_noise.noi_running = 0; return; } new: bwn_noise_gensample(mac); } static int bwn_pio_rx(struct bwn_pio_rxqueue *prq) { struct bwn_mac *mac = prq->prq_mac; struct bwn_softc *sc = mac->mac_sc; unsigned int i; BWN_ASSERT_LOCKED(sc); if (mac->mac_status < BWN_MAC_STATUS_STARTED) return (0); for (i = 0; i < 5000; i++) { if (bwn_pio_rxeof(prq) == 0) break; } if (i >= 5000) device_printf(sc->sc_dev, "too many RX frames in PIO mode\n"); return ((i > 0) ? 1 : 0); } static void bwn_dma_rx(struct bwn_dma_ring *dr) { int slot, curslot; KASSERT(!dr->dr_tx, ("%s:%d: fail", __func__, __LINE__)); curslot = dr->get_curslot(dr); KASSERT(curslot >= 0 && curslot < dr->dr_numslots, ("%s:%d: fail", __func__, __LINE__)); slot = dr->dr_curslot; for (; slot != curslot; slot = bwn_dma_nextslot(dr, slot)) bwn_dma_rxeof(dr, &slot); bus_dmamap_sync(dr->dr_ring_dtag, dr->dr_ring_dmap, BUS_DMASYNC_PREWRITE); dr->set_curslot(dr, slot); dr->dr_curslot = slot; } static void bwn_intr_txeof(struct bwn_mac *mac) { struct bwn_txstatus stat; uint32_t stat0, stat1; uint16_t tmp; BWN_ASSERT_LOCKED(mac->mac_sc); while (1) { stat0 = BWN_READ_4(mac, BWN_XMITSTAT_0); if (!(stat0 & 0x00000001)) break; stat1 = BWN_READ_4(mac, BWN_XMITSTAT_1); DPRINTF(mac->mac_sc, BWN_DEBUG_XMIT, "%s: stat0=0x%08x, stat1=0x%08x\n", __func__, stat0, stat1); stat.cookie = (stat0 >> 16); stat.seq = (stat1 & 0x0000ffff); stat.phy_stat = ((stat1 & 0x00ff0000) >> 16); tmp = (stat0 & 0x0000ffff); stat.framecnt = ((tmp & 0xf000) >> 12); stat.rtscnt = ((tmp & 0x0f00) >> 8); stat.sreason = ((tmp & 0x001c) >> 2); stat.pm = (tmp & 0x0080) ? 1 : 0; stat.im = (tmp & 0x0040) ? 1 : 0; stat.ampdu = (tmp & 0x0020) ? 1 : 0; stat.ack = (tmp & 0x0002) ? 
1 : 0; DPRINTF(mac->mac_sc, BWN_DEBUG_XMIT, "%s: cookie=%d, seq=%d, phystat=0x%02x, framecnt=%d, " "rtscnt=%d, sreason=%d, pm=%d, im=%d, ampdu=%d, ack=%d\n", __func__, stat.cookie, stat.seq, stat.phy_stat, stat.framecnt, stat.rtscnt, stat.sreason, stat.pm, stat.im, stat.ampdu, stat.ack); bwn_handle_txeof(mac, &stat); } } static void bwn_hwreset(void *arg, int npending) { struct bwn_mac *mac = arg; struct bwn_softc *sc = mac->mac_sc; int error = 0; int prev_status; BWN_LOCK(sc); prev_status = mac->mac_status; if (prev_status >= BWN_MAC_STATUS_STARTED) bwn_core_stop(mac); if (prev_status >= BWN_MAC_STATUS_INITED) bwn_core_exit(mac); if (prev_status >= BWN_MAC_STATUS_INITED) { error = bwn_core_init(mac); if (error) goto out; } if (prev_status >= BWN_MAC_STATUS_STARTED) bwn_core_start(mac); out: if (error) { device_printf(sc->sc_dev, "%s: failed (%d)\n", __func__, error); sc->sc_curmac = NULL; } BWN_UNLOCK(sc); } static void bwn_handle_fwpanic(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; uint16_t reason; reason = bwn_shm_read_2(mac, BWN_SCRATCH, BWN_FWPANIC_REASON_REG); device_printf(sc->sc_dev,"fw panic (%u)\n", reason); if (reason == BWN_FWPANIC_RESTART) bwn_restart(mac, "ucode panic"); } static void bwn_load_beacon0(struct bwn_mac *mac) { KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } static void bwn_load_beacon1(struct bwn_mac *mac) { KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } static uint32_t bwn_jssi_read(struct bwn_mac *mac) { uint32_t val = 0; val = bwn_shm_read_2(mac, BWN_SHARED, 0x08a); val <<= 16; val |= bwn_shm_read_2(mac, BWN_SHARED, 0x088); return (val); } static void bwn_noise_gensample(struct bwn_mac *mac) { uint32_t jssi = 0x7f7f7f7f; bwn_shm_write_2(mac, BWN_SHARED, 0x088, (jssi & 0x0000ffff)); bwn_shm_write_2(mac, BWN_SHARED, 0x08a, (jssi & 0xffff0000) >> 16); BWN_WRITE_4(mac, BWN_MACCMD, BWN_READ_4(mac, BWN_MACCMD) | BWN_MACCMD_BGNOISE); } static int bwn_dma_freeslot(struct bwn_dma_ring *dr) { BWN_ASSERT_LOCKED(dr->dr_mac->mac_sc); return (dr->dr_numslots - dr->dr_usedslot); } static int bwn_dma_nextslot(struct bwn_dma_ring *dr, int slot) { BWN_ASSERT_LOCKED(dr->dr_mac->mac_sc); KASSERT(slot >= -1 && slot <= dr->dr_numslots - 1, ("%s:%d: fail", __func__, __LINE__)); if (slot == dr->dr_numslots - 1) return (0); return (slot + 1); } static void bwn_dma_rxeof(struct bwn_dma_ring *dr, int *slot) { struct bwn_mac *mac = dr->dr_mac; struct bwn_softc *sc = mac->mac_sc; struct bwn_dma *dma = &mac->mac_method.dma; struct bwn_dmadesc_generic *desc; struct bwn_dmadesc_meta *meta; struct bwn_rxhdr4 *rxhdr; struct mbuf *m; uint32_t macstat; int32_t tmp; int cnt = 0; uint16_t len; dr->getdesc(dr, *slot, &desc, &meta); bus_dmamap_sync(dma->rxbuf_dtag, meta->mt_dmap, BUS_DMASYNC_POSTREAD); m = meta->mt_m; if (bwn_dma_newbuf(dr, desc, meta, 0)) { counter_u64_add(sc->sc_ic.ic_ierrors, 1); return; } rxhdr = mtod(m, struct bwn_rxhdr4 *); len = le16toh(rxhdr->frame_len); if (len <= 0) { counter_u64_add(sc->sc_ic.ic_ierrors, 1); return; } if (bwn_dma_check_redzone(dr, m)) { device_printf(sc->sc_dev, "redzone error.\n"); bwn_dma_set_redzone(dr, m); bus_dmamap_sync(dma->rxbuf_dtag, meta->mt_dmap, BUS_DMASYNC_PREWRITE); return; } if (len > dr->dr_rx_bufsize) { tmp = len; while (1) { dr->getdesc(dr, *slot, &desc, &meta); bwn_dma_set_redzone(dr, meta->mt_m); bus_dmamap_sync(dma->rxbuf_dtag, meta->mt_dmap, BUS_DMASYNC_PREWRITE); *slot = bwn_dma_nextslot(dr, *slot); cnt++; tmp -= dr->dr_rx_bufsize; if (tmp <= 0) break; } device_printf(sc->sc_dev, "too small buffer " 
"(len %u buffer %u dropped %d)\n", len, dr->dr_rx_bufsize, cnt); return; } switch (mac->mac_fw.fw_hdr_format) { case BWN_FW_HDR_351: case BWN_FW_HDR_410: macstat = le32toh(rxhdr->ps4.r351.mac_status); break; case BWN_FW_HDR_598: macstat = le32toh(rxhdr->ps4.r598.mac_status); break; } if (macstat & BWN_RX_MAC_FCSERR) { if (!(mac->mac_sc->sc_filters & BWN_MACCTL_PASS_BADFCS)) { device_printf(sc->sc_dev, "RX drop\n"); return; } } m->m_len = m->m_pkthdr.len = len + dr->dr_frameoffset; m_adj(m, dr->dr_frameoffset); bwn_rxeof(dr->dr_mac, m, rxhdr); } static void bwn_handle_txeof(struct bwn_mac *mac, const struct bwn_txstatus *status) { struct bwn_softc *sc = mac->mac_sc; struct bwn_stats *stats = &mac->mac_stats; BWN_ASSERT_LOCKED(mac->mac_sc); if (status->im) device_printf(sc->sc_dev, "TODO: STATUS IM\n"); if (status->ampdu) device_printf(sc->sc_dev, "TODO: STATUS AMPDU\n"); if (status->rtscnt) { if (status->rtscnt == 0xf) stats->rtsfail++; else stats->rts++; } if (mac->mac_flags & BWN_MAC_FLAG_DMA) { bwn_dma_handle_txeof(mac, status); } else { bwn_pio_handle_txeof(mac, status); } bwn_phy_txpower_check(mac, 0); } static uint8_t bwn_pio_rxeof(struct bwn_pio_rxqueue *prq) { struct bwn_mac *mac = prq->prq_mac; struct bwn_softc *sc = mac->mac_sc; struct bwn_rxhdr4 rxhdr; struct mbuf *m; uint32_t ctl32, macstat, v32; unsigned int i, padding; uint16_t ctl16, len, totlen, v16; unsigned char *mp; char *data; memset(&rxhdr, 0, sizeof(rxhdr)); if (prq->prq_rev >= 8) { ctl32 = bwn_pio_rx_read_4(prq, BWN_PIO8_RXCTL); if (!(ctl32 & BWN_PIO8_RXCTL_FRAMEREADY)) return (0); bwn_pio_rx_write_4(prq, BWN_PIO8_RXCTL, BWN_PIO8_RXCTL_FRAMEREADY); for (i = 0; i < 10; i++) { ctl32 = bwn_pio_rx_read_4(prq, BWN_PIO8_RXCTL); if (ctl32 & BWN_PIO8_RXCTL_DATAREADY) goto ready; DELAY(10); } } else { ctl16 = bwn_pio_rx_read_2(prq, BWN_PIO_RXCTL); if (!(ctl16 & BWN_PIO_RXCTL_FRAMEREADY)) return (0); bwn_pio_rx_write_2(prq, BWN_PIO_RXCTL, BWN_PIO_RXCTL_FRAMEREADY); for (i = 0; i < 10; i++) { ctl16 = bwn_pio_rx_read_2(prq, BWN_PIO_RXCTL); if (ctl16 & BWN_PIO_RXCTL_DATAREADY) goto ready; DELAY(10); } } device_printf(sc->sc_dev, "%s: timed out\n", __func__); return (1); ready: if (prq->prq_rev >= 8) { bus_read_multi_4(sc->sc_mem_res, prq->prq_base + BWN_PIO8_RXDATA, (void *)&rxhdr, sizeof(rxhdr)); } else { bus_read_multi_2(sc->sc_mem_res, prq->prq_base + BWN_PIO_RXDATA, (void *)&rxhdr, sizeof(rxhdr)); } len = le16toh(rxhdr.frame_len); if (len > 0x700) { device_printf(sc->sc_dev, "%s: len is too big\n", __func__); goto error; } if (len == 0) { device_printf(sc->sc_dev, "%s: len is 0\n", __func__); goto error; } switch (mac->mac_fw.fw_hdr_format) { case BWN_FW_HDR_351: case BWN_FW_HDR_410: macstat = le32toh(rxhdr.ps4.r351.mac_status); break; case BWN_FW_HDR_598: macstat = le32toh(rxhdr.ps4.r598.mac_status); break; } if (macstat & BWN_RX_MAC_FCSERR) { if (!(mac->mac_sc->sc_filters & BWN_MACCTL_PASS_BADFCS)) { device_printf(sc->sc_dev, "%s: FCS error", __func__); goto error; } } padding = (macstat & BWN_RX_MAC_PADDING) ? 
2 : 0; totlen = len + padding; KASSERT(totlen <= MCLBYTES, ("too big..\n")); m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (m == NULL) { device_printf(sc->sc_dev, "%s: out of memory", __func__); goto error; } mp = mtod(m, unsigned char *); if (prq->prq_rev >= 8) { bus_read_multi_4(sc->sc_mem_res, prq->prq_base + BWN_PIO8_RXDATA, (void *)mp, (totlen & ~3)); if (totlen & 3) { v32 = bwn_pio_rx_read_4(prq, BWN_PIO8_RXDATA); data = &(mp[totlen - 1]); switch (totlen & 3) { case 3: *data = (v32 >> 16); data--; case 2: *data = (v32 >> 8); data--; case 1: *data = v32; } } } else { bus_read_multi_2(sc->sc_mem_res, prq->prq_base + BWN_PIO_RXDATA, (void *)mp, (totlen & ~1)); if (totlen & 1) { v16 = bwn_pio_rx_read_2(prq, BWN_PIO_RXDATA); mp[totlen - 1] = v16; } } m->m_len = m->m_pkthdr.len = totlen; bwn_rxeof(prq->prq_mac, m, &rxhdr); return (1); error: if (prq->prq_rev >= 8) bwn_pio_rx_write_4(prq, BWN_PIO8_RXCTL, BWN_PIO8_RXCTL_DATAREADY); else bwn_pio_rx_write_2(prq, BWN_PIO_RXCTL, BWN_PIO_RXCTL_DATAREADY); return (1); } static int bwn_dma_newbuf(struct bwn_dma_ring *dr, struct bwn_dmadesc_generic *desc, struct bwn_dmadesc_meta *meta, int init) { struct bwn_mac *mac = dr->dr_mac; struct bwn_dma *dma = &mac->mac_method.dma; struct bwn_rxhdr4 *hdr; bus_dmamap_t map; bus_addr_t paddr; struct mbuf *m; int error; m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (m == NULL) { error = ENOBUFS; /* * If the NIC is up and running, we need to: * - Clear RX buffer's header. * - Restore RX descriptor settings. */ if (init) return (error); else goto back; } m->m_len = m->m_pkthdr.len = MCLBYTES; bwn_dma_set_redzone(dr, m); /* * Try to load RX buf into temporary DMA map */ error = bus_dmamap_load_mbuf(dma->rxbuf_dtag, dr->dr_spare_dmap, m, bwn_dma_buf_addr, &paddr, BUS_DMA_NOWAIT); if (error) { m_freem(m); /* * See the comment above */ if (init) return (error); else goto back; } if (!init) bus_dmamap_unload(dma->rxbuf_dtag, meta->mt_dmap); meta->mt_m = m; meta->mt_paddr = paddr; /* * Swap RX buf's DMA map with the loaded temporary one */ map = meta->mt_dmap; meta->mt_dmap = dr->dr_spare_dmap; dr->dr_spare_dmap = map; back: /* * Clear RX buf header */ hdr = mtod(meta->mt_m, struct bwn_rxhdr4 *); bzero(hdr, sizeof(*hdr)); bus_dmamap_sync(dma->rxbuf_dtag, meta->mt_dmap, BUS_DMASYNC_PREWRITE); /* * Setup RX buf descriptor */ dr->setdesc(dr, desc, meta->mt_paddr, meta->mt_m->m_len - sizeof(*hdr), 0, 0, 0); return (error); } static void bwn_dma_buf_addr(void *arg, bus_dma_segment_t *seg, int nseg, bus_size_t mapsz __unused, int error) { if (!error) { KASSERT(nseg == 1, ("too many segments(%d)\n", nseg)); *((bus_addr_t *)arg) = seg->ds_addr; } } static int bwn_hwrate2ieeerate(int rate) { switch (rate) { case BWN_CCK_RATE_1MB: return (2); case BWN_CCK_RATE_2MB: return (4); case BWN_CCK_RATE_5MB: return (11); case BWN_CCK_RATE_11MB: return (22); case BWN_OFDM_RATE_6MB: return (12); case BWN_OFDM_RATE_9MB: return (18); case BWN_OFDM_RATE_12MB: return (24); case BWN_OFDM_RATE_18MB: return (36); case BWN_OFDM_RATE_24MB: return (48); case BWN_OFDM_RATE_36MB: return (72); case BWN_OFDM_RATE_48MB: return (96); case BWN_OFDM_RATE_54MB: return (108); default: printf("Ooops\n"); return (0); } } /* * Post process the RX provided RSSI. * * Valid for A, B, G, LP PHYs. 
*/ static int8_t bwn_rx_rssi_calc(struct bwn_mac *mac, uint8_t in_rssi, int ofdm, int adjust_2053, int adjust_2050) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *gphy = &phy->phy_g; int tmp; switch (phy->rf_ver) { case 0x2050: if (ofdm) { tmp = in_rssi; if (tmp > 127) tmp -= 256; tmp = tmp * 73 / 64; if (adjust_2050) tmp += 25; else tmp -= 3; } else { if (mac->mac_sc->sc_board_info.board_flags & BHND_BFL_ADCDIV) { if (in_rssi > 63) in_rssi = 63; tmp = gphy->pg_nrssi_lt[in_rssi]; tmp = (31 - tmp) * -131 / 128 - 57; } else { tmp = in_rssi; tmp = (31 - tmp) * -149 / 128 - 68; } if (phy->type == BWN_PHYTYPE_G && adjust_2050) tmp += 25; } break; case 0x2060: if (in_rssi > 127) tmp = in_rssi - 256; else tmp = in_rssi; break; default: tmp = in_rssi; tmp = (tmp - 11) * 103 / 64; if (adjust_2053) tmp -= 109; else tmp -= 83; } return (tmp); } static void bwn_rxeof(struct bwn_mac *mac, struct mbuf *m, const void *_rxhdr) { const struct bwn_rxhdr4 *rxhdr = _rxhdr; struct bwn_plcp6 *plcp; struct bwn_softc *sc = mac->mac_sc; struct ieee80211_frame_min *wh; struct ieee80211_node *ni; struct ieee80211com *ic = &sc->sc_ic; uint32_t macstat; int padding, rate, rssi = 0, noise = 0, type; uint16_t phytype, phystat0, phystat3, chanstat; unsigned char *mp = mtod(m, unsigned char *); BWN_ASSERT_LOCKED(sc); phystat0 = le16toh(rxhdr->phy_status0); /* * XXX Note: phy_status3 doesn't exist for HT-PHY; it's only * used for LP-PHY. */ phystat3 = le16toh(rxhdr->ps3.lp.phy_status3); switch (mac->mac_fw.fw_hdr_format) { case BWN_FW_HDR_351: case BWN_FW_HDR_410: macstat = le32toh(rxhdr->ps4.r351.mac_status); chanstat = le16toh(rxhdr->ps4.r351.channel); break; case BWN_FW_HDR_598: macstat = le32toh(rxhdr->ps4.r598.mac_status); chanstat = le16toh(rxhdr->ps4.r598.channel); break; } phytype = chanstat & BWN_RX_CHAN_PHYTYPE; if (macstat & BWN_RX_MAC_FCSERR) device_printf(sc->sc_dev, "TODO RX: RX_FLAG_FAILED_FCS_CRC\n"); if (phystat0 & (BWN_RX_PHYST0_PLCPHCF | BWN_RX_PHYST0_PLCPFV)) device_printf(sc->sc_dev, "TODO RX: RX_FLAG_FAILED_PLCP_CRC\n"); if (macstat & BWN_RX_MAC_DECERR) goto drop; padding = (macstat & BWN_RX_MAC_PADDING) ? 2 : 0; if (m->m_pkthdr.len < (sizeof(struct bwn_plcp6) + padding)) { device_printf(sc->sc_dev, "frame too short (length=%d)\n", m->m_pkthdr.len); goto drop; } plcp = (struct bwn_plcp6 *)(mp + padding); m_adj(m, sizeof(struct bwn_plcp6) + padding); if (m->m_pkthdr.len < IEEE80211_MIN_LEN) { device_printf(sc->sc_dev, "frame too short (length=%d)\n", m->m_pkthdr.len); goto drop; } wh = mtod(m, struct ieee80211_frame_min *); if (macstat & BWN_RX_MAC_DEC) { DPRINTF(sc, BWN_DEBUG_HWCRYPTO, "RX decryption attempted (old %d keyidx %#x)\n", BWN_ISOLDFMT(mac), (macstat & BWN_RX_MAC_KEYIDX) >> BWN_RX_MAC_KEYIDX_SHIFT); } if (phystat0 & BWN_RX_PHYST0_OFDM) rate = bwn_plcp_get_ofdmrate(mac, plcp, phytype == BWN_PHYTYPE_A); else rate = bwn_plcp_get_cckrate(mac, plcp); if (rate == -1) { if (!(mac->mac_sc->sc_filters & BWN_MACCTL_PASS_BADPLCP)) goto drop; } sc->sc_rx_rate = bwn_hwrate2ieeerate(rate); /* rssi/noise */ switch (phytype) { case BWN_PHYTYPE_A: case BWN_PHYTYPE_B: case BWN_PHYTYPE_G: case BWN_PHYTYPE_LP: rssi = bwn_rx_rssi_calc(mac, rxhdr->phy.abg.rssi, !! (phystat0 & BWN_RX_PHYST0_OFDM), !! (phystat0 & BWN_RX_PHYST0_GAINCTL), !! 
(phystat3 & BWN_RX_PHYST3_TRSTATE)); break; case BWN_PHYTYPE_N: /* Broadcom has code for min/avg, but always used max */ if (rxhdr->phy.n.power0 == 16 || rxhdr->phy.n.power0 == 32) rssi = max(rxhdr->phy.n.power1, rxhdr->ps2.n.power2); else rssi = max(rxhdr->phy.n.power0, rxhdr->phy.n.power1); #if 0 DPRINTF(mac->mac_sc, BWN_DEBUG_RECV, "%s: power0=%d, power1=%d, power2=%d\n", __func__, rxhdr->phy.n.power0, rxhdr->phy.n.power1, rxhdr->ps2.n.power2); #endif break; default: /* XXX TODO: implement rssi for other PHYs */ break; } /* * RSSI here is absolute, not relative to the noise floor. */ noise = mac->mac_stats.link_noise; rssi = rssi - noise; /* RX radio tap */ if (ieee80211_radiotap_active(ic)) bwn_rx_radiotap(mac, m, rxhdr, plcp, rate, rssi, noise); m_adj(m, -IEEE80211_CRC_LEN); BWN_UNLOCK(sc); ni = ieee80211_find_rxnode(ic, wh); if (ni != NULL) { type = ieee80211_input(ni, m, rssi, noise); ieee80211_free_node(ni); } else type = ieee80211_input_all(ic, m, rssi, noise); BWN_LOCK(sc); return; drop: device_printf(sc->sc_dev, "%s: dropped\n", __func__); } static void bwn_ratectl_tx_complete(const struct ieee80211_node *ni, const struct bwn_txstatus *status) { struct ieee80211_ratectl_tx_status txs; int retrycnt = 0; /* * If we don't get an ACK, then we should log the * full framecnt. That may be 0 if it's a PHY * failure, so ensure that gets logged as some * retry attempt. */ txs.flags = IEEE80211_RATECTL_STATUS_LONG_RETRY; if (status->ack) { txs.status = IEEE80211_RATECTL_TX_SUCCESS; retrycnt = status->framecnt - 1; } else { txs.status = IEEE80211_RATECTL_TX_FAIL_UNSPECIFIED; retrycnt = status->framecnt; if (retrycnt == 0) retrycnt = 1; } txs.long_retries = retrycnt; ieee80211_ratectl_tx_complete(ni, &txs); } static void bwn_dma_handle_txeof(struct bwn_mac *mac, const struct bwn_txstatus *status) { struct bwn_dma *dma = &mac->mac_method.dma; struct bwn_dma_ring *dr; struct bwn_dmadesc_generic *desc; struct bwn_dmadesc_meta *meta; struct bwn_softc *sc = mac->mac_sc; int slot; BWN_ASSERT_LOCKED(sc); dr = bwn_dma_parse_cookie(mac, status, status->cookie, &slot); if (dr == NULL) { device_printf(sc->sc_dev, "failed to parse cookie\n"); return; } KASSERT(dr->dr_tx, ("%s:%d: fail", __func__, __LINE__)); while (1) { KASSERT(slot >= 0 && slot < dr->dr_numslots, ("%s:%d: fail", __func__, __LINE__)); dr->getdesc(dr, slot, &desc, &meta); if (meta->mt_txtype == BWN_DMADESC_METATYPE_HEADER) bus_dmamap_unload(dr->dr_txring_dtag, meta->mt_dmap); else if (meta->mt_txtype == BWN_DMADESC_METATYPE_BODY) bus_dmamap_unload(dma->txbuf_dtag, meta->mt_dmap); if (meta->mt_islast) { KASSERT(meta->mt_m != NULL, ("%s:%d: fail", __func__, __LINE__)); bwn_ratectl_tx_complete(meta->mt_ni, status); ieee80211_tx_complete(meta->mt_ni, meta->mt_m, 0); meta->mt_ni = NULL; meta->mt_m = NULL; } else KASSERT(meta->mt_m == NULL, ("%s:%d: fail", __func__, __LINE__)); dr->dr_usedslot--; if (meta->mt_islast) break; slot = bwn_dma_nextslot(dr, slot); } sc->sc_watchdog_timer = 0; if (dr->dr_stop) { KASSERT(bwn_dma_freeslot(dr) >= BWN_TX_SLOTS_PER_FRAME, ("%s:%d: fail", __func__, __LINE__)); dr->dr_stop = 0; } } static void bwn_pio_handle_txeof(struct bwn_mac *mac, const struct bwn_txstatus *status) { struct bwn_pio_txqueue *tq; struct bwn_pio_txpkt *tp = NULL; struct bwn_softc *sc = mac->mac_sc; BWN_ASSERT_LOCKED(sc); tq = bwn_pio_parse_cookie(mac, status->cookie, &tp); if (tq == NULL) return; tq->tq_used -= roundup(tp->tp_m->m_pkthdr.len + BWN_HDRSIZE(mac), 4); tq->tq_free++; /* XXX ieee80211_tx_complete()? 
*/ if (tp->tp_ni != NULL) { /* * Do any tx complete callback. Note this must * be done before releasing the node reference. */ bwn_ratectl_tx_complete(tp->tp_ni, status); if (tp->tp_m->m_flags & M_TXCB) ieee80211_process_callback(tp->tp_ni, tp->tp_m, 0); ieee80211_free_node(tp->tp_ni); tp->tp_ni = NULL; } m_freem(tp->tp_m); tp->tp_m = NULL; TAILQ_INSERT_TAIL(&tq->tq_pktlist, tp, tp_list); sc->sc_watchdog_timer = 0; } static void bwn_phy_txpower_check(struct bwn_mac *mac, uint32_t flags) { struct bwn_softc *sc = mac->mac_sc; struct bwn_phy *phy = &mac->mac_phy; struct ieee80211com *ic = &sc->sc_ic; unsigned long now; bwn_txpwr_result_t result; BWN_GETTIME(now); if (!(flags & BWN_TXPWR_IGNORE_TIME) && ieee80211_time_before(now, phy->nexttime)) return; phy->nexttime = now + 2 * 1000; if (sc->sc_board_info.board_vendor == PCI_VENDOR_BROADCOM && sc->sc_board_info.board_type == BHND_BOARD_BU4306) return; if (phy->recalc_txpwr != NULL) { result = phy->recalc_txpwr(mac, (flags & BWN_TXPWR_IGNORE_TSSI) ? 1 : 0); if (result == BWN_TXPWR_RES_DONE) return; KASSERT(result == BWN_TXPWR_RES_NEED_ADJUST, ("%s: fail", __func__)); KASSERT(phy->set_txpwr != NULL, ("%s: fail", __func__)); ieee80211_runtask(ic, &mac->mac_txpower); } } static uint16_t bwn_pio_rx_read_2(struct bwn_pio_rxqueue *prq, uint16_t offset) { return (BWN_READ_2(prq->prq_mac, prq->prq_base + offset)); } static uint32_t bwn_pio_rx_read_4(struct bwn_pio_rxqueue *prq, uint16_t offset) { return (BWN_READ_4(prq->prq_mac, prq->prq_base + offset)); } static void bwn_pio_rx_write_2(struct bwn_pio_rxqueue *prq, uint16_t offset, uint16_t value) { BWN_WRITE_2(prq->prq_mac, prq->prq_base + offset, value); } static void bwn_pio_rx_write_4(struct bwn_pio_rxqueue *prq, uint16_t offset, uint32_t value) { BWN_WRITE_4(prq->prq_mac, prq->prq_base + offset, value); } static int bwn_ieeerate2hwrate(struct bwn_softc *sc, int rate) { switch (rate) { /* OFDM rates (cf IEEE Std 802.11a-1999, pp. 14 Table 80) */ case 12: return (BWN_OFDM_RATE_6MB); case 18: return (BWN_OFDM_RATE_9MB); case 24: return (BWN_OFDM_RATE_12MB); case 36: return (BWN_OFDM_RATE_18MB); case 48: return (BWN_OFDM_RATE_24MB); case 72: return (BWN_OFDM_RATE_36MB); case 96: return (BWN_OFDM_RATE_48MB); case 108: return (BWN_OFDM_RATE_54MB); /* CCK rates (NB: not IEEE std, device-specific) */ case 2: return (BWN_CCK_RATE_1MB); case 4: return (BWN_CCK_RATE_2MB); case 11: return (BWN_CCK_RATE_5MB); case 22: return (BWN_CCK_RATE_11MB); } device_printf(sc->sc_dev, "unsupported rate %d\n", rate); return (BWN_CCK_RATE_1MB); } static uint16_t bwn_set_txhdr_phyctl1(struct bwn_mac *mac, uint8_t bitrate) { struct bwn_phy *phy = &mac->mac_phy; uint16_t control = 0; uint16_t bw; /* XXX TODO: this is for LP phy, what about N-PHY, etc? 
*/ bw = BWN_TXH_PHY1_BW_20; if (BWN_ISCCKRATE(bitrate) && phy->type != BWN_PHYTYPE_LP) { control = bw; } else { control = bw; /* Figure out coding rate and modulation */ /* XXX TODO: table-ize, for MCS transmit */ /* Note: this is BWN_*_RATE values */ switch (bitrate) { case BWN_CCK_RATE_1MB: control |= 0; break; case BWN_CCK_RATE_2MB: control |= 1; break; case BWN_CCK_RATE_5MB: control |= 2; break; case BWN_CCK_RATE_11MB: control |= 3; break; case BWN_OFDM_RATE_6MB: control |= BWN_TXH_PHY1_CRATE_1_2; control |= BWN_TXH_PHY1_MODUL_BPSK; break; case BWN_OFDM_RATE_9MB: control |= BWN_TXH_PHY1_CRATE_3_4; control |= BWN_TXH_PHY1_MODUL_BPSK; break; case BWN_OFDM_RATE_12MB: control |= BWN_TXH_PHY1_CRATE_1_2; control |= BWN_TXH_PHY1_MODUL_QPSK; break; case BWN_OFDM_RATE_18MB: control |= BWN_TXH_PHY1_CRATE_3_4; control |= BWN_TXH_PHY1_MODUL_QPSK; break; case BWN_OFDM_RATE_24MB: control |= BWN_TXH_PHY1_CRATE_1_2; control |= BWN_TXH_PHY1_MODUL_QAM16; break; case BWN_OFDM_RATE_36MB: control |= BWN_TXH_PHY1_CRATE_3_4; control |= BWN_TXH_PHY1_MODUL_QAM16; break; case BWN_OFDM_RATE_48MB: control |= BWN_TXH_PHY1_CRATE_1_2; control |= BWN_TXH_PHY1_MODUL_QAM64; break; case BWN_OFDM_RATE_54MB: control |= BWN_TXH_PHY1_CRATE_3_4; control |= BWN_TXH_PHY1_MODUL_QAM64; break; default: break; } control |= BWN_TXH_PHY1_MODE_SISO; } return control; } static int bwn_set_txhdr(struct bwn_mac *mac, struct ieee80211_node *ni, struct mbuf *m, struct bwn_txhdr *txhdr, uint16_t cookie) { const struct bwn_phy *phy = &mac->mac_phy; struct bwn_softc *sc = mac->mac_sc; struct ieee80211_frame *wh; struct ieee80211_frame *protwh; - struct ieee80211_frame_cts *cts; - struct ieee80211_frame_rts *rts; const struct ieee80211_txparam *tp = ni->ni_txparms; struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = &sc->sc_ic; struct mbuf *mprot; + uint8_t *prot_ptr; unsigned int len; uint32_t macctl = 0; - int protdur, rts_rate, rts_rate_fb, ismcast, isshort, rix, type; + int rts_rate, rts_rate_fb, ismcast, isshort, rix, type; uint16_t phyctl = 0; uint8_t rate, rate_fb; int fill_phy_ctl1 = 0; wh = mtod(m, struct ieee80211_frame *); memset(txhdr, 0, sizeof(*txhdr)); type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); isshort = (ic->ic_flags & IEEE80211_F_SHPREAMBLE) != 0; if ((phy->type == BWN_PHYTYPE_N) || (phy->type == BWN_PHYTYPE_LP) || (phy->type == BWN_PHYTYPE_HT)) fill_phy_ctl1 = 1; /* * Find TX rate */ if (type != IEEE80211_FC0_TYPE_DATA || (m->m_flags & M_EAPOL)) rate = rate_fb = tp->mgmtrate; else if (ismcast) rate = rate_fb = tp->mcastrate; else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) rate = rate_fb = tp->ucastrate; else { rix = ieee80211_ratectl_rate(ni, NULL, 0); rate = ni->ni_txrate; if (rix > 0) rate_fb = ni->ni_rates.rs_rates[rix - 1] & IEEE80211_RATE_VAL; else rate_fb = rate; } sc->sc_tx_rate = rate; /* Note: this maps the select ieee80211 rate to hardware rate */ rate = bwn_ieeerate2hwrate(sc, rate); rate_fb = bwn_ieeerate2hwrate(sc, rate_fb); txhdr->phyrate = (BWN_ISOFDMRATE(rate)) ? 
bwn_plcp_getofdm(rate) : bwn_plcp_getcck(rate); bcopy(wh->i_fc, txhdr->macfc, sizeof(txhdr->macfc)); bcopy(wh->i_addr1, txhdr->addr1, IEEE80211_ADDR_LEN); /* XXX rate/rate_fb is the hardware rate */ if ((rate_fb == rate) || (*(u_int16_t *)wh->i_dur & htole16(0x8000)) || (*(u_int16_t *)wh->i_dur == htole16(0))) txhdr->dur_fb = *(u_int16_t *)wh->i_dur; else txhdr->dur_fb = ieee80211_compute_duration(ic->ic_rt, m->m_pkthdr.len, rate, isshort); /* XXX TX encryption */ switch (mac->mac_fw.fw_hdr_format) { case BWN_FW_HDR_351: bwn_plcp_genhdr((struct bwn_plcp4 *)(&txhdr->body.r351.plcp), m->m_pkthdr.len + IEEE80211_CRC_LEN, rate); break; case BWN_FW_HDR_410: bwn_plcp_genhdr((struct bwn_plcp4 *)(&txhdr->body.r410.plcp), m->m_pkthdr.len + IEEE80211_CRC_LEN, rate); break; case BWN_FW_HDR_598: bwn_plcp_genhdr((struct bwn_plcp4 *)(&txhdr->body.r598.plcp), m->m_pkthdr.len + IEEE80211_CRC_LEN, rate); break; } bwn_plcp_genhdr((struct bwn_plcp4 *)(&txhdr->plcp_fb), m->m_pkthdr.len + IEEE80211_CRC_LEN, rate_fb); txhdr->eftypes |= (BWN_ISOFDMRATE(rate_fb)) ? BWN_TX_EFT_FB_OFDM : BWN_TX_EFT_FB_CCK; txhdr->chan = phy->chan; phyctl |= (BWN_ISOFDMRATE(rate)) ? BWN_TX_PHY_ENC_OFDM : BWN_TX_PHY_ENC_CCK; /* XXX preamble? obey net80211 */ if (isshort && (rate == BWN_CCK_RATE_2MB || rate == BWN_CCK_RATE_5MB || rate == BWN_CCK_RATE_11MB)) phyctl |= BWN_TX_PHY_SHORTPRMBL; if (! phy->gmode) macctl |= BWN_TX_MAC_5GHZ; /* XXX TX antenna selection */ switch (bwn_antenna_sanitize(mac, 0)) { case 0: phyctl |= BWN_TX_PHY_ANT01AUTO; break; case 1: phyctl |= BWN_TX_PHY_ANT0; break; case 2: phyctl |= BWN_TX_PHY_ANT1; break; case 3: phyctl |= BWN_TX_PHY_ANT2; break; case 4: phyctl |= BWN_TX_PHY_ANT3; break; default: KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } if (!ismcast) macctl |= BWN_TX_MAC_ACK; macctl |= (BWN_TX_MAC_HWSEQ | BWN_TX_MAC_START_MSDU); if (!IEEE80211_IS_MULTICAST(wh->i_addr1) && m->m_pkthdr.len + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) macctl |= BWN_TX_MAC_LONGFRAME; - if (ic->ic_flags & IEEE80211_F_USEPROT) { + if ((ic->ic_flags & IEEE80211_F_USEPROT) && + ic->ic_protmode != IEEE80211_PROT_NONE) { /* Note: don't fall back to CCK rates for 5G */ if (phy->gmode) rts_rate = BWN_CCK_RATE_1MB; else rts_rate = BWN_OFDM_RATE_6MB; rts_rate_fb = bwn_get_fbrate(rts_rate); /* XXX 'rate' here is hardware rate now, not the net80211 rate */ - protdur = ieee80211_compute_duration(ic->ic_rt, - m->m_pkthdr.len, rate, isshort) + - + ieee80211_ack_duration(ic->ic_rt, rate, isshort); + mprot = ieee80211_alloc_prot(ni, m, rate, ic->ic_protmode); + if (mprot == NULL) { + if_inc_counter(vap->iv_ifp, IFCOUNTER_OERRORS, 1); + device_printf(sc->sc_dev, + "could not allocate mbuf for protection mode %d\n", + ic->ic_protmode); + return (ENOBUFS); + } - if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) { + switch (mac->mac_fw.fw_hdr_format) { + case BWN_FW_HDR_351: + prot_ptr = txhdr->body.r351.rts_frame; + break; + case BWN_FW_HDR_410: + prot_ptr = txhdr->body.r410.rts_frame; + break; + case BWN_FW_HDR_598: + prot_ptr = txhdr->body.r598.rts_frame; + break; + } - switch (mac->mac_fw.fw_hdr_format) { - case BWN_FW_HDR_351: - cts = (struct ieee80211_frame_cts *) - txhdr->body.r351.rts_frame; - break; - case BWN_FW_HDR_410: - cts = (struct ieee80211_frame_cts *) - txhdr->body.r410.rts_frame; - break; - case BWN_FW_HDR_598: - cts = (struct ieee80211_frame_cts *) - txhdr->body.r598.rts_frame; - break; - } + bcopy(mtod(mprot, uint8_t *), prot_ptr, mprot->m_pkthdr.len); + m_freem(mprot); - mprot = ieee80211_alloc_cts(ic, 
ni->ni_vap->iv_myaddr, - protdur); - KASSERT(mprot != NULL, ("failed to alloc mbuf\n")); - bcopy(mtod(mprot, uint8_t *), (uint8_t *)cts, - mprot->m_pkthdr.len); - m_freem(mprot); + if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) { macctl |= BWN_TX_MAC_SEND_CTSTOSELF; len = sizeof(struct ieee80211_frame_cts); } else { - switch (mac->mac_fw.fw_hdr_format) { - case BWN_FW_HDR_351: - rts = (struct ieee80211_frame_rts *) - txhdr->body.r351.rts_frame; - break; - case BWN_FW_HDR_410: - rts = (struct ieee80211_frame_rts *) - txhdr->body.r410.rts_frame; - break; - case BWN_FW_HDR_598: - rts = (struct ieee80211_frame_rts *) - txhdr->body.r598.rts_frame; - break; - } - - /* XXX rate/rate_fb is the hardware rate */ - protdur += ieee80211_ack_duration(ic->ic_rt, rate, - isshort); - mprot = ieee80211_alloc_rts(ic, wh->i_addr1, - wh->i_addr2, protdur); - KASSERT(mprot != NULL, ("failed to alloc mbuf\n")); - bcopy(mtod(mprot, uint8_t *), (uint8_t *)rts, - mprot->m_pkthdr.len); - m_freem(mprot); macctl |= BWN_TX_MAC_SEND_RTSCTS; len = sizeof(struct ieee80211_frame_rts); } len += IEEE80211_CRC_LEN; switch (mac->mac_fw.fw_hdr_format) { case BWN_FW_HDR_351: bwn_plcp_genhdr((struct bwn_plcp4 *) &txhdr->body.r351.rts_plcp, len, rts_rate); break; case BWN_FW_HDR_410: bwn_plcp_genhdr((struct bwn_plcp4 *) &txhdr->body.r410.rts_plcp, len, rts_rate); break; case BWN_FW_HDR_598: bwn_plcp_genhdr((struct bwn_plcp4 *) &txhdr->body.r598.rts_plcp, len, rts_rate); break; } bwn_plcp_genhdr((struct bwn_plcp4 *)&txhdr->rts_plcp_fb, len, rts_rate_fb); switch (mac->mac_fw.fw_hdr_format) { case BWN_FW_HDR_351: protwh = (struct ieee80211_frame *) &txhdr->body.r351.rts_frame; break; case BWN_FW_HDR_410: protwh = (struct ieee80211_frame *) &txhdr->body.r410.rts_frame; break; case BWN_FW_HDR_598: protwh = (struct ieee80211_frame *) &txhdr->body.r598.rts_frame; break; } txhdr->rts_dur_fb = *(u_int16_t *)protwh->i_dur; if (BWN_ISOFDMRATE(rts_rate)) { txhdr->eftypes |= BWN_TX_EFT_RTS_OFDM; txhdr->phyrate_rts = bwn_plcp_getofdm(rts_rate); } else { txhdr->eftypes |= BWN_TX_EFT_RTS_CCK; txhdr->phyrate_rts = bwn_plcp_getcck(rts_rate); } txhdr->eftypes |= (BWN_ISOFDMRATE(rts_rate_fb)) ? 
BWN_TX_EFT_RTS_FBOFDM : BWN_TX_EFT_RTS_FBCCK; if (fill_phy_ctl1) { txhdr->phyctl_1rts = htole16(bwn_set_txhdr_phyctl1(mac, rts_rate)); txhdr->phyctl_1rtsfb = htole16(bwn_set_txhdr_phyctl1(mac, rts_rate_fb)); } } if (fill_phy_ctl1) { txhdr->phyctl_1 = htole16(bwn_set_txhdr_phyctl1(mac, rate)); txhdr->phyctl_1fb = htole16(bwn_set_txhdr_phyctl1(mac, rate_fb)); } switch (mac->mac_fw.fw_hdr_format) { case BWN_FW_HDR_351: txhdr->body.r351.cookie = htole16(cookie); break; case BWN_FW_HDR_410: txhdr->body.r410.cookie = htole16(cookie); break; case BWN_FW_HDR_598: txhdr->body.r598.cookie = htole16(cookie); break; } txhdr->macctl = htole32(macctl); txhdr->phyctl = htole16(phyctl); /* * TX radio tap */ if (ieee80211_radiotap_active_vap(vap)) { sc->sc_tx_th.wt_flags = 0; if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP; if (isshort && (rate == BWN_CCK_RATE_2MB || rate == BWN_CCK_RATE_5MB || rate == BWN_CCK_RATE_11MB)) sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; sc->sc_tx_th.wt_rate = rate; ieee80211_radiotap_tx(vap, m); } return (0); } static void bwn_plcp_genhdr(struct bwn_plcp4 *plcp, const uint16_t octets, const uint8_t rate) { uint32_t d, plen; uint8_t *raw = plcp->o.raw; if (BWN_ISOFDMRATE(rate)) { d = bwn_plcp_getofdm(rate); KASSERT(!(octets & 0xf000), ("%s:%d: fail", __func__, __LINE__)); d |= (octets << 5); plcp->o.data = htole32(d); } else { plen = octets * 16 / rate; if ((octets * 16 % rate) > 0) { plen++; if ((rate == BWN_CCK_RATE_11MB) && ((octets * 8 % 11) < 4)) { raw[1] = 0x84; } else raw[1] = 0x04; } else raw[1] = 0x04; plcp->o.data |= htole32(plen << 16); raw[0] = bwn_plcp_getcck(rate); } } static uint8_t bwn_antenna_sanitize(struct bwn_mac *mac, uint8_t n) { struct bwn_softc *sc = mac->mac_sc; uint8_t mask; if (n == 0) return (0); if (mac->mac_phy.gmode) mask = sc->sc_ant2g; else mask = sc->sc_ant5g; if (!(mask & (1 << (n - 1)))) return (0); return (n); } /* * Return a fallback rate for the given rate. * * Note: Don't fall back from OFDM to CCK. 
*/ static uint8_t bwn_get_fbrate(uint8_t bitrate) { switch (bitrate) { /* CCK */ case BWN_CCK_RATE_1MB: return (BWN_CCK_RATE_1MB); case BWN_CCK_RATE_2MB: return (BWN_CCK_RATE_1MB); case BWN_CCK_RATE_5MB: return (BWN_CCK_RATE_2MB); case BWN_CCK_RATE_11MB: return (BWN_CCK_RATE_5MB); /* OFDM */ case BWN_OFDM_RATE_6MB: return (BWN_OFDM_RATE_6MB); case BWN_OFDM_RATE_9MB: return (BWN_OFDM_RATE_6MB); case BWN_OFDM_RATE_12MB: return (BWN_OFDM_RATE_9MB); case BWN_OFDM_RATE_18MB: return (BWN_OFDM_RATE_12MB); case BWN_OFDM_RATE_24MB: return (BWN_OFDM_RATE_18MB); case BWN_OFDM_RATE_36MB: return (BWN_OFDM_RATE_24MB); case BWN_OFDM_RATE_48MB: return (BWN_OFDM_RATE_36MB); case BWN_OFDM_RATE_54MB: return (BWN_OFDM_RATE_48MB); } KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); return (0); } static uint32_t bwn_pio_write_multi_4(struct bwn_mac *mac, struct bwn_pio_txqueue *tq, uint32_t ctl, const void *_data, int len) { struct bwn_softc *sc = mac->mac_sc; uint32_t value = 0; const uint8_t *data = _data; ctl |= BWN_PIO8_TXCTL_0_7 | BWN_PIO8_TXCTL_8_15 | BWN_PIO8_TXCTL_16_23 | BWN_PIO8_TXCTL_24_31; bwn_pio_write_4(mac, tq, BWN_PIO8_TXCTL, ctl); bus_write_multi_4(sc->sc_mem_res, tq->tq_base + BWN_PIO8_TXDATA, __DECONST(void *, data), (len & ~3)); if (len & 3) { ctl &= ~(BWN_PIO8_TXCTL_8_15 | BWN_PIO8_TXCTL_16_23 | BWN_PIO8_TXCTL_24_31); data = &(data[len - 1]); switch (len & 3) { case 3: ctl |= BWN_PIO8_TXCTL_16_23; value |= (uint32_t)(*data) << 16; data--; case 2: ctl |= BWN_PIO8_TXCTL_8_15; value |= (uint32_t)(*data) << 8; data--; case 1: value |= (uint32_t)(*data); } bwn_pio_write_4(mac, tq, BWN_PIO8_TXCTL, ctl); bwn_pio_write_4(mac, tq, BWN_PIO8_TXDATA, value); } return (ctl); } static void bwn_pio_write_4(struct bwn_mac *mac, struct bwn_pio_txqueue *tq, uint16_t offset, uint32_t value) { BWN_WRITE_4(mac, tq->tq_base + offset, value); } static uint16_t bwn_pio_write_multi_2(struct bwn_mac *mac, struct bwn_pio_txqueue *tq, uint16_t ctl, const void *_data, int len) { struct bwn_softc *sc = mac->mac_sc; const uint8_t *data = _data; ctl |= BWN_PIO_TXCTL_WRITELO | BWN_PIO_TXCTL_WRITEHI; BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXCTL, ctl); bus_write_multi_2(sc->sc_mem_res, tq->tq_base + BWN_PIO_TXDATA, __DECONST(void *, data), (len & ~1)); if (len & 1) { ctl &= ~BWN_PIO_TXCTL_WRITEHI; BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXCTL, ctl); BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXDATA, data[len - 1]); } return (ctl); } static uint16_t bwn_pio_write_mbuf_2(struct bwn_mac *mac, struct bwn_pio_txqueue *tq, uint16_t ctl, struct mbuf *m0) { int i, j = 0; uint16_t data = 0; const uint8_t *buf; struct mbuf *m = m0; ctl |= BWN_PIO_TXCTL_WRITELO | BWN_PIO_TXCTL_WRITEHI; BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXCTL, ctl); for (; m != NULL; m = m->m_next) { buf = mtod(m, const uint8_t *); for (i = 0; i < m->m_len; i++) { if (!((j++) % 2)) data |= buf[i]; else { data |= (buf[i] << 8); BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXDATA, data); data = 0; } } } if (m0->m_pkthdr.len % 2) { ctl &= ~BWN_PIO_TXCTL_WRITEHI; BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXCTL, ctl); BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXDATA, data); } return (ctl); } static void bwn_set_slot_time(struct bwn_mac *mac, uint16_t time) { /* XXX should exit if 5GHz band .. 
*/ if (mac->mac_phy.type != BWN_PHYTYPE_G) return; BWN_WRITE_2(mac, 0x684, 510 + time); /* Disabled in Linux b43, can adversely effect performance */ #if 0 bwn_shm_write_2(mac, BWN_SHARED, 0x0010, time); #endif } static struct bwn_dma_ring * bwn_dma_select(struct bwn_mac *mac, uint8_t prio) { if ((mac->mac_flags & BWN_MAC_FLAG_WME) == 0) return (mac->mac_method.dma.wme[WME_AC_BE]); switch (prio) { case 3: return (mac->mac_method.dma.wme[WME_AC_VO]); case 2: return (mac->mac_method.dma.wme[WME_AC_VI]); case 0: return (mac->mac_method.dma.wme[WME_AC_BE]); case 1: return (mac->mac_method.dma.wme[WME_AC_BK]); } KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); return (NULL); } static int bwn_dma_getslot(struct bwn_dma_ring *dr) { int slot; BWN_ASSERT_LOCKED(dr->dr_mac->mac_sc); KASSERT(dr->dr_tx, ("%s:%d: fail", __func__, __LINE__)); KASSERT(!(dr->dr_stop), ("%s:%d: fail", __func__, __LINE__)); KASSERT(bwn_dma_freeslot(dr) != 0, ("%s:%d: fail", __func__, __LINE__)); slot = bwn_dma_nextslot(dr, dr->dr_curslot); KASSERT(!(slot & ~0x0fff), ("%s:%d: fail", __func__, __LINE__)); dr->dr_curslot = slot; dr->dr_usedslot++; return (slot); } static struct bwn_pio_txqueue * bwn_pio_parse_cookie(struct bwn_mac *mac, uint16_t cookie, struct bwn_pio_txpkt **pack) { struct bwn_pio *pio = &mac->mac_method.pio; struct bwn_pio_txqueue *tq = NULL; unsigned int index; switch (cookie & 0xf000) { case 0x1000: tq = &pio->wme[WME_AC_BK]; break; case 0x2000: tq = &pio->wme[WME_AC_BE]; break; case 0x3000: tq = &pio->wme[WME_AC_VI]; break; case 0x4000: tq = &pio->wme[WME_AC_VO]; break; case 0x5000: tq = &pio->mcast; break; } KASSERT(tq != NULL, ("%s:%d: fail", __func__, __LINE__)); if (tq == NULL) return (NULL); index = (cookie & 0x0fff); KASSERT(index < N(tq->tq_pkts), ("%s:%d: fail", __func__, __LINE__)); if (index >= N(tq->tq_pkts)) return (NULL); *pack = &tq->tq_pkts[index]; KASSERT(*pack != NULL, ("%s:%d: fail", __func__, __LINE__)); return (tq); } static void bwn_txpwr(void *arg, int npending) { struct bwn_mac *mac = arg; struct bwn_softc *sc; if (mac == NULL) return; sc = mac->mac_sc; BWN_LOCK(sc); if (mac->mac_status >= BWN_MAC_STATUS_STARTED && mac->mac_phy.set_txpwr != NULL) mac->mac_phy.set_txpwr(mac); BWN_UNLOCK(sc); } static void bwn_task_15s(struct bwn_mac *mac) { uint16_t reg; if (mac->mac_fw.opensource) { reg = bwn_shm_read_2(mac, BWN_SCRATCH, BWN_WATCHDOG_REG); if (reg) { bwn_restart(mac, "fw watchdog"); return; } bwn_shm_write_2(mac, BWN_SCRATCH, BWN_WATCHDOG_REG, 1); } if (mac->mac_phy.task_15s) mac->mac_phy.task_15s(mac); mac->mac_phy.txerrors = BWN_TXERROR_MAX; } static void bwn_task_30s(struct bwn_mac *mac) { if (mac->mac_phy.type != BWN_PHYTYPE_G || mac->mac_noise.noi_running) return; mac->mac_noise.noi_running = 1; mac->mac_noise.noi_nsamples = 0; bwn_noise_gensample(mac); } static void bwn_task_60s(struct bwn_mac *mac) { if (mac->mac_phy.task_60s) mac->mac_phy.task_60s(mac); bwn_phy_txpower_check(mac, BWN_TXPWR_IGNORE_TIME); } static void bwn_tasks(void *arg) { struct bwn_mac *mac = arg; struct bwn_softc *sc = mac->mac_sc; BWN_ASSERT_LOCKED(sc); if (mac->mac_status != BWN_MAC_STATUS_STARTED) return; if (mac->mac_task_state % 4 == 0) bwn_task_60s(mac); if (mac->mac_task_state % 2 == 0) bwn_task_30s(mac); bwn_task_15s(mac); mac->mac_task_state++; callout_reset(&sc->sc_task_ch, hz * 15, bwn_tasks, mac); } static int bwn_plcp_get_ofdmrate(struct bwn_mac *mac, struct bwn_plcp6 *plcp, uint8_t a) { struct bwn_softc *sc = mac->mac_sc; KASSERT(a == 0, ("not support APHY\n")); switch (plcp->o.raw[0] & 
0xf) { case 0xb: return (BWN_OFDM_RATE_6MB); case 0xf: return (BWN_OFDM_RATE_9MB); case 0xa: return (BWN_OFDM_RATE_12MB); case 0xe: return (BWN_OFDM_RATE_18MB); case 0x9: return (BWN_OFDM_RATE_24MB); case 0xd: return (BWN_OFDM_RATE_36MB); case 0x8: return (BWN_OFDM_RATE_48MB); case 0xc: return (BWN_OFDM_RATE_54MB); } device_printf(sc->sc_dev, "incorrect OFDM rate %d\n", plcp->o.raw[0] & 0xf); return (-1); } static int bwn_plcp_get_cckrate(struct bwn_mac *mac, struct bwn_plcp6 *plcp) { struct bwn_softc *sc = mac->mac_sc; switch (plcp->o.raw[0]) { case 0x0a: return (BWN_CCK_RATE_1MB); case 0x14: return (BWN_CCK_RATE_2MB); case 0x37: return (BWN_CCK_RATE_5MB); case 0x6e: return (BWN_CCK_RATE_11MB); } device_printf(sc->sc_dev, "incorrect CCK rate %d\n", plcp->o.raw[0]); return (-1); } static void bwn_rx_radiotap(struct bwn_mac *mac, struct mbuf *m, const struct bwn_rxhdr4 *rxhdr, struct bwn_plcp6 *plcp, int rate, int rssi, int noise) { struct bwn_softc *sc = mac->mac_sc; const struct ieee80211_frame_min *wh; uint64_t tsf; uint16_t low_mactime_now; uint16_t mt; if (htole16(rxhdr->phy_status0) & BWN_RX_PHYST0_SHORTPRMBL) sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; wh = mtod(m, const struct ieee80211_frame_min *); if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_WEP; bwn_tsf_read(mac, &tsf); low_mactime_now = tsf; tsf = tsf & ~0xffffULL; switch (mac->mac_fw.fw_hdr_format) { case BWN_FW_HDR_351: case BWN_FW_HDR_410: mt = le16toh(rxhdr->ps4.r351.mac_time); break; case BWN_FW_HDR_598: mt = le16toh(rxhdr->ps4.r598.mac_time); break; } tsf += mt; if (low_mactime_now < mt) tsf -= 0x10000; sc->sc_rx_th.wr_tsf = tsf; sc->sc_rx_th.wr_rate = rate; sc->sc_rx_th.wr_antsignal = rssi; sc->sc_rx_th.wr_antnoise = noise; } static void bwn_tsf_read(struct bwn_mac *mac, uint64_t *tsf) { uint32_t low, high; KASSERT(bhnd_get_hwrev(mac->mac_sc->sc_dev) >= 3, ("%s:%d: fail", __func__, __LINE__)); low = BWN_READ_4(mac, BWN_REV3PLUS_TSF_LOW); high = BWN_READ_4(mac, BWN_REV3PLUS_TSF_HIGH); *tsf = high; *tsf <<= 32; *tsf |= low; } static int bwn_dma_attach(struct bwn_mac *mac) { struct bwn_dma *dma; struct bwn_softc *sc; struct bhnd_dma_translation *dt, dma_translation; bhnd_addr_t addrext_req; bus_dma_tag_t dmat; bus_addr_t lowaddr; u_int addrext_shift, addr_width; int error; dma = &mac->mac_method.dma; sc = mac->mac_sc; dt = NULL; if (sc->sc_quirks & BWN_QUIRK_NODMA) return (0); KASSERT(bhnd_get_hwrev(sc->sc_dev) >= 5, ("%s: fail", __func__)); /* Use the DMA engine's maximum host address width to determine the * addrext constraints, and supported device address width. 
*/ switch (mac->mac_dmatype) { case BHND_DMA_ADDR_30BIT: /* 32-bit engine without addrext support */ addrext_req = 0x0; addrext_shift = 0; /* We can address the full 32-bit device address space */ addr_width = BHND_DMA_ADDR_32BIT; break; case BHND_DMA_ADDR_32BIT: /* 32-bit engine with addrext support */ addrext_req = BWN_DMA32_ADDREXT_MASK; addrext_shift = BWN_DMA32_ADDREXT_SHIFT; addr_width = BHND_DMA_ADDR_32BIT; break; case BHND_DMA_ADDR_64BIT: /* 64-bit engine with addrext support */ addrext_req = BWN_DMA64_ADDREXT_MASK; addrext_shift = BWN_DMA64_ADDREXT_SHIFT; addr_width = BHND_DMA_ADDR_64BIT; break; default: device_printf(sc->sc_dev, "unsupported DMA address width: %d\n", mac->mac_dmatype); return (ENXIO); } /* Fetch our device->host DMA translation and tag */ error = bhnd_get_dma_translation(sc->sc_dev, addr_width, 0, &dmat, &dma_translation); if (error) { device_printf(sc->sc_dev, "error fetching DMA translation: " "%d\n", error); return (error); } /* Verify that our DMA engine's addrext constraints are compatible with * our DMA translation */ if (addrext_req != 0x0 && (dma_translation.addrext_mask & addrext_req) != addrext_req) { device_printf(sc->sc_dev, "bus addrext mask %#jx incompatible " "with device addrext mask %#jx, disabling extended address " "support\n", (uintmax_t)dma_translation.addrext_mask, (uintmax_t)addrext_req); addrext_req = 0x0; addrext_shift = 0; } /* Apply our addrext translation constraint */ dma_translation.addrext_mask = addrext_req; /* Initialize our DMA engine configuration */ mac->mac_flags |= BWN_MAC_FLAG_DMA; dma->addrext_shift = addrext_shift; dma->translation = dma_translation; dt = &dma->translation; /* Dermine our translation's maximum supported address */ lowaddr = MIN((dt->addr_mask | dt->addrext_mask), BUS_SPACE_MAXADDR); /* * Create top level DMA tag */ error = bus_dma_tag_create(dmat, /* parent */ BWN_ALIGN, 0, /* alignment, bounds */ lowaddr, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ BUS_SPACE_MAXSIZE, /* maxsize */ BUS_SPACE_UNRESTRICTED, /* nsegments */ BUS_SPACE_MAXSIZE, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &dma->parent_dtag); if (error) { device_printf(sc->sc_dev, "can't create parent DMA tag\n"); return (error); } /* * Create TX/RX mbuf DMA tag */ error = bus_dma_tag_create(dma->parent_dtag, 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &dma->rxbuf_dtag); if (error) { device_printf(sc->sc_dev, "can't create mbuf DMA tag\n"); goto fail0; } error = bus_dma_tag_create(dma->parent_dtag, 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &dma->txbuf_dtag); if (error) { device_printf(sc->sc_dev, "can't create mbuf DMA tag\n"); goto fail1; } dma->wme[WME_AC_BK] = bwn_dma_ringsetup(mac, 0, 1); if (!dma->wme[WME_AC_BK]) goto fail2; dma->wme[WME_AC_BE] = bwn_dma_ringsetup(mac, 1, 1); if (!dma->wme[WME_AC_BE]) goto fail3; dma->wme[WME_AC_VI] = bwn_dma_ringsetup(mac, 2, 1); if (!dma->wme[WME_AC_VI]) goto fail4; dma->wme[WME_AC_VO] = bwn_dma_ringsetup(mac, 3, 1); if (!dma->wme[WME_AC_VO]) goto fail5; dma->mcast = bwn_dma_ringsetup(mac, 4, 1); if (!dma->mcast) goto fail6; dma->rx = bwn_dma_ringsetup(mac, 0, 0); if (!dma->rx) goto fail7; return (error); fail7: bwn_dma_ringfree(&dma->mcast); fail6: bwn_dma_ringfree(&dma->wme[WME_AC_VO]); fail5: bwn_dma_ringfree(&dma->wme[WME_AC_VI]); fail4: bwn_dma_ringfree(&dma->wme[WME_AC_BE]); fail3: 
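	/*
	 * The fail0..fail7 labels unwind in the reverse of the setup order
	 * above: rings are freed newest-first, then the TX/RX mbuf tags and
	 * finally the parent DMA tag are destroyed, so a partial attach
	 * never leaks a ring or a tag.
	 */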
bwn_dma_ringfree(&dma->wme[WME_AC_BK]); fail2: bus_dma_tag_destroy(dma->txbuf_dtag); fail1: bus_dma_tag_destroy(dma->rxbuf_dtag); fail0: bus_dma_tag_destroy(dma->parent_dtag); return (error); } static struct bwn_dma_ring * bwn_dma_parse_cookie(struct bwn_mac *mac, const struct bwn_txstatus *status, uint16_t cookie, int *slot) { struct bwn_dma *dma = &mac->mac_method.dma; struct bwn_dma_ring *dr; struct bwn_softc *sc = mac->mac_sc; BWN_ASSERT_LOCKED(mac->mac_sc); switch (cookie & 0xf000) { case 0x1000: dr = dma->wme[WME_AC_BK]; break; case 0x2000: dr = dma->wme[WME_AC_BE]; break; case 0x3000: dr = dma->wme[WME_AC_VI]; break; case 0x4000: dr = dma->wme[WME_AC_VO]; break; case 0x5000: dr = dma->mcast; break; default: dr = NULL; KASSERT(0 == 1, ("invalid cookie value %d", cookie & 0xf000)); } *slot = (cookie & 0x0fff); if (*slot < 0 || *slot >= dr->dr_numslots) { /* * XXX FIXME: sometimes H/W returns TX DONE events duplicately * that it occurs events which have same H/W sequence numbers. * When it's occurred just prints a WARNING msgs and ignores. */ KASSERT(status->seq == dma->lastseq, ("%s:%d: fail", __func__, __LINE__)); device_printf(sc->sc_dev, "out of slot ranges (0 < %d < %d)\n", *slot, dr->dr_numslots); return (NULL); } dma->lastseq = status->seq; return (dr); } static void bwn_dma_stop(struct bwn_mac *mac) { struct bwn_dma *dma; if ((mac->mac_flags & BWN_MAC_FLAG_DMA) == 0) return; dma = &mac->mac_method.dma; bwn_dma_ringstop(&dma->rx); bwn_dma_ringstop(&dma->wme[WME_AC_BK]); bwn_dma_ringstop(&dma->wme[WME_AC_BE]); bwn_dma_ringstop(&dma->wme[WME_AC_VI]); bwn_dma_ringstop(&dma->wme[WME_AC_VO]); bwn_dma_ringstop(&dma->mcast); } static void bwn_dma_ringstop(struct bwn_dma_ring **dr) { if (dr == NULL) return; bwn_dma_cleanup(*dr); } static void bwn_pio_stop(struct bwn_mac *mac) { struct bwn_pio *pio; if (mac->mac_flags & BWN_MAC_FLAG_DMA) return; pio = &mac->mac_method.pio; bwn_destroy_queue_tx(&pio->mcast); bwn_destroy_queue_tx(&pio->wme[WME_AC_VO]); bwn_destroy_queue_tx(&pio->wme[WME_AC_VI]); bwn_destroy_queue_tx(&pio->wme[WME_AC_BE]); bwn_destroy_queue_tx(&pio->wme[WME_AC_BK]); } static int bwn_led_attach(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; const uint8_t *led_act = NULL; int error; int i; sc->sc_led_idle = (2350 * hz) / 1000; sc->sc_led_blink = 1; for (i = 0; i < N(bwn_vendor_led_act); ++i) { if (sc->sc_board_info.board_vendor == bwn_vendor_led_act[i].vid) { led_act = bwn_vendor_led_act[i].led_act; break; } } if (led_act == NULL) led_act = bwn_default_led_act; _Static_assert(nitems(bwn_led_vars) == BWN_LED_MAX, "invalid NVRAM variable name array"); for (i = 0; i < BWN_LED_MAX; ++i) { struct bwn_led *led; uint8_t val; led = &sc->sc_leds[i]; KASSERT(i < nitems(bwn_led_vars), ("unknown LED index")); error = bhnd_nvram_getvar_uint8(sc->sc_dev, bwn_led_vars[i], &val); if (error) { if (error != ENOENT) { device_printf(sc->sc_dev, "NVRAM variable %s " "unreadable: %d", bwn_led_vars[i], error); return (error); } /* Not found; use default */ led->led_act = led_act[i]; } else { if (val & BWN_LED_ACT_LOW) led->led_flags |= BWN_LED_F_ACTLOW; led->led_act = val & BWN_LED_ACT_MASK; } led->led_mask = (1 << i); if (led->led_act == BWN_LED_ACT_BLINK_SLOW || led->led_act == BWN_LED_ACT_BLINK_POLL || led->led_act == BWN_LED_ACT_BLINK) { led->led_flags |= BWN_LED_F_BLINK; if (led->led_act == BWN_LED_ACT_BLINK_POLL) led->led_flags |= BWN_LED_F_POLLABLE; else if (led->led_act == BWN_LED_ACT_BLINK_SLOW) led->led_flags |= BWN_LED_F_SLOW; if (sc->sc_blink_led == NULL) { sc->sc_blink_led 
= led; if (led->led_flags & BWN_LED_F_SLOW) BWN_LED_SLOWDOWN(sc->sc_led_idle); } } DPRINTF(sc, BWN_DEBUG_LED, "%dth led, act %d, lowact %d\n", i, led->led_act, led->led_flags & BWN_LED_F_ACTLOW); } callout_init_mtx(&sc->sc_led_blink_ch, &sc->sc_mtx, 0); return (0); } static __inline uint16_t bwn_led_onoff(const struct bwn_led *led, uint16_t val, int on) { if (led->led_flags & BWN_LED_F_ACTLOW) on = !on; if (on) val |= led->led_mask; else val &= ~led->led_mask; return val; } static void bwn_led_newstate(struct bwn_mac *mac, enum ieee80211_state nstate) { struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; uint16_t val; int i; if (nstate == IEEE80211_S_INIT) { callout_stop(&sc->sc_led_blink_ch); sc->sc_led_blinking = 0; } if ((sc->sc_flags & BWN_FLAG_RUNNING) == 0) return; val = BWN_READ_2(mac, BWN_GPIO_CONTROL); for (i = 0; i < BWN_LED_MAX; ++i) { struct bwn_led *led = &sc->sc_leds[i]; int on; if (led->led_act == BWN_LED_ACT_UNKN || led->led_act == BWN_LED_ACT_NULL) continue; if ((led->led_flags & BWN_LED_F_BLINK) && nstate != IEEE80211_S_INIT) continue; switch (led->led_act) { case BWN_LED_ACT_ON: /* Always on */ on = 1; break; case BWN_LED_ACT_OFF: /* Always off */ case BWN_LED_ACT_5GHZ: /* TODO: 11A */ on = 0; break; default: on = 1; switch (nstate) { case IEEE80211_S_INIT: on = 0; break; case IEEE80211_S_RUN: if (led->led_act == BWN_LED_ACT_11G && ic->ic_curmode != IEEE80211_MODE_11G) on = 0; break; default: if (led->led_act == BWN_LED_ACT_ASSOC) on = 0; break; } break; } val = bwn_led_onoff(led, val, on); } BWN_WRITE_2(mac, BWN_GPIO_CONTROL, val); } static void bwn_led_event(struct bwn_mac *mac, int event) { struct bwn_softc *sc = mac->mac_sc; struct bwn_led *led = sc->sc_blink_led; int rate; if (event == BWN_LED_EVENT_POLL) { if ((led->led_flags & BWN_LED_F_POLLABLE) == 0) return; if (ticks - sc->sc_led_ticks < sc->sc_led_idle) return; } sc->sc_led_ticks = ticks; if (sc->sc_led_blinking) return; switch (event) { case BWN_LED_EVENT_RX: rate = sc->sc_rx_rate; break; case BWN_LED_EVENT_TX: rate = sc->sc_tx_rate; break; case BWN_LED_EVENT_POLL: rate = 0; break; default: panic("unknown LED event %d\n", event); break; } bwn_led_blink_start(mac, bwn_led_duration[rate].on_dur, bwn_led_duration[rate].off_dur); } static void bwn_led_blink_start(struct bwn_mac *mac, int on_dur, int off_dur) { struct bwn_softc *sc = mac->mac_sc; struct bwn_led *led = sc->sc_blink_led; uint16_t val; val = BWN_READ_2(mac, BWN_GPIO_CONTROL); val = bwn_led_onoff(led, val, 1); BWN_WRITE_2(mac, BWN_GPIO_CONTROL, val); if (led->led_flags & BWN_LED_F_SLOW) { BWN_LED_SLOWDOWN(on_dur); BWN_LED_SLOWDOWN(off_dur); } sc->sc_led_blinking = 1; sc->sc_led_blink_offdur = off_dur; callout_reset(&sc->sc_led_blink_ch, on_dur, bwn_led_blink_next, mac); } static void bwn_led_blink_next(void *arg) { struct bwn_mac *mac = arg; struct bwn_softc *sc = mac->mac_sc; uint16_t val; val = BWN_READ_2(mac, BWN_GPIO_CONTROL); val = bwn_led_onoff(sc->sc_blink_led, val, 0); BWN_WRITE_2(mac, BWN_GPIO_CONTROL, val); callout_reset(&sc->sc_led_blink_ch, sc->sc_led_blink_offdur, bwn_led_blink_end, mac); } static void bwn_led_blink_end(void *arg) { struct bwn_mac *mac = arg; struct bwn_softc *sc = mac->mac_sc; sc->sc_led_blinking = 0; } static int bwn_suspend(device_t dev) { struct bwn_softc *sc = device_get_softc(dev); BWN_LOCK(sc); bwn_stop(sc); BWN_UNLOCK(sc); return (0); } static int bwn_resume(device_t dev) { struct bwn_softc *sc = device_get_softc(dev); int error = EDOOFUS; BWN_LOCK(sc); if (sc->sc_ic.ic_nrunning > 0) error = 
bwn_init(sc); BWN_UNLOCK(sc); if (error == 0) ieee80211_start_all(&sc->sc_ic); return (0); } static void bwn_rfswitch(void *arg) { struct bwn_softc *sc = arg; struct bwn_mac *mac = sc->sc_curmac; int cur = 0, prev = 0; KASSERT(mac->mac_status >= BWN_MAC_STATUS_STARTED, ("%s: invalid MAC status %d", __func__, mac->mac_status)); if (mac->mac_phy.rev >= 3 || mac->mac_phy.type == BWN_PHYTYPE_LP || mac->mac_phy.type == BWN_PHYTYPE_N) { if (!(BWN_READ_4(mac, BWN_RF_HWENABLED_HI) & BWN_RF_HWENABLED_HI_MASK)) cur = 1; } else { if (BWN_READ_2(mac, BWN_RF_HWENABLED_LO) & BWN_RF_HWENABLED_LO_MASK) cur = 1; } if (mac->mac_flags & BWN_MAC_FLAG_RADIO_ON) prev = 1; DPRINTF(sc, BWN_DEBUG_RESET, "%s: called; cur=%d, prev=%d\n", __func__, cur, prev); if (cur != prev) { if (cur) mac->mac_flags |= BWN_MAC_FLAG_RADIO_ON; else mac->mac_flags &= ~BWN_MAC_FLAG_RADIO_ON; device_printf(sc->sc_dev, "status of RF switch is changed to %s\n", cur ? "ON" : "OFF"); if (cur != mac->mac_phy.rf_on) { if (cur) bwn_rf_turnon(mac); else bwn_rf_turnoff(mac); } } callout_schedule(&sc->sc_rfswitch_ch, hz); } static void bwn_sysctl_node(struct bwn_softc *sc) { device_t dev = sc->sc_dev; struct bwn_mac *mac; struct bwn_stats *stats; /* XXX assume that count of MAC is only 1. */ if ((mac = sc->sc_curmac) == NULL) return; stats = &mac->mac_stats; SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "linknoise", CTLFLAG_RW, &stats->rts, 0, "Noise level"); SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "rts", CTLFLAG_RW, &stats->rts, 0, "RTS"); SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "rtsfail", CTLFLAG_RW, &stats->rtsfail, 0, "RTS failed to send"); #ifdef BWN_DEBUG SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug", CTLFLAG_RW, &sc->sc_debug, 0, "Debug flags"); #endif } static device_method_t bwn_methods[] = { /* Device interface */ DEVMETHOD(device_probe, bwn_probe), DEVMETHOD(device_attach, bwn_attach), DEVMETHOD(device_detach, bwn_detach), DEVMETHOD(device_suspend, bwn_suspend), DEVMETHOD(device_resume, bwn_resume), DEVMETHOD_END }; static driver_t bwn_driver = { "bwn", bwn_methods, sizeof(struct bwn_softc) }; static devclass_t bwn_devclass; DRIVER_MODULE(bwn, bhnd, bwn_driver, bwn_devclass, 0, 0); MODULE_DEPEND(bwn, bhnd, 1, 1, 1); MODULE_DEPEND(bwn, gpiobus, 1, 1, 1); MODULE_DEPEND(bwn, wlan, 1, 1, 1); /* 802.11 media layer */ MODULE_DEPEND(bwn, firmware, 1, 1, 1); /* firmware support */ MODULE_DEPEND(bwn, wlan_amrr, 1, 1, 1); MODULE_VERSION(bwn, 1); Index: head/sys/dev/ral/rt2560.c =================================================================== --- head/sys/dev/ral/rt2560.c (revision 330687) +++ head/sys/dev/ral/rt2560.c (revision 330688) @@ -1,2767 +1,2752 @@ /* $FreeBSD$ */ /*- * Copyright (c) 2005, 2006 * Damien Bergamini * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include __FBSDID("$FreeBSD$"); /*- * Ralink Technology RT2560 chipset driver * http://www.ralinktech.com/ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define RT2560_RSSI(sc, rssi) \ ((rssi) > (RT2560_NOISE_FLOOR + (sc)->rssi_corr) ? \ ((rssi) - RT2560_NOISE_FLOOR - (sc)->rssi_corr) : 0) #define RAL_DEBUG #ifdef RAL_DEBUG #define DPRINTF(sc, fmt, ...) do { \ if (sc->sc_debug > 0) \ printf(fmt, __VA_ARGS__); \ } while (0) #define DPRINTFN(sc, n, fmt, ...) do { \ if (sc->sc_debug >= (n)) \ printf(fmt, __VA_ARGS__); \ } while (0) #else #define DPRINTF(sc, fmt, ...) #define DPRINTFN(sc, n, fmt, ...) #endif static struct ieee80211vap *rt2560_vap_create(struct ieee80211com *, const char [IFNAMSIZ], int, enum ieee80211_opmode, int, const uint8_t [IEEE80211_ADDR_LEN], const uint8_t [IEEE80211_ADDR_LEN]); static void rt2560_vap_delete(struct ieee80211vap *); static void rt2560_dma_map_addr(void *, bus_dma_segment_t *, int, int); static int rt2560_alloc_tx_ring(struct rt2560_softc *, struct rt2560_tx_ring *, int); static void rt2560_reset_tx_ring(struct rt2560_softc *, struct rt2560_tx_ring *); static void rt2560_free_tx_ring(struct rt2560_softc *, struct rt2560_tx_ring *); static int rt2560_alloc_rx_ring(struct rt2560_softc *, struct rt2560_rx_ring *, int); static void rt2560_reset_rx_ring(struct rt2560_softc *, struct rt2560_rx_ring *); static void rt2560_free_rx_ring(struct rt2560_softc *, struct rt2560_rx_ring *); static int rt2560_newstate(struct ieee80211vap *, enum ieee80211_state, int); static uint16_t rt2560_eeprom_read(struct rt2560_softc *, uint8_t); static void rt2560_encryption_intr(struct rt2560_softc *); static void rt2560_tx_intr(struct rt2560_softc *); static void rt2560_prio_intr(struct rt2560_softc *); static void rt2560_decryption_intr(struct rt2560_softc *); static void rt2560_rx_intr(struct rt2560_softc *); static void rt2560_beacon_update(struct ieee80211vap *, int item); static void rt2560_beacon_expire(struct rt2560_softc *); static void rt2560_wakeup_expire(struct rt2560_softc *); static void rt2560_scan_start(struct ieee80211com *); static void rt2560_scan_end(struct ieee80211com *); static void rt2560_getradiocaps(struct ieee80211com *, int, int *, struct ieee80211_channel[]); static void rt2560_set_channel(struct ieee80211com *); static void rt2560_setup_tx_desc(struct rt2560_softc *, struct rt2560_tx_desc *, uint32_t, int, int, int, bus_addr_t); static int rt2560_tx_bcn(struct rt2560_softc *, struct mbuf *, struct ieee80211_node *); static int rt2560_tx_mgt(struct rt2560_softc *, struct mbuf *, struct ieee80211_node *); static int rt2560_tx_data(struct rt2560_softc *, struct mbuf *, struct ieee80211_node *); static int rt2560_transmit(struct ieee80211com *, struct mbuf *); static void rt2560_start(struct rt2560_softc *); static void rt2560_watchdog(void *); static void rt2560_parent(struct ieee80211com *); static void rt2560_bbp_write(struct rt2560_softc *, uint8_t, uint8_t); 
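/*
 * The RT2560_RSSI() macro defined above converts the raw RSSI byte
 * reported in an RX descriptor into "dB above the noise floor": when the
 * raw value exceeds RT2560_NOISE_FLOOR + rssi_corr it yields
 * raw - RT2560_NOISE_FLOOR - rssi_corr, otherwise 0.  The RX path adds
 * RT2560_NOISE_FLOOR back in when filling the radiotap header, so (with
 * purely illustrative numbers) a corrected result of 35 dB over a
 * -95 dBm floor would be reported as an absolute antsignal of -60 dBm.
 */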
static uint8_t rt2560_bbp_read(struct rt2560_softc *, uint8_t); static void rt2560_rf_write(struct rt2560_softc *, uint8_t, uint32_t); static void rt2560_set_chan(struct rt2560_softc *, struct ieee80211_channel *); #if 0 static void rt2560_disable_rf_tune(struct rt2560_softc *); #endif static void rt2560_enable_tsf_sync(struct rt2560_softc *); static void rt2560_enable_tsf(struct rt2560_softc *); static void rt2560_update_plcp(struct rt2560_softc *); static void rt2560_update_slot(struct ieee80211com *); static void rt2560_set_basicrates(struct rt2560_softc *, const struct ieee80211_rateset *); static void rt2560_update_led(struct rt2560_softc *, int, int); static void rt2560_set_bssid(struct rt2560_softc *, const uint8_t *); static void rt2560_set_macaddr(struct rt2560_softc *, const uint8_t *); static void rt2560_get_macaddr(struct rt2560_softc *, uint8_t *); static void rt2560_update_promisc(struct ieee80211com *); static const char *rt2560_get_rf(int); static void rt2560_read_config(struct rt2560_softc *); static int rt2560_bbp_init(struct rt2560_softc *); static void rt2560_set_txantenna(struct rt2560_softc *, int); static void rt2560_set_rxantenna(struct rt2560_softc *, int); static void rt2560_init_locked(struct rt2560_softc *); static void rt2560_init(void *); static void rt2560_stop_locked(struct rt2560_softc *); static int rt2560_raw_xmit(struct ieee80211_node *, struct mbuf *, const struct ieee80211_bpf_params *); static const struct { uint32_t reg; uint32_t val; } rt2560_def_mac[] = { RT2560_DEF_MAC }; static const struct { uint8_t reg; uint8_t val; } rt2560_def_bbp[] = { RT2560_DEF_BBP }; static const uint32_t rt2560_rf2522_r2[] = RT2560_RF2522_R2; static const uint32_t rt2560_rf2523_r2[] = RT2560_RF2523_R2; static const uint32_t rt2560_rf2524_r2[] = RT2560_RF2524_R2; static const uint32_t rt2560_rf2525_r2[] = RT2560_RF2525_R2; static const uint32_t rt2560_rf2525_hi_r2[] = RT2560_RF2525_HI_R2; static const uint32_t rt2560_rf2525e_r2[] = RT2560_RF2525E_R2; static const uint32_t rt2560_rf2526_r2[] = RT2560_RF2526_R2; static const uint32_t rt2560_rf2526_hi_r2[] = RT2560_RF2526_HI_R2; static const uint8_t rt2560_chan_2ghz[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 }; static const uint8_t rt2560_chan_5ghz[] = { 36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161 }; static const struct { uint8_t chan; uint32_t r1, r2, r4; } rt2560_rf5222[] = { RT2560_RF5222 }; int rt2560_attach(device_t dev, int id) { struct rt2560_softc *sc = device_get_softc(dev); struct ieee80211com *ic = &sc->sc_ic; int error; sc->sc_dev = dev; mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF | MTX_RECURSE); callout_init_mtx(&sc->watchdog_ch, &sc->sc_mtx, 0); mbufq_init(&sc->sc_snd, ifqmaxlen); /* retrieve RT2560 rev. no */ sc->asic_rev = RAL_READ(sc, RT2560_CSR0); /* retrieve RF rev. no and various other things from EEPROM */ rt2560_read_config(sc); device_printf(dev, "MAC/BBP RT2560 (rev 0x%02x), RF %s\n", sc->asic_rev, rt2560_get_rf(sc->rf_rev)); /* * Allocate Tx and Rx rings. 
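 *
 * Five rings are set up below: txq (data frames, routed through the
 * cipher engine), atimq, prioq (management and raw frames), bcnq
 * (beacons) and the rxq receive ring, each sized by its respective
 * RT2560_*_RING_COUNT constant.  A failure at any step unwinds the
 * rings already allocated via the fail1..fail5 labels.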
*/ error = rt2560_alloc_tx_ring(sc, &sc->txq, RT2560_TX_RING_COUNT); if (error != 0) { device_printf(sc->sc_dev, "could not allocate Tx ring\n"); goto fail1; } error = rt2560_alloc_tx_ring(sc, &sc->atimq, RT2560_ATIM_RING_COUNT); if (error != 0) { device_printf(sc->sc_dev, "could not allocate ATIM ring\n"); goto fail2; } error = rt2560_alloc_tx_ring(sc, &sc->prioq, RT2560_PRIO_RING_COUNT); if (error != 0) { device_printf(sc->sc_dev, "could not allocate Prio ring\n"); goto fail3; } error = rt2560_alloc_tx_ring(sc, &sc->bcnq, RT2560_BEACON_RING_COUNT); if (error != 0) { device_printf(sc->sc_dev, "could not allocate Beacon ring\n"); goto fail4; } error = rt2560_alloc_rx_ring(sc, &sc->rxq, RT2560_RX_RING_COUNT); if (error != 0) { device_printf(sc->sc_dev, "could not allocate Rx ring\n"); goto fail5; } /* retrieve MAC address */ rt2560_get_macaddr(sc, ic->ic_macaddr); ic->ic_softc = sc; ic->ic_name = device_get_nameunit(dev); ic->ic_opmode = IEEE80211_M_STA; ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ /* set device capabilities */ ic->ic_caps = IEEE80211_C_STA /* station mode */ | IEEE80211_C_IBSS /* ibss, nee adhoc, mode */ | IEEE80211_C_HOSTAP /* hostap mode */ | IEEE80211_C_MONITOR /* monitor mode */ | IEEE80211_C_AHDEMO /* adhoc demo mode */ | IEEE80211_C_WDS /* 4-address traffic works */ | IEEE80211_C_MBSS /* mesh point link mode */ | IEEE80211_C_SHPREAMBLE /* short preamble supported */ | IEEE80211_C_SHSLOT /* short slot time supported */ | IEEE80211_C_WPA /* capable of WPA1+WPA2 */ | IEEE80211_C_BGSCAN /* capable of bg scanning */ #ifdef notyet | IEEE80211_C_TXFRAG /* handle tx frags */ #endif ; rt2560_getradiocaps(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans, ic->ic_channels); ieee80211_ifattach(ic); ic->ic_raw_xmit = rt2560_raw_xmit; ic->ic_updateslot = rt2560_update_slot; ic->ic_update_promisc = rt2560_update_promisc; ic->ic_scan_start = rt2560_scan_start; ic->ic_scan_end = rt2560_scan_end; ic->ic_getradiocaps = rt2560_getradiocaps; ic->ic_set_channel = rt2560_set_channel; ic->ic_vap_create = rt2560_vap_create; ic->ic_vap_delete = rt2560_vap_delete; ic->ic_parent = rt2560_parent; ic->ic_transmit = rt2560_transmit; ieee80211_radiotap_attach(ic, &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap), RT2560_TX_RADIOTAP_PRESENT, &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap), RT2560_RX_RADIOTAP_PRESENT); /* * Add a few sysctl knobs. 
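 *
 * These land under the device's sysctl tree: "txantenna" and
 * "rxantenna" select an antenna (0 = automatic), and "debug" (RAL_DEBUG
 * builds only) gates the DPRINTF output.  Assuming the device attaches
 * as ral0, something along the lines of
 *
 *	sysctl dev.ral.0.txantenna=1
 *
 * would select antenna 1 instead of automatic selection.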
*/ #ifdef RAL_DEBUG SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug", CTLFLAG_RW, &sc->sc_debug, 0, "debug msgs"); #endif SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "txantenna", CTLFLAG_RW, &sc->tx_ant, 0, "tx antenna (0=auto)"); SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "rxantenna", CTLFLAG_RW, &sc->rx_ant, 0, "rx antenna (0=auto)"); if (bootverbose) ieee80211_announce(ic); return 0; fail5: rt2560_free_tx_ring(sc, &sc->bcnq); fail4: rt2560_free_tx_ring(sc, &sc->prioq); fail3: rt2560_free_tx_ring(sc, &sc->atimq); fail2: rt2560_free_tx_ring(sc, &sc->txq); fail1: mtx_destroy(&sc->sc_mtx); return ENXIO; } int rt2560_detach(void *xsc) { struct rt2560_softc *sc = xsc; struct ieee80211com *ic = &sc->sc_ic; rt2560_stop(sc); ieee80211_ifdetach(ic); mbufq_drain(&sc->sc_snd); rt2560_free_tx_ring(sc, &sc->txq); rt2560_free_tx_ring(sc, &sc->atimq); rt2560_free_tx_ring(sc, &sc->prioq); rt2560_free_tx_ring(sc, &sc->bcnq); rt2560_free_rx_ring(sc, &sc->rxq); mtx_destroy(&sc->sc_mtx); return 0; } static struct ieee80211vap * rt2560_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, enum ieee80211_opmode opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac[IEEE80211_ADDR_LEN]) { struct rt2560_softc *sc = ic->ic_softc; struct rt2560_vap *rvp; struct ieee80211vap *vap; switch (opmode) { case IEEE80211_M_STA: case IEEE80211_M_IBSS: case IEEE80211_M_AHDEMO: case IEEE80211_M_MONITOR: case IEEE80211_M_HOSTAP: case IEEE80211_M_MBSS: /* XXXRP: TBD */ if (!TAILQ_EMPTY(&ic->ic_vaps)) { device_printf(sc->sc_dev, "only 1 vap supported\n"); return NULL; } if (opmode == IEEE80211_M_STA) flags |= IEEE80211_CLONE_NOBEACONS; break; case IEEE80211_M_WDS: if (TAILQ_EMPTY(&ic->ic_vaps) || ic->ic_opmode != IEEE80211_M_HOSTAP) { device_printf(sc->sc_dev, "wds only supported in ap mode\n"); return NULL; } /* * Silently remove any request for a unique * bssid; WDS vap's always share the local * mac address. 
*/ flags &= ~IEEE80211_CLONE_BSSID; break; default: device_printf(sc->sc_dev, "unknown opmode %d\n", opmode); return NULL; } rvp = malloc(sizeof(struct rt2560_vap), M_80211_VAP, M_WAITOK | M_ZERO); vap = &rvp->ral_vap; ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid); /* override state transition machine */ rvp->ral_newstate = vap->iv_newstate; vap->iv_newstate = rt2560_newstate; vap->iv_update_beacon = rt2560_beacon_update; ieee80211_ratectl_init(vap); /* complete setup */ ieee80211_vap_attach(vap, ieee80211_media_change, ieee80211_media_status, mac); if (TAILQ_FIRST(&ic->ic_vaps) == vap) ic->ic_opmode = opmode; return vap; } static void rt2560_vap_delete(struct ieee80211vap *vap) { struct rt2560_vap *rvp = RT2560_VAP(vap); ieee80211_ratectl_deinit(vap); ieee80211_vap_detach(vap); free(rvp, M_80211_VAP); } void rt2560_resume(void *xsc) { struct rt2560_softc *sc = xsc; if (sc->sc_ic.ic_nrunning > 0) rt2560_init(sc); } static void rt2560_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) { if (error != 0) return; KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); *(bus_addr_t *)arg = segs[0].ds_addr; } static int rt2560_alloc_tx_ring(struct rt2560_softc *sc, struct rt2560_tx_ring *ring, int count) { int i, error; ring->count = count; ring->queued = 0; ring->cur = ring->next = 0; ring->cur_encrypt = ring->next_encrypt = 0; error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 4, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, count * RT2560_TX_DESC_SIZE, 1, count * RT2560_TX_DESC_SIZE, 0, NULL, NULL, &ring->desc_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create desc DMA tag\n"); goto fail; } error = bus_dmamem_alloc(ring->desc_dmat, (void **)&ring->desc, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_map); if (error != 0) { device_printf(sc->sc_dev, "could not allocate DMA memory\n"); goto fail; } error = bus_dmamap_load(ring->desc_dmat, ring->desc_map, ring->desc, count * RT2560_TX_DESC_SIZE, rt2560_dma_map_addr, &ring->physaddr, 0); if (error != 0) { device_printf(sc->sc_dev, "could not load desc DMA map\n"); goto fail; } ring->data = malloc(count * sizeof (struct rt2560_tx_data), M_DEVBUF, M_NOWAIT | M_ZERO); if (ring->data == NULL) { device_printf(sc->sc_dev, "could not allocate soft data\n"); error = ENOMEM; goto fail; } error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, RT2560_MAX_SCATTER, MCLBYTES, 0, NULL, NULL, &ring->data_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create data DMA tag\n"); goto fail; } for (i = 0; i < count; i++) { error = bus_dmamap_create(ring->data_dmat, 0, &ring->data[i].map); if (error != 0) { device_printf(sc->sc_dev, "could not create DMA map\n"); goto fail; } } return 0; fail: rt2560_free_tx_ring(sc, ring); return error; } static void rt2560_reset_tx_ring(struct rt2560_softc *sc, struct rt2560_tx_ring *ring) { struct rt2560_tx_desc *desc; struct rt2560_tx_data *data; int i; for (i = 0; i < ring->count; i++) { desc = &ring->desc[i]; data = &ring->data[i]; if (data->m != NULL) { bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->data_dmat, data->map); m_freem(data->m); data->m = NULL; } if (data->ni != NULL) { ieee80211_free_node(data->ni); data->ni = NULL; } desc->flags = 0; } bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_PREWRITE); ring->queued = 0; ring->cur = ring->next = 0; ring->cur_encrypt = ring->next_encrypt = 0; } static 
void rt2560_free_tx_ring(struct rt2560_softc *sc, struct rt2560_tx_ring *ring) { struct rt2560_tx_data *data; int i; if (ring->desc != NULL) { bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->desc_dmat, ring->desc_map); bus_dmamem_free(ring->desc_dmat, ring->desc, ring->desc_map); } if (ring->desc_dmat != NULL) bus_dma_tag_destroy(ring->desc_dmat); if (ring->data != NULL) { for (i = 0; i < ring->count; i++) { data = &ring->data[i]; if (data->m != NULL) { bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->data_dmat, data->map); m_freem(data->m); } if (data->ni != NULL) ieee80211_free_node(data->ni); if (data->map != NULL) bus_dmamap_destroy(ring->data_dmat, data->map); } free(ring->data, M_DEVBUF); } if (ring->data_dmat != NULL) bus_dma_tag_destroy(ring->data_dmat); } static int rt2560_alloc_rx_ring(struct rt2560_softc *sc, struct rt2560_rx_ring *ring, int count) { struct rt2560_rx_desc *desc; struct rt2560_rx_data *data; bus_addr_t physaddr; int i, error; ring->count = count; ring->cur = ring->next = 0; ring->cur_decrypt = 0; error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 4, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, count * RT2560_RX_DESC_SIZE, 1, count * RT2560_RX_DESC_SIZE, 0, NULL, NULL, &ring->desc_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create desc DMA tag\n"); goto fail; } error = bus_dmamem_alloc(ring->desc_dmat, (void **)&ring->desc, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_map); if (error != 0) { device_printf(sc->sc_dev, "could not allocate DMA memory\n"); goto fail; } error = bus_dmamap_load(ring->desc_dmat, ring->desc_map, ring->desc, count * RT2560_RX_DESC_SIZE, rt2560_dma_map_addr, &ring->physaddr, 0); if (error != 0) { device_printf(sc->sc_dev, "could not load desc DMA map\n"); goto fail; } ring->data = malloc(count * sizeof (struct rt2560_rx_data), M_DEVBUF, M_NOWAIT | M_ZERO); if (ring->data == NULL) { device_printf(sc->sc_dev, "could not allocate soft data\n"); error = ENOMEM; goto fail; } /* * Pre-allocate Rx buffers and populate Rx ring. 
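 *
 * Each of the 'count' descriptors gets its own DMA map and an mbuf
 * cluster; the cluster's bus address is stored in desc->physaddr and the
 * descriptor is handed to the hardware by setting RT2560_RX_BUSY.  The
 * hardware clears that bit once it has written a frame, and the receive
 * handlers set it again after the slot has been refilled, so the BUSY
 * flag acts as the ownership handshake for the ring.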
*/ error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, MCLBYTES, 0, NULL, NULL, &ring->data_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create data DMA tag\n"); goto fail; } for (i = 0; i < count; i++) { desc = &sc->rxq.desc[i]; data = &sc->rxq.data[i]; error = bus_dmamap_create(ring->data_dmat, 0, &data->map); if (error != 0) { device_printf(sc->sc_dev, "could not create DMA map\n"); goto fail; } data->m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (data->m == NULL) { device_printf(sc->sc_dev, "could not allocate rx mbuf\n"); error = ENOMEM; goto fail; } error = bus_dmamap_load(ring->data_dmat, data->map, mtod(data->m, void *), MCLBYTES, rt2560_dma_map_addr, &physaddr, 0); if (error != 0) { device_printf(sc->sc_dev, "could not load rx buf DMA map"); goto fail; } desc->flags = htole32(RT2560_RX_BUSY); desc->physaddr = htole32(physaddr); } bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_PREWRITE); return 0; fail: rt2560_free_rx_ring(sc, ring); return error; } static void rt2560_reset_rx_ring(struct rt2560_softc *sc, struct rt2560_rx_ring *ring) { int i; for (i = 0; i < ring->count; i++) { ring->desc[i].flags = htole32(RT2560_RX_BUSY); ring->data[i].drop = 0; } bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_PREWRITE); ring->cur = ring->next = 0; ring->cur_decrypt = 0; } static void rt2560_free_rx_ring(struct rt2560_softc *sc, struct rt2560_rx_ring *ring) { struct rt2560_rx_data *data; int i; if (ring->desc != NULL) { bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->desc_dmat, ring->desc_map); bus_dmamem_free(ring->desc_dmat, ring->desc, ring->desc_map); } if (ring->desc_dmat != NULL) bus_dma_tag_destroy(ring->desc_dmat); if (ring->data != NULL) { for (i = 0; i < ring->count; i++) { data = &ring->data[i]; if (data->m != NULL) { bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(ring->data_dmat, data->map); m_freem(data->m); } if (data->map != NULL) bus_dmamap_destroy(ring->data_dmat, data->map); } free(ring->data, M_DEVBUF); } if (ring->data_dmat != NULL) bus_dma_tag_destroy(ring->data_dmat); } static int rt2560_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct rt2560_vap *rvp = RT2560_VAP(vap); struct rt2560_softc *sc = vap->iv_ic->ic_softc; int error; if (nstate == IEEE80211_S_INIT && vap->iv_state == IEEE80211_S_RUN) { /* abort TSF synchronization */ RAL_WRITE(sc, RT2560_CSR14, 0); /* turn association led off */ rt2560_update_led(sc, 0, 0); } error = rvp->ral_newstate(vap, nstate, arg); if (error == 0 && nstate == IEEE80211_S_RUN) { struct ieee80211_node *ni = vap->iv_bss; struct mbuf *m; if (vap->iv_opmode != IEEE80211_M_MONITOR) { rt2560_update_plcp(sc); rt2560_set_basicrates(sc, &ni->ni_rates); rt2560_set_bssid(sc, ni->ni_bssid); } if (vap->iv_opmode == IEEE80211_M_HOSTAP || vap->iv_opmode == IEEE80211_M_IBSS || vap->iv_opmode == IEEE80211_M_MBSS) { m = ieee80211_beacon_alloc(ni); if (m == NULL) { device_printf(sc->sc_dev, "could not allocate beacon\n"); return ENOBUFS; } ieee80211_ref_node(ni); error = rt2560_tx_bcn(sc, m, ni); if (error != 0) return error; } /* turn association led on */ rt2560_update_led(sc, 1, 0); if (vap->iv_opmode != IEEE80211_M_MONITOR) rt2560_enable_tsf_sync(sc); else rt2560_enable_tsf(sc); } return error; } /* * Read 16 bits at address 'addr' from the serial EEPROM (either 93C46 or * 93C66). 
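 *
 * The access is bit-banged via RT2560_EEPROM_CTL(): a start bit and the
 * READ opcode (binary 10) are clocked out, followed by the address MSB
 * first -- six address bits when CSR21 reports a 93C46 part, eight
 * otherwise -- after which the sixteen data bits are clocked back in,
 * MSB first, by sampling the Q line in RT2560_CSR21.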
*/ static uint16_t rt2560_eeprom_read(struct rt2560_softc *sc, uint8_t addr) { uint32_t tmp; uint16_t val; int n; /* clock C once before the first command */ RT2560_EEPROM_CTL(sc, 0); RT2560_EEPROM_CTL(sc, RT2560_S); RT2560_EEPROM_CTL(sc, RT2560_S | RT2560_C); RT2560_EEPROM_CTL(sc, RT2560_S); /* write start bit (1) */ RT2560_EEPROM_CTL(sc, RT2560_S | RT2560_D); RT2560_EEPROM_CTL(sc, RT2560_S | RT2560_D | RT2560_C); /* write READ opcode (10) */ RT2560_EEPROM_CTL(sc, RT2560_S | RT2560_D); RT2560_EEPROM_CTL(sc, RT2560_S | RT2560_D | RT2560_C); RT2560_EEPROM_CTL(sc, RT2560_S); RT2560_EEPROM_CTL(sc, RT2560_S | RT2560_C); /* write address (A5-A0 or A7-A0) */ n = (RAL_READ(sc, RT2560_CSR21) & RT2560_93C46) ? 5 : 7; for (; n >= 0; n--) { RT2560_EEPROM_CTL(sc, RT2560_S | (((addr >> n) & 1) << RT2560_SHIFT_D)); RT2560_EEPROM_CTL(sc, RT2560_S | (((addr >> n) & 1) << RT2560_SHIFT_D) | RT2560_C); } RT2560_EEPROM_CTL(sc, RT2560_S); /* read data Q15-Q0 */ val = 0; for (n = 15; n >= 0; n--) { RT2560_EEPROM_CTL(sc, RT2560_S | RT2560_C); tmp = RAL_READ(sc, RT2560_CSR21); val |= ((tmp & RT2560_Q) >> RT2560_SHIFT_Q) << n; RT2560_EEPROM_CTL(sc, RT2560_S); } RT2560_EEPROM_CTL(sc, 0); /* clear Chip Select and clock C */ RT2560_EEPROM_CTL(sc, RT2560_S); RT2560_EEPROM_CTL(sc, 0); RT2560_EEPROM_CTL(sc, RT2560_C); return val; } /* * Some frames were processed by the hardware cipher engine and are ready for * transmission. */ static void rt2560_encryption_intr(struct rt2560_softc *sc) { struct rt2560_tx_desc *desc; int hw; /* retrieve last descriptor index processed by cipher engine */ hw = RAL_READ(sc, RT2560_SECCSR1) - sc->txq.physaddr; hw /= RT2560_TX_DESC_SIZE; bus_dmamap_sync(sc->txq.desc_dmat, sc->txq.desc_map, BUS_DMASYNC_POSTREAD); while (sc->txq.next_encrypt != hw) { if (sc->txq.next_encrypt == sc->txq.cur_encrypt) { printf("hw encrypt %d, cur_encrypt %d\n", hw, sc->txq.cur_encrypt); break; } desc = &sc->txq.desc[sc->txq.next_encrypt]; if ((le32toh(desc->flags) & RT2560_TX_BUSY) || (le32toh(desc->flags) & RT2560_TX_CIPHER_BUSY)) break; /* for TKIP, swap eiv field to fix a bug in ASIC */ if ((le32toh(desc->flags) & RT2560_TX_CIPHER_MASK) == RT2560_TX_CIPHER_TKIP) desc->eiv = bswap32(desc->eiv); /* mark the frame ready for transmission */ desc->flags |= htole32(RT2560_TX_VALID); desc->flags |= htole32(RT2560_TX_BUSY); DPRINTFN(sc, 15, "encryption done idx=%u\n", sc->txq.next_encrypt); sc->txq.next_encrypt = (sc->txq.next_encrypt + 1) % RT2560_TX_RING_COUNT; } bus_dmamap_sync(sc->txq.desc_dmat, sc->txq.desc_map, BUS_DMASYNC_PREWRITE); /* kick Tx */ RAL_WRITE(sc, RT2560_TXCSR0, RT2560_KICK_TX); } static void rt2560_tx_intr(struct rt2560_softc *sc) { struct ieee80211_ratectl_tx_status *txs = &sc->sc_txs; struct rt2560_tx_desc *desc; struct rt2560_tx_data *data; struct mbuf *m; struct ieee80211_node *ni; uint32_t flags; int status; bus_dmamap_sync(sc->txq.desc_dmat, sc->txq.desc_map, BUS_DMASYNC_POSTREAD); txs->flags = IEEE80211_RATECTL_STATUS_LONG_RETRY; for (;;) { desc = &sc->txq.desc[sc->txq.next]; data = &sc->txq.data[sc->txq.next]; flags = le32toh(desc->flags); if ((flags & RT2560_TX_BUSY) || (flags & RT2560_TX_CIPHER_BUSY) || !(flags & RT2560_TX_VALID)) break; m = data->m; ni = data->ni; switch (flags & RT2560_TX_RESULT_MASK) { case RT2560_TX_SUCCESS: txs->status = IEEE80211_RATECTL_TX_SUCCESS; txs->long_retries = 0; DPRINTFN(sc, 10, "%s\n", "data frame sent successfully"); if (data->rix != IEEE80211_FIXED_RATE_NONE) ieee80211_ratectl_tx_complete(ni, txs); status = 0; break; case RT2560_TX_SUCCESS_RETRY: 
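		/*
		 * The frame went out, but only after one or more retries;
		 * the retry count is pulled out of the descriptor flags
		 * with RT2560_TX_RETRYCNT() (the prio path below extracts
		 * the same field explicitly as (flags >> 5) & 0x7) and is
		 * handed to the rate control module as the long-retry
		 * count.
		 */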
txs->status = IEEE80211_RATECTL_TX_SUCCESS; txs->long_retries = RT2560_TX_RETRYCNT(flags); DPRINTFN(sc, 9, "data frame sent after %u retries\n", txs->long_retries); if (data->rix != IEEE80211_FIXED_RATE_NONE) ieee80211_ratectl_tx_complete(ni, txs); status = 0; break; case RT2560_TX_FAIL_RETRY: txs->status = IEEE80211_RATECTL_TX_FAIL_LONG; txs->long_retries = RT2560_TX_RETRYCNT(flags); DPRINTFN(sc, 9, "data frame failed after %d retries\n", txs->long_retries); if (data->rix != IEEE80211_FIXED_RATE_NONE) ieee80211_ratectl_tx_complete(ni, txs); status = 1; break; case RT2560_TX_FAIL_INVALID: case RT2560_TX_FAIL_OTHER: default: device_printf(sc->sc_dev, "sending data frame failed " "0x%08x\n", flags); status = 1; } bus_dmamap_sync(sc->txq.data_dmat, data->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->txq.data_dmat, data->map); ieee80211_tx_complete(ni, m, status); data->ni = NULL; data->m = NULL; /* descriptor is no longer valid */ desc->flags &= ~htole32(RT2560_TX_VALID); DPRINTFN(sc, 15, "tx done idx=%u\n", sc->txq.next); sc->txq.queued--; sc->txq.next = (sc->txq.next + 1) % RT2560_TX_RING_COUNT; } bus_dmamap_sync(sc->txq.desc_dmat, sc->txq.desc_map, BUS_DMASYNC_PREWRITE); if (sc->prioq.queued == 0 && sc->txq.queued == 0) sc->sc_tx_timer = 0; if (sc->txq.queued < RT2560_TX_RING_COUNT - 1) rt2560_start(sc); } static void rt2560_prio_intr(struct rt2560_softc *sc) { struct rt2560_tx_desc *desc; struct rt2560_tx_data *data; struct ieee80211_node *ni; struct mbuf *m; int flags; bus_dmamap_sync(sc->prioq.desc_dmat, sc->prioq.desc_map, BUS_DMASYNC_POSTREAD); for (;;) { desc = &sc->prioq.desc[sc->prioq.next]; data = &sc->prioq.data[sc->prioq.next]; flags = le32toh(desc->flags); if ((flags & RT2560_TX_BUSY) || (flags & RT2560_TX_VALID) == 0) break; switch (flags & RT2560_TX_RESULT_MASK) { case RT2560_TX_SUCCESS: DPRINTFN(sc, 10, "%s\n", "mgt frame sent successfully"); break; case RT2560_TX_SUCCESS_RETRY: DPRINTFN(sc, 9, "mgt frame sent after %u retries\n", (flags >> 5) & 0x7); break; case RT2560_TX_FAIL_RETRY: DPRINTFN(sc, 9, "%s\n", "sending mgt frame failed (too much retries)"); break; case RT2560_TX_FAIL_INVALID: case RT2560_TX_FAIL_OTHER: default: device_printf(sc->sc_dev, "sending mgt frame failed " "0x%08x\n", flags); break; } bus_dmamap_sync(sc->prioq.data_dmat, data->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->prioq.data_dmat, data->map); m = data->m; data->m = NULL; ni = data->ni; data->ni = NULL; /* descriptor is no longer valid */ desc->flags &= ~htole32(RT2560_TX_VALID); DPRINTFN(sc, 15, "prio done idx=%u\n", sc->prioq.next); sc->prioq.queued--; sc->prioq.next = (sc->prioq.next + 1) % RT2560_PRIO_RING_COUNT; if (m->m_flags & M_TXCB) ieee80211_process_callback(ni, m, (flags & RT2560_TX_RESULT_MASK) &~ (RT2560_TX_SUCCESS | RT2560_TX_SUCCESS_RETRY)); m_freem(m); ieee80211_free_node(ni); } bus_dmamap_sync(sc->prioq.desc_dmat, sc->prioq.desc_map, BUS_DMASYNC_PREWRITE); if (sc->prioq.queued == 0 && sc->txq.queued == 0) sc->sc_tx_timer = 0; if (sc->prioq.queued < RT2560_PRIO_RING_COUNT) rt2560_start(sc); } /* * Some frames were processed by the hardware cipher engine and are ready for * handoff to the IEEE802.11 layer. 
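 *
 * Reception is a two stage pipeline: rt2560_rx_intr() below validates
 * each completed descriptor, marks it RT2560_RX_CIPHER_BUSY and kicks
 * the cipher engine through RT2560_SECCSR0; once the engine is done,
 * this handler locates the last processed entry by reading SECCSR0 and
 * translating it back to a ring index,
 *
 *	hw = (RAL_READ(sc, RT2560_SECCSR0) - sc->rxq.physaddr) /
 *	    RT2560_RX_DESC_SIZE;
 *
 * then swaps in a fresh mbuf cluster for each slot up to that index and
 * passes the received frame to net80211.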
*/ static void rt2560_decryption_intr(struct rt2560_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct rt2560_rx_desc *desc; struct rt2560_rx_data *data; bus_addr_t physaddr; struct ieee80211_frame *wh; struct ieee80211_node *ni; struct mbuf *mnew, *m; int hw, error; int8_t rssi, nf; /* retrieve last descriptor index processed by cipher engine */ hw = RAL_READ(sc, RT2560_SECCSR0) - sc->rxq.physaddr; hw /= RT2560_RX_DESC_SIZE; bus_dmamap_sync(sc->rxq.desc_dmat, sc->rxq.desc_map, BUS_DMASYNC_POSTREAD); for (; sc->rxq.cur_decrypt != hw;) { desc = &sc->rxq.desc[sc->rxq.cur_decrypt]; data = &sc->rxq.data[sc->rxq.cur_decrypt]; if ((le32toh(desc->flags) & RT2560_RX_BUSY) || (le32toh(desc->flags) & RT2560_RX_CIPHER_BUSY)) break; if (data->drop) { counter_u64_add(ic->ic_ierrors, 1); goto skip; } if ((le32toh(desc->flags) & RT2560_RX_CIPHER_MASK) != 0 && (le32toh(desc->flags) & RT2560_RX_ICV_ERROR)) { counter_u64_add(ic->ic_ierrors, 1); goto skip; } /* * Try to allocate a new mbuf for this ring element and load it * before processing the current mbuf. If the ring element * cannot be loaded, drop the received packet and reuse the old * mbuf. In the unlikely case that the old mbuf can't be * reloaded either, explicitly panic. */ mnew = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (mnew == NULL) { counter_u64_add(ic->ic_ierrors, 1); goto skip; } bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->rxq.data_dmat, data->map); error = bus_dmamap_load(sc->rxq.data_dmat, data->map, mtod(mnew, void *), MCLBYTES, rt2560_dma_map_addr, &physaddr, 0); if (error != 0) { m_freem(mnew); /* try to reload the old mbuf */ error = bus_dmamap_load(sc->rxq.data_dmat, data->map, mtod(data->m, void *), MCLBYTES, rt2560_dma_map_addr, &physaddr, 0); if (error != 0) { /* very unlikely that it will fail... */ panic("%s: could not load old rx mbuf", device_get_name(sc->sc_dev)); } counter_u64_add(ic->ic_ierrors, 1); goto skip; } /* * New mbuf successfully loaded, update Rx ring and continue * processing. */ m = data->m; data->m = mnew; desc->physaddr = htole32(physaddr); /* finalize mbuf */ m->m_pkthdr.len = m->m_len = (le32toh(desc->flags) >> 16) & 0xfff; rssi = RT2560_RSSI(sc, desc->rssi); nf = RT2560_NOISE_FLOOR; if (ieee80211_radiotap_active(ic)) { struct rt2560_rx_radiotap_header *tap = &sc->sc_rxtap; uint32_t tsf_lo, tsf_hi; /* get timestamp (low and high 32 bits) */ tsf_hi = RAL_READ(sc, RT2560_CSR17); tsf_lo = RAL_READ(sc, RT2560_CSR16); tap->wr_tsf = htole64(((uint64_t)tsf_hi << 32) | tsf_lo); tap->wr_flags = 0; tap->wr_rate = ieee80211_plcp2rate(desc->rate, (desc->flags & htole32(RT2560_RX_OFDM)) ? IEEE80211_T_OFDM : IEEE80211_T_CCK); tap->wr_antenna = sc->rx_ant; tap->wr_antsignal = nf + rssi; tap->wr_antnoise = nf; } sc->sc_flags |= RT2560_F_INPUT_RUNNING; RAL_UNLOCK(sc); wh = mtod(m, struct ieee80211_frame *); ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh); if (ni != NULL) { (void) ieee80211_input(ni, m, rssi, nf); ieee80211_free_node(ni); } else (void) ieee80211_input_all(ic, m, rssi, nf); RAL_LOCK(sc); sc->sc_flags &= ~RT2560_F_INPUT_RUNNING; skip: desc->flags = htole32(RT2560_RX_BUSY); DPRINTFN(sc, 15, "decryption done idx=%u\n", sc->rxq.cur_decrypt); sc->rxq.cur_decrypt = (sc->rxq.cur_decrypt + 1) % RT2560_RX_RING_COUNT; } bus_dmamap_sync(sc->rxq.desc_dmat, sc->rxq.desc_map, BUS_DMASYNC_PREWRITE); } /* * Some frames were received. Pass them to the hardware cipher engine before * sending them to the 802.11 layer. 
*/ static void rt2560_rx_intr(struct rt2560_softc *sc) { struct rt2560_rx_desc *desc; struct rt2560_rx_data *data; bus_dmamap_sync(sc->rxq.desc_dmat, sc->rxq.desc_map, BUS_DMASYNC_POSTREAD); for (;;) { desc = &sc->rxq.desc[sc->rxq.cur]; data = &sc->rxq.data[sc->rxq.cur]; if ((le32toh(desc->flags) & RT2560_RX_BUSY) || (le32toh(desc->flags) & RT2560_RX_CIPHER_BUSY)) break; data->drop = 0; if ((le32toh(desc->flags) & RT2560_RX_PHY_ERROR) || (le32toh(desc->flags) & RT2560_RX_CRC_ERROR)) { /* * This should not happen since we did not request * to receive those frames when we filled RXCSR0. */ DPRINTFN(sc, 5, "PHY or CRC error flags 0x%08x\n", le32toh(desc->flags)); data->drop = 1; } if (((le32toh(desc->flags) >> 16) & 0xfff) > MCLBYTES) { DPRINTFN(sc, 5, "%s\n", "bad length"); data->drop = 1; } /* mark the frame for decryption */ desc->flags |= htole32(RT2560_RX_CIPHER_BUSY); DPRINTFN(sc, 15, "rx done idx=%u\n", sc->rxq.cur); sc->rxq.cur = (sc->rxq.cur + 1) % RT2560_RX_RING_COUNT; } bus_dmamap_sync(sc->rxq.desc_dmat, sc->rxq.desc_map, BUS_DMASYNC_PREWRITE); /* kick decrypt */ RAL_WRITE(sc, RT2560_SECCSR0, RT2560_KICK_DECRYPT); } static void rt2560_beacon_update(struct ieee80211vap *vap, int item) { struct ieee80211_beacon_offsets *bo = &vap->iv_bcn_off; setbit(bo->bo_flags, item); } /* * This function is called periodically in IBSS mode when a new beacon must be * sent out. */ static void rt2560_beacon_expire(struct rt2560_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct rt2560_tx_data *data; if (ic->ic_opmode != IEEE80211_M_IBSS && ic->ic_opmode != IEEE80211_M_HOSTAP && ic->ic_opmode != IEEE80211_M_MBSS) return; data = &sc->bcnq.data[sc->bcnq.next]; /* * Don't send beacon if bsschan isn't set */ if (data->ni == NULL) return; bus_dmamap_sync(sc->bcnq.data_dmat, data->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->bcnq.data_dmat, data->map); /* XXX 1 =>'s mcast frames which means all PS sta's will wakeup! */ ieee80211_beacon_update(data->ni, data->m, 1); rt2560_tx_bcn(sc, data->m, data->ni); DPRINTFN(sc, 15, "%s", "beacon expired\n"); sc->bcnq.next = (sc->bcnq.next + 1) % RT2560_BEACON_RING_COUNT; } /* ARGSUSED */ static void rt2560_wakeup_expire(struct rt2560_softc *sc) { DPRINTFN(sc, 2, "%s", "wakeup expired\n"); } void rt2560_intr(void *arg) { struct rt2560_softc *sc = arg; uint32_t r; RAL_LOCK(sc); /* disable interrupts */ RAL_WRITE(sc, RT2560_CSR8, 0xffffffff); /* don't re-enable interrupts if we're shutting down */ if (!(sc->sc_flags & RT2560_F_RUNNING)) { RAL_UNLOCK(sc); return; } r = RAL_READ(sc, RT2560_CSR7); RAL_WRITE(sc, RT2560_CSR7, r); if (r & RT2560_BEACON_EXPIRE) rt2560_beacon_expire(sc); if (r & RT2560_WAKEUP_EXPIRE) rt2560_wakeup_expire(sc); if (r & RT2560_ENCRYPTION_DONE) rt2560_encryption_intr(sc); if (r & RT2560_TX_DONE) rt2560_tx_intr(sc); if (r & RT2560_PRIO_DONE) rt2560_prio_intr(sc); if (r & RT2560_DECRYPTION_DONE) rt2560_decryption_intr(sc); if (r & RT2560_RX_DONE) { rt2560_rx_intr(sc); rt2560_encryption_intr(sc); } /* re-enable interrupts */ RAL_WRITE(sc, RT2560_CSR8, RT2560_INTR_MASK); RAL_UNLOCK(sc); } #define RAL_SIFS 10 /* us */ #define RT2560_TXRX_TURNAROUND 10 /* us */ static uint8_t rt2560_plcp_signal(int rate) { switch (rate) { /* OFDM rates (cf IEEE Std 802.11a-1999, pp. 
14 Table 80) */ case 12: return 0xb; case 18: return 0xf; case 24: return 0xa; case 36: return 0xe; case 48: return 0x9; case 72: return 0xd; case 96: return 0x8; case 108: return 0xc; /* CCK rates (NB: not IEEE std, device-specific) */ case 2: return 0x0; case 4: return 0x1; case 11: return 0x2; case 22: return 0x3; } return 0xff; /* XXX unsupported/unknown rate */ } static void rt2560_setup_tx_desc(struct rt2560_softc *sc, struct rt2560_tx_desc *desc, uint32_t flags, int len, int rate, int encrypt, bus_addr_t physaddr) { struct ieee80211com *ic = &sc->sc_ic; uint16_t plcp_length; int remainder; desc->flags = htole32(flags); desc->flags |= htole32(len << 16); desc->physaddr = htole32(physaddr); desc->wme = htole16( RT2560_AIFSN(2) | RT2560_LOGCWMIN(3) | RT2560_LOGCWMAX(8)); /* setup PLCP fields */ desc->plcp_signal = rt2560_plcp_signal(rate); desc->plcp_service = 4; len += IEEE80211_CRC_LEN; if (ieee80211_rate2phytype(ic->ic_rt, rate) == IEEE80211_T_OFDM) { desc->flags |= htole32(RT2560_TX_OFDM); plcp_length = len & 0xfff; desc->plcp_length_hi = plcp_length >> 6; desc->plcp_length_lo = plcp_length & 0x3f; } else { plcp_length = howmany(16 * len, rate); if (rate == 22) { remainder = (16 * len) % 22; if (remainder != 0 && remainder < 7) desc->plcp_service |= RT2560_PLCP_LENGEXT; } desc->plcp_length_hi = plcp_length >> 8; desc->plcp_length_lo = plcp_length & 0xff; if (rate != 2 && (ic->ic_flags & IEEE80211_F_SHPREAMBLE)) desc->plcp_signal |= 0x08; } if (!encrypt) desc->flags |= htole32(RT2560_TX_VALID); desc->flags |= encrypt ? htole32(RT2560_TX_CIPHER_BUSY) : htole32(RT2560_TX_BUSY); } static int rt2560_tx_bcn(struct rt2560_softc *sc, struct mbuf *m0, struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct rt2560_tx_desc *desc; struct rt2560_tx_data *data; bus_dma_segment_t segs[RT2560_MAX_SCATTER]; int nsegs, rate, error; desc = &sc->bcnq.desc[sc->bcnq.cur]; data = &sc->bcnq.data[sc->bcnq.cur]; /* XXX maybe a separate beacon rate? 
*/ rate = vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)].mgmtrate; error = bus_dmamap_load_mbuf_sg(sc->bcnq.data_dmat, data->map, m0, segs, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { device_printf(sc->sc_dev, "could not map mbuf (error %d)\n", error); m_freem(m0); return error; } if (ieee80211_radiotap_active_vap(vap)) { struct rt2560_tx_radiotap_header *tap = &sc->sc_txtap; tap->wt_flags = 0; tap->wt_rate = rate; tap->wt_antenna = sc->tx_ant; ieee80211_radiotap_tx(vap, m0); } data->m = m0; data->ni = ni; rt2560_setup_tx_desc(sc, desc, RT2560_TX_IFS_NEWBACKOFF | RT2560_TX_TIMESTAMP, m0->m_pkthdr.len, rate, 0, segs->ds_addr); DPRINTFN(sc, 10, "sending beacon frame len=%u idx=%u rate=%u\n", m0->m_pkthdr.len, sc->bcnq.cur, rate); bus_dmamap_sync(sc->bcnq.data_dmat, data->map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(sc->bcnq.desc_dmat, sc->bcnq.desc_map, BUS_DMASYNC_PREWRITE); sc->bcnq.cur = (sc->bcnq.cur + 1) % RT2560_BEACON_RING_COUNT; return 0; } static int rt2560_tx_mgt(struct rt2560_softc *sc, struct mbuf *m0, struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct rt2560_tx_desc *desc; struct rt2560_tx_data *data; struct ieee80211_frame *wh; struct ieee80211_key *k; bus_dma_segment_t segs[RT2560_MAX_SCATTER]; uint16_t dur; uint32_t flags = 0; int nsegs, rate, error; desc = &sc->prioq.desc[sc->prioq.cur]; data = &sc->prioq.data[sc->prioq.cur]; rate = ni->ni_txparms->mgmtrate; wh = mtod(m0, struct ieee80211_frame *); if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { k = ieee80211_crypto_encap(ni, m0); if (k == NULL) { m_freem(m0); return ENOBUFS; } } error = bus_dmamap_load_mbuf_sg(sc->prioq.data_dmat, data->map, m0, segs, &nsegs, 0); if (error != 0) { device_printf(sc->sc_dev, "could not map mbuf (error %d)\n", error); m_freem(m0); return error; } if (ieee80211_radiotap_active_vap(vap)) { struct rt2560_tx_radiotap_header *tap = &sc->sc_txtap; tap->wt_flags = 0; tap->wt_rate = rate; tap->wt_antenna = sc->tx_ant; ieee80211_radiotap_tx(vap, m0); } data->m = m0; data->ni = ni; /* management frames are not taken into account for amrr */ data->rix = IEEE80211_FIXED_RATE_NONE; wh = mtod(m0, struct ieee80211_frame *); if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { flags |= RT2560_TX_ACK; dur = ieee80211_ack_duration(ic->ic_rt, rate, ic->ic_flags & IEEE80211_F_SHPREAMBLE); *(uint16_t *)wh->i_dur = htole16(dur); /* tell hardware to add timestamp for probe responses */ if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_MGT && (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) == IEEE80211_FC0_SUBTYPE_PROBE_RESP) flags |= RT2560_TX_TIMESTAMP; } rt2560_setup_tx_desc(sc, desc, flags, m0->m_pkthdr.len, rate, 0, segs->ds_addr); bus_dmamap_sync(sc->prioq.data_dmat, data->map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(sc->prioq.desc_dmat, sc->prioq.desc_map, BUS_DMASYNC_PREWRITE); DPRINTFN(sc, 10, "sending mgt frame len=%u idx=%u rate=%u\n", m0->m_pkthdr.len, sc->prioq.cur, rate); /* kick prio */ sc->prioq.queued++; sc->prioq.cur = (sc->prioq.cur + 1) % RT2560_PRIO_RING_COUNT; RAL_WRITE(sc, RT2560_TXCSR0, RT2560_KICK_PRIO); return 0; } static int rt2560_sendprot(struct rt2560_softc *sc, const struct mbuf *m, struct ieee80211_node *ni, int prot, int rate) { struct ieee80211com *ic = ni->ni_ic; - const struct ieee80211_frame *wh; struct rt2560_tx_desc *desc; struct rt2560_tx_data *data; struct mbuf *mprot; - int protrate, ackrate, pktlen, flags, isshort, error; - uint16_t dur; + int protrate, flags, error; bus_dma_segment_t segs[RT2560_MAX_SCATTER]; int 
nsegs; - KASSERT(prot == IEEE80211_PROT_RTSCTS || prot == IEEE80211_PROT_CTSONLY, - ("protection %d", prot)); - - wh = mtod(m, const struct ieee80211_frame *); - pktlen = m->m_pkthdr.len + IEEE80211_CRC_LEN; - - protrate = ieee80211_ctl_rate(ic->ic_rt, rate); - ackrate = ieee80211_ack_rate(ic->ic_rt, rate); - - isshort = (ic->ic_flags & IEEE80211_F_SHPREAMBLE) != 0; - dur = ieee80211_compute_duration(ic->ic_rt, pktlen, rate, isshort) - + ieee80211_ack_duration(ic->ic_rt, rate, isshort); - flags = RT2560_TX_MORE_FRAG; - if (prot == IEEE80211_PROT_RTSCTS) { - /* NB: CTS is the same size as an ACK */ - dur += ieee80211_ack_duration(ic->ic_rt, rate, isshort); - flags |= RT2560_TX_ACK; - mprot = ieee80211_alloc_rts(ic, wh->i_addr1, wh->i_addr2, dur); - } else { - mprot = ieee80211_alloc_cts(ic, ni->ni_vap->iv_myaddr, dur); - } + mprot = ieee80211_alloc_prot(ni, m, rate, prot); if (mprot == NULL) { - /* XXX stat + msg */ + if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1); + device_printf(sc->sc_dev, + "could not allocate mbuf for protection mode %d\n", prot); return ENOBUFS; } desc = &sc->txq.desc[sc->txq.cur_encrypt]; data = &sc->txq.data[sc->txq.cur_encrypt]; error = bus_dmamap_load_mbuf_sg(sc->txq.data_dmat, data->map, mprot, segs, &nsegs, 0); if (error != 0) { device_printf(sc->sc_dev, "could not map mbuf (error %d)\n", error); m_freem(mprot); return error; } data->m = mprot; data->ni = ieee80211_ref_node(ni); /* ctl frames are not taken into account for amrr */ data->rix = IEEE80211_FIXED_RATE_NONE; + + protrate = ieee80211_ctl_rate(ic->ic_rt, rate); + flags = RT2560_TX_MORE_FRAG; + if (prot == IEEE80211_PROT_RTSCTS) + flags |= RT2560_TX_ACK; rt2560_setup_tx_desc(sc, desc, flags, mprot->m_pkthdr.len, protrate, 1, segs->ds_addr); bus_dmamap_sync(sc->txq.data_dmat, data->map, BUS_DMASYNC_PREWRITE); sc->txq.queued++; sc->txq.cur_encrypt = (sc->txq.cur_encrypt + 1) % RT2560_TX_RING_COUNT; return 0; } static int rt2560_tx_raw(struct rt2560_softc *sc, struct mbuf *m0, struct ieee80211_node *ni, const struct ieee80211_bpf_params *params) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct rt2560_tx_desc *desc; struct rt2560_tx_data *data; bus_dma_segment_t segs[RT2560_MAX_SCATTER]; uint32_t flags; int nsegs, rate, error; desc = &sc->prioq.desc[sc->prioq.cur]; data = &sc->prioq.data[sc->prioq.cur]; rate = params->ibp_rate0; if (!ieee80211_isratevalid(ic->ic_rt, rate)) { /* XXX fall back to mcast/mgmt rate? */ m_freem(m0); return EINVAL; } flags = 0; if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0) flags |= RT2560_TX_ACK; if (params->ibp_flags & (IEEE80211_BPF_RTS|IEEE80211_BPF_CTS)) { error = rt2560_sendprot(sc, m0, ni, params->ibp_flags & IEEE80211_BPF_RTS ? 
IEEE80211_PROT_RTSCTS : IEEE80211_PROT_CTSONLY, rate); if (error) { m_freem(m0); return error; } flags |= RT2560_TX_LONG_RETRY | RT2560_TX_IFS_SIFS; } error = bus_dmamap_load_mbuf_sg(sc->prioq.data_dmat, data->map, m0, segs, &nsegs, 0); if (error != 0) { device_printf(sc->sc_dev, "could not map mbuf (error %d)\n", error); m_freem(m0); return error; } if (ieee80211_radiotap_active_vap(vap)) { struct rt2560_tx_radiotap_header *tap = &sc->sc_txtap; tap->wt_flags = 0; tap->wt_rate = rate; tap->wt_antenna = sc->tx_ant; ieee80211_radiotap_tx(ni->ni_vap, m0); } data->m = m0; data->ni = ni; /* XXX need to setup descriptor ourself */ rt2560_setup_tx_desc(sc, desc, flags, m0->m_pkthdr.len, rate, (params->ibp_flags & IEEE80211_BPF_CRYPTO) != 0, segs->ds_addr); bus_dmamap_sync(sc->prioq.data_dmat, data->map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(sc->prioq.desc_dmat, sc->prioq.desc_map, BUS_DMASYNC_PREWRITE); DPRINTFN(sc, 10, "sending raw frame len=%u idx=%u rate=%u\n", m0->m_pkthdr.len, sc->prioq.cur, rate); /* kick prio */ sc->prioq.queued++; sc->prioq.cur = (sc->prioq.cur + 1) % RT2560_PRIO_RING_COUNT; RAL_WRITE(sc, RT2560_TXCSR0, RT2560_KICK_PRIO); return 0; } static int rt2560_tx_data(struct rt2560_softc *sc, struct mbuf *m0, struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct rt2560_tx_desc *desc; struct rt2560_tx_data *data; struct ieee80211_frame *wh; const struct ieee80211_txparam *tp = ni->ni_txparms; struct ieee80211_key *k; struct mbuf *mnew; bus_dma_segment_t segs[RT2560_MAX_SCATTER]; uint16_t dur; uint32_t flags; int nsegs, rate, error; wh = mtod(m0, struct ieee80211_frame *); if (m0->m_flags & M_EAPOL) { rate = tp->mgmtrate; } else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { rate = tp->mcastrate; } else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) { rate = tp->ucastrate; } else { (void) ieee80211_ratectl_rate(ni, NULL, 0); rate = ni->ni_txrate; } if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { k = ieee80211_crypto_encap(ni, m0); if (k == NULL) { m_freem(m0); return ENOBUFS; } /* packet header may have moved, reset our local pointer */ wh = mtod(m0, struct ieee80211_frame *); } flags = 0; if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { int prot = IEEE80211_PROT_NONE; if (m0->m_pkthdr.len + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) prot = IEEE80211_PROT_RTSCTS; else if ((ic->ic_flags & IEEE80211_F_USEPROT) && ieee80211_rate2phytype(ic->ic_rt, rate) == IEEE80211_T_OFDM) prot = ic->ic_protmode; if (prot != IEEE80211_PROT_NONE) { error = rt2560_sendprot(sc, m0, ni, prot, rate); if (error) { m_freem(m0); return error; } flags |= RT2560_TX_LONG_RETRY | RT2560_TX_IFS_SIFS; } } data = &sc->txq.data[sc->txq.cur_encrypt]; desc = &sc->txq.desc[sc->txq.cur_encrypt]; error = bus_dmamap_load_mbuf_sg(sc->txq.data_dmat, data->map, m0, segs, &nsegs, 0); if (error != 0 && error != EFBIG) { device_printf(sc->sc_dev, "could not map mbuf (error %d)\n", error); m_freem(m0); return error; } if (error != 0) { mnew = m_defrag(m0, M_NOWAIT); if (mnew == NULL) { device_printf(sc->sc_dev, "could not defragment mbuf\n"); m_freem(m0); return ENOBUFS; } m0 = mnew; error = bus_dmamap_load_mbuf_sg(sc->txq.data_dmat, data->map, m0, segs, &nsegs, 0); if (error != 0) { device_printf(sc->sc_dev, "could not map mbuf (error %d)\n", error); m_freem(m0); return error; } /* packet header may have moved, reset our local pointer */ wh = mtod(m0, struct ieee80211_frame *); } if (ieee80211_radiotap_active_vap(vap)) { struct rt2560_tx_radiotap_header *tap = &sc->sc_txtap; 
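		/*
		 * The TX radiotap header records only what is known at
		 * this point -- flags, the chosen rate and the configured
		 * TX antenna -- and ieee80211_radiotap_tx() hands it to any
		 * attached bpf listeners before the frame is queued.
		 */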
tap->wt_flags = 0; tap->wt_rate = rate; tap->wt_antenna = sc->tx_ant; ieee80211_radiotap_tx(vap, m0); } data->m = m0; data->ni = ni; /* remember link conditions for rate adaptation algorithm */ if (tp->ucastrate == IEEE80211_FIXED_RATE_NONE) { data->rix = ni->ni_txrate; /* XXX probably need last rssi value and not avg */ data->rssi = ic->ic_node_getrssi(ni); } else data->rix = IEEE80211_FIXED_RATE_NONE; if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { flags |= RT2560_TX_ACK; dur = ieee80211_ack_duration(ic->ic_rt, rate, ic->ic_flags & IEEE80211_F_SHPREAMBLE); *(uint16_t *)wh->i_dur = htole16(dur); } rt2560_setup_tx_desc(sc, desc, flags, m0->m_pkthdr.len, rate, 1, segs->ds_addr); bus_dmamap_sync(sc->txq.data_dmat, data->map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(sc->txq.desc_dmat, sc->txq.desc_map, BUS_DMASYNC_PREWRITE); DPRINTFN(sc, 10, "sending data frame len=%u idx=%u rate=%u\n", m0->m_pkthdr.len, sc->txq.cur_encrypt, rate); /* kick encrypt */ sc->txq.queued++; sc->txq.cur_encrypt = (sc->txq.cur_encrypt + 1) % RT2560_TX_RING_COUNT; RAL_WRITE(sc, RT2560_SECCSR1, RT2560_KICK_ENCRYPT); return 0; } static int rt2560_transmit(struct ieee80211com *ic, struct mbuf *m) { struct rt2560_softc *sc = ic->ic_softc; int error; RAL_LOCK(sc); if ((sc->sc_flags & RT2560_F_RUNNING) == 0) { RAL_UNLOCK(sc); return (ENXIO); } error = mbufq_enqueue(&sc->sc_snd, m); if (error) { RAL_UNLOCK(sc); return (error); } rt2560_start(sc); RAL_UNLOCK(sc); return (0); } static void rt2560_start(struct rt2560_softc *sc) { struct ieee80211_node *ni; struct mbuf *m; RAL_LOCK_ASSERT(sc); while (sc->txq.queued < RT2560_TX_RING_COUNT - 1 && (m = mbufq_dequeue(&sc->sc_snd)) != NULL) { ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; if (rt2560_tx_data(sc, m, ni) != 0) { if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1); ieee80211_free_node(ni); break; } sc->sc_tx_timer = 5; } } static void rt2560_watchdog(void *arg) { struct rt2560_softc *sc = arg; RAL_LOCK_ASSERT(sc); KASSERT(sc->sc_flags & RT2560_F_RUNNING, ("not running")); if (sc->sc_invalid) /* card ejected */ return; rt2560_encryption_intr(sc); rt2560_tx_intr(sc); if (sc->sc_tx_timer > 0 && --sc->sc_tx_timer == 0) { device_printf(sc->sc_dev, "device timeout\n"); rt2560_init_locked(sc); counter_u64_add(sc->sc_ic.ic_oerrors, 1); /* NB: callout is reset in rt2560_init() */ return; } callout_reset(&sc->watchdog_ch, hz, rt2560_watchdog, sc); } static void rt2560_parent(struct ieee80211com *ic) { struct rt2560_softc *sc = ic->ic_softc; int startall = 0; RAL_LOCK(sc); if (ic->ic_nrunning > 0) { if ((sc->sc_flags & RT2560_F_RUNNING) == 0) { rt2560_init_locked(sc); startall = 1; } else rt2560_update_promisc(ic); } else if (sc->sc_flags & RT2560_F_RUNNING) rt2560_stop_locked(sc); RAL_UNLOCK(sc); if (startall) ieee80211_start_all(ic); } static void rt2560_bbp_write(struct rt2560_softc *sc, uint8_t reg, uint8_t val) { uint32_t tmp; int ntries; for (ntries = 0; ntries < 100; ntries++) { if (!(RAL_READ(sc, RT2560_BBPCSR) & RT2560_BBP_BUSY)) break; DELAY(1); } if (ntries == 100) { device_printf(sc->sc_dev, "could not write to BBP\n"); return; } tmp = RT2560_BBP_WRITE | RT2560_BBP_BUSY | reg << 8 | val; RAL_WRITE(sc, RT2560_BBPCSR, tmp); DPRINTFN(sc, 15, "BBP R%u <- 0x%02x\n", reg, val); } static uint8_t rt2560_bbp_read(struct rt2560_softc *sc, uint8_t reg) { uint32_t val; int ntries; for (ntries = 0; ntries < 100; ntries++) { if (!(RAL_READ(sc, RT2560_BBPCSR) & RT2560_BBP_BUSY)) break; DELAY(1); } if (ntries == 100) { device_printf(sc->sc_dev, "could not read from BBP\n"); return 0; 
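/*
 * Illustrative note on the BBP access protocol used above and below:
 * both directions poll RT2560_BBPCSR until RT2560_BBP_BUSY clears,
 * then issue a command word of the form (reg << 8 | val) plus the
 * BUSY/WRITE control bits.  For example, the rt2560_bbp_write(sc, 17,
 * 0x48) done in rt2560_bbp_init() encodes as 0x1148 before the
 * control bits are or'ed in; a read posts (reg << 8) and returns the
 * low 8 bits of BBPCSR once BUSY clears again.
 */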
} val = RT2560_BBP_BUSY | reg << 8; RAL_WRITE(sc, RT2560_BBPCSR, val); for (ntries = 0; ntries < 100; ntries++) { val = RAL_READ(sc, RT2560_BBPCSR); if (!(val & RT2560_BBP_BUSY)) return val & 0xff; DELAY(1); } device_printf(sc->sc_dev, "could not read from BBP\n"); return 0; } static void rt2560_rf_write(struct rt2560_softc *sc, uint8_t reg, uint32_t val) { uint32_t tmp; int ntries; for (ntries = 0; ntries < 100; ntries++) { if (!(RAL_READ(sc, RT2560_RFCSR) & RT2560_RF_BUSY)) break; DELAY(1); } if (ntries == 100) { device_printf(sc->sc_dev, "could not write to RF\n"); return; } tmp = RT2560_RF_BUSY | RT2560_RF_20BIT | (val & 0xfffff) << 2 | (reg & 0x3); RAL_WRITE(sc, RT2560_RFCSR, tmp); /* remember last written value in sc */ sc->rf_regs[reg] = val; DPRINTFN(sc, 15, "RF R[%u] <- 0x%05x\n", reg & 0x3, val & 0xfffff); } static void rt2560_set_chan(struct rt2560_softc *sc, struct ieee80211_channel *c) { struct ieee80211com *ic = &sc->sc_ic; uint8_t power, tmp; u_int i, chan; chan = ieee80211_chan2ieee(ic, c); KASSERT(chan != 0 && chan != IEEE80211_CHAN_ANY, ("chan 0x%x", chan)); if (IEEE80211_IS_CHAN_2GHZ(c)) power = min(sc->txpow[chan - 1], 31); else power = 31; /* adjust txpower using ifconfig settings */ power -= (100 - ic->ic_txpowlimit) / 8; DPRINTFN(sc, 2, "setting channel to %u, txpower to %u\n", chan, power); switch (sc->rf_rev) { case RT2560_RF_2522: rt2560_rf_write(sc, RAL_RF1, 0x00814); rt2560_rf_write(sc, RAL_RF2, rt2560_rf2522_r2[chan - 1]); rt2560_rf_write(sc, RAL_RF3, power << 7 | 0x00040); break; case RT2560_RF_2523: rt2560_rf_write(sc, RAL_RF1, 0x08804); rt2560_rf_write(sc, RAL_RF2, rt2560_rf2523_r2[chan - 1]); rt2560_rf_write(sc, RAL_RF3, power << 7 | 0x38044); rt2560_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286); break; case RT2560_RF_2524: rt2560_rf_write(sc, RAL_RF1, 0x0c808); rt2560_rf_write(sc, RAL_RF2, rt2560_rf2524_r2[chan - 1]); rt2560_rf_write(sc, RAL_RF3, power << 7 | 0x00040); rt2560_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286); break; case RT2560_RF_2525: rt2560_rf_write(sc, RAL_RF1, 0x08808); rt2560_rf_write(sc, RAL_RF2, rt2560_rf2525_hi_r2[chan - 1]); rt2560_rf_write(sc, RAL_RF3, power << 7 | 0x18044); rt2560_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286); rt2560_rf_write(sc, RAL_RF1, 0x08808); rt2560_rf_write(sc, RAL_RF2, rt2560_rf2525_r2[chan - 1]); rt2560_rf_write(sc, RAL_RF3, power << 7 | 0x18044); rt2560_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286); break; case RT2560_RF_2525E: rt2560_rf_write(sc, RAL_RF1, 0x08808); rt2560_rf_write(sc, RAL_RF2, rt2560_rf2525e_r2[chan - 1]); rt2560_rf_write(sc, RAL_RF3, power << 7 | 0x18044); rt2560_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00286 : 0x00282); break; case RT2560_RF_2526: rt2560_rf_write(sc, RAL_RF2, rt2560_rf2526_hi_r2[chan - 1]); rt2560_rf_write(sc, RAL_RF4, (chan & 1) ? 0x00386 : 0x00381); rt2560_rf_write(sc, RAL_RF1, 0x08804); rt2560_rf_write(sc, RAL_RF2, rt2560_rf2526_r2[chan - 1]); rt2560_rf_write(sc, RAL_RF3, power << 7 | 0x18044); rt2560_rf_write(sc, RAL_RF4, (chan & 1) ? 
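/*
 * RF programming note (descriptive): rt2560_rf_write() packs a 20-bit
 * value and a 2-bit register index into a single RFCSR word,
 *
 *	RT2560_RF_BUSY | RT2560_RF_20BIT | (val & 0xfffff) << 2 | (reg & 0x3),
 *
 * so only four RF registers (RAL_RF1..RAL_RF4) are addressable; the
 * per-chip tables used in rt2560_set_chan() below simply supply the
 * 20-bit value for each channel.
 */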
0x00386 : 0x00381); break; /* dual-band RF */ case RT2560_RF_5222: for (i = 0; rt2560_rf5222[i].chan != chan; i++); rt2560_rf_write(sc, RAL_RF1, rt2560_rf5222[i].r1); rt2560_rf_write(sc, RAL_RF2, rt2560_rf5222[i].r2); rt2560_rf_write(sc, RAL_RF3, power << 7 | 0x00040); rt2560_rf_write(sc, RAL_RF4, rt2560_rf5222[i].r4); break; default: printf("unknown ral rev=%d\n", sc->rf_rev); } /* XXX */ if ((ic->ic_flags & IEEE80211_F_SCAN) == 0) { /* set Japan filter bit for channel 14 */ tmp = rt2560_bbp_read(sc, 70); tmp &= ~RT2560_JAPAN_FILTER; if (chan == 14) tmp |= RT2560_JAPAN_FILTER; rt2560_bbp_write(sc, 70, tmp); /* clear CRC errors */ RAL_READ(sc, RT2560_CNT0); } } static void rt2560_getradiocaps(struct ieee80211com *ic, int maxchans, int *nchans, struct ieee80211_channel chans[]) { struct rt2560_softc *sc = ic->ic_softc; uint8_t bands[IEEE80211_MODE_BYTES]; memset(bands, 0, sizeof(bands)); setbit(bands, IEEE80211_MODE_11B); setbit(bands, IEEE80211_MODE_11G); ieee80211_add_channel_list_2ghz(chans, maxchans, nchans, rt2560_chan_2ghz, nitems(rt2560_chan_2ghz), bands, 0); if (sc->rf_rev == RT2560_RF_5222) { setbit(bands, IEEE80211_MODE_11A); ieee80211_add_channel_list_5ghz(chans, maxchans, nchans, rt2560_chan_5ghz, nitems(rt2560_chan_5ghz), bands, 0); } } static void rt2560_set_channel(struct ieee80211com *ic) { struct rt2560_softc *sc = ic->ic_softc; RAL_LOCK(sc); rt2560_set_chan(sc, ic->ic_curchan); RAL_UNLOCK(sc); } #if 0 /* * Disable RF auto-tuning. */ static void rt2560_disable_rf_tune(struct rt2560_softc *sc) { uint32_t tmp; if (sc->rf_rev != RT2560_RF_2523) { tmp = sc->rf_regs[RAL_RF1] & ~RAL_RF1_AUTOTUNE; rt2560_rf_write(sc, RAL_RF1, tmp); } tmp = sc->rf_regs[RAL_RF3] & ~RAL_RF3_AUTOTUNE; rt2560_rf_write(sc, RAL_RF3, tmp); DPRINTFN(sc, 2, "%s", "disabling RF autotune\n"); } #endif /* * Refer to IEEE Std 802.11-1999 pp. 123 for more information on TSF * synchronization. */ static void rt2560_enable_tsf_sync(struct rt2560_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); uint16_t logcwmin, preload; uint32_t tmp; /* first, disable TSF synchronization */ RAL_WRITE(sc, RT2560_CSR14, 0); tmp = 16 * vap->iv_bss->ni_intval; RAL_WRITE(sc, RT2560_CSR12, tmp); RAL_WRITE(sc, RT2560_CSR13, 0); logcwmin = 5; preload = (vap->iv_opmode == IEEE80211_M_STA) ? 
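/*
 * Worked example for the TSF setup above (illustrative only): a BSS
 * with the common 100 TU beacon interval programs CSR12 with
 * 16 * 100 = 1600, i.e. the interval scaled by 16 -- presumably
 * because the register counts in 1/16 TU units, though that
 * interpretation is an assumption here.  logcwmin is fixed at 5 and
 * the beacon preload time depends on the operating mode, as set just
 * below.
 */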
384 : 1024; tmp = logcwmin << 16 | preload; RAL_WRITE(sc, RT2560_BCNOCSR, tmp); /* finally, enable TSF synchronization */ tmp = RT2560_ENABLE_TSF | RT2560_ENABLE_TBCN; if (ic->ic_opmode == IEEE80211_M_STA) tmp |= RT2560_ENABLE_TSF_SYNC(1); else tmp |= RT2560_ENABLE_TSF_SYNC(2) | RT2560_ENABLE_BEACON_GENERATOR; RAL_WRITE(sc, RT2560_CSR14, tmp); DPRINTF(sc, "%s", "enabling TSF synchronization\n"); } static void rt2560_enable_tsf(struct rt2560_softc *sc) { RAL_WRITE(sc, RT2560_CSR14, 0); RAL_WRITE(sc, RT2560_CSR14, RT2560_ENABLE_TSF_SYNC(2) | RT2560_ENABLE_TSF); } static void rt2560_update_plcp(struct rt2560_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; /* no short preamble for 1Mbps */ RAL_WRITE(sc, RT2560_PLCP1MCSR, 0x00700400); if (!(ic->ic_flags & IEEE80211_F_SHPREAMBLE)) { /* values taken from the reference driver */ RAL_WRITE(sc, RT2560_PLCP2MCSR, 0x00380401); RAL_WRITE(sc, RT2560_PLCP5p5MCSR, 0x00150402); RAL_WRITE(sc, RT2560_PLCP11MCSR, 0x000b8403); } else { /* same values as above or'ed 0x8 */ RAL_WRITE(sc, RT2560_PLCP2MCSR, 0x00380409); RAL_WRITE(sc, RT2560_PLCP5p5MCSR, 0x0015040a); RAL_WRITE(sc, RT2560_PLCP11MCSR, 0x000b840b); } DPRINTF(sc, "updating PLCP for %s preamble\n", (ic->ic_flags & IEEE80211_F_SHPREAMBLE) ? "short" : "long"); } /* * This function can be called by ieee80211_set_shortslottime(). Refer to * IEEE Std 802.11-1999 pp. 85 for how these values are computed. */ static void rt2560_update_slot(struct ieee80211com *ic) { struct rt2560_softc *sc = ic->ic_softc; uint8_t slottime; uint16_t tx_sifs, tx_pifs, tx_difs, eifs; uint32_t tmp; #ifndef FORCE_SLOTTIME slottime = IEEE80211_GET_SLOTTIME(ic); #else /* * Setting the slot time according to the "short slot time" capability * in beacon/probe_resp seems to cause problems acknowledging * certain APs' data frames transmitted at CCK/DS rates: the * problematic AP keeps retransmitting data frames, probably * because MAC-level ACKs are not received by the hardware. * So we cheat a little bit here by claiming we are capable of * "short slot time" but setting the hardware slot time to the normal * slot time. ral(4) does not seem to have trouble receiving * frames transmitted using short slot time even if the hardware * slot time is set to the normal slot time. If we didn't use this * trick, we would have to claim that short slot time is not * supported; this would give relatively poor RX performance * (-1Mb~-2Mb lower) and the _whole_ BSS would stop using short * slot time. */ slottime = IEEE80211_DUR_SLOT; #endif /* update the MAC slot boundaries */ tx_sifs = RAL_SIFS - RT2560_TXRX_TURNAROUND; tx_pifs = tx_sifs + slottime; tx_difs = IEEE80211_DUR_DIFS(tx_sifs, slottime); eifs = (ic->ic_curmode == IEEE80211_MODE_11B) ?
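/*
 * Worked example for the boundaries above, assuming the usual 2.4 GHz
 * timing (SIFS = 10us, long slot = 20us): PIFS = SIFS + slot = 30us
 * and DIFS = SIFS + 2 * slot = 50us, which is what IEEE80211_DUR_DIFS()
 * works out to; the values actually programmed are additionally offset
 * by the chip's Tx/Rx turnaround time (RT2560_TXRX_TURNAROUND).  EIFS
 * is simply switched between two constants on the next line depending
 * on whether the current mode is 11b.
 */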
364 : 60; tmp = RAL_READ(sc, RT2560_CSR11); tmp = (tmp & ~0x1f00) | slottime << 8; RAL_WRITE(sc, RT2560_CSR11, tmp); tmp = tx_pifs << 16 | tx_sifs; RAL_WRITE(sc, RT2560_CSR18, tmp); tmp = eifs << 16 | tx_difs; RAL_WRITE(sc, RT2560_CSR19, tmp); DPRINTF(sc, "setting slottime to %uus\n", slottime); } static void rt2560_set_basicrates(struct rt2560_softc *sc, const struct ieee80211_rateset *rs) { struct ieee80211com *ic = &sc->sc_ic; uint32_t mask = 0; uint8_t rate; int i; for (i = 0; i < rs->rs_nrates; i++) { rate = rs->rs_rates[i]; if (!(rate & IEEE80211_RATE_BASIC)) continue; mask |= 1 << ieee80211_legacy_rate_lookup(ic->ic_rt, IEEE80211_RV(rate)); } RAL_WRITE(sc, RT2560_ARSP_PLCP_1, mask); DPRINTF(sc, "Setting basic rate mask to 0x%x\n", mask); } static void rt2560_update_led(struct rt2560_softc *sc, int led1, int led2) { uint32_t tmp; /* set ON period to 70ms and OFF period to 30ms */ tmp = led1 << 16 | led2 << 17 | 70 << 8 | 30; RAL_WRITE(sc, RT2560_LEDCSR, tmp); } static void rt2560_set_bssid(struct rt2560_softc *sc, const uint8_t *bssid) { uint32_t tmp; tmp = bssid[0] | bssid[1] << 8 | bssid[2] << 16 | bssid[3] << 24; RAL_WRITE(sc, RT2560_CSR5, tmp); tmp = bssid[4] | bssid[5] << 8; RAL_WRITE(sc, RT2560_CSR6, tmp); DPRINTF(sc, "setting BSSID to %6D\n", bssid, ":"); } static void rt2560_set_macaddr(struct rt2560_softc *sc, const uint8_t *addr) { uint32_t tmp; tmp = addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24; RAL_WRITE(sc, RT2560_CSR3, tmp); tmp = addr[4] | addr[5] << 8; RAL_WRITE(sc, RT2560_CSR4, tmp); DPRINTF(sc, "setting MAC address to %6D\n", addr, ":"); } static void rt2560_get_macaddr(struct rt2560_softc *sc, uint8_t *addr) { uint32_t tmp; tmp = RAL_READ(sc, RT2560_CSR3); addr[0] = tmp & 0xff; addr[1] = (tmp >> 8) & 0xff; addr[2] = (tmp >> 16) & 0xff; addr[3] = (tmp >> 24); tmp = RAL_READ(sc, RT2560_CSR4); addr[4] = tmp & 0xff; addr[5] = (tmp >> 8) & 0xff; } static void rt2560_update_promisc(struct ieee80211com *ic) { struct rt2560_softc *sc = ic->ic_softc; uint32_t tmp; tmp = RAL_READ(sc, RT2560_RXCSR0); tmp &= ~RT2560_DROP_NOT_TO_ME; if (ic->ic_promisc == 0) tmp |= RT2560_DROP_NOT_TO_ME; RAL_WRITE(sc, RT2560_RXCSR0, tmp); DPRINTF(sc, "%s promiscuous mode\n", (ic->ic_promisc > 0) ? 
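/*
 * Illustrative example for the address helpers above: the station
 * address is stored little-endian across two registers, so
 * 00:11:22:33:44:55 is programmed as CSR3 = 0x33221100 and
 * CSR4 = 0x00005544 (the BSSID handling in CSR5/CSR6 is identical),
 * and rt2560_get_macaddr() simply reverses that packing.
 */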
"entering" : "leaving"); } static const char * rt2560_get_rf(int rev) { switch (rev) { case RT2560_RF_2522: return "RT2522"; case RT2560_RF_2523: return "RT2523"; case RT2560_RF_2524: return "RT2524"; case RT2560_RF_2525: return "RT2525"; case RT2560_RF_2525E: return "RT2525e"; case RT2560_RF_2526: return "RT2526"; case RT2560_RF_5222: return "RT5222"; default: return "unknown"; } } static void rt2560_read_config(struct rt2560_softc *sc) { uint16_t val; int i; val = rt2560_eeprom_read(sc, RT2560_EEPROM_CONFIG0); sc->rf_rev = (val >> 11) & 0x7; sc->hw_radio = (val >> 10) & 0x1; sc->led_mode = (val >> 6) & 0x7; sc->rx_ant = (val >> 4) & 0x3; sc->tx_ant = (val >> 2) & 0x3; sc->nb_ant = val & 0x3; /* read default values for BBP registers */ for (i = 0; i < 16; i++) { val = rt2560_eeprom_read(sc, RT2560_EEPROM_BBP_BASE + i); if (val == 0 || val == 0xffff) continue; sc->bbp_prom[i].reg = val >> 8; sc->bbp_prom[i].val = val & 0xff; } /* read Tx power for all b/g channels */ for (i = 0; i < 14 / 2; i++) { val = rt2560_eeprom_read(sc, RT2560_EEPROM_TXPOWER + i); sc->txpow[i * 2] = val & 0xff; sc->txpow[i * 2 + 1] = val >> 8; } for (i = 0; i < 14; ++i) { if (sc->txpow[i] > 31) sc->txpow[i] = 24; } val = rt2560_eeprom_read(sc, RT2560_EEPROM_CALIBRATE); if ((val & 0xff) == 0xff) sc->rssi_corr = RT2560_DEFAULT_RSSI_CORR; else sc->rssi_corr = val & 0xff; DPRINTF(sc, "rssi correction %d, calibrate 0x%02x\n", sc->rssi_corr, val); } static void rt2560_scan_start(struct ieee80211com *ic) { struct rt2560_softc *sc = ic->ic_softc; /* abort TSF synchronization */ RAL_WRITE(sc, RT2560_CSR14, 0); rt2560_set_bssid(sc, ieee80211broadcastaddr); } static void rt2560_scan_end(struct ieee80211com *ic) { struct rt2560_softc *sc = ic->ic_softc; struct ieee80211vap *vap = ic->ic_scan->ss_vap; rt2560_enable_tsf_sync(sc); /* XXX keep local copy */ rt2560_set_bssid(sc, vap->iv_bss->ni_bssid); } static int rt2560_bbp_init(struct rt2560_softc *sc) { int i, ntries; /* wait for BBP to be ready */ for (ntries = 0; ntries < 100; ntries++) { if (rt2560_bbp_read(sc, RT2560_BBP_VERSION) != 0) break; DELAY(1); } if (ntries == 100) { device_printf(sc->sc_dev, "timeout waiting for BBP\n"); return EIO; } /* initialize BBP registers to default values */ for (i = 0; i < nitems(rt2560_def_bbp); i++) { rt2560_bbp_write(sc, rt2560_def_bbp[i].reg, rt2560_def_bbp[i].val); } /* initialize BBP registers to values stored in EEPROM */ for (i = 0; i < 16; i++) { if (sc->bbp_prom[i].reg == 0 && sc->bbp_prom[i].val == 0) break; rt2560_bbp_write(sc, sc->bbp_prom[i].reg, sc->bbp_prom[i].val); } rt2560_bbp_write(sc, 17, 0x48); /* XXX restore bbp17 */ return 0; } static void rt2560_set_txantenna(struct rt2560_softc *sc, int antenna) { uint32_t tmp; uint8_t tx; tx = rt2560_bbp_read(sc, RT2560_BBP_TX) & ~RT2560_BBP_ANTMASK; if (antenna == 1) tx |= RT2560_BBP_ANTA; else if (antenna == 2) tx |= RT2560_BBP_ANTB; else tx |= RT2560_BBP_DIVERSITY; /* need to force I/Q flip for RF 2525e, 2526 and 5222 */ if (sc->rf_rev == RT2560_RF_2525E || sc->rf_rev == RT2560_RF_2526 || sc->rf_rev == RT2560_RF_5222) tx |= RT2560_BBP_FLIPIQ; rt2560_bbp_write(sc, RT2560_BBP_TX, tx); /* update values for CCK and OFDM in BBPCSR1 */ tmp = RAL_READ(sc, RT2560_BBPCSR1) & ~0x00070007; tmp |= (tx & 0x7) << 16 | (tx & 0x7); RAL_WRITE(sc, RT2560_BBPCSR1, tmp); } static void rt2560_set_rxantenna(struct rt2560_softc *sc, int antenna) { uint8_t rx; rx = rt2560_bbp_read(sc, RT2560_BBP_RX) & ~RT2560_BBP_ANTMASK; if (antenna == 1) rx |= RT2560_BBP_ANTA; else if (antenna == 2) rx |= 
RT2560_BBP_ANTB; else rx |= RT2560_BBP_DIVERSITY; /* need to force no I/Q flip for RF 2525e and 2526 */ if (sc->rf_rev == RT2560_RF_2525E || sc->rf_rev == RT2560_RF_2526) rx &= ~RT2560_BBP_FLIPIQ; rt2560_bbp_write(sc, RT2560_BBP_RX, rx); } static void rt2560_init_locked(struct rt2560_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); uint32_t tmp; int i; RAL_LOCK_ASSERT(sc); rt2560_stop_locked(sc); /* setup tx rings */ tmp = RT2560_PRIO_RING_COUNT << 24 | RT2560_ATIM_RING_COUNT << 16 | RT2560_TX_RING_COUNT << 8 | RT2560_TX_DESC_SIZE; /* rings must be initialized in this exact order */ RAL_WRITE(sc, RT2560_TXCSR2, tmp); RAL_WRITE(sc, RT2560_TXCSR3, sc->txq.physaddr); RAL_WRITE(sc, RT2560_TXCSR5, sc->prioq.physaddr); RAL_WRITE(sc, RT2560_TXCSR4, sc->atimq.physaddr); RAL_WRITE(sc, RT2560_TXCSR6, sc->bcnq.physaddr); /* setup rx ring */ tmp = RT2560_RX_RING_COUNT << 8 | RT2560_RX_DESC_SIZE; RAL_WRITE(sc, RT2560_RXCSR1, tmp); RAL_WRITE(sc, RT2560_RXCSR2, sc->rxq.physaddr); /* initialize MAC registers to default values */ for (i = 0; i < nitems(rt2560_def_mac); i++) RAL_WRITE(sc, rt2560_def_mac[i].reg, rt2560_def_mac[i].val); rt2560_set_macaddr(sc, vap ? vap->iv_myaddr : ic->ic_macaddr); /* set basic rate set (will be updated later) */ RAL_WRITE(sc, RT2560_ARSP_PLCP_1, 0x153); rt2560_update_slot(ic); rt2560_update_plcp(sc); rt2560_update_led(sc, 0, 0); RAL_WRITE(sc, RT2560_CSR1, RT2560_RESET_ASIC); RAL_WRITE(sc, RT2560_CSR1, RT2560_HOST_READY); if (rt2560_bbp_init(sc) != 0) { rt2560_stop_locked(sc); return; } rt2560_set_txantenna(sc, sc->tx_ant); rt2560_set_rxantenna(sc, sc->rx_ant); /* set default BSS channel */ rt2560_set_chan(sc, ic->ic_curchan); /* kick Rx */ tmp = RT2560_DROP_PHY_ERROR | RT2560_DROP_CRC_ERROR; if (ic->ic_opmode != IEEE80211_M_MONITOR) { tmp |= RT2560_DROP_CTL | RT2560_DROP_VERSION_ERROR; if (ic->ic_opmode != IEEE80211_M_HOSTAP && ic->ic_opmode != IEEE80211_M_MBSS) tmp |= RT2560_DROP_TODS; if (ic->ic_promisc == 0) tmp |= RT2560_DROP_NOT_TO_ME; } RAL_WRITE(sc, RT2560_RXCSR0, tmp); /* clear old FCS and Rx FIFO errors */ RAL_READ(sc, RT2560_CNT0); RAL_READ(sc, RT2560_CNT4); /* clear any pending interrupts */ RAL_WRITE(sc, RT2560_CSR7, 0xffffffff); /* enable interrupts */ RAL_WRITE(sc, RT2560_CSR8, RT2560_INTR_MASK); sc->sc_flags |= RT2560_F_RUNNING; callout_reset(&sc->watchdog_ch, hz, rt2560_watchdog, sc); } static void rt2560_init(void *priv) { struct rt2560_softc *sc = priv; struct ieee80211com *ic = &sc->sc_ic; RAL_LOCK(sc); rt2560_init_locked(sc); RAL_UNLOCK(sc); if (sc->sc_flags & RT2560_F_RUNNING) ieee80211_start_all(ic); /* start all vap's */ } static void rt2560_stop_locked(struct rt2560_softc *sc) { volatile int *flags = &sc->sc_flags; RAL_LOCK_ASSERT(sc); while (*flags & RT2560_F_INPUT_RUNNING) msleep(sc, &sc->sc_mtx, 0, "ralrunning", hz/10); callout_stop(&sc->watchdog_ch); sc->sc_tx_timer = 0; if (sc->sc_flags & RT2560_F_RUNNING) { sc->sc_flags &= ~RT2560_F_RUNNING; /* abort Tx */ RAL_WRITE(sc, RT2560_TXCSR0, RT2560_ABORT_TX); /* disable Rx */ RAL_WRITE(sc, RT2560_RXCSR0, RT2560_DISABLE_RX); /* reset ASIC (imply reset BBP) */ RAL_WRITE(sc, RT2560_CSR1, RT2560_RESET_ASIC); RAL_WRITE(sc, RT2560_CSR1, 0); /* disable interrupts */ RAL_WRITE(sc, RT2560_CSR8, 0xffffffff); /* reset Tx and Rx rings */ rt2560_reset_tx_ring(sc, &sc->txq); rt2560_reset_tx_ring(sc, &sc->atimq); rt2560_reset_tx_ring(sc, &sc->prioq); rt2560_reset_tx_ring(sc, &sc->bcnq); rt2560_reset_rx_ring(sc, &sc->rxq); } } void rt2560_stop(void *arg) { struct 
rt2560_softc *sc = arg; RAL_LOCK(sc); rt2560_stop_locked(sc); RAL_UNLOCK(sc); } static int rt2560_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params) { struct ieee80211com *ic = ni->ni_ic; struct rt2560_softc *sc = ic->ic_softc; RAL_LOCK(sc); /* prevent management frames from being sent if we're not ready */ if (!(sc->sc_flags & RT2560_F_RUNNING)) { RAL_UNLOCK(sc); m_freem(m); return ENETDOWN; } if (sc->prioq.queued >= RT2560_PRIO_RING_COUNT) { RAL_UNLOCK(sc); m_freem(m); return ENOBUFS; /* XXX */ } if (params == NULL) { /* * Legacy path; interpret frame contents to decide * precisely how to send the frame. */ if (rt2560_tx_mgt(sc, m, ni) != 0) goto bad; } else { /* * Caller supplied explicit parameters to use in * sending the frame. */ if (rt2560_tx_raw(sc, m, ni, params)) goto bad; } sc->sc_tx_timer = 5; RAL_UNLOCK(sc); return 0; bad: RAL_UNLOCK(sc); return EIO; /* XXX */ } Index: head/sys/dev/ral/rt2661.c =================================================================== --- head/sys/dev/ral/rt2661.c (revision 330687) +++ head/sys/dev/ral/rt2661.c (revision 330688) @@ -1,2794 +1,2779 @@ /* $FreeBSD$ */ /*- * Copyright (c) 2006 * Damien Bergamini * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include __FBSDID("$FreeBSD$"); /*- * Ralink Technology RT2561, RT2561S and RT2661 chipset driver * http://www.ralinktech.com/ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define RAL_DEBUG #ifdef RAL_DEBUG #define DPRINTF(sc, fmt, ...) do { \ if (sc->sc_debug > 0) \ printf(fmt, __VA_ARGS__); \ } while (0) #define DPRINTFN(sc, n, fmt, ...) do { \ if (sc->sc_debug >= (n)) \ printf(fmt, __VA_ARGS__); \ } while (0) #else #define DPRINTF(sc, fmt, ...) #define DPRINTFN(sc, n, fmt, ...) 
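/*
 * Both macros gate on the per-device debug level (sc->sc_debug),
 * exposed as a read/write "debug" sysctl in rt2661_attach() below;
 * e.g. a DPRINTFN(sc, 15, ...) call only prints once that sysctl
 * (typically dev.ral.<unit>.debug, as created under
 * device_get_sysctl_tree(); the exact path is an assumption here)
 * is raised to 15 or more.
 */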
#endif static struct ieee80211vap *rt2661_vap_create(struct ieee80211com *, const char [IFNAMSIZ], int, enum ieee80211_opmode, int, const uint8_t [IEEE80211_ADDR_LEN], const uint8_t [IEEE80211_ADDR_LEN]); static void rt2661_vap_delete(struct ieee80211vap *); static void rt2661_dma_map_addr(void *, bus_dma_segment_t *, int, int); static int rt2661_alloc_tx_ring(struct rt2661_softc *, struct rt2661_tx_ring *, int); static void rt2661_reset_tx_ring(struct rt2661_softc *, struct rt2661_tx_ring *); static void rt2661_free_tx_ring(struct rt2661_softc *, struct rt2661_tx_ring *); static int rt2661_alloc_rx_ring(struct rt2661_softc *, struct rt2661_rx_ring *, int); static void rt2661_reset_rx_ring(struct rt2661_softc *, struct rt2661_rx_ring *); static void rt2661_free_rx_ring(struct rt2661_softc *, struct rt2661_rx_ring *); static int rt2661_newstate(struct ieee80211vap *, enum ieee80211_state, int); static uint16_t rt2661_eeprom_read(struct rt2661_softc *, uint8_t); static void rt2661_rx_intr(struct rt2661_softc *); static void rt2661_tx_intr(struct rt2661_softc *); static void rt2661_tx_dma_intr(struct rt2661_softc *, struct rt2661_tx_ring *); static void rt2661_mcu_beacon_expire(struct rt2661_softc *); static void rt2661_mcu_wakeup(struct rt2661_softc *); static void rt2661_mcu_cmd_intr(struct rt2661_softc *); static void rt2661_scan_start(struct ieee80211com *); static void rt2661_scan_end(struct ieee80211com *); static void rt2661_getradiocaps(struct ieee80211com *, int, int *, struct ieee80211_channel[]); static void rt2661_set_channel(struct ieee80211com *); static void rt2661_setup_tx_desc(struct rt2661_softc *, struct rt2661_tx_desc *, uint32_t, uint16_t, int, int, const bus_dma_segment_t *, int, int); static int rt2661_tx_data(struct rt2661_softc *, struct mbuf *, struct ieee80211_node *, int); static int rt2661_tx_mgt(struct rt2661_softc *, struct mbuf *, struct ieee80211_node *); static int rt2661_transmit(struct ieee80211com *, struct mbuf *); static void rt2661_start(struct rt2661_softc *); static int rt2661_raw_xmit(struct ieee80211_node *, struct mbuf *, const struct ieee80211_bpf_params *); static void rt2661_watchdog(void *); static void rt2661_parent(struct ieee80211com *); static void rt2661_bbp_write(struct rt2661_softc *, uint8_t, uint8_t); static uint8_t rt2661_bbp_read(struct rt2661_softc *, uint8_t); static void rt2661_rf_write(struct rt2661_softc *, uint8_t, uint32_t); static int rt2661_tx_cmd(struct rt2661_softc *, uint8_t, uint16_t); static void rt2661_select_antenna(struct rt2661_softc *); static void rt2661_enable_mrr(struct rt2661_softc *); static void rt2661_set_txpreamble(struct rt2661_softc *); static void rt2661_set_basicrates(struct rt2661_softc *, const struct ieee80211_rateset *); static void rt2661_select_band(struct rt2661_softc *, struct ieee80211_channel *); static void rt2661_set_chan(struct rt2661_softc *, struct ieee80211_channel *); static void rt2661_set_bssid(struct rt2661_softc *, const uint8_t *); static void rt2661_set_macaddr(struct rt2661_softc *, const uint8_t *); static void rt2661_update_promisc(struct ieee80211com *); static int rt2661_wme_update(struct ieee80211com *) __unused; static void rt2661_update_slot(struct ieee80211com *); static const char *rt2661_get_rf(int); static void rt2661_read_eeprom(struct rt2661_softc *, uint8_t macaddr[IEEE80211_ADDR_LEN]); static int rt2661_bbp_init(struct rt2661_softc *); static void rt2661_init_locked(struct rt2661_softc *); static void rt2661_init(void *); static void rt2661_stop_locked(struct 
rt2661_softc *); static void rt2661_stop(void *); static int rt2661_load_microcode(struct rt2661_softc *); #ifdef notyet static void rt2661_rx_tune(struct rt2661_softc *); static void rt2661_radar_start(struct rt2661_softc *); static int rt2661_radar_stop(struct rt2661_softc *); #endif static int rt2661_prepare_beacon(struct rt2661_softc *, struct ieee80211vap *); static void rt2661_enable_tsf_sync(struct rt2661_softc *); static void rt2661_enable_tsf(struct rt2661_softc *); static int rt2661_get_rssi(struct rt2661_softc *, uint8_t); static const struct { uint32_t reg; uint32_t val; } rt2661_def_mac[] = { RT2661_DEF_MAC }; static const struct { uint8_t reg; uint8_t val; } rt2661_def_bbp[] = { RT2661_DEF_BBP }; static const struct rfprog { uint8_t chan; uint32_t r1, r2, r3, r4; } rt2661_rf5225_1[] = { RT2661_RF5225_1 }, rt2661_rf5225_2[] = { RT2661_RF5225_2 }; static const uint8_t rt2661_chan_2ghz[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 }; static const uint8_t rt2661_chan_5ghz[] = { 36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161, 165 }; int rt2661_attach(device_t dev, int id) { struct rt2661_softc *sc = device_get_softc(dev); struct ieee80211com *ic = &sc->sc_ic; uint32_t val; int error, ac, ntries; sc->sc_id = id; sc->sc_dev = dev; mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF | MTX_RECURSE); callout_init_mtx(&sc->watchdog_ch, &sc->sc_mtx, 0); mbufq_init(&sc->sc_snd, ifqmaxlen); /* wait for NIC to initialize */ for (ntries = 0; ntries < 1000; ntries++) { if ((val = RAL_READ(sc, RT2661_MAC_CSR0)) != 0) break; DELAY(1000); } if (ntries == 1000) { device_printf(sc->sc_dev, "timeout waiting for NIC to initialize\n"); error = EIO; goto fail1; } /* retrieve RF rev. no and various other things from EEPROM */ rt2661_read_eeprom(sc, ic->ic_macaddr); device_printf(dev, "MAC/BBP RT%X, RF %s\n", val, rt2661_get_rf(sc->rf_rev)); /* * Allocate Tx and Rx rings. 
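 * One Tx ring is allocated per access category (ac 0-3), plus a
 * separate management ring and the Rx ring; the ring sizes come from
 * RT2661_TX_RING_COUNT, RT2661_MGT_RING_COUNT and RT2661_RX_RING_COUNT
 * in the code below.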
*/ for (ac = 0; ac < 4; ac++) { error = rt2661_alloc_tx_ring(sc, &sc->txq[ac], RT2661_TX_RING_COUNT); if (error != 0) { device_printf(sc->sc_dev, "could not allocate Tx ring %d\n", ac); goto fail2; } } error = rt2661_alloc_tx_ring(sc, &sc->mgtq, RT2661_MGT_RING_COUNT); if (error != 0) { device_printf(sc->sc_dev, "could not allocate Mgt ring\n"); goto fail2; } error = rt2661_alloc_rx_ring(sc, &sc->rxq, RT2661_RX_RING_COUNT); if (error != 0) { device_printf(sc->sc_dev, "could not allocate Rx ring\n"); goto fail3; } ic->ic_softc = sc; ic->ic_name = device_get_nameunit(dev); ic->ic_opmode = IEEE80211_M_STA; ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ /* set device capabilities */ ic->ic_caps = IEEE80211_C_STA /* station mode */ | IEEE80211_C_IBSS /* ibss, nee adhoc, mode */ | IEEE80211_C_HOSTAP /* hostap mode */ | IEEE80211_C_MONITOR /* monitor mode */ | IEEE80211_C_AHDEMO /* adhoc demo mode */ | IEEE80211_C_WDS /* 4-address traffic works */ | IEEE80211_C_MBSS /* mesh point link mode */ | IEEE80211_C_SHPREAMBLE /* short preamble supported */ | IEEE80211_C_SHSLOT /* short slot time supported */ | IEEE80211_C_WPA /* capable of WPA1+WPA2 */ | IEEE80211_C_BGSCAN /* capable of bg scanning */ #ifdef notyet | IEEE80211_C_TXFRAG /* handle tx frags */ | IEEE80211_C_WME /* 802.11e */ #endif ; rt2661_getradiocaps(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans, ic->ic_channels); ieee80211_ifattach(ic); #if 0 ic->ic_wme.wme_update = rt2661_wme_update; #endif ic->ic_scan_start = rt2661_scan_start; ic->ic_scan_end = rt2661_scan_end; ic->ic_getradiocaps = rt2661_getradiocaps; ic->ic_set_channel = rt2661_set_channel; ic->ic_updateslot = rt2661_update_slot; ic->ic_update_promisc = rt2661_update_promisc; ic->ic_raw_xmit = rt2661_raw_xmit; ic->ic_transmit = rt2661_transmit; ic->ic_parent = rt2661_parent; ic->ic_vap_create = rt2661_vap_create; ic->ic_vap_delete = rt2661_vap_delete; ieee80211_radiotap_attach(ic, &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap), RT2661_TX_RADIOTAP_PRESENT, &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap), RT2661_RX_RADIOTAP_PRESENT); #ifdef RAL_DEBUG SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug", CTLFLAG_RW, &sc->sc_debug, 0, "debug msgs"); #endif if (bootverbose) ieee80211_announce(ic); return 0; fail3: rt2661_free_tx_ring(sc, &sc->mgtq); fail2: while (--ac >= 0) rt2661_free_tx_ring(sc, &sc->txq[ac]); fail1: mtx_destroy(&sc->sc_mtx); return error; } int rt2661_detach(void *xsc) { struct rt2661_softc *sc = xsc; struct ieee80211com *ic = &sc->sc_ic; RAL_LOCK(sc); rt2661_stop_locked(sc); RAL_UNLOCK(sc); ieee80211_ifdetach(ic); mbufq_drain(&sc->sc_snd); rt2661_free_tx_ring(sc, &sc->txq[0]); rt2661_free_tx_ring(sc, &sc->txq[1]); rt2661_free_tx_ring(sc, &sc->txq[2]); rt2661_free_tx_ring(sc, &sc->txq[3]); rt2661_free_tx_ring(sc, &sc->mgtq); rt2661_free_rx_ring(sc, &sc->rxq); mtx_destroy(&sc->sc_mtx); return 0; } static struct ieee80211vap * rt2661_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, enum ieee80211_opmode opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac[IEEE80211_ADDR_LEN]) { struct rt2661_softc *sc = ic->ic_softc; struct rt2661_vap *rvp; struct ieee80211vap *vap; switch (opmode) { case IEEE80211_M_STA: case IEEE80211_M_IBSS: case IEEE80211_M_AHDEMO: case IEEE80211_M_MONITOR: case IEEE80211_M_HOSTAP: case IEEE80211_M_MBSS: /* XXXRP: TBD */ if (!TAILQ_EMPTY(&ic->ic_vaps)) { device_printf(sc->sc_dev, "only 1 vap supported\n"); return NULL; } if (opmode == 
IEEE80211_M_STA) flags |= IEEE80211_CLONE_NOBEACONS; break; case IEEE80211_M_WDS: if (TAILQ_EMPTY(&ic->ic_vaps) || ic->ic_opmode != IEEE80211_M_HOSTAP) { device_printf(sc->sc_dev, "wds only supported in ap mode\n"); return NULL; } /* * Silently remove any request for a unique * bssid; WDS vap's always share the local * mac address. */ flags &= ~IEEE80211_CLONE_BSSID; break; default: device_printf(sc->sc_dev, "unknown opmode %d\n", opmode); return NULL; } rvp = malloc(sizeof(struct rt2661_vap), M_80211_VAP, M_WAITOK | M_ZERO); vap = &rvp->ral_vap; ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid); /* override state transition machine */ rvp->ral_newstate = vap->iv_newstate; vap->iv_newstate = rt2661_newstate; #if 0 vap->iv_update_beacon = rt2661_beacon_update; #endif ieee80211_ratectl_init(vap); /* complete setup */ ieee80211_vap_attach(vap, ieee80211_media_change, ieee80211_media_status, mac); if (TAILQ_FIRST(&ic->ic_vaps) == vap) ic->ic_opmode = opmode; return vap; } static void rt2661_vap_delete(struct ieee80211vap *vap) { struct rt2661_vap *rvp = RT2661_VAP(vap); ieee80211_ratectl_deinit(vap); ieee80211_vap_detach(vap); free(rvp, M_80211_VAP); } void rt2661_shutdown(void *xsc) { struct rt2661_softc *sc = xsc; rt2661_stop(sc); } void rt2661_suspend(void *xsc) { struct rt2661_softc *sc = xsc; rt2661_stop(sc); } void rt2661_resume(void *xsc) { struct rt2661_softc *sc = xsc; if (sc->sc_ic.ic_nrunning > 0) rt2661_init(sc); } static void rt2661_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) { if (error != 0) return; KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); *(bus_addr_t *)arg = segs[0].ds_addr; } static int rt2661_alloc_tx_ring(struct rt2661_softc *sc, struct rt2661_tx_ring *ring, int count) { int i, error; ring->count = count; ring->queued = 0; ring->cur = ring->next = ring->stat = 0; error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 4, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, count * RT2661_TX_DESC_SIZE, 1, count * RT2661_TX_DESC_SIZE, 0, NULL, NULL, &ring->desc_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create desc DMA tag\n"); goto fail; } error = bus_dmamem_alloc(ring->desc_dmat, (void **)&ring->desc, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_map); if (error != 0) { device_printf(sc->sc_dev, "could not allocate DMA memory\n"); goto fail; } error = bus_dmamap_load(ring->desc_dmat, ring->desc_map, ring->desc, count * RT2661_TX_DESC_SIZE, rt2661_dma_map_addr, &ring->physaddr, 0); if (error != 0) { device_printf(sc->sc_dev, "could not load desc DMA map\n"); goto fail; } ring->data = malloc(count * sizeof (struct rt2661_tx_data), M_DEVBUF, M_NOWAIT | M_ZERO); if (ring->data == NULL) { device_printf(sc->sc_dev, "could not allocate soft data\n"); error = ENOMEM; goto fail; } error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, RT2661_MAX_SCATTER, MCLBYTES, 0, NULL, NULL, &ring->data_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create data DMA tag\n"); goto fail; } for (i = 0; i < count; i++) { error = bus_dmamap_create(ring->data_dmat, 0, &ring->data[i].map); if (error != 0) { device_printf(sc->sc_dev, "could not create DMA map\n"); goto fail; } } return 0; fail: rt2661_free_tx_ring(sc, ring); return error; } static void rt2661_reset_tx_ring(struct rt2661_softc *sc, struct rt2661_tx_ring *ring) { struct rt2661_tx_desc *desc; struct rt2661_tx_data *data; int i; for (i = 0; i < ring->count; i++) { 
desc = &ring->desc[i]; data = &ring->data[i]; if (data->m != NULL) { bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->data_dmat, data->map); m_freem(data->m); data->m = NULL; } if (data->ni != NULL) { ieee80211_free_node(data->ni); data->ni = NULL; } desc->flags = 0; } bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_PREWRITE); ring->queued = 0; ring->cur = ring->next = ring->stat = 0; } static void rt2661_free_tx_ring(struct rt2661_softc *sc, struct rt2661_tx_ring *ring) { struct rt2661_tx_data *data; int i; if (ring->desc != NULL) { bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->desc_dmat, ring->desc_map); bus_dmamem_free(ring->desc_dmat, ring->desc, ring->desc_map); } if (ring->desc_dmat != NULL) bus_dma_tag_destroy(ring->desc_dmat); if (ring->data != NULL) { for (i = 0; i < ring->count; i++) { data = &ring->data[i]; if (data->m != NULL) { bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->data_dmat, data->map); m_freem(data->m); } if (data->ni != NULL) ieee80211_free_node(data->ni); if (data->map != NULL) bus_dmamap_destroy(ring->data_dmat, data->map); } free(ring->data, M_DEVBUF); } if (ring->data_dmat != NULL) bus_dma_tag_destroy(ring->data_dmat); } static int rt2661_alloc_rx_ring(struct rt2661_softc *sc, struct rt2661_rx_ring *ring, int count) { struct rt2661_rx_desc *desc; struct rt2661_rx_data *data; bus_addr_t physaddr; int i, error; ring->count = count; ring->cur = ring->next = 0; error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 4, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, count * RT2661_RX_DESC_SIZE, 1, count * RT2661_RX_DESC_SIZE, 0, NULL, NULL, &ring->desc_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create desc DMA tag\n"); goto fail; } error = bus_dmamem_alloc(ring->desc_dmat, (void **)&ring->desc, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_map); if (error != 0) { device_printf(sc->sc_dev, "could not allocate DMA memory\n"); goto fail; } error = bus_dmamap_load(ring->desc_dmat, ring->desc_map, ring->desc, count * RT2661_RX_DESC_SIZE, rt2661_dma_map_addr, &ring->physaddr, 0); if (error != 0) { device_printf(sc->sc_dev, "could not load desc DMA map\n"); goto fail; } ring->data = malloc(count * sizeof (struct rt2661_rx_data), M_DEVBUF, M_NOWAIT | M_ZERO); if (ring->data == NULL) { device_printf(sc->sc_dev, "could not allocate soft data\n"); error = ENOMEM; goto fail; } /* * Pre-allocate Rx buffers and populate Rx ring. 
*/ error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, MCLBYTES, 0, NULL, NULL, &ring->data_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create data DMA tag\n"); goto fail; } for (i = 0; i < count; i++) { desc = &sc->rxq.desc[i]; data = &sc->rxq.data[i]; error = bus_dmamap_create(ring->data_dmat, 0, &data->map); if (error != 0) { device_printf(sc->sc_dev, "could not create DMA map\n"); goto fail; } data->m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (data->m == NULL) { device_printf(sc->sc_dev, "could not allocate rx mbuf\n"); error = ENOMEM; goto fail; } error = bus_dmamap_load(ring->data_dmat, data->map, mtod(data->m, void *), MCLBYTES, rt2661_dma_map_addr, &physaddr, 0); if (error != 0) { device_printf(sc->sc_dev, "could not load rx buf DMA map"); goto fail; } desc->flags = htole32(RT2661_RX_BUSY); desc->physaddr = htole32(physaddr); } bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_PREWRITE); return 0; fail: rt2661_free_rx_ring(sc, ring); return error; } static void rt2661_reset_rx_ring(struct rt2661_softc *sc, struct rt2661_rx_ring *ring) { int i; for (i = 0; i < ring->count; i++) ring->desc[i].flags = htole32(RT2661_RX_BUSY); bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_PREWRITE); ring->cur = ring->next = 0; } static void rt2661_free_rx_ring(struct rt2661_softc *sc, struct rt2661_rx_ring *ring) { struct rt2661_rx_data *data; int i; if (ring->desc != NULL) { bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->desc_dmat, ring->desc_map); bus_dmamem_free(ring->desc_dmat, ring->desc, ring->desc_map); } if (ring->desc_dmat != NULL) bus_dma_tag_destroy(ring->desc_dmat); if (ring->data != NULL) { for (i = 0; i < ring->count; i++) { data = &ring->data[i]; if (data->m != NULL) { bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(ring->data_dmat, data->map); m_freem(data->m); } if (data->map != NULL) bus_dmamap_destroy(ring->data_dmat, data->map); } free(ring->data, M_DEVBUF); } if (ring->data_dmat != NULL) bus_dma_tag_destroy(ring->data_dmat); } static int rt2661_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct rt2661_vap *rvp = RT2661_VAP(vap); struct ieee80211com *ic = vap->iv_ic; struct rt2661_softc *sc = ic->ic_softc; int error; if (nstate == IEEE80211_S_INIT && vap->iv_state == IEEE80211_S_RUN) { uint32_t tmp; /* abort TSF synchronization */ tmp = RAL_READ(sc, RT2661_TXRX_CSR9); RAL_WRITE(sc, RT2661_TXRX_CSR9, tmp & ~0x00ffffff); } error = rvp->ral_newstate(vap, nstate, arg); if (error == 0 && nstate == IEEE80211_S_RUN) { struct ieee80211_node *ni = vap->iv_bss; if (vap->iv_opmode != IEEE80211_M_MONITOR) { rt2661_enable_mrr(sc); rt2661_set_txpreamble(sc); rt2661_set_basicrates(sc, &ni->ni_rates); rt2661_set_bssid(sc, ni->ni_bssid); } if (vap->iv_opmode == IEEE80211_M_HOSTAP || vap->iv_opmode == IEEE80211_M_IBSS || vap->iv_opmode == IEEE80211_M_MBSS) { error = rt2661_prepare_beacon(sc, vap); if (error != 0) return error; } if (vap->iv_opmode != IEEE80211_M_MONITOR) rt2661_enable_tsf_sync(sc); else rt2661_enable_tsf(sc); } return error; } /* * Read 16 bits at address 'addr' from the serial EEPROM (either 93C46 or * 93C66). 
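 * The access is bit-banged through E2PROM_CSR: the chip is clocked
 * once, a start bit (1) and the READ opcode (10) are shifted out,
 * followed by 6 address bits for a 93C46 or 8 for a 93C66, and the
 * 16 data bits are then clocked back in, most significant bit first.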
*/ static uint16_t rt2661_eeprom_read(struct rt2661_softc *sc, uint8_t addr) { uint32_t tmp; uint16_t val; int n; /* clock C once before the first command */ RT2661_EEPROM_CTL(sc, 0); RT2661_EEPROM_CTL(sc, RT2661_S); RT2661_EEPROM_CTL(sc, RT2661_S | RT2661_C); RT2661_EEPROM_CTL(sc, RT2661_S); /* write start bit (1) */ RT2661_EEPROM_CTL(sc, RT2661_S | RT2661_D); RT2661_EEPROM_CTL(sc, RT2661_S | RT2661_D | RT2661_C); /* write READ opcode (10) */ RT2661_EEPROM_CTL(sc, RT2661_S | RT2661_D); RT2661_EEPROM_CTL(sc, RT2661_S | RT2661_D | RT2661_C); RT2661_EEPROM_CTL(sc, RT2661_S); RT2661_EEPROM_CTL(sc, RT2661_S | RT2661_C); /* write address (A5-A0 or A7-A0) */ n = (RAL_READ(sc, RT2661_E2PROM_CSR) & RT2661_93C46) ? 5 : 7; for (; n >= 0; n--) { RT2661_EEPROM_CTL(sc, RT2661_S | (((addr >> n) & 1) << RT2661_SHIFT_D)); RT2661_EEPROM_CTL(sc, RT2661_S | (((addr >> n) & 1) << RT2661_SHIFT_D) | RT2661_C); } RT2661_EEPROM_CTL(sc, RT2661_S); /* read data Q15-Q0 */ val = 0; for (n = 15; n >= 0; n--) { RT2661_EEPROM_CTL(sc, RT2661_S | RT2661_C); tmp = RAL_READ(sc, RT2661_E2PROM_CSR); val |= ((tmp & RT2661_Q) >> RT2661_SHIFT_Q) << n; RT2661_EEPROM_CTL(sc, RT2661_S); } RT2661_EEPROM_CTL(sc, 0); /* clear Chip Select and clock C */ RT2661_EEPROM_CTL(sc, RT2661_S); RT2661_EEPROM_CTL(sc, 0); RT2661_EEPROM_CTL(sc, RT2661_C); return val; } static void rt2661_tx_intr(struct rt2661_softc *sc) { struct ieee80211_ratectl_tx_status *txs = &sc->sc_txs; struct rt2661_tx_ring *txq; struct rt2661_tx_data *data; uint32_t val; int error, qid; txs->flags = IEEE80211_RATECTL_TX_FAIL_LONG; for (;;) { struct ieee80211_node *ni; struct mbuf *m; val = RAL_READ(sc, RT2661_STA_CSR4); if (!(val & RT2661_TX_STAT_VALID)) break; /* retrieve the queue in which this frame was sent */ qid = RT2661_TX_QID(val); txq = (qid <= 3) ? 
&sc->txq[qid] : &sc->mgtq; /* retrieve rate control algorithm context */ data = &txq->data[txq->stat]; m = data->m; data->m = NULL; ni = data->ni; data->ni = NULL; /* if no frame has been sent, ignore */ if (ni == NULL) continue; switch (RT2661_TX_RESULT(val)) { case RT2661_TX_SUCCESS: txs->status = IEEE80211_RATECTL_TX_SUCCESS; txs->long_retries = RT2661_TX_RETRYCNT(val); DPRINTFN(sc, 10, "data frame sent successfully after " "%d retries\n", txs->long_retries); if (data->rix != IEEE80211_FIXED_RATE_NONE) ieee80211_ratectl_tx_complete(ni, txs); error = 0; break; case RT2661_TX_RETRY_FAIL: txs->status = IEEE80211_RATECTL_TX_FAIL_LONG; txs->long_retries = RT2661_TX_RETRYCNT(val); DPRINTFN(sc, 9, "%s\n", "sending data frame failed (too much retries)"); if (data->rix != IEEE80211_FIXED_RATE_NONE) ieee80211_ratectl_tx_complete(ni, txs); error = 1; break; default: /* other failure */ device_printf(sc->sc_dev, "sending data frame failed 0x%08x\n", val); error = 1; } DPRINTFN(sc, 15, "tx done q=%d idx=%u\n", qid, txq->stat); txq->queued--; if (++txq->stat >= txq->count) /* faster than % count */ txq->stat = 0; ieee80211_tx_complete(ni, m, error); } sc->sc_tx_timer = 0; rt2661_start(sc); } static void rt2661_tx_dma_intr(struct rt2661_softc *sc, struct rt2661_tx_ring *txq) { struct rt2661_tx_desc *desc; struct rt2661_tx_data *data; bus_dmamap_sync(txq->desc_dmat, txq->desc_map, BUS_DMASYNC_POSTREAD); for (;;) { desc = &txq->desc[txq->next]; data = &txq->data[txq->next]; if ((le32toh(desc->flags) & RT2661_TX_BUSY) || !(le32toh(desc->flags) & RT2661_TX_VALID)) break; bus_dmamap_sync(txq->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(txq->data_dmat, data->map); /* descriptor is no longer valid */ desc->flags &= ~htole32(RT2661_TX_VALID); DPRINTFN(sc, 15, "tx dma done q=%p idx=%u\n", txq, txq->next); if (++txq->next >= txq->count) /* faster than % count */ txq->next = 0; } bus_dmamap_sync(txq->desc_dmat, txq->desc_map, BUS_DMASYNC_PREWRITE); } static void rt2661_rx_intr(struct rt2661_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct rt2661_rx_desc *desc; struct rt2661_rx_data *data; bus_addr_t physaddr; struct ieee80211_frame *wh; struct ieee80211_node *ni; struct mbuf *mnew, *m; int error; bus_dmamap_sync(sc->rxq.desc_dmat, sc->rxq.desc_map, BUS_DMASYNC_POSTREAD); for (;;) { int8_t rssi, nf; desc = &sc->rxq.desc[sc->rxq.cur]; data = &sc->rxq.data[sc->rxq.cur]; if (le32toh(desc->flags) & RT2661_RX_BUSY) break; if ((le32toh(desc->flags) & RT2661_RX_PHY_ERROR) || (le32toh(desc->flags) & RT2661_RX_CRC_ERROR)) { /* * This should not happen since we did not request * to receive those frames when we filled TXRX_CSR0. */ DPRINTFN(sc, 5, "PHY or CRC error flags 0x%08x\n", le32toh(desc->flags)); counter_u64_add(ic->ic_ierrors, 1); goto skip; } if ((le32toh(desc->flags) & RT2661_RX_CIPHER_MASK) != 0) { counter_u64_add(ic->ic_ierrors, 1); goto skip; } /* * Try to allocate a new mbuf for this ring element and load it * before processing the current mbuf. If the ring element * cannot be loaded, drop the received packet and reuse the old * mbuf. In the unlikely case that the old mbuf can't be * reloaded either, explicitly panic. 
*/ mnew = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (mnew == NULL) { counter_u64_add(ic->ic_ierrors, 1); goto skip; } bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->rxq.data_dmat, data->map); error = bus_dmamap_load(sc->rxq.data_dmat, data->map, mtod(mnew, void *), MCLBYTES, rt2661_dma_map_addr, &physaddr, 0); if (error != 0) { m_freem(mnew); /* try to reload the old mbuf */ error = bus_dmamap_load(sc->rxq.data_dmat, data->map, mtod(data->m, void *), MCLBYTES, rt2661_dma_map_addr, &physaddr, 0); if (error != 0) { /* very unlikely that it will fail... */ panic("%s: could not load old rx mbuf", device_get_name(sc->sc_dev)); } counter_u64_add(ic->ic_ierrors, 1); goto skip; } /* * New mbuf successfully loaded, update Rx ring and continue * processing. */ m = data->m; data->m = mnew; desc->physaddr = htole32(physaddr); /* finalize mbuf */ m->m_pkthdr.len = m->m_len = (le32toh(desc->flags) >> 16) & 0xfff; rssi = rt2661_get_rssi(sc, desc->rssi); /* Error happened during RSSI conversion. */ if (rssi < 0) rssi = -30; /* XXX ignored by net80211 */ nf = RT2661_NOISE_FLOOR; if (ieee80211_radiotap_active(ic)) { struct rt2661_rx_radiotap_header *tap = &sc->sc_rxtap; uint32_t tsf_lo, tsf_hi; /* get timestamp (low and high 32 bits) */ tsf_hi = RAL_READ(sc, RT2661_TXRX_CSR13); tsf_lo = RAL_READ(sc, RT2661_TXRX_CSR12); tap->wr_tsf = htole64(((uint64_t)tsf_hi << 32) | tsf_lo); tap->wr_flags = 0; tap->wr_rate = ieee80211_plcp2rate(desc->rate, (desc->flags & htole32(RT2661_RX_OFDM)) ? IEEE80211_T_OFDM : IEEE80211_T_CCK); tap->wr_antsignal = nf + rssi; tap->wr_antnoise = nf; } sc->sc_flags |= RAL_INPUT_RUNNING; RAL_UNLOCK(sc); wh = mtod(m, struct ieee80211_frame *); /* send the frame to the 802.11 layer */ ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh); if (ni != NULL) { (void) ieee80211_input(ni, m, rssi, nf); ieee80211_free_node(ni); } else (void) ieee80211_input_all(ic, m, rssi, nf); RAL_LOCK(sc); sc->sc_flags &= ~RAL_INPUT_RUNNING; skip: desc->flags |= htole32(RT2661_RX_BUSY); DPRINTFN(sc, 15, "rx intr idx=%u\n", sc->rxq.cur); sc->rxq.cur = (sc->rxq.cur + 1) % RT2661_RX_RING_COUNT; } bus_dmamap_sync(sc->rxq.desc_dmat, sc->rxq.desc_map, BUS_DMASYNC_PREWRITE); } /* ARGSUSED */ static void rt2661_mcu_beacon_expire(struct rt2661_softc *sc) { /* do nothing */ } static void rt2661_mcu_wakeup(struct rt2661_softc *sc) { RAL_WRITE(sc, RT2661_MAC_CSR11, 5 << 16); RAL_WRITE(sc, RT2661_SOFT_RESET_CSR, 0x7); RAL_WRITE(sc, RT2661_IO_CNTL_CSR, 0x18); RAL_WRITE(sc, RT2661_PCI_USEC_CSR, 0x20); /* send wakeup command to MCU */ rt2661_tx_cmd(sc, RT2661_MCU_CMD_WAKEUP, 0); } static void rt2661_mcu_cmd_intr(struct rt2661_softc *sc) { RAL_READ(sc, RT2661_M2H_CMD_DONE_CSR); RAL_WRITE(sc, RT2661_M2H_CMD_DONE_CSR, 0xffffffff); } void rt2661_intr(void *arg) { struct rt2661_softc *sc = arg; uint32_t r1, r2; RAL_LOCK(sc); /* disable MAC and MCU interrupts */ RAL_WRITE(sc, RT2661_INT_MASK_CSR, 0xffffff7f); RAL_WRITE(sc, RT2661_MCU_INT_MASK_CSR, 0xffffffff); /* don't re-enable interrupts if we're shutting down */ if (!(sc->sc_flags & RAL_RUNNING)) { RAL_UNLOCK(sc); return; } r1 = RAL_READ(sc, RT2661_INT_SOURCE_CSR); RAL_WRITE(sc, RT2661_INT_SOURCE_CSR, r1); r2 = RAL_READ(sc, RT2661_MCU_INT_SOURCE_CSR); RAL_WRITE(sc, RT2661_MCU_INT_SOURCE_CSR, r2); if (r1 & RT2661_MGT_DONE) rt2661_tx_dma_intr(sc, &sc->mgtq); if (r1 & RT2661_RX_DONE) rt2661_rx_intr(sc); if (r1 & RT2661_TX0_DMA_DONE) rt2661_tx_dma_intr(sc, &sc->txq[0]); if (r1 & RT2661_TX1_DMA_DONE) rt2661_tx_dma_intr(sc, 
&sc->txq[1]); if (r1 & RT2661_TX2_DMA_DONE) rt2661_tx_dma_intr(sc, &sc->txq[2]); if (r1 & RT2661_TX3_DMA_DONE) rt2661_tx_dma_intr(sc, &sc->txq[3]); if (r1 & RT2661_TX_DONE) rt2661_tx_intr(sc); if (r2 & RT2661_MCU_CMD_DONE) rt2661_mcu_cmd_intr(sc); if (r2 & RT2661_MCU_BEACON_EXPIRE) rt2661_mcu_beacon_expire(sc); if (r2 & RT2661_MCU_WAKEUP) rt2661_mcu_wakeup(sc); /* re-enable MAC and MCU interrupts */ RAL_WRITE(sc, RT2661_INT_MASK_CSR, 0x0000ff10); RAL_WRITE(sc, RT2661_MCU_INT_MASK_CSR, 0); RAL_UNLOCK(sc); } static uint8_t rt2661_plcp_signal(int rate) { switch (rate) { /* OFDM rates (cf IEEE Std 802.11a-1999, pp. 14 Table 80) */ case 12: return 0xb; case 18: return 0xf; case 24: return 0xa; case 36: return 0xe; case 48: return 0x9; case 72: return 0xd; case 96: return 0x8; case 108: return 0xc; /* CCK rates (NB: not IEEE std, device-specific) */ case 2: return 0x0; case 4: return 0x1; case 11: return 0x2; case 22: return 0x3; } return 0xff; /* XXX unsupported/unknown rate */ } static void rt2661_setup_tx_desc(struct rt2661_softc *sc, struct rt2661_tx_desc *desc, uint32_t flags, uint16_t xflags, int len, int rate, const bus_dma_segment_t *segs, int nsegs, int ac) { struct ieee80211com *ic = &sc->sc_ic; uint16_t plcp_length; int i, remainder; desc->flags = htole32(flags); desc->flags |= htole32(len << 16); desc->flags |= htole32(RT2661_TX_BUSY | RT2661_TX_VALID); desc->xflags = htole16(xflags); desc->xflags |= htole16(nsegs << 13); desc->wme = htole16( RT2661_QID(ac) | RT2661_AIFSN(2) | RT2661_LOGCWMIN(4) | RT2661_LOGCWMAX(10)); /* * Remember in which queue this frame was sent. This field is driver * private data only. It will be made available by the NIC in STA_CSR4 * on Tx interrupts. */ desc->qid = ac; /* setup PLCP fields */ desc->plcp_signal = rt2661_plcp_signal(rate); desc->plcp_service = 4; len += IEEE80211_CRC_LEN; if (ieee80211_rate2phytype(ic->ic_rt, rate) == IEEE80211_T_OFDM) { desc->flags |= htole32(RT2661_TX_OFDM); plcp_length = len & 0xfff; desc->plcp_length_hi = plcp_length >> 6; desc->plcp_length_lo = plcp_length & 0x3f; } else { plcp_length = howmany(16 * len, rate); if (rate == 22) { remainder = (16 * len) % 22; if (remainder != 0 && remainder < 7) desc->plcp_service |= RT2661_PLCP_LENGEXT; } desc->plcp_length_hi = plcp_length >> 8; desc->plcp_length_lo = plcp_length & 0xff; if (rate != 2 && (ic->ic_flags & IEEE80211_F_SHPREAMBLE)) desc->plcp_signal |= 0x08; } /* RT2x61 supports scatter with up to 5 segments */ for (i = 0; i < nsegs; i++) { desc->addr[i] = htole32(segs[i].ds_addr); desc->len [i] = htole16(segs[i].ds_len); } } static int rt2661_tx_mgt(struct rt2661_softc *sc, struct mbuf *m0, struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct rt2661_tx_desc *desc; struct rt2661_tx_data *data; struct ieee80211_frame *wh; struct ieee80211_key *k; bus_dma_segment_t segs[RT2661_MAX_SCATTER]; uint16_t dur; uint32_t flags = 0; /* XXX HWSEQ */ int nsegs, rate, error; desc = &sc->mgtq.desc[sc->mgtq.cur]; data = &sc->mgtq.data[sc->mgtq.cur]; rate = ni->ni_txparms->mgmtrate; wh = mtod(m0, struct ieee80211_frame *); if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { k = ieee80211_crypto_encap(ni, m0); if (k == NULL) { m_freem(m0); return ENOBUFS; } } error = bus_dmamap_load_mbuf_sg(sc->mgtq.data_dmat, data->map, m0, segs, &nsegs, 0); if (error != 0) { device_printf(sc->sc_dev, "could not map mbuf (error %d)\n", error); m_freem(m0); return error; } if (ieee80211_radiotap_active_vap(vap)) { struct rt2661_tx_radiotap_header *tap = 
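/*
 * Worked example for the PLCP length math in rt2661_setup_tx_desc()
 * above (illustrative): for CCK the LENGTH field is the airtime in
 * microseconds, howmany(16 * len, rate) with rate in 500 kb/s units.
 * A 1000-byte frame (1004 with the FCS) at 11 Mb/s (rate 22) gives
 * howmany(16064, 22) = 731 with remainder 4, so RT2661_PLCP_LENGEXT
 * is set because the remainder is non-zero and below 7.  For OFDM the
 * field is simply the 12-bit byte count split into its hi/lo halves.
 */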
&sc->sc_txtap; tap->wt_flags = 0; tap->wt_rate = rate; ieee80211_radiotap_tx(vap, m0); } data->m = m0; data->ni = ni; /* management frames are not taken into account for amrr */ data->rix = IEEE80211_FIXED_RATE_NONE; wh = mtod(m0, struct ieee80211_frame *); if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { flags |= RT2661_TX_NEED_ACK; dur = ieee80211_ack_duration(ic->ic_rt, rate, ic->ic_flags & IEEE80211_F_SHPREAMBLE); *(uint16_t *)wh->i_dur = htole16(dur); /* tell hardware to add timestamp in probe responses */ if ((wh->i_fc[0] & (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) == (IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_PROBE_RESP)) flags |= RT2661_TX_TIMESTAMP; } rt2661_setup_tx_desc(sc, desc, flags, 0 /* XXX HWSEQ */, m0->m_pkthdr.len, rate, segs, nsegs, RT2661_QID_MGT); bus_dmamap_sync(sc->mgtq.data_dmat, data->map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(sc->mgtq.desc_dmat, sc->mgtq.desc_map, BUS_DMASYNC_PREWRITE); DPRINTFN(sc, 10, "sending mgt frame len=%u idx=%u rate=%u\n", m0->m_pkthdr.len, sc->mgtq.cur, rate); /* kick mgt */ sc->mgtq.queued++; sc->mgtq.cur = (sc->mgtq.cur + 1) % RT2661_MGT_RING_COUNT; RAL_WRITE(sc, RT2661_TX_CNTL_CSR, RT2661_KICK_MGT); return 0; } static int rt2661_sendprot(struct rt2661_softc *sc, int ac, const struct mbuf *m, struct ieee80211_node *ni, int prot, int rate) { struct ieee80211com *ic = ni->ni_ic; struct rt2661_tx_ring *txq = &sc->txq[ac]; - const struct ieee80211_frame *wh; struct rt2661_tx_desc *desc; struct rt2661_tx_data *data; struct mbuf *mprot; - int protrate, ackrate, pktlen, flags, isshort, error; - uint16_t dur; + int protrate, flags, error; bus_dma_segment_t segs[RT2661_MAX_SCATTER]; int nsegs; - KASSERT(prot == IEEE80211_PROT_RTSCTS || prot == IEEE80211_PROT_CTSONLY, - ("protection %d", prot)); - - wh = mtod(m, const struct ieee80211_frame *); - pktlen = m->m_pkthdr.len + IEEE80211_CRC_LEN; - - protrate = ieee80211_ctl_rate(ic->ic_rt, rate); - ackrate = ieee80211_ack_rate(ic->ic_rt, rate); - - isshort = (ic->ic_flags & IEEE80211_F_SHPREAMBLE) != 0; - dur = ieee80211_compute_duration(ic->ic_rt, pktlen, rate, isshort) - + ieee80211_ack_duration(ic->ic_rt, rate, isshort); - flags = RT2661_TX_MORE_FRAG; - if (prot == IEEE80211_PROT_RTSCTS) { - /* NB: CTS is the same size as an ACK */ - dur += ieee80211_ack_duration(ic->ic_rt, rate, isshort); - flags |= RT2661_TX_NEED_ACK; - mprot = ieee80211_alloc_rts(ic, wh->i_addr1, wh->i_addr2, dur); - } else { - mprot = ieee80211_alloc_cts(ic, ni->ni_vap->iv_myaddr, dur); - } + mprot = ieee80211_alloc_prot(ni, m, rate, prot); if (mprot == NULL) { - /* XXX stat + msg */ + if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1); + device_printf(sc->sc_dev, + "could not allocate mbuf for protection mode %d\n", prot); return ENOBUFS; } data = &txq->data[txq->cur]; desc = &txq->desc[txq->cur]; error = bus_dmamap_load_mbuf_sg(txq->data_dmat, data->map, mprot, segs, &nsegs, 0); if (error != 0) { device_printf(sc->sc_dev, "could not map mbuf (error %d)\n", error); m_freem(mprot); return error; } data->m = mprot; data->ni = ieee80211_ref_node(ni); /* ctl frames are not taken into account for amrr */ data->rix = IEEE80211_FIXED_RATE_NONE; + + protrate = ieee80211_ctl_rate(ic->ic_rt, rate); + flags = RT2661_TX_MORE_FRAG; + if (prot == IEEE80211_PROT_RTSCTS) + flags |= RT2661_TX_NEED_ACK; rt2661_setup_tx_desc(sc, desc, flags, 0, mprot->m_pkthdr.len, protrate, segs, 1, ac); bus_dmamap_sync(txq->data_dmat, data->map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(txq->desc_dmat, txq->desc_map, BUS_DMASYNC_PREWRITE); 
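/*
 * The protection frame built above occupies its own descriptor on the
 * same AC ring, is sent at the control rate for `rate', and is flagged
 * RT2661_TX_MORE_FRAG so the protected data frame follows it;
 * RT2661_TX_NEED_ACK is added only for RTS, which expects a CTS in
 * reply.  This mirrors the rt2560_sendprot() change earlier in this
 * patch, with ieee80211_alloc_prot() now supplying both the control
 * frame and its duration field.
 */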
txq->queued++; txq->cur = (txq->cur + 1) % RT2661_TX_RING_COUNT; return 0; } static int rt2661_tx_data(struct rt2661_softc *sc, struct mbuf *m0, struct ieee80211_node *ni, int ac) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = &sc->sc_ic; struct rt2661_tx_ring *txq = &sc->txq[ac]; struct rt2661_tx_desc *desc; struct rt2661_tx_data *data; struct ieee80211_frame *wh; const struct ieee80211_txparam *tp = ni->ni_txparms; struct ieee80211_key *k; struct mbuf *mnew; bus_dma_segment_t segs[RT2661_MAX_SCATTER]; uint16_t dur; uint32_t flags; int error, nsegs, rate, noack = 0; wh = mtod(m0, struct ieee80211_frame *); if (m0->m_flags & M_EAPOL) { rate = tp->mgmtrate; } else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { rate = tp->mcastrate; } else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) { rate = tp->ucastrate; } else { (void) ieee80211_ratectl_rate(ni, NULL, 0); rate = ni->ni_txrate; } rate &= IEEE80211_RATE_VAL; if (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS) noack = !! ieee80211_wme_vap_ac_is_noack(vap, ac); if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { k = ieee80211_crypto_encap(ni, m0); if (k == NULL) { m_freem(m0); return ENOBUFS; } /* packet header may have moved, reset our local pointer */ wh = mtod(m0, struct ieee80211_frame *); } flags = 0; if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { int prot = IEEE80211_PROT_NONE; if (m0->m_pkthdr.len + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) prot = IEEE80211_PROT_RTSCTS; else if ((ic->ic_flags & IEEE80211_F_USEPROT) && ieee80211_rate2phytype(ic->ic_rt, rate) == IEEE80211_T_OFDM) prot = ic->ic_protmode; if (prot != IEEE80211_PROT_NONE) { error = rt2661_sendprot(sc, ac, m0, ni, prot, rate); if (error) { m_freem(m0); return error; } flags |= RT2661_TX_LONG_RETRY | RT2661_TX_IFS; } } data = &txq->data[txq->cur]; desc = &txq->desc[txq->cur]; error = bus_dmamap_load_mbuf_sg(txq->data_dmat, data->map, m0, segs, &nsegs, 0); if (error != 0 && error != EFBIG) { device_printf(sc->sc_dev, "could not map mbuf (error %d)\n", error); m_freem(m0); return error; } if (error != 0) { mnew = m_defrag(m0, M_NOWAIT); if (mnew == NULL) { device_printf(sc->sc_dev, "could not defragment mbuf\n"); m_freem(m0); return ENOBUFS; } m0 = mnew; error = bus_dmamap_load_mbuf_sg(txq->data_dmat, data->map, m0, segs, &nsegs, 0); if (error != 0) { device_printf(sc->sc_dev, "could not map mbuf (error %d)\n", error); m_freem(m0); return error; } /* packet header have moved, reset our local pointer */ wh = mtod(m0, struct ieee80211_frame *); } if (ieee80211_radiotap_active_vap(vap)) { struct rt2661_tx_radiotap_header *tap = &sc->sc_txtap; tap->wt_flags = 0; tap->wt_rate = rate; ieee80211_radiotap_tx(vap, m0); } data->m = m0; data->ni = ni; /* remember link conditions for rate adaptation algorithm */ if (tp->ucastrate == IEEE80211_FIXED_RATE_NONE) { data->rix = ni->ni_txrate; /* XXX probably need last rssi value and not avg */ data->rssi = ic->ic_node_getrssi(ni); } else data->rix = IEEE80211_FIXED_RATE_NONE; if (!noack && !IEEE80211_IS_MULTICAST(wh->i_addr1)) { flags |= RT2661_TX_NEED_ACK; dur = ieee80211_ack_duration(ic->ic_rt, rate, ic->ic_flags & IEEE80211_F_SHPREAMBLE); *(uint16_t *)wh->i_dur = htole16(dur); } rt2661_setup_tx_desc(sc, desc, flags, 0, m0->m_pkthdr.len, rate, segs, nsegs, ac); bus_dmamap_sync(txq->data_dmat, data->map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(txq->desc_dmat, txq->desc_map, BUS_DMASYNC_PREWRITE); DPRINTFN(sc, 10, "sending data frame len=%u idx=%u rate=%u\n", m0->m_pkthdr.len, txq->cur, rate); /* kick Tx */ txq->queued++; txq->cur = (txq->cur 
+ 1) % RT2661_TX_RING_COUNT; RAL_WRITE(sc, RT2661_TX_CNTL_CSR, 1 << ac); return 0; } static int rt2661_transmit(struct ieee80211com *ic, struct mbuf *m) { struct rt2661_softc *sc = ic->ic_softc; int error; RAL_LOCK(sc); if ((sc->sc_flags & RAL_RUNNING) == 0) { RAL_UNLOCK(sc); return (ENXIO); } error = mbufq_enqueue(&sc->sc_snd, m); if (error) { RAL_UNLOCK(sc); return (error); } rt2661_start(sc); RAL_UNLOCK(sc); return (0); } static void rt2661_start(struct rt2661_softc *sc) { struct mbuf *m; struct ieee80211_node *ni; int ac; RAL_LOCK_ASSERT(sc); /* prevent management frames from being sent if we're not ready */ if (!(sc->sc_flags & RAL_RUNNING) || sc->sc_invalid) return; while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) { ac = M_WME_GETAC(m); if (sc->txq[ac].queued >= RT2661_TX_RING_COUNT - 1) { /* there is no place left in this ring */ mbufq_prepend(&sc->sc_snd, m); break; } ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; if (rt2661_tx_data(sc, m, ni, ac) != 0) { if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1); ieee80211_free_node(ni); break; } sc->sc_tx_timer = 5; } } static int rt2661_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params) { struct ieee80211com *ic = ni->ni_ic; struct rt2661_softc *sc = ic->ic_softc; RAL_LOCK(sc); /* prevent management frames from being sent if we're not ready */ if (!(sc->sc_flags & RAL_RUNNING)) { RAL_UNLOCK(sc); m_freem(m); return ENETDOWN; } if (sc->mgtq.queued >= RT2661_MGT_RING_COUNT) { RAL_UNLOCK(sc); m_freem(m); return ENOBUFS; /* XXX */ } /* * Legacy path; interpret frame contents to decide * precisely how to send the frame. * XXX raw path */ if (rt2661_tx_mgt(sc, m, ni) != 0) goto bad; sc->sc_tx_timer = 5; RAL_UNLOCK(sc); return 0; bad: RAL_UNLOCK(sc); return EIO; /* XXX */ } static void rt2661_watchdog(void *arg) { struct rt2661_softc *sc = (struct rt2661_softc *)arg; RAL_LOCK_ASSERT(sc); KASSERT(sc->sc_flags & RAL_RUNNING, ("not running")); if (sc->sc_invalid) /* card ejected */ return; if (sc->sc_tx_timer > 0 && --sc->sc_tx_timer == 0) { device_printf(sc->sc_dev, "device timeout\n"); rt2661_init_locked(sc); counter_u64_add(sc->sc_ic.ic_oerrors, 1); /* NB: callout is reset in rt2661_init() */ return; } callout_reset(&sc->watchdog_ch, hz, rt2661_watchdog, sc); } static void rt2661_parent(struct ieee80211com *ic) { struct rt2661_softc *sc = ic->ic_softc; int startall = 0; RAL_LOCK(sc); if (ic->ic_nrunning > 0) { if ((sc->sc_flags & RAL_RUNNING) == 0) { rt2661_init_locked(sc); startall = 1; } else rt2661_update_promisc(ic); } else if (sc->sc_flags & RAL_RUNNING) rt2661_stop_locked(sc); RAL_UNLOCK(sc); if (startall) ieee80211_start_all(ic); } static void rt2661_bbp_write(struct rt2661_softc *sc, uint8_t reg, uint8_t val) { uint32_t tmp; int ntries; for (ntries = 0; ntries < 100; ntries++) { if (!(RAL_READ(sc, RT2661_PHY_CSR3) & RT2661_BBP_BUSY)) break; DELAY(1); } if (ntries == 100) { device_printf(sc->sc_dev, "could not write to BBP\n"); return; } tmp = RT2661_BBP_BUSY | (reg & 0x7f) << 8 | val; RAL_WRITE(sc, RT2661_PHY_CSR3, tmp); DPRINTFN(sc, 15, "BBP R%u <- 0x%02x\n", reg, val); } static uint8_t rt2661_bbp_read(struct rt2661_softc *sc, uint8_t reg) { uint32_t val; int ntries; for (ntries = 0; ntries < 100; ntries++) { if (!(RAL_READ(sc, RT2661_PHY_CSR3) & RT2661_BBP_BUSY)) break; DELAY(1); } if (ntries == 100) { device_printf(sc->sc_dev, "could not read from BBP\n"); return 0; } val = RT2661_BBP_BUSY | RT2661_BBP_READ | reg << 8; RAL_WRITE(sc, RT2661_PHY_CSR3, val); for (ntries = 0; ntries < 
100; ntries++) { val = RAL_READ(sc, RT2661_PHY_CSR3); if (!(val & RT2661_BBP_BUSY)) return val & 0xff; DELAY(1); } device_printf(sc->sc_dev, "could not read from BBP\n"); return 0; } static void rt2661_rf_write(struct rt2661_softc *sc, uint8_t reg, uint32_t val) { uint32_t tmp; int ntries; for (ntries = 0; ntries < 100; ntries++) { if (!(RAL_READ(sc, RT2661_PHY_CSR4) & RT2661_RF_BUSY)) break; DELAY(1); } if (ntries == 100) { device_printf(sc->sc_dev, "could not write to RF\n"); return; } tmp = RT2661_RF_BUSY | RT2661_RF_21BIT | (val & 0x1fffff) << 2 | (reg & 3); RAL_WRITE(sc, RT2661_PHY_CSR4, tmp); /* remember last written value in sc */ sc->rf_regs[reg] = val; DPRINTFN(sc, 15, "RF R[%u] <- 0x%05x\n", reg & 3, val & 0x1fffff); } static int rt2661_tx_cmd(struct rt2661_softc *sc, uint8_t cmd, uint16_t arg) { if (RAL_READ(sc, RT2661_H2M_MAILBOX_CSR) & RT2661_H2M_BUSY) return EIO; /* there is already a command pending */ RAL_WRITE(sc, RT2661_H2M_MAILBOX_CSR, RT2661_H2M_BUSY | RT2661_TOKEN_NO_INTR << 16 | arg); RAL_WRITE(sc, RT2661_HOST_CMD_CSR, RT2661_KICK_CMD | cmd); return 0; } static void rt2661_select_antenna(struct rt2661_softc *sc) { uint8_t bbp4, bbp77; uint32_t tmp; bbp4 = rt2661_bbp_read(sc, 4); bbp77 = rt2661_bbp_read(sc, 77); /* TBD */ /* make sure Rx is disabled before switching antenna */ tmp = RAL_READ(sc, RT2661_TXRX_CSR0); RAL_WRITE(sc, RT2661_TXRX_CSR0, tmp | RT2661_DISABLE_RX); rt2661_bbp_write(sc, 4, bbp4); rt2661_bbp_write(sc, 77, bbp77); /* restore Rx filter */ RAL_WRITE(sc, RT2661_TXRX_CSR0, tmp); } /* * Enable multi-rate retries for frames sent at OFDM rates. * In 802.11b/g mode, allow fallback to CCK rates. */ static void rt2661_enable_mrr(struct rt2661_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; uint32_t tmp; tmp = RAL_READ(sc, RT2661_TXRX_CSR4); tmp &= ~RT2661_MRR_CCK_FALLBACK; if (!IEEE80211_IS_CHAN_5GHZ(ic->ic_bsschan)) tmp |= RT2661_MRR_CCK_FALLBACK; tmp |= RT2661_MRR_ENABLED; RAL_WRITE(sc, RT2661_TXRX_CSR4, tmp); } static void rt2661_set_txpreamble(struct rt2661_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; uint32_t tmp; tmp = RAL_READ(sc, RT2661_TXRX_CSR4); tmp &= ~RT2661_SHORT_PREAMBLE; if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) tmp |= RT2661_SHORT_PREAMBLE; RAL_WRITE(sc, RT2661_TXRX_CSR4, tmp); } static void rt2661_set_basicrates(struct rt2661_softc *sc, const struct ieee80211_rateset *rs) { struct ieee80211com *ic = &sc->sc_ic; uint32_t mask = 0; uint8_t rate; int i; for (i = 0; i < rs->rs_nrates; i++) { rate = rs->rs_rates[i]; if (!(rate & IEEE80211_RATE_BASIC)) continue; mask |= 1 << ieee80211_legacy_rate_lookup(ic->ic_rt, IEEE80211_RV(rate)); } RAL_WRITE(sc, RT2661_TXRX_CSR5, mask); DPRINTF(sc, "Setting basic rate mask to 0x%x\n", mask); } /* * Reprogram MAC/BBP to switch to a new band. Values taken from the reference * driver. 
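 * As the writes below show, a handful of BBP registers get fixed offsets
 * added for 5GHz operation and for boards with an external LNA, and
 * PHY_CSR0 switches the PA_PE enable between the 2GHz and 5GHz power
 * amplifiers.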
*/ static void rt2661_select_band(struct rt2661_softc *sc, struct ieee80211_channel *c) { uint8_t bbp17, bbp35, bbp96, bbp97, bbp98, bbp104; uint32_t tmp; /* update all BBP registers that depend on the band */ bbp17 = 0x20; bbp96 = 0x48; bbp104 = 0x2c; bbp35 = 0x50; bbp97 = 0x48; bbp98 = 0x48; if (IEEE80211_IS_CHAN_5GHZ(c)) { bbp17 += 0x08; bbp96 += 0x10; bbp104 += 0x0c; bbp35 += 0x10; bbp97 += 0x10; bbp98 += 0x10; } if ((IEEE80211_IS_CHAN_2GHZ(c) && sc->ext_2ghz_lna) || (IEEE80211_IS_CHAN_5GHZ(c) && sc->ext_5ghz_lna)) { bbp17 += 0x10; bbp96 += 0x10; bbp104 += 0x10; } rt2661_bbp_write(sc, 17, bbp17); rt2661_bbp_write(sc, 96, bbp96); rt2661_bbp_write(sc, 104, bbp104); if ((IEEE80211_IS_CHAN_2GHZ(c) && sc->ext_2ghz_lna) || (IEEE80211_IS_CHAN_5GHZ(c) && sc->ext_5ghz_lna)) { rt2661_bbp_write(sc, 75, 0x80); rt2661_bbp_write(sc, 86, 0x80); rt2661_bbp_write(sc, 88, 0x80); } rt2661_bbp_write(sc, 35, bbp35); rt2661_bbp_write(sc, 97, bbp97); rt2661_bbp_write(sc, 98, bbp98); tmp = RAL_READ(sc, RT2661_PHY_CSR0); tmp &= ~(RT2661_PA_PE_2GHZ | RT2661_PA_PE_5GHZ); if (IEEE80211_IS_CHAN_2GHZ(c)) tmp |= RT2661_PA_PE_2GHZ; else tmp |= RT2661_PA_PE_5GHZ; RAL_WRITE(sc, RT2661_PHY_CSR0, tmp); } static void rt2661_set_chan(struct rt2661_softc *sc, struct ieee80211_channel *c) { struct ieee80211com *ic = &sc->sc_ic; const struct rfprog *rfprog; uint8_t bbp3, bbp94 = RT2661_BBPR94_DEFAULT; int8_t power; u_int i, chan; chan = ieee80211_chan2ieee(ic, c); KASSERT(chan != 0 && chan != IEEE80211_CHAN_ANY, ("chan 0x%x", chan)); /* select the appropriate RF settings based on what EEPROM says */ rfprog = (sc->rfprog == 0) ? rt2661_rf5225_1 : rt2661_rf5225_2; /* find the settings for this channel (we know it exists) */ for (i = 0; rfprog[i].chan != chan; i++); power = sc->txpow[i]; if (power < 0) { bbp94 += power; power = 0; } else if (power > 31) { bbp94 += power - 31; power = 31; } /* * If we are switching from the 2GHz band to the 5GHz band or * vice-versa, BBP registers need to be reprogrammed. 
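 * The test below compares the full channel flags word against the cached
 * sc_curchan, so any difference in flags, not only a 2GHz/5GHz change,
 * appears to trigger rt2661_select_band() and rt2661_select_antenna();
 * that is merely conservative and should be harmless.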
*/ if (c->ic_flags != sc->sc_curchan->ic_flags) { rt2661_select_band(sc, c); rt2661_select_antenna(sc); } sc->sc_curchan = c; rt2661_rf_write(sc, RAL_RF1, rfprog[i].r1); rt2661_rf_write(sc, RAL_RF2, rfprog[i].r2); rt2661_rf_write(sc, RAL_RF3, rfprog[i].r3 | power << 7); rt2661_rf_write(sc, RAL_RF4, rfprog[i].r4 | sc->rffreq << 10); DELAY(200); rt2661_rf_write(sc, RAL_RF1, rfprog[i].r1); rt2661_rf_write(sc, RAL_RF2, rfprog[i].r2); rt2661_rf_write(sc, RAL_RF3, rfprog[i].r3 | power << 7 | 1); rt2661_rf_write(sc, RAL_RF4, rfprog[i].r4 | sc->rffreq << 10); DELAY(200); rt2661_rf_write(sc, RAL_RF1, rfprog[i].r1); rt2661_rf_write(sc, RAL_RF2, rfprog[i].r2); rt2661_rf_write(sc, RAL_RF3, rfprog[i].r3 | power << 7); rt2661_rf_write(sc, RAL_RF4, rfprog[i].r4 | sc->rffreq << 10); /* enable smart mode for MIMO-capable RFs */ bbp3 = rt2661_bbp_read(sc, 3); bbp3 &= ~RT2661_SMART_MODE; if (sc->rf_rev == RT2661_RF_5325 || sc->rf_rev == RT2661_RF_2529) bbp3 |= RT2661_SMART_MODE; rt2661_bbp_write(sc, 3, bbp3); if (bbp94 != RT2661_BBPR94_DEFAULT) rt2661_bbp_write(sc, 94, bbp94); /* 5GHz radio needs a 1ms delay here */ if (IEEE80211_IS_CHAN_5GHZ(c)) DELAY(1000); } static void rt2661_set_bssid(struct rt2661_softc *sc, const uint8_t *bssid) { uint32_t tmp; tmp = bssid[0] | bssid[1] << 8 | bssid[2] << 16 | bssid[3] << 24; RAL_WRITE(sc, RT2661_MAC_CSR4, tmp); tmp = bssid[4] | bssid[5] << 8 | RT2661_ONE_BSSID << 16; RAL_WRITE(sc, RT2661_MAC_CSR5, tmp); } static void rt2661_set_macaddr(struct rt2661_softc *sc, const uint8_t *addr) { uint32_t tmp; tmp = addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24; RAL_WRITE(sc, RT2661_MAC_CSR2, tmp); tmp = addr[4] | addr[5] << 8; RAL_WRITE(sc, RT2661_MAC_CSR3, tmp); } static void rt2661_update_promisc(struct ieee80211com *ic) { struct rt2661_softc *sc = ic->ic_softc; uint32_t tmp; tmp = RAL_READ(sc, RT2661_TXRX_CSR0); tmp &= ~RT2661_DROP_NOT_TO_ME; if (ic->ic_promisc == 0) tmp |= RT2661_DROP_NOT_TO_ME; RAL_WRITE(sc, RT2661_TXRX_CSR0, tmp); DPRINTF(sc, "%s promiscuous mode\n", (ic->ic_promisc > 0) ? "entering" : "leaving"); } /* * Update QoS (802.11e) settings for each h/w Tx ring. */ static int rt2661_wme_update(struct ieee80211com *ic) { struct rt2661_softc *sc = ic->ic_softc; struct chanAccParams chp; const struct wmeParams *wmep; ieee80211_wme_ic_getparams(ic, &chp); wmep = chp.cap_wmeParams; /* XXX: not sure about shifts. */ /* XXX: the reference driver plays with AC_VI settings too. 
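 * As for the shifts: judging from the writes below, each of the CWmin,
 * CWmax and AIFSN registers packs four 4-bit fields, one per access
 * category, with BE in bits 15:12, BK in 11:8, VI in 7:4 and VO in 3:0,
 * while each AC_TXOP register carries one 16-bit TXOP limit per half.
 * For example, logcwmin values of {BE,BK,VI,VO} = {4,4,3,2} would be
 * written as (4 << 12) | (4 << 8) | (3 << 4) | 2 = 0x4432.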
*/ /* update TxOp */ RAL_WRITE(sc, RT2661_AC_TXOP_CSR0, wmep[WME_AC_BE].wmep_txopLimit << 16 | wmep[WME_AC_BK].wmep_txopLimit); RAL_WRITE(sc, RT2661_AC_TXOP_CSR1, wmep[WME_AC_VI].wmep_txopLimit << 16 | wmep[WME_AC_VO].wmep_txopLimit); /* update CWmin */ RAL_WRITE(sc, RT2661_CWMIN_CSR, wmep[WME_AC_BE].wmep_logcwmin << 12 | wmep[WME_AC_BK].wmep_logcwmin << 8 | wmep[WME_AC_VI].wmep_logcwmin << 4 | wmep[WME_AC_VO].wmep_logcwmin); /* update CWmax */ RAL_WRITE(sc, RT2661_CWMAX_CSR, wmep[WME_AC_BE].wmep_logcwmax << 12 | wmep[WME_AC_BK].wmep_logcwmax << 8 | wmep[WME_AC_VI].wmep_logcwmax << 4 | wmep[WME_AC_VO].wmep_logcwmax); /* update Aifsn */ RAL_WRITE(sc, RT2661_AIFSN_CSR, wmep[WME_AC_BE].wmep_aifsn << 12 | wmep[WME_AC_BK].wmep_aifsn << 8 | wmep[WME_AC_VI].wmep_aifsn << 4 | wmep[WME_AC_VO].wmep_aifsn); return 0; } static void rt2661_update_slot(struct ieee80211com *ic) { struct rt2661_softc *sc = ic->ic_softc; uint8_t slottime; uint32_t tmp; slottime = IEEE80211_GET_SLOTTIME(ic); tmp = RAL_READ(sc, RT2661_MAC_CSR9); tmp = (tmp & ~0xff) | slottime; RAL_WRITE(sc, RT2661_MAC_CSR9, tmp); } static const char * rt2661_get_rf(int rev) { switch (rev) { case RT2661_RF_5225: return "RT5225"; case RT2661_RF_5325: return "RT5325 (MIMO XR)"; case RT2661_RF_2527: return "RT2527"; case RT2661_RF_2529: return "RT2529 (MIMO XR)"; default: return "unknown"; } } static void rt2661_read_eeprom(struct rt2661_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN]) { uint16_t val; int i; /* read MAC address */ val = rt2661_eeprom_read(sc, RT2661_EEPROM_MAC01); macaddr[0] = val & 0xff; macaddr[1] = val >> 8; val = rt2661_eeprom_read(sc, RT2661_EEPROM_MAC23); macaddr[2] = val & 0xff; macaddr[3] = val >> 8; val = rt2661_eeprom_read(sc, RT2661_EEPROM_MAC45); macaddr[4] = val & 0xff; macaddr[5] = val >> 8; val = rt2661_eeprom_read(sc, RT2661_EEPROM_ANTENNA); /* XXX: test if different from 0xffff? 
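 * An erased or unprogrammed EEPROM word normally reads back as 0xffff,
 * in which case the decode below would yield nonsense values such as
 * rf_rev 0x1f and nb_ant 3; that is presumably what the suggested check
 * would guard against.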
*/ sc->rf_rev = (val >> 11) & 0x1f; sc->hw_radio = (val >> 10) & 0x1; sc->rx_ant = (val >> 4) & 0x3; sc->tx_ant = (val >> 2) & 0x3; sc->nb_ant = val & 0x3; DPRINTF(sc, "RF revision=%d\n", sc->rf_rev); val = rt2661_eeprom_read(sc, RT2661_EEPROM_CONFIG2); sc->ext_5ghz_lna = (val >> 6) & 0x1; sc->ext_2ghz_lna = (val >> 4) & 0x1; DPRINTF(sc, "External 2GHz LNA=%d\nExternal 5GHz LNA=%d\n", sc->ext_2ghz_lna, sc->ext_5ghz_lna); val = rt2661_eeprom_read(sc, RT2661_EEPROM_RSSI_2GHZ_OFFSET); if ((val & 0xff) != 0xff) sc->rssi_2ghz_corr = (int8_t)(val & 0xff); /* signed */ /* Only [-10, 10] is valid */ if (sc->rssi_2ghz_corr < -10 || sc->rssi_2ghz_corr > 10) sc->rssi_2ghz_corr = 0; val = rt2661_eeprom_read(sc, RT2661_EEPROM_RSSI_5GHZ_OFFSET); if ((val & 0xff) != 0xff) sc->rssi_5ghz_corr = (int8_t)(val & 0xff); /* signed */ /* Only [-10, 10] is valid */ if (sc->rssi_5ghz_corr < -10 || sc->rssi_5ghz_corr > 10) sc->rssi_5ghz_corr = 0; /* adjust RSSI correction for external low-noise amplifier */ if (sc->ext_2ghz_lna) sc->rssi_2ghz_corr -= 14; if (sc->ext_5ghz_lna) sc->rssi_5ghz_corr -= 14; DPRINTF(sc, "RSSI 2GHz corr=%d\nRSSI 5GHz corr=%d\n", sc->rssi_2ghz_corr, sc->rssi_5ghz_corr); val = rt2661_eeprom_read(sc, RT2661_EEPROM_FREQ_OFFSET); if ((val >> 8) != 0xff) sc->rfprog = (val >> 8) & 0x3; if ((val & 0xff) != 0xff) sc->rffreq = val & 0xff; DPRINTF(sc, "RF prog=%d\nRF freq=%d\n", sc->rfprog, sc->rffreq); /* read Tx power for all a/b/g channels */ for (i = 0; i < 19; i++) { val = rt2661_eeprom_read(sc, RT2661_EEPROM_TXPOWER + i); sc->txpow[i * 2] = (int8_t)(val >> 8); /* signed */ DPRINTF(sc, "Channel=%d Tx power=%d\n", rt2661_rf5225_1[i * 2].chan, sc->txpow[i * 2]); sc->txpow[i * 2 + 1] = (int8_t)(val & 0xff); /* signed */ DPRINTF(sc, "Channel=%d Tx power=%d\n", rt2661_rf5225_1[i * 2 + 1].chan, sc->txpow[i * 2 + 1]); } /* read vendor-specific BBP values */ for (i = 0; i < 16; i++) { val = rt2661_eeprom_read(sc, RT2661_EEPROM_BBP_BASE + i); if (val == 0 || val == 0xffff) continue; /* skip invalid entries */ sc->bbp_prom[i].reg = val >> 8; sc->bbp_prom[i].val = val & 0xff; DPRINTF(sc, "BBP R%d=%02x\n", sc->bbp_prom[i].reg, sc->bbp_prom[i].val); } } static int rt2661_bbp_init(struct rt2661_softc *sc) { int i, ntries; uint8_t val; /* wait for BBP to be ready */ for (ntries = 0; ntries < 100; ntries++) { val = rt2661_bbp_read(sc, 0); if (val != 0 && val != 0xff) break; DELAY(100); } if (ntries == 100) { device_printf(sc->sc_dev, "timeout waiting for BBP\n"); return EIO; } /* initialize BBP registers to default values */ for (i = 0; i < nitems(rt2661_def_bbp); i++) { rt2661_bbp_write(sc, rt2661_def_bbp[i].reg, rt2661_def_bbp[i].val); } /* write vendor-specific BBP values (from EEPROM) */ for (i = 0; i < 16; i++) { if (sc->bbp_prom[i].reg == 0) continue; rt2661_bbp_write(sc, sc->bbp_prom[i].reg, sc->bbp_prom[i].val); } return 0; } static void rt2661_init_locked(struct rt2661_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); uint32_t tmp, sta[3]; int i, error, ntries; RAL_LOCK_ASSERT(sc); if ((sc->sc_flags & RAL_FW_LOADED) == 0) { error = rt2661_load_microcode(sc); if (error != 0) { device_printf(sc->sc_dev, "%s: could not load 8051 microcode, error %d\n", __func__, error); return; } sc->sc_flags |= RAL_FW_LOADED; } rt2661_stop_locked(sc); /* initialize Tx rings */ RAL_WRITE(sc, RT2661_AC1_BASE_CSR, sc->txq[1].physaddr); RAL_WRITE(sc, RT2661_AC0_BASE_CSR, sc->txq[0].physaddr); RAL_WRITE(sc, RT2661_AC2_BASE_CSR, sc->txq[2].physaddr); RAL_WRITE(sc, 
RT2661_AC3_BASE_CSR, sc->txq[3].physaddr); /* initialize Mgt ring */ RAL_WRITE(sc, RT2661_MGT_BASE_CSR, sc->mgtq.physaddr); /* initialize Rx ring */ RAL_WRITE(sc, RT2661_RX_BASE_CSR, sc->rxq.physaddr); /* initialize Tx rings sizes */ RAL_WRITE(sc, RT2661_TX_RING_CSR0, RT2661_TX_RING_COUNT << 24 | RT2661_TX_RING_COUNT << 16 | RT2661_TX_RING_COUNT << 8 | RT2661_TX_RING_COUNT); RAL_WRITE(sc, RT2661_TX_RING_CSR1, RT2661_TX_DESC_WSIZE << 16 | RT2661_TX_RING_COUNT << 8 | /* XXX: HCCA ring unused */ RT2661_MGT_RING_COUNT); /* initialize Rx rings */ RAL_WRITE(sc, RT2661_RX_RING_CSR, RT2661_RX_DESC_BACK << 16 | RT2661_RX_DESC_WSIZE << 8 | RT2661_RX_RING_COUNT); /* XXX: some magic here */ RAL_WRITE(sc, RT2661_TX_DMA_DST_CSR, 0xaa); /* load base addresses of all 5 Tx rings (4 data + 1 mgt) */ RAL_WRITE(sc, RT2661_LOAD_TX_RING_CSR, 0x1f); /* load base address of Rx ring */ RAL_WRITE(sc, RT2661_RX_CNTL_CSR, 2); /* initialize MAC registers to default values */ for (i = 0; i < nitems(rt2661_def_mac); i++) RAL_WRITE(sc, rt2661_def_mac[i].reg, rt2661_def_mac[i].val); rt2661_set_macaddr(sc, vap ? vap->iv_myaddr : ic->ic_macaddr); /* set host ready */ RAL_WRITE(sc, RT2661_MAC_CSR1, 3); RAL_WRITE(sc, RT2661_MAC_CSR1, 0); /* wait for BBP/RF to wakeup */ for (ntries = 0; ntries < 1000; ntries++) { if (RAL_READ(sc, RT2661_MAC_CSR12) & 8) break; DELAY(1000); } if (ntries == 1000) { printf("timeout waiting for BBP/RF to wakeup\n"); rt2661_stop_locked(sc); return; } if (rt2661_bbp_init(sc) != 0) { rt2661_stop_locked(sc); return; } /* select default channel */ sc->sc_curchan = ic->ic_curchan; rt2661_select_band(sc, sc->sc_curchan); rt2661_select_antenna(sc); rt2661_set_chan(sc, sc->sc_curchan); /* update Rx filter */ tmp = RAL_READ(sc, RT2661_TXRX_CSR0) & 0xffff; tmp |= RT2661_DROP_PHY_ERROR | RT2661_DROP_CRC_ERROR; if (ic->ic_opmode != IEEE80211_M_MONITOR) { tmp |= RT2661_DROP_CTL | RT2661_DROP_VER_ERROR | RT2661_DROP_ACKCTS; if (ic->ic_opmode != IEEE80211_M_HOSTAP && ic->ic_opmode != IEEE80211_M_MBSS) tmp |= RT2661_DROP_TODS; if (ic->ic_promisc == 0) tmp |= RT2661_DROP_NOT_TO_ME; } RAL_WRITE(sc, RT2661_TXRX_CSR0, tmp); /* clear STA registers */ RAL_READ_REGION_4(sc, RT2661_STA_CSR0, sta, nitems(sta)); /* initialize ASIC */ RAL_WRITE(sc, RT2661_MAC_CSR1, 4); /* clear any pending interrupt */ RAL_WRITE(sc, RT2661_INT_SOURCE_CSR, 0xffffffff); /* enable interrupts */ RAL_WRITE(sc, RT2661_INT_MASK_CSR, 0x0000ff10); RAL_WRITE(sc, RT2661_MCU_INT_MASK_CSR, 0); /* kick Rx */ RAL_WRITE(sc, RT2661_RX_CNTL_CSR, 1); sc->sc_flags |= RAL_RUNNING; callout_reset(&sc->watchdog_ch, hz, rt2661_watchdog, sc); } static void rt2661_init(void *priv) { struct rt2661_softc *sc = priv; struct ieee80211com *ic = &sc->sc_ic; RAL_LOCK(sc); rt2661_init_locked(sc); RAL_UNLOCK(sc); if (sc->sc_flags & RAL_RUNNING) ieee80211_start_all(ic); /* start all vap's */ } void rt2661_stop_locked(struct rt2661_softc *sc) { volatile int *flags = &sc->sc_flags; uint32_t tmp; while (*flags & RAL_INPUT_RUNNING) msleep(sc, &sc->sc_mtx, 0, "ralrunning", hz/10); callout_stop(&sc->watchdog_ch); sc->sc_tx_timer = 0; if (sc->sc_flags & RAL_RUNNING) { sc->sc_flags &= ~RAL_RUNNING; /* abort Tx (for all 5 Tx rings) */ RAL_WRITE(sc, RT2661_TX_CNTL_CSR, 0x1f << 16); /* disable Rx (value remains after reset!) 
*/ tmp = RAL_READ(sc, RT2661_TXRX_CSR0); RAL_WRITE(sc, RT2661_TXRX_CSR0, tmp | RT2661_DISABLE_RX); /* reset ASIC */ RAL_WRITE(sc, RT2661_MAC_CSR1, 3); RAL_WRITE(sc, RT2661_MAC_CSR1, 0); /* disable interrupts */ RAL_WRITE(sc, RT2661_INT_MASK_CSR, 0xffffffff); RAL_WRITE(sc, RT2661_MCU_INT_MASK_CSR, 0xffffffff); /* clear any pending interrupt */ RAL_WRITE(sc, RT2661_INT_SOURCE_CSR, 0xffffffff); RAL_WRITE(sc, RT2661_MCU_INT_SOURCE_CSR, 0xffffffff); /* reset Tx and Rx rings */ rt2661_reset_tx_ring(sc, &sc->txq[0]); rt2661_reset_tx_ring(sc, &sc->txq[1]); rt2661_reset_tx_ring(sc, &sc->txq[2]); rt2661_reset_tx_ring(sc, &sc->txq[3]); rt2661_reset_tx_ring(sc, &sc->mgtq); rt2661_reset_rx_ring(sc, &sc->rxq); } } void rt2661_stop(void *priv) { struct rt2661_softc *sc = priv; RAL_LOCK(sc); rt2661_stop_locked(sc); RAL_UNLOCK(sc); } static int rt2661_load_microcode(struct rt2661_softc *sc) { const struct firmware *fp; const char *imagename; int ntries, error; RAL_LOCK_ASSERT(sc); switch (sc->sc_id) { case 0x0301: imagename = "rt2561sfw"; break; case 0x0302: imagename = "rt2561fw"; break; case 0x0401: imagename = "rt2661fw"; break; default: device_printf(sc->sc_dev, "%s: unexpected pci device id 0x%x, " "don't know how to retrieve firmware\n", __func__, sc->sc_id); return EINVAL; } RAL_UNLOCK(sc); fp = firmware_get(imagename); RAL_LOCK(sc); if (fp == NULL) { device_printf(sc->sc_dev, "%s: unable to retrieve firmware image %s\n", __func__, imagename); return EINVAL; } /* * Load 8051 microcode into NIC. */ /* reset 8051 */ RAL_WRITE(sc, RT2661_MCU_CNTL_CSR, RT2661_MCU_RESET); /* cancel any pending Host to MCU command */ RAL_WRITE(sc, RT2661_H2M_MAILBOX_CSR, 0); RAL_WRITE(sc, RT2661_M2H_CMD_DONE_CSR, 0xffffffff); RAL_WRITE(sc, RT2661_HOST_CMD_CSR, 0); /* write 8051's microcode */ RAL_WRITE(sc, RT2661_MCU_CNTL_CSR, RT2661_MCU_RESET | RT2661_MCU_SEL); RAL_WRITE_REGION_1(sc, RT2661_MCU_CODE_BASE, fp->data, fp->datasize); RAL_WRITE(sc, RT2661_MCU_CNTL_CSR, RT2661_MCU_RESET); /* kick 8051's ass */ RAL_WRITE(sc, RT2661_MCU_CNTL_CSR, 0); /* wait for 8051 to initialize */ for (ntries = 0; ntries < 500; ntries++) { if (RAL_READ(sc, RT2661_MCU_CNTL_CSR) & RT2661_MCU_READY) break; DELAY(100); } if (ntries == 500) { device_printf(sc->sc_dev, "%s: timeout waiting for MCU to initialize\n", __func__); error = EIO; } else error = 0; firmware_put(fp, FIRMWARE_UNLOAD); return error; } #ifdef notyet /* * Dynamically tune Rx sensitivity (BBP register 17) based on average RSSI and * false CCA count. This function is called periodically (every seconds) when * in the RUN state. Values taken from the reference driver. */ static void rt2661_rx_tune(struct rt2661_softc *sc) { uint8_t bbp17; uint16_t cca; int lo, hi, dbm; /* * Tuning range depends on operating band and on the presence of an * external low-noise amplifier. 
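 * Note that in this "notyet" block the variable dbm, which is meant to
 * hold the average RSSI mentioned above, is never assigned before it is
 * tested below; an average-RSSI computation would have to be added
 * before this code could be enabled.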
*/ lo = 0x20; if (IEEE80211_IS_CHAN_5GHZ(sc->sc_curchan)) lo += 0x08; if ((IEEE80211_IS_CHAN_2GHZ(sc->sc_curchan) && sc->ext_2ghz_lna) || (IEEE80211_IS_CHAN_5GHZ(sc->sc_curchan) && sc->ext_5ghz_lna)) lo += 0x10; hi = lo + 0x20; /* retrieve false CCA count since last call (clear on read) */ cca = RAL_READ(sc, RT2661_STA_CSR1) & 0xffff; if (dbm >= -35) { bbp17 = 0x60; } else if (dbm >= -58) { bbp17 = hi; } else if (dbm >= -66) { bbp17 = lo + 0x10; } else if (dbm >= -74) { bbp17 = lo + 0x08; } else { /* RSSI < -74dBm, tune using false CCA count */ bbp17 = sc->bbp17; /* current value */ hi -= 2 * (-74 - dbm); if (hi < lo) hi = lo; if (bbp17 > hi) { bbp17 = hi; } else if (cca > 512) { if (++bbp17 > hi) bbp17 = hi; } else if (cca < 100) { if (--bbp17 < lo) bbp17 = lo; } } if (bbp17 != sc->bbp17) { rt2661_bbp_write(sc, 17, bbp17); sc->bbp17 = bbp17; } } /* * Enter/Leave radar detection mode. * This is for 802.11h additional regulatory domains. */ static void rt2661_radar_start(struct rt2661_softc *sc) { uint32_t tmp; /* disable Rx */ tmp = RAL_READ(sc, RT2661_TXRX_CSR0); RAL_WRITE(sc, RT2661_TXRX_CSR0, tmp | RT2661_DISABLE_RX); rt2661_bbp_write(sc, 82, 0x20); rt2661_bbp_write(sc, 83, 0x00); rt2661_bbp_write(sc, 84, 0x40); /* save current BBP registers values */ sc->bbp18 = rt2661_bbp_read(sc, 18); sc->bbp21 = rt2661_bbp_read(sc, 21); sc->bbp22 = rt2661_bbp_read(sc, 22); sc->bbp16 = rt2661_bbp_read(sc, 16); sc->bbp17 = rt2661_bbp_read(sc, 17); sc->bbp64 = rt2661_bbp_read(sc, 64); rt2661_bbp_write(sc, 18, 0xff); rt2661_bbp_write(sc, 21, 0x3f); rt2661_bbp_write(sc, 22, 0x3f); rt2661_bbp_write(sc, 16, 0xbd); rt2661_bbp_write(sc, 17, sc->ext_5ghz_lna ? 0x44 : 0x34); rt2661_bbp_write(sc, 64, 0x21); /* restore Rx filter */ RAL_WRITE(sc, RT2661_TXRX_CSR0, tmp); } static int rt2661_radar_stop(struct rt2661_softc *sc) { uint8_t bbp66; /* read radar detection result */ bbp66 = rt2661_bbp_read(sc, 66); /* restore BBP registers values */ rt2661_bbp_write(sc, 16, sc->bbp16); rt2661_bbp_write(sc, 17, sc->bbp17); rt2661_bbp_write(sc, 18, sc->bbp18); rt2661_bbp_write(sc, 21, sc->bbp21); rt2661_bbp_write(sc, 22, sc->bbp22); rt2661_bbp_write(sc, 64, sc->bbp64); return bbp66 == 1; } #endif static int rt2661_prepare_beacon(struct rt2661_softc *sc, struct ieee80211vap *vap) { struct ieee80211com *ic = vap->iv_ic; struct rt2661_tx_desc desc; struct mbuf *m0; int rate; if ((m0 = ieee80211_beacon_alloc(vap->iv_bss))== NULL) { device_printf(sc->sc_dev, "could not allocate beacon frame\n"); return ENOBUFS; } /* send beacons at the lowest available rate */ rate = IEEE80211_IS_CHAN_5GHZ(ic->ic_bsschan) ? 12 : 2; rt2661_setup_tx_desc(sc, &desc, RT2661_TX_TIMESTAMP, RT2661_TX_HWSEQ, m0->m_pkthdr.len, rate, NULL, 0, RT2661_QID_MGT); /* copy the first 24 bytes of Tx descriptor into NIC memory */ RAL_WRITE_REGION_1(sc, RT2661_HW_BEACON_BASE0, (uint8_t *)&desc, 24); /* copy beacon header and payload into NIC memory */ RAL_WRITE_REGION_1(sc, RT2661_HW_BEACON_BASE0 + 24, mtod(m0, uint8_t *), m0->m_pkthdr.len); m_freem(m0); return 0; } /* * Enable TSF synchronization and tell h/w to start sending beacons for IBSS * and HostAP operating modes. */ static void rt2661_enable_tsf_sync(struct rt2661_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); uint32_t tmp; if (vap->iv_opmode != IEEE80211_M_STA) { /* * Change default 16ms TBTT adjustment to 8ms. * Must be done before enabling beacon generation. 
*/ RAL_WRITE(sc, RT2661_TXRX_CSR10, 1 << 12 | 8); } tmp = RAL_READ(sc, RT2661_TXRX_CSR9) & 0xff000000; /* set beacon interval (in 1/16ms unit) */ tmp |= vap->iv_bss->ni_intval * 16; tmp |= RT2661_TSF_TICKING | RT2661_ENABLE_TBTT; if (vap->iv_opmode == IEEE80211_M_STA) tmp |= RT2661_TSF_MODE(1); else tmp |= RT2661_TSF_MODE(2) | RT2661_GENERATE_BEACON; RAL_WRITE(sc, RT2661_TXRX_CSR9, tmp); } static void rt2661_enable_tsf(struct rt2661_softc *sc) { RAL_WRITE(sc, RT2661_TXRX_CSR9, (RAL_READ(sc, RT2661_TXRX_CSR9) & 0xff000000) | RT2661_TSF_TICKING | RT2661_TSF_MODE(2)); } /* * Retrieve the "Received Signal Strength Indicator" from the raw values * contained in Rx descriptors. The computation depends on which band the * frame was received. Correction values taken from the reference driver. */ static int rt2661_get_rssi(struct rt2661_softc *sc, uint8_t raw) { int lna, agc, rssi; lna = (raw >> 5) & 0x3; agc = raw & 0x1f; if (lna == 0) { /* * No mapping available. * * NB: Since RSSI is relative to noise floor, -1 is * adequate for caller to know error happened. */ return -1; } rssi = (2 * agc) - RT2661_NOISE_FLOOR; if (IEEE80211_IS_CHAN_2GHZ(sc->sc_curchan)) { rssi += sc->rssi_2ghz_corr; if (lna == 1) rssi -= 64; else if (lna == 2) rssi -= 74; else if (lna == 3) rssi -= 90; } else { rssi += sc->rssi_5ghz_corr; if (lna == 1) rssi -= 64; else if (lna == 2) rssi -= 86; else if (lna == 3) rssi -= 100; } return rssi; } static void rt2661_scan_start(struct ieee80211com *ic) { struct rt2661_softc *sc = ic->ic_softc; uint32_t tmp; /* abort TSF synchronization */ tmp = RAL_READ(sc, RT2661_TXRX_CSR9); RAL_WRITE(sc, RT2661_TXRX_CSR9, tmp & ~0xffffff); rt2661_set_bssid(sc, ieee80211broadcastaddr); } static void rt2661_scan_end(struct ieee80211com *ic) { struct rt2661_softc *sc = ic->ic_softc; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); rt2661_enable_tsf_sync(sc); /* XXX keep local copy */ rt2661_set_bssid(sc, vap->iv_bss->ni_bssid); } static void rt2661_getradiocaps(struct ieee80211com *ic, int maxchans, int *nchans, struct ieee80211_channel chans[]) { struct rt2661_softc *sc = ic->ic_softc; uint8_t bands[IEEE80211_MODE_BYTES]; memset(bands, 0, sizeof(bands)); setbit(bands, IEEE80211_MODE_11B); setbit(bands, IEEE80211_MODE_11G); ieee80211_add_channel_list_2ghz(chans, maxchans, nchans, rt2661_chan_2ghz, nitems(rt2661_chan_2ghz), bands, 0); if (sc->rf_rev == RT2661_RF_5225 || sc->rf_rev == RT2661_RF_5325) { setbit(bands, IEEE80211_MODE_11A); ieee80211_add_channel_list_5ghz(chans, maxchans, nchans, rt2661_chan_5ghz, nitems(rt2661_chan_5ghz), bands, 0); } } static void rt2661_set_channel(struct ieee80211com *ic) { struct rt2661_softc *sc = ic->ic_softc; RAL_LOCK(sc); rt2661_set_chan(sc, ic->ic_curchan); RAL_UNLOCK(sc); } Index: head/sys/dev/usb/wlan/if_rum.c =================================================================== --- head/sys/dev/usb/wlan/if_rum.c (revision 330687) +++ head/sys/dev/usb/wlan/if_rum.c (revision 330688) @@ -1,3317 +1,3305 @@ /* $FreeBSD$ */ /*- * Copyright (c) 2005-2007 Damien Bergamini * Copyright (c) 2006 Niall O'Higgins * Copyright (c) 2007-2008 Hans Petter Selasky * Copyright (c) 2015 Andriy Voskoboinyk * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. 
* * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include __FBSDID("$FreeBSD$"); /*- * Ralink Technology RT2501USB/RT2601USB chipset driver * http://www.ralinktech.com.tw/ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef INET #include #include #include #include #include #endif #include #include #include #include #include #include #include "usbdevs.h" #define USB_DEBUG_VAR rum_debug #include #include #include #include #ifdef USB_DEBUG static int rum_debug = 0; static SYSCTL_NODE(_hw_usb, OID_AUTO, rum, CTLFLAG_RW, 0, "USB rum"); SYSCTL_INT(_hw_usb_rum, OID_AUTO, debug, CTLFLAG_RWTUN, &rum_debug, 0, "Debug level"); #endif static const STRUCT_USB_HOST_ID rum_devs[] = { #define RUM_DEV(v,p) { USB_VP(USB_VENDOR_##v, USB_PRODUCT_##v##_##p) } RUM_DEV(ABOCOM, HWU54DM), RUM_DEV(ABOCOM, RT2573_2), RUM_DEV(ABOCOM, RT2573_3), RUM_DEV(ABOCOM, RT2573_4), RUM_DEV(ABOCOM, WUG2700), RUM_DEV(AMIT, CGWLUSB2GO), RUM_DEV(ASUS, RT2573_1), RUM_DEV(ASUS, RT2573_2), RUM_DEV(BELKIN, F5D7050A), RUM_DEV(BELKIN, F5D9050V3), RUM_DEV(CISCOLINKSYS, WUSB54GC), RUM_DEV(CISCOLINKSYS, WUSB54GR), RUM_DEV(CONCEPTRONIC2, C54RU2), RUM_DEV(COREGA, CGWLUSB2GL), RUM_DEV(COREGA, CGWLUSB2GPX), RUM_DEV(DICKSMITH, CWD854F), RUM_DEV(DICKSMITH, RT2573), RUM_DEV(EDIMAX, EW7318USG), RUM_DEV(DLINK2, DWLG122C1), RUM_DEV(DLINK2, WUA1340), RUM_DEV(DLINK2, DWA111), RUM_DEV(DLINK2, DWA110), RUM_DEV(GIGABYTE, GNWB01GS), RUM_DEV(GIGABYTE, GNWI05GS), RUM_DEV(GIGASET, RT2573), RUM_DEV(GOODWAY, RT2573), RUM_DEV(GUILLEMOT, HWGUSB254LB), RUM_DEV(GUILLEMOT, HWGUSB254V2AP), RUM_DEV(HUAWEI3COM, WUB320G), RUM_DEV(MELCO, G54HP), RUM_DEV(MELCO, SG54HP), RUM_DEV(MELCO, SG54HG), RUM_DEV(MELCO, WLIUCG), RUM_DEV(MELCO, WLRUCG), RUM_DEV(MELCO, WLRUCGAOSS), RUM_DEV(MSI, RT2573_1), RUM_DEV(MSI, RT2573_2), RUM_DEV(MSI, RT2573_3), RUM_DEV(MSI, RT2573_4), RUM_DEV(NOVATECH, RT2573), RUM_DEV(PLANEX2, GWUS54HP), RUM_DEV(PLANEX2, GWUS54MINI2), RUM_DEV(PLANEX2, GWUSMM), RUM_DEV(QCOM, RT2573), RUM_DEV(QCOM, RT2573_2), RUM_DEV(QCOM, RT2573_3), RUM_DEV(RALINK, RT2573), RUM_DEV(RALINK, RT2573_2), RUM_DEV(RALINK, RT2671), RUM_DEV(SITECOMEU, WL113R2), RUM_DEV(SITECOMEU, WL172), RUM_DEV(SPARKLAN, RT2573), RUM_DEV(SURECOM, RT2573), #undef RUM_DEV }; static device_probe_t rum_match; static device_attach_t rum_attach; static device_detach_t rum_detach; static usb_callback_t rum_bulk_read_callback; static usb_callback_t rum_bulk_write_callback; static usb_error_t rum_do_request(struct rum_softc *sc, struct usb_device_request *req, void *data); static usb_error_t rum_do_mcu_request(struct rum_softc *sc, int); static struct ieee80211vap *rum_vap_create(struct ieee80211com *, const char [IFNAMSIZ], int, enum ieee80211_opmode, int, const uint8_t [IEEE80211_ADDR_LEN], const uint8_t [IEEE80211_ADDR_LEN]); static void rum_vap_delete(struct ieee80211vap *); static void rum_cmdq_cb(void *, int); static int rum_cmd_sleepable(struct rum_softc *, const void *, 
size_t, uint8_t, CMD_FUNC_PROTO); static void rum_tx_free(struct rum_tx_data *, int); static void rum_setup_tx_list(struct rum_softc *); static void rum_reset_tx_list(struct rum_softc *, struct ieee80211vap *); static void rum_unsetup_tx_list(struct rum_softc *); static void rum_beacon_miss(struct ieee80211vap *); static void rum_sta_recv_mgmt(struct ieee80211_node *, struct mbuf *, int, const struct ieee80211_rx_stats *, int, int); static int rum_set_power_state(struct rum_softc *, int); static int rum_newstate(struct ieee80211vap *, enum ieee80211_state, int); static uint8_t rum_crypto_mode(struct rum_softc *, u_int, int); static void rum_setup_tx_desc(struct rum_softc *, struct rum_tx_desc *, struct ieee80211_key *, uint32_t, uint8_t, uint8_t, int, int, int); static uint32_t rum_tx_crypto_flags(struct rum_softc *, struct ieee80211_node *, const struct ieee80211_key *); static int rum_tx_mgt(struct rum_softc *, struct mbuf *, struct ieee80211_node *); static int rum_tx_raw(struct rum_softc *, struct mbuf *, struct ieee80211_node *, const struct ieee80211_bpf_params *); static int rum_tx_data(struct rum_softc *, struct mbuf *, struct ieee80211_node *); static int rum_transmit(struct ieee80211com *, struct mbuf *); static void rum_start(struct rum_softc *); static void rum_parent(struct ieee80211com *); static void rum_eeprom_read(struct rum_softc *, uint16_t, void *, int); static uint32_t rum_read(struct rum_softc *, uint16_t); static void rum_read_multi(struct rum_softc *, uint16_t, void *, int); static usb_error_t rum_write(struct rum_softc *, uint16_t, uint32_t); static usb_error_t rum_write_multi(struct rum_softc *, uint16_t, void *, size_t); static usb_error_t rum_setbits(struct rum_softc *, uint16_t, uint32_t); static usb_error_t rum_clrbits(struct rum_softc *, uint16_t, uint32_t); static usb_error_t rum_modbits(struct rum_softc *, uint16_t, uint32_t, uint32_t); static int rum_bbp_busy(struct rum_softc *); static void rum_bbp_write(struct rum_softc *, uint8_t, uint8_t); static uint8_t rum_bbp_read(struct rum_softc *, uint8_t); static void rum_rf_write(struct rum_softc *, uint8_t, uint32_t); static void rum_select_antenna(struct rum_softc *); static void rum_enable_mrr(struct rum_softc *); static void rum_set_txpreamble(struct rum_softc *); static void rum_set_basicrates(struct rum_softc *); static void rum_select_band(struct rum_softc *, struct ieee80211_channel *); static void rum_set_chan(struct rum_softc *, struct ieee80211_channel *); static void rum_set_maxretry(struct rum_softc *, struct ieee80211vap *); static int rum_enable_tsf_sync(struct rum_softc *); static void rum_enable_tsf(struct rum_softc *); static void rum_abort_tsf_sync(struct rum_softc *); static void rum_get_tsf(struct rum_softc *, uint64_t *); static void rum_update_slot_cb(struct rum_softc *, union sec_param *, uint8_t); static void rum_update_slot(struct ieee80211com *); static int rum_wme_update(struct ieee80211com *); static void rum_set_bssid(struct rum_softc *, const uint8_t *); static void rum_set_macaddr(struct rum_softc *, const uint8_t *); static void rum_update_mcast(struct ieee80211com *); static void rum_update_promisc(struct ieee80211com *); static void rum_setpromisc(struct rum_softc *); static const char *rum_get_rf(int); static void rum_read_eeprom(struct rum_softc *); static int rum_bbp_wakeup(struct rum_softc *); static int rum_bbp_init(struct rum_softc *); static void rum_clr_shkey_regs(struct rum_softc *); static int rum_init(struct rum_softc *); static void rum_stop(struct rum_softc *); 
static void rum_load_microcode(struct rum_softc *, const uint8_t *, size_t); static int rum_set_sleep_time(struct rum_softc *, uint16_t); static int rum_reset(struct ieee80211vap *, u_long); static int rum_set_beacon(struct rum_softc *, struct ieee80211vap *); static int rum_alloc_beacon(struct rum_softc *, struct ieee80211vap *); static void rum_update_beacon_cb(struct rum_softc *, union sec_param *, uint8_t); static void rum_update_beacon(struct ieee80211vap *, int); static int rum_common_key_set(struct rum_softc *, struct ieee80211_key *, uint16_t); static void rum_group_key_set_cb(struct rum_softc *, union sec_param *, uint8_t); static void rum_group_key_del_cb(struct rum_softc *, union sec_param *, uint8_t); static void rum_pair_key_set_cb(struct rum_softc *, union sec_param *, uint8_t); static void rum_pair_key_del_cb(struct rum_softc *, union sec_param *, uint8_t); static int rum_key_alloc(struct ieee80211vap *, struct ieee80211_key *, ieee80211_keyix *, ieee80211_keyix *); static int rum_key_set(struct ieee80211vap *, const struct ieee80211_key *); static int rum_key_delete(struct ieee80211vap *, const struct ieee80211_key *); static int rum_raw_xmit(struct ieee80211_node *, struct mbuf *, const struct ieee80211_bpf_params *); static void rum_scan_start(struct ieee80211com *); static void rum_scan_end(struct ieee80211com *); static void rum_set_channel(struct ieee80211com *); static void rum_getradiocaps(struct ieee80211com *, int, int *, struct ieee80211_channel[]); static int rum_get_rssi(struct rum_softc *, uint8_t); static void rum_ratectl_start(struct rum_softc *, struct ieee80211_node *); static void rum_ratectl_timeout(void *); static void rum_ratectl_task(void *, int); static int rum_pause(struct rum_softc *, int); static const struct { uint32_t reg; uint32_t val; } rum_def_mac[] = { { RT2573_TXRX_CSR0, 0x025fb032 }, { RT2573_TXRX_CSR1, 0x9eaa9eaf }, { RT2573_TXRX_CSR2, 0x8a8b8c8d }, { RT2573_TXRX_CSR3, 0x00858687 }, { RT2573_TXRX_CSR7, 0x2e31353b }, { RT2573_TXRX_CSR8, 0x2a2a2a2c }, { RT2573_TXRX_CSR15, 0x0000000f }, { RT2573_MAC_CSR6, 0x00000fff }, { RT2573_MAC_CSR8, 0x016c030a }, { RT2573_MAC_CSR10, 0x00000718 }, { RT2573_MAC_CSR12, 0x00000004 }, { RT2573_MAC_CSR13, 0x00007f00 }, { RT2573_SEC_CSR2, 0x00000000 }, { RT2573_SEC_CSR3, 0x00000000 }, { RT2573_SEC_CSR4, 0x00000000 }, { RT2573_PHY_CSR1, 0x000023b0 }, { RT2573_PHY_CSR5, 0x00040a06 }, { RT2573_PHY_CSR6, 0x00080606 }, { RT2573_PHY_CSR7, 0x00000408 }, { RT2573_AIFSN_CSR, 0x00002273 }, { RT2573_CWMIN_CSR, 0x00002344 }, { RT2573_CWMAX_CSR, 0x000034aa } }; static const struct { uint8_t reg; uint8_t val; } rum_def_bbp[] = { { 3, 0x80 }, { 15, 0x30 }, { 17, 0x20 }, { 21, 0xc8 }, { 22, 0x38 }, { 23, 0x06 }, { 24, 0xfe }, { 25, 0x0a }, { 26, 0x0d }, { 32, 0x0b }, { 34, 0x12 }, { 37, 0x07 }, { 39, 0xf8 }, { 41, 0x60 }, { 53, 0x10 }, { 54, 0x18 }, { 60, 0x10 }, { 61, 0x04 }, { 62, 0x04 }, { 75, 0xfe }, { 86, 0xfe }, { 88, 0xfe }, { 90, 0x0f }, { 99, 0x00 }, { 102, 0x16 }, { 107, 0x04 } }; static const uint8_t rum_chan_2ghz[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 }; static const uint8_t rum_chan_5ghz[] = { 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161, 165 }; static const struct rfprog { uint8_t chan; uint32_t r1, r2, r3, r4; } rum_rf5226[] = { { 1, 0x00b03, 0x001e1, 0x1a014, 0x30282 }, { 2, 0x00b03, 0x001e1, 0x1a014, 0x30287 }, { 3, 0x00b03, 0x001e2, 0x1a014, 0x30282 }, { 4, 0x00b03, 0x001e2, 0x1a014, 0x30287 }, { 5, 0x00b03, 0x001e3, 
0x1a014, 0x30282 }, { 6, 0x00b03, 0x001e3, 0x1a014, 0x30287 }, { 7, 0x00b03, 0x001e4, 0x1a014, 0x30282 }, { 8, 0x00b03, 0x001e4, 0x1a014, 0x30287 }, { 9, 0x00b03, 0x001e5, 0x1a014, 0x30282 }, { 10, 0x00b03, 0x001e5, 0x1a014, 0x30287 }, { 11, 0x00b03, 0x001e6, 0x1a014, 0x30282 }, { 12, 0x00b03, 0x001e6, 0x1a014, 0x30287 }, { 13, 0x00b03, 0x001e7, 0x1a014, 0x30282 }, { 14, 0x00b03, 0x001e8, 0x1a014, 0x30284 }, { 34, 0x00b03, 0x20266, 0x36014, 0x30282 }, { 38, 0x00b03, 0x20267, 0x36014, 0x30284 }, { 42, 0x00b03, 0x20268, 0x36014, 0x30286 }, { 46, 0x00b03, 0x20269, 0x36014, 0x30288 }, { 36, 0x00b03, 0x00266, 0x26014, 0x30288 }, { 40, 0x00b03, 0x00268, 0x26014, 0x30280 }, { 44, 0x00b03, 0x00269, 0x26014, 0x30282 }, { 48, 0x00b03, 0x0026a, 0x26014, 0x30284 }, { 52, 0x00b03, 0x0026b, 0x26014, 0x30286 }, { 56, 0x00b03, 0x0026c, 0x26014, 0x30288 }, { 60, 0x00b03, 0x0026e, 0x26014, 0x30280 }, { 64, 0x00b03, 0x0026f, 0x26014, 0x30282 }, { 100, 0x00b03, 0x0028a, 0x2e014, 0x30280 }, { 104, 0x00b03, 0x0028b, 0x2e014, 0x30282 }, { 108, 0x00b03, 0x0028c, 0x2e014, 0x30284 }, { 112, 0x00b03, 0x0028d, 0x2e014, 0x30286 }, { 116, 0x00b03, 0x0028e, 0x2e014, 0x30288 }, { 120, 0x00b03, 0x002a0, 0x2e014, 0x30280 }, { 124, 0x00b03, 0x002a1, 0x2e014, 0x30282 }, { 128, 0x00b03, 0x002a2, 0x2e014, 0x30284 }, { 132, 0x00b03, 0x002a3, 0x2e014, 0x30286 }, { 136, 0x00b03, 0x002a4, 0x2e014, 0x30288 }, { 140, 0x00b03, 0x002a6, 0x2e014, 0x30280 }, { 149, 0x00b03, 0x002a8, 0x2e014, 0x30287 }, { 153, 0x00b03, 0x002a9, 0x2e014, 0x30289 }, { 157, 0x00b03, 0x002ab, 0x2e014, 0x30281 }, { 161, 0x00b03, 0x002ac, 0x2e014, 0x30283 }, { 165, 0x00b03, 0x002ad, 0x2e014, 0x30285 } }, rum_rf5225[] = { { 1, 0x00b33, 0x011e1, 0x1a014, 0x30282 }, { 2, 0x00b33, 0x011e1, 0x1a014, 0x30287 }, { 3, 0x00b33, 0x011e2, 0x1a014, 0x30282 }, { 4, 0x00b33, 0x011e2, 0x1a014, 0x30287 }, { 5, 0x00b33, 0x011e3, 0x1a014, 0x30282 }, { 6, 0x00b33, 0x011e3, 0x1a014, 0x30287 }, { 7, 0x00b33, 0x011e4, 0x1a014, 0x30282 }, { 8, 0x00b33, 0x011e4, 0x1a014, 0x30287 }, { 9, 0x00b33, 0x011e5, 0x1a014, 0x30282 }, { 10, 0x00b33, 0x011e5, 0x1a014, 0x30287 }, { 11, 0x00b33, 0x011e6, 0x1a014, 0x30282 }, { 12, 0x00b33, 0x011e6, 0x1a014, 0x30287 }, { 13, 0x00b33, 0x011e7, 0x1a014, 0x30282 }, { 14, 0x00b33, 0x011e8, 0x1a014, 0x30284 }, { 34, 0x00b33, 0x01266, 0x26014, 0x30282 }, { 38, 0x00b33, 0x01267, 0x26014, 0x30284 }, { 42, 0x00b33, 0x01268, 0x26014, 0x30286 }, { 46, 0x00b33, 0x01269, 0x26014, 0x30288 }, { 36, 0x00b33, 0x01266, 0x26014, 0x30288 }, { 40, 0x00b33, 0x01268, 0x26014, 0x30280 }, { 44, 0x00b33, 0x01269, 0x26014, 0x30282 }, { 48, 0x00b33, 0x0126a, 0x26014, 0x30284 }, { 52, 0x00b33, 0x0126b, 0x26014, 0x30286 }, { 56, 0x00b33, 0x0126c, 0x26014, 0x30288 }, { 60, 0x00b33, 0x0126e, 0x26014, 0x30280 }, { 64, 0x00b33, 0x0126f, 0x26014, 0x30282 }, { 100, 0x00b33, 0x0128a, 0x2e014, 0x30280 }, { 104, 0x00b33, 0x0128b, 0x2e014, 0x30282 }, { 108, 0x00b33, 0x0128c, 0x2e014, 0x30284 }, { 112, 0x00b33, 0x0128d, 0x2e014, 0x30286 }, { 116, 0x00b33, 0x0128e, 0x2e014, 0x30288 }, { 120, 0x00b33, 0x012a0, 0x2e014, 0x30280 }, { 124, 0x00b33, 0x012a1, 0x2e014, 0x30282 }, { 128, 0x00b33, 0x012a2, 0x2e014, 0x30284 }, { 132, 0x00b33, 0x012a3, 0x2e014, 0x30286 }, { 136, 0x00b33, 0x012a4, 0x2e014, 0x30288 }, { 140, 0x00b33, 0x012a6, 0x2e014, 0x30280 }, { 149, 0x00b33, 0x012a8, 0x2e014, 0x30287 }, { 153, 0x00b33, 0x012a9, 0x2e014, 0x30289 }, { 157, 0x00b33, 0x012ab, 0x2e014, 0x30281 }, { 161, 0x00b33, 0x012ac, 0x2e014, 0x30283 }, { 165, 0x00b33, 0x012ad, 0x2e014, 0x30285 } }; static const 
struct usb_config rum_config[RUM_N_TRANSFER] = { [RUM_BULK_WR] = { .type = UE_BULK, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_OUT, .bufsize = (MCLBYTES + RT2573_TX_DESC_SIZE + 8), .flags = {.pipe_bof = 1,.force_short_xfer = 1,}, .callback = rum_bulk_write_callback, .timeout = 5000, /* ms */ }, [RUM_BULK_RD] = { .type = UE_BULK, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_IN, .bufsize = (MCLBYTES + RT2573_RX_DESC_SIZE), .flags = {.pipe_bof = 1,.short_xfer_ok = 1,}, .callback = rum_bulk_read_callback, }, }; static int rum_match(device_t self) { struct usb_attach_arg *uaa = device_get_ivars(self); if (uaa->usb_mode != USB_MODE_HOST) return (ENXIO); if (uaa->info.bConfigIndex != 0) return (ENXIO); if (uaa->info.bIfaceIndex != RT2573_IFACE_INDEX) return (ENXIO); return (usbd_lookup_id_by_uaa(rum_devs, sizeof(rum_devs), uaa)); } static int rum_attach(device_t self) { struct usb_attach_arg *uaa = device_get_ivars(self); struct rum_softc *sc = device_get_softc(self); struct ieee80211com *ic = &sc->sc_ic; uint32_t tmp; uint8_t iface_index; int error, ntries; device_set_usb_desc(self); sc->sc_udev = uaa->device; sc->sc_dev = self; RUM_LOCK_INIT(sc); RUM_CMDQ_LOCK_INIT(sc); mbufq_init(&sc->sc_snd, ifqmaxlen); iface_index = RT2573_IFACE_INDEX; error = usbd_transfer_setup(uaa->device, &iface_index, sc->sc_xfer, rum_config, RUM_N_TRANSFER, sc, &sc->sc_mtx); if (error) { device_printf(self, "could not allocate USB transfers, " "err=%s\n", usbd_errstr(error)); goto detach; } RUM_LOCK(sc); /* retrieve RT2573 rev. no */ for (ntries = 0; ntries < 100; ntries++) { if ((tmp = rum_read(sc, RT2573_MAC_CSR0)) != 0) break; if (rum_pause(sc, hz / 100)) break; } if (ntries == 100) { device_printf(sc->sc_dev, "timeout waiting for chip to settle\n"); RUM_UNLOCK(sc); goto detach; } /* retrieve MAC address and various other things from EEPROM */ rum_read_eeprom(sc); device_printf(sc->sc_dev, "MAC/BBP RT2573 (rev 0x%05x), RF %s\n", tmp, rum_get_rf(sc->rf_rev)); rum_load_microcode(sc, rt2573_ucode, sizeof(rt2573_ucode)); RUM_UNLOCK(sc); ic->ic_softc = sc; ic->ic_name = device_get_nameunit(self); ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ /* set device capabilities */ ic->ic_caps = IEEE80211_C_STA /* station mode supported */ | IEEE80211_C_IBSS /* IBSS mode supported */ | IEEE80211_C_MONITOR /* monitor mode supported */ | IEEE80211_C_HOSTAP /* HostAp mode supported */ | IEEE80211_C_AHDEMO /* adhoc demo mode */ | IEEE80211_C_TXPMGT /* tx power management */ | IEEE80211_C_SHPREAMBLE /* short preamble supported */ | IEEE80211_C_SHSLOT /* short slot time supported */ | IEEE80211_C_BGSCAN /* bg scanning supported */ | IEEE80211_C_WPA /* 802.11i */ | IEEE80211_C_WME /* 802.11e */ | IEEE80211_C_PMGT /* Station-side power mgmt */ | IEEE80211_C_SWSLEEP /* net80211 managed power mgmt */ ; ic->ic_cryptocaps = IEEE80211_CRYPTO_WEP | IEEE80211_CRYPTO_AES_CCM | IEEE80211_CRYPTO_TKIPMIC | IEEE80211_CRYPTO_TKIP; rum_getradiocaps(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans, ic->ic_channels); ieee80211_ifattach(ic); ic->ic_update_promisc = rum_update_promisc; ic->ic_raw_xmit = rum_raw_xmit; ic->ic_scan_start = rum_scan_start; ic->ic_scan_end = rum_scan_end; ic->ic_set_channel = rum_set_channel; ic->ic_getradiocaps = rum_getradiocaps; ic->ic_transmit = rum_transmit; ic->ic_parent = rum_parent; ic->ic_vap_create = rum_vap_create; ic->ic_vap_delete = rum_vap_delete; ic->ic_updateslot = rum_update_slot; ic->ic_wme.wme_update = rum_wme_update; ic->ic_update_mcast = rum_update_mcast; ieee80211_radiotap_attach(ic, 
&sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap), RT2573_TX_RADIOTAP_PRESENT, &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap), RT2573_RX_RADIOTAP_PRESENT); TASK_INIT(&sc->cmdq_task, 0, rum_cmdq_cb, sc); if (bootverbose) ieee80211_announce(ic); return (0); detach: rum_detach(self); return (ENXIO); /* failure */ } static int rum_detach(device_t self) { struct rum_softc *sc = device_get_softc(self); struct ieee80211com *ic = &sc->sc_ic; /* Prevent further ioctls */ RUM_LOCK(sc); sc->sc_detached = 1; RUM_UNLOCK(sc); /* stop all USB transfers */ usbd_transfer_unsetup(sc->sc_xfer, RUM_N_TRANSFER); /* free TX list, if any */ RUM_LOCK(sc); rum_unsetup_tx_list(sc); RUM_UNLOCK(sc); if (ic->ic_softc == sc) { ieee80211_draintask(ic, &sc->cmdq_task); ieee80211_ifdetach(ic); } mbufq_drain(&sc->sc_snd); RUM_CMDQ_LOCK_DESTROY(sc); RUM_LOCK_DESTROY(sc); return (0); } static usb_error_t rum_do_request(struct rum_softc *sc, struct usb_device_request *req, void *data) { usb_error_t err; int ntries = 10; while (ntries--) { err = usbd_do_request_flags(sc->sc_udev, &sc->sc_mtx, req, data, 0, NULL, 250 /* ms */); if (err == 0) break; DPRINTFN(1, "Control request failed, %s (retrying)\n", usbd_errstr(err)); if (rum_pause(sc, hz / 100)) break; } return (err); } static usb_error_t rum_do_mcu_request(struct rum_softc *sc, int request) { struct usb_device_request req; req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = RT2573_MCU_CNTL; USETW(req.wValue, request); USETW(req.wIndex, 0); USETW(req.wLength, 0); return (rum_do_request(sc, &req, NULL)); } static struct ieee80211vap * rum_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, enum ieee80211_opmode opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac[IEEE80211_ADDR_LEN]) { struct rum_softc *sc = ic->ic_softc; struct rum_vap *rvp; struct ieee80211vap *vap; if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */ return NULL; rvp = malloc(sizeof(struct rum_vap), M_80211_VAP, M_WAITOK | M_ZERO); vap = &rvp->vap; /* enable s/w bmiss handling for sta mode */ if (ieee80211_vap_setup(ic, vap, name, unit, opmode, flags | IEEE80211_CLONE_NOBEACONS, bssid) != 0) { /* out of memory */ free(rvp, M_80211_VAP); return (NULL); } /* override state transition machine */ rvp->newstate = vap->iv_newstate; vap->iv_newstate = rum_newstate; vap->iv_key_alloc = rum_key_alloc; vap->iv_key_set = rum_key_set; vap->iv_key_delete = rum_key_delete; vap->iv_update_beacon = rum_update_beacon; vap->iv_reset = rum_reset; vap->iv_max_aid = RT2573_ADDR_MAX; if (opmode == IEEE80211_M_STA) { /* * Move device to the sleep state when * beacon is received and there is no data for us. * * Used only for IEEE80211_S_SLEEP state. */ rvp->recv_mgmt = vap->iv_recv_mgmt; vap->iv_recv_mgmt = rum_sta_recv_mgmt; /* Ignored while sleeping. */ rvp->bmiss = vap->iv_bmiss; vap->iv_bmiss = rum_beacon_miss; } usb_callout_init_mtx(&rvp->ratectl_ch, &sc->sc_mtx, 0); TASK_INIT(&rvp->ratectl_task, 0, rum_ratectl_task, rvp); ieee80211_ratectl_init(vap); ieee80211_ratectl_setinterval(vap, 1000 /* 1 sec */); /* complete setup */ ieee80211_vap_attach(vap, ieee80211_media_change, ieee80211_media_status, mac); ic->ic_opmode = opmode; return vap; } static void rum_vap_delete(struct ieee80211vap *vap) { struct rum_vap *rvp = RUM_VAP(vap); struct ieee80211com *ic = vap->iv_ic; struct rum_softc *sc = ic->ic_softc; /* Put vap into INIT state. */ ieee80211_new_state(vap, IEEE80211_S_INIT, -1); ieee80211_draintask(ic, &vap->iv_nstate_task); RUM_LOCK(sc); /* Cancel any unfinished Tx. 
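 * Queued tx_data entries may still hold mbufs and node references that
 * belong to this vap; rum_reset_tx_list() below releases them so the
 * vap can be detached safely.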
*/ rum_reset_tx_list(sc, vap); RUM_UNLOCK(sc); usb_callout_drain(&rvp->ratectl_ch); ieee80211_draintask(ic, &rvp->ratectl_task); ieee80211_ratectl_deinit(vap); ieee80211_vap_detach(vap); m_freem(rvp->bcn_mbuf); free(rvp, M_80211_VAP); } static void rum_cmdq_cb(void *arg, int pending) { struct rum_softc *sc = arg; struct rum_cmdq *rc; RUM_CMDQ_LOCK(sc); while (sc->cmdq[sc->cmdq_first].func != NULL) { rc = &sc->cmdq[sc->cmdq_first]; RUM_CMDQ_UNLOCK(sc); RUM_LOCK(sc); rc->func(sc, &rc->data, rc->rvp_id); RUM_UNLOCK(sc); RUM_CMDQ_LOCK(sc); memset(rc, 0, sizeof (*rc)); sc->cmdq_first = (sc->cmdq_first + 1) % RUM_CMDQ_SIZE; } RUM_CMDQ_UNLOCK(sc); } static int rum_cmd_sleepable(struct rum_softc *sc, const void *ptr, size_t len, uint8_t rvp_id, CMD_FUNC_PROTO) { struct ieee80211com *ic = &sc->sc_ic; KASSERT(len <= sizeof(union sec_param), ("buffer overflow")); RUM_CMDQ_LOCK(sc); if (sc->cmdq[sc->cmdq_last].func != NULL) { device_printf(sc->sc_dev, "%s: cmdq overflow\n", __func__); RUM_CMDQ_UNLOCK(sc); return EAGAIN; } if (ptr != NULL) memcpy(&sc->cmdq[sc->cmdq_last].data, ptr, len); sc->cmdq[sc->cmdq_last].rvp_id = rvp_id; sc->cmdq[sc->cmdq_last].func = func; sc->cmdq_last = (sc->cmdq_last + 1) % RUM_CMDQ_SIZE; RUM_CMDQ_UNLOCK(sc); ieee80211_runtask(ic, &sc->cmdq_task); return 0; } static void rum_tx_free(struct rum_tx_data *data, int txerr) { struct rum_softc *sc = data->sc; if (data->m != NULL) { ieee80211_tx_complete(data->ni, data->m, txerr); data->m = NULL; data->ni = NULL; } STAILQ_INSERT_TAIL(&sc->tx_free, data, next); sc->tx_nfree++; } static void rum_setup_tx_list(struct rum_softc *sc) { struct rum_tx_data *data; int i; sc->tx_nfree = 0; STAILQ_INIT(&sc->tx_q); STAILQ_INIT(&sc->tx_free); for (i = 0; i < RUM_TX_LIST_COUNT; i++) { data = &sc->tx_data[i]; data->sc = sc; STAILQ_INSERT_TAIL(&sc->tx_free, data, next); sc->tx_nfree++; } } static void rum_reset_tx_list(struct rum_softc *sc, struct ieee80211vap *vap) { struct rum_tx_data *data, *tmp; KASSERT(vap != NULL, ("%s: vap is NULL\n", __func__)); STAILQ_FOREACH_SAFE(data, &sc->tx_q, next, tmp) { if (data->ni != NULL && data->ni->ni_vap == vap) { ieee80211_free_node(data->ni); data->ni = NULL; KASSERT(data->m != NULL, ("%s: m is NULL\n", __func__)); m_freem(data->m); data->m = NULL; STAILQ_REMOVE(&sc->tx_q, data, rum_tx_data, next); STAILQ_INSERT_TAIL(&sc->tx_free, data, next); sc->tx_nfree++; } } } static void rum_unsetup_tx_list(struct rum_softc *sc) { struct rum_tx_data *data; int i; /* make sure any subsequent use of the queues will fail */ sc->tx_nfree = 0; STAILQ_INIT(&sc->tx_q); STAILQ_INIT(&sc->tx_free); /* free up all node references and mbufs */ for (i = 0; i < RUM_TX_LIST_COUNT; i++) { data = &sc->tx_data[i]; if (data->m != NULL) { m_freem(data->m); data->m = NULL; } if (data->ni != NULL) { ieee80211_free_node(data->ni); data->ni = NULL; } } } static void rum_beacon_miss(struct ieee80211vap *vap) { struct ieee80211com *ic = vap->iv_ic; struct rum_softc *sc = ic->ic_softc; struct rum_vap *rvp = RUM_VAP(vap); int sleep; RUM_LOCK(sc); if (sc->sc_sleeping && sc->sc_sleep_end < ticks) { DPRINTFN(12, "dropping 'sleeping' bit, " "device must be awake now\n"); sc->sc_sleeping = 0; } sleep = sc->sc_sleeping; RUM_UNLOCK(sc); if (!sleep) rvp->bmiss(vap); #ifdef USB_DEBUG else DPRINTFN(13, "bmiss event is ignored whilst sleeping\n"); #endif } static void rum_sta_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m, int subtype, const struct ieee80211_rx_stats *rxs, int rssi, int nf) { struct ieee80211vap *vap = ni->ni_vap; struct rum_softc 
*sc = vap->iv_ic->ic_softc; struct rum_vap *rvp = RUM_VAP(vap); if (vap->iv_state == IEEE80211_S_SLEEP && subtype == IEEE80211_FC0_SUBTYPE_BEACON) { RUM_LOCK(sc); DPRINTFN(12, "beacon, mybss %d (flags %02X)\n", !!(sc->last_rx_flags & RT2573_RX_MYBSS), sc->last_rx_flags); if ((sc->last_rx_flags & (RT2573_RX_MYBSS | RT2573_RX_BC)) == (RT2573_RX_MYBSS | RT2573_RX_BC)) { /* * Put it to sleep here; in case if there is a data * for us, iv_recv_mgmt() will wakeup the device via * SLEEP -> RUN state transition. */ rum_set_power_state(sc, 1); } RUM_UNLOCK(sc); } rvp->recv_mgmt(ni, m, subtype, rxs, rssi, nf); } static int rum_set_power_state(struct rum_softc *sc, int sleep) { usb_error_t uerror; RUM_LOCK_ASSERT(sc); DPRINTFN(12, "moving to %s state (sleep time %u)\n", sleep ? "sleep" : "awake", sc->sc_sleep_time); uerror = rum_do_mcu_request(sc, sleep ? RT2573_MCU_SLEEP : RT2573_MCU_WAKEUP); if (uerror != USB_ERR_NORMAL_COMPLETION) { device_printf(sc->sc_dev, "%s: could not change power state: %s\n", __func__, usbd_errstr(uerror)); return (EIO); } sc->sc_sleeping = !!sleep; sc->sc_sleep_end = sleep ? ticks + sc->sc_sleep_time : 0; return (0); } static int rum_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct rum_vap *rvp = RUM_VAP(vap); struct ieee80211com *ic = vap->iv_ic; struct rum_softc *sc = ic->ic_softc; const struct ieee80211_txparam *tp; enum ieee80211_state ostate; struct ieee80211_node *ni; usb_error_t uerror; int ret = 0; ostate = vap->iv_state; DPRINTF("%s -> %s\n", ieee80211_state_name[ostate], ieee80211_state_name[nstate]); IEEE80211_UNLOCK(ic); RUM_LOCK(sc); usb_callout_stop(&rvp->ratectl_ch); if (ostate == IEEE80211_S_SLEEP && vap->iv_opmode == IEEE80211_M_STA) { rum_clrbits(sc, RT2573_TXRX_CSR4, RT2573_ACKCTS_PWRMGT); rum_clrbits(sc, RT2573_MAC_CSR11, RT2573_AUTO_WAKEUP); /* * Ignore any errors; * any subsequent TX will wakeup it anyway */ (void) rum_set_power_state(sc, 0); } switch (nstate) { case IEEE80211_S_INIT: if (ostate == IEEE80211_S_RUN) rum_abort_tsf_sync(sc); break; case IEEE80211_S_RUN: if (ostate == IEEE80211_S_SLEEP) break; /* already handled */ ni = ieee80211_ref_node(vap->iv_bss); if (vap->iv_opmode != IEEE80211_M_MONITOR) { if (ic->ic_bsschan == IEEE80211_CHAN_ANYC || ni->ni_chan == IEEE80211_CHAN_ANYC) { ret = EINVAL; goto run_fail; } rum_update_slot_cb(sc, NULL, 0); rum_enable_mrr(sc); rum_set_txpreamble(sc); rum_set_basicrates(sc); rum_set_maxretry(sc, vap); IEEE80211_ADDR_COPY(sc->sc_bssid, ni->ni_bssid); rum_set_bssid(sc, sc->sc_bssid); } if (vap->iv_opmode == IEEE80211_M_HOSTAP || vap->iv_opmode == IEEE80211_M_IBSS) { if ((ret = rum_alloc_beacon(sc, vap)) != 0) goto run_fail; } if (vap->iv_opmode != IEEE80211_M_MONITOR && vap->iv_opmode != IEEE80211_M_AHDEMO) { if ((ret = rum_enable_tsf_sync(sc)) != 0) goto run_fail; } else rum_enable_tsf(sc); /* enable automatic rate adaptation */ tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)]; if (tp->ucastrate == IEEE80211_FIXED_RATE_NONE) rum_ratectl_start(sc, ni); run_fail: ieee80211_free_node(ni); break; case IEEE80211_S_SLEEP: /* Implemented for STA mode only. 
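/*
 * rum_set_power_state() records when the scheduled doze should end
 * (ticks + sc_sleep_time) and rum_beacon_miss() above ignores beacon-miss
 * events until that deadline has passed, dropping the sleeping flag once the
 * device must be awake again.  A small sketch of the same bookkeeping with a
 * caller-supplied tick value; the names here are illustrative.
 */
#include <stdio.h>
#include <stdbool.h>

static bool sleeping;
static long sleep_end;		/* absolute tick at which the doze is over */

static void set_power_state(long now, long sleep_ticks, bool sleep)
{
	sleeping = sleep;
	sleep_end = sleep ? now + sleep_ticks : 0;
}

/* Returns true when a beacon-miss event should be acted upon. */
static bool beacon_miss_relevant(long now)
{
	if (sleeping && sleep_end < now)
		sleeping = false;	/* device must be awake again by now */
	return (!sleeping);
}

int main(void)
{
	set_power_state(100, 50, true);
	printf("at tick 120: %s\n", beacon_miss_relevant(120) ? "handle" : "ignore");
	printf("at tick 200: %s\n", beacon_miss_relevant(200) ? "handle" : "ignore");
	return (0);
}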
*/ if (vap->iv_opmode != IEEE80211_M_STA) break; uerror = rum_setbits(sc, RT2573_MAC_CSR11, RT2573_AUTO_WAKEUP); if (uerror != USB_ERR_NORMAL_COMPLETION) { ret = EIO; break; } uerror = rum_setbits(sc, RT2573_TXRX_CSR4, RT2573_ACKCTS_PWRMGT); if (uerror != USB_ERR_NORMAL_COMPLETION) { ret = EIO; break; } ret = rum_set_power_state(sc, 1); if (ret != 0) { device_printf(sc->sc_dev, "%s: could not move to the SLEEP state: %s\n", __func__, usbd_errstr(uerror)); } break; default: break; } RUM_UNLOCK(sc); IEEE80211_LOCK(ic); return (ret == 0 ? rvp->newstate(vap, nstate, arg) : ret); } static void rum_bulk_write_callback(struct usb_xfer *xfer, usb_error_t error) { struct rum_softc *sc = usbd_xfer_softc(xfer); struct ieee80211vap *vap; struct rum_tx_data *data; struct mbuf *m; struct usb_page_cache *pc; unsigned int len; int actlen, sumlen; usbd_xfer_status(xfer, &actlen, &sumlen, NULL, NULL); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: DPRINTFN(11, "transfer complete, %d bytes\n", actlen); /* free resources */ data = usbd_xfer_get_priv(xfer); rum_tx_free(data, 0); usbd_xfer_set_priv(xfer, NULL); /* FALLTHROUGH */ case USB_ST_SETUP: tr_setup: data = STAILQ_FIRST(&sc->tx_q); if (data) { STAILQ_REMOVE_HEAD(&sc->tx_q, next); m = data->m; if (m->m_pkthdr.len > (int)(MCLBYTES + RT2573_TX_DESC_SIZE)) { DPRINTFN(0, "data overflow, %u bytes\n", m->m_pkthdr.len); m->m_pkthdr.len = (MCLBYTES + RT2573_TX_DESC_SIZE); } pc = usbd_xfer_get_frame(xfer, 0); usbd_copy_in(pc, 0, &data->desc, RT2573_TX_DESC_SIZE); usbd_m_copy_in(pc, RT2573_TX_DESC_SIZE, m, 0, m->m_pkthdr.len); vap = data->ni->ni_vap; if (ieee80211_radiotap_active_vap(vap)) { struct rum_tx_radiotap_header *tap = &sc->sc_txtap; tap->wt_flags = 0; tap->wt_rate = data->rate; tap->wt_antenna = sc->tx_ant; ieee80211_radiotap_tx(vap, m); } /* align end on a 4-bytes boundary */ len = (RT2573_TX_DESC_SIZE + m->m_pkthdr.len + 3) & ~3; if ((len % 64) == 0) len += 4; DPRINTFN(11, "sending frame len=%u xferlen=%u\n", m->m_pkthdr.len, len); usbd_xfer_set_frame_len(xfer, 0, len); usbd_xfer_set_priv(xfer, data); usbd_transfer_submit(xfer); } rum_start(sc); break; default: /* Error */ DPRINTFN(11, "transfer error, %s\n", usbd_errstr(error)); counter_u64_add(sc->sc_ic.ic_oerrors, 1); data = usbd_xfer_get_priv(xfer); if (data != NULL) { rum_tx_free(data, error); usbd_xfer_set_priv(xfer, NULL); } if (error != USB_ERR_CANCELLED) { if (error == USB_ERR_TIMEOUT) device_printf(sc->sc_dev, "device timeout\n"); /* * Try to clear stall first, also if other * errors occur, hence clearing stall * introduces a 50 ms delay: */ usbd_xfer_set_stall(xfer); goto tr_setup; } break; } } static void rum_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error) { struct rum_softc *sc = usbd_xfer_softc(xfer); struct ieee80211com *ic = &sc->sc_ic; struct ieee80211_frame_min *wh; struct ieee80211_node *ni; struct mbuf *m = NULL; struct usb_page_cache *pc; uint32_t flags; uint8_t rssi = 0; int len; usbd_xfer_status(xfer, &len, NULL, NULL, NULL); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: DPRINTFN(15, "rx done, actlen=%d\n", len); if (len < RT2573_RX_DESC_SIZE) { DPRINTF("%s: xfer too short %d\n", device_get_nameunit(sc->sc_dev), len); counter_u64_add(ic->ic_ierrors, 1); goto tr_setup; } len -= RT2573_RX_DESC_SIZE; pc = usbd_xfer_get_frame(xfer, 0); usbd_copy_out(pc, 0, &sc->sc_rx_desc, RT2573_RX_DESC_SIZE); rssi = rum_get_rssi(sc, sc->sc_rx_desc.rssi); flags = le32toh(sc->sc_rx_desc.flags); sc->last_rx_flags = flags; if (len < ((flags >> 16) & 0xfff)) { DPRINTFN(5, 
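/*
 * The bulk-write setup path above rounds the USB transfer length up to a
 * 4-byte boundary and then adds 4 more bytes whenever the result is an exact
 * multiple of 64, so the bulk-out transfer never ends exactly on what is
 * presumably the endpoint's max-packet size.  A standalone sketch of that
 * length computation; TX_DESC_SIZE is an illustrative stand-in for
 * RT2573_TX_DESC_SIZE, not the hardware's actual value.
 */
#include <stdio.h>

#define TX_DESC_SIZE 24		/* illustrative stand-in */

static unsigned int tx_xfer_len(unsigned int pktlen)
{
	unsigned int len;

	/* align end on a 4-byte boundary */
	len = (TX_DESC_SIZE + pktlen + 3) & ~3u;
	/* avoid a transfer that is an exact multiple of 64 bytes */
	if ((len % 64) == 0)
		len += 4;
	return (len);
}

int main(void)
{
	printf("pkt 100 -> xfer %u\n", tx_xfer_len(100));	/* 124 */
	printf("pkt 104 -> xfer %u\n", tx_xfer_len(104));	/* 128 -> 132 */
	return (0);
}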
"%s: frame is truncated from %d to %d " "bytes\n", device_get_nameunit(sc->sc_dev), (flags >> 16) & 0xfff, len); counter_u64_add(ic->ic_ierrors, 1); goto tr_setup; } len = (flags >> 16) & 0xfff; if (len < sizeof(struct ieee80211_frame_ack)) { DPRINTFN(5, "%s: frame too short %d\n", device_get_nameunit(sc->sc_dev), len); counter_u64_add(ic->ic_ierrors, 1); goto tr_setup; } if (flags & RT2573_RX_CRC_ERROR) { /* * This should not happen since we did not * request to receive those frames when we * filled RUM_TXRX_CSR2: */ DPRINTFN(5, "PHY or CRC error\n"); counter_u64_add(ic->ic_ierrors, 1); goto tr_setup; } if ((flags & RT2573_RX_DEC_MASK) != RT2573_RX_DEC_OK) { switch (flags & RT2573_RX_DEC_MASK) { case RT2573_RX_IV_ERROR: DPRINTFN(5, "IV/EIV error\n"); break; case RT2573_RX_MIC_ERROR: DPRINTFN(5, "MIC error\n"); break; case RT2573_RX_KEY_ERROR: DPRINTFN(5, "Key error\n"); break; } counter_u64_add(ic->ic_ierrors, 1); goto tr_setup; } m = m_get2(len, M_NOWAIT, MT_DATA, M_PKTHDR); if (m == NULL) { DPRINTF("could not allocate mbuf\n"); counter_u64_add(ic->ic_ierrors, 1); goto tr_setup; } usbd_copy_out(pc, RT2573_RX_DESC_SIZE, mtod(m, uint8_t *), len); wh = mtod(m, struct ieee80211_frame_min *); if ((wh->i_fc[1] & IEEE80211_FC1_PROTECTED) && (flags & RT2573_RX_CIP_MASK) != RT2573_RX_CIP_MODE(RT2573_MODE_NOSEC)) { wh->i_fc[1] &= ~IEEE80211_FC1_PROTECTED; m->m_flags |= M_WEP; } /* finalize mbuf */ m->m_pkthdr.len = m->m_len = len; if (ieee80211_radiotap_active(ic)) { struct rum_rx_radiotap_header *tap = &sc->sc_rxtap; tap->wr_flags = 0; tap->wr_rate = ieee80211_plcp2rate(sc->sc_rx_desc.rate, (flags & RT2573_RX_OFDM) ? IEEE80211_T_OFDM : IEEE80211_T_CCK); rum_get_tsf(sc, &tap->wr_tsf); tap->wr_antsignal = RT2573_NOISE_FLOOR + rssi; tap->wr_antnoise = RT2573_NOISE_FLOOR; tap->wr_antenna = sc->rx_ant; } /* FALLTHROUGH */ case USB_ST_SETUP: tr_setup: usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer)); usbd_transfer_submit(xfer); /* * At the end of a USB callback it is always safe to unlock * the private mutex of a device! That is why we do the * "ieee80211_input" here, and not some lines up! */ RUM_UNLOCK(sc); if (m) { if (m->m_len >= sizeof(struct ieee80211_frame_min)) ni = ieee80211_find_rxnode(ic, wh); else ni = NULL; if (ni != NULL) { (void) ieee80211_input(ni, m, rssi, RT2573_NOISE_FLOOR); ieee80211_free_node(ni); } else (void) ieee80211_input_all(ic, m, rssi, RT2573_NOISE_FLOOR); } RUM_LOCK(sc); rum_start(sc); return; default: /* Error */ if (error != USB_ERR_CANCELLED) { /* try to clear stall first */ usbd_xfer_set_stall(xfer); goto tr_setup; } return; } } static uint8_t rum_plcp_signal(int rate) { switch (rate) { /* OFDM rates (cf IEEE Std 802.11a-1999, pp. 14 Table 80) */ case 12: return 0xb; case 18: return 0xf; case 24: return 0xa; case 36: return 0xe; case 48: return 0x9; case 72: return 0xd; case 96: return 0x8; case 108: return 0xc; /* CCK rates (NB: not IEEE std, device-specific) */ case 2: return 0x0; case 4: return 0x1; case 11: return 0x2; case 22: return 0x3; } return 0xff; /* XXX unsupported/unknown rate */ } /* * Map net80211 cipher to RT2573 security mode. */ static uint8_t rum_crypto_mode(struct rum_softc *sc, u_int cipher, int keylen) { switch (cipher) { case IEEE80211_CIPHER_WEP: return (keylen < 8 ? 
RT2573_MODE_WEP40 : RT2573_MODE_WEP104); case IEEE80211_CIPHER_TKIP: return RT2573_MODE_TKIP; case IEEE80211_CIPHER_AES_CCM: return RT2573_MODE_AES_CCMP; default: device_printf(sc->sc_dev, "unknown cipher %d\n", cipher); return 0; } } static void rum_setup_tx_desc(struct rum_softc *sc, struct rum_tx_desc *desc, struct ieee80211_key *k, uint32_t flags, uint8_t xflags, uint8_t qid, int hdrlen, int len, int rate) { struct ieee80211com *ic = &sc->sc_ic; struct wmeParams *wmep = &sc->wme_params[qid]; uint16_t plcp_length; int remainder; flags |= RT2573_TX_VALID; flags |= len << 16; if (k != NULL && !(k->wk_flags & IEEE80211_KEY_SWCRYPT)) { const struct ieee80211_cipher *cip = k->wk_cipher; len += cip->ic_header + cip->ic_trailer + cip->ic_miclen; desc->eiv = 0; /* for WEP */ cip->ic_setiv(k, (uint8_t *)&desc->iv); } /* setup PLCP fields */ desc->plcp_signal = rum_plcp_signal(rate); desc->plcp_service = 4; len += IEEE80211_CRC_LEN; if (ieee80211_rate2phytype(ic->ic_rt, rate) == IEEE80211_T_OFDM) { flags |= RT2573_TX_OFDM; plcp_length = len & 0xfff; desc->plcp_length_hi = plcp_length >> 6; desc->plcp_length_lo = plcp_length & 0x3f; } else { if (rate == 0) rate = 2; /* avoid division by zero */ plcp_length = howmany(16 * len, rate); if (rate == 22) { remainder = (16 * len) % 22; if (remainder != 0 && remainder < 7) desc->plcp_service |= RT2573_PLCP_LENGEXT; } desc->plcp_length_hi = plcp_length >> 8; desc->plcp_length_lo = plcp_length & 0xff; if (rate != 2 && (ic->ic_flags & IEEE80211_F_SHPREAMBLE)) desc->plcp_signal |= 0x08; } desc->flags = htole32(flags); desc->hdrlen = hdrlen; desc->xflags = xflags; desc->wme = htole16(RT2573_QID(qid) | RT2573_AIFSN(wmep->wmep_aifsn) | RT2573_LOGCWMIN(wmep->wmep_logcwmin) | RT2573_LOGCWMAX(wmep->wmep_logcwmax)); } static int rum_sendprot(struct rum_softc *sc, const struct mbuf *m, struct ieee80211_node *ni, int prot, int rate) { struct ieee80211com *ic = ni->ni_ic; - const struct ieee80211_frame *wh; struct rum_tx_data *data; struct mbuf *mprot; - int protrate, pktlen, flags, isshort; - uint16_t dur; + int protrate, flags; RUM_LOCK_ASSERT(sc); - KASSERT(prot == IEEE80211_PROT_RTSCTS || prot == IEEE80211_PROT_CTSONLY, - ("protection %d", prot)); - wh = mtod(m, const struct ieee80211_frame *); - pktlen = m->m_pkthdr.len + IEEE80211_CRC_LEN; + mprot = ieee80211_alloc_prot(ni, m, rate, prot); + if (mprot == NULL) { + if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1); + device_printf(sc->sc_dev, + "could not allocate mbuf for protection mode %d\n", prot); + return (ENOBUFS); + } protrate = ieee80211_ctl_rate(ic->ic_rt, rate); - - isshort = (ic->ic_flags & IEEE80211_F_SHPREAMBLE) != 0; - dur = ieee80211_compute_duration(ic->ic_rt, pktlen, rate, isshort) - + ieee80211_ack_duration(ic->ic_rt, rate, isshort); flags = 0; - if (prot == IEEE80211_PROT_RTSCTS) { - /* NB: CTS is the same size as an ACK */ - dur += ieee80211_ack_duration(ic->ic_rt, rate, isshort); + if (prot == IEEE80211_PROT_RTSCTS) flags |= RT2573_TX_NEED_ACK; - mprot = ieee80211_alloc_rts(ic, wh->i_addr1, wh->i_addr2, dur); - } else { - mprot = ieee80211_alloc_cts(ic, ni->ni_vap->iv_myaddr, dur); - } - if (mprot == NULL) { - /* XXX stat + msg */ - return (ENOBUFS); - } + data = STAILQ_FIRST(&sc->tx_free); STAILQ_REMOVE_HEAD(&sc->tx_free, next); sc->tx_nfree--; data->m = mprot; data->ni = ieee80211_ref_node(ni); data->rate = protrate; rum_setup_tx_desc(sc, &data->desc, NULL, flags, 0, 0, 0, mprot->m_pkthdr.len, protrate); STAILQ_INSERT_TAIL(&sc->tx_q, data, next); 
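/*
 * For CCK rates the descriptor setup above converts the PSDU length into the
 * PLCP LENGTH field in microseconds: with the rate expressed in 500 kb/s
 * units, the airtime is ceil(16 * len / rate), and at 11 Mb/s (rate == 22)
 * the length-extension bit disambiguates the rounding when the remainder is
 * between 1 and 6.  A standalone sketch of that computation, with howmany()
 * spelled out explicitly.
 */
#include <stdio.h>

/*
 * len is the frame length in bytes (FCS included), rate is in 500 kb/s units
 * (2 = 1 Mb/s, 22 = 11 Mb/s).  Returns the PLCP LENGTH value in microseconds
 * and reports whether the length-extension bit is needed.
 */
static unsigned int cck_plcp_length(unsigned int len, unsigned int rate,
    int *lengext)
{
	unsigned int plcp = (16 * len + rate - 1) / rate;	/* howmany() */

	*lengext = 0;
	if (rate == 22) {
		unsigned int rem = (16 * len) % 22;

		if (rem != 0 && rem < 7)
			*lengext = 1;
	}
	return (plcp);
}

int main(void)
{
	int ext;
	unsigned int us = cck_plcp_length(1024 + 4, 22, &ext);

	printf("1028 bytes at 11 Mb/s: LENGTH=%u us, LENGEXT=%d\n", us, ext);
	return (0);
}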
usbd_transfer_start(sc->sc_xfer[RUM_BULK_WR]); return 0; } static uint32_t rum_tx_crypto_flags(struct rum_softc *sc, struct ieee80211_node *ni, const struct ieee80211_key *k) { struct ieee80211vap *vap = ni->ni_vap; u_int cipher; uint32_t flags = 0; uint8_t mode, pos; if (!(k->wk_flags & IEEE80211_KEY_SWCRYPT)) { cipher = k->wk_cipher->ic_cipher; pos = k->wk_keyix; mode = rum_crypto_mode(sc, cipher, k->wk_keylen); if (mode == 0) return 0; flags |= RT2573_TX_CIP_MODE(mode); /* Do not trust GROUP flag */ if (!(k >= &vap->iv_nw_keys[0] && k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])) flags |= RT2573_TX_KEY_PAIR; else pos += 0 * RT2573_SKEY_MAX; /* vap id */ flags |= RT2573_TX_KEY_ID(pos); if (cipher == IEEE80211_CIPHER_TKIP) flags |= RT2573_TX_TKIPMIC; } return flags; } static int rum_tx_mgt(struct rum_softc *sc, struct mbuf *m0, struct ieee80211_node *ni) { const struct ieee80211_txparam *tp = ni->ni_txparms; struct ieee80211com *ic = &sc->sc_ic; struct rum_tx_data *data; struct ieee80211_frame *wh; struct ieee80211_key *k = NULL; uint32_t flags = 0; uint16_t dur; uint8_t ac, type, xflags = 0; int hdrlen; RUM_LOCK_ASSERT(sc); data = STAILQ_FIRST(&sc->tx_free); STAILQ_REMOVE_HEAD(&sc->tx_free, next); sc->tx_nfree--; wh = mtod(m0, struct ieee80211_frame *); type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; hdrlen = ieee80211_anyhdrsize(wh); ac = M_WME_GETAC(m0); if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { k = ieee80211_crypto_get_txkey(ni, m0); if (k == NULL) return (ENOENT); if ((k->wk_flags & IEEE80211_KEY_SWCRYPT) && !k->wk_cipher->ic_encap(k, m0)) return (ENOBUFS); wh = mtod(m0, struct ieee80211_frame *); } if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { flags |= RT2573_TX_NEED_ACK; dur = ieee80211_ack_duration(ic->ic_rt, tp->mgmtrate, ic->ic_flags & IEEE80211_F_SHPREAMBLE); USETW(wh->i_dur, dur); /* tell hardware to add timestamp for probe responses */ if (type == IEEE80211_FC0_TYPE_MGT && (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) == IEEE80211_FC0_SUBTYPE_PROBE_RESP) flags |= RT2573_TX_TIMESTAMP; } if (type != IEEE80211_FC0_TYPE_CTL && !IEEE80211_QOS_HAS_SEQ(wh)) xflags |= RT2573_TX_HWSEQ; if (k != NULL) flags |= rum_tx_crypto_flags(sc, ni, k); data->m = m0; data->ni = ni; data->rate = tp->mgmtrate; rum_setup_tx_desc(sc, &data->desc, k, flags, xflags, ac, hdrlen, m0->m_pkthdr.len, tp->mgmtrate); DPRINTFN(10, "sending mgt frame len=%d rate=%d\n", m0->m_pkthdr.len + (int)RT2573_TX_DESC_SIZE, tp->mgmtrate); STAILQ_INSERT_TAIL(&sc->tx_q, data, next); usbd_transfer_start(sc->sc_xfer[RUM_BULK_WR]); return (0); } static int rum_tx_raw(struct rum_softc *sc, struct mbuf *m0, struct ieee80211_node *ni, const struct ieee80211_bpf_params *params) { struct ieee80211com *ic = ni->ni_ic; struct ieee80211_frame *wh; struct rum_tx_data *data; uint32_t flags; uint8_t ac, type, xflags = 0; int rate, error; RUM_LOCK_ASSERT(sc); wh = mtod(m0, struct ieee80211_frame *); type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; ac = params->ibp_pri & 3; rate = params->ibp_rate0; if (!ieee80211_isratevalid(ic->ic_rt, rate)) return (EINVAL); flags = 0; if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0) flags |= RT2573_TX_NEED_ACK; if (params->ibp_flags & (IEEE80211_BPF_RTS|IEEE80211_BPF_CTS)) { error = rum_sendprot(sc, m0, ni, params->ibp_flags & IEEE80211_BPF_RTS ? 
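/*
 * rum_tx_crypto_flags() above, like the key callbacks later in the file,
 * classifies a key as group or pairwise by checking whether it points into
 * the vap's iv_nw_keys[] array rather than trusting the GROUP flag.  A
 * minimal sketch of that pointer-range test with cut-down structures; the
 * types here are illustrative, not net80211's.
 */
#include <stdio.h>
#include <stdbool.h>

#define WEP_NKID 4	/* number of global (group) key slots, as in net80211 */

struct key { int dummy; };

struct vap {
	struct key nw_keys[WEP_NKID];	/* group keys live inside the vap */
};

/* A key is a group key iff it points into the vap's nw_keys[] array. */
static bool is_group_key(const struct vap *vap, const struct key *k)
{
	return (k >= &vap->nw_keys[0] && k < &vap->nw_keys[WEP_NKID]);
}

int main(void)
{
	struct vap v;
	struct key pairwise;

	printf("nw_keys[1]: %d\n", is_group_key(&v, &v.nw_keys[1]));
	printf("pairwise:   %d\n", is_group_key(&v, &pairwise));
	return (0);
}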
IEEE80211_PROT_RTSCTS : IEEE80211_PROT_CTSONLY, rate); if (error || sc->tx_nfree == 0) return (ENOBUFS); flags |= RT2573_TX_LONG_RETRY | RT2573_TX_IFS_SIFS; } if (type != IEEE80211_FC0_TYPE_CTL && !IEEE80211_QOS_HAS_SEQ(wh)) xflags |= RT2573_TX_HWSEQ; data = STAILQ_FIRST(&sc->tx_free); STAILQ_REMOVE_HEAD(&sc->tx_free, next); sc->tx_nfree--; data->m = m0; data->ni = ni; data->rate = rate; /* XXX need to setup descriptor ourself */ rum_setup_tx_desc(sc, &data->desc, NULL, flags, xflags, ac, 0, m0->m_pkthdr.len, rate); DPRINTFN(10, "sending raw frame len=%u rate=%u\n", m0->m_pkthdr.len, rate); STAILQ_INSERT_TAIL(&sc->tx_q, data, next); usbd_transfer_start(sc->sc_xfer[RUM_BULK_WR]); return 0; } static int rum_tx_data(struct rum_softc *sc, struct mbuf *m0, struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = &sc->sc_ic; struct rum_tx_data *data; struct ieee80211_frame *wh; const struct ieee80211_txparam *tp = ni->ni_txparms; struct ieee80211_key *k = NULL; uint32_t flags = 0; uint16_t dur; uint8_t ac, type, qos, xflags = 0; int error, hdrlen, rate; RUM_LOCK_ASSERT(sc); wh = mtod(m0, struct ieee80211_frame *); type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; hdrlen = ieee80211_anyhdrsize(wh); if (IEEE80211_QOS_HAS_SEQ(wh)) qos = ((const struct ieee80211_qosframe *)wh)->i_qos[0]; else qos = 0; ac = M_WME_GETAC(m0); if (m0->m_flags & M_EAPOL) rate = tp->mgmtrate; else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) rate = tp->mcastrate; else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) rate = tp->ucastrate; else { (void) ieee80211_ratectl_rate(ni, NULL, 0); rate = ni->ni_txrate; } if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { k = ieee80211_crypto_get_txkey(ni, m0); if (k == NULL) { m_freem(m0); return (ENOENT); } if ((k->wk_flags & IEEE80211_KEY_SWCRYPT) && !k->wk_cipher->ic_encap(k, m0)) { m_freem(m0); return (ENOBUFS); } /* packet header may have moved, reset our local pointer */ wh = mtod(m0, struct ieee80211_frame *); } if (type != IEEE80211_FC0_TYPE_CTL && !IEEE80211_QOS_HAS_SEQ(wh)) xflags |= RT2573_TX_HWSEQ; if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { int prot = IEEE80211_PROT_NONE; if (m0->m_pkthdr.len + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) prot = IEEE80211_PROT_RTSCTS; else if ((ic->ic_flags & IEEE80211_F_USEPROT) && ieee80211_rate2phytype(ic->ic_rt, rate) == IEEE80211_T_OFDM) prot = ic->ic_protmode; if (prot != IEEE80211_PROT_NONE) { error = rum_sendprot(sc, m0, ni, prot, rate); if (error || sc->tx_nfree == 0) { m_freem(m0); return ENOBUFS; } flags |= RT2573_TX_LONG_RETRY | RT2573_TX_IFS_SIFS; } } if (k != NULL) flags |= rum_tx_crypto_flags(sc, ni, k); data = STAILQ_FIRST(&sc->tx_free); STAILQ_REMOVE_HEAD(&sc->tx_free, next); sc->tx_nfree--; data->m = m0; data->ni = ni; data->rate = rate; if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { /* Unicast frame, check if an ACK is expected. 
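/*
 * rum_tx_data() above picks a protection scheme per unicast frame: RTS/CTS
 * when the frame plus FCS exceeds the RTS threshold, otherwise the configured
 * ERP protection mode when the frame goes out at an OFDM rate and protection
 * is in force.  A condensed sketch of that decision; the enum values mirror
 * net80211's IEEE80211_PROT_* names but are local to the example.
 */
#include <stdio.h>
#include <stdbool.h>

enum prot { PROT_NONE, PROT_CTSONLY, PROT_RTSCTS };

#define CRC_LEN 4	/* FCS appended on the air */

static enum prot
pick_protection(int pktlen, int rtsthreshold, bool useprot, bool is_ofdm,
    enum prot protmode)
{
	if (pktlen + CRC_LEN > rtsthreshold)
		return (PROT_RTSCTS);
	if (useprot && is_ofdm)
		return (protmode);	/* e.g. CTS-to-self in a mixed b/g BSS */
	return (PROT_NONE);
}

int main(void)
{
	printf("%d\n", pick_protection(2400, 2346, false, false, PROT_NONE));
	printf("%d\n", pick_protection(1500, 2346, true, true, PROT_CTSONLY));
	return (0);
}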
*/ if (!qos || (qos & IEEE80211_QOS_ACKPOLICY) != IEEE80211_QOS_ACKPOLICY_NOACK) flags |= RT2573_TX_NEED_ACK; dur = ieee80211_ack_duration(ic->ic_rt, rate, ic->ic_flags & IEEE80211_F_SHPREAMBLE); USETW(wh->i_dur, dur); } rum_setup_tx_desc(sc, &data->desc, k, flags, xflags, ac, hdrlen, m0->m_pkthdr.len, rate); DPRINTFN(10, "sending frame len=%d rate=%d\n", m0->m_pkthdr.len + (int)RT2573_TX_DESC_SIZE, rate); STAILQ_INSERT_TAIL(&sc->tx_q, data, next); usbd_transfer_start(sc->sc_xfer[RUM_BULK_WR]); return 0; } static int rum_transmit(struct ieee80211com *ic, struct mbuf *m) { struct rum_softc *sc = ic->ic_softc; int error; RUM_LOCK(sc); if (!sc->sc_running) { RUM_UNLOCK(sc); return (ENXIO); } error = mbufq_enqueue(&sc->sc_snd, m); if (error) { RUM_UNLOCK(sc); return (error); } rum_start(sc); RUM_UNLOCK(sc); return (0); } static void rum_start(struct rum_softc *sc) { struct ieee80211_node *ni; struct mbuf *m; RUM_LOCK_ASSERT(sc); if (!sc->sc_running) return; while (sc->tx_nfree >= RUM_TX_MINFREE && (m = mbufq_dequeue(&sc->sc_snd)) != NULL) { ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; if (rum_tx_data(sc, m, ni) != 0) { if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1); ieee80211_free_node(ni); break; } } } static void rum_parent(struct ieee80211com *ic) { struct rum_softc *sc = ic->ic_softc; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); RUM_LOCK(sc); if (sc->sc_detached) { RUM_UNLOCK(sc); return; } RUM_UNLOCK(sc); if (ic->ic_nrunning > 0) { if (rum_init(sc) == 0) ieee80211_start_all(ic); else ieee80211_stop(vap); } else rum_stop(sc); } static void rum_eeprom_read(struct rum_softc *sc, uint16_t addr, void *buf, int len) { struct usb_device_request req; usb_error_t error; req.bmRequestType = UT_READ_VENDOR_DEVICE; req.bRequest = RT2573_READ_EEPROM; USETW(req.wValue, 0); USETW(req.wIndex, addr); USETW(req.wLength, len); error = rum_do_request(sc, &req, buf); if (error != 0) { device_printf(sc->sc_dev, "could not read EEPROM: %s\n", usbd_errstr(error)); } } static uint32_t rum_read(struct rum_softc *sc, uint16_t reg) { uint32_t val; rum_read_multi(sc, reg, &val, sizeof val); return le32toh(val); } static void rum_read_multi(struct rum_softc *sc, uint16_t reg, void *buf, int len) { struct usb_device_request req; usb_error_t error; req.bmRequestType = UT_READ_VENDOR_DEVICE; req.bRequest = RT2573_READ_MULTI_MAC; USETW(req.wValue, 0); USETW(req.wIndex, reg); USETW(req.wLength, len); error = rum_do_request(sc, &req, buf); if (error != 0) { device_printf(sc->sc_dev, "could not multi read MAC register: %s\n", usbd_errstr(error)); } } static usb_error_t rum_write(struct rum_softc *sc, uint16_t reg, uint32_t val) { uint32_t tmp = htole32(val); return (rum_write_multi(sc, reg, &tmp, sizeof tmp)); } static usb_error_t rum_write_multi(struct rum_softc *sc, uint16_t reg, void *buf, size_t len) { struct usb_device_request req; usb_error_t error; size_t offset; req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = RT2573_WRITE_MULTI_MAC; USETW(req.wValue, 0); /* write at most 64 bytes at a time */ for (offset = 0; offset < len; offset += 64) { USETW(req.wIndex, reg + offset); USETW(req.wLength, MIN(len - offset, 64)); error = rum_do_request(sc, &req, (char *)buf + offset); if (error != 0) { device_printf(sc->sc_dev, "could not multi write MAC register: %s\n", usbd_errstr(error)); return (error); } } return (USB_ERR_NORMAL_COMPLETION); } static usb_error_t rum_setbits(struct rum_softc *sc, uint16_t reg, uint32_t mask) { return (rum_write(sc, reg, rum_read(sc, reg) | mask)); } static 
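/*
 * rum_write_multi() above splits a register block into vendor requests of at
 * most 64 bytes, bumping the target register (wIndex) by the byte offset of
 * each chunk.  A sketch of that chunking loop with the control transfer
 * itself stubbed out; the register address in main() is arbitrary.
 */
#include <stdio.h>
#include <stddef.h>

#define CHUNK 64	/* write at most 64 bytes per control request */

/* Stand-in for the vendor control request; just reports what it would do. */
static int write_chunk(unsigned int reg, const char *buf, size_t len)
{
	printf("write %zu bytes at register 0x%04x\n", len, reg);
	return (0);
}

static int write_multi(unsigned int reg, const char *buf, size_t len)
{
	size_t offset;

	for (offset = 0; offset < len; offset += CHUNK) {
		size_t n = (len - offset < CHUNK) ? len - offset : CHUNK;

		if (write_chunk(reg + offset, buf + offset, n) != 0)
			return (-1);
	}
	return (0);
}

int main(void)
{
	static char image[150];

	return (write_multi(0x2400, image, sizeof(image)));
}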
usb_error_t rum_clrbits(struct rum_softc *sc, uint16_t reg, uint32_t mask) { return (rum_write(sc, reg, rum_read(sc, reg) & ~mask)); } static usb_error_t rum_modbits(struct rum_softc *sc, uint16_t reg, uint32_t set, uint32_t unset) { return (rum_write(sc, reg, (rum_read(sc, reg) & ~unset) | set)); } static int rum_bbp_busy(struct rum_softc *sc) { int ntries; for (ntries = 0; ntries < 100; ntries++) { if (!(rum_read(sc, RT2573_PHY_CSR3) & RT2573_BBP_BUSY)) break; if (rum_pause(sc, hz / 100)) break; } if (ntries == 100) return (ETIMEDOUT); return (0); } static void rum_bbp_write(struct rum_softc *sc, uint8_t reg, uint8_t val) { uint32_t tmp; DPRINTFN(2, "reg=0x%08x\n", reg); if (rum_bbp_busy(sc) != 0) { device_printf(sc->sc_dev, "could not write to BBP\n"); return; } tmp = RT2573_BBP_BUSY | (reg & 0x7f) << 8 | val; rum_write(sc, RT2573_PHY_CSR3, tmp); } static uint8_t rum_bbp_read(struct rum_softc *sc, uint8_t reg) { uint32_t val; int ntries; DPRINTFN(2, "reg=0x%08x\n", reg); if (rum_bbp_busy(sc) != 0) { device_printf(sc->sc_dev, "could not read BBP\n"); return 0; } val = RT2573_BBP_BUSY | RT2573_BBP_READ | reg << 8; rum_write(sc, RT2573_PHY_CSR3, val); for (ntries = 0; ntries < 100; ntries++) { val = rum_read(sc, RT2573_PHY_CSR3); if (!(val & RT2573_BBP_BUSY)) return val & 0xff; if (rum_pause(sc, hz / 100)) break; } device_printf(sc->sc_dev, "could not read BBP\n"); return 0; } static void rum_rf_write(struct rum_softc *sc, uint8_t reg, uint32_t val) { uint32_t tmp; int ntries; for (ntries = 0; ntries < 100; ntries++) { if (!(rum_read(sc, RT2573_PHY_CSR4) & RT2573_RF_BUSY)) break; if (rum_pause(sc, hz / 100)) break; } if (ntries == 100) { device_printf(sc->sc_dev, "could not write to RF\n"); return; } tmp = RT2573_RF_BUSY | RT2573_RF_20BIT | (val & 0xfffff) << 2 | (reg & 3); rum_write(sc, RT2573_PHY_CSR4, tmp); /* remember last written value in sc */ sc->rf_regs[reg] = val; DPRINTFN(15, "RF R[%u] <- 0x%05x\n", reg & 3, val & 0xfffff); } static void rum_select_antenna(struct rum_softc *sc) { uint8_t bbp4, bbp77; uint32_t tmp; bbp4 = rum_bbp_read(sc, 4); bbp77 = rum_bbp_read(sc, 77); /* TBD */ /* make sure Rx is disabled before switching antenna */ tmp = rum_read(sc, RT2573_TXRX_CSR0); rum_write(sc, RT2573_TXRX_CSR0, tmp | RT2573_DISABLE_RX); rum_bbp_write(sc, 4, bbp4); rum_bbp_write(sc, 77, bbp77); rum_write(sc, RT2573_TXRX_CSR0, tmp); } /* * Enable multi-rate retries for frames sent at OFDM rates. * In 802.11b/g mode, allow fallback to CCK rates. 
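/*
 * The BBP and RF accessors above all share one pattern: poll a busy bit up to
 * 100 times, sleeping roughly 10 ms (hz / 100) between reads, and give up
 * with a timeout if it never clears.  A self-contained sketch of that bounded
 * polling loop; the register read is simulated and BUSY_BIT is illustrative.
 */
#include <stdio.h>
#include <stdint.h>
#include <errno.h>

#define BUSY_BIT 0x8000u	/* illustrative busy flag */

/* Simulated register: reports busy for the first few reads. */
static uint32_t read_reg(void)
{
	static int reads;

	return (++reads < 4) ? BUSY_BIT : 0;
}

static int wait_not_busy(void)
{
	int ntries;

	for (ntries = 0; ntries < 100; ntries++) {
		if (!(read_reg() & BUSY_BIT))
			return (0);
		/* the driver sleeps ~10 ms here between polls */
	}
	return (ETIMEDOUT);
}

int main(void)
{
	printf("wait_not_busy() = %d\n", wait_not_busy());
	return (0);
}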
*/ static void rum_enable_mrr(struct rum_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; if (!IEEE80211_IS_CHAN_5GHZ(ic->ic_bsschan)) { rum_setbits(sc, RT2573_TXRX_CSR4, RT2573_MRR_ENABLED | RT2573_MRR_CCK_FALLBACK); } else { rum_modbits(sc, RT2573_TXRX_CSR4, RT2573_MRR_ENABLED, RT2573_MRR_CCK_FALLBACK); } } static void rum_set_txpreamble(struct rum_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) rum_setbits(sc, RT2573_TXRX_CSR4, RT2573_SHORT_PREAMBLE); else rum_clrbits(sc, RT2573_TXRX_CSR4, RT2573_SHORT_PREAMBLE); } static void rum_set_basicrates(struct rum_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; /* update basic rate set */ if (ic->ic_curmode == IEEE80211_MODE_11B) { /* 11b basic rates: 1, 2Mbps */ rum_write(sc, RT2573_TXRX_CSR5, 0x3); } else if (IEEE80211_IS_CHAN_5GHZ(ic->ic_bsschan)) { /* 11a basic rates: 6, 12, 24Mbps */ rum_write(sc, RT2573_TXRX_CSR5, 0x150); } else { /* 11b/g basic rates: 1, 2, 5.5, 11Mbps */ rum_write(sc, RT2573_TXRX_CSR5, 0xf); } } /* * Reprogram MAC/BBP to switch to a new band. Values taken from the reference * driver. */ static void rum_select_band(struct rum_softc *sc, struct ieee80211_channel *c) { uint8_t bbp17, bbp35, bbp96, bbp97, bbp98, bbp104; /* update all BBP registers that depend on the band */ bbp17 = 0x20; bbp96 = 0x48; bbp104 = 0x2c; bbp35 = 0x50; bbp97 = 0x48; bbp98 = 0x48; if (IEEE80211_IS_CHAN_5GHZ(c)) { bbp17 += 0x08; bbp96 += 0x10; bbp104 += 0x0c; bbp35 += 0x10; bbp97 += 0x10; bbp98 += 0x10; } if ((IEEE80211_IS_CHAN_2GHZ(c) && sc->ext_2ghz_lna) || (IEEE80211_IS_CHAN_5GHZ(c) && sc->ext_5ghz_lna)) { bbp17 += 0x10; bbp96 += 0x10; bbp104 += 0x10; } sc->bbp17 = bbp17; rum_bbp_write(sc, 17, bbp17); rum_bbp_write(sc, 96, bbp96); rum_bbp_write(sc, 104, bbp104); if ((IEEE80211_IS_CHAN_2GHZ(c) && sc->ext_2ghz_lna) || (IEEE80211_IS_CHAN_5GHZ(c) && sc->ext_5ghz_lna)) { rum_bbp_write(sc, 75, 0x80); rum_bbp_write(sc, 86, 0x80); rum_bbp_write(sc, 88, 0x80); } rum_bbp_write(sc, 35, bbp35); rum_bbp_write(sc, 97, bbp97); rum_bbp_write(sc, 98, bbp98); if (IEEE80211_IS_CHAN_2GHZ(c)) { rum_modbits(sc, RT2573_PHY_CSR0, RT2573_PA_PE_2GHZ, RT2573_PA_PE_5GHZ); } else { rum_modbits(sc, RT2573_PHY_CSR0, RT2573_PA_PE_5GHZ, RT2573_PA_PE_2GHZ); } } static void rum_set_chan(struct rum_softc *sc, struct ieee80211_channel *c) { struct ieee80211com *ic = &sc->sc_ic; const struct rfprog *rfprog; uint8_t bbp3, bbp94 = RT2573_BBPR94_DEFAULT; int8_t power; int i, chan; chan = ieee80211_chan2ieee(ic, c); if (chan == 0 || chan == IEEE80211_CHAN_ANY) return; /* select the appropriate RF settings based on what EEPROM says */ rfprog = (sc->rf_rev == RT2573_RF_5225 || sc->rf_rev == RT2573_RF_2527) ? rum_rf5225 : rum_rf5226; /* find the settings for this channel (we know it exists) */ for (i = 0; rfprog[i].chan != chan; i++); power = sc->txpow[i]; if (power < 0) { bbp94 += power; power = 0; } else if (power > 31) { bbp94 += power - 31; power = 31; } /* * If we are switching from the 2GHz band to the 5GHz band or * vice-versa, BBP registers need to be reprogrammed. 
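/*
 * rum_set_chan() above clamps the per-channel EEPROM TX power into the RF
 * register's 0..31 range and folds any excess or deficit into the BBP
 * register 94 value, which is only rewritten when it moves away from its
 * default.  A small sketch of that clamping; BBPR94_DEFAULT is an
 * illustrative stand-in for RT2573_BBPR94_DEFAULT.
 */
#include <stdio.h>
#include <stdint.h>

#define BBPR94_DEFAULT 6	/* illustrative stand-in */

/* Clamp power to 0..31 and spill the difference into the BBP 94 value. */
static void clamp_txpower(int8_t *power, uint8_t *bbp94)
{
	*bbp94 = BBPR94_DEFAULT;
	if (*power < 0) {
		*bbp94 += *power;
		*power = 0;
	} else if (*power > 31) {
		*bbp94 += *power - 31;
		*power = 31;
	}
}

int main(void)
{
	int8_t power = 36;
	uint8_t bbp94;

	clamp_txpower(&power, &bbp94);
	printf("power=%d bbp94=%u\n", power, bbp94);	/* 31 and default + 5 */
	return (0);
}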
*/ if (c->ic_flags != ic->ic_curchan->ic_flags) { rum_select_band(sc, c); rum_select_antenna(sc); } ic->ic_curchan = c; rum_rf_write(sc, RT2573_RF1, rfprog[i].r1); rum_rf_write(sc, RT2573_RF2, rfprog[i].r2); rum_rf_write(sc, RT2573_RF3, rfprog[i].r3 | power << 7); rum_rf_write(sc, RT2573_RF4, rfprog[i].r4 | sc->rffreq << 10); rum_rf_write(sc, RT2573_RF1, rfprog[i].r1); rum_rf_write(sc, RT2573_RF2, rfprog[i].r2); rum_rf_write(sc, RT2573_RF3, rfprog[i].r3 | power << 7 | 1); rum_rf_write(sc, RT2573_RF4, rfprog[i].r4 | sc->rffreq << 10); rum_rf_write(sc, RT2573_RF1, rfprog[i].r1); rum_rf_write(sc, RT2573_RF2, rfprog[i].r2); rum_rf_write(sc, RT2573_RF3, rfprog[i].r3 | power << 7); rum_rf_write(sc, RT2573_RF4, rfprog[i].r4 | sc->rffreq << 10); rum_pause(sc, hz / 100); /* enable smart mode for MIMO-capable RFs */ bbp3 = rum_bbp_read(sc, 3); bbp3 &= ~RT2573_SMART_MODE; if (sc->rf_rev == RT2573_RF_5225 || sc->rf_rev == RT2573_RF_2527) bbp3 |= RT2573_SMART_MODE; rum_bbp_write(sc, 3, bbp3); if (bbp94 != RT2573_BBPR94_DEFAULT) rum_bbp_write(sc, 94, bbp94); /* give the chip some extra time to do the switchover */ rum_pause(sc, hz / 100); } static void rum_set_maxretry(struct rum_softc *sc, struct ieee80211vap *vap) { struct ieee80211_node *ni = vap->iv_bss; const struct ieee80211_txparam *tp = ni->ni_txparms; struct rum_vap *rvp = RUM_VAP(vap); rvp->maxretry = MIN(tp->maxretry, 0xf); rum_modbits(sc, RT2573_TXRX_CSR4, RT2573_SHORT_RETRY(rvp->maxretry) | RT2573_LONG_RETRY(rvp->maxretry), RT2573_SHORT_RETRY_MASK | RT2573_LONG_RETRY_MASK); } /* * Enable TSF synchronization and tell h/w to start sending beacons for IBSS * and HostAP operating modes. */ static int rum_enable_tsf_sync(struct rum_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); uint32_t tmp; uint16_t bintval; if (vap->iv_opmode != IEEE80211_M_STA) { /* * Change default 16ms TBTT adjustment to 8ms. * Must be done before enabling beacon generation. */ if (rum_write(sc, RT2573_TXRX_CSR10, 1 << 12 | 8) != 0) return EIO; } tmp = rum_read(sc, RT2573_TXRX_CSR9) & 0xff000000; /* set beacon interval (in 1/16ms unit) */ bintval = vap->iv_bss->ni_intval; tmp |= bintval * 16; tmp |= RT2573_TSF_TIMER_EN | RT2573_TBTT_TIMER_EN; switch (vap->iv_opmode) { case IEEE80211_M_STA: /* * Local TSF is always updated with remote TSF on beacon * reception. */ tmp |= RT2573_TSF_SYNC_MODE(RT2573_TSF_SYNC_MODE_STA); break; case IEEE80211_M_IBSS: /* * Local TSF is updated with remote TSF on beacon reception * only if the remote TSF is greater than local TSF. */ tmp |= RT2573_TSF_SYNC_MODE(RT2573_TSF_SYNC_MODE_IBSS); tmp |= RT2573_BCN_TX_EN; break; case IEEE80211_M_HOSTAP: /* SYNC with nobody */ tmp |= RT2573_TSF_SYNC_MODE(RT2573_TSF_SYNC_MODE_HOSTAP); tmp |= RT2573_BCN_TX_EN; break; default: device_printf(sc->sc_dev, "Enabling TSF failed. 
undefined opmode %d\n", vap->iv_opmode); return EINVAL; } if (rum_write(sc, RT2573_TXRX_CSR9, tmp) != 0) return EIO; /* refresh current sleep time */ return (rum_set_sleep_time(sc, bintval)); } static void rum_enable_tsf(struct rum_softc *sc) { rum_modbits(sc, RT2573_TXRX_CSR9, RT2573_TSF_TIMER_EN | RT2573_TSF_SYNC_MODE(RT2573_TSF_SYNC_MODE_DIS), 0x00ffffff); } static void rum_abort_tsf_sync(struct rum_softc *sc) { rum_clrbits(sc, RT2573_TXRX_CSR9, 0x00ffffff); } static void rum_get_tsf(struct rum_softc *sc, uint64_t *buf) { rum_read_multi(sc, RT2573_TXRX_CSR12, buf, sizeof (*buf)); } static void rum_update_slot_cb(struct rum_softc *sc, union sec_param *data, uint8_t rvp_id) { struct ieee80211com *ic = &sc->sc_ic; uint8_t slottime; slottime = IEEE80211_GET_SLOTTIME(ic); rum_modbits(sc, RT2573_MAC_CSR9, slottime, 0xff); DPRINTF("setting slot time to %uus\n", slottime); } static void rum_update_slot(struct ieee80211com *ic) { rum_cmd_sleepable(ic->ic_softc, NULL, 0, 0, rum_update_slot_cb); } static int rum_wme_update(struct ieee80211com *ic) { struct chanAccParams chp; const struct wmeParams *chanp; struct rum_softc *sc = ic->ic_softc; int error = 0; ieee80211_wme_ic_getparams(ic, &chp); chanp = chp.cap_wmeParams; RUM_LOCK(sc); error = rum_write(sc, RT2573_AIFSN_CSR, chanp[WME_AC_VO].wmep_aifsn << 12 | chanp[WME_AC_VI].wmep_aifsn << 8 | chanp[WME_AC_BK].wmep_aifsn << 4 | chanp[WME_AC_BE].wmep_aifsn); if (error) goto print_err; error = rum_write(sc, RT2573_CWMIN_CSR, chanp[WME_AC_VO].wmep_logcwmin << 12 | chanp[WME_AC_VI].wmep_logcwmin << 8 | chanp[WME_AC_BK].wmep_logcwmin << 4 | chanp[WME_AC_BE].wmep_logcwmin); if (error) goto print_err; error = rum_write(sc, RT2573_CWMAX_CSR, chanp[WME_AC_VO].wmep_logcwmax << 12 | chanp[WME_AC_VI].wmep_logcwmax << 8 | chanp[WME_AC_BK].wmep_logcwmax << 4 | chanp[WME_AC_BE].wmep_logcwmax); if (error) goto print_err; error = rum_write(sc, RT2573_TXOP01_CSR, chanp[WME_AC_BK].wmep_txopLimit << 16 | chanp[WME_AC_BE].wmep_txopLimit); if (error) goto print_err; error = rum_write(sc, RT2573_TXOP23_CSR, chanp[WME_AC_VO].wmep_txopLimit << 16 | chanp[WME_AC_VI].wmep_txopLimit); if (error) goto print_err; memcpy(sc->wme_params, chanp, sizeof(*chanp) * WME_NUM_AC); print_err: RUM_UNLOCK(sc); if (error != 0) { device_printf(sc->sc_dev, "%s: WME update failed, error %d\n", __func__, error); } return (error); } static void rum_set_bssid(struct rum_softc *sc, const uint8_t *bssid) { rum_write(sc, RT2573_MAC_CSR4, bssid[0] | bssid[1] << 8 | bssid[2] << 16 | bssid[3] << 24); rum_write(sc, RT2573_MAC_CSR5, bssid[4] | bssid[5] << 8 | RT2573_NUM_BSSID_MSK(1)); } static void rum_set_macaddr(struct rum_softc *sc, const uint8_t *addr) { rum_write(sc, RT2573_MAC_CSR2, addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24); rum_write(sc, RT2573_MAC_CSR3, addr[4] | addr[5] << 8 | 0xff << 16); } static void rum_setpromisc(struct rum_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; if (ic->ic_promisc == 0) rum_setbits(sc, RT2573_TXRX_CSR0, RT2573_DROP_NOT_TO_ME); else rum_clrbits(sc, RT2573_TXRX_CSR0, RT2573_DROP_NOT_TO_ME); DPRINTF("%s promiscuous mode\n", ic->ic_promisc > 0 ? "entering" : "leaving"); } static void rum_update_promisc(struct ieee80211com *ic) { struct rum_softc *sc = ic->ic_softc; RUM_LOCK(sc); if (sc->sc_running) rum_setpromisc(sc); RUM_UNLOCK(sc); } static void rum_update_mcast(struct ieee80211com *ic) { /* Ignore. 
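/*
 * rum_wme_update() above packs one 4-bit value per access category into each
 * QoS register, best-effort in the low nibble and voice in the highest nibble
 * (the TXOP limits are packed two 16-bit fields per register instead).  A
 * sketch of the nibble packing; the AC ordering follows the register layout
 * used above and the AIFSN values in main() are just sample inputs.
 */
#include <stdio.h>
#include <stdint.h>

/* Pack per-AC 4-bit values as VO<<12 | VI<<8 | BK<<4 | BE. */
static uint16_t pack_ac_nibbles(uint8_t be, uint8_t bk, uint8_t vi, uint8_t vo)
{
	return ((uint16_t)(vo & 0xf) << 12 | (uint16_t)(vi & 0xf) << 8 |
	    (uint16_t)(bk & 0xf) << 4 | (be & 0xf));
}

int main(void)
{
	/* sample AIFSN values: BE=3, BK=7, VI=2, VO=2 */
	printf("AIFSN_CSR = 0x%04x\n", pack_ac_nibbles(3, 7, 2, 2));
	return (0);
}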
*/ } static const char * rum_get_rf(int rev) { switch (rev) { case RT2573_RF_2527: return "RT2527 (MIMO XR)"; case RT2573_RF_2528: return "RT2528"; case RT2573_RF_5225: return "RT5225 (MIMO XR)"; case RT2573_RF_5226: return "RT5226"; default: return "unknown"; } } static void rum_read_eeprom(struct rum_softc *sc) { uint16_t val; #ifdef RUM_DEBUG int i; #endif /* read MAC address */ rum_eeprom_read(sc, RT2573_EEPROM_ADDRESS, sc->sc_ic.ic_macaddr, 6); rum_eeprom_read(sc, RT2573_EEPROM_ANTENNA, &val, 2); val = le16toh(val); sc->rf_rev = (val >> 11) & 0x1f; sc->hw_radio = (val >> 10) & 0x1; sc->rx_ant = (val >> 4) & 0x3; sc->tx_ant = (val >> 2) & 0x3; sc->nb_ant = val & 0x3; DPRINTF("RF revision=%d\n", sc->rf_rev); rum_eeprom_read(sc, RT2573_EEPROM_CONFIG2, &val, 2); val = le16toh(val); sc->ext_5ghz_lna = (val >> 6) & 0x1; sc->ext_2ghz_lna = (val >> 4) & 0x1; DPRINTF("External 2GHz LNA=%d\nExternal 5GHz LNA=%d\n", sc->ext_2ghz_lna, sc->ext_5ghz_lna); rum_eeprom_read(sc, RT2573_EEPROM_RSSI_2GHZ_OFFSET, &val, 2); val = le16toh(val); if ((val & 0xff) != 0xff) sc->rssi_2ghz_corr = (int8_t)(val & 0xff); /* signed */ /* Only [-10, 10] is valid */ if (sc->rssi_2ghz_corr < -10 || sc->rssi_2ghz_corr > 10) sc->rssi_2ghz_corr = 0; rum_eeprom_read(sc, RT2573_EEPROM_RSSI_5GHZ_OFFSET, &val, 2); val = le16toh(val); if ((val & 0xff) != 0xff) sc->rssi_5ghz_corr = (int8_t)(val & 0xff); /* signed */ /* Only [-10, 10] is valid */ if (sc->rssi_5ghz_corr < -10 || sc->rssi_5ghz_corr > 10) sc->rssi_5ghz_corr = 0; if (sc->ext_2ghz_lna) sc->rssi_2ghz_corr -= 14; if (sc->ext_5ghz_lna) sc->rssi_5ghz_corr -= 14; DPRINTF("RSSI 2GHz corr=%d\nRSSI 5GHz corr=%d\n", sc->rssi_2ghz_corr, sc->rssi_5ghz_corr); rum_eeprom_read(sc, RT2573_EEPROM_FREQ_OFFSET, &val, 2); val = le16toh(val); if ((val & 0xff) != 0xff) sc->rffreq = val & 0xff; DPRINTF("RF freq=%d\n", sc->rffreq); /* read Tx power for all a/b/g channels */ rum_eeprom_read(sc, RT2573_EEPROM_TXPOWER, sc->txpow, 14); /* XXX default Tx power for 802.11a channels */ memset(sc->txpow + 14, 24, sizeof (sc->txpow) - 14); #ifdef RUM_DEBUG for (i = 0; i < 14; i++) DPRINTF("Channel=%d Tx power=%d\n", i + 1, sc->txpow[i]); #endif /* read default values for BBP registers */ rum_eeprom_read(sc, RT2573_EEPROM_BBP_BASE, sc->bbp_prom, 2 * 16); #ifdef RUM_DEBUG for (i = 0; i < 14; i++) { if (sc->bbp_prom[i].reg == 0 || sc->bbp_prom[i].reg == 0xff) continue; DPRINTF("BBP R%d=%02x\n", sc->bbp_prom[i].reg, sc->bbp_prom[i].val); } #endif } static int rum_bbp_wakeup(struct rum_softc *sc) { unsigned int ntries; for (ntries = 0; ntries < 100; ntries++) { if (rum_read(sc, RT2573_MAC_CSR12) & 8) break; rum_write(sc, RT2573_MAC_CSR12, 4); /* force wakeup */ if (rum_pause(sc, hz / 100)) break; } if (ntries == 100) { device_printf(sc->sc_dev, "timeout waiting for BBP/RF to wakeup\n"); return (ETIMEDOUT); } return (0); } static int rum_bbp_init(struct rum_softc *sc) { int i, ntries; /* wait for BBP to be ready */ for (ntries = 0; ntries < 100; ntries++) { const uint8_t val = rum_bbp_read(sc, 0); if (val != 0 && val != 0xff) break; if (rum_pause(sc, hz / 100)) break; } if (ntries == 100) { device_printf(sc->sc_dev, "timeout waiting for BBP\n"); return EIO; } /* initialize BBP registers to default values */ for (i = 0; i < nitems(rum_def_bbp); i++) rum_bbp_write(sc, rum_def_bbp[i].reg, rum_def_bbp[i].val); /* write vendor-specific BBP values (from EEPROM) */ for (i = 0; i < 16; i++) { if (sc->bbp_prom[i].reg == 0 || sc->bbp_prom[i].reg == 0xff) continue; rum_bbp_write(sc, sc->bbp_prom[i].reg, 
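/*
 * rum_read_eeprom() above treats an RSSI-correction byte of 0xff as
 * unprogrammed, accepts only values in the [-10, 10] range, and subtracts
 * 14 dB when an external LNA is fitted for that band.  A sketch of that
 * parsing applied to a raw little-endian EEPROM word.
 */
#include <stdio.h>
#include <stdint.h>

static int rssi_corr(uint16_t eeprom_word, int ext_lna)
{
	int corr = 0;

	if ((eeprom_word & 0xff) != 0xff)
		corr = (int8_t)(eeprom_word & 0xff);	/* signed byte */
	if (corr < -10 || corr > 10)
		corr = 0;				/* out of range: ignore */
	if (ext_lna)
		corr -= 14;
	return (corr);
}

int main(void)
{
	printf("%d %d %d\n", rssi_corr(0x00ff, 0), rssi_corr(0x0005, 1),
	    rssi_corr(0x007f, 0));	/* 0, -9, 0 */
	return (0);
}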
sc->bbp_prom[i].val); } return 0; } static void rum_clr_shkey_regs(struct rum_softc *sc) { rum_write(sc, RT2573_SEC_CSR0, 0); rum_write(sc, RT2573_SEC_CSR1, 0); rum_write(sc, RT2573_SEC_CSR5, 0); } static int rum_init(struct rum_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); uint32_t tmp; int i, ret; RUM_LOCK(sc); if (sc->sc_running) { ret = 0; goto end; } /* initialize MAC registers to default values */ for (i = 0; i < nitems(rum_def_mac); i++) rum_write(sc, rum_def_mac[i].reg, rum_def_mac[i].val); /* reset some WME parameters to default values */ sc->wme_params[0].wmep_aifsn = 2; sc->wme_params[0].wmep_logcwmin = 4; sc->wme_params[0].wmep_logcwmax = 10; /* set host ready */ rum_write(sc, RT2573_MAC_CSR1, RT2573_RESET_ASIC | RT2573_RESET_BBP); rum_write(sc, RT2573_MAC_CSR1, 0); /* wait for BBP/RF to wakeup */ if ((ret = rum_bbp_wakeup(sc)) != 0) goto end; if ((ret = rum_bbp_init(sc)) != 0) goto end; /* select default channel */ rum_select_band(sc, ic->ic_curchan); rum_select_antenna(sc); rum_set_chan(sc, ic->ic_curchan); /* clear STA registers */ rum_read_multi(sc, RT2573_STA_CSR0, sc->sta, sizeof sc->sta); /* clear security registers (if required) */ if (sc->sc_clr_shkeys == 0) { rum_clr_shkey_regs(sc); sc->sc_clr_shkeys = 1; } rum_set_macaddr(sc, vap ? vap->iv_myaddr : ic->ic_macaddr); /* initialize ASIC */ rum_write(sc, RT2573_MAC_CSR1, RT2573_HOST_READY); /* * Allocate Tx and Rx xfer queues. */ rum_setup_tx_list(sc); /* update Rx filter */ tmp = rum_read(sc, RT2573_TXRX_CSR0) & 0xffff; tmp |= RT2573_DROP_PHY_ERROR | RT2573_DROP_CRC_ERROR; if (ic->ic_opmode != IEEE80211_M_MONITOR) { tmp |= RT2573_DROP_CTL | RT2573_DROP_VER_ERROR | RT2573_DROP_ACKCTS; if (ic->ic_opmode != IEEE80211_M_HOSTAP) tmp |= RT2573_DROP_TODS; if (ic->ic_promisc == 0) tmp |= RT2573_DROP_NOT_TO_ME; } rum_write(sc, RT2573_TXRX_CSR0, tmp); sc->sc_running = 1; usbd_xfer_set_stall(sc->sc_xfer[RUM_BULK_WR]); usbd_transfer_start(sc->sc_xfer[RUM_BULK_RD]); end: RUM_UNLOCK(sc); if (ret != 0) rum_stop(sc); return ret; } static void rum_stop(struct rum_softc *sc) { RUM_LOCK(sc); if (!sc->sc_running) { RUM_UNLOCK(sc); return; } sc->sc_running = 0; RUM_UNLOCK(sc); /* * Drain the USB transfers, if not already drained: */ usbd_transfer_drain(sc->sc_xfer[RUM_BULK_WR]); usbd_transfer_drain(sc->sc_xfer[RUM_BULK_RD]); RUM_LOCK(sc); rum_unsetup_tx_list(sc); /* disable Rx */ rum_setbits(sc, RT2573_TXRX_CSR0, RT2573_DISABLE_RX); /* reset ASIC */ rum_write(sc, RT2573_MAC_CSR1, RT2573_RESET_ASIC | RT2573_RESET_BBP); rum_write(sc, RT2573_MAC_CSR1, 0); RUM_UNLOCK(sc); } static void rum_load_microcode(struct rum_softc *sc, const uint8_t *ucode, size_t size) { uint16_t reg = RT2573_MCU_CODE_BASE; usb_error_t err; /* copy firmware image into NIC */ for (; size >= 4; reg += 4, ucode += 4, size -= 4) { err = rum_write(sc, reg, UGETDW(ucode)); if (err) { /* firmware already loaded ? */ device_printf(sc->sc_dev, "Firmware load " "failure! 
(ignored)\n"); break; } } err = rum_do_mcu_request(sc, RT2573_MCU_RUN); if (err != USB_ERR_NORMAL_COMPLETION) { device_printf(sc->sc_dev, "could not run firmware: %s\n", usbd_errstr(err)); } /* give the chip some time to boot */ rum_pause(sc, hz / 8); } static int rum_set_sleep_time(struct rum_softc *sc, uint16_t bintval) { struct ieee80211com *ic = &sc->sc_ic; usb_error_t uerror; int exp, delay; RUM_LOCK_ASSERT(sc); exp = ic->ic_lintval / bintval; delay = ic->ic_lintval % bintval; if (exp > RT2573_TBCN_EXP_MAX) exp = RT2573_TBCN_EXP_MAX; if (delay > RT2573_TBCN_DELAY_MAX) delay = RT2573_TBCN_DELAY_MAX; uerror = rum_modbits(sc, RT2573_MAC_CSR11, RT2573_TBCN_EXP(exp) | RT2573_TBCN_DELAY(delay), RT2573_TBCN_EXP(RT2573_TBCN_EXP_MAX) | RT2573_TBCN_DELAY(RT2573_TBCN_DELAY_MAX)); if (uerror != USB_ERR_NORMAL_COMPLETION) return (EIO); sc->sc_sleep_time = IEEE80211_TU_TO_TICKS(exp * bintval + delay); return (0); } static int rum_reset(struct ieee80211vap *vap, u_long cmd) { struct ieee80211com *ic = vap->iv_ic; struct ieee80211_node *ni; struct rum_softc *sc = ic->ic_softc; int error; switch (cmd) { case IEEE80211_IOC_POWERSAVE: case IEEE80211_IOC_PROTMODE: case IEEE80211_IOC_RTSTHRESHOLD: error = 0; break; case IEEE80211_IOC_POWERSAVESLEEP: ni = ieee80211_ref_node(vap->iv_bss); RUM_LOCK(sc); error = rum_set_sleep_time(sc, ni->ni_intval); if (vap->iv_state == IEEE80211_S_SLEEP) { /* Use new values for wakeup timer. */ rum_clrbits(sc, RT2573_MAC_CSR11, RT2573_AUTO_WAKEUP); rum_setbits(sc, RT2573_MAC_CSR11, RT2573_AUTO_WAKEUP); } /* XXX send reassoc */ RUM_UNLOCK(sc); ieee80211_free_node(ni); break; default: error = ENETRESET; break; } return (error); } static int rum_set_beacon(struct rum_softc *sc, struct ieee80211vap *vap) { struct ieee80211com *ic = vap->iv_ic; struct rum_vap *rvp = RUM_VAP(vap); struct mbuf *m = rvp->bcn_mbuf; const struct ieee80211_txparam *tp; struct rum_tx_desc desc; RUM_LOCK_ASSERT(sc); if (m == NULL) return EINVAL; if (ic->ic_bsschan == IEEE80211_CHAN_ANYC) return EINVAL; tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_bsschan)]; rum_setup_tx_desc(sc, &desc, NULL, RT2573_TX_TIMESTAMP, RT2573_TX_HWSEQ, 0, 0, m->m_pkthdr.len, tp->mgmtrate); /* copy the Tx descriptor into NIC memory */ if (rum_write_multi(sc, RT2573_HW_BCN_BASE(0), (uint8_t *)&desc, RT2573_TX_DESC_SIZE) != 0) return EIO; /* copy beacon header and payload into NIC memory */ if (rum_write_multi(sc, RT2573_HW_BCN_BASE(0) + RT2573_TX_DESC_SIZE, mtod(m, uint8_t *), m->m_pkthdr.len) != 0) return EIO; return 0; } static int rum_alloc_beacon(struct rum_softc *sc, struct ieee80211vap *vap) { struct rum_vap *rvp = RUM_VAP(vap); struct ieee80211_node *ni = vap->iv_bss; struct mbuf *m; if (ni->ni_chan == IEEE80211_CHAN_ANYC) return EINVAL; m = ieee80211_beacon_alloc(ni); if (m == NULL) return ENOMEM; if (rvp->bcn_mbuf != NULL) m_freem(rvp->bcn_mbuf); rvp->bcn_mbuf = m; return (rum_set_beacon(sc, vap)); } static void rum_update_beacon_cb(struct rum_softc *sc, union sec_param *data, uint8_t rvp_id) { struct ieee80211vap *vap = data->vap; rum_set_beacon(sc, vap); } static void rum_update_beacon(struct ieee80211vap *vap, int item) { struct ieee80211com *ic = vap->iv_ic; struct rum_softc *sc = ic->ic_softc; struct rum_vap *rvp = RUM_VAP(vap); struct ieee80211_beacon_offsets *bo = &vap->iv_bcn_off; struct ieee80211_node *ni = vap->iv_bss; struct mbuf *m = rvp->bcn_mbuf; int mcast = 0; RUM_LOCK(sc); if (m == NULL) { m = ieee80211_beacon_alloc(ni); if (m == NULL) { device_printf(sc->sc_dev, "%s: could not allocate beacon 
frame\n", __func__); RUM_UNLOCK(sc); return; } rvp->bcn_mbuf = m; } switch (item) { case IEEE80211_BEACON_ERP: rum_update_slot(ic); break; case IEEE80211_BEACON_TIM: mcast = 1; /*TODO*/ break; default: break; } RUM_UNLOCK(sc); setbit(bo->bo_flags, item); ieee80211_beacon_update(ni, m, mcast); rum_cmd_sleepable(sc, &vap, sizeof(vap), 0, rum_update_beacon_cb); } static int rum_common_key_set(struct rum_softc *sc, struct ieee80211_key *k, uint16_t base) { if (rum_write_multi(sc, base, k->wk_key, k->wk_keylen)) return EIO; if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_TKIP) { if (rum_write_multi(sc, base + IEEE80211_KEYBUF_SIZE, k->wk_txmic, 8)) return EIO; if (rum_write_multi(sc, base + IEEE80211_KEYBUF_SIZE + 8, k->wk_rxmic, 8)) return EIO; } return 0; } static void rum_group_key_set_cb(struct rum_softc *sc, union sec_param *data, uint8_t rvp_id) { struct ieee80211_key *k = &data->key; uint8_t mode; if (sc->sc_clr_shkeys == 0) { rum_clr_shkey_regs(sc); sc->sc_clr_shkeys = 1; } mode = rum_crypto_mode(sc, k->wk_cipher->ic_cipher, k->wk_keylen); if (mode == 0) goto print_err; DPRINTFN(1, "setting group key %d for vap %d, mode %d " "(tx %s, rx %s)\n", k->wk_keyix, rvp_id, mode, (k->wk_flags & IEEE80211_KEY_XMIT) ? "on" : "off", (k->wk_flags & IEEE80211_KEY_RECV) ? "on" : "off"); /* Install the key. */ if (rum_common_key_set(sc, k, RT2573_SKEY(rvp_id, k->wk_keyix)) != 0) goto print_err; /* Set cipher mode. */ if (rum_modbits(sc, rvp_id < 2 ? RT2573_SEC_CSR1 : RT2573_SEC_CSR5, mode << (rvp_id % 2 + k->wk_keyix) * RT2573_SKEY_MAX, RT2573_MODE_MASK << (rvp_id % 2 + k->wk_keyix) * RT2573_SKEY_MAX) != 0) goto print_err; /* Mark this key as valid. */ if (rum_setbits(sc, RT2573_SEC_CSR0, 1 << (rvp_id * RT2573_SKEY_MAX + k->wk_keyix)) != 0) goto print_err; return; print_err: device_printf(sc->sc_dev, "%s: cannot set group key %d for vap %d\n", __func__, k->wk_keyix, rvp_id); } static void rum_group_key_del_cb(struct rum_softc *sc, union sec_param *data, uint8_t rvp_id) { struct ieee80211_key *k = &data->key; DPRINTF("%s: removing group key %d for vap %d\n", __func__, k->wk_keyix, rvp_id); rum_clrbits(sc, rvp_id < 2 ? RT2573_SEC_CSR1 : RT2573_SEC_CSR5, RT2573_MODE_MASK << (rvp_id % 2 + k->wk_keyix) * RT2573_SKEY_MAX); rum_clrbits(sc, RT2573_SEC_CSR0, rvp_id * RT2573_SKEY_MAX + k->wk_keyix); } static void rum_pair_key_set_cb(struct rum_softc *sc, union sec_param *data, uint8_t rvp_id) { struct ieee80211_key *k = &data->key; uint8_t buf[IEEE80211_ADDR_LEN + 1]; uint8_t mode; mode = rum_crypto_mode(sc, k->wk_cipher->ic_cipher, k->wk_keylen); if (mode == 0) goto print_err; DPRINTFN(1, "setting pairwise key %d for vap %d, mode %d " "(tx %s, rx %s)\n", k->wk_keyix, rvp_id, mode, (k->wk_flags & IEEE80211_KEY_XMIT) ? "on" : "off", (k->wk_flags & IEEE80211_KEY_RECV) ? "on" : "off"); /* Install the key. */ if (rum_common_key_set(sc, k, RT2573_PKEY(k->wk_keyix)) != 0) goto print_err; IEEE80211_ADDR_COPY(buf, k->wk_macaddr); buf[IEEE80211_ADDR_LEN] = mode; /* Set transmitter address and cipher mode. */ if (rum_write_multi(sc, RT2573_ADDR_ENTRY(k->wk_keyix), buf, sizeof buf) != 0) goto print_err; /* Enable key table lookup for this vap. */ if (sc->vap_key_count[rvp_id]++ == 0) if (rum_setbits(sc, RT2573_SEC_CSR4, 1 << rvp_id) != 0) goto print_err; /* Mark this key as valid. */ if (rum_setbits(sc, k->wk_keyix < 32 ? 
RT2573_SEC_CSR2 : RT2573_SEC_CSR3, 1 << (k->wk_keyix % 32)) != 0) goto print_err; return; print_err: device_printf(sc->sc_dev, "%s: cannot set pairwise key %d, vap %d\n", __func__, k->wk_keyix, rvp_id); } static void rum_pair_key_del_cb(struct rum_softc *sc, union sec_param *data, uint8_t rvp_id) { struct ieee80211_key *k = &data->key; DPRINTF("%s: removing key %d\n", __func__, k->wk_keyix); rum_clrbits(sc, (k->wk_keyix < 32) ? RT2573_SEC_CSR2 : RT2573_SEC_CSR3, 1 << (k->wk_keyix % 32)); sc->keys_bmap &= ~(1ULL << k->wk_keyix); if (--sc->vap_key_count[rvp_id] == 0) rum_clrbits(sc, RT2573_SEC_CSR4, 1 << rvp_id); } static int rum_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k, ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix) { struct rum_softc *sc = vap->iv_ic->ic_softc; uint8_t i; if (!(&vap->iv_nw_keys[0] <= k && k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])) { if (!(k->wk_flags & IEEE80211_KEY_SWCRYPT)) { RUM_LOCK(sc); for (i = 0; i < RT2573_ADDR_MAX; i++) { if ((sc->keys_bmap & (1ULL << i)) == 0) { sc->keys_bmap |= (1ULL << i); *keyix = i; break; } } RUM_UNLOCK(sc); if (i == RT2573_ADDR_MAX) { device_printf(sc->sc_dev, "%s: no free space in the key table\n", __func__); return 0; } } else *keyix = 0; } else { *keyix = ieee80211_crypto_get_key_wepidx(vap, k); } *rxkeyix = *keyix; return 1; } static int rum_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k) { struct rum_softc *sc = vap->iv_ic->ic_softc; int group; if (k->wk_flags & IEEE80211_KEY_SWCRYPT) { /* Not for us. */ return 1; } group = k >= &vap->iv_nw_keys[0] && k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]; return !rum_cmd_sleepable(sc, k, sizeof(*k), 0, group ? rum_group_key_set_cb : rum_pair_key_set_cb); } static int rum_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k) { struct rum_softc *sc = vap->iv_ic->ic_softc; int group; if (k->wk_flags & IEEE80211_KEY_SWCRYPT) { /* Not for us. */ return 1; } group = k >= &vap->iv_nw_keys[0] && k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]; return !rum_cmd_sleepable(sc, k, sizeof(*k), 0, group ? rum_group_key_del_cb : rum_pair_key_del_cb); } static int rum_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params) { struct rum_softc *sc = ni->ni_ic->ic_softc; int ret; RUM_LOCK(sc); /* prevent management frames from being sent if we're not ready */ if (!sc->sc_running) { ret = ENETDOWN; goto bad; } if (sc->tx_nfree < RUM_TX_MINFREE) { ret = EIO; goto bad; } if (params == NULL) { /* * Legacy path; interpret frame contents to decide * precisely how to send the frame. */ if ((ret = rum_tx_mgt(sc, m, ni)) != 0) goto bad; } else { /* * Caller supplied explicit parameters to use in * sending the frame. 
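/*
 * rum_key_alloc() and rum_pair_key_del_cb() above manage the hardware
 * pairwise-key table with a 64-bit bitmap: allocation scans for the first
 * clear bit below the table size and sets it, deletion clears it again.  A
 * standalone sketch of that allocator; KEY_MAX stands in for RT2573_ADDR_MAX.
 */
#include <stdio.h>
#include <stdint.h>

#define KEY_MAX 64	/* stands in for RT2573_ADDR_MAX */

static uint64_t keys_bmap;

/* Returns a free key slot, or -1 when the table is full. */
static int key_alloc(void)
{
	int i;

	for (i = 0; i < KEY_MAX; i++) {
		if ((keys_bmap & (1ULL << i)) == 0) {
			keys_bmap |= (1ULL << i);
			return (i);
		}
	}
	return (-1);
}

static void key_free(int i)
{
	keys_bmap &= ~(1ULL << i);
}

int main(void)
{
	int a = key_alloc(), b = key_alloc();

	printf("allocated slots %d and %d\n", a, b);
	key_free(a);
	printf("next allocation reuses slot %d\n", key_alloc());
	return (0);
}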
*/ if ((ret = rum_tx_raw(sc, m, ni, params)) != 0) goto bad; } RUM_UNLOCK(sc); return 0; bad: RUM_UNLOCK(sc); m_freem(m); return ret; } static void rum_ratectl_start(struct rum_softc *sc, struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct rum_vap *rvp = RUM_VAP(vap); /* clear statistic registers (STA_CSR0 to STA_CSR5) */ rum_read_multi(sc, RT2573_STA_CSR0, sc->sta, sizeof sc->sta); usb_callout_reset(&rvp->ratectl_ch, hz, rum_ratectl_timeout, rvp); } static void rum_ratectl_timeout(void *arg) { struct rum_vap *rvp = arg; struct ieee80211vap *vap = &rvp->vap; struct ieee80211com *ic = vap->iv_ic; ieee80211_runtask(ic, &rvp->ratectl_task); } static void rum_ratectl_task(void *arg, int pending) { struct rum_vap *rvp = arg; struct ieee80211vap *vap = &rvp->vap; struct rum_softc *sc = vap->iv_ic->ic_softc; struct ieee80211_ratectl_tx_stats *txs = &sc->sc_txs; int ok[3], fail; RUM_LOCK(sc); /* read and clear statistic registers (STA_CSR0 to STA_CSR5) */ rum_read_multi(sc, RT2573_STA_CSR0, sc->sta, sizeof(sc->sta)); ok[0] = (le32toh(sc->sta[4]) & 0xffff); /* TX ok w/o retry */ ok[1] = (le32toh(sc->sta[4]) >> 16); /* TX ok w/ one retry */ ok[2] = (le32toh(sc->sta[5]) & 0xffff); /* TX ok w/ multiple retries */ fail = (le32toh(sc->sta[5]) >> 16); /* TX retry-fail count */ txs->flags = IEEE80211_RATECTL_TX_STATS_RETRIES; txs->nframes = ok[0] + ok[1] + ok[2] + fail; txs->nsuccess = txs->nframes - fail; /* XXX at least */ txs->nretries = ok[1] + ok[2] * 2 + fail * (rvp->maxretry + 1); if (txs->nframes != 0) ieee80211_ratectl_tx_update(vap, txs); /* count TX retry-fail as Tx errors */ if_inc_counter(vap->iv_ifp, IFCOUNTER_OERRORS, fail); usb_callout_reset(&rvp->ratectl_ch, hz, rum_ratectl_timeout, rvp); RUM_UNLOCK(sc); } static void rum_scan_start(struct ieee80211com *ic) { struct rum_softc *sc = ic->ic_softc; RUM_LOCK(sc); rum_abort_tsf_sync(sc); rum_set_bssid(sc, ieee80211broadcastaddr); RUM_UNLOCK(sc); } static void rum_scan_end(struct ieee80211com *ic) { struct rum_softc *sc = ic->ic_softc; if (ic->ic_flags_ext & IEEE80211_FEXT_BGSCAN) { RUM_LOCK(sc); if (ic->ic_opmode != IEEE80211_M_AHDEMO) rum_enable_tsf_sync(sc); else rum_enable_tsf(sc); rum_set_bssid(sc, sc->sc_bssid); RUM_UNLOCK(sc); } } static void rum_set_channel(struct ieee80211com *ic) { struct rum_softc *sc = ic->ic_softc; RUM_LOCK(sc); rum_set_chan(sc, ic->ic_curchan); RUM_UNLOCK(sc); } static void rum_getradiocaps(struct ieee80211com *ic, int maxchans, int *nchans, struct ieee80211_channel chans[]) { struct rum_softc *sc = ic->ic_softc; uint8_t bands[IEEE80211_MODE_BYTES]; memset(bands, 0, sizeof(bands)); setbit(bands, IEEE80211_MODE_11B); setbit(bands, IEEE80211_MODE_11G); ieee80211_add_channel_list_2ghz(chans, maxchans, nchans, rum_chan_2ghz, nitems(rum_chan_2ghz), bands, 0); if (sc->rf_rev == RT2573_RF_5225 || sc->rf_rev == RT2573_RF_5226) { setbit(bands, IEEE80211_MODE_11A); ieee80211_add_channel_list_5ghz(chans, maxchans, nchans, rum_chan_5ghz, nitems(rum_chan_5ghz), bands, 0); } } static int rum_get_rssi(struct rum_softc *sc, uint8_t raw) { struct ieee80211com *ic = &sc->sc_ic; int lna, agc, rssi; lna = (raw >> 5) & 0x3; agc = raw & 0x1f; if (lna == 0) { /* * No RSSI mapping * * NB: Since RSSI is relative to noise floor, -1 is * adequate for caller to know error happened. 
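/*
 * rum_ratectl_task() above derives rate-control feedback from the STA_CSR
 * counters: frames that succeeded with zero, one, or multiple retries, plus
 * the retry-fail count, are folded into totals where a multi-retry success is
 * charged two retries (a lower bound) and each failure is charged the full
 * retry budget.  A sketch of that accounting with made-up counter values.
 */
#include <stdio.h>

struct tx_stats {
	int nframes;
	int nsuccess;
	int nretries;
};

static struct tx_stats
account(int ok_noretry, int ok_oneretry, int ok_multiretry, int fail,
    int maxretry)
{
	struct tx_stats s;

	s.nframes = ok_noretry + ok_oneretry + ok_multiretry + fail;
	s.nsuccess = s.nframes - fail;
	/* lower bound: a multi-retry success counts as two retries */
	s.nretries = ok_oneretry + ok_multiretry * 2 + fail * (maxretry + 1);
	return (s);
}

int main(void)
{
	struct tx_stats s = account(90, 6, 3, 1, 7);

	printf("frames=%d success=%d retries=%d\n",
	    s.nframes, s.nsuccess, s.nretries);
	return (0);
}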
*/ return -1; } rssi = (2 * agc) - RT2573_NOISE_FLOOR; if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) { rssi += sc->rssi_2ghz_corr; if (lna == 1) rssi -= 64; else if (lna == 2) rssi -= 74; else if (lna == 3) rssi -= 90; } else { rssi += sc->rssi_5ghz_corr; if (!sc->ext_5ghz_lna && lna != 1) rssi += 4; if (lna == 1) rssi -= 64; else if (lna == 2) rssi -= 86; else if (lna == 3) rssi -= 100; } return rssi; } static int rum_pause(struct rum_softc *sc, int timeout) { usb_pause_mtx(&sc->sc_mtx, timeout); return (0); } static device_method_t rum_methods[] = { /* Device interface */ DEVMETHOD(device_probe, rum_match), DEVMETHOD(device_attach, rum_attach), DEVMETHOD(device_detach, rum_detach), DEVMETHOD_END }; static driver_t rum_driver = { .name = "rum", .methods = rum_methods, .size = sizeof(struct rum_softc), }; static devclass_t rum_devclass; DRIVER_MODULE(rum, uhub, rum_driver, rum_devclass, NULL, 0); MODULE_DEPEND(rum, wlan, 1, 1, 1); MODULE_DEPEND(rum, usb, 1, 1, 1); MODULE_VERSION(rum, 1); USB_PNP_HOST_INFO(rum_devs); Index: head/sys/dev/usb/wlan/if_run.c =================================================================== --- head/sys/dev/usb/wlan/if_run.c (revision 330687) +++ head/sys/dev/usb/wlan/if_run.c (revision 330688) @@ -1,6336 +1,6314 @@ /*- * Copyright (c) 2008,2010 Damien Bergamini * ported to FreeBSD by Akinori Furukoshi * USB Consulting, Hans Petter Selasky * Copyright (c) 2013-2014 Kevin Lo * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include __FBSDID("$FreeBSD$"); /*- * Ralink Technology RT2700U/RT2800U/RT3000U/RT3900E chipset driver. 
* http://www.ralinktech.com/ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "usbdevs.h" #define USB_DEBUG_VAR run_debug #include #include #include #include #ifdef USB_DEBUG #define RUN_DEBUG #endif #ifdef RUN_DEBUG int run_debug = 0; static SYSCTL_NODE(_hw_usb, OID_AUTO, run, CTLFLAG_RW, 0, "USB run"); SYSCTL_INT(_hw_usb_run, OID_AUTO, debug, CTLFLAG_RWTUN, &run_debug, 0, "run debug level"); enum { RUN_DEBUG_XMIT = 0x00000001, /* basic xmit operation */ RUN_DEBUG_XMIT_DESC = 0x00000002, /* xmit descriptors */ RUN_DEBUG_RECV = 0x00000004, /* basic recv operation */ RUN_DEBUG_RECV_DESC = 0x00000008, /* recv descriptors */ RUN_DEBUG_STATE = 0x00000010, /* 802.11 state transitions */ RUN_DEBUG_RATE = 0x00000020, /* rate adaptation */ RUN_DEBUG_USB = 0x00000040, /* usb requests */ RUN_DEBUG_FIRMWARE = 0x00000080, /* firmware(9) loading debug */ RUN_DEBUG_BEACON = 0x00000100, /* beacon handling */ RUN_DEBUG_INTR = 0x00000200, /* ISR */ RUN_DEBUG_TEMP = 0x00000400, /* temperature calibration */ RUN_DEBUG_ROM = 0x00000800, /* various ROM info */ RUN_DEBUG_KEY = 0x00001000, /* crypto keys management */ RUN_DEBUG_TXPWR = 0x00002000, /* dump Tx power values */ RUN_DEBUG_RSSI = 0x00004000, /* dump RSSI lookups */ RUN_DEBUG_RESET = 0x00008000, /* initialization progress */ RUN_DEBUG_CALIB = 0x00010000, /* calibration progress */ RUN_DEBUG_CMD = 0x00020000, /* command queue */ RUN_DEBUG_ANY = 0xffffffff }; #define RUN_DPRINTF(_sc, _m, ...) do { \ if (run_debug & (_m)) \ device_printf((_sc)->sc_dev, __VA_ARGS__); \ } while(0) #else #define RUN_DPRINTF(_sc, _m, ...) do { (void) _sc; } while (0) #endif #define IEEE80211_HAS_ADDR4(wh) IEEE80211_IS_DSTODS(wh) /* * Because of LOR in run_key_delete(), use atomic instead. * '& RUN_CMDQ_MASQ' is to loop cmdq[]. 
*/ #define RUN_CMDQ_GET(c) (atomic_fetchadd_32((c), 1) & RUN_CMDQ_MASQ) static const STRUCT_USB_HOST_ID run_devs[] = { #define RUN_DEV(v,p) { USB_VP(USB_VENDOR_##v, USB_PRODUCT_##v##_##p) } #define RUN_DEV_EJECT(v,p) \ { USB_VPI(USB_VENDOR_##v, USB_PRODUCT_##v##_##p, RUN_EJECT) } #define RUN_EJECT 1 RUN_DEV(ABOCOM, RT2770), RUN_DEV(ABOCOM, RT2870), RUN_DEV(ABOCOM, RT3070), RUN_DEV(ABOCOM, RT3071), RUN_DEV(ABOCOM, RT3072), RUN_DEV(ABOCOM2, RT2870_1), RUN_DEV(ACCTON, RT2770), RUN_DEV(ACCTON, RT2870_1), RUN_DEV(ACCTON, RT2870_2), RUN_DEV(ACCTON, RT2870_3), RUN_DEV(ACCTON, RT2870_4), RUN_DEV(ACCTON, RT2870_5), RUN_DEV(ACCTON, RT3070), RUN_DEV(ACCTON, RT3070_1), RUN_DEV(ACCTON, RT3070_2), RUN_DEV(ACCTON, RT3070_3), RUN_DEV(ACCTON, RT3070_4), RUN_DEV(ACCTON, RT3070_5), RUN_DEV(AIRTIES, RT3070), RUN_DEV(ALLWIN, RT2070), RUN_DEV(ALLWIN, RT2770), RUN_DEV(ALLWIN, RT2870), RUN_DEV(ALLWIN, RT3070), RUN_DEV(ALLWIN, RT3071), RUN_DEV(ALLWIN, RT3072), RUN_DEV(ALLWIN, RT3572), RUN_DEV(AMIGO, RT2870_1), RUN_DEV(AMIGO, RT2870_2), RUN_DEV(AMIT, CGWLUSB2GNR), RUN_DEV(AMIT, RT2870_1), RUN_DEV(AMIT2, RT2870), RUN_DEV(ASUS, RT2870_1), RUN_DEV(ASUS, RT2870_2), RUN_DEV(ASUS, RT2870_3), RUN_DEV(ASUS, RT2870_4), RUN_DEV(ASUS, RT2870_5), RUN_DEV(ASUS, USBN13), RUN_DEV(ASUS, RT3070_1), RUN_DEV(ASUS, USBN66), RUN_DEV(ASUS, USB_N53), RUN_DEV(ASUS2, USBN11), RUN_DEV(AZUREWAVE, RT2870_1), RUN_DEV(AZUREWAVE, RT2870_2), RUN_DEV(AZUREWAVE, RT3070_1), RUN_DEV(AZUREWAVE, RT3070_2), RUN_DEV(AZUREWAVE, RT3070_3), RUN_DEV(BELKIN, F9L1103), RUN_DEV(BELKIN, F5D8053V3), RUN_DEV(BELKIN, F5D8055), RUN_DEV(BELKIN, F5D8055V2), RUN_DEV(BELKIN, F6D4050V1), RUN_DEV(BELKIN, F6D4050V2), RUN_DEV(BELKIN, RT2870_1), RUN_DEV(BELKIN, RT2870_2), RUN_DEV(CISCOLINKSYS, AE1000), RUN_DEV(CISCOLINKSYS2, RT3070), RUN_DEV(CISCOLINKSYS3, RT3070), RUN_DEV(CONCEPTRONIC2, RT2870_1), RUN_DEV(CONCEPTRONIC2, RT2870_2), RUN_DEV(CONCEPTRONIC2, RT2870_3), RUN_DEV(CONCEPTRONIC2, RT2870_4), RUN_DEV(CONCEPTRONIC2, RT2870_5), RUN_DEV(CONCEPTRONIC2, RT2870_6), RUN_DEV(CONCEPTRONIC2, RT2870_7), RUN_DEV(CONCEPTRONIC2, RT2870_8), RUN_DEV(CONCEPTRONIC2, RT3070_1), RUN_DEV(CONCEPTRONIC2, RT3070_2), RUN_DEV(CONCEPTRONIC2, VIGORN61), RUN_DEV(COREGA, CGWLUSB300GNM), RUN_DEV(COREGA, RT2870_1), RUN_DEV(COREGA, RT2870_2), RUN_DEV(COREGA, RT2870_3), RUN_DEV(COREGA, RT3070), RUN_DEV(CYBERTAN, RT2870), RUN_DEV(DLINK, RT2870), RUN_DEV(DLINK, RT3072), RUN_DEV(DLINK, DWA127), RUN_DEV(DLINK, DWA140B3), RUN_DEV(DLINK, DWA160B2), RUN_DEV(DLINK, DWA140D1), RUN_DEV(DLINK, DWA162), RUN_DEV(DLINK2, DWA130), RUN_DEV(DLINK2, RT2870_1), RUN_DEV(DLINK2, RT2870_2), RUN_DEV(DLINK2, RT3070_1), RUN_DEV(DLINK2, RT3070_2), RUN_DEV(DLINK2, RT3070_3), RUN_DEV(DLINK2, RT3070_4), RUN_DEV(DLINK2, RT3070_5), RUN_DEV(DLINK2, RT3072), RUN_DEV(DLINK2, RT3072_1), RUN_DEV(EDIMAX, EW7717), RUN_DEV(EDIMAX, EW7718), RUN_DEV(EDIMAX, EW7733UND), RUN_DEV(EDIMAX, RT2870_1), RUN_DEV(ENCORE, RT3070_1), RUN_DEV(ENCORE, RT3070_2), RUN_DEV(ENCORE, RT3070_3), RUN_DEV(GIGABYTE, GNWB31N), RUN_DEV(GIGABYTE, GNWB32L), RUN_DEV(GIGABYTE, RT2870_1), RUN_DEV(GIGASET, RT3070_1), RUN_DEV(GIGASET, RT3070_2), RUN_DEV(GUILLEMOT, HWNU300), RUN_DEV(HAWKING, HWUN2), RUN_DEV(HAWKING, RT2870_1), RUN_DEV(HAWKING, RT2870_2), RUN_DEV(HAWKING, RT3070), RUN_DEV(IODATA, RT3072_1), RUN_DEV(IODATA, RT3072_2), RUN_DEV(IODATA, RT3072_3), RUN_DEV(IODATA, RT3072_4), RUN_DEV(LINKSYS4, RT3070), RUN_DEV(LINKSYS4, WUSB100), RUN_DEV(LINKSYS4, WUSB54GCV3), RUN_DEV(LINKSYS4, WUSB600N), RUN_DEV(LINKSYS4, WUSB600NV2), RUN_DEV(LOGITEC, RT2870_1), RUN_DEV(LOGITEC, 
RT2870_2), RUN_DEV(LOGITEC, RT2870_3), RUN_DEV(LOGITEC, LANW300NU2), RUN_DEV(LOGITEC, LANW150NU2), RUN_DEV(LOGITEC, LANW300NU2S), RUN_DEV(MELCO, WLIUCG300HP), RUN_DEV(MELCO, RT2870_2), RUN_DEV(MELCO, WLIUCAG300N), RUN_DEV(MELCO, WLIUCG300N), RUN_DEV(MELCO, WLIUCG301N), RUN_DEV(MELCO, WLIUCGN), RUN_DEV(MELCO, WLIUCGNM), RUN_DEV(MELCO, WLIUCG300HPV1), RUN_DEV(MELCO, WLIUCGNM2), RUN_DEV(MOTOROLA4, RT2770), RUN_DEV(MOTOROLA4, RT3070), RUN_DEV(MSI, RT3070_1), RUN_DEV(MSI, RT3070_2), RUN_DEV(MSI, RT3070_3), RUN_DEV(MSI, RT3070_4), RUN_DEV(MSI, RT3070_5), RUN_DEV(MSI, RT3070_6), RUN_DEV(MSI, RT3070_7), RUN_DEV(MSI, RT3070_8), RUN_DEV(MSI, RT3070_9), RUN_DEV(MSI, RT3070_10), RUN_DEV(MSI, RT3070_11), RUN_DEV(NETGEAR, WNDA4100), RUN_DEV(OVISLINK, RT3072), RUN_DEV(PARA, RT3070), RUN_DEV(PEGATRON, RT2870), RUN_DEV(PEGATRON, RT3070), RUN_DEV(PEGATRON, RT3070_2), RUN_DEV(PEGATRON, RT3070_3), RUN_DEV(PHILIPS, RT2870), RUN_DEV(PLANEX2, GWUS300MINIS), RUN_DEV(PLANEX2, GWUSMICRON), RUN_DEV(PLANEX2, RT2870), RUN_DEV(PLANEX2, RT3070), RUN_DEV(QCOM, RT2870), RUN_DEV(QUANTA, RT3070), RUN_DEV(RALINK, RT2070), RUN_DEV(RALINK, RT2770), RUN_DEV(RALINK, RT2870), RUN_DEV(RALINK, RT3070), RUN_DEV(RALINK, RT3071), RUN_DEV(RALINK, RT3072), RUN_DEV(RALINK, RT3370), RUN_DEV(RALINK, RT3572), RUN_DEV(RALINK, RT3573), RUN_DEV(RALINK, RT5370), RUN_DEV(RALINK, RT5572), RUN_DEV(RALINK, RT8070), RUN_DEV(SAMSUNG, WIS09ABGN), RUN_DEV(SAMSUNG2, RT2870_1), RUN_DEV(SENAO, RT2870_1), RUN_DEV(SENAO, RT2870_2), RUN_DEV(SENAO, RT2870_3), RUN_DEV(SENAO, RT2870_4), RUN_DEV(SENAO, RT3070), RUN_DEV(SENAO, RT3071), RUN_DEV(SENAO, RT3072_1), RUN_DEV(SENAO, RT3072_2), RUN_DEV(SENAO, RT3072_3), RUN_DEV(SENAO, RT3072_4), RUN_DEV(SENAO, RT3072_5), RUN_DEV(SITECOMEU, RT2770), RUN_DEV(SITECOMEU, RT2870_1), RUN_DEV(SITECOMEU, RT2870_2), RUN_DEV(SITECOMEU, RT2870_3), RUN_DEV(SITECOMEU, RT2870_4), RUN_DEV(SITECOMEU, RT3070), RUN_DEV(SITECOMEU, RT3070_2), RUN_DEV(SITECOMEU, RT3070_3), RUN_DEV(SITECOMEU, RT3070_4), RUN_DEV(SITECOMEU, RT3071), RUN_DEV(SITECOMEU, RT3072_1), RUN_DEV(SITECOMEU, RT3072_2), RUN_DEV(SITECOMEU, RT3072_3), RUN_DEV(SITECOMEU, RT3072_4), RUN_DEV(SITECOMEU, RT3072_5), RUN_DEV(SITECOMEU, RT3072_6), RUN_DEV(SITECOMEU, WL608), RUN_DEV(SPARKLAN, RT2870_1), RUN_DEV(SPARKLAN, RT3070), RUN_DEV(SWEEX2, LW153), RUN_DEV(SWEEX2, LW303), RUN_DEV(SWEEX2, LW313), RUN_DEV(TOSHIBA, RT3070), RUN_DEV(UMEDIA, RT2870_1), RUN_DEV(ZCOM, RT2870_1), RUN_DEV(ZCOM, RT2870_2), RUN_DEV(ZINWELL, RT2870_1), RUN_DEV(ZINWELL, RT2870_2), RUN_DEV(ZINWELL, RT3070), RUN_DEV(ZINWELL, RT3072_1), RUN_DEV(ZINWELL, RT3072_2), RUN_DEV(ZYXEL, RT2870_1), RUN_DEV(ZYXEL, RT2870_2), RUN_DEV(ZYXEL, RT3070), RUN_DEV_EJECT(ZYXEL, NWD2705), RUN_DEV_EJECT(RALINK, RT_STOR), #undef RUN_DEV_EJECT #undef RUN_DEV }; static device_probe_t run_match; static device_attach_t run_attach; static device_detach_t run_detach; static usb_callback_t run_bulk_rx_callback; static usb_callback_t run_bulk_tx_callback0; static usb_callback_t run_bulk_tx_callback1; static usb_callback_t run_bulk_tx_callback2; static usb_callback_t run_bulk_tx_callback3; static usb_callback_t run_bulk_tx_callback4; static usb_callback_t run_bulk_tx_callback5; static void run_autoinst(void *, struct usb_device *, struct usb_attach_arg *); static int run_driver_loaded(struct module *, int, void *); static void run_bulk_tx_callbackN(struct usb_xfer *xfer, usb_error_t error, u_int index); static struct ieee80211vap *run_vap_create(struct ieee80211com *, const char [IFNAMSIZ], int, enum ieee80211_opmode, int, const uint8_t 
[IEEE80211_ADDR_LEN], const uint8_t [IEEE80211_ADDR_LEN]); static void run_vap_delete(struct ieee80211vap *); static void run_cmdq_cb(void *, int); static void run_setup_tx_list(struct run_softc *, struct run_endpoint_queue *); static void run_unsetup_tx_list(struct run_softc *, struct run_endpoint_queue *); static int run_load_microcode(struct run_softc *); static int run_reset(struct run_softc *); static usb_error_t run_do_request(struct run_softc *, struct usb_device_request *, void *); static int run_read(struct run_softc *, uint16_t, uint32_t *); static int run_read_region_1(struct run_softc *, uint16_t, uint8_t *, int); static int run_write_2(struct run_softc *, uint16_t, uint16_t); static int run_write(struct run_softc *, uint16_t, uint32_t); static int run_write_region_1(struct run_softc *, uint16_t, const uint8_t *, int); static int run_set_region_4(struct run_softc *, uint16_t, uint32_t, int); static int run_efuse_read(struct run_softc *, uint16_t, uint16_t *, int); static int run_efuse_read_2(struct run_softc *, uint16_t, uint16_t *); static int run_eeprom_read_2(struct run_softc *, uint16_t, uint16_t *); static int run_rt2870_rf_write(struct run_softc *, uint32_t); static int run_rt3070_rf_read(struct run_softc *, uint8_t, uint8_t *); static int run_rt3070_rf_write(struct run_softc *, uint8_t, uint8_t); static int run_bbp_read(struct run_softc *, uint8_t, uint8_t *); static int run_bbp_write(struct run_softc *, uint8_t, uint8_t); static int run_mcu_cmd(struct run_softc *, uint8_t, uint16_t); static const char *run_get_rf(uint16_t); static void run_rt3593_get_txpower(struct run_softc *); static void run_get_txpower(struct run_softc *); static int run_read_eeprom(struct run_softc *); static struct ieee80211_node *run_node_alloc(struct ieee80211vap *, const uint8_t mac[IEEE80211_ADDR_LEN]); static int run_media_change(struct ifnet *); static int run_newstate(struct ieee80211vap *, enum ieee80211_state, int); static int run_wme_update(struct ieee80211com *); static void run_key_set_cb(void *); static int run_key_set(struct ieee80211vap *, struct ieee80211_key *); static void run_key_delete_cb(void *); static int run_key_delete(struct ieee80211vap *, struct ieee80211_key *); static void run_ratectl_to(void *); static void run_ratectl_cb(void *, int); static void run_drain_fifo(void *); static void run_iter_func(void *, struct ieee80211_node *); static void run_newassoc_cb(void *); static void run_newassoc(struct ieee80211_node *, int); static void run_recv_mgmt(struct ieee80211_node *, struct mbuf *, int, const struct ieee80211_rx_stats *, int, int); static void run_rx_frame(struct run_softc *, struct mbuf *, uint32_t); static void run_tx_free(struct run_endpoint_queue *pq, struct run_tx_data *, int); static void run_set_tx_desc(struct run_softc *, struct run_tx_data *); static int run_tx(struct run_softc *, struct mbuf *, struct ieee80211_node *); static int run_tx_mgt(struct run_softc *, struct mbuf *, struct ieee80211_node *); static int run_sendprot(struct run_softc *, const struct mbuf *, struct ieee80211_node *, int, int); static int run_tx_param(struct run_softc *, struct mbuf *, struct ieee80211_node *, const struct ieee80211_bpf_params *); static int run_raw_xmit(struct ieee80211_node *, struct mbuf *, const struct ieee80211_bpf_params *); static int run_transmit(struct ieee80211com *, struct mbuf *); static void run_start(struct run_softc *); static void run_parent(struct ieee80211com *); static void run_iq_calib(struct run_softc *, u_int); static void run_set_agc(struct 
run_softc *, uint8_t); static void run_select_chan_group(struct run_softc *, int); static void run_set_rx_antenna(struct run_softc *, int); static void run_rt2870_set_chan(struct run_softc *, u_int); static void run_rt3070_set_chan(struct run_softc *, u_int); static void run_rt3572_set_chan(struct run_softc *, u_int); static void run_rt3593_set_chan(struct run_softc *, u_int); static void run_rt5390_set_chan(struct run_softc *, u_int); static void run_rt5592_set_chan(struct run_softc *, u_int); static int run_set_chan(struct run_softc *, struct ieee80211_channel *); static void run_set_channel(struct ieee80211com *); static void run_getradiocaps(struct ieee80211com *, int, int *, struct ieee80211_channel[]); static void run_scan_start(struct ieee80211com *); static void run_scan_end(struct ieee80211com *); static void run_update_beacon(struct ieee80211vap *, int); static void run_update_beacon_cb(void *); static void run_updateprot(struct ieee80211com *); static void run_updateprot_cb(void *); static void run_usb_timeout_cb(void *); static void run_reset_livelock(struct run_softc *); static void run_enable_tsf_sync(struct run_softc *); static void run_enable_tsf(struct run_softc *); static void run_get_tsf(struct run_softc *, uint64_t *); static void run_enable_mrr(struct run_softc *); static void run_set_txpreamble(struct run_softc *); static void run_set_basicrates(struct run_softc *); static void run_set_leds(struct run_softc *, uint16_t); static void run_set_bssid(struct run_softc *, const uint8_t *); static void run_set_macaddr(struct run_softc *, const uint8_t *); static void run_updateslot(struct ieee80211com *); static void run_updateslot_cb(void *); static void run_update_mcast(struct ieee80211com *); static int8_t run_rssi2dbm(struct run_softc *, uint8_t, uint8_t); static void run_update_promisc_locked(struct run_softc *); static void run_update_promisc(struct ieee80211com *); static void run_rt5390_bbp_init(struct run_softc *); static int run_bbp_init(struct run_softc *); static int run_rt3070_rf_init(struct run_softc *); static void run_rt3593_rf_init(struct run_softc *); static void run_rt5390_rf_init(struct run_softc *); static int run_rt3070_filter_calib(struct run_softc *, uint8_t, uint8_t, uint8_t *); static void run_rt3070_rf_setup(struct run_softc *); static void run_rt3593_rf_setup(struct run_softc *); static void run_rt5390_rf_setup(struct run_softc *); static int run_txrx_enable(struct run_softc *); static void run_adjust_freq_offset(struct run_softc *); static void run_init_locked(struct run_softc *); static void run_stop(void *); static void run_delay(struct run_softc *, u_int); static eventhandler_tag run_etag; static const struct rt2860_rate { uint8_t rate; uint8_t mcs; enum ieee80211_phytype phy; uint8_t ctl_ridx; uint16_t sp_ack_dur; uint16_t lp_ack_dur; } rt2860_rates[] = { { 2, 0, IEEE80211_T_DS, 0, 314, 314 }, { 4, 1, IEEE80211_T_DS, 1, 258, 162 }, { 11, 2, IEEE80211_T_DS, 2, 223, 127 }, { 22, 3, IEEE80211_T_DS, 3, 213, 117 }, { 12, 0, IEEE80211_T_OFDM, 4, 60, 60 }, { 18, 1, IEEE80211_T_OFDM, 4, 52, 52 }, { 24, 2, IEEE80211_T_OFDM, 6, 48, 48 }, { 36, 3, IEEE80211_T_OFDM, 6, 44, 44 }, { 48, 4, IEEE80211_T_OFDM, 8, 44, 44 }, { 72, 5, IEEE80211_T_OFDM, 8, 40, 40 }, { 96, 6, IEEE80211_T_OFDM, 8, 40, 40 }, { 108, 7, IEEE80211_T_OFDM, 8, 40, 40 } }; static const struct { uint16_t reg; uint32_t val; } rt2870_def_mac[] = { RT2870_DEF_MAC }; static const struct { uint8_t reg; uint8_t val; } rt2860_def_bbp[] = { RT2860_DEF_BBP },rt5390_def_bbp[] = { RT5390_DEF_BBP 
},rt5592_def_bbp[] = { RT5592_DEF_BBP }; /* * Default values for BBP register R196 for RT5592. */ static const uint8_t rt5592_bbp_r196[] = { 0xe0, 0x1f, 0x38, 0x32, 0x08, 0x28, 0x19, 0x0a, 0xff, 0x00, 0x16, 0x10, 0x10, 0x0b, 0x36, 0x2c, 0x26, 0x24, 0x42, 0x36, 0x30, 0x2d, 0x4c, 0x46, 0x3d, 0x40, 0x3e, 0x42, 0x3d, 0x40, 0x3c, 0x34, 0x2c, 0x2f, 0x3c, 0x35, 0x2e, 0x2a, 0x49, 0x41, 0x36, 0x31, 0x30, 0x30, 0x0e, 0x0d, 0x28, 0x21, 0x1c, 0x16, 0x50, 0x4a, 0x43, 0x40, 0x10, 0x10, 0x10, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7d, 0x14, 0x32, 0x2c, 0x36, 0x4c, 0x43, 0x2c, 0x2e, 0x36, 0x30, 0x6e }; static const struct rfprog { uint8_t chan; uint32_t r1, r2, r3, r4; } rt2860_rf2850[] = { RT2860_RF2850 }; struct { uint8_t n, r, k; } rt3070_freqs[] = { RT3070_RF3052 }; static const struct rt5592_freqs { uint16_t n; uint8_t k, m, r; } rt5592_freqs_20mhz[] = { RT5592_RF5592_20MHZ },rt5592_freqs_40mhz[] = { RT5592_RF5592_40MHZ }; static const struct { uint8_t reg; uint8_t val; } rt3070_def_rf[] = { RT3070_DEF_RF },rt3572_def_rf[] = { RT3572_DEF_RF },rt3593_def_rf[] = { RT3593_DEF_RF },rt5390_def_rf[] = { RT5390_DEF_RF },rt5392_def_rf[] = { RT5392_DEF_RF },rt5592_def_rf[] = { RT5592_DEF_RF },rt5592_2ghz_def_rf[] = { RT5592_2GHZ_DEF_RF },rt5592_5ghz_def_rf[] = { RT5592_5GHZ_DEF_RF }; static const struct { u_int firstchan; u_int lastchan; uint8_t reg; uint8_t val; } rt5592_chan_5ghz[] = { RT5592_CHAN_5GHZ }; static const struct usb_config run_config[RUN_N_XFER] = { [RUN_BULK_TX_BE] = { .type = UE_BULK, .endpoint = UE_ADDR_ANY, .ep_index = 0, .direction = UE_DIR_OUT, .bufsize = RUN_MAX_TXSZ, .flags = {.pipe_bof = 1,.force_short_xfer = 1,}, .callback = run_bulk_tx_callback0, .timeout = 5000, /* ms */ }, [RUN_BULK_TX_BK] = { .type = UE_BULK, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_OUT, .ep_index = 1, .bufsize = RUN_MAX_TXSZ, .flags = {.pipe_bof = 1,.force_short_xfer = 1,}, .callback = run_bulk_tx_callback1, .timeout = 5000, /* ms */ }, [RUN_BULK_TX_VI] = { .type = UE_BULK, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_OUT, .ep_index = 2, .bufsize = RUN_MAX_TXSZ, .flags = {.pipe_bof = 1,.force_short_xfer = 1,}, .callback = run_bulk_tx_callback2, .timeout = 5000, /* ms */ }, [RUN_BULK_TX_VO] = { .type = UE_BULK, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_OUT, .ep_index = 3, .bufsize = RUN_MAX_TXSZ, .flags = {.pipe_bof = 1,.force_short_xfer = 1,}, .callback = run_bulk_tx_callback3, .timeout = 5000, /* ms */ }, [RUN_BULK_TX_HCCA] = { .type = UE_BULK, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_OUT, .ep_index = 4, .bufsize = RUN_MAX_TXSZ, .flags = {.pipe_bof = 1,.force_short_xfer = 1,.no_pipe_ok = 1,}, .callback = run_bulk_tx_callback4, .timeout = 5000, /* ms */ }, [RUN_BULK_TX_PRIO] = { .type = UE_BULK, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_OUT, .ep_index = 5, .bufsize = RUN_MAX_TXSZ, .flags = {.pipe_bof = 1,.force_short_xfer = 1,.no_pipe_ok = 1,}, .callback = run_bulk_tx_callback5, .timeout = 5000, /* ms */ }, [RUN_BULK_RX] = { .type = UE_BULK, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_IN, .bufsize = RUN_MAX_RXSZ, .flags = {.pipe_bof = 1,.short_xfer_ok = 1,}, .callback = run_bulk_rx_callback, } }; static void run_autoinst(void *arg, struct usb_device *udev, struct usb_attach_arg *uaa) { struct usb_interface *iface; struct usb_interface_descriptor *id; if (uaa->dev_state != UAA_DEV_READY) return; iface = usbd_get_iface(udev, 0); if (iface == NULL) return; id = iface->idesc; if (id == NULL || id->bInterfaceClass != UICLASS_MASS) return; if 
(usbd_lookup_id_by_uaa(run_devs, sizeof(run_devs), uaa)) return; if (usb_msc_eject(udev, 0, MSC_EJECT_STOPUNIT) == 0) uaa->dev_state = UAA_DEV_EJECTING; } static int run_driver_loaded(struct module *mod, int what, void *arg) { switch (what) { case MOD_LOAD: run_etag = EVENTHANDLER_REGISTER(usb_dev_configured, run_autoinst, NULL, EVENTHANDLER_PRI_ANY); break; case MOD_UNLOAD: EVENTHANDLER_DEREGISTER(usb_dev_configured, run_etag); break; default: return (EOPNOTSUPP); } return (0); } static int run_match(device_t self) { struct usb_attach_arg *uaa = device_get_ivars(self); if (uaa->usb_mode != USB_MODE_HOST) return (ENXIO); if (uaa->info.bConfigIndex != 0) return (ENXIO); if (uaa->info.bIfaceIndex != RT2860_IFACE_INDEX) return (ENXIO); return (usbd_lookup_id_by_uaa(run_devs, sizeof(run_devs), uaa)); } static int run_attach(device_t self) { struct run_softc *sc = device_get_softc(self); struct usb_attach_arg *uaa = device_get_ivars(self); struct ieee80211com *ic = &sc->sc_ic; uint32_t ver; uint8_t iface_index; int ntries, error; device_set_usb_desc(self); sc->sc_udev = uaa->device; sc->sc_dev = self; if (USB_GET_DRIVER_INFO(uaa) != RUN_EJECT) sc->sc_flags |= RUN_FLAG_FWLOAD_NEEDED; mtx_init(&sc->sc_mtx, device_get_nameunit(sc->sc_dev), MTX_NETWORK_LOCK, MTX_DEF); mbufq_init(&sc->sc_snd, ifqmaxlen); iface_index = RT2860_IFACE_INDEX; error = usbd_transfer_setup(uaa->device, &iface_index, sc->sc_xfer, run_config, RUN_N_XFER, sc, &sc->sc_mtx); if (error) { device_printf(self, "could not allocate USB transfers, " "err=%s\n", usbd_errstr(error)); goto detach; } RUN_LOCK(sc); /* wait for the chip to settle */ for (ntries = 0; ntries < 100; ntries++) { if (run_read(sc, RT2860_ASIC_VER_ID, &ver) != 0) { RUN_UNLOCK(sc); goto detach; } if (ver != 0 && ver != 0xffffffff) break; run_delay(sc, 10); } if (ntries == 100) { device_printf(sc->sc_dev, "timeout waiting for NIC to initialize\n"); RUN_UNLOCK(sc); goto detach; } sc->mac_ver = ver >> 16; sc->mac_rev = ver & 0xffff; /* retrieve RF rev. 
no and various other things from EEPROM */ run_read_eeprom(sc); device_printf(sc->sc_dev, "MAC/BBP RT%04X (rev 0x%04X), RF %s (MIMO %dT%dR), address %s\n", sc->mac_ver, sc->mac_rev, run_get_rf(sc->rf_rev), sc->ntxchains, sc->nrxchains, ether_sprintf(ic->ic_macaddr)); RUN_UNLOCK(sc); ic->ic_softc = sc; ic->ic_name = device_get_nameunit(self); ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */ /* set device capabilities */ ic->ic_caps = IEEE80211_C_STA | /* station mode supported */ IEEE80211_C_MONITOR | /* monitor mode supported */ IEEE80211_C_IBSS | IEEE80211_C_HOSTAP | IEEE80211_C_WDS | /* 4-address traffic works */ IEEE80211_C_MBSS | IEEE80211_C_SHPREAMBLE | /* short preamble supported */ IEEE80211_C_SHSLOT | /* short slot time supported */ IEEE80211_C_WME | /* WME */ IEEE80211_C_WPA; /* WPA1|WPA2(RSN) */ ic->ic_cryptocaps = IEEE80211_CRYPTO_WEP | IEEE80211_CRYPTO_AES_CCM | IEEE80211_CRYPTO_TKIPMIC | IEEE80211_CRYPTO_TKIP; ic->ic_flags |= IEEE80211_F_DATAPAD; ic->ic_flags_ext |= IEEE80211_FEXT_SWBMISS; run_getradiocaps(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans, ic->ic_channels); ieee80211_ifattach(ic); ic->ic_scan_start = run_scan_start; ic->ic_scan_end = run_scan_end; ic->ic_set_channel = run_set_channel; ic->ic_getradiocaps = run_getradiocaps; ic->ic_node_alloc = run_node_alloc; ic->ic_newassoc = run_newassoc; ic->ic_updateslot = run_updateslot; ic->ic_update_mcast = run_update_mcast; ic->ic_wme.wme_update = run_wme_update; ic->ic_raw_xmit = run_raw_xmit; ic->ic_update_promisc = run_update_promisc; ic->ic_vap_create = run_vap_create; ic->ic_vap_delete = run_vap_delete; ic->ic_transmit = run_transmit; ic->ic_parent = run_parent; ieee80211_radiotap_attach(ic, &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap), RUN_TX_RADIOTAP_PRESENT, &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap), RUN_RX_RADIOTAP_PRESENT); TASK_INIT(&sc->cmdq_task, 0, run_cmdq_cb, sc); TASK_INIT(&sc->ratectl_task, 0, run_ratectl_cb, sc); usb_callout_init_mtx(&sc->ratectl_ch, &sc->sc_mtx, 0); if (bootverbose) ieee80211_announce(ic); return (0); detach: run_detach(self); return (ENXIO); } static void run_drain_mbufq(struct run_softc *sc) { struct mbuf *m; struct ieee80211_node *ni; RUN_LOCK_ASSERT(sc, MA_OWNED); while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) { ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; m->m_pkthdr.rcvif = NULL; ieee80211_free_node(ni); m_freem(m); } } static int run_detach(device_t self) { struct run_softc *sc = device_get_softc(self); struct ieee80211com *ic = &sc->sc_ic; int i; RUN_LOCK(sc); sc->sc_detached = 1; RUN_UNLOCK(sc); /* stop all USB transfers */ usbd_transfer_unsetup(sc->sc_xfer, RUN_N_XFER); RUN_LOCK(sc); sc->ratectl_run = RUN_RATECTL_OFF; sc->cmdq_run = sc->cmdq_key_set = RUN_CMDQ_ABORT; /* free TX list, if any */ for (i = 0; i != RUN_EP_QUEUES; i++) run_unsetup_tx_list(sc, &sc->sc_epq[i]); /* Free TX queue */ run_drain_mbufq(sc); RUN_UNLOCK(sc); if (sc->sc_ic.ic_softc == sc) { /* drain tasks */ usb_callout_drain(&sc->ratectl_ch); ieee80211_draintask(ic, &sc->cmdq_task); ieee80211_draintask(ic, &sc->ratectl_task); ieee80211_ifdetach(ic); } mtx_destroy(&sc->sc_mtx); return (0); } static struct ieee80211vap * run_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, enum ieee80211_opmode opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac[IEEE80211_ADDR_LEN]) { struct run_softc *sc = ic->ic_softc; struct run_vap *rvp; struct ieee80211vap *vap; int i; if (sc->rvp_cnt >= RUN_VAP_MAX) 
{ device_printf(sc->sc_dev, "number of VAPs maxed out\n"); return (NULL); } switch (opmode) { case IEEE80211_M_STA: /* enable s/w bmiss handling for sta mode */ flags |= IEEE80211_CLONE_NOBEACONS; /* fall though */ case IEEE80211_M_IBSS: case IEEE80211_M_MONITOR: case IEEE80211_M_HOSTAP: case IEEE80211_M_MBSS: /* other than WDS vaps, only one at a time */ if (!TAILQ_EMPTY(&ic->ic_vaps)) return (NULL); break; case IEEE80211_M_WDS: TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next){ if(vap->iv_opmode != IEEE80211_M_HOSTAP) continue; /* WDS vap's always share the local mac address. */ flags &= ~IEEE80211_CLONE_BSSID; break; } if (vap == NULL) { device_printf(sc->sc_dev, "wds only supported in ap mode\n"); return (NULL); } break; default: device_printf(sc->sc_dev, "unknown opmode %d\n", opmode); return (NULL); } rvp = malloc(sizeof(struct run_vap), M_80211_VAP, M_WAITOK | M_ZERO); vap = &rvp->vap; if (ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid) != 0) { /* out of memory */ free(rvp, M_80211_VAP); return (NULL); } vap->iv_update_beacon = run_update_beacon; vap->iv_max_aid = RT2870_WCID_MAX; /* * To delete the right key from h/w, we need wcid. * Luckily, there is unused space in ieee80211_key{}, wk_pad, * and matching wcid will be written into there. So, cast * some spells to remove 'const' from ieee80211_key{} */ vap->iv_key_delete = (void *)run_key_delete; vap->iv_key_set = (void *)run_key_set; /* override state transition machine */ rvp->newstate = vap->iv_newstate; vap->iv_newstate = run_newstate; if (opmode == IEEE80211_M_IBSS) { rvp->recv_mgmt = vap->iv_recv_mgmt; vap->iv_recv_mgmt = run_recv_mgmt; } ieee80211_ratectl_init(vap); ieee80211_ratectl_setinterval(vap, 1000 /* 1 sec */); /* complete setup */ ieee80211_vap_attach(vap, run_media_change, ieee80211_media_status, mac); /* make sure id is always unique */ for (i = 0; i < RUN_VAP_MAX; i++) { if((sc->rvp_bmap & 1 << i) == 0){ sc->rvp_bmap |= 1 << i; rvp->rvp_id = i; break; } } if (sc->rvp_cnt++ == 0) ic->ic_opmode = opmode; if (opmode == IEEE80211_M_HOSTAP) sc->cmdq_run = RUN_CMDQ_GO; RUN_DPRINTF(sc, RUN_DEBUG_STATE, "rvp_id=%d bmap=%x rvp_cnt=%d\n", rvp->rvp_id, sc->rvp_bmap, sc->rvp_cnt); return (vap); } static void run_vap_delete(struct ieee80211vap *vap) { struct run_vap *rvp = RUN_VAP(vap); struct ieee80211com *ic; struct run_softc *sc; uint8_t rvp_id; if (vap == NULL) return; ic = vap->iv_ic; sc = ic->ic_softc; RUN_LOCK(sc); m_freem(rvp->beacon_mbuf); rvp->beacon_mbuf = NULL; rvp_id = rvp->rvp_id; sc->ratectl_run &= ~(1 << rvp_id); sc->rvp_bmap &= ~(1 << rvp_id); run_set_region_4(sc, RT2860_SKEY(rvp_id, 0), 0, 128); run_set_region_4(sc, RT2860_BCN_BASE(rvp_id), 0, 512); --sc->rvp_cnt; RUN_DPRINTF(sc, RUN_DEBUG_STATE, "vap=%p rvp_id=%d bmap=%x rvp_cnt=%d\n", vap, rvp_id, sc->rvp_bmap, sc->rvp_cnt); RUN_UNLOCK(sc); ieee80211_ratectl_deinit(vap); ieee80211_vap_detach(vap); free(rvp, M_80211_VAP); } /* * There are numbers of functions need to be called in context thread. * Rather than creating taskqueue event for each of those functions, * here is all-for-one taskqueue callback function. This function * guarantees deferred functions are executed in the same order they * were enqueued. * '& RUN_CMDQ_MASQ' is to loop cmdq[]. 
*/ static void run_cmdq_cb(void *arg, int pending) { struct run_softc *sc = arg; uint8_t i; /* call cmdq[].func locked */ RUN_LOCK(sc); for (i = sc->cmdq_exec; sc->cmdq[i].func && pending; i = sc->cmdq_exec, pending--) { RUN_DPRINTF(sc, RUN_DEBUG_CMD, "cmdq_exec=%d pending=%d\n", i, pending); if (sc->cmdq_run == RUN_CMDQ_GO) { /* * If arg0 is NULL, callback func needs more * than one arg. So, pass ptr to cmdq struct. */ if (sc->cmdq[i].arg0) sc->cmdq[i].func(sc->cmdq[i].arg0); else sc->cmdq[i].func(&sc->cmdq[i]); } sc->cmdq[i].arg0 = NULL; sc->cmdq[i].func = NULL; sc->cmdq_exec++; sc->cmdq_exec &= RUN_CMDQ_MASQ; } RUN_UNLOCK(sc); } static void run_setup_tx_list(struct run_softc *sc, struct run_endpoint_queue *pq) { struct run_tx_data *data; memset(pq, 0, sizeof(*pq)); STAILQ_INIT(&pq->tx_qh); STAILQ_INIT(&pq->tx_fh); for (data = &pq->tx_data[0]; data < &pq->tx_data[RUN_TX_RING_COUNT]; data++) { data->sc = sc; STAILQ_INSERT_TAIL(&pq->tx_fh, data, next); } pq->tx_nfree = RUN_TX_RING_COUNT; } static void run_unsetup_tx_list(struct run_softc *sc, struct run_endpoint_queue *pq) { struct run_tx_data *data; /* make sure any subsequent use of the queues will fail */ pq->tx_nfree = 0; STAILQ_INIT(&pq->tx_fh); STAILQ_INIT(&pq->tx_qh); /* free up all node references and mbufs */ for (data = &pq->tx_data[0]; data < &pq->tx_data[RUN_TX_RING_COUNT]; data++) { if (data->m != NULL) { m_freem(data->m); data->m = NULL; } if (data->ni != NULL) { ieee80211_free_node(data->ni); data->ni = NULL; } } } static int run_load_microcode(struct run_softc *sc) { usb_device_request_t req; const struct firmware *fw; const u_char *base; uint32_t tmp; int ntries, error; const uint64_t *temp; uint64_t bytes; RUN_UNLOCK(sc); fw = firmware_get("runfw"); RUN_LOCK(sc); if (fw == NULL) { device_printf(sc->sc_dev, "failed loadfirmware of file %s\n", "runfw"); return ENOENT; } if (fw->datasize != 8192) { device_printf(sc->sc_dev, "invalid firmware size (should be 8KB)\n"); error = EINVAL; goto fail; } /* * RT3071/RT3072 use a different firmware * run-rt2870 (8KB) contains both, * first half (4KB) is for rt2870, * last half is for rt3071. */ base = fw->data; if ((sc->mac_ver) != 0x2860 && (sc->mac_ver) != 0x2872 && (sc->mac_ver) != 0x3070) { base += 4096; } /* cheap sanity check */ temp = fw->data; bytes = *temp; if (bytes != be64toh(0xffffff0210280210ULL)) { device_printf(sc->sc_dev, "firmware checksum failed\n"); error = EINVAL; goto fail; } /* write microcode image */ if (sc->sc_flags & RUN_FLAG_FWLOAD_NEEDED) { run_write_region_1(sc, RT2870_FW_BASE, base, 4096); run_write(sc, RT2860_H2M_MAILBOX_CID, 0xffffffff); run_write(sc, RT2860_H2M_MAILBOX_STATUS, 0xffffffff); } req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = RT2870_RESET; USETW(req.wValue, 8); USETW(req.wIndex, 0); USETW(req.wLength, 0); if ((error = usbd_do_request(sc->sc_udev, &sc->sc_mtx, &req, NULL)) != 0) { device_printf(sc->sc_dev, "firmware reset failed\n"); goto fail; } run_delay(sc, 10); run_write(sc, RT2860_H2M_BBPAGENT, 0); run_write(sc, RT2860_H2M_MAILBOX, 0); run_write(sc, RT2860_H2M_INTSRC, 0); if ((error = run_mcu_cmd(sc, RT2860_MCU_CMD_RFRESET, 0)) != 0) goto fail; /* wait until microcontroller is ready */ for (ntries = 0; ntries < 1000; ntries++) { if ((error = run_read(sc, RT2860_SYS_CTRL, &tmp)) != 0) goto fail; if (tmp & RT2860_MCU_READY) break; run_delay(sc, 10); } if (ntries == 1000) { device_printf(sc->sc_dev, "timeout waiting for MCU to initialize\n"); error = ETIMEDOUT; goto fail; } device_printf(sc->sc_dev, "firmware %s ver. 
%u.%u loaded\n", (base == fw->data) ? "RT2870" : "RT3071", *(base + 4092), *(base + 4093)); fail: firmware_put(fw, FIRMWARE_UNLOAD); return (error); } static int run_reset(struct run_softc *sc) { usb_device_request_t req; req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = RT2870_RESET; USETW(req.wValue, 1); USETW(req.wIndex, 0); USETW(req.wLength, 0); return (usbd_do_request(sc->sc_udev, &sc->sc_mtx, &req, NULL)); } static usb_error_t run_do_request(struct run_softc *sc, struct usb_device_request *req, void *data) { usb_error_t err; int ntries = 10; RUN_LOCK_ASSERT(sc, MA_OWNED); while (ntries--) { err = usbd_do_request_flags(sc->sc_udev, &sc->sc_mtx, req, data, 0, NULL, 250 /* ms */); if (err == 0) break; RUN_DPRINTF(sc, RUN_DEBUG_USB, "Control request failed, %s (retrying)\n", usbd_errstr(err)); run_delay(sc, 10); } return (err); } static int run_read(struct run_softc *sc, uint16_t reg, uint32_t *val) { uint32_t tmp; int error; error = run_read_region_1(sc, reg, (uint8_t *)&tmp, sizeof tmp); if (error == 0) *val = le32toh(tmp); else *val = 0xffffffff; return (error); } static int run_read_region_1(struct run_softc *sc, uint16_t reg, uint8_t *buf, int len) { usb_device_request_t req; req.bmRequestType = UT_READ_VENDOR_DEVICE; req.bRequest = RT2870_READ_REGION_1; USETW(req.wValue, 0); USETW(req.wIndex, reg); USETW(req.wLength, len); return (run_do_request(sc, &req, buf)); } static int run_write_2(struct run_softc *sc, uint16_t reg, uint16_t val) { usb_device_request_t req; req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = RT2870_WRITE_2; USETW(req.wValue, val); USETW(req.wIndex, reg); USETW(req.wLength, 0); return (run_do_request(sc, &req, NULL)); } static int run_write(struct run_softc *sc, uint16_t reg, uint32_t val) { int error; if ((error = run_write_2(sc, reg, val & 0xffff)) == 0) error = run_write_2(sc, reg + 2, val >> 16); return (error); } static int run_write_region_1(struct run_softc *sc, uint16_t reg, const uint8_t *buf, int len) { #if 1 int i, error = 0; /* * NB: the WRITE_REGION_1 command is not stable on RT2860. * We thus issue multiple WRITE_2 commands instead. */ KASSERT((len & 1) == 0, ("run_write_region_1: Data too long.\n")); for (i = 0; i < len && error == 0; i += 2) error = run_write_2(sc, reg + i, buf[i] | buf[i + 1] << 8); return (error); #else usb_device_request_t req; int error = 0; /* * NOTE: It appears the WRITE_REGION_1 command cannot be * passed a huge amount of data, which will crash the * firmware. Limit amount of data passed to 64-bytes at a * time. 
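 *
 * For example (illustrative only), a 150-byte write starting at
 * register `reg' would be split by the loop below into three vendor
 * requests:
 *
 *	wIndex = reg,		wLength = 64
 *	wIndex = reg + 64,	wLength = 64
 *	wIndex = reg + 128,	wLength = 22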
*/ while (len > 0) { int delta = 64; if (delta > len) delta = len; req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = RT2870_WRITE_REGION_1; USETW(req.wValue, 0); USETW(req.wIndex, reg); USETW(req.wLength, delta); error = run_do_request(sc, &req, __DECONST(uint8_t *, buf)); if (error != 0) break; reg += delta; buf += delta; len -= delta; } return (error); #endif } static int run_set_region_4(struct run_softc *sc, uint16_t reg, uint32_t val, int len) { int i, error = 0; KASSERT((len & 3) == 0, ("run_set_region_4: Invalid data length.\n")); for (i = 0; i < len && error == 0; i += 4) error = run_write(sc, reg + i, val); return (error); } static int run_efuse_read(struct run_softc *sc, uint16_t addr, uint16_t *val, int count) { uint32_t tmp; uint16_t reg; int error, ntries; if ((error = run_read(sc, RT3070_EFUSE_CTRL, &tmp)) != 0) return (error); if (count == 2) addr *= 2; /*- * Read one 16-byte block into registers EFUSE_DATA[0-3]: * DATA0: F E D C * DATA1: B A 9 8 * DATA2: 7 6 5 4 * DATA3: 3 2 1 0 */ tmp &= ~(RT3070_EFSROM_MODE_MASK | RT3070_EFSROM_AIN_MASK); tmp |= (addr & ~0xf) << RT3070_EFSROM_AIN_SHIFT | RT3070_EFSROM_KICK; run_write(sc, RT3070_EFUSE_CTRL, tmp); for (ntries = 0; ntries < 100; ntries++) { if ((error = run_read(sc, RT3070_EFUSE_CTRL, &tmp)) != 0) return (error); if (!(tmp & RT3070_EFSROM_KICK)) break; run_delay(sc, 2); } if (ntries == 100) return (ETIMEDOUT); if ((tmp & RT3070_EFUSE_AOUT_MASK) == RT3070_EFUSE_AOUT_MASK) { *val = 0xffff; /* address not found */ return (0); } /* determine to which 32-bit register our 16-bit word belongs */ reg = RT3070_EFUSE_DATA3 - (addr & 0xc); if ((error = run_read(sc, reg, &tmp)) != 0) return (error); tmp >>= (8 * (addr & 0x3)); *val = (addr & 1) ? tmp >> 16 : tmp & 0xffff; return (0); } /* Read 16-bit from eFUSE ROM for RT3xxx. 
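 *
 * Worked example (illustrative): for the even byte offset 0xa within
 * the 16-byte block decoded above, run_efuse_read() computes
 *
 *	reg = RT3070_EFUSE_DATA3 - (0xa & 0xc)	-> EFUSE_DATA1 (bytes B A 9 8)
 *	tmp >>= 8 * (0xa & 0x3)			-> discards bytes 9 and 8
 *	*val = tmp & 0xffff			-> 16-bit word made of bytes B:A
 *
 * assuming, as the arithmetic above implies, that the four EFUSE_DATA
 * registers are consecutive 32-bit registers.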
*/ static int run_efuse_read_2(struct run_softc *sc, uint16_t addr, uint16_t *val) { return (run_efuse_read(sc, addr, val, 2)); } static int run_eeprom_read_2(struct run_softc *sc, uint16_t addr, uint16_t *val) { usb_device_request_t req; uint16_t tmp; int error; addr *= 2; req.bmRequestType = UT_READ_VENDOR_DEVICE; req.bRequest = RT2870_EEPROM_READ; USETW(req.wValue, 0); USETW(req.wIndex, addr); USETW(req.wLength, sizeof(tmp)); error = usbd_do_request(sc->sc_udev, &sc->sc_mtx, &req, &tmp); if (error == 0) *val = le16toh(tmp); else *val = 0xffff; return (error); } static __inline int run_srom_read(struct run_softc *sc, uint16_t addr, uint16_t *val) { /* either eFUSE ROM or EEPROM */ return sc->sc_srom_read(sc, addr, val); } static int run_rt2870_rf_write(struct run_softc *sc, uint32_t val) { uint32_t tmp; int error, ntries; for (ntries = 0; ntries < 10; ntries++) { if ((error = run_read(sc, RT2860_RF_CSR_CFG0, &tmp)) != 0) return (error); if (!(tmp & RT2860_RF_REG_CTRL)) break; } if (ntries == 10) return (ETIMEDOUT); return (run_write(sc, RT2860_RF_CSR_CFG0, val)); } static int run_rt3070_rf_read(struct run_softc *sc, uint8_t reg, uint8_t *val) { uint32_t tmp; int error, ntries; for (ntries = 0; ntries < 100; ntries++) { if ((error = run_read(sc, RT3070_RF_CSR_CFG, &tmp)) != 0) return (error); if (!(tmp & RT3070_RF_KICK)) break; } if (ntries == 100) return (ETIMEDOUT); tmp = RT3070_RF_KICK | reg << 8; if ((error = run_write(sc, RT3070_RF_CSR_CFG, tmp)) != 0) return (error); for (ntries = 0; ntries < 100; ntries++) { if ((error = run_read(sc, RT3070_RF_CSR_CFG, &tmp)) != 0) return (error); if (!(tmp & RT3070_RF_KICK)) break; } if (ntries == 100) return (ETIMEDOUT); *val = tmp & 0xff; return (0); } static int run_rt3070_rf_write(struct run_softc *sc, uint8_t reg, uint8_t val) { uint32_t tmp; int error, ntries; for (ntries = 0; ntries < 10; ntries++) { if ((error = run_read(sc, RT3070_RF_CSR_CFG, &tmp)) != 0) return (error); if (!(tmp & RT3070_RF_KICK)) break; } if (ntries == 10) return (ETIMEDOUT); tmp = RT3070_RF_WRITE | RT3070_RF_KICK | reg << 8 | val; return (run_write(sc, RT3070_RF_CSR_CFG, tmp)); } static int run_bbp_read(struct run_softc *sc, uint8_t reg, uint8_t *val) { uint32_t tmp; int ntries, error; for (ntries = 0; ntries < 10; ntries++) { if ((error = run_read(sc, RT2860_BBP_CSR_CFG, &tmp)) != 0) return (error); if (!(tmp & RT2860_BBP_CSR_KICK)) break; } if (ntries == 10) return (ETIMEDOUT); tmp = RT2860_BBP_CSR_READ | RT2860_BBP_CSR_KICK | reg << 8; if ((error = run_write(sc, RT2860_BBP_CSR_CFG, tmp)) != 0) return (error); for (ntries = 0; ntries < 10; ntries++) { if ((error = run_read(sc, RT2860_BBP_CSR_CFG, &tmp)) != 0) return (error); if (!(tmp & RT2860_BBP_CSR_KICK)) break; } if (ntries == 10) return (ETIMEDOUT); *val = tmp & 0xff; return (0); } static int run_bbp_write(struct run_softc *sc, uint8_t reg, uint8_t val) { uint32_t tmp; int ntries, error; for (ntries = 0; ntries < 10; ntries++) { if ((error = run_read(sc, RT2860_BBP_CSR_CFG, &tmp)) != 0) return (error); if (!(tmp & RT2860_BBP_CSR_KICK)) break; } if (ntries == 10) return (ETIMEDOUT); tmp = RT2860_BBP_CSR_KICK | reg << 8 | val; return (run_write(sc, RT2860_BBP_CSR_CFG, tmp)); } /* * Send a command to the 8051 microcontroller unit. 
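 *
 * In short: poll RT2860_H2M_MAILBOX until H2M_BUSY clears, post the
 * argument (tagged with RT2860_TOKEN_NO_INTR) to the mailbox, then
 * write the command code to RT2860_HOST_CMD.  A caller seen earlier
 * in this file is, for example:
 *
 *	error = run_mcu_cmd(sc, RT2860_MCU_CMD_RFRESET, 0);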
*/ static int run_mcu_cmd(struct run_softc *sc, uint8_t cmd, uint16_t arg) { uint32_t tmp; int error, ntries; for (ntries = 0; ntries < 100; ntries++) { if ((error = run_read(sc, RT2860_H2M_MAILBOX, &tmp)) != 0) return error; if (!(tmp & RT2860_H2M_BUSY)) break; } if (ntries == 100) return ETIMEDOUT; tmp = RT2860_H2M_BUSY | RT2860_TOKEN_NO_INTR << 16 | arg; if ((error = run_write(sc, RT2860_H2M_MAILBOX, tmp)) == 0) error = run_write(sc, RT2860_HOST_CMD, cmd); return (error); } /* * Add `delta' (signed) to each 4-bit sub-word of a 32-bit word. * Used to adjust per-rate Tx power registers. */ static __inline uint32_t b4inc(uint32_t b32, int8_t delta) { int8_t i, b4; for (i = 0; i < 8; i++) { b4 = b32 & 0xf; b4 += delta; if (b4 < 0) b4 = 0; else if (b4 > 0xf) b4 = 0xf; b32 = b32 >> 4 | b4 << 28; } return (b32); } static const char * run_get_rf(uint16_t rev) { switch (rev) { case RT2860_RF_2820: return "RT2820"; case RT2860_RF_2850: return "RT2850"; case RT2860_RF_2720: return "RT2720"; case RT2860_RF_2750: return "RT2750"; case RT3070_RF_3020: return "RT3020"; case RT3070_RF_2020: return "RT2020"; case RT3070_RF_3021: return "RT3021"; case RT3070_RF_3022: return "RT3022"; case RT3070_RF_3052: return "RT3052"; case RT3593_RF_3053: return "RT3053"; case RT5592_RF_5592: return "RT5592"; case RT5390_RF_5370: return "RT5370"; case RT5390_RF_5372: return "RT5372"; } return ("unknown"); } static void run_rt3593_get_txpower(struct run_softc *sc) { uint16_t addr, val; int i; /* Read power settings for 2GHz channels. */ for (i = 0; i < 14; i += 2) { addr = (sc->ntxchains == 3) ? RT3593_EEPROM_PWR2GHZ_BASE1 : RT2860_EEPROM_PWR2GHZ_BASE1; run_srom_read(sc, addr + i / 2, &val); sc->txpow1[i + 0] = (int8_t)(val & 0xff); sc->txpow1[i + 1] = (int8_t)(val >> 8); addr = (sc->ntxchains == 3) ? RT3593_EEPROM_PWR2GHZ_BASE2 : RT2860_EEPROM_PWR2GHZ_BASE2; run_srom_read(sc, addr + i / 2, &val); sc->txpow2[i + 0] = (int8_t)(val & 0xff); sc->txpow2[i + 1] = (int8_t)(val >> 8); if (sc->ntxchains == 3) { run_srom_read(sc, RT3593_EEPROM_PWR2GHZ_BASE3 + i / 2, &val); sc->txpow3[i + 0] = (int8_t)(val & 0xff); sc->txpow3[i + 1] = (int8_t)(val >> 8); } } /* Fix broken Tx power entries. */ for (i = 0; i < 14; i++) { if (sc->txpow1[i] > 31) sc->txpow1[i] = 5; if (sc->txpow2[i] > 31) sc->txpow2[i] = 5; if (sc->ntxchains == 3) { if (sc->txpow3[i] > 31) sc->txpow3[i] = 5; } } /* Read power settings for 5GHz channels. */ for (i = 0; i < 40; i += 2) { run_srom_read(sc, RT3593_EEPROM_PWR5GHZ_BASE1 + i / 2, &val); sc->txpow1[i + 14] = (int8_t)(val & 0xff); sc->txpow1[i + 15] = (int8_t)(val >> 8); run_srom_read(sc, RT3593_EEPROM_PWR5GHZ_BASE2 + i / 2, &val); sc->txpow2[i + 14] = (int8_t)(val & 0xff); sc->txpow2[i + 15] = (int8_t)(val >> 8); if (sc->ntxchains == 3) { run_srom_read(sc, RT3593_EEPROM_PWR5GHZ_BASE3 + i / 2, &val); sc->txpow3[i + 14] = (int8_t)(val & 0xff); sc->txpow3[i + 15] = (int8_t)(val >> 8); } } } static void run_get_txpower(struct run_softc *sc) { uint16_t val; int i; /* Read power settings for 2GHz channels. */ for (i = 0; i < 14; i += 2) { run_srom_read(sc, RT2860_EEPROM_PWR2GHZ_BASE1 + i / 2, &val); sc->txpow1[i + 0] = (int8_t)(val & 0xff); sc->txpow1[i + 1] = (int8_t)(val >> 8); if (sc->mac_ver != 0x5390) { run_srom_read(sc, RT2860_EEPROM_PWR2GHZ_BASE2 + i / 2, &val); sc->txpow2[i + 0] = (int8_t)(val & 0xff); sc->txpow2[i + 1] = (int8_t)(val >> 8); } } /* Fix broken Tx power entries. 
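 *
 * For instance (illustrative only), a blank ROM byte reads back as
 * 0xff, and
 *
 *	(int8_t)0xff == -1  ->  fails the range check  ->  reset to 5
 *
 * where the ranges checked below are roughly 0..39 for the newer
 * 0x5390 family and 0..31 for older parts.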
*/ for (i = 0; i < 14; i++) { if (sc->mac_ver >= 0x5390) { if (sc->txpow1[i] < 0 || sc->txpow1[i] > 39) sc->txpow1[i] = 5; } else { if (sc->txpow1[i] < 0 || sc->txpow1[i] > 31) sc->txpow1[i] = 5; } if (sc->mac_ver > 0x5390) { if (sc->txpow2[i] < 0 || sc->txpow2[i] > 39) sc->txpow2[i] = 5; } else if (sc->mac_ver < 0x5390) { if (sc->txpow2[i] < 0 || sc->txpow2[i] > 31) sc->txpow2[i] = 5; } RUN_DPRINTF(sc, RUN_DEBUG_TXPWR, "chan %d: power1=%d, power2=%d\n", rt2860_rf2850[i].chan, sc->txpow1[i], sc->txpow2[i]); } /* Read power settings for 5GHz channels. */ for (i = 0; i < 40; i += 2) { run_srom_read(sc, RT2860_EEPROM_PWR5GHZ_BASE1 + i / 2, &val); sc->txpow1[i + 14] = (int8_t)(val & 0xff); sc->txpow1[i + 15] = (int8_t)(val >> 8); run_srom_read(sc, RT2860_EEPROM_PWR5GHZ_BASE2 + i / 2, &val); sc->txpow2[i + 14] = (int8_t)(val & 0xff); sc->txpow2[i + 15] = (int8_t)(val >> 8); } /* Fix broken Tx power entries. */ for (i = 0; i < 40; i++ ) { if (sc->mac_ver != 0x5592) { if (sc->txpow1[14 + i] < -7 || sc->txpow1[14 + i] > 15) sc->txpow1[14 + i] = 5; if (sc->txpow2[14 + i] < -7 || sc->txpow2[14 + i] > 15) sc->txpow2[14 + i] = 5; } RUN_DPRINTF(sc, RUN_DEBUG_TXPWR, "chan %d: power1=%d, power2=%d\n", rt2860_rf2850[14 + i].chan, sc->txpow1[14 + i], sc->txpow2[14 + i]); } } static int run_read_eeprom(struct run_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; int8_t delta_2ghz, delta_5ghz; uint32_t tmp; uint16_t val; int ridx, ant, i; /* check whether the ROM is eFUSE ROM or EEPROM */ sc->sc_srom_read = run_eeprom_read_2; if (sc->mac_ver >= 0x3070) { run_read(sc, RT3070_EFUSE_CTRL, &tmp); RUN_DPRINTF(sc, RUN_DEBUG_ROM, "EFUSE_CTRL=0x%08x\n", tmp); if ((tmp & RT3070_SEL_EFUSE) || sc->mac_ver == 0x3593) sc->sc_srom_read = run_efuse_read_2; } /* read ROM version */ run_srom_read(sc, RT2860_EEPROM_VERSION, &val); RUN_DPRINTF(sc, RUN_DEBUG_ROM, "EEPROM rev=%d, FAE=%d\n", val >> 8, val & 0xff); /* read MAC address */ run_srom_read(sc, RT2860_EEPROM_MAC01, &val); ic->ic_macaddr[0] = val & 0xff; ic->ic_macaddr[1] = val >> 8; run_srom_read(sc, RT2860_EEPROM_MAC23, &val); ic->ic_macaddr[2] = val & 0xff; ic->ic_macaddr[3] = val >> 8; run_srom_read(sc, RT2860_EEPROM_MAC45, &val); ic->ic_macaddr[4] = val & 0xff; ic->ic_macaddr[5] = val >> 8; if (sc->mac_ver < 0x3593) { /* read vender BBP settings */ for (i = 0; i < 10; i++) { run_srom_read(sc, RT2860_EEPROM_BBP_BASE + i, &val); sc->bbp[i].val = val & 0xff; sc->bbp[i].reg = val >> 8; RUN_DPRINTF(sc, RUN_DEBUG_ROM, "BBP%d=0x%02x\n", sc->bbp[i].reg, sc->bbp[i].val); } if (sc->mac_ver >= 0x3071) { /* read vendor RF settings */ for (i = 0; i < 10; i++) { run_srom_read(sc, RT3071_EEPROM_RF_BASE + i, &val); sc->rf[i].val = val & 0xff; sc->rf[i].reg = val >> 8; RUN_DPRINTF(sc, RUN_DEBUG_ROM, "RF%d=0x%02x\n", sc->rf[i].reg, sc->rf[i].val); } } } /* read RF frequency offset from EEPROM */ run_srom_read(sc, (sc->mac_ver != 0x3593) ? RT2860_EEPROM_FREQ_LEDS : RT3593_EEPROM_FREQ, &val); sc->freq = ((val & 0xff) != 0xff) ? val & 0xff : 0; RUN_DPRINTF(sc, RUN_DEBUG_ROM, "EEPROM freq offset %d\n", sc->freq & 0xff); run_srom_read(sc, (sc->mac_ver != 0x3593) ? RT2860_EEPROM_FREQ_LEDS : RT3593_EEPROM_FREQ_LEDS, &val); if (val >> 8 != 0xff) { /* read LEDs operating mode */ sc->leds = val >> 8; run_srom_read(sc, (sc->mac_ver != 0x3593) ? RT2860_EEPROM_LED1 : RT3593_EEPROM_LED1, &sc->led[0]); run_srom_read(sc, (sc->mac_ver != 0x3593) ? RT2860_EEPROM_LED2 : RT3593_EEPROM_LED2, &sc->led[1]); run_srom_read(sc, (sc->mac_ver != 0x3593) ? 
RT2860_EEPROM_LED3 : RT3593_EEPROM_LED3, &sc->led[2]); } else { /* broken EEPROM, use default settings */ sc->leds = 0x01; sc->led[0] = 0x5555; sc->led[1] = 0x2221; sc->led[2] = 0x5627; /* differs from RT2860 */ } RUN_DPRINTF(sc, RUN_DEBUG_ROM, "EEPROM LED mode=0x%02x, LEDs=0x%04x/0x%04x/0x%04x\n", sc->leds, sc->led[0], sc->led[1], sc->led[2]); /* read RF information */ if (sc->mac_ver == 0x5390 || sc->mac_ver ==0x5392) run_srom_read(sc, 0x00, &val); else run_srom_read(sc, RT2860_EEPROM_ANTENNA, &val); if (val == 0xffff) { device_printf(sc->sc_dev, "invalid EEPROM antenna info, using default\n"); if (sc->mac_ver == 0x3572) { /* default to RF3052 2T2R */ sc->rf_rev = RT3070_RF_3052; sc->ntxchains = 2; sc->nrxchains = 2; } else if (sc->mac_ver >= 0x3070) { /* default to RF3020 1T1R */ sc->rf_rev = RT3070_RF_3020; sc->ntxchains = 1; sc->nrxchains = 1; } else { /* default to RF2820 1T2R */ sc->rf_rev = RT2860_RF_2820; sc->ntxchains = 1; sc->nrxchains = 2; } } else { if (sc->mac_ver == 0x5390 || sc->mac_ver ==0x5392) { sc->rf_rev = val; run_srom_read(sc, RT2860_EEPROM_ANTENNA, &val); } else sc->rf_rev = (val >> 8) & 0xf; sc->ntxchains = (val >> 4) & 0xf; sc->nrxchains = val & 0xf; } RUN_DPRINTF(sc, RUN_DEBUG_ROM, "EEPROM RF rev=0x%04x chains=%dT%dR\n", sc->rf_rev, sc->ntxchains, sc->nrxchains); /* check if RF supports automatic Tx access gain control */ run_srom_read(sc, RT2860_EEPROM_CONFIG, &val); RUN_DPRINTF(sc, RUN_DEBUG_ROM, "EEPROM CFG 0x%04x\n", val); /* check if driver should patch the DAC issue */ if ((val >> 8) != 0xff) sc->patch_dac = (val >> 15) & 1; if ((val & 0xff) != 0xff) { sc->ext_5ghz_lna = (val >> 3) & 1; sc->ext_2ghz_lna = (val >> 2) & 1; /* check if RF supports automatic Tx access gain control */ sc->calib_2ghz = sc->calib_5ghz = (val >> 1) & 1; /* check if we have a hardware radio switch */ sc->rfswitch = val & 1; } /* Read Tx power settings. */ if (sc->mac_ver == 0x3593) run_rt3593_get_txpower(sc); else run_get_txpower(sc); /* read Tx power compensation for each Tx rate */ run_srom_read(sc, RT2860_EEPROM_DELTAPWR, &val); delta_2ghz = delta_5ghz = 0; if ((val & 0xff) != 0xff && (val & 0x80)) { delta_2ghz = val & 0xf; if (!(val & 0x40)) /* negative number */ delta_2ghz = -delta_2ghz; } val >>= 8; if ((val & 0xff) != 0xff && (val & 0x80)) { delta_5ghz = val & 0xf; if (!(val & 0x40)) /* negative number */ delta_5ghz = -delta_5ghz; } RUN_DPRINTF(sc, RUN_DEBUG_ROM | RUN_DEBUG_TXPWR, "power compensation=%d (2GHz), %d (5GHz)\n", delta_2ghz, delta_5ghz); for (ridx = 0; ridx < 5; ridx++) { uint32_t reg; run_srom_read(sc, RT2860_EEPROM_RPWR + ridx * 2, &val); reg = val; run_srom_read(sc, RT2860_EEPROM_RPWR + ridx * 2 + 1, &val); reg |= (uint32_t)val << 16; sc->txpow20mhz[ridx] = reg; sc->txpow40mhz_2ghz[ridx] = b4inc(reg, delta_2ghz); sc->txpow40mhz_5ghz[ridx] = b4inc(reg, delta_5ghz); RUN_DPRINTF(sc, RUN_DEBUG_ROM | RUN_DEBUG_TXPWR, "ridx %d: power 20MHz=0x%08x, 40MHz/2GHz=0x%08x, " "40MHz/5GHz=0x%08x\n", ridx, sc->txpow20mhz[ridx], sc->txpow40mhz_2ghz[ridx], sc->txpow40mhz_5ghz[ridx]); } /* Read RSSI offsets and LNA gains from EEPROM. */ run_srom_read(sc, (sc->mac_ver != 0x3593) ? RT2860_EEPROM_RSSI1_2GHZ : RT3593_EEPROM_RSSI1_2GHZ, &val); sc->rssi_2ghz[0] = val & 0xff; /* Ant A */ sc->rssi_2ghz[1] = val >> 8; /* Ant B */ run_srom_read(sc, (sc->mac_ver != 0x3593) ? 
RT2860_EEPROM_RSSI2_2GHZ : RT3593_EEPROM_RSSI2_2GHZ, &val); if (sc->mac_ver >= 0x3070) { if (sc->mac_ver == 0x3593) { sc->txmixgain_2ghz = 0; sc->rssi_2ghz[2] = val & 0xff; /* Ant C */ } else { /* * On RT3070 chips (limited to 2 Rx chains), this ROM * field contains the Tx mixer gain for the 2GHz band. */ if ((val & 0xff) != 0xff) sc->txmixgain_2ghz = val & 0x7; } RUN_DPRINTF(sc, RUN_DEBUG_ROM, "tx mixer gain=%u (2GHz)\n", sc->txmixgain_2ghz); } else sc->rssi_2ghz[2] = val & 0xff; /* Ant C */ if (sc->mac_ver == 0x3593) run_srom_read(sc, RT3593_EEPROM_LNA_5GHZ, &val); sc->lna[2] = val >> 8; /* channel group 2 */ run_srom_read(sc, (sc->mac_ver != 0x3593) ? RT2860_EEPROM_RSSI1_5GHZ : RT3593_EEPROM_RSSI1_5GHZ, &val); sc->rssi_5ghz[0] = val & 0xff; /* Ant A */ sc->rssi_5ghz[1] = val >> 8; /* Ant B */ run_srom_read(sc, (sc->mac_ver != 0x3593) ? RT2860_EEPROM_RSSI2_5GHZ : RT3593_EEPROM_RSSI2_5GHZ, &val); if (sc->mac_ver == 0x3572) { /* * On RT3572 chips (limited to 2 Rx chains), this ROM * field contains the Tx mixer gain for the 5GHz band. */ if ((val & 0xff) != 0xff) sc->txmixgain_5ghz = val & 0x7; RUN_DPRINTF(sc, RUN_DEBUG_ROM, "tx mixer gain=%u (5GHz)\n", sc->txmixgain_5ghz); } else sc->rssi_5ghz[2] = val & 0xff; /* Ant C */ if (sc->mac_ver == 0x3593) { sc->txmixgain_5ghz = 0; run_srom_read(sc, RT3593_EEPROM_LNA_5GHZ, &val); } sc->lna[3] = val >> 8; /* channel group 3 */ run_srom_read(sc, (sc->mac_ver != 0x3593) ? RT2860_EEPROM_LNA : RT3593_EEPROM_LNA, &val); sc->lna[0] = val & 0xff; /* channel group 0 */ sc->lna[1] = val >> 8; /* channel group 1 */ /* fix broken 5GHz LNA entries */ if (sc->lna[2] == 0 || sc->lna[2] == 0xff) { RUN_DPRINTF(sc, RUN_DEBUG_ROM, "invalid LNA for channel group %d\n", 2); sc->lna[2] = sc->lna[1]; } if (sc->lna[3] == 0 || sc->lna[3] == 0xff) { RUN_DPRINTF(sc, RUN_DEBUG_ROM, "invalid LNA for channel group %d\n", 3); sc->lna[3] = sc->lna[1]; } /* fix broken RSSI offset entries */ for (ant = 0; ant < 3; ant++) { if (sc->rssi_2ghz[ant] < -10 || sc->rssi_2ghz[ant] > 10) { RUN_DPRINTF(sc, RUN_DEBUG_ROM | RUN_DEBUG_RSSI, "invalid RSSI%d offset: %d (2GHz)\n", ant + 1, sc->rssi_2ghz[ant]); sc->rssi_2ghz[ant] = 0; } if (sc->rssi_5ghz[ant] < -10 || sc->rssi_5ghz[ant] > 10) { RUN_DPRINTF(sc, RUN_DEBUG_ROM | RUN_DEBUG_RSSI, "invalid RSSI%d offset: %d (5GHz)\n", ant + 1, sc->rssi_5ghz[ant]); sc->rssi_5ghz[ant] = 0; } } return (0); } static struct ieee80211_node * run_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) { return malloc(sizeof (struct run_node), M_DEVBUF, M_NOWAIT | M_ZERO); } static int run_media_change(struct ifnet *ifp) { struct ieee80211vap *vap = ifp->if_softc; struct ieee80211com *ic = vap->iv_ic; const struct ieee80211_txparam *tp; struct run_softc *sc = ic->ic_softc; uint8_t rate, ridx; int error; RUN_LOCK(sc); error = ieee80211_media_change(ifp); if (error != ENETRESET) { RUN_UNLOCK(sc); return (error); } tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)]; if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) { struct ieee80211_node *ni; struct run_node *rn; rate = ic->ic_sup_rates[ic->ic_curmode]. 
rs_rates[tp->ucastrate] & IEEE80211_RATE_VAL; for (ridx = 0; ridx < RT2860_RIDX_MAX; ridx++) if (rt2860_rates[ridx].rate == rate) break; ni = ieee80211_ref_node(vap->iv_bss); rn = RUN_NODE(ni); rn->fix_ridx = ridx; RUN_DPRINTF(sc, RUN_DEBUG_RATE, "rate=%d, fix_ridx=%d\n", rate, rn->fix_ridx); ieee80211_free_node(ni); } #if 0 if ((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & RUN_RUNNING)){ run_init_locked(sc); } #endif RUN_UNLOCK(sc); return (0); } static int run_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { const struct ieee80211_txparam *tp; struct ieee80211com *ic = vap->iv_ic; struct run_softc *sc = ic->ic_softc; struct run_vap *rvp = RUN_VAP(vap); enum ieee80211_state ostate; uint32_t sta[3]; uint32_t tmp; uint8_t ratectl; uint8_t restart_ratectl = 0; uint8_t bid = 1 << rvp->rvp_id; ostate = vap->iv_state; RUN_DPRINTF(sc, RUN_DEBUG_STATE, "%s -> %s\n", ieee80211_state_name[ostate], ieee80211_state_name[nstate]); IEEE80211_UNLOCK(ic); RUN_LOCK(sc); ratectl = sc->ratectl_run; /* remember current state */ sc->ratectl_run = RUN_RATECTL_OFF; usb_callout_stop(&sc->ratectl_ch); if (ostate == IEEE80211_S_RUN) { /* turn link LED off */ run_set_leds(sc, RT2860_LED_RADIO); } switch (nstate) { case IEEE80211_S_INIT: restart_ratectl = 1; if (ostate != IEEE80211_S_RUN) break; ratectl &= ~bid; sc->runbmap &= ~bid; /* abort TSF synchronization if there is no vap running */ if (--sc->running == 0) { run_read(sc, RT2860_BCN_TIME_CFG, &tmp); run_write(sc, RT2860_BCN_TIME_CFG, tmp & ~(RT2860_BCN_TX_EN | RT2860_TSF_TIMER_EN | RT2860_TBTT_TIMER_EN)); } break; case IEEE80211_S_RUN: if (!(sc->runbmap & bid)) { if(sc->running++) restart_ratectl = 1; sc->runbmap |= bid; } m_freem(rvp->beacon_mbuf); rvp->beacon_mbuf = NULL; switch (vap->iv_opmode) { case IEEE80211_M_HOSTAP: case IEEE80211_M_MBSS: sc->ap_running |= bid; ic->ic_opmode = vap->iv_opmode; run_update_beacon_cb(vap); break; case IEEE80211_M_IBSS: sc->adhoc_running |= bid; if (!sc->ap_running) ic->ic_opmode = vap->iv_opmode; run_update_beacon_cb(vap); break; case IEEE80211_M_STA: sc->sta_running |= bid; if (!sc->ap_running && !sc->adhoc_running) ic->ic_opmode = vap->iv_opmode; /* read statistic counters (clear on read) */ run_read_region_1(sc, RT2860_TX_STA_CNT0, (uint8_t *)sta, sizeof sta); break; default: ic->ic_opmode = vap->iv_opmode; break; } if (vap->iv_opmode != IEEE80211_M_MONITOR) { struct ieee80211_node *ni; if (ic->ic_bsschan == IEEE80211_CHAN_ANYC) { RUN_UNLOCK(sc); IEEE80211_LOCK(ic); return (-1); } run_updateslot(ic); run_enable_mrr(sc); run_set_txpreamble(sc); run_set_basicrates(sc); ni = ieee80211_ref_node(vap->iv_bss); IEEE80211_ADDR_COPY(sc->sc_bssid, ni->ni_bssid); run_set_bssid(sc, sc->sc_bssid); ieee80211_free_node(ni); run_enable_tsf_sync(sc); /* enable automatic rate adaptation */ tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)]; if (tp->ucastrate == IEEE80211_FIXED_RATE_NONE) ratectl |= bid; } else run_enable_tsf(sc); /* turn link LED on */ run_set_leds(sc, RT2860_LED_RADIO | (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan) ? 
RT2860_LED_LINK_2GHZ : RT2860_LED_LINK_5GHZ)); break; default: RUN_DPRINTF(sc, RUN_DEBUG_STATE, "undefined state\n"); break; } /* restart amrr for running VAPs */ if ((sc->ratectl_run = ratectl) && restart_ratectl) usb_callout_reset(&sc->ratectl_ch, hz, run_ratectl_to, sc); RUN_UNLOCK(sc); IEEE80211_LOCK(ic); return(rvp->newstate(vap, nstate, arg)); } static int run_wme_update(struct ieee80211com *ic) { struct chanAccParams chp; struct run_softc *sc = ic->ic_softc; const struct wmeParams *ac; int aci, error = 0; ieee80211_wme_ic_getparams(ic, &chp); ac = chp.cap_wmeParams; /* update MAC TX configuration registers */ RUN_LOCK(sc); for (aci = 0; aci < WME_NUM_AC; aci++) { error = run_write(sc, RT2860_EDCA_AC_CFG(aci), ac[aci].wmep_logcwmax << 16 | ac[aci].wmep_logcwmin << 12 | ac[aci].wmep_aifsn << 8 | ac[aci].wmep_txopLimit); if (error) goto err; } /* update SCH/DMA registers too */ error = run_write(sc, RT2860_WMM_AIFSN_CFG, ac[WME_AC_VO].wmep_aifsn << 12 | ac[WME_AC_VI].wmep_aifsn << 8 | ac[WME_AC_BK].wmep_aifsn << 4 | ac[WME_AC_BE].wmep_aifsn); if (error) goto err; error = run_write(sc, RT2860_WMM_CWMIN_CFG, ac[WME_AC_VO].wmep_logcwmin << 12 | ac[WME_AC_VI].wmep_logcwmin << 8 | ac[WME_AC_BK].wmep_logcwmin << 4 | ac[WME_AC_BE].wmep_logcwmin); if (error) goto err; error = run_write(sc, RT2860_WMM_CWMAX_CFG, ac[WME_AC_VO].wmep_logcwmax << 12 | ac[WME_AC_VI].wmep_logcwmax << 8 | ac[WME_AC_BK].wmep_logcwmax << 4 | ac[WME_AC_BE].wmep_logcwmax); if (error) goto err; error = run_write(sc, RT2860_WMM_TXOP0_CFG, ac[WME_AC_BK].wmep_txopLimit << 16 | ac[WME_AC_BE].wmep_txopLimit); if (error) goto err; error = run_write(sc, RT2860_WMM_TXOP1_CFG, ac[WME_AC_VO].wmep_txopLimit << 16 | ac[WME_AC_VI].wmep_txopLimit); err: RUN_UNLOCK(sc); if (error) RUN_DPRINTF(sc, RUN_DEBUG_USB, "WME update failed\n"); return (error); } static void run_key_set_cb(void *arg) { struct run_cmdq *cmdq = arg; struct ieee80211vap *vap = cmdq->arg1; struct ieee80211_key *k = cmdq->k; struct ieee80211com *ic = vap->iv_ic; struct run_softc *sc = ic->ic_softc; struct ieee80211_node *ni; u_int cipher = k->wk_cipher->ic_cipher; uint32_t attr; uint16_t base, associd; uint8_t mode, wcid, iv[8]; RUN_LOCK_ASSERT(sc, MA_OWNED); if (vap->iv_opmode == IEEE80211_M_HOSTAP) ni = ieee80211_find_vap_node(&ic->ic_sta, vap, cmdq->mac); else ni = vap->iv_bss; associd = (ni != NULL) ? ni->ni_associd : 0; /* map net80211 cipher to RT2860 security mode */ switch (cipher) { case IEEE80211_CIPHER_WEP: if(k->wk_keylen < 8) mode = RT2860_MODE_WEP40; else mode = RT2860_MODE_WEP104; break; case IEEE80211_CIPHER_TKIP: mode = RT2860_MODE_TKIP; break; case IEEE80211_CIPHER_AES_CCM: mode = RT2860_MODE_AES_CCMP; break; default: RUN_DPRINTF(sc, RUN_DEBUG_KEY, "undefined case\n"); return; } RUN_DPRINTF(sc, RUN_DEBUG_KEY, "associd=%x, keyix=%d, mode=%x, type=%s, tx=%s, rx=%s\n", associd, k->wk_keyix, mode, (k->wk_flags & IEEE80211_KEY_GROUP) ? "group" : "pairwise", (k->wk_flags & IEEE80211_KEY_XMIT) ? "on" : "off", (k->wk_flags & IEEE80211_KEY_RECV) ? "on" : "off"); if (k->wk_flags & IEEE80211_KEY_GROUP) { wcid = 0; /* NB: update WCID0 for group keys */ base = RT2860_SKEY(RUN_VAP(vap)->rvp_id, k->wk_keyix); } else { wcid = (vap->iv_opmode == IEEE80211_M_STA) ? 
1 : RUN_AID2WCID(associd); base = RT2860_PKEY(wcid); } if (cipher == IEEE80211_CIPHER_TKIP) { if(run_write_region_1(sc, base, k->wk_key, 16)) return; if(run_write_region_1(sc, base + 16, &k->wk_key[16], 8)) /* wk_txmic */ return; if(run_write_region_1(sc, base + 24, &k->wk_key[24], 8)) /* wk_rxmic */ return; } else { /* roundup len to 16-bit: XXX fix write_region_1() instead */ if(run_write_region_1(sc, base, k->wk_key, (k->wk_keylen + 1) & ~1)) return; } if (!(k->wk_flags & IEEE80211_KEY_GROUP) || (k->wk_flags & (IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV))) { /* set initial packet number in IV+EIV */ if (cipher == IEEE80211_CIPHER_WEP) { memset(iv, 0, sizeof iv); iv[3] = vap->iv_def_txkey << 6; } else { if (cipher == IEEE80211_CIPHER_TKIP) { iv[0] = k->wk_keytsc >> 8; iv[1] = (iv[0] | 0x20) & 0x7f; iv[2] = k->wk_keytsc; } else /* CCMP */ { iv[0] = k->wk_keytsc; iv[1] = k->wk_keytsc >> 8; iv[2] = 0; } iv[3] = k->wk_keyix << 6 | IEEE80211_WEP_EXTIV; iv[4] = k->wk_keytsc >> 16; iv[5] = k->wk_keytsc >> 24; iv[6] = k->wk_keytsc >> 32; iv[7] = k->wk_keytsc >> 40; } if (run_write_region_1(sc, RT2860_IVEIV(wcid), iv, 8)) return; } if (k->wk_flags & IEEE80211_KEY_GROUP) { /* install group key */ if (run_read(sc, RT2860_SKEY_MODE_0_7, &attr)) return; attr &= ~(0xf << (k->wk_keyix * 4)); attr |= mode << (k->wk_keyix * 4); if (run_write(sc, RT2860_SKEY_MODE_0_7, attr)) return; } else { /* install pairwise key */ if (run_read(sc, RT2860_WCID_ATTR(wcid), &attr)) return; attr = (attr & ~0xf) | (mode << 1) | RT2860_RX_PKEY_EN; if (run_write(sc, RT2860_WCID_ATTR(wcid), attr)) return; } /* TODO create a pass-thru key entry? */ /* need wcid to delete the right key later */ k->wk_pad = wcid; } /* * Don't have to be deferred, but in order to keep order of * execution, i.e. with run_key_delete(), defer this and let * run_cmdq_cb() maintain the order. * * return 0 on error */ static int run_key_set(struct ieee80211vap *vap, struct ieee80211_key *k) { struct ieee80211com *ic = vap->iv_ic; struct run_softc *sc = ic->ic_softc; uint32_t i; i = RUN_CMDQ_GET(&sc->cmdq_store); RUN_DPRINTF(sc, RUN_DEBUG_KEY, "cmdq_store=%d\n", i); sc->cmdq[i].func = run_key_set_cb; sc->cmdq[i].arg0 = NULL; sc->cmdq[i].arg1 = vap; sc->cmdq[i].k = k; IEEE80211_ADDR_COPY(sc->cmdq[i].mac, k->wk_macaddr); ieee80211_runtask(ic, &sc->cmdq_task); /* * To make sure key will be set when hostapd * calls iv_key_set() before if_init(). */ if (vap->iv_opmode == IEEE80211_M_HOSTAP) { RUN_LOCK(sc); sc->cmdq_key_set = RUN_CMDQ_GO; RUN_UNLOCK(sc); } return (1); } /* * If wlan is destroyed without being brought down i.e. without * wlan down or wpa_cli terminate, this function is called after * vap is gone. Don't refer it. 
*/ static void run_key_delete_cb(void *arg) { struct run_cmdq *cmdq = arg; struct run_softc *sc = cmdq->arg1; struct ieee80211_key *k = &cmdq->key; uint32_t attr; uint8_t wcid; RUN_LOCK_ASSERT(sc, MA_OWNED); if (k->wk_flags & IEEE80211_KEY_GROUP) { /* remove group key */ RUN_DPRINTF(sc, RUN_DEBUG_KEY, "removing group key\n"); run_read(sc, RT2860_SKEY_MODE_0_7, &attr); attr &= ~(0xf << (k->wk_keyix * 4)); run_write(sc, RT2860_SKEY_MODE_0_7, attr); } else { /* remove pairwise key */ RUN_DPRINTF(sc, RUN_DEBUG_KEY, "removing key for wcid %x\n", k->wk_pad); /* matching wcid was written to wk_pad in run_key_set() */ wcid = k->wk_pad; run_read(sc, RT2860_WCID_ATTR(wcid), &attr); attr &= ~0xf; run_write(sc, RT2860_WCID_ATTR(wcid), attr); run_set_region_4(sc, RT2860_WCID_ENTRY(wcid), 0, 8); } k->wk_pad = 0; } /* * return 0 on error */ static int run_key_delete(struct ieee80211vap *vap, struct ieee80211_key *k) { struct ieee80211com *ic = vap->iv_ic; struct run_softc *sc = ic->ic_softc; struct ieee80211_key *k0; uint32_t i; /* * When called back, key might be gone. So, make a copy * of some values need to delete keys before deferring. * But, because of LOR with node lock, cannot use lock here. * So, use atomic instead. */ i = RUN_CMDQ_GET(&sc->cmdq_store); RUN_DPRINTF(sc, RUN_DEBUG_KEY, "cmdq_store=%d\n", i); sc->cmdq[i].func = run_key_delete_cb; sc->cmdq[i].arg0 = NULL; sc->cmdq[i].arg1 = sc; k0 = &sc->cmdq[i].key; k0->wk_flags = k->wk_flags; k0->wk_keyix = k->wk_keyix; /* matching wcid was written to wk_pad in run_key_set() */ k0->wk_pad = k->wk_pad; ieee80211_runtask(ic, &sc->cmdq_task); return (1); /* return fake success */ } static void run_ratectl_to(void *arg) { struct run_softc *sc = arg; /* do it in a process context, so it can go sleep */ ieee80211_runtask(&sc->sc_ic, &sc->ratectl_task); /* next timeout will be rescheduled in the callback task */ } /* ARGSUSED */ static void run_ratectl_cb(void *arg, int pending) { struct run_softc *sc = arg; struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); if (vap == NULL) return; if (sc->rvp_cnt > 1 || vap->iv_opmode != IEEE80211_M_STA) { /* * run_reset_livelock() doesn't do anything with AMRR, * but Ralink wants us to call it every 1 sec. So, we * piggyback here rather than creating another callout. * Livelock may occur only in HOSTAP or IBSS mode * (when h/w is sending beacons). */ RUN_LOCK(sc); run_reset_livelock(sc); /* just in case, there are some stats to drain */ run_drain_fifo(sc); RUN_UNLOCK(sc); } ieee80211_iterate_nodes(&ic->ic_sta, run_iter_func, sc); RUN_LOCK(sc); if(sc->ratectl_run != RUN_RATECTL_OFF) usb_callout_reset(&sc->ratectl_ch, hz, run_ratectl_to, sc); RUN_UNLOCK(sc); } static void run_drain_fifo(void *arg) { struct run_softc *sc = arg; uint32_t stat; uint16_t (*wstat)[3]; uint8_t wcid, mcs, pid; int8_t retry; RUN_LOCK_ASSERT(sc, MA_OWNED); for (;;) { /* drain Tx status FIFO (maxsize = 16) */ run_read(sc, RT2860_TX_STAT_FIFO, &stat); RUN_DPRINTF(sc, RUN_DEBUG_XMIT, "tx stat 0x%08x\n", stat); if (!(stat & RT2860_TXQ_VLD)) break; wcid = (stat >> RT2860_TXQ_WCID_SHIFT) & 0xff; /* if no ACK was requested, no feedback is available */ if (!(stat & RT2860_TXQ_ACKREQ) || wcid > RT2870_WCID_MAX || wcid == 0) continue; /* * Even though each stat is Tx-complete-status like format, * the device can poll stats. Because there is no guarantee * that the referring node is still around when read the stats. 
* So that, if we use ieee80211_ratectl_tx_update(), we will * have hard time not to refer already freed node. * * To eliminate such page faults, we poll stats in softc. * Then, update the rates later with ieee80211_ratectl_tx_update(). */ wstat = &(sc->wcid_stats[wcid]); (*wstat)[RUN_TXCNT]++; if (stat & RT2860_TXQ_OK) (*wstat)[RUN_SUCCESS]++; else counter_u64_add(sc->sc_ic.ic_oerrors, 1); /* * Check if there were retries, ie if the Tx success rate is * different from the requested rate. Note that it works only * because we do not allow rate fallback from OFDM to CCK. */ mcs = (stat >> RT2860_TXQ_MCS_SHIFT) & 0x7f; pid = (stat >> RT2860_TXQ_PID_SHIFT) & 0xf; if ((retry = pid -1 - mcs) > 0) { (*wstat)[RUN_TXCNT] += retry; (*wstat)[RUN_RETRY] += retry; } } RUN_DPRINTF(sc, RUN_DEBUG_XMIT, "count=%d\n", sc->fifo_cnt); sc->fifo_cnt = 0; } static void run_iter_func(void *arg, struct ieee80211_node *ni) { struct run_softc *sc = arg; struct ieee80211_ratectl_tx_stats *txs = &sc->sc_txs; struct ieee80211vap *vap = ni->ni_vap; struct run_node *rn = RUN_NODE(ni); union run_stats sta[2]; uint16_t (*wstat)[3]; int error; RUN_LOCK(sc); /* Check for special case */ if (sc->rvp_cnt <= 1 && vap->iv_opmode == IEEE80211_M_STA && ni != vap->iv_bss) goto fail; txs->flags = IEEE80211_RATECTL_TX_STATS_NODE | IEEE80211_RATECTL_TX_STATS_RETRIES; txs->ni = ni; if (sc->rvp_cnt <= 1 && (vap->iv_opmode == IEEE80211_M_IBSS || vap->iv_opmode == IEEE80211_M_STA)) { /* read statistic counters (clear on read) and update AMRR state */ error = run_read_region_1(sc, RT2860_TX_STA_CNT0, (uint8_t *)sta, sizeof sta); if (error != 0) goto fail; /* count failed TX as errors */ if_inc_counter(vap->iv_ifp, IFCOUNTER_OERRORS, le16toh(sta[0].error.fail)); txs->nretries = le16toh(sta[1].tx.retry); txs->nsuccess = le16toh(sta[1].tx.success); /* nretries??? */ txs->nframes = txs->nretries + txs->nsuccess + le16toh(sta[0].error.fail); RUN_DPRINTF(sc, RUN_DEBUG_RATE, "retrycnt=%d success=%d failcnt=%d\n", txs->nretries, txs->nsuccess, le16toh(sta[0].error.fail)); } else { wstat = &(sc->wcid_stats[RUN_AID2WCID(ni->ni_associd)]); if (wstat == &(sc->wcid_stats[0]) || wstat > &(sc->wcid_stats[RT2870_WCID_MAX])) goto fail; txs->nretries = (*wstat)[RUN_RETRY]; txs->nsuccess = (*wstat)[RUN_SUCCESS]; txs->nframes = (*wstat)[RUN_TXCNT]; RUN_DPRINTF(sc, RUN_DEBUG_RATE, "retrycnt=%d txcnt=%d success=%d\n", txs->nretries, txs->nframes, txs->nsuccess); memset(wstat, 0, sizeof(*wstat)); } ieee80211_ratectl_tx_update(vap, txs); rn->amrr_ridx = ieee80211_ratectl_rate(ni, NULL, 0); fail: RUN_UNLOCK(sc); RUN_DPRINTF(sc, RUN_DEBUG_RATE, "ridx=%d\n", rn->amrr_ridx); } static void run_newassoc_cb(void *arg) { struct run_cmdq *cmdq = arg; struct ieee80211_node *ni = cmdq->arg1; struct run_softc *sc = ni->ni_vap->iv_ic->ic_softc; uint8_t wcid = cmdq->wcid; RUN_LOCK_ASSERT(sc, MA_OWNED); run_write_region_1(sc, RT2860_WCID_ENTRY(wcid), ni->ni_macaddr, IEEE80211_ADDR_LEN); memset(&(sc->wcid_stats[wcid]), 0, sizeof(sc->wcid_stats[wcid])); } static void run_newassoc(struct ieee80211_node *ni, int isnew) { struct run_node *rn = RUN_NODE(ni); struct ieee80211_rateset *rs = &ni->ni_rates; struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = vap->iv_ic; struct run_softc *sc = ic->ic_softc; uint8_t rate; uint8_t ridx; uint8_t wcid; int i, j; wcid = (vap->iv_opmode == IEEE80211_M_STA) ? 
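/*
 * Illustrative sketch (not part of the driver): as run_drain_fifo() above
 * shows, the retry count is recovered indirectly.  run_tx() stores "MCS + 1"
 * in the 4-bit PacketID field, so the difference between the PID read back
 * from TX_STAT_FIFO and the MCS the frame finally went out at gives the
 * number of rate-fallback retries (this only works because the driver never
 * falls back from OFDM to CCK).  A minimal decoder using the same shifts and
 * masks (the function name is made up) might be:
 */
#if 0	/* example only, never compiled */
static int
example_fifo_retries(uint32_t stat)
{
	uint8_t mcs, pid;
	int retry;

	if (!(stat & RT2860_TXQ_VLD) || !(stat & RT2860_TXQ_ACKREQ))
		return (0);		/* no valid feedback in this slot */
	mcs = (stat >> RT2860_TXQ_MCS_SHIFT) & 0x7f;
	pid = (stat >> RT2860_TXQ_PID_SHIFT) & 0xf;
	retry = pid - 1 - mcs;		/* same arithmetic as run_drain_fifo() */
	return (retry > 0 ? retry : 0);
}
#endif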
1 : RUN_AID2WCID(ni->ni_associd); if (wcid > RT2870_WCID_MAX) { device_printf(sc->sc_dev, "wcid=%d out of range\n", wcid); return; } /* only interested in true associations */ if (isnew && ni->ni_associd != 0) { /* * This function could be called through a timeout function. * Need to defer. */ uint32_t cnt = RUN_CMDQ_GET(&sc->cmdq_store); RUN_DPRINTF(sc, RUN_DEBUG_STATE, "cmdq_store=%d\n", cnt); sc->cmdq[cnt].func = run_newassoc_cb; sc->cmdq[cnt].arg0 = NULL; sc->cmdq[cnt].arg1 = ni; sc->cmdq[cnt].wcid = wcid; ieee80211_runtask(ic, &sc->cmdq_task); } RUN_DPRINTF(sc, RUN_DEBUG_STATE, "new assoc isnew=%d associd=%x addr=%s\n", isnew, ni->ni_associd, ether_sprintf(ni->ni_macaddr)); for (i = 0; i < rs->rs_nrates; i++) { rate = rs->rs_rates[i] & IEEE80211_RATE_VAL; /* convert 802.11 rate to hardware rate index */ for (ridx = 0; ridx < RT2860_RIDX_MAX; ridx++) if (rt2860_rates[ridx].rate == rate) break; rn->ridx[i] = ridx; /* determine rate of control response frames */ for (j = i; j >= 0; j--) { if ((rs->rs_rates[j] & IEEE80211_RATE_BASIC) && rt2860_rates[rn->ridx[i]].phy == rt2860_rates[rn->ridx[j]].phy) break; } if (j >= 0) { rn->ctl_ridx[i] = rn->ridx[j]; } else { /* no basic rate found, use mandatory one */ rn->ctl_ridx[i] = rt2860_rates[ridx].ctl_ridx; } RUN_DPRINTF(sc, RUN_DEBUG_STATE | RUN_DEBUG_RATE, "rate=0x%02x ridx=%d ctl_ridx=%d\n", rs->rs_rates[i], rn->ridx[i], rn->ctl_ridx[i]); } rate = vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)].mgmtrate; for (ridx = 0; ridx < RT2860_RIDX_MAX; ridx++) if (rt2860_rates[ridx].rate == rate) break; rn->mgt_ridx = ridx; RUN_DPRINTF(sc, RUN_DEBUG_STATE | RUN_DEBUG_RATE, "rate=%d, mgmt_ridx=%d\n", rate, rn->mgt_ridx); RUN_LOCK(sc); if(sc->ratectl_run != RUN_RATECTL_OFF) usb_callout_reset(&sc->ratectl_ch, hz, run_ratectl_to, sc); RUN_UNLOCK(sc); } /* * Return the Rx chain with the highest RSSI for a given frame.
*/ static __inline uint8_t run_maxrssi_chain(struct run_softc *sc, const struct rt2860_rxwi *rxwi) { uint8_t rxchain = 0; if (sc->nrxchains > 1) { if (rxwi->rssi[1] > rxwi->rssi[rxchain]) rxchain = 1; if (sc->nrxchains > 2) if (rxwi->rssi[2] > rxwi->rssi[rxchain]) rxchain = 2; } return (rxchain); } static void run_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m, int subtype, const struct ieee80211_rx_stats *rxs, int rssi, int nf) { struct ieee80211vap *vap = ni->ni_vap; struct run_softc *sc = vap->iv_ic->ic_softc; struct run_vap *rvp = RUN_VAP(vap); uint64_t ni_tstamp, rx_tstamp; rvp->recv_mgmt(ni, m, subtype, rxs, rssi, nf); if (vap->iv_state == IEEE80211_S_RUN && (subtype == IEEE80211_FC0_SUBTYPE_BEACON || subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)) { ni_tstamp = le64toh(ni->ni_tstamp.tsf); RUN_LOCK(sc); run_get_tsf(sc, &rx_tstamp); RUN_UNLOCK(sc); rx_tstamp = le64toh(rx_tstamp); if (ni_tstamp >= rx_tstamp) { RUN_DPRINTF(sc, RUN_DEBUG_RECV | RUN_DEBUG_BEACON, "ibss merge, tsf %ju tstamp %ju\n", (uintmax_t)rx_tstamp, (uintmax_t)ni_tstamp); (void) ieee80211_ibss_merge(ni); } } } static void run_rx_frame(struct run_softc *sc, struct mbuf *m, uint32_t dmalen) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211_frame *wh; struct ieee80211_node *ni; struct rt2870_rxd *rxd; struct rt2860_rxwi *rxwi; uint32_t flags; uint16_t len, rxwisize; uint8_t ant, rssi; int8_t nf; rxwi = mtod(m, struct rt2860_rxwi *); len = le16toh(rxwi->len) & 0xfff; rxwisize = sizeof(struct rt2860_rxwi); if (sc->mac_ver == 0x5592) rxwisize += sizeof(uint64_t); else if (sc->mac_ver == 0x3593) rxwisize += sizeof(uint32_t); if (__predict_false(len > dmalen)) { m_freem(m); counter_u64_add(ic->ic_ierrors, 1); RUN_DPRINTF(sc, RUN_DEBUG_RECV, "bad RXWI length %u > %u\n", len, dmalen); return; } /* Rx descriptor is located at the end */ rxd = (struct rt2870_rxd *)(mtod(m, caddr_t) + dmalen); flags = le32toh(rxd->flags); if (__predict_false(flags & (RT2860_RX_CRCERR | RT2860_RX_ICVERR))) { m_freem(m); counter_u64_add(ic->ic_ierrors, 1); RUN_DPRINTF(sc, RUN_DEBUG_RECV, "%s error.\n", (flags & RT2860_RX_CRCERR)?"CRC":"ICV"); return; } m->m_data += rxwisize; m->m_pkthdr.len = m->m_len -= rxwisize; wh = mtod(m, struct ieee80211_frame *); if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { wh->i_fc[1] &= ~IEEE80211_FC1_PROTECTED; m->m_flags |= M_WEP; } if (flags & RT2860_RX_L2PAD) { RUN_DPRINTF(sc, RUN_DEBUG_RECV, "received RT2860_RX_L2PAD frame\n"); len += 2; } ni = ieee80211_find_rxnode(ic, mtod(m, struct ieee80211_frame_min *)); if (__predict_false(flags & RT2860_RX_MICERR)) { /* report MIC failures to net80211 for TKIP */ if (ni != NULL) ieee80211_notify_michael_failure(ni->ni_vap, wh, rxwi->keyidx); m_freem(m); counter_u64_add(ic->ic_ierrors, 1); RUN_DPRINTF(sc, RUN_DEBUG_RECV, "MIC error. 
Someone is lying.\n"); return; } ant = run_maxrssi_chain(sc, rxwi); rssi = rxwi->rssi[ant]; nf = run_rssi2dbm(sc, rssi, ant); m->m_pkthdr.len = m->m_len = len; if (__predict_false(ieee80211_radiotap_active(ic))) { struct run_rx_radiotap_header *tap = &sc->sc_rxtap; uint16_t phy; tap->wr_flags = 0; tap->wr_chan_freq = htole16(ic->ic_curchan->ic_freq); tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags); tap->wr_antsignal = rssi; tap->wr_antenna = ant; tap->wr_dbm_antsignal = run_rssi2dbm(sc, rssi, ant); tap->wr_rate = 2; /* in case it can't be found below */ RUN_LOCK(sc); run_get_tsf(sc, &tap->wr_tsf); RUN_UNLOCK(sc); phy = le16toh(rxwi->phy); switch (phy & RT2860_PHY_MODE) { case RT2860_PHY_CCK: switch ((phy & RT2860_PHY_MCS) & ~RT2860_PHY_SHPRE) { case 0: tap->wr_rate = 2; break; case 1: tap->wr_rate = 4; break; case 2: tap->wr_rate = 11; break; case 3: tap->wr_rate = 22; break; } if (phy & RT2860_PHY_SHPRE) tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; break; case RT2860_PHY_OFDM: switch (phy & RT2860_PHY_MCS) { case 0: tap->wr_rate = 12; break; case 1: tap->wr_rate = 18; break; case 2: tap->wr_rate = 24; break; case 3: tap->wr_rate = 36; break; case 4: tap->wr_rate = 48; break; case 5: tap->wr_rate = 72; break; case 6: tap->wr_rate = 96; break; case 7: tap->wr_rate = 108; break; } break; } } if (ni != NULL) { (void)ieee80211_input(ni, m, rssi, nf); ieee80211_free_node(ni); } else { (void)ieee80211_input_all(ic, m, rssi, nf); } } static void run_bulk_rx_callback(struct usb_xfer *xfer, usb_error_t error) { struct run_softc *sc = usbd_xfer_softc(xfer); struct ieee80211com *ic = &sc->sc_ic; struct mbuf *m = NULL; struct mbuf *m0; uint32_t dmalen; uint16_t rxwisize; int xferlen; rxwisize = sizeof(struct rt2860_rxwi); if (sc->mac_ver == 0x5592) rxwisize += sizeof(uint64_t); else if (sc->mac_ver == 0x3593) rxwisize += sizeof(uint32_t); usbd_xfer_status(xfer, &xferlen, NULL, NULL, NULL); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: RUN_DPRINTF(sc, RUN_DEBUG_RECV, "rx done, actlen=%d\n", xferlen); if (xferlen < (int)(sizeof(uint32_t) + rxwisize + sizeof(struct rt2870_rxd))) { RUN_DPRINTF(sc, RUN_DEBUG_RECV_DESC | RUN_DEBUG_USB, "xfer too short %d\n", xferlen); goto tr_setup; } m = sc->rx_m; sc->rx_m = NULL; /* FALLTHROUGH */ case USB_ST_SETUP: tr_setup: if (sc->rx_m == NULL) { sc->rx_m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE /* xfer can be bigger than MCLBYTES */); } if (sc->rx_m == NULL) { RUN_DPRINTF(sc, RUN_DEBUG_RECV | RUN_DEBUG_RECV_DESC, "could not allocate mbuf - idle with stall\n"); counter_u64_add(ic->ic_ierrors, 1); usbd_xfer_set_stall(xfer); usbd_xfer_set_frames(xfer, 0); } else { /* * Directly loading a mbuf cluster into DMA to * save some data copying. This works because * there is only one cluster. 
*/ usbd_xfer_set_frame_data(xfer, 0, mtod(sc->rx_m, caddr_t), RUN_MAX_RXSZ); usbd_xfer_set_frames(xfer, 1); } usbd_transfer_submit(xfer); break; default: /* Error */ if (error != USB_ERR_CANCELLED) { /* try to clear stall first */ usbd_xfer_set_stall(xfer); if (error == USB_ERR_TIMEOUT) device_printf(sc->sc_dev, "device timeout\n"); counter_u64_add(ic->ic_ierrors, 1); goto tr_setup; } if (sc->rx_m != NULL) { m_freem(sc->rx_m); sc->rx_m = NULL; } break; } if (m == NULL) return; /* inputting all the frames must be last */ RUN_UNLOCK(sc); m->m_pkthdr.len = m->m_len = xferlen; /* HW can aggregate multiple 802.11 frames in a single USB xfer */ for(;;) { dmalen = le32toh(*mtod(m, uint32_t *)) & 0xffff; if ((dmalen >= (uint32_t)-8) || (dmalen == 0) || ((dmalen & 3) != 0)) { RUN_DPRINTF(sc, RUN_DEBUG_RECV_DESC | RUN_DEBUG_USB, "bad DMA length %u\n", dmalen); break; } if ((dmalen + 8) > (uint32_t)xferlen) { RUN_DPRINTF(sc, RUN_DEBUG_RECV_DESC | RUN_DEBUG_USB, "bad DMA length %u > %d\n", dmalen + 8, xferlen); break; } /* If it is the last one or a single frame, we won't copy. */ if ((xferlen -= dmalen + 8) <= 8) { /* trim 32-bit DMA-len header */ m->m_data += 4; m->m_pkthdr.len = m->m_len -= 4; run_rx_frame(sc, m, dmalen); m = NULL; /* don't free source buffer */ break; } /* copy aggregated frames to another mbuf */ m0 = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (__predict_false(m0 == NULL)) { RUN_DPRINTF(sc, RUN_DEBUG_RECV_DESC, "could not allocate mbuf\n"); counter_u64_add(ic->ic_ierrors, 1); break; } m_copydata(m, 4 /* skip 32-bit DMA-len header */, dmalen + sizeof(struct rt2870_rxd), mtod(m0, caddr_t)); m0->m_pkthdr.len = m0->m_len = dmalen + sizeof(struct rt2870_rxd); run_rx_frame(sc, m0, dmalen); /* update data ptr */ m->m_data += dmalen + 8; m->m_pkthdr.len = m->m_len -= dmalen + 8; } /* make sure we free the source buffer, if any */ m_freem(m); RUN_LOCK(sc); } static void run_tx_free(struct run_endpoint_queue *pq, struct run_tx_data *data, int txerr) { ieee80211_tx_complete(data->ni, data->m, txerr); data->m = NULL; data->ni = NULL; STAILQ_INSERT_TAIL(&pq->tx_fh, data, next); pq->tx_nfree++; } static void run_bulk_tx_callbackN(struct usb_xfer *xfer, usb_error_t error, u_int index) { struct run_softc *sc = usbd_xfer_softc(xfer); struct ieee80211com *ic = &sc->sc_ic; struct run_tx_data *data; struct ieee80211vap *vap = NULL; struct usb_page_cache *pc; struct run_endpoint_queue *pq = &sc->sc_epq[index]; struct mbuf *m; usb_frlength_t size; int actlen; int sumlen; usbd_xfer_status(xfer, &actlen, &sumlen, NULL, NULL); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: RUN_DPRINTF(sc, RUN_DEBUG_XMIT | RUN_DEBUG_USB, "transfer complete: %d bytes @ index %d\n", actlen, index); data = usbd_xfer_get_priv(xfer); run_tx_free(pq, data, 0); usbd_xfer_set_priv(xfer, NULL); /* FALLTHROUGH */ case USB_ST_SETUP: tr_setup: data = STAILQ_FIRST(&pq->tx_qh); if (data == NULL) break; STAILQ_REMOVE_HEAD(&pq->tx_qh, next); m = data->m; size = (sc->mac_ver == 0x5592) ? 
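/*
 * Illustrative sketch (not part of the driver): the loop in
 * run_bulk_rx_callback() above walks a bulk-in transfer that may carry
 * several 802.11 frames.  Each sub-frame is prefixed by a 32-bit
 * little-endian DMA length word, holds the RXWI plus the frame itself, and
 * is followed by a struct rt2870_rxd, so the walk advances by "dmalen + 8"
 * bytes.  The same walk over a plain byte buffer (all names below are made
 * up) could be written as:
 */
#if 0	/* example only, never compiled */
static void
example_walk_rx_aggregate(const uint8_t *buf, int len)
{
	uint32_t dmalen;
	int off = 0;

	while (len - off > 8) {
		memcpy(&dmalen, buf + off, sizeof(dmalen));
		dmalen = le32toh(dmalen) & 0xffff;
		if (dmalen == 0 || (dmalen & 3) != 0 ||
		    off + (int)dmalen + 8 > len)
			break;		/* malformed aggregate */
		/* buf+off+4 .. buf+off+4+dmalen: RXWI + 802.11 frame */
		off += dmalen + 8;	/* 4-byte length header + payload + rxd */
	}
}
#endif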
sizeof(data->desc) + sizeof(uint32_t) : sizeof(data->desc); if ((m->m_pkthdr.len + size + 3 + 8) > RUN_MAX_TXSZ) { RUN_DPRINTF(sc, RUN_DEBUG_XMIT_DESC | RUN_DEBUG_USB, "data overflow, %u bytes\n", m->m_pkthdr.len); run_tx_free(pq, data, 1); goto tr_setup; } pc = usbd_xfer_get_frame(xfer, 0); usbd_copy_in(pc, 0, &data->desc, size); usbd_m_copy_in(pc, size, m, 0, m->m_pkthdr.len); size += m->m_pkthdr.len; /* * Align end on a 4-byte boundary, pad 8 bytes (CRC + * 4-byte padding), and be sure to zero those trailing * bytes: */ usbd_frame_zero(pc, size, ((-size) & 3) + 8); size += ((-size) & 3) + 8; vap = data->ni->ni_vap; if (ieee80211_radiotap_active_vap(vap)) { struct run_tx_radiotap_header *tap = &sc->sc_txtap; struct rt2860_txwi *txwi = (struct rt2860_txwi *)(&data->desc + sizeof(struct rt2870_txd)); tap->wt_flags = 0; tap->wt_rate = rt2860_rates[data->ridx].rate; tap->wt_chan_freq = htole16(ic->ic_curchan->ic_freq); tap->wt_chan_flags = htole16(ic->ic_curchan->ic_flags); tap->wt_hwqueue = index; if (le16toh(txwi->phy) & RT2860_PHY_SHPRE) tap->wt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; ieee80211_radiotap_tx(vap, m); } RUN_DPRINTF(sc, RUN_DEBUG_XMIT | RUN_DEBUG_USB, "sending frame len=%u/%u @ index %d\n", m->m_pkthdr.len, size, index); usbd_xfer_set_frame_len(xfer, 0, size); usbd_xfer_set_priv(xfer, data); usbd_transfer_submit(xfer); run_start(sc); break; default: RUN_DPRINTF(sc, RUN_DEBUG_XMIT | RUN_DEBUG_USB, "USB transfer error, %s\n", usbd_errstr(error)); data = usbd_xfer_get_priv(xfer); if (data != NULL) { if(data->ni != NULL) vap = data->ni->ni_vap; run_tx_free(pq, data, error); usbd_xfer_set_priv(xfer, NULL); } if (vap == NULL) vap = TAILQ_FIRST(&ic->ic_vaps); if (error != USB_ERR_CANCELLED) { if (error == USB_ERR_TIMEOUT) { device_printf(sc->sc_dev, "device timeout\n"); uint32_t i = RUN_CMDQ_GET(&sc->cmdq_store); RUN_DPRINTF(sc, RUN_DEBUG_XMIT | RUN_DEBUG_USB, "cmdq_store=%d\n", i); sc->cmdq[i].func = run_usb_timeout_cb; sc->cmdq[i].arg0 = vap; ieee80211_runtask(ic, &sc->cmdq_task); } /* * Try to clear stall first, also if other * errors occur, hence clearing stall * introduces a 50 ms delay: */ usbd_xfer_set_stall(xfer); goto tr_setup; } break; } } static void run_bulk_tx_callback0(struct usb_xfer *xfer, usb_error_t error) { run_bulk_tx_callbackN(xfer, error, 0); } static void run_bulk_tx_callback1(struct usb_xfer *xfer, usb_error_t error) { run_bulk_tx_callbackN(xfer, error, 1); } static void run_bulk_tx_callback2(struct usb_xfer *xfer, usb_error_t error) { run_bulk_tx_callbackN(xfer, error, 2); } static void run_bulk_tx_callback3(struct usb_xfer *xfer, usb_error_t error) { run_bulk_tx_callbackN(xfer, error, 3); } static void run_bulk_tx_callback4(struct usb_xfer *xfer, usb_error_t error) { run_bulk_tx_callbackN(xfer, error, 4); } static void run_bulk_tx_callback5(struct usb_xfer *xfer, usb_error_t error) { run_bulk_tx_callbackN(xfer, error, 5); } static void run_set_tx_desc(struct run_softc *sc, struct run_tx_data *data) { struct mbuf *m = data->m; struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = data->ni->ni_vap; struct ieee80211_frame *wh; struct rt2870_txd *txd; struct rt2860_txwi *txwi; uint16_t xferlen, txwisize; uint16_t mcs; uint8_t ridx = data->ridx; uint8_t pad; /* get MCS code from rate index */ mcs = rt2860_rates[ridx].mcs; txwisize = (sc->mac_ver == 0x5592) ? 
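/*
 * Illustrative sketch (not part of the driver): the padding in
 * run_bulk_tx_callbackN() above rounds the TXD + TXWI + payload up to a
 * 4-byte boundary and then appends 8 zeroed bytes (room for the CRC plus
 * 4-byte USB padding); "(-size) & 3" is the number of bytes needed to reach
 * the next multiple of four.  The same arithmetic as a stand-alone helper
 * (the helper name is made up):
 */
#if 0	/* example only, never compiled */
static __inline usb_frlength_t
example_tx_xfer_len(usb_frlength_t desc_len, usb_frlength_t payload_len)
{
	usb_frlength_t size = desc_len + payload_len;

	/* pad to a 4-byte boundary, then add the fixed 8-byte tail */
	return (size + ((-size) & 3) + 8);
}
#endif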
sizeof(*txwi) + sizeof(uint32_t) : sizeof(*txwi); xferlen = txwisize + m->m_pkthdr.len; /* roundup to 32-bit alignment */ xferlen = (xferlen + 3) & ~3; txd = (struct rt2870_txd *)&data->desc; txd->len = htole16(xferlen); wh = mtod(m, struct ieee80211_frame *); /* * Either both are true or both are false; in both cases the header * is nicely aligned to 32-bit, so no L2 padding is needed. */ if(IEEE80211_HAS_ADDR4(wh) == IEEE80211_QOS_HAS_SEQ(wh)) pad = 0; else pad = 2; /* setup TX Wireless Information */ txwi = (struct rt2860_txwi *)(txd + 1); txwi->len = htole16(m->m_pkthdr.len - pad); if (rt2860_rates[ridx].phy == IEEE80211_T_DS) { mcs |= RT2860_PHY_CCK; if (ridx != RT2860_RIDX_CCK1 && (ic->ic_flags & IEEE80211_F_SHPREAMBLE)) mcs |= RT2860_PHY_SHPRE; } else mcs |= RT2860_PHY_OFDM; txwi->phy = htole16(mcs); /* check if RTS/CTS or CTS-to-self protection is required */ if (!IEEE80211_IS_MULTICAST(wh->i_addr1) && (m->m_pkthdr.len + IEEE80211_CRC_LEN > vap->iv_rtsthreshold || ((ic->ic_flags & IEEE80211_F_USEPROT) && rt2860_rates[ridx].phy == IEEE80211_T_OFDM))) txwi->txop |= RT2860_TX_TXOP_HT; else txwi->txop |= RT2860_TX_TXOP_BACKOFF; if (vap->iv_opmode != IEEE80211_M_STA && !IEEE80211_QOS_HAS_SEQ(wh)) txwi->xflags |= RT2860_TX_NSEQ; } /* This function must be called locked */ static int run_tx(struct run_softc *sc, struct mbuf *m, struct ieee80211_node *ni) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = ni->ni_vap; struct ieee80211_frame *wh; const struct ieee80211_txparam *tp = ni->ni_txparms; struct run_node *rn = RUN_NODE(ni); struct run_tx_data *data; struct rt2870_txd *txd; struct rt2860_txwi *txwi; uint16_t qos; uint16_t dur; uint16_t qid; uint8_t type; uint8_t tid; uint8_t ridx; uint8_t ctl_ridx; uint8_t qflags; uint8_t xflags = 0; int hasqos; RUN_LOCK_ASSERT(sc, MA_OWNED); wh = mtod(m, struct ieee80211_frame *); type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; /* * There are 7 bulk endpoints: 1 for RX * and 6 for TX (4 EDCAs + HCCA + Prio). * Update 03-14-2009: some devices like the Planex GW-US300MiniS * seem to have only 4 TX bulk endpoints (Fukaumi Naoki). */ if ((hasqos = IEEE80211_QOS_HAS_SEQ(wh))) { uint8_t *frm; if(IEEE80211_HAS_ADDR4(wh)) frm = ((struct ieee80211_qosframe_addr4 *)wh)->i_qos; else frm = ((struct ieee80211_qosframe *)wh)->i_qos; qos = le16toh(*(const uint16_t *)frm); tid = qos & IEEE80211_QOS_TID; qid = TID_TO_WME_AC(tid); } else { qos = 0; tid = 0; qid = WME_AC_BE; } qflags = (qid < 4) ? RT2860_TX_QSEL_EDCA : RT2860_TX_QSEL_HCCA; RUN_DPRINTF(sc, RUN_DEBUG_XMIT, "qos %d\tqid %d\ttid %d\tqflags %x\n", qos, qid, tid, qflags); /* pick a rate index */ if (IEEE80211_IS_MULTICAST(wh->i_addr1) || type != IEEE80211_FC0_TYPE_DATA || m->m_flags & M_EAPOL) { ridx = (ic->ic_curmode == IEEE80211_MODE_11A) ?
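/*
 * Illustrative sketch (not part of the driver): run_set_tx_desc() above only
 * inserts 2 bytes of L2 padding when exactly one of the ADDR4 and QoS fields
 * is present; with neither or both, the 802.11 header length is already a
 * multiple of four.  The decision reduces to a boolean comparison (the
 * helper name is made up):
 */
#if 0	/* example only, never compiled */
static __inline uint8_t
example_l2pad(const struct ieee80211_frame *wh)
{
	int has_addr4 = IEEE80211_HAS_ADDR4(wh) != 0;
	int has_qos = IEEE80211_QOS_HAS_SEQ(wh) != 0;

	/* the header is 32-bit aligned when both or neither field is present */
	return ((has_addr4 == has_qos) ? 0 : 2);
}
#endif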
RT2860_RIDX_OFDM6 : RT2860_RIDX_CCK1; ctl_ridx = rt2860_rates[ridx].ctl_ridx; } else { if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) ridx = rn->fix_ridx; else ridx = rn->amrr_ridx; ctl_ridx = rt2860_rates[ridx].ctl_ridx; } if (!IEEE80211_IS_MULTICAST(wh->i_addr1) && (!hasqos || (qos & IEEE80211_QOS_ACKPOLICY) != IEEE80211_QOS_ACKPOLICY_NOACK)) { xflags |= RT2860_TX_ACK; if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) dur = rt2860_rates[ctl_ridx].sp_ack_dur; else dur = rt2860_rates[ctl_ridx].lp_ack_dur; USETW(wh->i_dur, dur); } /* reserve slots for mgmt packets, just in case */ if (sc->sc_epq[qid].tx_nfree < 3) { RUN_DPRINTF(sc, RUN_DEBUG_XMIT, "tx ring %d is full\n", qid); return (-1); } data = STAILQ_FIRST(&sc->sc_epq[qid].tx_fh); STAILQ_REMOVE_HEAD(&sc->sc_epq[qid].tx_fh, next); sc->sc_epq[qid].tx_nfree--; txd = (struct rt2870_txd *)&data->desc; txd->flags = qflags; txwi = (struct rt2860_txwi *)(txd + 1); txwi->xflags = xflags; if (IEEE80211_IS_MULTICAST(wh->i_addr1)) txwi->wcid = 0; else txwi->wcid = (vap->iv_opmode == IEEE80211_M_STA) ? 1 : RUN_AID2WCID(ni->ni_associd); /* clear leftover garbage bits */ txwi->flags = 0; txwi->txop = 0; data->m = m; data->ni = ni; data->ridx = ridx; run_set_tx_desc(sc, data); /* * The chip keeps track of two kinds of Tx stats: * * TX_STAT_FIFO, for per-WCID stats, and * * TX_STA_CNT0, for all-TX-in-one stats. * * To use the FIFO stats, we need to store the MCS into the driver-private * PacketID field, so that we can tell whose stats they are when we read them. * We add 1 to the MCS because setting the PacketID field to 0 means * that we don't want feedback in TX_STAT_FIFO. * That is what we want for STA mode, since TX_STA_CNT0 does the job there. * * FIFO stats don't count Tx with WCID 0xff, so we do this in run_tx(). */ if (sc->rvp_cnt > 1 || vap->iv_opmode == IEEE80211_M_HOSTAP || vap->iv_opmode == IEEE80211_M_MBSS) { uint16_t pid = (rt2860_rates[ridx].mcs + 1) & 0xf; txwi->len |= htole16(pid << RT2860_TX_PID_SHIFT); /* * Unlike PCI based devices, we don't get any interrupt from * USB devices, so we simulate a FIFO-is-full interrupt here. * Ralink recommends draining the FIFO stats every 100 ms, but the 16 slots * fill up quickly. To prevent overflow, increment a counter on * every FIFO stat request, so we know how many slots are left. * We do this only in HOSTAP or multiple-vap mode since FIFO stats * are used only in those modes. * We just drain the stats here; AMRR itself gets updated every second by * run_ratectl_cb() via callout. * Drain early, otherwise the FIFO overflows. */ if (sc->fifo_cnt++ == 10) { /* * With multiple vaps or if_bridge, if_start() is called * with a non-sleepable lock (tcpinp) held, so we need to defer.
*/ uint32_t i = RUN_CMDQ_GET(&sc->cmdq_store); RUN_DPRINTF(sc, RUN_DEBUG_XMIT, "cmdq_store=%d\n", i); sc->cmdq[i].func = run_drain_fifo; sc->cmdq[i].arg0 = sc; ieee80211_runtask(ic, &sc->cmdq_task); } } STAILQ_INSERT_TAIL(&sc->sc_epq[qid].tx_qh, data, next); usbd_transfer_start(sc->sc_xfer[qid]); RUN_DPRINTF(sc, RUN_DEBUG_XMIT, "sending data frame len=%d rate=%d qid=%d\n", m->m_pkthdr.len + (int)(sizeof(struct rt2870_txd) + sizeof(struct rt2860_txwi)), rt2860_rates[ridx].rate, qid); return (0); } static int run_tx_mgt(struct run_softc *sc, struct mbuf *m, struct ieee80211_node *ni) { struct ieee80211com *ic = &sc->sc_ic; struct run_node *rn = RUN_NODE(ni); struct run_tx_data *data; struct ieee80211_frame *wh; struct rt2870_txd *txd; struct rt2860_txwi *txwi; uint16_t dur; uint8_t ridx = rn->mgt_ridx; uint8_t type; uint8_t xflags = 0; uint8_t wflags = 0; RUN_LOCK_ASSERT(sc, MA_OWNED); wh = mtod(m, struct ieee80211_frame *); type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; /* tell hardware to add timestamp for probe responses */ if ((wh->i_fc[0] & (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) == (IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_PROBE_RESP)) wflags |= RT2860_TX_TS; else if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { xflags |= RT2860_TX_ACK; dur = ieee80211_ack_duration(ic->ic_rt, rt2860_rates[ridx].rate, ic->ic_flags & IEEE80211_F_SHPREAMBLE); USETW(wh->i_dur, dur); } if (sc->sc_epq[0].tx_nfree == 0) /* let caller free mbuf */ return (EIO); data = STAILQ_FIRST(&sc->sc_epq[0].tx_fh); STAILQ_REMOVE_HEAD(&sc->sc_epq[0].tx_fh, next); sc->sc_epq[0].tx_nfree--; txd = (struct rt2870_txd *)&data->desc; txd->flags = RT2860_TX_QSEL_EDCA; txwi = (struct rt2860_txwi *)(txd + 1); txwi->wcid = 0xff; txwi->flags = wflags; txwi->xflags = xflags; txwi->txop = 0; /* clear leftover garbage bits */ data->m = m; data->ni = ni; data->ridx = ridx; run_set_tx_desc(sc, data); RUN_DPRINTF(sc, RUN_DEBUG_XMIT, "sending mgt frame len=%d rate=%d\n", m->m_pkthdr.len + (int)(sizeof(struct rt2870_txd) + sizeof(struct rt2860_txwi)), rt2860_rates[ridx].rate); STAILQ_INSERT_TAIL(&sc->sc_epq[0].tx_qh, data, next); usbd_transfer_start(sc->sc_xfer[0]); return (0); } static int run_sendprot(struct run_softc *sc, const struct mbuf *m, struct ieee80211_node *ni, int prot, int rate) { struct ieee80211com *ic = ni->ni_ic; - struct ieee80211_frame *wh; struct run_tx_data *data; struct rt2870_txd *txd; struct rt2860_txwi *txwi; struct mbuf *mprot; int ridx; int protrate; - int ackrate; - int pktlen; - int isshort; - uint16_t dur; - uint8_t type; uint8_t wflags = 0; uint8_t xflags = 0; RUN_LOCK_ASSERT(sc, MA_OWNED); - KASSERT(prot == IEEE80211_PROT_RTSCTS || prot == IEEE80211_PROT_CTSONLY, - ("protection %d", prot)); - - wh = mtod(m, struct ieee80211_frame *); - pktlen = m->m_pkthdr.len + IEEE80211_CRC_LEN; - type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; - - protrate = ieee80211_ctl_rate(ic->ic_rt, rate); - ackrate = ieee80211_ack_rate(ic->ic_rt, rate); - - isshort = (ic->ic_flags & IEEE80211_F_SHPREAMBLE) != 0; - dur = ieee80211_compute_duration(ic->ic_rt, pktlen, rate, isshort) - + ieee80211_ack_duration(ic->ic_rt, rate, isshort); - wflags = RT2860_TX_FRAG; - /* check that there are free slots before allocating the mbuf */ if (sc->sc_epq[0].tx_nfree == 0) /* let caller free mbuf */ return (ENOBUFS); - if (prot == IEEE80211_PROT_RTSCTS) { - /* NB: CTS is the same size as an ACK */ - dur += ieee80211_ack_duration(ic->ic_rt, rate, isshort); - xflags |= RT2860_TX_ACK; - mprot = ieee80211_alloc_rts(ic, wh->i_addr1, 
wh->i_addr2, dur); - } else { - mprot = ieee80211_alloc_cts(ic, ni->ni_vap->iv_myaddr, dur); - } + mprot = ieee80211_alloc_prot(ni, m, rate, prot); if (mprot == NULL) { if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1); RUN_DPRINTF(sc, RUN_DEBUG_XMIT, "could not allocate mbuf\n"); return (ENOBUFS); } + + protrate = ieee80211_ctl_rate(ic->ic_rt, rate); + wflags = RT2860_TX_FRAG; + xflags = 0; + if (prot == IEEE80211_PROT_RTSCTS) + xflags |= RT2860_TX_ACK; data = STAILQ_FIRST(&sc->sc_epq[0].tx_fh); STAILQ_REMOVE_HEAD(&sc->sc_epq[0].tx_fh, next); sc->sc_epq[0].tx_nfree--; txd = (struct rt2870_txd *)&data->desc; txd->flags = RT2860_TX_QSEL_EDCA; txwi = (struct rt2860_txwi *)(txd + 1); txwi->wcid = 0xff; txwi->flags = wflags; txwi->xflags = xflags; txwi->txop = 0; /* clear leftover garbage bits */ data->m = mprot; data->ni = ieee80211_ref_node(ni); for (ridx = 0; ridx < RT2860_RIDX_MAX; ridx++) if (rt2860_rates[ridx].rate == protrate) break; data->ridx = ridx; run_set_tx_desc(sc, data); RUN_DPRINTF(sc, RUN_DEBUG_XMIT, "sending prot len=%u rate=%u\n", m->m_pkthdr.len, rate); STAILQ_INSERT_TAIL(&sc->sc_epq[0].tx_qh, data, next); usbd_transfer_start(sc->sc_xfer[0]); return (0); } static int run_tx_param(struct run_softc *sc, struct mbuf *m, struct ieee80211_node *ni, const struct ieee80211_bpf_params *params) { struct ieee80211com *ic = ni->ni_ic; struct ieee80211_frame *wh; struct run_tx_data *data; struct rt2870_txd *txd; struct rt2860_txwi *txwi; uint8_t type; uint8_t ridx; uint8_t rate; uint8_t opflags = 0; uint8_t xflags = 0; int error; RUN_LOCK_ASSERT(sc, MA_OWNED); KASSERT(params != NULL, ("no raw xmit params")); wh = mtod(m, struct ieee80211_frame *); type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; rate = params->ibp_rate0; if (!ieee80211_isratevalid(ic->ic_rt, rate)) { /* let caller free mbuf */ return (EINVAL); } if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0) xflags |= RT2860_TX_ACK; if (params->ibp_flags & (IEEE80211_BPF_RTS|IEEE80211_BPF_CTS)) { error = run_sendprot(sc, m, ni, params->ibp_flags & IEEE80211_BPF_RTS ? 
IEEE80211_PROT_RTSCTS : IEEE80211_PROT_CTSONLY, rate); if (error) { /* let caller free mbuf */ return error; } opflags |= /*XXX RT2573_TX_LONG_RETRY |*/ RT2860_TX_TXOP_SIFS; } if (sc->sc_epq[0].tx_nfree == 0) { /* let caller free mbuf */ RUN_DPRINTF(sc, RUN_DEBUG_XMIT, "sending raw frame, but tx ring is full\n"); return (EIO); } data = STAILQ_FIRST(&sc->sc_epq[0].tx_fh); STAILQ_REMOVE_HEAD(&sc->sc_epq[0].tx_fh, next); sc->sc_epq[0].tx_nfree--; txd = (struct rt2870_txd *)&data->desc; txd->flags = RT2860_TX_QSEL_EDCA; txwi = (struct rt2860_txwi *)(txd + 1); txwi->wcid = 0xff; txwi->xflags = xflags; txwi->txop = opflags; txwi->flags = 0; /* clear leftover garbage bits */ data->m = m; data->ni = ni; for (ridx = 0; ridx < RT2860_RIDX_MAX; ridx++) if (rt2860_rates[ridx].rate == rate) break; data->ridx = ridx; run_set_tx_desc(sc, data); RUN_DPRINTF(sc, RUN_DEBUG_XMIT, "sending raw frame len=%u rate=%u\n", m->m_pkthdr.len, rate); STAILQ_INSERT_TAIL(&sc->sc_epq[0].tx_qh, data, next); usbd_transfer_start(sc->sc_xfer[0]); return (0); } static int run_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params) { struct run_softc *sc = ni->ni_ic->ic_softc; int error = 0; RUN_LOCK(sc); /* prevent management frames from being sent if we're not ready */ if (!(sc->sc_flags & RUN_RUNNING)) { error = ENETDOWN; goto done; } if (params == NULL) { /* tx mgt packet */ if ((error = run_tx_mgt(sc, m, ni)) != 0) { RUN_DPRINTF(sc, RUN_DEBUG_XMIT, "mgt tx failed\n"); goto done; } } else { /* tx raw packet with param */ if ((error = run_tx_param(sc, m, ni, params)) != 0) { RUN_DPRINTF(sc, RUN_DEBUG_XMIT, "tx with param failed\n"); goto done; } } done: RUN_UNLOCK(sc); if (error != 0) { if(m != NULL) m_freem(m); } return (error); } static int run_transmit(struct ieee80211com *ic, struct mbuf *m) { struct run_softc *sc = ic->ic_softc; int error; RUN_LOCK(sc); if ((sc->sc_flags & RUN_RUNNING) == 0) { RUN_UNLOCK(sc); return (ENXIO); } error = mbufq_enqueue(&sc->sc_snd, m); if (error) { RUN_UNLOCK(sc); return (error); } run_start(sc); RUN_UNLOCK(sc); return (0); } static void run_start(struct run_softc *sc) { struct ieee80211_node *ni; struct mbuf *m; RUN_LOCK_ASSERT(sc, MA_OWNED); if ((sc->sc_flags & RUN_RUNNING) == 0) return; while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) { ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; if (run_tx(sc, m, ni) != 0) { mbufq_prepend(&sc->sc_snd, m); break; } } } static void run_parent(struct ieee80211com *ic) { struct run_softc *sc = ic->ic_softc; int startall = 0; RUN_LOCK(sc); if (sc->sc_detached) { RUN_UNLOCK(sc); return; } if (ic->ic_nrunning > 0) { if (!(sc->sc_flags & RUN_RUNNING)) { startall = 1; run_init_locked(sc); } else run_update_promisc_locked(sc); } else if ((sc->sc_flags & RUN_RUNNING) && sc->rvp_cnt <= 1) run_stop(sc); RUN_UNLOCK(sc); if (startall) ieee80211_start_all(ic); } static void run_iq_calib(struct run_softc *sc, u_int chan) { uint16_t val; /* Tx0 IQ gain. */ run_bbp_write(sc, 158, 0x2c); if (chan <= 14) run_efuse_read(sc, RT5390_EEPROM_IQ_GAIN_CAL_TX0_2GHZ, &val, 1); else if (chan <= 64) { run_efuse_read(sc, RT5390_EEPROM_IQ_GAIN_CAL_TX0_CH36_TO_CH64_5GHZ, &val, 1); } else if (chan <= 138) { run_efuse_read(sc, RT5390_EEPROM_IQ_GAIN_CAL_TX0_CH100_TO_CH138_5GHZ, &val, 1); } else if (chan <= 165) { run_efuse_read(sc, RT5390_EEPROM_IQ_GAIN_CAL_TX0_CH140_TO_CH165_5GHZ, &val, 1); } else val = 0; run_bbp_write(sc, 159, val); /* Tx0 IQ phase. 
*/ run_bbp_write(sc, 158, 0x2d); if (chan <= 14) { run_efuse_read(sc, RT5390_EEPROM_IQ_PHASE_CAL_TX0_2GHZ, &val, 1); } else if (chan <= 64) { run_efuse_read(sc, RT5390_EEPROM_IQ_PHASE_CAL_TX0_CH36_TO_CH64_5GHZ, &val, 1); } else if (chan <= 138) { run_efuse_read(sc, RT5390_EEPROM_IQ_PHASE_CAL_TX0_CH100_TO_CH138_5GHZ, &val, 1); } else if (chan <= 165) { run_efuse_read(sc, RT5390_EEPROM_IQ_PHASE_CAL_TX0_CH140_TO_CH165_5GHZ, &val, 1); } else val = 0; run_bbp_write(sc, 159, val); /* Tx1 IQ gain. */ run_bbp_write(sc, 158, 0x4a); if (chan <= 14) { run_efuse_read(sc, RT5390_EEPROM_IQ_GAIN_CAL_TX1_2GHZ, &val, 1); } else if (chan <= 64) { run_efuse_read(sc, RT5390_EEPROM_IQ_GAIN_CAL_TX1_CH36_TO_CH64_5GHZ, &val, 1); } else if (chan <= 138) { run_efuse_read(sc, RT5390_EEPROM_IQ_GAIN_CAL_TX1_CH100_TO_CH138_5GHZ, &val, 1); } else if (chan <= 165) { run_efuse_read(sc, RT5390_EEPROM_IQ_GAIN_CAL_TX1_CH140_TO_CH165_5GHZ, &val, 1); } else val = 0; run_bbp_write(sc, 159, val); /* Tx1 IQ phase. */ run_bbp_write(sc, 158, 0x4b); if (chan <= 14) { run_efuse_read(sc, RT5390_EEPROM_IQ_PHASE_CAL_TX1_2GHZ, &val, 1); } else if (chan <= 64) { run_efuse_read(sc, RT5390_EEPROM_IQ_PHASE_CAL_TX1_CH36_TO_CH64_5GHZ, &val, 1); } else if (chan <= 138) { run_efuse_read(sc, RT5390_EEPROM_IQ_PHASE_CAL_TX1_CH100_TO_CH138_5GHZ, &val, 1); } else if (chan <= 165) { run_efuse_read(sc, RT5390_EEPROM_IQ_PHASE_CAL_TX1_CH140_TO_CH165_5GHZ, &val, 1); } else val = 0; run_bbp_write(sc, 159, val); /* RF IQ compensation control. */ run_bbp_write(sc, 158, 0x04); run_efuse_read(sc, RT5390_EEPROM_RF_IQ_COMPENSATION_CTL, &val, 1); run_bbp_write(sc, 159, val); /* RF IQ imbalance compensation control. */ run_bbp_write(sc, 158, 0x03); run_efuse_read(sc, RT5390_EEPROM_RF_IQ_IMBALANCE_COMPENSATION_CTL, &val, 1); run_bbp_write(sc, 159, val); } static void run_set_agc(struct run_softc *sc, uint8_t agc) { uint8_t bbp; if (sc->mac_ver == 0x3572) { run_bbp_read(sc, 27, &bbp); bbp &= ~(0x3 << 5); run_bbp_write(sc, 27, bbp | 0 << 5); /* select Rx0 */ run_bbp_write(sc, 66, agc); run_bbp_write(sc, 27, bbp | 1 << 5); /* select Rx1 */ run_bbp_write(sc, 66, agc); } else run_bbp_write(sc, 66, agc); } static void run_select_chan_group(struct run_softc *sc, int group) { uint32_t tmp; uint8_t agc; run_bbp_write(sc, 62, 0x37 - sc->lna[group]); run_bbp_write(sc, 63, 0x37 - sc->lna[group]); run_bbp_write(sc, 64, 0x37 - sc->lna[group]); if (sc->mac_ver < 0x3572) run_bbp_write(sc, 86, 0x00); if (sc->mac_ver == 0x3593) { run_bbp_write(sc, 77, 0x98); run_bbp_write(sc, 83, (group == 0) ? 0x8a : 0x9a); } if (group == 0) { if (sc->ext_2ghz_lna) { if (sc->mac_ver >= 0x5390) run_bbp_write(sc, 75, 0x52); else { run_bbp_write(sc, 82, 0x62); run_bbp_write(sc, 75, 0x46); } } else { if (sc->mac_ver == 0x5592) { run_bbp_write(sc, 79, 0x1c); run_bbp_write(sc, 80, 0x0e); run_bbp_write(sc, 81, 0x3a); run_bbp_write(sc, 82, 0x62); run_bbp_write(sc, 195, 0x80); run_bbp_write(sc, 196, 0xe0); run_bbp_write(sc, 195, 0x81); run_bbp_write(sc, 196, 0x1f); run_bbp_write(sc, 195, 0x82); run_bbp_write(sc, 196, 0x38); run_bbp_write(sc, 195, 0x83); run_bbp_write(sc, 196, 0x32); run_bbp_write(sc, 195, 0x85); run_bbp_write(sc, 196, 0x28); run_bbp_write(sc, 195, 0x86); run_bbp_write(sc, 196, 0x19); } else if (sc->mac_ver >= 0x5390) run_bbp_write(sc, 75, 0x50); else { run_bbp_write(sc, 82, (sc->mac_ver == 0x3593) ? 
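/*
 * Illustrative sketch (not part of the driver): run_iq_calib() above
 * programs the IQ calibration block through an indirect window, writing the
 * parameter index to BBP register 158 and the parameter value to BBP
 * register 159, one pair per parameter.  The access pattern on its own (the
 * helper name is made up):
 */
#if 0	/* example only, never compiled */
static void
example_bbp_indirect_write(struct run_softc *sc, uint8_t index, uint8_t val)
{
	run_bbp_write(sc, 158, index);	/* select the calibration parameter */
	run_bbp_write(sc, 159, val);	/* write its value */
}
#endif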
0x62 : 0x84); run_bbp_write(sc, 75, 0x50); } } } else { if (sc->mac_ver == 0x5592) { run_bbp_write(sc, 79, 0x18); run_bbp_write(sc, 80, 0x08); run_bbp_write(sc, 81, 0x38); run_bbp_write(sc, 82, 0x92); run_bbp_write(sc, 195, 0x80); run_bbp_write(sc, 196, 0xf0); run_bbp_write(sc, 195, 0x81); run_bbp_write(sc, 196, 0x1e); run_bbp_write(sc, 195, 0x82); run_bbp_write(sc, 196, 0x28); run_bbp_write(sc, 195, 0x83); run_bbp_write(sc, 196, 0x20); run_bbp_write(sc, 195, 0x85); run_bbp_write(sc, 196, 0x7f); run_bbp_write(sc, 195, 0x86); run_bbp_write(sc, 196, 0x7f); } else if (sc->mac_ver == 0x3572) run_bbp_write(sc, 82, 0x94); else run_bbp_write(sc, 82, (sc->mac_ver == 0x3593) ? 0x82 : 0xf2); if (sc->ext_5ghz_lna) run_bbp_write(sc, 75, 0x46); else run_bbp_write(sc, 75, 0x50); } run_read(sc, RT2860_TX_BAND_CFG, &tmp); tmp &= ~(RT2860_5G_BAND_SEL_N | RT2860_5G_BAND_SEL_P); tmp |= (group == 0) ? RT2860_5G_BAND_SEL_N : RT2860_5G_BAND_SEL_P; run_write(sc, RT2860_TX_BAND_CFG, tmp); /* enable appropriate Power Amplifiers and Low Noise Amplifiers */ tmp = RT2860_RFTR_EN | RT2860_TRSW_EN | RT2860_LNA_PE0_EN; if (sc->mac_ver == 0x3593) tmp |= 1 << 29 | 1 << 28; if (sc->nrxchains > 1) tmp |= RT2860_LNA_PE1_EN; if (group == 0) { /* 2GHz */ tmp |= RT2860_PA_PE_G0_EN; if (sc->ntxchains > 1) tmp |= RT2860_PA_PE_G1_EN; if (sc->mac_ver == 0x3593) { if (sc->ntxchains > 2) tmp |= 1 << 25; } } else { /* 5GHz */ tmp |= RT2860_PA_PE_A0_EN; if (sc->ntxchains > 1) tmp |= RT2860_PA_PE_A1_EN; } if (sc->mac_ver == 0x3572) { run_rt3070_rf_write(sc, 8, 0x00); run_write(sc, RT2860_TX_PIN_CFG, tmp); run_rt3070_rf_write(sc, 8, 0x80); } else run_write(sc, RT2860_TX_PIN_CFG, tmp); if (sc->mac_ver == 0x5592) { run_bbp_write(sc, 195, 0x8d); run_bbp_write(sc, 196, 0x1a); } if (sc->mac_ver == 0x3593) { run_read(sc, RT2860_GPIO_CTRL, &tmp); tmp &= ~0x01010000; if (group == 0) tmp |= 0x00010000; tmp = (tmp & ~0x00009090) | 0x00000090; run_write(sc, RT2860_GPIO_CTRL, tmp); } /* set initial AGC value */ if (group == 0) { /* 2GHz band */ if (sc->mac_ver >= 0x3070) agc = 0x1c + sc->lna[0] * 2; else agc = 0x2e + sc->lna[0]; } else { /* 5GHz band */ if (sc->mac_ver == 0x5592) agc = 0x24 + sc->lna[group] * 2; else if (sc->mac_ver == 0x3572 || sc->mac_ver == 0x3593) agc = 0x22 + (sc->lna[group] * 5) / 3; else agc = 0x32 + (sc->lna[group] * 5) / 3; } run_set_agc(sc, agc); } static void run_rt2870_set_chan(struct run_softc *sc, u_int chan) { const struct rfprog *rfprog = rt2860_rf2850; uint32_t r2, r3, r4; int8_t txpow1, txpow2; int i; /* find the settings for this channel (we know it exists) */ for (i = 0; rfprog[i].chan != chan; i++); r2 = rfprog[i].r2; if (sc->ntxchains == 1) r2 |= 1 << 14; /* 1T: disable Tx chain 2 */ if (sc->nrxchains == 1) r2 |= 1 << 17 | 1 << 6; /* 1R: disable Rx chains 2 & 3 */ else if (sc->nrxchains == 2) r2 |= 1 << 6; /* 2R: disable Rx chain 3 */ /* use Tx power values from EEPROM */ txpow1 = sc->txpow1[i]; txpow2 = sc->txpow2[i]; /* Initialize RF R3 and R4. */ r3 = rfprog[i].r3 & 0xffffc1ff; r4 = (rfprog[i].r4 & ~(0x001f87c0)) | (sc->freq << 15); if (chan > 14) { if (txpow1 >= 0) { txpow1 = (txpow1 > 0xf) ? (0xf) : (txpow1); r3 |= (txpow1 << 10) | (1 << 9); } else { txpow1 += 7; /* txpow1 is not possible larger than 15. */ r3 |= (txpow1 << 10); } if (txpow2 >= 0) { txpow2 = (txpow2 > 0xf) ? (0xf) : (txpow2); r4 |= (txpow2 << 7) | (1 << 6); } else { txpow2 += 7; r4 |= (txpow2 << 7); } } else { /* Set Tx0 power. */ r3 |= (txpow1 << 9); /* Set frequency offset and Tx1 power. 
*/ r4 |= (txpow2 << 6); } run_rt2870_rf_write(sc, rfprog[i].r1); run_rt2870_rf_write(sc, r2); run_rt2870_rf_write(sc, r3 & ~(1 << 2)); run_rt2870_rf_write(sc, r4); run_delay(sc, 10); run_rt2870_rf_write(sc, rfprog[i].r1); run_rt2870_rf_write(sc, r2); run_rt2870_rf_write(sc, r3 | (1 << 2)); run_rt2870_rf_write(sc, r4); run_delay(sc, 10); run_rt2870_rf_write(sc, rfprog[i].r1); run_rt2870_rf_write(sc, r2); run_rt2870_rf_write(sc, r3 & ~(1 << 2)); run_rt2870_rf_write(sc, r4); } static void run_rt3070_set_chan(struct run_softc *sc, u_int chan) { int8_t txpow1, txpow2; uint8_t rf; int i; /* find the settings for this channel (we know it exists) */ for (i = 0; rt2860_rf2850[i].chan != chan; i++); /* use Tx power values from EEPROM */ txpow1 = sc->txpow1[i]; txpow2 = sc->txpow2[i]; run_rt3070_rf_write(sc, 2, rt3070_freqs[i].n); /* RT3370/RT3390: RF R3 [7:4] is not reserved bits. */ run_rt3070_rf_read(sc, 3, &rf); rf = (rf & ~0x0f) | rt3070_freqs[i].k; run_rt3070_rf_write(sc, 3, rf); run_rt3070_rf_read(sc, 6, &rf); rf = (rf & ~0x03) | rt3070_freqs[i].r; run_rt3070_rf_write(sc, 6, rf); /* set Tx0 power */ run_rt3070_rf_read(sc, 12, &rf); rf = (rf & ~0x1f) | txpow1; run_rt3070_rf_write(sc, 12, rf); /* set Tx1 power */ run_rt3070_rf_read(sc, 13, &rf); rf = (rf & ~0x1f) | txpow2; run_rt3070_rf_write(sc, 13, rf); run_rt3070_rf_read(sc, 1, &rf); rf &= ~0xfc; if (sc->ntxchains == 1) rf |= 1 << 7 | 1 << 5; /* 1T: disable Tx chains 2 & 3 */ else if (sc->ntxchains == 2) rf |= 1 << 7; /* 2T: disable Tx chain 3 */ if (sc->nrxchains == 1) rf |= 1 << 6 | 1 << 4; /* 1R: disable Rx chains 2 & 3 */ else if (sc->nrxchains == 2) rf |= 1 << 6; /* 2R: disable Rx chain 3 */ run_rt3070_rf_write(sc, 1, rf); /* set RF offset */ run_rt3070_rf_read(sc, 23, &rf); rf = (rf & ~0x7f) | sc->freq; run_rt3070_rf_write(sc, 23, rf); /* program RF filter */ run_rt3070_rf_read(sc, 24, &rf); /* Tx */ rf = (rf & ~0x3f) | sc->rf24_20mhz; run_rt3070_rf_write(sc, 24, rf); run_rt3070_rf_read(sc, 31, &rf); /* Rx */ rf = (rf & ~0x3f) | sc->rf24_20mhz; run_rt3070_rf_write(sc, 31, rf); /* enable RF tuning */ run_rt3070_rf_read(sc, 7, &rf); run_rt3070_rf_write(sc, 7, rf | 0x01); } static void run_rt3572_set_chan(struct run_softc *sc, u_int chan) { int8_t txpow1, txpow2; uint32_t tmp; uint8_t rf; int i; /* find the settings for this channel (we know it exists) */ for (i = 0; rt2860_rf2850[i].chan != chan; i++); /* use Tx power values from EEPROM */ txpow1 = sc->txpow1[i]; txpow2 = sc->txpow2[i]; if (chan <= 14) { run_bbp_write(sc, 25, sc->bbp25); run_bbp_write(sc, 26, sc->bbp26); } else { /* enable IQ phase correction */ run_bbp_write(sc, 25, 0x09); run_bbp_write(sc, 26, 0xff); } run_rt3070_rf_write(sc, 2, rt3070_freqs[i].n); run_rt3070_rf_write(sc, 3, rt3070_freqs[i].k); run_rt3070_rf_read(sc, 6, &rf); rf = (rf & ~0x0f) | rt3070_freqs[i].r; rf |= (chan <= 14) ? 0x08 : 0x04; run_rt3070_rf_write(sc, 6, rf); /* set PLL mode */ run_rt3070_rf_read(sc, 5, &rf); rf &= ~(0x08 | 0x04); rf |= (chan <= 14) ? 
0x04 : 0x08; run_rt3070_rf_write(sc, 5, rf); /* set Tx power for chain 0 */ if (chan <= 14) rf = 0x60 | txpow1; else rf = 0xe0 | (txpow1 & 0xc) << 1 | (txpow1 & 0x3); run_rt3070_rf_write(sc, 12, rf); /* set Tx power for chain 1 */ if (chan <= 14) rf = 0x60 | txpow2; else rf = 0xe0 | (txpow2 & 0xc) << 1 | (txpow2 & 0x3); run_rt3070_rf_write(sc, 13, rf); /* set Tx/Rx streams */ run_rt3070_rf_read(sc, 1, &rf); rf &= ~0xfc; if (sc->ntxchains == 1) rf |= 1 << 7 | 1 << 5; /* 1T: disable Tx chains 2 & 3 */ else if (sc->ntxchains == 2) rf |= 1 << 7; /* 2T: disable Tx chain 3 */ if (sc->nrxchains == 1) rf |= 1 << 6 | 1 << 4; /* 1R: disable Rx chains 2 & 3 */ else if (sc->nrxchains == 2) rf |= 1 << 6; /* 2R: disable Rx chain 3 */ run_rt3070_rf_write(sc, 1, rf); /* set RF offset */ run_rt3070_rf_read(sc, 23, &rf); rf = (rf & ~0x7f) | sc->freq; run_rt3070_rf_write(sc, 23, rf); /* program RF filter */ rf = sc->rf24_20mhz; run_rt3070_rf_write(sc, 24, rf); /* Tx */ run_rt3070_rf_write(sc, 31, rf); /* Rx */ /* enable RF tuning */ run_rt3070_rf_read(sc, 7, &rf); rf = (chan <= 14) ? 0xd8 : ((rf & ~0xc8) | 0x14); run_rt3070_rf_write(sc, 7, rf); /* TSSI */ rf = (chan <= 14) ? 0xc3 : 0xc0; run_rt3070_rf_write(sc, 9, rf); /* set loop filter 1 */ run_rt3070_rf_write(sc, 10, 0xf1); /* set loop filter 2 */ run_rt3070_rf_write(sc, 11, (chan <= 14) ? 0xb9 : 0x00); /* set tx_mx2_ic */ run_rt3070_rf_write(sc, 15, (chan <= 14) ? 0x53 : 0x43); /* set tx_mx1_ic */ if (chan <= 14) rf = 0x48 | sc->txmixgain_2ghz; else rf = 0x78 | sc->txmixgain_5ghz; run_rt3070_rf_write(sc, 16, rf); /* set tx_lo1 */ run_rt3070_rf_write(sc, 17, 0x23); /* set tx_lo2 */ if (chan <= 14) rf = 0x93; else if (chan <= 64) rf = 0xb7; else if (chan <= 128) rf = 0x74; else rf = 0x72; run_rt3070_rf_write(sc, 19, rf); /* set rx_lo1 */ if (chan <= 14) rf = 0xb3; else if (chan <= 64) rf = 0xf6; else if (chan <= 128) rf = 0xf4; else rf = 0xf3; run_rt3070_rf_write(sc, 20, rf); /* set pfd_delay */ if (chan <= 14) rf = 0x15; else if (chan <= 64) rf = 0x3d; else rf = 0x01; run_rt3070_rf_write(sc, 25, rf); /* set rx_lo2 */ run_rt3070_rf_write(sc, 26, (chan <= 14) ? 0x85 : 0x87); /* set ldo_rf_vc */ run_rt3070_rf_write(sc, 27, (chan <= 14) ? 0x00 : 0x01); /* set drv_cc */ run_rt3070_rf_write(sc, 29, (chan <= 14) ? 0x9b : 0x9f); run_read(sc, RT2860_GPIO_CTRL, &tmp); tmp &= ~0x8080; if (chan <= 14) tmp |= 0x80; run_write(sc, RT2860_GPIO_CTRL, tmp); /* enable RF tuning */ run_rt3070_rf_read(sc, 7, &rf); run_rt3070_rf_write(sc, 7, rf | 0x01); run_delay(sc, 2); } static void run_rt3593_set_chan(struct run_softc *sc, u_int chan) { int8_t txpow1, txpow2, txpow3; uint8_t h20mhz, rf; int i; /* find the settings for this channel (we know it exists) */ for (i = 0; rt2860_rf2850[i].chan != chan; i++); /* use Tx power values from EEPROM */ txpow1 = sc->txpow1[i]; txpow2 = sc->txpow2[i]; txpow3 = (sc->ntxchains == 3) ? sc->txpow3[i] : 0; if (chan <= 14) { run_bbp_write(sc, 25, sc->bbp25); run_bbp_write(sc, 26, sc->bbp26); } else { /* Enable IQ phase correction. */ run_bbp_write(sc, 25, 0x09); run_bbp_write(sc, 26, 0xff); } run_rt3070_rf_write(sc, 8, rt3070_freqs[i].n); run_rt3070_rf_write(sc, 9, rt3070_freqs[i].k & 0x0f); run_rt3070_rf_read(sc, 11, &rf); rf = (rf & ~0x03) | (rt3070_freqs[i].r & 0x03); run_rt3070_rf_write(sc, 11, rf); /* Set pll_idoh. */ run_rt3070_rf_read(sc, 11, &rf); rf &= ~0x4c; rf |= (chan <= 14) ? 
0x44 : 0x48; run_rt3070_rf_write(sc, 11, rf); if (chan <= 14) rf = txpow1 & 0x1f; else rf = 0x40 | ((txpow1 & 0x18) << 1) | (txpow1 & 0x07); run_rt3070_rf_write(sc, 53, rf); if (chan <= 14) rf = txpow2 & 0x1f; else rf = 0x40 | ((txpow2 & 0x18) << 1) | (txpow2 & 0x07); run_rt3070_rf_write(sc, 55, rf); if (chan <= 14) rf = txpow3 & 0x1f; else rf = 0x40 | ((txpow3 & 0x18) << 1) | (txpow3 & 0x07); run_rt3070_rf_write(sc, 54, rf); rf = RT3070_RF_BLOCK | RT3070_PLL_PD; if (sc->ntxchains == 3) rf |= RT3070_TX0_PD | RT3070_TX1_PD | RT3070_TX2_PD; else rf |= RT3070_TX0_PD | RT3070_TX1_PD; rf |= RT3070_RX0_PD | RT3070_RX1_PD | RT3070_RX2_PD; run_rt3070_rf_write(sc, 1, rf); run_adjust_freq_offset(sc); run_rt3070_rf_write(sc, 31, (chan <= 14) ? 0xa0 : 0x80); h20mhz = (sc->rf24_20mhz & 0x20) >> 5; run_rt3070_rf_read(sc, 30, &rf); rf = (rf & ~0x06) | (h20mhz << 1) | (h20mhz << 2); run_rt3070_rf_write(sc, 30, rf); run_rt3070_rf_read(sc, 36, &rf); if (chan <= 14) rf |= 0x80; else rf &= ~0x80; run_rt3070_rf_write(sc, 36, rf); /* Set vcolo_bs. */ run_rt3070_rf_write(sc, 34, (chan <= 14) ? 0x3c : 0x20); /* Set pfd_delay. */ run_rt3070_rf_write(sc, 12, (chan <= 14) ? 0x1a : 0x12); /* Set vco bias current control. */ run_rt3070_rf_read(sc, 6, &rf); rf &= ~0xc0; if (chan <= 14) rf |= 0x40; else if (chan <= 128) rf |= 0x80; else rf |= 0x40; run_rt3070_rf_write(sc, 6, rf); run_rt3070_rf_read(sc, 30, &rf); rf = (rf & ~0x18) | 0x10; run_rt3070_rf_write(sc, 30, rf); run_rt3070_rf_write(sc, 10, (chan <= 14) ? 0xd3 : 0xd8); run_rt3070_rf_write(sc, 13, (chan <= 14) ? 0x12 : 0x23); run_rt3070_rf_read(sc, 51, &rf); rf = (rf & ~0x03) | 0x01; run_rt3070_rf_write(sc, 51, rf); /* Set tx_mx1_cc. */ run_rt3070_rf_read(sc, 51, &rf); rf &= ~0x1c; rf |= (chan <= 14) ? 0x14 : 0x10; run_rt3070_rf_write(sc, 51, rf); /* Set tx_mx1_ic. */ run_rt3070_rf_read(sc, 51, &rf); rf &= ~0xe0; rf |= (chan <= 14) ? 0x60 : 0x40; run_rt3070_rf_write(sc, 51, rf); /* Set tx_lo1_ic. */ run_rt3070_rf_read(sc, 49, &rf); rf &= ~0x1c; rf |= (chan <= 14) ? 0x0c : 0x08; run_rt3070_rf_write(sc, 49, rf); /* Set tx_lo1_en. */ run_rt3070_rf_read(sc, 50, &rf); run_rt3070_rf_write(sc, 50, rf & ~0x20); /* Set drv_cc. */ run_rt3070_rf_read(sc, 57, &rf); rf &= ~0xfc; rf |= (chan <= 14) ? 0x6c : 0x3c; run_rt3070_rf_write(sc, 57, rf); /* Set rx_mix1_ic, rxa_lnactr, lna_vc, lna_inbias_en and lna_en. */ run_rt3070_rf_write(sc, 44, (chan <= 14) ? 0x93 : 0x9b); /* Set drv_gnd_a, tx_vga_cc_a and tx_mx2_gain. */ run_rt3070_rf_write(sc, 52, (chan <= 14) ? 0x45 : 0x05); /* Enable VCO calibration. */ run_rt3070_rf_read(sc, 3, &rf); rf &= ~RT5390_VCOCAL; rf |= (chan <= 14) ? RT5390_VCOCAL : 0xbe; run_rt3070_rf_write(sc, 3, rf); if (chan <= 14) rf = 0x23; else if (chan <= 64) rf = 0x36; else if (chan <= 128) rf = 0x32; else rf = 0x30; run_rt3070_rf_write(sc, 39, rf); if (chan <= 14) rf = 0xbb; else if (chan <= 64) rf = 0xeb; else if (chan <= 128) rf = 0xb3; else rf = 0x9b; run_rt3070_rf_write(sc, 45, rf); /* Set FEQ/AEQ control. 
*/ run_bbp_write(sc, 105, 0x34); } static void run_rt5390_set_chan(struct run_softc *sc, u_int chan) { int8_t txpow1, txpow2; uint8_t rf; int i; /* find the settings for this channel (we know it exists) */ for (i = 0; rt2860_rf2850[i].chan != chan; i++); /* use Tx power values from EEPROM */ txpow1 = sc->txpow1[i]; txpow2 = sc->txpow2[i]; run_rt3070_rf_write(sc, 8, rt3070_freqs[i].n); run_rt3070_rf_write(sc, 9, rt3070_freqs[i].k & 0x0f); run_rt3070_rf_read(sc, 11, &rf); rf = (rf & ~0x03) | (rt3070_freqs[i].r & 0x03); run_rt3070_rf_write(sc, 11, rf); run_rt3070_rf_read(sc, 49, &rf); rf = (rf & ~0x3f) | (txpow1 & 0x3f); /* The valid range of the RF R49 is 0x00 to 0x27. */ if ((rf & 0x3f) > 0x27) rf = (rf & ~0x3f) | 0x27; run_rt3070_rf_write(sc, 49, rf); if (sc->mac_ver == 0x5392) { run_rt3070_rf_read(sc, 50, &rf); rf = (rf & ~0x3f) | (txpow2 & 0x3f); /* The valid range of the RF R50 is 0x00 to 0x27. */ if ((rf & 0x3f) > 0x27) rf = (rf & ~0x3f) | 0x27; run_rt3070_rf_write(sc, 50, rf); } run_rt3070_rf_read(sc, 1, &rf); rf |= RT3070_RF_BLOCK | RT3070_PLL_PD | RT3070_RX0_PD | RT3070_TX0_PD; if (sc->mac_ver == 0x5392) rf |= RT3070_RX1_PD | RT3070_TX1_PD; run_rt3070_rf_write(sc, 1, rf); if (sc->mac_ver != 0x5392) { run_rt3070_rf_read(sc, 2, &rf); rf |= 0x80; run_rt3070_rf_write(sc, 2, rf); run_delay(sc, 10); rf &= 0x7f; run_rt3070_rf_write(sc, 2, rf); } run_adjust_freq_offset(sc); if (sc->mac_ver == 0x5392) { /* Fix for RT5392C. */ if (sc->mac_rev >= 0x0223) { if (chan <= 4) rf = 0x0f; else if (chan >= 5 && chan <= 7) rf = 0x0e; else rf = 0x0d; run_rt3070_rf_write(sc, 23, rf); if (chan <= 4) rf = 0x0c; else if (chan == 5) rf = 0x0b; else if (chan >= 6 && chan <= 7) rf = 0x0a; else if (chan >= 8 && chan <= 10) rf = 0x09; else rf = 0x08; run_rt3070_rf_write(sc, 59, rf); } else { if (chan <= 11) rf = 0x0f; else rf = 0x0b; run_rt3070_rf_write(sc, 59, rf); } } else { /* Fix for RT5390F. */ if (sc->mac_rev >= 0x0502) { if (chan <= 11) rf = 0x43; else rf = 0x23; run_rt3070_rf_write(sc, 55, rf); if (chan <= 11) rf = 0x0f; else if (chan == 12) rf = 0x0d; else rf = 0x0b; run_rt3070_rf_write(sc, 59, rf); } else { run_rt3070_rf_write(sc, 55, 0x44); run_rt3070_rf_write(sc, 59, 0x8f); } } /* Enable VCO calibration. */ run_rt3070_rf_read(sc, 3, &rf); rf |= RT5390_VCOCAL; run_rt3070_rf_write(sc, 3, rf); } static void run_rt5592_set_chan(struct run_softc *sc, u_int chan) { const struct rt5592_freqs *freqs; uint32_t tmp; uint8_t reg, rf, txpow_bound; int8_t txpow1, txpow2; int i; run_read(sc, RT5592_DEBUG_INDEX, &tmp); freqs = (tmp & RT5592_SEL_XTAL) ? rt5592_freqs_40mhz : rt5592_freqs_20mhz; /* find the settings for this channel (we know it exists) */ for (i = 0; rt2860_rf2850[i].chan != chan; i++, freqs++); /* use Tx power values from EEPROM */ txpow1 = sc->txpow1[i]; txpow2 = sc->txpow2[i]; run_read(sc, RT3070_LDO_CFG0, &tmp); tmp &= ~0x1c000000; if (chan > 14) tmp |= 0x14000000; run_write(sc, RT3070_LDO_CFG0, tmp); /* N setting. */ run_rt3070_rf_write(sc, 8, freqs->n & 0xff); run_rt3070_rf_read(sc, 9, &rf); rf &= ~(1 << 4); rf |= ((freqs->n & 0x0100) >> 8) << 4; run_rt3070_rf_write(sc, 9, rf); /* K setting. */ run_rt3070_rf_read(sc, 9, &rf); rf &= ~0x0f; rf |= (freqs->k & 0x0f); run_rt3070_rf_write(sc, 9, rf); /* Mode setting. */ run_rt3070_rf_read(sc, 11, &rf); rf &= ~0x0c; rf |= ((freqs->m - 0x8) & 0x3) << 2; run_rt3070_rf_write(sc, 11, rf); run_rt3070_rf_read(sc, 9, &rf); rf &= ~(1 << 7); rf |= (((freqs->m - 0x8) & 0x4) >> 2) << 7; run_rt3070_rf_write(sc, 9, rf); /* R setting. 
*/ run_rt3070_rf_read(sc, 11, &rf); rf &= ~0x03; rf |= (freqs->r - 0x1); run_rt3070_rf_write(sc, 11, rf); if (chan <= 14) { /* Initialize RF registers for 2GHZ. */ for (i = 0; i < nitems(rt5592_2ghz_def_rf); i++) { run_rt3070_rf_write(sc, rt5592_2ghz_def_rf[i].reg, rt5592_2ghz_def_rf[i].val); } rf = (chan <= 10) ? 0x07 : 0x06; run_rt3070_rf_write(sc, 23, rf); run_rt3070_rf_write(sc, 59, rf); run_rt3070_rf_write(sc, 55, 0x43); /* * RF R49/R50 Tx power ALC code. * G-band bit<7:6>=1:0, bit<5:0> range from 0x0 ~ 0x27. */ reg = 2; txpow_bound = 0x27; } else { /* Initialize RF registers for 5GHZ. */ for (i = 0; i < nitems(rt5592_5ghz_def_rf); i++) { run_rt3070_rf_write(sc, rt5592_5ghz_def_rf[i].reg, rt5592_5ghz_def_rf[i].val); } for (i = 0; i < nitems(rt5592_chan_5ghz); i++) { if (chan >= rt5592_chan_5ghz[i].firstchan && chan <= rt5592_chan_5ghz[i].lastchan) { run_rt3070_rf_write(sc, rt5592_chan_5ghz[i].reg, rt5592_chan_5ghz[i].val); } } /* * RF R49/R50 Tx power ALC code. * A-band bit<7:6>=1:1, bit<5:0> range from 0x0 ~ 0x2b. */ reg = 3; txpow_bound = 0x2b; } /* RF R49 ch0 Tx power ALC code. */ run_rt3070_rf_read(sc, 49, &rf); rf &= ~0xc0; rf |= (reg << 6); rf = (rf & ~0x3f) | (txpow1 & 0x3f); if ((rf & 0x3f) > txpow_bound) rf = (rf & ~0x3f) | txpow_bound; run_rt3070_rf_write(sc, 49, rf); /* RF R50 ch1 Tx power ALC code. */ run_rt3070_rf_read(sc, 50, &rf); rf &= ~(1 << 7 | 1 << 6); rf |= (reg << 6); rf = (rf & ~0x3f) | (txpow2 & 0x3f); if ((rf & 0x3f) > txpow_bound) rf = (rf & ~0x3f) | txpow_bound; run_rt3070_rf_write(sc, 50, rf); /* Enable RF_BLOCK, PLL_PD, RX0_PD, and TX0_PD. */ run_rt3070_rf_read(sc, 1, &rf); rf |= (RT3070_RF_BLOCK | RT3070_PLL_PD | RT3070_RX0_PD | RT3070_TX0_PD); if (sc->ntxchains > 1) rf |= RT3070_TX1_PD; if (sc->nrxchains > 1) rf |= RT3070_RX1_PD; run_rt3070_rf_write(sc, 1, rf); run_rt3070_rf_write(sc, 6, 0xe4); run_rt3070_rf_write(sc, 30, 0x10); run_rt3070_rf_write(sc, 31, 0x80); run_rt3070_rf_write(sc, 32, 0x80); run_adjust_freq_offset(sc); /* Enable VCO calibration. */ run_rt3070_rf_read(sc, 3, &rf); rf |= RT5390_VCOCAL; run_rt3070_rf_write(sc, 3, rf); } static void run_set_rx_antenna(struct run_softc *sc, int aux) { uint32_t tmp; uint8_t bbp152; if (aux) { if (sc->rf_rev == RT5390_RF_5370) { run_bbp_read(sc, 152, &bbp152); run_bbp_write(sc, 152, bbp152 & ~0x80); } else { run_mcu_cmd(sc, RT2860_MCU_CMD_ANTSEL, 0); run_read(sc, RT2860_GPIO_CTRL, &tmp); run_write(sc, RT2860_GPIO_CTRL, (tmp & ~0x0808) | 0x08); } } else { if (sc->rf_rev == RT5390_RF_5370) { run_bbp_read(sc, 152, &bbp152); run_bbp_write(sc, 152, bbp152 | 0x80); } else { run_mcu_cmd(sc, RT2860_MCU_CMD_ANTSEL, 1); run_read(sc, RT2860_GPIO_CTRL, &tmp); run_write(sc, RT2860_GPIO_CTRL, tmp & ~0x0808); } } } static int run_set_chan(struct run_softc *sc, struct ieee80211_channel *c) { struct ieee80211com *ic = &sc->sc_ic; u_int chan, group; chan = ieee80211_chan2ieee(ic, c); if (chan == 0 || chan == IEEE80211_CHAN_ANY) return (EINVAL); if (sc->mac_ver == 0x5592) run_rt5592_set_chan(sc, chan); else if (sc->mac_ver >= 0x5390) run_rt5390_set_chan(sc, chan); else if (sc->mac_ver == 0x3593) run_rt3593_set_chan(sc, chan); else if (sc->mac_ver == 0x3572) run_rt3572_set_chan(sc, chan); else if (sc->mac_ver >= 0x3070) run_rt3070_set_chan(sc, chan); else run_rt2870_set_chan(sc, chan); /* determine channel group */ if (chan <= 14) group = 0; else if (chan <= 64) group = 1; else if (chan <= 128) group = 2; else group = 3; /* XXX necessary only when group has changed! 
*/ run_select_chan_group(sc, group); run_delay(sc, 10); /* Perform IQ calibration. */ if (sc->mac_ver >= 0x5392) run_iq_calib(sc, chan); return (0); } static void run_set_channel(struct ieee80211com *ic) { struct run_softc *sc = ic->ic_softc; RUN_LOCK(sc); run_set_chan(sc, ic->ic_curchan); RUN_UNLOCK(sc); return; } static void run_getradiocaps(struct ieee80211com *ic, int maxchans, int *nchans, struct ieee80211_channel chans[]) { struct run_softc *sc = ic->ic_softc; uint8_t bands[IEEE80211_MODE_BYTES]; memset(bands, 0, sizeof(bands)); setbit(bands, IEEE80211_MODE_11B); setbit(bands, IEEE80211_MODE_11G); ieee80211_add_channel_list_2ghz(chans, maxchans, nchans, run_chan_2ghz, nitems(run_chan_2ghz), bands, 0); if (sc->rf_rev == RT2860_RF_2750 || sc->rf_rev == RT2860_RF_2850 || sc->rf_rev == RT3070_RF_3052 || sc->rf_rev == RT3593_RF_3053 || sc->rf_rev == RT5592_RF_5592) { setbit(bands, IEEE80211_MODE_11A); ieee80211_add_channel_list_5ghz(chans, maxchans, nchans, run_chan_5ghz, nitems(run_chan_5ghz), bands, 0); } } static void run_scan_start(struct ieee80211com *ic) { struct run_softc *sc = ic->ic_softc; uint32_t tmp; RUN_LOCK(sc); /* abort TSF synchronization */ run_read(sc, RT2860_BCN_TIME_CFG, &tmp); run_write(sc, RT2860_BCN_TIME_CFG, tmp & ~(RT2860_BCN_TX_EN | RT2860_TSF_TIMER_EN | RT2860_TBTT_TIMER_EN)); run_set_bssid(sc, ieee80211broadcastaddr); RUN_UNLOCK(sc); return; } static void run_scan_end(struct ieee80211com *ic) { struct run_softc *sc = ic->ic_softc; RUN_LOCK(sc); run_enable_tsf_sync(sc); run_set_bssid(sc, sc->sc_bssid); RUN_UNLOCK(sc); return; } /* * Could be called from ieee80211_node_timeout() * (non-sleepable thread) */ static void run_update_beacon(struct ieee80211vap *vap, int item) { struct ieee80211com *ic = vap->iv_ic; struct ieee80211_beacon_offsets *bo = &vap->iv_bcn_off; struct ieee80211_node *ni = vap->iv_bss; struct run_softc *sc = ic->ic_softc; struct run_vap *rvp = RUN_VAP(vap); int mcast = 0; uint32_t i; switch (item) { case IEEE80211_BEACON_ERP: run_updateslot(ic); break; case IEEE80211_BEACON_HTINFO: run_updateprot(ic); break; case IEEE80211_BEACON_TIM: mcast = 1; /*TODO*/ break; default: break; } setbit(bo->bo_flags, item); if (rvp->beacon_mbuf == NULL) { rvp->beacon_mbuf = ieee80211_beacon_alloc(ni); if (rvp->beacon_mbuf == NULL) return; } ieee80211_beacon_update(ni, rvp->beacon_mbuf, mcast); i = RUN_CMDQ_GET(&sc->cmdq_store); RUN_DPRINTF(sc, RUN_DEBUG_BEACON, "cmdq_store=%d\n", i); sc->cmdq[i].func = run_update_beacon_cb; sc->cmdq[i].arg0 = vap; ieee80211_runtask(ic, &sc->cmdq_task); return; } static void run_update_beacon_cb(void *arg) { struct ieee80211vap *vap = arg; struct ieee80211_node *ni = vap->iv_bss; struct run_vap *rvp = RUN_VAP(vap); struct ieee80211com *ic = vap->iv_ic; struct run_softc *sc = ic->ic_softc; struct rt2860_txwi txwi; struct mbuf *m; uint16_t txwisize; uint8_t ridx; if (ni->ni_chan == IEEE80211_CHAN_ANYC) return; if (ic->ic_bsschan == IEEE80211_CHAN_ANYC) return; /* * No need to call ieee80211_beacon_update(), run_update_beacon() * is taking care of appropriate calls. */ if (rvp->beacon_mbuf == NULL) { rvp->beacon_mbuf = ieee80211_beacon_alloc(ni); if (rvp->beacon_mbuf == NULL) return; } m = rvp->beacon_mbuf; memset(&txwi, 0, sizeof(txwi)); txwi.wcid = 0xff; txwi.len = htole16(m->m_pkthdr.len); /* send beacons at the lowest available rate */ ridx = (ic->ic_curmode == IEEE80211_MODE_11A) ? 
RT2860_RIDX_OFDM6 : RT2860_RIDX_CCK1; txwi.phy = htole16(rt2860_rates[ridx].mcs); if (rt2860_rates[ridx].phy == IEEE80211_T_OFDM) txwi.phy |= htole16(RT2860_PHY_OFDM); txwi.txop = RT2860_TX_TXOP_HT; txwi.flags = RT2860_TX_TS; txwi.xflags = RT2860_TX_NSEQ; txwisize = (sc->mac_ver == 0x5592) ? sizeof(txwi) + sizeof(uint32_t) : sizeof(txwi); run_write_region_1(sc, RT2860_BCN_BASE(rvp->rvp_id), (uint8_t *)&txwi, txwisize); run_write_region_1(sc, RT2860_BCN_BASE(rvp->rvp_id) + txwisize, mtod(m, uint8_t *), (m->m_pkthdr.len + 1) & ~1); } static void run_updateprot(struct ieee80211com *ic) { struct run_softc *sc = ic->ic_softc; uint32_t i; i = RUN_CMDQ_GET(&sc->cmdq_store); RUN_DPRINTF(sc, RUN_DEBUG_BEACON, "cmdq_store=%d\n", i); sc->cmdq[i].func = run_updateprot_cb; sc->cmdq[i].arg0 = ic; ieee80211_runtask(ic, &sc->cmdq_task); } static void run_updateprot_cb(void *arg) { struct ieee80211com *ic = arg; struct run_softc *sc = ic->ic_softc; uint32_t tmp; tmp = RT2860_RTSTH_EN | RT2860_PROT_NAV_SHORT | RT2860_TXOP_ALLOW_ALL; /* setup protection frame rate (MCS code) */ tmp |= (ic->ic_curmode == IEEE80211_MODE_11A) ? rt2860_rates[RT2860_RIDX_OFDM6].mcs | RT2860_PHY_OFDM : rt2860_rates[RT2860_RIDX_CCK11].mcs; /* CCK frames don't require protection */ run_write(sc, RT2860_CCK_PROT_CFG, tmp); if (ic->ic_flags & IEEE80211_F_USEPROT) { if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) tmp |= RT2860_PROT_CTRL_RTS_CTS; else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) tmp |= RT2860_PROT_CTRL_CTS; } run_write(sc, RT2860_OFDM_PROT_CFG, tmp); } static void run_usb_timeout_cb(void *arg) { struct ieee80211vap *vap = arg; struct run_softc *sc = vap->iv_ic->ic_softc; RUN_LOCK_ASSERT(sc, MA_OWNED); if(vap->iv_state == IEEE80211_S_RUN && vap->iv_opmode != IEEE80211_M_STA) run_reset_livelock(sc); else if (vap->iv_state == IEEE80211_S_SCAN) { RUN_DPRINTF(sc, RUN_DEBUG_USB | RUN_DEBUG_STATE, "timeout caused by scan\n"); /* cancel bgscan */ ieee80211_cancel_scan(vap); } else RUN_DPRINTF(sc, RUN_DEBUG_USB | RUN_DEBUG_STATE, "timeout by unknown cause\n"); } static void run_reset_livelock(struct run_softc *sc) { uint32_t tmp; RUN_LOCK_ASSERT(sc, MA_OWNED); /* * In IBSS or HostAP modes (when the hardware sends beacons), the MAC * can run into a livelock and start sending CTS-to-self frames like * crazy if protection is enabled. Reset MAC/BBP for a while */ run_read(sc, RT2860_DEBUG, &tmp); RUN_DPRINTF(sc, RUN_DEBUG_RESET, "debug reg %08x\n", tmp); if ((tmp & (1 << 29)) && (tmp & (1 << 7 | 1 << 5))) { RUN_DPRINTF(sc, RUN_DEBUG_RESET, "CTS-to-self livelock detected\n"); run_write(sc, RT2860_MAC_SYS_CTRL, RT2860_MAC_SRST); run_delay(sc, 1); run_write(sc, RT2860_MAC_SYS_CTRL, RT2860_MAC_RX_EN | RT2860_MAC_TX_EN); } } static void run_update_promisc_locked(struct run_softc *sc) { uint32_t tmp; run_read(sc, RT2860_RX_FILTR_CFG, &tmp); tmp |= RT2860_DROP_UC_NOME; if (sc->sc_ic.ic_promisc > 0) tmp &= ~RT2860_DROP_UC_NOME; run_write(sc, RT2860_RX_FILTR_CFG, tmp); RUN_DPRINTF(sc, RUN_DEBUG_RECV, "%s promiscuous mode\n", (sc->sc_ic.ic_promisc > 0) ? 
"entering" : "leaving"); } static void run_update_promisc(struct ieee80211com *ic) { struct run_softc *sc = ic->ic_softc; if ((sc->sc_flags & RUN_RUNNING) == 0) return; RUN_LOCK(sc); run_update_promisc_locked(sc); RUN_UNLOCK(sc); } static void run_enable_tsf_sync(struct run_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); uint32_t tmp; RUN_DPRINTF(sc, RUN_DEBUG_BEACON, "rvp_id=%d ic_opmode=%d\n", RUN_VAP(vap)->rvp_id, ic->ic_opmode); run_read(sc, RT2860_BCN_TIME_CFG, &tmp); tmp &= ~0x1fffff; tmp |= vap->iv_bss->ni_intval * 16; tmp |= RT2860_TSF_TIMER_EN | RT2860_TBTT_TIMER_EN; if (ic->ic_opmode == IEEE80211_M_STA) { /* * Local TSF is always updated with remote TSF on beacon * reception. */ tmp |= 1 << RT2860_TSF_SYNC_MODE_SHIFT; } else if (ic->ic_opmode == IEEE80211_M_IBSS) { tmp |= RT2860_BCN_TX_EN; /* * Local TSF is updated with remote TSF on beacon reception * only if the remote TSF is greater than local TSF. */ tmp |= 2 << RT2860_TSF_SYNC_MODE_SHIFT; } else if (ic->ic_opmode == IEEE80211_M_HOSTAP || ic->ic_opmode == IEEE80211_M_MBSS) { tmp |= RT2860_BCN_TX_EN; /* SYNC with nobody */ tmp |= 3 << RT2860_TSF_SYNC_MODE_SHIFT; } else { RUN_DPRINTF(sc, RUN_DEBUG_BEACON, "Enabling TSF failed. undefined opmode\n"); return; } run_write(sc, RT2860_BCN_TIME_CFG, tmp); } static void run_enable_tsf(struct run_softc *sc) { uint32_t tmp; if (run_read(sc, RT2860_BCN_TIME_CFG, &tmp) == 0) { tmp &= ~(RT2860_BCN_TX_EN | RT2860_TBTT_TIMER_EN); tmp |= RT2860_TSF_TIMER_EN; run_write(sc, RT2860_BCN_TIME_CFG, tmp); } } static void run_get_tsf(struct run_softc *sc, uint64_t *buf) { run_read_region_1(sc, RT2860_TSF_TIMER_DW0, (uint8_t *)buf, sizeof(*buf)); } static void run_enable_mrr(struct run_softc *sc) { #define CCK(mcs) (mcs) #define OFDM(mcs) (1 << 3 | (mcs)) run_write(sc, RT2860_LG_FBK_CFG0, OFDM(6) << 28 | /* 54->48 */ OFDM(5) << 24 | /* 48->36 */ OFDM(4) << 20 | /* 36->24 */ OFDM(3) << 16 | /* 24->18 */ OFDM(2) << 12 | /* 18->12 */ OFDM(1) << 8 | /* 12-> 9 */ OFDM(0) << 4 | /* 9-> 6 */ OFDM(0)); /* 6-> 6 */ run_write(sc, RT2860_LG_FBK_CFG1, CCK(2) << 12 | /* 11->5.5 */ CCK(1) << 8 | /* 5.5-> 2 */ CCK(0) << 4 | /* 2-> 1 */ CCK(0)); /* 1-> 1 */ #undef OFDM #undef CCK } static void run_set_txpreamble(struct run_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; uint32_t tmp; run_read(sc, RT2860_AUTO_RSP_CFG, &tmp); if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) tmp |= RT2860_CCK_SHORT_EN; else tmp &= ~RT2860_CCK_SHORT_EN; run_write(sc, RT2860_AUTO_RSP_CFG, tmp); } static void run_set_basicrates(struct run_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; /* set basic rates mask */ if (ic->ic_curmode == IEEE80211_MODE_11B) run_write(sc, RT2860_LEGACY_BASIC_RATE, 0x003); else if (ic->ic_curmode == IEEE80211_MODE_11A) run_write(sc, RT2860_LEGACY_BASIC_RATE, 0x150); else /* 11g */ run_write(sc, RT2860_LEGACY_BASIC_RATE, 0x15f); } static void run_set_leds(struct run_softc *sc, uint16_t which) { (void)run_mcu_cmd(sc, RT2860_MCU_CMD_LEDS, which | (sc->leds & 0x7f)); } static void run_set_bssid(struct run_softc *sc, const uint8_t *bssid) { run_write(sc, RT2860_MAC_BSSID_DW0, bssid[0] | bssid[1] << 8 | bssid[2] << 16 | bssid[3] << 24); run_write(sc, RT2860_MAC_BSSID_DW1, bssid[4] | bssid[5] << 8); } static void run_set_macaddr(struct run_softc *sc, const uint8_t *addr) { run_write(sc, RT2860_MAC_ADDR_DW0, addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24); run_write(sc, RT2860_MAC_ADDR_DW1, addr[4] | addr[5] << 8 | 0xff << 16); } static void 
run_updateslot(struct ieee80211com *ic) { struct run_softc *sc = ic->ic_softc; uint32_t i; i = RUN_CMDQ_GET(&sc->cmdq_store); RUN_DPRINTF(sc, RUN_DEBUG_BEACON, "cmdq_store=%d\n", i); sc->cmdq[i].func = run_updateslot_cb; sc->cmdq[i].arg0 = ic; ieee80211_runtask(ic, &sc->cmdq_task); return; } /* ARGSUSED */ static void run_updateslot_cb(void *arg) { struct ieee80211com *ic = arg; struct run_softc *sc = ic->ic_softc; uint32_t tmp; run_read(sc, RT2860_BKOFF_SLOT_CFG, &tmp); tmp &= ~0xff; tmp |= IEEE80211_GET_SLOTTIME(ic); run_write(sc, RT2860_BKOFF_SLOT_CFG, tmp); } static void run_update_mcast(struct ieee80211com *ic) { } static int8_t run_rssi2dbm(struct run_softc *sc, uint8_t rssi, uint8_t rxchain) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211_channel *c = ic->ic_curchan; int delta; if (IEEE80211_IS_CHAN_5GHZ(c)) { u_int chan = ieee80211_chan2ieee(ic, c); delta = sc->rssi_5ghz[rxchain]; /* determine channel group */ if (chan <= 64) delta -= sc->lna[1]; else if (chan <= 128) delta -= sc->lna[2]; else delta -= sc->lna[3]; } else delta = sc->rssi_2ghz[rxchain] - sc->lna[0]; return (-12 - delta - rssi); } static void run_rt5390_bbp_init(struct run_softc *sc) { u_int i; uint8_t bbp; /* Apply maximum likelihood detection for 2 stream case. */ run_bbp_read(sc, 105, &bbp); if (sc->nrxchains > 1) run_bbp_write(sc, 105, bbp | RT5390_MLD); /* Avoid data lost and CRC error. */ run_bbp_read(sc, 4, &bbp); run_bbp_write(sc, 4, bbp | RT5390_MAC_IF_CTRL); if (sc->mac_ver == 0x5592) { for (i = 0; i < nitems(rt5592_def_bbp); i++) { run_bbp_write(sc, rt5592_def_bbp[i].reg, rt5592_def_bbp[i].val); } for (i = 0; i < nitems(rt5592_bbp_r196); i++) { run_bbp_write(sc, 195, i + 0x80); run_bbp_write(sc, 196, rt5592_bbp_r196[i]); } } else { for (i = 0; i < nitems(rt5390_def_bbp); i++) { run_bbp_write(sc, rt5390_def_bbp[i].reg, rt5390_def_bbp[i].val); } } if (sc->mac_ver == 0x5392) { run_bbp_write(sc, 88, 0x90); run_bbp_write(sc, 95, 0x9a); run_bbp_write(sc, 98, 0x12); run_bbp_write(sc, 106, 0x12); run_bbp_write(sc, 134, 0xd0); run_bbp_write(sc, 135, 0xf6); run_bbp_write(sc, 148, 0x84); } run_bbp_read(sc, 152, &bbp); run_bbp_write(sc, 152, bbp | 0x80); /* Fix BBP254 for RT5592C. */ if (sc->mac_ver == 0x5592 && sc->mac_rev >= 0x0221) { run_bbp_read(sc, 254, &bbp); run_bbp_write(sc, 254, bbp | 0x80); } /* Disable hardware antenna diversity. */ if (sc->mac_ver == 0x5390) run_bbp_write(sc, 154, 0); /* Initialize Rx CCK/OFDM frequency offset report. 
*/ run_bbp_write(sc, 142, 1); run_bbp_write(sc, 143, 57); } static int run_bbp_init(struct run_softc *sc) { int i, error, ntries; uint8_t bbp0; /* wait for BBP to wake up */ for (ntries = 0; ntries < 20; ntries++) { if ((error = run_bbp_read(sc, 0, &bbp0)) != 0) return error; if (bbp0 != 0 && bbp0 != 0xff) break; } if (ntries == 20) return (ETIMEDOUT); /* initialize BBP registers to default values */ if (sc->mac_ver >= 0x5390) run_rt5390_bbp_init(sc); else { for (i = 0; i < nitems(rt2860_def_bbp); i++) { run_bbp_write(sc, rt2860_def_bbp[i].reg, rt2860_def_bbp[i].val); } } if (sc->mac_ver == 0x3593) { run_bbp_write(sc, 79, 0x13); run_bbp_write(sc, 80, 0x05); run_bbp_write(sc, 81, 0x33); run_bbp_write(sc, 86, 0x46); run_bbp_write(sc, 137, 0x0f); } /* fix BBP84 for RT2860E */ if (sc->mac_ver == 0x2860 && sc->mac_rev != 0x0101) run_bbp_write(sc, 84, 0x19); if (sc->mac_ver >= 0x3070 && (sc->mac_ver != 0x3593 && sc->mac_ver != 0x5592)) { run_bbp_write(sc, 79, 0x13); run_bbp_write(sc, 80, 0x05); run_bbp_write(sc, 81, 0x33); } else if (sc->mac_ver == 0x2860 && sc->mac_rev == 0x0100) { run_bbp_write(sc, 69, 0x16); run_bbp_write(sc, 73, 0x12); } return (0); } static int run_rt3070_rf_init(struct run_softc *sc) { uint32_t tmp; uint8_t bbp4, mingain, rf, target; u_int i; run_rt3070_rf_read(sc, 30, &rf); /* toggle RF R30 bit 7 */ run_rt3070_rf_write(sc, 30, rf | 0x80); run_delay(sc, 10); run_rt3070_rf_write(sc, 30, rf & ~0x80); /* initialize RF registers to default value */ if (sc->mac_ver == 0x3572) { for (i = 0; i < nitems(rt3572_def_rf); i++) { run_rt3070_rf_write(sc, rt3572_def_rf[i].reg, rt3572_def_rf[i].val); } } else { for (i = 0; i < nitems(rt3070_def_rf); i++) { run_rt3070_rf_write(sc, rt3070_def_rf[i].reg, rt3070_def_rf[i].val); } } if (sc->mac_ver == 0x3070 && sc->mac_rev < 0x0201) { /* * Change voltage from 1.2V to 1.35V for RT3070. * The DAC issue (RT3070_LDO_CFG0) has been fixed * in RT3070(F). */ run_read(sc, RT3070_LDO_CFG0, &tmp); tmp = (tmp & ~0x0f000000) | 0x0d000000; run_write(sc, RT3070_LDO_CFG0, tmp); } else if (sc->mac_ver == 0x3071) { run_rt3070_rf_read(sc, 6, &rf); run_rt3070_rf_write(sc, 6, rf | 0x40); run_rt3070_rf_write(sc, 31, 0x14); run_read(sc, RT3070_LDO_CFG0, &tmp); tmp &= ~0x1f000000; if (sc->mac_rev < 0x0211) tmp |= 0x0d000000; /* 1.3V */ else tmp |= 0x01000000; /* 1.2V */ run_write(sc, RT3070_LDO_CFG0, tmp); /* patch LNA_PE_G1 */ run_read(sc, RT3070_GPIO_SWITCH, &tmp); run_write(sc, RT3070_GPIO_SWITCH, tmp & ~0x20); } else if (sc->mac_ver == 0x3572) { run_rt3070_rf_read(sc, 6, &rf); run_rt3070_rf_write(sc, 6, rf | 0x40); /* increase voltage from 1.2V to 1.35V */ run_read(sc, RT3070_LDO_CFG0, &tmp); tmp = (tmp & ~0x1f000000) | 0x0d000000; run_write(sc, RT3070_LDO_CFG0, tmp); if (sc->mac_rev < 0x0211 || !sc->patch_dac) { run_delay(sc, 1); /* wait for 1msec */ /* decrease voltage back to 1.2V */ tmp = (tmp & ~0x1f000000) | 0x01000000; run_write(sc, RT3070_LDO_CFG0, tmp); } } /* select 20MHz bandwidth */ run_rt3070_rf_read(sc, 31, &rf); run_rt3070_rf_write(sc, 31, rf & ~0x20); /* calibrate filter for 20MHz bandwidth */ sc->rf24_20mhz = 0x1f; /* default value */ target = (sc->mac_ver < 0x3071) ? 0x16 : 0x13; run_rt3070_filter_calib(sc, 0x07, target, &sc->rf24_20mhz); /* select 40MHz bandwidth */ run_bbp_read(sc, 4, &bbp4); run_bbp_write(sc, 4, (bbp4 & ~0x18) | 0x10); run_rt3070_rf_read(sc, 31, &rf); run_rt3070_rf_write(sc, 31, rf | 0x20); /* calibrate filter for 40MHz bandwidth */ sc->rf24_40mhz = 0x2f; /* default value */ target = (sc->mac_ver < 0x3071) ? 
0x19 : 0x15; run_rt3070_filter_calib(sc, 0x27, target, &sc->rf24_40mhz); /* go back to 20MHz bandwidth */ run_bbp_read(sc, 4, &bbp4); run_bbp_write(sc, 4, bbp4 & ~0x18); if (sc->mac_ver == 0x3572) { /* save default BBP registers 25 and 26 values */ run_bbp_read(sc, 25, &sc->bbp25); run_bbp_read(sc, 26, &sc->bbp26); } else if (sc->mac_rev < 0x0201 || sc->mac_rev < 0x0211) run_rt3070_rf_write(sc, 27, 0x03); run_read(sc, RT3070_OPT_14, &tmp); run_write(sc, RT3070_OPT_14, tmp | 1); if (sc->mac_ver == 0x3070 || sc->mac_ver == 0x3071) { run_rt3070_rf_read(sc, 17, &rf); rf &= ~RT3070_TX_LO1; if ((sc->mac_ver == 0x3070 || (sc->mac_ver == 0x3071 && sc->mac_rev >= 0x0211)) && !sc->ext_2ghz_lna) rf |= 0x20; /* fix for long range Rx issue */ mingain = (sc->mac_ver == 0x3070) ? 1 : 2; if (sc->txmixgain_2ghz >= mingain) rf = (rf & ~0x7) | sc->txmixgain_2ghz; run_rt3070_rf_write(sc, 17, rf); } if (sc->mac_ver == 0x3071) { run_rt3070_rf_read(sc, 1, &rf); rf &= ~(RT3070_RX0_PD | RT3070_TX0_PD); rf |= RT3070_RF_BLOCK | RT3070_RX1_PD | RT3070_TX1_PD; run_rt3070_rf_write(sc, 1, rf); run_rt3070_rf_read(sc, 15, &rf); run_rt3070_rf_write(sc, 15, rf & ~RT3070_TX_LO2); run_rt3070_rf_read(sc, 20, &rf); run_rt3070_rf_write(sc, 20, rf & ~RT3070_RX_LO1); run_rt3070_rf_read(sc, 21, &rf); run_rt3070_rf_write(sc, 21, rf & ~RT3070_RX_LO2); } if (sc->mac_ver == 0x3070 || sc->mac_ver == 0x3071) { /* fix Tx to Rx IQ glitch by raising RF voltage */ run_rt3070_rf_read(sc, 27, &rf); rf &= ~0x77; if (sc->mac_rev < 0x0211) rf |= 0x03; run_rt3070_rf_write(sc, 27, rf); } return (0); } static void run_rt3593_rf_init(struct run_softc *sc) { uint32_t tmp; uint8_t rf; u_int i; /* Disable the GPIO bits 4 and 7 for LNA PE control. */ run_read(sc, RT3070_GPIO_SWITCH, &tmp); tmp &= ~(1 << 4 | 1 << 7); run_write(sc, RT3070_GPIO_SWITCH, tmp); /* Initialize RF registers to default value. */ for (i = 0; i < nitems(rt3593_def_rf); i++) { run_rt3070_rf_write(sc, rt3593_def_rf[i].reg, rt3593_def_rf[i].val); } /* Toggle RF R2 to initiate calibration. */ run_rt3070_rf_write(sc, 2, RT5390_RESCAL); /* Initialize RF frequency offset. */ run_adjust_freq_offset(sc); run_rt3070_rf_read(sc, 18, &rf); run_rt3070_rf_write(sc, 18, rf | RT3593_AUTOTUNE_BYPASS); /* * Increase voltage from 1.2V to 1.35V, wait for 1 msec to * decrease voltage back to 1.2V. */ run_read(sc, RT3070_LDO_CFG0, &tmp); tmp = (tmp & ~0x1f000000) | 0x0d000000; run_write(sc, RT3070_LDO_CFG0, tmp); run_delay(sc, 1); tmp = (tmp & ~0x1f000000) | 0x01000000; run_write(sc, RT3070_LDO_CFG0, tmp); sc->rf24_20mhz = 0x1f; sc->rf24_40mhz = 0x2f; /* Save default BBP registers 25 and 26 values. */ run_bbp_read(sc, 25, &sc->bbp25); run_bbp_read(sc, 26, &sc->bbp26); run_read(sc, RT3070_OPT_14, &tmp); run_write(sc, RT3070_OPT_14, tmp | 1); } static void run_rt5390_rf_init(struct run_softc *sc) { uint32_t tmp; uint8_t rf; u_int i; /* Toggle RF R2 to initiate calibration. */ if (sc->mac_ver == 0x5390) { run_rt3070_rf_read(sc, 2, &rf); run_rt3070_rf_write(sc, 2, rf | RT5390_RESCAL); run_delay(sc, 10); run_rt3070_rf_write(sc, 2, rf & ~RT5390_RESCAL); } else { run_rt3070_rf_write(sc, 2, RT5390_RESCAL); run_delay(sc, 10); } /* Initialize RF registers to default value. */ if (sc->mac_ver == 0x5592) { for (i = 0; i < nitems(rt5592_def_rf); i++) { run_rt3070_rf_write(sc, rt5592_def_rf[i].reg, rt5592_def_rf[i].val); } /* Initialize RF frequency offset. 
*/ run_adjust_freq_offset(sc); } else if (sc->mac_ver == 0x5392) { for (i = 0; i < nitems(rt5392_def_rf); i++) { run_rt3070_rf_write(sc, rt5392_def_rf[i].reg, rt5392_def_rf[i].val); } if (sc->mac_rev >= 0x0223) { run_rt3070_rf_write(sc, 23, 0x0f); run_rt3070_rf_write(sc, 24, 0x3e); run_rt3070_rf_write(sc, 51, 0x32); run_rt3070_rf_write(sc, 53, 0x22); run_rt3070_rf_write(sc, 56, 0xc1); run_rt3070_rf_write(sc, 59, 0x0f); } } else { for (i = 0; i < nitems(rt5390_def_rf); i++) { run_rt3070_rf_write(sc, rt5390_def_rf[i].reg, rt5390_def_rf[i].val); } if (sc->mac_rev >= 0x0502) { run_rt3070_rf_write(sc, 6, 0xe0); run_rt3070_rf_write(sc, 25, 0x80); run_rt3070_rf_write(sc, 46, 0x73); run_rt3070_rf_write(sc, 53, 0x00); run_rt3070_rf_write(sc, 56, 0x42); run_rt3070_rf_write(sc, 61, 0xd1); } } sc->rf24_20mhz = 0x1f; /* default value */ sc->rf24_40mhz = (sc->mac_ver == 0x5592) ? 0 : 0x2f; if (sc->mac_rev < 0x0211) run_rt3070_rf_write(sc, 27, 0x3); run_read(sc, RT3070_OPT_14, &tmp); run_write(sc, RT3070_OPT_14, tmp | 1); } static int run_rt3070_filter_calib(struct run_softc *sc, uint8_t init, uint8_t target, uint8_t *val) { uint8_t rf22, rf24; uint8_t bbp55_pb, bbp55_sb, delta; int ntries; /* program filter */ run_rt3070_rf_read(sc, 24, &rf24); rf24 = (rf24 & 0xc0) | init; /* initial filter value */ run_rt3070_rf_write(sc, 24, rf24); /* enable baseband loopback mode */ run_rt3070_rf_read(sc, 22, &rf22); run_rt3070_rf_write(sc, 22, rf22 | 0x01); /* set power and frequency of passband test tone */ run_bbp_write(sc, 24, 0x00); for (ntries = 0; ntries < 100; ntries++) { /* transmit test tone */ run_bbp_write(sc, 25, 0x90); run_delay(sc, 10); /* read received power */ run_bbp_read(sc, 55, &bbp55_pb); if (bbp55_pb != 0) break; } if (ntries == 100) return (ETIMEDOUT); /* set power and frequency of stopband test tone */ run_bbp_write(sc, 24, 0x06); for (ntries = 0; ntries < 100; ntries++) { /* transmit test tone */ run_bbp_write(sc, 25, 0x90); run_delay(sc, 10); /* read received power */ run_bbp_read(sc, 55, &bbp55_sb); delta = bbp55_pb - bbp55_sb; if (delta > target) break; /* reprogram filter */ rf24++; run_rt3070_rf_write(sc, 24, rf24); } if (ntries < 100) { if (rf24 != init) rf24--; /* backtrack */ *val = rf24; run_rt3070_rf_write(sc, 24, rf24); } /* restore initial state */ run_bbp_write(sc, 24, 0x00); /* disable baseband loopback mode */ run_rt3070_rf_read(sc, 22, &rf22); run_rt3070_rf_write(sc, 22, rf22 & ~0x01); return (0); } static void run_rt3070_rf_setup(struct run_softc *sc) { uint8_t bbp, rf; int i; if (sc->mac_ver == 0x3572) { /* enable DC filter */ if (sc->mac_rev >= 0x0201) run_bbp_write(sc, 103, 0xc0); run_bbp_read(sc, 138, &bbp); if (sc->ntxchains == 1) bbp |= 0x20; /* turn off DAC1 */ if (sc->nrxchains == 1) bbp &= ~0x02; /* turn off ADC1 */ run_bbp_write(sc, 138, bbp); if (sc->mac_rev >= 0x0211) { /* improve power consumption */ run_bbp_read(sc, 31, &bbp); run_bbp_write(sc, 31, bbp & ~0x03); } run_rt3070_rf_read(sc, 16, &rf); rf = (rf & ~0x07) | sc->txmixgain_2ghz; run_rt3070_rf_write(sc, 16, rf); } else if (sc->mac_ver == 0x3071) { if (sc->mac_rev >= 0x0211) { /* enable DC filter */ run_bbp_write(sc, 103, 0xc0); /* improve power consumption */ run_bbp_read(sc, 31, &bbp); run_bbp_write(sc, 31, bbp & ~0x03); } run_bbp_read(sc, 138, &bbp); if (sc->ntxchains == 1) bbp |= 0x20; /* turn off DAC1 */ if (sc->nrxchains == 1) bbp &= ~0x02; /* turn off ADC1 */ run_bbp_write(sc, 138, bbp); run_write(sc, RT2860_TX_SW_CFG1, 0); if (sc->mac_rev < 0x0211) { run_write(sc, RT2860_TX_SW_CFG2, sc->patch_dac ? 
0x2c : 0x0f); } else run_write(sc, RT2860_TX_SW_CFG2, 0); } else if (sc->mac_ver == 0x3070) { if (sc->mac_rev >= 0x0201) { /* enable DC filter */ run_bbp_write(sc, 103, 0xc0); /* improve power consumption */ run_bbp_read(sc, 31, &bbp); run_bbp_write(sc, 31, bbp & ~0x03); } if (sc->mac_rev < 0x0201) { run_write(sc, RT2860_TX_SW_CFG1, 0); run_write(sc, RT2860_TX_SW_CFG2, 0x2c); } else run_write(sc, RT2860_TX_SW_CFG2, 0); } /* initialize RF registers from ROM for >=RT3071*/ if (sc->mac_ver >= 0x3071) { for (i = 0; i < 10; i++) { if (sc->rf[i].reg == 0 || sc->rf[i].reg == 0xff) continue; run_rt3070_rf_write(sc, sc->rf[i].reg, sc->rf[i].val); } } } static void run_rt3593_rf_setup(struct run_softc *sc) { uint8_t bbp, rf; if (sc->mac_rev >= 0x0211) { /* Enable DC filter. */ run_bbp_write(sc, 103, 0xc0); } run_write(sc, RT2860_TX_SW_CFG1, 0); if (sc->mac_rev < 0x0211) { run_write(sc, RT2860_TX_SW_CFG2, sc->patch_dac ? 0x2c : 0x0f); } else run_write(sc, RT2860_TX_SW_CFG2, 0); run_rt3070_rf_read(sc, 50, &rf); run_rt3070_rf_write(sc, 50, rf & ~RT3593_TX_LO2); run_rt3070_rf_read(sc, 51, &rf); rf = (rf & ~(RT3593_TX_LO1 | 0x0c)) | ((sc->txmixgain_2ghz & 0x07) << 2); run_rt3070_rf_write(sc, 51, rf); run_rt3070_rf_read(sc, 38, &rf); run_rt3070_rf_write(sc, 38, rf & ~RT5390_RX_LO1); run_rt3070_rf_read(sc, 39, &rf); run_rt3070_rf_write(sc, 39, rf & ~RT5390_RX_LO2); run_rt3070_rf_read(sc, 1, &rf); run_rt3070_rf_write(sc, 1, rf & ~(RT3070_RF_BLOCK | RT3070_PLL_PD)); run_rt3070_rf_read(sc, 30, &rf); rf = (rf & ~0x18) | 0x10; run_rt3070_rf_write(sc, 30, rf); /* Apply maximum likelihood detection for 2 stream case. */ run_bbp_read(sc, 105, &bbp); if (sc->nrxchains > 1) run_bbp_write(sc, 105, bbp | RT5390_MLD); /* Avoid data lost and CRC error. */ run_bbp_read(sc, 4, &bbp); run_bbp_write(sc, 4, bbp | RT5390_MAC_IF_CTRL); run_bbp_write(sc, 92, 0x02); run_bbp_write(sc, 82, 0x82); run_bbp_write(sc, 106, 0x05); run_bbp_write(sc, 104, 0x92); run_bbp_write(sc, 88, 0x90); run_bbp_write(sc, 148, 0xc8); run_bbp_write(sc, 47, 0x48); run_bbp_write(sc, 120, 0x50); run_bbp_write(sc, 163, 0x9d); /* SNR mapping. */ run_bbp_write(sc, 142, 0x06); run_bbp_write(sc, 143, 0xa0); run_bbp_write(sc, 142, 0x07); run_bbp_write(sc, 143, 0xa1); run_bbp_write(sc, 142, 0x08); run_bbp_write(sc, 143, 0xa2); run_bbp_write(sc, 31, 0x08); run_bbp_write(sc, 68, 0x0b); run_bbp_write(sc, 105, 0x04); } static void run_rt5390_rf_setup(struct run_softc *sc) { uint8_t bbp, rf; if (sc->mac_rev >= 0x0211) { /* Enable DC filter. */ run_bbp_write(sc, 103, 0xc0); if (sc->mac_ver != 0x5592) { /* Improve power consumption. */ run_bbp_read(sc, 31, &bbp); run_bbp_write(sc, 31, bbp & ~0x03); } } run_bbp_read(sc, 138, &bbp); if (sc->ntxchains == 1) bbp |= 0x20; /* turn off DAC1 */ if (sc->nrxchains == 1) bbp &= ~0x02; /* turn off ADC1 */ run_bbp_write(sc, 138, bbp); run_rt3070_rf_read(sc, 38, &rf); run_rt3070_rf_write(sc, 38, rf & ~RT5390_RX_LO1); run_rt3070_rf_read(sc, 39, &rf); run_rt3070_rf_write(sc, 39, rf & ~RT5390_RX_LO2); /* Avoid data lost and CRC error. */ run_bbp_read(sc, 4, &bbp); run_bbp_write(sc, 4, bbp | RT5390_MAC_IF_CTRL); run_rt3070_rf_read(sc, 30, &rf); rf = (rf & ~0x18) | 0x10; run_rt3070_rf_write(sc, 30, rf); if (sc->mac_ver != 0x5592) { run_write(sc, RT2860_TX_SW_CFG1, 0); if (sc->mac_rev < 0x0211) { run_write(sc, RT2860_TX_SW_CFG2, sc->patch_dac ? 
0x2c : 0x0f); } else run_write(sc, RT2860_TX_SW_CFG2, 0); } } static int run_txrx_enable(struct run_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; uint32_t tmp; int error, ntries; run_write(sc, RT2860_MAC_SYS_CTRL, RT2860_MAC_TX_EN); for (ntries = 0; ntries < 200; ntries++) { if ((error = run_read(sc, RT2860_WPDMA_GLO_CFG, &tmp)) != 0) return (error); if ((tmp & (RT2860_TX_DMA_BUSY | RT2860_RX_DMA_BUSY)) == 0) break; run_delay(sc, 50); } if (ntries == 200) return (ETIMEDOUT); run_delay(sc, 50); tmp |= RT2860_RX_DMA_EN | RT2860_TX_DMA_EN | RT2860_TX_WB_DDONE; run_write(sc, RT2860_WPDMA_GLO_CFG, tmp); /* enable Rx bulk aggregation (set timeout and limit) */ tmp = RT2860_USB_TX_EN | RT2860_USB_RX_EN | RT2860_USB_RX_AGG_EN | RT2860_USB_RX_AGG_TO(128) | RT2860_USB_RX_AGG_LMT(2); run_write(sc, RT2860_USB_DMA_CFG, tmp); /* set Rx filter */ tmp = RT2860_DROP_CRC_ERR | RT2860_DROP_PHY_ERR; if (ic->ic_opmode != IEEE80211_M_MONITOR) { tmp |= RT2860_DROP_UC_NOME | RT2860_DROP_DUPL | RT2860_DROP_CTS | RT2860_DROP_BA | RT2860_DROP_ACK | RT2860_DROP_VER_ERR | RT2860_DROP_CTRL_RSV | RT2860_DROP_CFACK | RT2860_DROP_CFEND; if (ic->ic_opmode == IEEE80211_M_STA) tmp |= RT2860_DROP_RTS | RT2860_DROP_PSPOLL; } run_write(sc, RT2860_RX_FILTR_CFG, tmp); run_write(sc, RT2860_MAC_SYS_CTRL, RT2860_MAC_RX_EN | RT2860_MAC_TX_EN); return (0); } static void run_adjust_freq_offset(struct run_softc *sc) { uint8_t rf, tmp; run_rt3070_rf_read(sc, 17, &rf); tmp = rf; rf = (rf & ~0x7f) | (sc->freq & 0x7f); rf = MIN(rf, 0x5f); if (tmp != rf) run_mcu_cmd(sc, 0x74, (tmp << 8 ) | rf); } static void run_init_locked(struct run_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); uint32_t tmp; uint8_t bbp1, bbp3; int i; int ridx; int ntries; if (ic->ic_nrunning > 1) return; run_stop(sc); if (run_load_microcode(sc) != 0) { device_printf(sc->sc_dev, "could not load 8051 microcode\n"); goto fail; } for (ntries = 0; ntries < 100; ntries++) { if (run_read(sc, RT2860_ASIC_VER_ID, &tmp) != 0) goto fail; if (tmp != 0 && tmp != 0xffffffff) break; run_delay(sc, 10); } if (ntries == 100) goto fail; for (i = 0; i != RUN_EP_QUEUES; i++) run_setup_tx_list(sc, &sc->sc_epq[i]); run_set_macaddr(sc, vap ? 
vap->iv_myaddr : ic->ic_macaddr); for (ntries = 0; ntries < 100; ntries++) { if (run_read(sc, RT2860_WPDMA_GLO_CFG, &tmp) != 0) goto fail; if ((tmp & (RT2860_TX_DMA_BUSY | RT2860_RX_DMA_BUSY)) == 0) break; run_delay(sc, 10); } if (ntries == 100) { device_printf(sc->sc_dev, "timeout waiting for DMA engine\n"); goto fail; } tmp &= 0xff0; tmp |= RT2860_TX_WB_DDONE; run_write(sc, RT2860_WPDMA_GLO_CFG, tmp); /* turn off PME_OEN to solve high-current issue */ run_read(sc, RT2860_SYS_CTRL, &tmp); run_write(sc, RT2860_SYS_CTRL, tmp & ~RT2860_PME_OEN); run_write(sc, RT2860_MAC_SYS_CTRL, RT2860_BBP_HRST | RT2860_MAC_SRST); run_write(sc, RT2860_USB_DMA_CFG, 0); if (run_reset(sc) != 0) { device_printf(sc->sc_dev, "could not reset chipset\n"); goto fail; } run_write(sc, RT2860_MAC_SYS_CTRL, 0); /* init Tx power for all Tx rates (from EEPROM) */ for (ridx = 0; ridx < 5; ridx++) { if (sc->txpow20mhz[ridx] == 0xffffffff) continue; run_write(sc, RT2860_TX_PWR_CFG(ridx), sc->txpow20mhz[ridx]); } for (i = 0; i < nitems(rt2870_def_mac); i++) run_write(sc, rt2870_def_mac[i].reg, rt2870_def_mac[i].val); run_write(sc, RT2860_WMM_AIFSN_CFG, 0x00002273); run_write(sc, RT2860_WMM_CWMIN_CFG, 0x00002344); run_write(sc, RT2860_WMM_CWMAX_CFG, 0x000034aa); if (sc->mac_ver >= 0x5390) { run_write(sc, RT2860_TX_SW_CFG0, 4 << RT2860_DLY_PAPE_EN_SHIFT | 4); if (sc->mac_ver >= 0x5392) { run_write(sc, RT2860_MAX_LEN_CFG, 0x00002fff); if (sc->mac_ver == 0x5592) { run_write(sc, RT2860_HT_FBK_CFG1, 0xedcba980); run_write(sc, RT2860_TXOP_HLDR_ET, 0x00000082); } else { run_write(sc, RT2860_HT_FBK_CFG1, 0xedcb4980); run_write(sc, RT2860_LG_FBK_CFG0, 0xedcba322); } } } else if (sc->mac_ver == 0x3593) { run_write(sc, RT2860_TX_SW_CFG0, 4 << RT2860_DLY_PAPE_EN_SHIFT | 2); } else if (sc->mac_ver >= 0x3070) { /* set delay of PA_PE assertion to 1us (unit of 0.25us) */ run_write(sc, RT2860_TX_SW_CFG0, 4 << RT2860_DLY_PAPE_EN_SHIFT); } /* wait while MAC is busy */ for (ntries = 0; ntries < 100; ntries++) { if (run_read(sc, RT2860_MAC_STATUS_REG, &tmp) != 0) goto fail; if (!(tmp & (RT2860_RX_STATUS_BUSY | RT2860_TX_STATUS_BUSY))) break; run_delay(sc, 10); } if (ntries == 100) goto fail; /* clear Host to MCU mailbox */ run_write(sc, RT2860_H2M_BBPAGENT, 0); run_write(sc, RT2860_H2M_MAILBOX, 0); run_delay(sc, 10); if (run_bbp_init(sc) != 0) { device_printf(sc->sc_dev, "could not initialize BBP\n"); goto fail; } /* abort TSF synchronization */ run_read(sc, RT2860_BCN_TIME_CFG, &tmp); tmp &= ~(RT2860_BCN_TX_EN | RT2860_TSF_TIMER_EN | RT2860_TBTT_TIMER_EN); run_write(sc, RT2860_BCN_TIME_CFG, tmp); /* clear RX WCID search table */ run_set_region_4(sc, RT2860_WCID_ENTRY(0), 0, 512); /* clear WCID attribute table */ run_set_region_4(sc, RT2860_WCID_ATTR(0), 0, 8 * 32); /* hostapd sets a key before init. So, don't clear it. 
*/ if (sc->cmdq_key_set != RUN_CMDQ_GO) { /* clear shared key table */ run_set_region_4(sc, RT2860_SKEY(0, 0), 0, 8 * 32); /* clear shared key mode */ run_set_region_4(sc, RT2860_SKEY_MODE_0_7, 0, 4); } run_read(sc, RT2860_US_CYC_CNT, &tmp); tmp = (tmp & ~0xff) | 0x1e; run_write(sc, RT2860_US_CYC_CNT, tmp); if (sc->mac_rev != 0x0101) run_write(sc, RT2860_TXOP_CTRL_CFG, 0x0000583f); run_write(sc, RT2860_WMM_TXOP0_CFG, 0); run_write(sc, RT2860_WMM_TXOP1_CFG, 48 << 16 | 96); /* write vendor-specific BBP values (from EEPROM) */ if (sc->mac_ver < 0x3593) { for (i = 0; i < 10; i++) { if (sc->bbp[i].reg == 0 || sc->bbp[i].reg == 0xff) continue; run_bbp_write(sc, sc->bbp[i].reg, sc->bbp[i].val); } } /* select Main antenna for 1T1R devices */ if (sc->rf_rev == RT3070_RF_3020 || sc->rf_rev == RT5390_RF_5370) run_set_rx_antenna(sc, 0); /* send LEDs operating mode to microcontroller */ (void)run_mcu_cmd(sc, RT2860_MCU_CMD_LED1, sc->led[0]); (void)run_mcu_cmd(sc, RT2860_MCU_CMD_LED2, sc->led[1]); (void)run_mcu_cmd(sc, RT2860_MCU_CMD_LED3, sc->led[2]); if (sc->mac_ver >= 0x5390) run_rt5390_rf_init(sc); else if (sc->mac_ver == 0x3593) run_rt3593_rf_init(sc); else if (sc->mac_ver >= 0x3070) run_rt3070_rf_init(sc); /* disable non-existing Rx chains */ run_bbp_read(sc, 3, &bbp3); bbp3 &= ~(1 << 3 | 1 << 4); if (sc->nrxchains == 2) bbp3 |= 1 << 3; else if (sc->nrxchains == 3) bbp3 |= 1 << 4; run_bbp_write(sc, 3, bbp3); /* disable non-existing Tx chains */ run_bbp_read(sc, 1, &bbp1); if (sc->ntxchains == 1) bbp1 &= ~(1 << 3 | 1 << 4); run_bbp_write(sc, 1, bbp1); if (sc->mac_ver >= 0x5390) run_rt5390_rf_setup(sc); else if (sc->mac_ver == 0x3593) run_rt3593_rf_setup(sc); else if (sc->mac_ver >= 0x3070) run_rt3070_rf_setup(sc); /* select default channel */ run_set_chan(sc, ic->ic_curchan); /* setup initial protection mode */ run_updateprot_cb(ic); /* turn radio LED on */ run_set_leds(sc, RT2860_LED_RADIO); sc->sc_flags |= RUN_RUNNING; sc->cmdq_run = RUN_CMDQ_GO; for (i = 0; i != RUN_N_XFER; i++) usbd_xfer_set_stall(sc->sc_xfer[i]); usbd_transfer_start(sc->sc_xfer[RUN_BULK_RX]); if (run_txrx_enable(sc) != 0) goto fail; return; fail: run_stop(sc); } static void run_stop(void *arg) { struct run_softc *sc = (struct run_softc *)arg; uint32_t tmp; int i; int ntries; RUN_LOCK_ASSERT(sc, MA_OWNED); if (sc->sc_flags & RUN_RUNNING) run_set_leds(sc, 0); /* turn all LEDs off */ sc->sc_flags &= ~RUN_RUNNING; sc->ratectl_run = RUN_RATECTL_OFF; sc->cmdq_run = sc->cmdq_key_set; RUN_UNLOCK(sc); for(i = 0; i < RUN_N_XFER; i++) usbd_transfer_drain(sc->sc_xfer[i]); RUN_LOCK(sc); run_drain_mbufq(sc); if (sc->rx_m != NULL) { m_free(sc->rx_m); sc->rx_m = NULL; } /* Disable Tx/Rx DMA. 
 */
	if (run_read(sc, RT2860_WPDMA_GLO_CFG, &tmp) != 0)
		return;
	tmp &= ~(RT2860_RX_DMA_EN | RT2860_TX_DMA_EN);
	run_write(sc, RT2860_WPDMA_GLO_CFG, tmp);

	for (ntries = 0; ntries < 100; ntries++) {
		if (run_read(sc, RT2860_WPDMA_GLO_CFG, &tmp) != 0)
			return;
		if ((tmp & (RT2860_TX_DMA_BUSY | RT2860_RX_DMA_BUSY)) == 0)
			break;
		run_delay(sc, 10);
	}
	if (ntries == 100) {
		device_printf(sc->sc_dev, "timeout waiting for DMA engine\n");
		return;
	}

	/* disable Tx/Rx */
	run_read(sc, RT2860_MAC_SYS_CTRL, &tmp);
	tmp &= ~(RT2860_MAC_RX_EN | RT2860_MAC_TX_EN);
	run_write(sc, RT2860_MAC_SYS_CTRL, tmp);

	/* wait for pending Tx to complete */
	for (ntries = 0; ntries < 100; ntries++) {
		if (run_read(sc, RT2860_TXRXQ_PCNT, &tmp) != 0) {
			RUN_DPRINTF(sc, RUN_DEBUG_XMIT | RUN_DEBUG_RESET,
			    "Cannot read Tx queue count\n");
			break;
		}
		if ((tmp & RT2860_TX2Q_PCNT_MASK) == 0) {
			RUN_DPRINTF(sc, RUN_DEBUG_XMIT | RUN_DEBUG_RESET,
			    "All Tx cleared\n");
			break;
		}
		run_delay(sc, 10);
	}
	if (ntries >= 100)
		RUN_DPRINTF(sc, RUN_DEBUG_XMIT | RUN_DEBUG_RESET,
		    "There are still pending Tx\n");
	run_delay(sc, 10);

	run_write(sc, RT2860_USB_DMA_CFG, 0);
	run_write(sc, RT2860_MAC_SYS_CTRL, RT2860_BBP_HRST | RT2860_MAC_SRST);
	run_write(sc, RT2860_MAC_SYS_CTRL, 0);

	for (i = 0; i != RUN_EP_QUEUES; i++)
		run_unsetup_tx_list(sc, &sc->sc_epq[i]);
}

static void
run_delay(struct run_softc *sc, u_int ms)
{
	usb_pause_mtx(mtx_owned(&sc->sc_mtx) ?
	    &sc->sc_mtx : NULL, USB_MS_TO_TICKS(ms));
}

static device_method_t run_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, run_match),
	DEVMETHOD(device_attach, run_attach),
	DEVMETHOD(device_detach, run_detach),
	DEVMETHOD_END
};

static driver_t run_driver = {
	.name = "run",
	.methods = run_methods,
	.size = sizeof(struct run_softc)
};

static devclass_t run_devclass;

DRIVER_MODULE(run, uhub, run_driver, run_devclass, run_driver_loaded, NULL);
MODULE_DEPEND(run, wlan, 1, 1, 1);
MODULE_DEPEND(run, usb, 1, 1, 1);
MODULE_DEPEND(run, firmware, 1, 1, 1);
MODULE_VERSION(run, 1);
USB_PNP_HOST_INFO(run_devs);

Index: head/sys/dev/usb/wlan/if_ural.c
===================================================================
--- head/sys/dev/usb/wlan/if_ural.c (revision 330687)
+++ head/sys/dev/usb/wlan/if_ural.c (revision 330688)
@@ -1,2242 +1,2228 @@
/* $FreeBSD$ */
/*-
 * Copyright (c) 2005, 2006
 *	Damien Bergamini
 *
 * Copyright (c) 2006, 2008
 *	Hans Petter Selasky
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/ #include __FBSDID("$FreeBSD$"); /*- * Ralink Technology RT2500USB chipset driver * http://www.ralinktech.com/ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef INET #include #include #include #include #include #endif #include #include #include #include #include #include #include "usbdevs.h" #define USB_DEBUG_VAR ural_debug #include #include #include #ifdef USB_DEBUG static int ural_debug = 0; static SYSCTL_NODE(_hw_usb, OID_AUTO, ural, CTLFLAG_RW, 0, "USB ural"); SYSCTL_INT(_hw_usb_ural, OID_AUTO, debug, CTLFLAG_RWTUN, &ural_debug, 0, "Debug level"); #endif #define URAL_RSSI(rssi) \ ((rssi) > (RAL_NOISE_FLOOR + RAL_RSSI_CORR) ? \ ((rssi) - (RAL_NOISE_FLOOR + RAL_RSSI_CORR)) : 0) /* various supported device vendors/products */ static const STRUCT_USB_HOST_ID ural_devs[] = { #define URAL_DEV(v,p) { USB_VP(USB_VENDOR_##v, USB_PRODUCT_##v##_##p) } URAL_DEV(ASUS, WL167G), URAL_DEV(ASUS, RT2570), URAL_DEV(BELKIN, F5D7050), URAL_DEV(BELKIN, F5D7051), URAL_DEV(CISCOLINKSYS, HU200TS), URAL_DEV(CISCOLINKSYS, WUSB54G), URAL_DEV(CISCOLINKSYS, WUSB54GP), URAL_DEV(CONCEPTRONIC2, C54RU), URAL_DEV(DLINK, DWLG122), URAL_DEV(GIGABYTE, GN54G), URAL_DEV(GIGABYTE, GNWBKG), URAL_DEV(GUILLEMOT, HWGUSB254), URAL_DEV(MELCO, KG54), URAL_DEV(MELCO, KG54AI), URAL_DEV(MELCO, KG54YB), URAL_DEV(MELCO, NINWIFI), URAL_DEV(MSI, RT2570), URAL_DEV(MSI, RT2570_2), URAL_DEV(MSI, RT2570_3), URAL_DEV(NOVATECH, NV902), URAL_DEV(RALINK, RT2570), URAL_DEV(RALINK, RT2570_2), URAL_DEV(RALINK, RT2570_3), URAL_DEV(SIEMENS2, WL54G), URAL_DEV(SMC, 2862WG), URAL_DEV(SPHAIRON, UB801R), URAL_DEV(SURECOM, RT2570), URAL_DEV(VTECH, RT2570), URAL_DEV(ZINWELL, RT2570), #undef URAL_DEV }; static usb_callback_t ural_bulk_read_callback; static usb_callback_t ural_bulk_write_callback; static usb_error_t ural_do_request(struct ural_softc *sc, struct usb_device_request *req, void *data); static struct ieee80211vap *ural_vap_create(struct ieee80211com *, const char [IFNAMSIZ], int, enum ieee80211_opmode, int, const uint8_t [IEEE80211_ADDR_LEN], const uint8_t [IEEE80211_ADDR_LEN]); static void ural_vap_delete(struct ieee80211vap *); static void ural_tx_free(struct ural_tx_data *, int); static void ural_setup_tx_list(struct ural_softc *); static void ural_unsetup_tx_list(struct ural_softc *); static int ural_newstate(struct ieee80211vap *, enum ieee80211_state, int); static void ural_setup_tx_desc(struct ural_softc *, struct ural_tx_desc *, uint32_t, int, int); static int ural_tx_bcn(struct ural_softc *, struct mbuf *, struct ieee80211_node *); static int ural_tx_mgt(struct ural_softc *, struct mbuf *, struct ieee80211_node *); static int ural_tx_data(struct ural_softc *, struct mbuf *, struct ieee80211_node *); static int ural_transmit(struct ieee80211com *, struct mbuf *); static void ural_start(struct ural_softc *); static void ural_parent(struct ieee80211com *); static void ural_set_testmode(struct ural_softc *); static void ural_eeprom_read(struct ural_softc *, uint16_t, void *, int); static uint16_t ural_read(struct ural_softc *, uint16_t); static void ural_read_multi(struct ural_softc *, uint16_t, void *, int); static void ural_write(struct ural_softc *, uint16_t, uint16_t); static void ural_write_multi(struct ural_softc *, uint16_t, void *, int) __unused; static void ural_bbp_write(struct ural_softc *, uint8_t, uint8_t); static uint8_t ural_bbp_read(struct 
ural_softc *, uint8_t); static void ural_rf_write(struct ural_softc *, uint8_t, uint32_t); static void ural_scan_start(struct ieee80211com *); static void ural_scan_end(struct ieee80211com *); static void ural_getradiocaps(struct ieee80211com *, int, int *, struct ieee80211_channel[]); static void ural_set_channel(struct ieee80211com *); static void ural_set_chan(struct ural_softc *, struct ieee80211_channel *); static void ural_disable_rf_tune(struct ural_softc *); static void ural_enable_tsf_sync(struct ural_softc *); static void ural_enable_tsf(struct ural_softc *); static void ural_update_slot(struct ural_softc *); static void ural_set_txpreamble(struct ural_softc *); static void ural_set_basicrates(struct ural_softc *, const struct ieee80211_channel *); static void ural_set_bssid(struct ural_softc *, const uint8_t *); static void ural_set_macaddr(struct ural_softc *, const uint8_t *); static void ural_update_promisc(struct ieee80211com *); static void ural_setpromisc(struct ural_softc *); static const char *ural_get_rf(int); static void ural_read_eeprom(struct ural_softc *); static int ural_bbp_init(struct ural_softc *); static void ural_set_txantenna(struct ural_softc *, int); static void ural_set_rxantenna(struct ural_softc *, int); static void ural_init(struct ural_softc *); static void ural_stop(struct ural_softc *); static int ural_raw_xmit(struct ieee80211_node *, struct mbuf *, const struct ieee80211_bpf_params *); static void ural_ratectl_start(struct ural_softc *, struct ieee80211_node *); static void ural_ratectl_timeout(void *); static void ural_ratectl_task(void *, int); static int ural_pause(struct ural_softc *sc, int timeout); /* * Default values for MAC registers; values taken from the reference driver. */ static const struct { uint16_t reg; uint16_t val; } ural_def_mac[] = { { RAL_TXRX_CSR5, 0x8c8d }, { RAL_TXRX_CSR6, 0x8b8a }, { RAL_TXRX_CSR7, 0x8687 }, { RAL_TXRX_CSR8, 0x0085 }, { RAL_MAC_CSR13, 0x1111 }, { RAL_MAC_CSR14, 0x1e11 }, { RAL_TXRX_CSR21, 0xe78f }, { RAL_MAC_CSR9, 0xff1d }, { RAL_MAC_CSR11, 0x0002 }, { RAL_MAC_CSR22, 0x0053 }, { RAL_MAC_CSR15, 0x0000 }, { RAL_MAC_CSR8, RAL_FRAME_SIZE }, { RAL_TXRX_CSR19, 0x0000 }, { RAL_TXRX_CSR18, 0x005a }, { RAL_PHY_CSR2, 0x0000 }, { RAL_TXRX_CSR0, 0x1ec0 }, { RAL_PHY_CSR4, 0x000f } }; /* * Default values for BBP registers; values taken from the reference driver. */ static const struct { uint8_t reg; uint8_t val; } ural_def_bbp[] = { { 3, 0x02 }, { 4, 0x19 }, { 14, 0x1c }, { 15, 0x30 }, { 16, 0xac }, { 17, 0x48 }, { 18, 0x18 }, { 19, 0xff }, { 20, 0x1e }, { 21, 0x08 }, { 22, 0x08 }, { 23, 0x08 }, { 24, 0x80 }, { 25, 0x50 }, { 26, 0x08 }, { 27, 0x23 }, { 30, 0x10 }, { 31, 0x2b }, { 32, 0xb9 }, { 34, 0x12 }, { 35, 0x50 }, { 39, 0xc4 }, { 40, 0x02 }, { 41, 0x60 }, { 53, 0x10 }, { 54, 0x18 }, { 56, 0x08 }, { 57, 0x10 }, { 58, 0x08 }, { 61, 0x60 }, { 62, 0x10 }, { 75, 0xff } }; /* * Default values for RF register R2 indexed by channel numbers. 
*/ static const uint32_t ural_rf2522_r2[] = { 0x307f6, 0x307fb, 0x30800, 0x30805, 0x3080a, 0x3080f, 0x30814, 0x30819, 0x3081e, 0x30823, 0x30828, 0x3082d, 0x30832, 0x3083e }; static const uint32_t ural_rf2523_r2[] = { 0x00327, 0x00328, 0x00329, 0x0032a, 0x0032b, 0x0032c, 0x0032d, 0x0032e, 0x0032f, 0x00340, 0x00341, 0x00342, 0x00343, 0x00346 }; static const uint32_t ural_rf2524_r2[] = { 0x00327, 0x00328, 0x00329, 0x0032a, 0x0032b, 0x0032c, 0x0032d, 0x0032e, 0x0032f, 0x00340, 0x00341, 0x00342, 0x00343, 0x00346 }; static const uint32_t ural_rf2525_r2[] = { 0x20327, 0x20328, 0x20329, 0x2032a, 0x2032b, 0x2032c, 0x2032d, 0x2032e, 0x2032f, 0x20340, 0x20341, 0x20342, 0x20343, 0x20346 }; static const uint32_t ural_rf2525_hi_r2[] = { 0x2032f, 0x20340, 0x20341, 0x20342, 0x20343, 0x20344, 0x20345, 0x20346, 0x20347, 0x20348, 0x20349, 0x2034a, 0x2034b, 0x2034e }; static const uint32_t ural_rf2525e_r2[] = { 0x2044d, 0x2044e, 0x2044f, 0x20460, 0x20461, 0x20462, 0x20463, 0x20464, 0x20465, 0x20466, 0x20467, 0x20468, 0x20469, 0x2046b }; static const uint32_t ural_rf2526_hi_r2[] = { 0x0022a, 0x0022b, 0x0022b, 0x0022c, 0x0022c, 0x0022d, 0x0022d, 0x0022e, 0x0022e, 0x0022f, 0x0022d, 0x00240, 0x00240, 0x00241 }; static const uint32_t ural_rf2526_r2[] = { 0x00226, 0x00227, 0x00227, 0x00228, 0x00228, 0x00229, 0x00229, 0x0022a, 0x0022a, 0x0022b, 0x0022b, 0x0022c, 0x0022c, 0x0022d }; /* * For dual-band RF, RF registers R1 and R4 also depend on channel number; * values taken from the reference driver. */ static const struct { uint8_t chan; uint32_t r1; uint32_t r2; uint32_t r4; } ural_rf5222[] = { { 1, 0x08808, 0x0044d, 0x00282 }, { 2, 0x08808, 0x0044e, 0x00282 }, { 3, 0x08808, 0x0044f, 0x00282 }, { 4, 0x08808, 0x00460, 0x00282 }, { 5, 0x08808, 0x00461, 0x00282 }, { 6, 0x08808, 0x00462, 0x00282 }, { 7, 0x08808, 0x00463, 0x00282 }, { 8, 0x08808, 0x00464, 0x00282 }, { 9, 0x08808, 0x00465, 0x00282 }, { 10, 0x08808, 0x00466, 0x00282 }, { 11, 0x08808, 0x00467, 0x00282 }, { 12, 0x08808, 0x00468, 0x00282 }, { 13, 0x08808, 0x00469, 0x00282 }, { 14, 0x08808, 0x0046b, 0x00286 }, { 36, 0x08804, 0x06225, 0x00287 }, { 40, 0x08804, 0x06226, 0x00287 }, { 44, 0x08804, 0x06227, 0x00287 }, { 48, 0x08804, 0x06228, 0x00287 }, { 52, 0x08804, 0x06229, 0x00287 }, { 56, 0x08804, 0x0622a, 0x00287 }, { 60, 0x08804, 0x0622b, 0x00287 }, { 64, 0x08804, 0x0622c, 0x00287 }, { 100, 0x08804, 0x02200, 0x00283 }, { 104, 0x08804, 0x02201, 0x00283 }, { 108, 0x08804, 0x02202, 0x00283 }, { 112, 0x08804, 0x02203, 0x00283 }, { 116, 0x08804, 0x02204, 0x00283 }, { 120, 0x08804, 0x02205, 0x00283 }, { 124, 0x08804, 0x02206, 0x00283 }, { 128, 0x08804, 0x02207, 0x00283 }, { 132, 0x08804, 0x02208, 0x00283 }, { 136, 0x08804, 0x02209, 0x00283 }, { 140, 0x08804, 0x0220a, 0x00283 }, { 149, 0x08808, 0x02429, 0x00281 }, { 153, 0x08808, 0x0242b, 0x00281 }, { 157, 0x08808, 0x0242d, 0x00281 }, { 161, 0x08808, 0x0242f, 0x00281 } }; static const uint8_t ural_chan_2ghz[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 }; static const uint8_t ural_chan_5ghz[] = { 36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161 }; static const struct usb_config ural_config[URAL_N_TRANSFER] = { [URAL_BULK_WR] = { .type = UE_BULK, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_OUT, .bufsize = (RAL_FRAME_SIZE + RAL_TX_DESC_SIZE + 4), .flags = {.pipe_bof = 1,.force_short_xfer = 1,}, .callback = ural_bulk_write_callback, .timeout = 5000, /* ms */ }, [URAL_BULK_RD] = { .type = UE_BULK, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_IN, .bufsize = 
(RAL_FRAME_SIZE + RAL_RX_DESC_SIZE), .flags = {.pipe_bof = 1,.short_xfer_ok = 1,}, .callback = ural_bulk_read_callback, }, }; static device_probe_t ural_match; static device_attach_t ural_attach; static device_detach_t ural_detach; static device_method_t ural_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ural_match), DEVMETHOD(device_attach, ural_attach), DEVMETHOD(device_detach, ural_detach), DEVMETHOD_END }; static driver_t ural_driver = { .name = "ural", .methods = ural_methods, .size = sizeof(struct ural_softc), }; static devclass_t ural_devclass; DRIVER_MODULE(ural, uhub, ural_driver, ural_devclass, NULL, 0); MODULE_DEPEND(ural, usb, 1, 1, 1); MODULE_DEPEND(ural, wlan, 1, 1, 1); MODULE_VERSION(ural, 1); USB_PNP_HOST_INFO(ural_devs); static int ural_match(device_t self) { struct usb_attach_arg *uaa = device_get_ivars(self); if (uaa->usb_mode != USB_MODE_HOST) return (ENXIO); if (uaa->info.bConfigIndex != 0) return (ENXIO); if (uaa->info.bIfaceIndex != RAL_IFACE_INDEX) return (ENXIO); return (usbd_lookup_id_by_uaa(ural_devs, sizeof(ural_devs), uaa)); } static int ural_attach(device_t self) { struct usb_attach_arg *uaa = device_get_ivars(self); struct ural_softc *sc = device_get_softc(self); struct ieee80211com *ic = &sc->sc_ic; uint8_t iface_index; int error; device_set_usb_desc(self); sc->sc_udev = uaa->device; sc->sc_dev = self; mtx_init(&sc->sc_mtx, device_get_nameunit(self), MTX_NETWORK_LOCK, MTX_DEF); mbufq_init(&sc->sc_snd, ifqmaxlen); iface_index = RAL_IFACE_INDEX; error = usbd_transfer_setup(uaa->device, &iface_index, sc->sc_xfer, ural_config, URAL_N_TRANSFER, sc, &sc->sc_mtx); if (error) { device_printf(self, "could not allocate USB transfers, " "err=%s\n", usbd_errstr(error)); goto detach; } RAL_LOCK(sc); /* retrieve RT2570 rev. 
no */ sc->asic_rev = ural_read(sc, RAL_MAC_CSR0); /* retrieve MAC address and various other things from EEPROM */ ural_read_eeprom(sc); RAL_UNLOCK(sc); device_printf(self, "MAC/BBP RT2570 (rev 0x%02x), RF %s\n", sc->asic_rev, ural_get_rf(sc->rf_rev)); ic->ic_softc = sc; ic->ic_name = device_get_nameunit(self); ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ /* set device capabilities */ ic->ic_caps = IEEE80211_C_STA /* station mode supported */ | IEEE80211_C_IBSS /* IBSS mode supported */ | IEEE80211_C_MONITOR /* monitor mode supported */ | IEEE80211_C_HOSTAP /* HostAp mode supported */ | IEEE80211_C_TXPMGT /* tx power management */ | IEEE80211_C_SHPREAMBLE /* short preamble supported */ | IEEE80211_C_SHSLOT /* short slot time supported */ | IEEE80211_C_BGSCAN /* bg scanning supported */ | IEEE80211_C_WPA /* 802.11i */ ; ural_getradiocaps(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans, ic->ic_channels); ieee80211_ifattach(ic); ic->ic_update_promisc = ural_update_promisc; ic->ic_raw_xmit = ural_raw_xmit; ic->ic_scan_start = ural_scan_start; ic->ic_scan_end = ural_scan_end; ic->ic_getradiocaps = ural_getradiocaps; ic->ic_set_channel = ural_set_channel; ic->ic_parent = ural_parent; ic->ic_transmit = ural_transmit; ic->ic_vap_create = ural_vap_create; ic->ic_vap_delete = ural_vap_delete; ieee80211_radiotap_attach(ic, &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap), RAL_TX_RADIOTAP_PRESENT, &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap), RAL_RX_RADIOTAP_PRESENT); if (bootverbose) ieee80211_announce(ic); return (0); detach: ural_detach(self); return (ENXIO); /* failure */ } static int ural_detach(device_t self) { struct ural_softc *sc = device_get_softc(self); struct ieee80211com *ic = &sc->sc_ic; /* prevent further ioctls */ RAL_LOCK(sc); sc->sc_detached = 1; RAL_UNLOCK(sc); /* stop all USB transfers */ usbd_transfer_unsetup(sc->sc_xfer, URAL_N_TRANSFER); /* free TX list, if any */ RAL_LOCK(sc); ural_unsetup_tx_list(sc); RAL_UNLOCK(sc); if (ic->ic_softc == sc) ieee80211_ifdetach(ic); mbufq_drain(&sc->sc_snd); mtx_destroy(&sc->sc_mtx); return (0); } static usb_error_t ural_do_request(struct ural_softc *sc, struct usb_device_request *req, void *data) { usb_error_t err; int ntries = 10; while (ntries--) { err = usbd_do_request_flags(sc->sc_udev, &sc->sc_mtx, req, data, 0, NULL, 250 /* ms */); if (err == 0) break; DPRINTFN(1, "Control request failed, %s (retrying)\n", usbd_errstr(err)); if (ural_pause(sc, hz / 100)) break; } return (err); } static struct ieee80211vap * ural_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, enum ieee80211_opmode opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac[IEEE80211_ADDR_LEN]) { struct ural_softc *sc = ic->ic_softc; struct ural_vap *uvp; struct ieee80211vap *vap; if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */ return NULL; uvp = malloc(sizeof(struct ural_vap), M_80211_VAP, M_WAITOK | M_ZERO); vap = &uvp->vap; /* enable s/w bmiss handling for sta mode */ if (ieee80211_vap_setup(ic, vap, name, unit, opmode, flags | IEEE80211_CLONE_NOBEACONS, bssid) != 0) { /* out of memory */ free(uvp, M_80211_VAP); return (NULL); } /* override state transition machine */ uvp->newstate = vap->iv_newstate; vap->iv_newstate = ural_newstate; usb_callout_init_mtx(&uvp->ratectl_ch, &sc->sc_mtx, 0); TASK_INIT(&uvp->ratectl_task, 0, ural_ratectl_task, uvp); ieee80211_ratectl_init(vap); ieee80211_ratectl_setinterval(vap, 1000 /* 1 sec */); /* complete setup */ ieee80211_vap_attach(vap, ieee80211_media_change, 
ieee80211_media_status, mac); ic->ic_opmode = opmode; return vap; } static void ural_vap_delete(struct ieee80211vap *vap) { struct ural_vap *uvp = URAL_VAP(vap); struct ieee80211com *ic = vap->iv_ic; usb_callout_drain(&uvp->ratectl_ch); ieee80211_draintask(ic, &uvp->ratectl_task); ieee80211_ratectl_deinit(vap); ieee80211_vap_detach(vap); free(uvp, M_80211_VAP); } static void ural_tx_free(struct ural_tx_data *data, int txerr) { struct ural_softc *sc = data->sc; if (data->m != NULL) { ieee80211_tx_complete(data->ni, data->m, txerr); data->m = NULL; data->ni = NULL; } STAILQ_INSERT_TAIL(&sc->tx_free, data, next); sc->tx_nfree++; } static void ural_setup_tx_list(struct ural_softc *sc) { struct ural_tx_data *data; int i; sc->tx_nfree = 0; STAILQ_INIT(&sc->tx_q); STAILQ_INIT(&sc->tx_free); for (i = 0; i < RAL_TX_LIST_COUNT; i++) { data = &sc->tx_data[i]; data->sc = sc; STAILQ_INSERT_TAIL(&sc->tx_free, data, next); sc->tx_nfree++; } } static void ural_unsetup_tx_list(struct ural_softc *sc) { struct ural_tx_data *data; int i; /* make sure any subsequent use of the queues will fail */ sc->tx_nfree = 0; STAILQ_INIT(&sc->tx_q); STAILQ_INIT(&sc->tx_free); /* free up all node references and mbufs */ for (i = 0; i < RAL_TX_LIST_COUNT; i++) { data = &sc->tx_data[i]; if (data->m != NULL) { m_freem(data->m); data->m = NULL; } if (data->ni != NULL) { ieee80211_free_node(data->ni); data->ni = NULL; } } } static int ural_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct ural_vap *uvp = URAL_VAP(vap); struct ieee80211com *ic = vap->iv_ic; struct ural_softc *sc = ic->ic_softc; const struct ieee80211_txparam *tp; struct ieee80211_node *ni; struct mbuf *m; DPRINTF("%s -> %s\n", ieee80211_state_name[vap->iv_state], ieee80211_state_name[nstate]); IEEE80211_UNLOCK(ic); RAL_LOCK(sc); usb_callout_stop(&uvp->ratectl_ch); switch (nstate) { case IEEE80211_S_INIT: if (vap->iv_state == IEEE80211_S_RUN) { /* abort TSF synchronization */ ural_write(sc, RAL_TXRX_CSR19, 0); /* force tx led to stop blinking */ ural_write(sc, RAL_MAC_CSR20, 0); } break; case IEEE80211_S_RUN: ni = ieee80211_ref_node(vap->iv_bss); if (vap->iv_opmode != IEEE80211_M_MONITOR) { if (ic->ic_bsschan == IEEE80211_CHAN_ANYC) goto fail; ural_update_slot(sc); ural_set_txpreamble(sc); ural_set_basicrates(sc, ic->ic_bsschan); IEEE80211_ADDR_COPY(sc->sc_bssid, ni->ni_bssid); ural_set_bssid(sc, sc->sc_bssid); } if (vap->iv_opmode == IEEE80211_M_HOSTAP || vap->iv_opmode == IEEE80211_M_IBSS) { m = ieee80211_beacon_alloc(ni); if (m == NULL) { device_printf(sc->sc_dev, "could not allocate beacon\n"); goto fail; } ieee80211_ref_node(ni); if (ural_tx_bcn(sc, m, ni) != 0) { device_printf(sc->sc_dev, "could not send beacon\n"); goto fail; } } /* make tx led blink on tx (controlled by ASIC) */ ural_write(sc, RAL_MAC_CSR20, 1); if (vap->iv_opmode != IEEE80211_M_MONITOR) ural_enable_tsf_sync(sc); else ural_enable_tsf(sc); /* enable automatic rate adaptation */ /* XXX should use ic_bsschan but not valid until after newstate call below */ tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)]; if (tp->ucastrate == IEEE80211_FIXED_RATE_NONE) ural_ratectl_start(sc, ni); ieee80211_free_node(ni); break; default: break; } RAL_UNLOCK(sc); IEEE80211_LOCK(ic); return (uvp->newstate(vap, nstate, arg)); fail: RAL_UNLOCK(sc); IEEE80211_LOCK(ic); ieee80211_free_node(ni); return (-1); } static void ural_bulk_write_callback(struct usb_xfer *xfer, usb_error_t error) { struct ural_softc *sc = usbd_xfer_softc(xfer); struct ieee80211vap *vap; struct 
ural_tx_data *data; struct mbuf *m; struct usb_page_cache *pc; int len; usbd_xfer_status(xfer, &len, NULL, NULL, NULL); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: DPRINTFN(11, "transfer complete, %d bytes\n", len); /* free resources */ data = usbd_xfer_get_priv(xfer); ural_tx_free(data, 0); usbd_xfer_set_priv(xfer, NULL); /* FALLTHROUGH */ case USB_ST_SETUP: tr_setup: data = STAILQ_FIRST(&sc->tx_q); if (data) { STAILQ_REMOVE_HEAD(&sc->tx_q, next); m = data->m; if (m->m_pkthdr.len > (int)(RAL_FRAME_SIZE + RAL_TX_DESC_SIZE)) { DPRINTFN(0, "data overflow, %u bytes\n", m->m_pkthdr.len); m->m_pkthdr.len = (RAL_FRAME_SIZE + RAL_TX_DESC_SIZE); } pc = usbd_xfer_get_frame(xfer, 0); usbd_copy_in(pc, 0, &data->desc, RAL_TX_DESC_SIZE); usbd_m_copy_in(pc, RAL_TX_DESC_SIZE, m, 0, m->m_pkthdr.len); vap = data->ni->ni_vap; if (ieee80211_radiotap_active_vap(vap)) { struct ural_tx_radiotap_header *tap = &sc->sc_txtap; tap->wt_flags = 0; tap->wt_rate = data->rate; tap->wt_antenna = sc->tx_ant; ieee80211_radiotap_tx(vap, m); } /* xfer length needs to be a multiple of two! */ len = (RAL_TX_DESC_SIZE + m->m_pkthdr.len + 1) & ~1; if ((len % 64) == 0) len += 2; DPRINTFN(11, "sending frame len=%u xferlen=%u\n", m->m_pkthdr.len, len); usbd_xfer_set_frame_len(xfer, 0, len); usbd_xfer_set_priv(xfer, data); usbd_transfer_submit(xfer); } ural_start(sc); break; default: /* Error */ DPRINTFN(11, "transfer error, %s\n", usbd_errstr(error)); data = usbd_xfer_get_priv(xfer); if (data != NULL) { ural_tx_free(data, error); usbd_xfer_set_priv(xfer, NULL); } if (error == USB_ERR_STALLED) { /* try to clear stall first */ usbd_xfer_set_stall(xfer); goto tr_setup; } if (error == USB_ERR_TIMEOUT) device_printf(sc->sc_dev, "device timeout\n"); break; } } static void ural_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error) { struct ural_softc *sc = usbd_xfer_softc(xfer); struct ieee80211com *ic = &sc->sc_ic; struct ieee80211_node *ni; struct mbuf *m = NULL; struct usb_page_cache *pc; uint32_t flags; int8_t rssi = 0, nf = 0; int len; usbd_xfer_status(xfer, &len, NULL, NULL, NULL); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: DPRINTFN(15, "rx done, actlen=%d\n", len); if (len < (int)(RAL_RX_DESC_SIZE + IEEE80211_MIN_LEN)) { DPRINTF("%s: xfer too short %d\n", device_get_nameunit(sc->sc_dev), len); counter_u64_add(ic->ic_ierrors, 1); goto tr_setup; } len -= RAL_RX_DESC_SIZE; /* rx descriptor is located at the end */ pc = usbd_xfer_get_frame(xfer, 0); usbd_copy_out(pc, len, &sc->sc_rx_desc, RAL_RX_DESC_SIZE); rssi = URAL_RSSI(sc->sc_rx_desc.rssi); nf = RAL_NOISE_FLOOR; flags = le32toh(sc->sc_rx_desc.flags); if (flags & (RAL_RX_PHY_ERROR | RAL_RX_CRC_ERROR)) { /* * This should not happen since we did not * request to receive those frames when we * filled RAL_TXRX_CSR2: */ DPRINTFN(5, "PHY or CRC error\n"); counter_u64_add(ic->ic_ierrors, 1); goto tr_setup; } m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (m == NULL) { DPRINTF("could not allocate mbuf\n"); counter_u64_add(ic->ic_ierrors, 1); goto tr_setup; } usbd_copy_out(pc, 0, mtod(m, uint8_t *), len); /* finalize mbuf */ m->m_pkthdr.len = m->m_len = (flags >> 16) & 0xfff; if (ieee80211_radiotap_active(ic)) { struct ural_rx_radiotap_header *tap = &sc->sc_rxtap; /* XXX set once */ tap->wr_flags = 0; tap->wr_rate = ieee80211_plcp2rate(sc->sc_rx_desc.rate, (flags & RAL_RX_OFDM) ? IEEE80211_T_OFDM : IEEE80211_T_CCK); tap->wr_antenna = sc->rx_ant; tap->wr_antsignal = nf + rssi; tap->wr_antnoise = nf; } /* Strip trailing 802.11 MAC FCS. 
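In ural_bulk_write_callback above, the bulk-out transfer length is rounded up to an even byte count and bumped by two whenever it would land exactly on a 64-byte boundary, so the transfer always ends in a short USB packet. A minimal userland sketch of that rule (the descriptor size passed in main is only a stand-in for RAL_TX_DESC_SIZE, whose value is not shown here):

/* Standalone sketch, not driver code: mirrors the length rule in
 * ural_bulk_write_callback -- round up to a multiple of two and avoid
 * an exact multiple of 64 bytes. */
#include <stdio.h>

static int
ural_pad_xfer_len(int desc_size, int pkt_len)
{
	int len;

	len = (desc_size + pkt_len + 1) & ~1;	/* multiple of two */
	if ((len % 64) == 0)			/* never a full 64-byte packet */
		len += 2;
	return (len);
}

int
main(void)
{
	/* 24 is only a stand-in for RAL_TX_DESC_SIZE here. */
	printf("%d\n", ural_pad_xfer_len(24, 39));	/* 64 -> padded to 66 */
	printf("%d\n", ural_pad_xfer_len(24, 41));	/* 66 stays 66 */
	return (0);
}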
*/ m_adj(m, -IEEE80211_CRC_LEN); /* FALLTHROUGH */ case USB_ST_SETUP: tr_setup: usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer)); usbd_transfer_submit(xfer); /* * At the end of a USB callback it is always safe to unlock * the private mutex of a device! That is why we do the * "ieee80211_input" here, and not some lines up! */ RAL_UNLOCK(sc); if (m) { ni = ieee80211_find_rxnode(ic, mtod(m, struct ieee80211_frame_min *)); if (ni != NULL) { (void) ieee80211_input(ni, m, rssi, nf); ieee80211_free_node(ni); } else (void) ieee80211_input_all(ic, m, rssi, nf); } RAL_LOCK(sc); ural_start(sc); return; default: /* Error */ if (error != USB_ERR_CANCELLED) { /* try to clear stall first */ usbd_xfer_set_stall(xfer); goto tr_setup; } return; } } static uint8_t ural_plcp_signal(int rate) { switch (rate) { /* OFDM rates (cf IEEE Std 802.11a-1999, pp. 14 Table 80) */ case 12: return 0xb; case 18: return 0xf; case 24: return 0xa; case 36: return 0xe; case 48: return 0x9; case 72: return 0xd; case 96: return 0x8; case 108: return 0xc; /* CCK rates (NB: not IEEE std, device-specific) */ case 2: return 0x0; case 4: return 0x1; case 11: return 0x2; case 22: return 0x3; } return 0xff; /* XXX unsupported/unknown rate */ } static void ural_setup_tx_desc(struct ural_softc *sc, struct ural_tx_desc *desc, uint32_t flags, int len, int rate) { struct ieee80211com *ic = &sc->sc_ic; uint16_t plcp_length; int remainder; desc->flags = htole32(flags); desc->flags |= htole32(RAL_TX_NEWSEQ); desc->flags |= htole32(len << 16); desc->wme = htole16(RAL_AIFSN(2) | RAL_LOGCWMIN(3) | RAL_LOGCWMAX(5)); desc->wme |= htole16(RAL_IVOFFSET(sizeof (struct ieee80211_frame))); /* setup PLCP fields */ desc->plcp_signal = ural_plcp_signal(rate); desc->plcp_service = 4; len += IEEE80211_CRC_LEN; if (ieee80211_rate2phytype(ic->ic_rt, rate) == IEEE80211_T_OFDM) { desc->flags |= htole32(RAL_TX_OFDM); plcp_length = len & 0xfff; desc->plcp_length_hi = plcp_length >> 6; desc->plcp_length_lo = plcp_length & 0x3f; } else { if (rate == 0) rate = 2; /* avoid division by zero */ plcp_length = howmany(16 * len, rate); if (rate == 22) { remainder = (16 * len) % 22; if (remainder != 0 && remainder < 7) desc->plcp_service |= RAL_PLCP_LENGEXT; } desc->plcp_length_hi = plcp_length >> 8; desc->plcp_length_lo = plcp_length & 0xff; if (rate != 2 && (ic->ic_flags & IEEE80211_F_SHPREAMBLE)) desc->plcp_signal |= 0x08; } desc->iv = 0; desc->eiv = 0; } #define RAL_TX_TIMEOUT 5000 static int ural_tx_bcn(struct ural_softc *sc, struct mbuf *m0, struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; const struct ieee80211_txparam *tp; struct ural_tx_data *data; if (sc->tx_nfree == 0) { m_freem(m0); ieee80211_free_node(ni); return (EIO); } if (ic->ic_bsschan == IEEE80211_CHAN_ANYC) { m_freem(m0); ieee80211_free_node(ni); return (ENXIO); } data = STAILQ_FIRST(&sc->tx_free); STAILQ_REMOVE_HEAD(&sc->tx_free, next); sc->tx_nfree--; tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_bsschan)]; data->m = m0; data->ni = ni; data->rate = tp->mgmtrate; ural_setup_tx_desc(sc, &data->desc, RAL_TX_IFS_NEWBACKOFF | RAL_TX_TIMESTAMP, m0->m_pkthdr.len, tp->mgmtrate); DPRINTFN(10, "sending beacon frame len=%u rate=%u\n", m0->m_pkthdr.len, tp->mgmtrate); STAILQ_INSERT_TAIL(&sc->tx_q, data, next); usbd_transfer_start(sc->sc_xfer[URAL_BULK_WR]); return (0); } static int ural_tx_mgt(struct ural_softc *sc, struct mbuf *m0, struct ieee80211_node *ni) { const struct ieee80211_txparam *tp = ni->ni_txparms; struct ieee80211com *ic = 
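ural_setup_tx_desc above fills the PLCP SIGNAL and LENGTH fields. For the CCK rates the LENGTH value is the on-air duration in microseconds, ceil(16 * len / rate) with rate in 500 kbps units, and at 11 Mbit/s the length-extension service bit is set when the remainder is non-zero but below 7. A self-contained sketch of that computation (the FCS is assumed to already be included in the length, as in the driver):

#include <stdio.h>

#define HOWMANY(x, y)	(((x) + ((y) - 1)) / (y))

/* rate is in 500 kbps units: 2 = 1 Mbit/s, 22 = 11 Mbit/s */
static unsigned
cck_plcp_length(unsigned len_bytes, unsigned rate, int *lengext)
{
	unsigned us = HOWMANY(16 * len_bytes, rate);

	/* 11 Mbit/s needs the length-extension bit when the last
	 * microsecond carries fewer than 7 of its 11 bits */
	*lengext = (rate == 22 && (16 * len_bytes) % 22 != 0 &&
	    (16 * len_bytes) % 22 < 7);
	return (us);
}

int
main(void)
{
	int ext;
	/* 1500-byte frame plus 4-byte FCS at 11 Mbit/s */
	unsigned us = cck_plcp_length(1504, 22, &ext);

	printf("%u us, length extension %d\n", us, ext);
	return (0);
}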
ni->ni_ic; struct ural_tx_data *data; struct ieee80211_frame *wh; struct ieee80211_key *k; uint32_t flags; uint16_t dur; RAL_LOCK_ASSERT(sc, MA_OWNED); data = STAILQ_FIRST(&sc->tx_free); STAILQ_REMOVE_HEAD(&sc->tx_free, next); sc->tx_nfree--; wh = mtod(m0, struct ieee80211_frame *); if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { k = ieee80211_crypto_encap(ni, m0); if (k == NULL) { m_freem(m0); return ENOBUFS; } wh = mtod(m0, struct ieee80211_frame *); } data->m = m0; data->ni = ni; data->rate = tp->mgmtrate; flags = 0; if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { flags |= RAL_TX_ACK; dur = ieee80211_ack_duration(ic->ic_rt, tp->mgmtrate, ic->ic_flags & IEEE80211_F_SHPREAMBLE); USETW(wh->i_dur, dur); /* tell hardware to add timestamp for probe responses */ if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_MGT && (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) == IEEE80211_FC0_SUBTYPE_PROBE_RESP) flags |= RAL_TX_TIMESTAMP; } ural_setup_tx_desc(sc, &data->desc, flags, m0->m_pkthdr.len, tp->mgmtrate); DPRINTFN(10, "sending mgt frame len=%u rate=%u\n", m0->m_pkthdr.len, tp->mgmtrate); STAILQ_INSERT_TAIL(&sc->tx_q, data, next); usbd_transfer_start(sc->sc_xfer[URAL_BULK_WR]); return 0; } static int ural_sendprot(struct ural_softc *sc, const struct mbuf *m, struct ieee80211_node *ni, int prot, int rate) { struct ieee80211com *ic = ni->ni_ic; - const struct ieee80211_frame *wh; struct ural_tx_data *data; struct mbuf *mprot; - int protrate, ackrate, pktlen, flags, isshort; - uint16_t dur; + int protrate, flags; - KASSERT(prot == IEEE80211_PROT_RTSCTS || prot == IEEE80211_PROT_CTSONLY, - ("protection %d", prot)); + mprot = ieee80211_alloc_prot(ni, m, rate, prot); + if (mprot == NULL) { + if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1); + device_printf(sc->sc_dev, + "could not allocate mbuf for protection mode %d\n", prot); + return ENOBUFS; + } - wh = mtod(m, const struct ieee80211_frame *); - pktlen = m->m_pkthdr.len + IEEE80211_CRC_LEN; - protrate = ieee80211_ctl_rate(ic->ic_rt, rate); - ackrate = ieee80211_ack_rate(ic->ic_rt, rate); - - isshort = (ic->ic_flags & IEEE80211_F_SHPREAMBLE) != 0; - dur = ieee80211_compute_duration(ic->ic_rt, pktlen, rate, isshort) - + ieee80211_ack_duration(ic->ic_rt, rate, isshort); flags = RAL_TX_RETRY(7); - if (prot == IEEE80211_PROT_RTSCTS) { - /* NB: CTS is the same size as an ACK */ - dur += ieee80211_ack_duration(ic->ic_rt, rate, isshort); + if (prot == IEEE80211_PROT_RTSCTS) flags |= RAL_TX_ACK; - mprot = ieee80211_alloc_rts(ic, wh->i_addr1, wh->i_addr2, dur); - } else { - mprot = ieee80211_alloc_cts(ic, ni->ni_vap->iv_myaddr, dur); - } - if (mprot == NULL) { - /* XXX stat + msg */ - return ENOBUFS; - } + data = STAILQ_FIRST(&sc->tx_free); STAILQ_REMOVE_HEAD(&sc->tx_free, next); sc->tx_nfree--; data->m = mprot; data->ni = ieee80211_ref_node(ni); data->rate = protrate; ural_setup_tx_desc(sc, &data->desc, flags, mprot->m_pkthdr.len, protrate); STAILQ_INSERT_TAIL(&sc->tx_q, data, next); usbd_transfer_start(sc->sc_xfer[URAL_BULK_WR]); return 0; } static int ural_tx_raw(struct ural_softc *sc, struct mbuf *m0, struct ieee80211_node *ni, const struct ieee80211_bpf_params *params) { struct ieee80211com *ic = ni->ni_ic; struct ural_tx_data *data; uint32_t flags; int error; int rate; RAL_LOCK_ASSERT(sc, MA_OWNED); KASSERT(params != NULL, ("no raw xmit params")); rate = params->ibp_rate0; if (!ieee80211_isratevalid(ic->ic_rt, rate)) { m_freem(m0); return EINVAL; } flags = 0; if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0) flags |= RAL_TX_ACK; if 
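The hunk above is the substantive change in if_ural.c: ural_sendprot() no longer open-codes the RTS/CTS duration math and frame allocation, it calls ieee80211_alloc_prot() instead. The removed lines show what the driver used to compute and what the helper is presumably expected to centralize; the sketch below reproduces only that arithmetic, with data_dur and ack_dur standing in for the ieee80211_compute_duration()/ieee80211_ack_duration() results (the numbers in main are made up):

#include <stdio.h>

enum prot_mode { PROT_RTSCTS, PROT_CTSONLY };

/* NAV duration carried in the protection frame, per the removed code */
static unsigned
prot_nav_duration(enum prot_mode prot, unsigned data_dur, unsigned ack_dur)
{
	unsigned dur = data_dur + ack_dur;	/* protected frame + its ACK */

	if (prot == PROT_RTSCTS)
		dur += ack_dur;	/* NB: a CTS is the same size as an ACK */
	return (dur);
}

int
main(void)
{
	printf("RTS NAV: %u us\n", prot_nav_duration(PROT_RTSCTS, 1200, 44));
	printf("CTS-to-self NAV: %u us\n",
	    prot_nav_duration(PROT_CTSONLY, 1200, 44));
	return (0);
}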
(params->ibp_flags & (IEEE80211_BPF_RTS|IEEE80211_BPF_CTS)) { error = ural_sendprot(sc, m0, ni, params->ibp_flags & IEEE80211_BPF_RTS ? IEEE80211_PROT_RTSCTS : IEEE80211_PROT_CTSONLY, rate); if (error || sc->tx_nfree == 0) { m_freem(m0); return ENOBUFS; } flags |= RAL_TX_IFS_SIFS; } data = STAILQ_FIRST(&sc->tx_free); STAILQ_REMOVE_HEAD(&sc->tx_free, next); sc->tx_nfree--; data->m = m0; data->ni = ni; data->rate = rate; /* XXX need to setup descriptor ourself */ ural_setup_tx_desc(sc, &data->desc, flags, m0->m_pkthdr.len, rate); DPRINTFN(10, "sending raw frame len=%u rate=%u\n", m0->m_pkthdr.len, rate); STAILQ_INSERT_TAIL(&sc->tx_q, data, next); usbd_transfer_start(sc->sc_xfer[URAL_BULK_WR]); return 0; } static int ural_tx_data(struct ural_softc *sc, struct mbuf *m0, struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct ural_tx_data *data; struct ieee80211_frame *wh; const struct ieee80211_txparam *tp = ni->ni_txparms; struct ieee80211_key *k; uint32_t flags = 0; uint16_t dur; int error, rate; RAL_LOCK_ASSERT(sc, MA_OWNED); wh = mtod(m0, struct ieee80211_frame *); if (m0->m_flags & M_EAPOL) rate = tp->mgmtrate; else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) rate = tp->mcastrate; else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) rate = tp->ucastrate; else { (void) ieee80211_ratectl_rate(ni, NULL, 0); rate = ni->ni_txrate; } if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { k = ieee80211_crypto_encap(ni, m0); if (k == NULL) { m_freem(m0); return ENOBUFS; } /* packet header may have moved, reset our local pointer */ wh = mtod(m0, struct ieee80211_frame *); } if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { int prot = IEEE80211_PROT_NONE; if (m0->m_pkthdr.len + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) prot = IEEE80211_PROT_RTSCTS; else if ((ic->ic_flags & IEEE80211_F_USEPROT) && ieee80211_rate2phytype(ic->ic_rt, rate) == IEEE80211_T_OFDM) prot = ic->ic_protmode; if (prot != IEEE80211_PROT_NONE) { error = ural_sendprot(sc, m0, ni, prot, rate); if (error || sc->tx_nfree == 0) { m_freem(m0); return ENOBUFS; } flags |= RAL_TX_IFS_SIFS; } } data = STAILQ_FIRST(&sc->tx_free); STAILQ_REMOVE_HEAD(&sc->tx_free, next); sc->tx_nfree--; data->m = m0; data->ni = ni; data->rate = rate; if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { flags |= RAL_TX_ACK; flags |= RAL_TX_RETRY(7); dur = ieee80211_ack_duration(ic->ic_rt, rate, ic->ic_flags & IEEE80211_F_SHPREAMBLE); USETW(wh->i_dur, dur); } ural_setup_tx_desc(sc, &data->desc, flags, m0->m_pkthdr.len, rate); DPRINTFN(10, "sending data frame len=%u rate=%u\n", m0->m_pkthdr.len, rate); STAILQ_INSERT_TAIL(&sc->tx_q, data, next); usbd_transfer_start(sc->sc_xfer[URAL_BULK_WR]); return 0; } static int ural_transmit(struct ieee80211com *ic, struct mbuf *m) { struct ural_softc *sc = ic->ic_softc; int error; RAL_LOCK(sc); if (!sc->sc_running) { RAL_UNLOCK(sc); return (ENXIO); } error = mbufq_enqueue(&sc->sc_snd, m); if (error) { RAL_UNLOCK(sc); return (error); } ural_start(sc); RAL_UNLOCK(sc); return (0); } static void ural_start(struct ural_softc *sc) { struct ieee80211_node *ni; struct mbuf *m; RAL_LOCK_ASSERT(sc, MA_OWNED); if (sc->sc_running == 0) return; while (sc->tx_nfree >= RAL_TX_MINFREE && (m = mbufq_dequeue(&sc->sc_snd)) != NULL) { ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; if (ural_tx_data(sc, m, ni) != 0) { if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1); ieee80211_free_node(ni); break; } } } static void ural_parent(struct ieee80211com *ic) { struct ural_softc *sc = ic->ic_softc; int startall = 
0; RAL_LOCK(sc); if (sc->sc_detached) { RAL_UNLOCK(sc); return; } if (ic->ic_nrunning > 0) { if (sc->sc_running == 0) { ural_init(sc); startall = 1; } else ural_setpromisc(sc); } else if (sc->sc_running) ural_stop(sc); RAL_UNLOCK(sc); if (startall) ieee80211_start_all(ic); } static void ural_set_testmode(struct ural_softc *sc) { struct usb_device_request req; usb_error_t error; req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = RAL_VENDOR_REQUEST; USETW(req.wValue, 4); USETW(req.wIndex, 1); USETW(req.wLength, 0); error = ural_do_request(sc, &req, NULL); if (error != 0) { device_printf(sc->sc_dev, "could not set test mode: %s\n", usbd_errstr(error)); } } static void ural_eeprom_read(struct ural_softc *sc, uint16_t addr, void *buf, int len) { struct usb_device_request req; usb_error_t error; req.bmRequestType = UT_READ_VENDOR_DEVICE; req.bRequest = RAL_READ_EEPROM; USETW(req.wValue, 0); USETW(req.wIndex, addr); USETW(req.wLength, len); error = ural_do_request(sc, &req, buf); if (error != 0) { device_printf(sc->sc_dev, "could not read EEPROM: %s\n", usbd_errstr(error)); } } static uint16_t ural_read(struct ural_softc *sc, uint16_t reg) { struct usb_device_request req; usb_error_t error; uint16_t val; req.bmRequestType = UT_READ_VENDOR_DEVICE; req.bRequest = RAL_READ_MAC; USETW(req.wValue, 0); USETW(req.wIndex, reg); USETW(req.wLength, sizeof (uint16_t)); error = ural_do_request(sc, &req, &val); if (error != 0) { device_printf(sc->sc_dev, "could not read MAC register: %s\n", usbd_errstr(error)); return 0; } return le16toh(val); } static void ural_read_multi(struct ural_softc *sc, uint16_t reg, void *buf, int len) { struct usb_device_request req; usb_error_t error; req.bmRequestType = UT_READ_VENDOR_DEVICE; req.bRequest = RAL_READ_MULTI_MAC; USETW(req.wValue, 0); USETW(req.wIndex, reg); USETW(req.wLength, len); error = ural_do_request(sc, &req, buf); if (error != 0) { device_printf(sc->sc_dev, "could not read MAC register: %s\n", usbd_errstr(error)); } } static void ural_write(struct ural_softc *sc, uint16_t reg, uint16_t val) { struct usb_device_request req; usb_error_t error; req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = RAL_WRITE_MAC; USETW(req.wValue, val); USETW(req.wIndex, reg); USETW(req.wLength, 0); error = ural_do_request(sc, &req, NULL); if (error != 0) { device_printf(sc->sc_dev, "could not write MAC register: %s\n", usbd_errstr(error)); } } static void ural_write_multi(struct ural_softc *sc, uint16_t reg, void *buf, int len) { struct usb_device_request req; usb_error_t error; req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = RAL_WRITE_MULTI_MAC; USETW(req.wValue, 0); USETW(req.wIndex, reg); USETW(req.wLength, len); error = ural_do_request(sc, &req, buf); if (error != 0) { device_printf(sc->sc_dev, "could not write MAC register: %s\n", usbd_errstr(error)); } } static void ural_bbp_write(struct ural_softc *sc, uint8_t reg, uint8_t val) { uint16_t tmp; int ntries; for (ntries = 0; ntries < 100; ntries++) { if (!(ural_read(sc, RAL_PHY_CSR8) & RAL_BBP_BUSY)) break; if (ural_pause(sc, hz / 100)) break; } if (ntries == 100) { device_printf(sc->sc_dev, "could not write to BBP\n"); return; } tmp = reg << 8 | val; ural_write(sc, RAL_PHY_CSR7, tmp); } static uint8_t ural_bbp_read(struct ural_softc *sc, uint8_t reg) { uint16_t val; int ntries; val = RAL_BBP_WRITE | reg << 8; ural_write(sc, RAL_PHY_CSR7, val); for (ntries = 0; ntries < 100; ntries++) { if (!(ural_read(sc, RAL_PHY_CSR8) & RAL_BBP_BUSY)) break; if (ural_pause(sc, hz / 100)) break; } if (ntries == 100) 
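ural_bbp_write() and ural_bbp_read() above poll the BBP-busy bit in RAL_PHY_CSR8 at most 100 times, pausing roughly hz/100 between probes before giving up. A generic userland sketch of that bounded busy-wait pattern, with a dummy predicate standing in for the register read:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static bool
wait_not_busy(bool (*busy)(void *), void *arg)
{
	int ntries;

	for (ntries = 0; ntries < 100; ntries++) {
		if (!busy(arg))
			return (true);
		usleep(10 * 1000);	/* roughly hz/100 in the driver */
	}
	return (false);	/* caller reports "could not write to BBP" */
}

static bool
fake_busy(void *arg)
{
	int *countdown = arg;

	return ((*countdown)-- > 0);	/* busy for a few polls, then ready */
}

int
main(void)
{
	int busy_for = 3;

	printf("ready: %d\n", wait_not_busy(fake_busy, &busy_for));
	return (0);
}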
{ device_printf(sc->sc_dev, "could not read BBP\n"); return 0; } return ural_read(sc, RAL_PHY_CSR7) & 0xff; } static void ural_rf_write(struct ural_softc *sc, uint8_t reg, uint32_t val) { uint32_t tmp; int ntries; for (ntries = 0; ntries < 100; ntries++) { if (!(ural_read(sc, RAL_PHY_CSR10) & RAL_RF_LOBUSY)) break; if (ural_pause(sc, hz / 100)) break; } if (ntries == 100) { device_printf(sc->sc_dev, "could not write to RF\n"); return; } tmp = RAL_RF_BUSY | RAL_RF_20BIT | (val & 0xfffff) << 2 | (reg & 0x3); ural_write(sc, RAL_PHY_CSR9, tmp & 0xffff); ural_write(sc, RAL_PHY_CSR10, tmp >> 16); /* remember last written value in sc */ sc->rf_regs[reg] = val; DPRINTFN(15, "RF R[%u] <- 0x%05x\n", reg & 0x3, val & 0xfffff); } static void ural_scan_start(struct ieee80211com *ic) { struct ural_softc *sc = ic->ic_softc; RAL_LOCK(sc); ural_write(sc, RAL_TXRX_CSR19, 0); ural_set_bssid(sc, ieee80211broadcastaddr); RAL_UNLOCK(sc); } static void ural_scan_end(struct ieee80211com *ic) { struct ural_softc *sc = ic->ic_softc; RAL_LOCK(sc); ural_enable_tsf_sync(sc); ural_set_bssid(sc, sc->sc_bssid); RAL_UNLOCK(sc); } static void ural_getradiocaps(struct ieee80211com *ic, int maxchans, int *nchans, struct ieee80211_channel chans[]) { struct ural_softc *sc = ic->ic_softc; uint8_t bands[IEEE80211_MODE_BYTES]; memset(bands, 0, sizeof(bands)); setbit(bands, IEEE80211_MODE_11B); setbit(bands, IEEE80211_MODE_11G); ieee80211_add_channel_list_2ghz(chans, maxchans, nchans, ural_chan_2ghz, nitems(ural_chan_2ghz), bands, 0); if (sc->rf_rev == RAL_RF_5222) { setbit(bands, IEEE80211_MODE_11A); ieee80211_add_channel_list_5ghz(chans, maxchans, nchans, ural_chan_5ghz, nitems(ural_chan_5ghz), bands, 0); } } static void ural_set_channel(struct ieee80211com *ic) { struct ural_softc *sc = ic->ic_softc; RAL_LOCK(sc); ural_set_chan(sc, ic->ic_curchan); RAL_UNLOCK(sc); } static void ural_set_chan(struct ural_softc *sc, struct ieee80211_channel *c) { struct ieee80211com *ic = &sc->sc_ic; uint8_t power, tmp; int i, chan; chan = ieee80211_chan2ieee(ic, c); if (chan == 0 || chan == IEEE80211_CHAN_ANY) return; if (IEEE80211_IS_CHAN_2GHZ(c)) power = min(sc->txpow[chan - 1], 31); else power = 31; /* adjust txpower using ifconfig settings */ power -= (100 - ic->ic_txpowlimit) / 8; DPRINTFN(2, "setting channel to %u, txpower to %u\n", chan, power); switch (sc->rf_rev) { case RAL_RF_2522: ural_rf_write(sc, RAL_RF1, 0x00814); ural_rf_write(sc, RAL_RF2, ural_rf2522_r2[chan - 1]); ural_rf_write(sc, RAL_RF3, power << 7 | 0x00040); break; case RAL_RF_2523: ural_rf_write(sc, RAL_RF1, 0x08804); ural_rf_write(sc, RAL_RF2, ural_rf2523_r2[chan - 1]); ural_rf_write(sc, RAL_RF3, power << 7 | 0x38044); ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286); break; case RAL_RF_2524: ural_rf_write(sc, RAL_RF1, 0x0c808); ural_rf_write(sc, RAL_RF2, ural_rf2524_r2[chan - 1]); ural_rf_write(sc, RAL_RF3, power << 7 | 0x00040); ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286); break; case RAL_RF_2525: ural_rf_write(sc, RAL_RF1, 0x08808); ural_rf_write(sc, RAL_RF2, ural_rf2525_hi_r2[chan - 1]); ural_rf_write(sc, RAL_RF3, power << 7 | 0x18044); ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286); ural_rf_write(sc, RAL_RF1, 0x08808); ural_rf_write(sc, RAL_RF2, ural_rf2525_r2[chan - 1]); ural_rf_write(sc, RAL_RF3, power << 7 | 0x18044); ural_rf_write(sc, RAL_RF4, (chan == 14) ? 
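ural_rf_write() above packs a 2-bit RF register index and a 20-bit value, together with the busy and 20-bit-mode flags, into one 32-bit word and pushes it to the chip as two 16-bit CSR writes. The sketch below mirrors that packing; the two flag values are placeholders, not the real definitions from the driver's register header:

#include <stdint.h>
#include <stdio.h>

#define RF_BUSY		0x80000000u	/* placeholder for RAL_RF_BUSY */
#define RF_20BIT	0x00100000u	/* placeholder for RAL_RF_20BIT */

static void
pack_rf_write(uint8_t reg, uint32_t val, uint16_t *lo, uint16_t *hi)
{
	uint32_t tmp;

	tmp = RF_BUSY | RF_20BIT | (val & 0xfffff) << 2 | (reg & 0x3);
	*lo = tmp & 0xffff;	/* written to RAL_PHY_CSR9 */
	*hi = tmp >> 16;	/* written to RAL_PHY_CSR10 */
}

int
main(void)
{
	uint16_t lo, hi;

	pack_rf_write(2, 0x08804, &lo, &hi);
	printf("CSR9=0x%04x CSR10=0x%04x\n", lo, hi);
	return (0);
}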
0x00280 : 0x00286); break; case RAL_RF_2525E: ural_rf_write(sc, RAL_RF1, 0x08808); ural_rf_write(sc, RAL_RF2, ural_rf2525e_r2[chan - 1]); ural_rf_write(sc, RAL_RF3, power << 7 | 0x18044); ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00286 : 0x00282); break; case RAL_RF_2526: ural_rf_write(sc, RAL_RF2, ural_rf2526_hi_r2[chan - 1]); ural_rf_write(sc, RAL_RF4, (chan & 1) ? 0x00386 : 0x00381); ural_rf_write(sc, RAL_RF1, 0x08804); ural_rf_write(sc, RAL_RF2, ural_rf2526_r2[chan - 1]); ural_rf_write(sc, RAL_RF3, power << 7 | 0x18044); ural_rf_write(sc, RAL_RF4, (chan & 1) ? 0x00386 : 0x00381); break; /* dual-band RF */ case RAL_RF_5222: for (i = 0; ural_rf5222[i].chan != chan; i++); ural_rf_write(sc, RAL_RF1, ural_rf5222[i].r1); ural_rf_write(sc, RAL_RF2, ural_rf5222[i].r2); ural_rf_write(sc, RAL_RF3, power << 7 | 0x00040); ural_rf_write(sc, RAL_RF4, ural_rf5222[i].r4); break; } if (ic->ic_opmode != IEEE80211_M_MONITOR && (ic->ic_flags & IEEE80211_F_SCAN) == 0) { /* set Japan filter bit for channel 14 */ tmp = ural_bbp_read(sc, 70); tmp &= ~RAL_JAPAN_FILTER; if (chan == 14) tmp |= RAL_JAPAN_FILTER; ural_bbp_write(sc, 70, tmp); /* clear CRC errors */ ural_read(sc, RAL_STA_CSR0); ural_pause(sc, hz / 100); ural_disable_rf_tune(sc); } /* XXX doesn't belong here */ /* update basic rate set */ ural_set_basicrates(sc, c); /* give the hardware some time to do the switchover */ ural_pause(sc, hz / 100); } /* * Disable RF auto-tuning. */ static void ural_disable_rf_tune(struct ural_softc *sc) { uint32_t tmp; if (sc->rf_rev != RAL_RF_2523) { tmp = sc->rf_regs[RAL_RF1] & ~RAL_RF1_AUTOTUNE; ural_rf_write(sc, RAL_RF1, tmp); } tmp = sc->rf_regs[RAL_RF3] & ~RAL_RF3_AUTOTUNE; ural_rf_write(sc, RAL_RF3, tmp); DPRINTFN(2, "disabling RF autotune\n"); } /* * Refer to IEEE Std 802.11-1999 pp. 123 for more information on TSF * synchronization. */ static void ural_enable_tsf_sync(struct ural_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); uint16_t logcwmin, preload, tmp; /* first, disable TSF synchronization */ ural_write(sc, RAL_TXRX_CSR19, 0); tmp = (16 * vap->iv_bss->ni_intval) << 4; ural_write(sc, RAL_TXRX_CSR18, tmp); logcwmin = (ic->ic_opmode == IEEE80211_M_IBSS) ? 2 : 0; preload = (ic->ic_opmode == IEEE80211_M_IBSS) ? 320 : 6; tmp = logcwmin << 12 | preload; ural_write(sc, RAL_TXRX_CSR20, tmp); /* finally, enable TSF synchronization */ tmp = RAL_ENABLE_TSF | RAL_ENABLE_TBCN; if (ic->ic_opmode == IEEE80211_M_STA) tmp |= RAL_ENABLE_TSF_SYNC(1); else tmp |= RAL_ENABLE_TSF_SYNC(2) | RAL_ENABLE_BEACON_GENERATOR; ural_write(sc, RAL_TXRX_CSR19, tmp); DPRINTF("enabling TSF synchronization\n"); } static void ural_enable_tsf(struct ural_softc *sc) { /* first, disable TSF synchronization */ ural_write(sc, RAL_TXRX_CSR19, 0); ural_write(sc, RAL_TXRX_CSR19, RAL_ENABLE_TSF | RAL_ENABLE_TSF_SYNC(2)); } #define RAL_RXTX_TURNAROUND 5 /* us */ static void ural_update_slot(struct ural_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; uint16_t slottime, sifs, eifs; slottime = IEEE80211_GET_SLOTTIME(ic); /* * These settings may sound a bit inconsistent but this is what the * reference driver does. 
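ural_enable_tsf_sync() above programs the beacon interval into TXRX_CSR18 and packs the contention-window exponent and preload count into TXRX_CSR20, with different values for IBSS and STA operation. A small sketch of exactly those register values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static void
tsf_sync_regs(bool ibss, uint16_t beacon_intval_tu,
    uint16_t *csr18, uint16_t *csr20)
{
	uint16_t logcwmin = ibss ? 2 : 0;
	uint16_t preload = ibss ? 320 : 6;

	*csr18 = (16 * beacon_intval_tu) << 4;	/* as in the driver */
	*csr20 = logcwmin << 12 | preload;
}

int
main(void)
{
	uint16_t csr18, csr20;

	tsf_sync_regs(false, 100, &csr18, &csr20);	/* typical STA case */
	printf("CSR18=0x%04x CSR20=0x%04x\n", csr18, csr20);
	return (0);
}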
*/ if (ic->ic_curmode == IEEE80211_MODE_11B) { sifs = 16 - RAL_RXTX_TURNAROUND; eifs = 364; } else { sifs = 10 - RAL_RXTX_TURNAROUND; eifs = 64; } ural_write(sc, RAL_MAC_CSR10, slottime); ural_write(sc, RAL_MAC_CSR11, sifs); ural_write(sc, RAL_MAC_CSR12, eifs); } static void ural_set_txpreamble(struct ural_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; uint16_t tmp; tmp = ural_read(sc, RAL_TXRX_CSR10); tmp &= ~RAL_SHORT_PREAMBLE; if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) tmp |= RAL_SHORT_PREAMBLE; ural_write(sc, RAL_TXRX_CSR10, tmp); } static void ural_set_basicrates(struct ural_softc *sc, const struct ieee80211_channel *c) { /* XXX wrong, take from rate set */ /* update basic rate set */ if (IEEE80211_IS_CHAN_5GHZ(c)) { /* 11a basic rates: 6, 12, 24Mbps */ ural_write(sc, RAL_TXRX_CSR11, 0x150); } else if (IEEE80211_IS_CHAN_ANYG(c)) { /* 11g basic rates: 1, 2, 5.5, 11, 6, 12, 24Mbps */ ural_write(sc, RAL_TXRX_CSR11, 0x15f); } else { /* 11b basic rates: 1, 2Mbps */ ural_write(sc, RAL_TXRX_CSR11, 0x3); } } static void ural_set_bssid(struct ural_softc *sc, const uint8_t *bssid) { uint16_t tmp; tmp = bssid[0] | bssid[1] << 8; ural_write(sc, RAL_MAC_CSR5, tmp); tmp = bssid[2] | bssid[3] << 8; ural_write(sc, RAL_MAC_CSR6, tmp); tmp = bssid[4] | bssid[5] << 8; ural_write(sc, RAL_MAC_CSR7, tmp); DPRINTF("setting BSSID to %6D\n", bssid, ":"); } static void ural_set_macaddr(struct ural_softc *sc, const uint8_t *addr) { uint16_t tmp; tmp = addr[0] | addr[1] << 8; ural_write(sc, RAL_MAC_CSR2, tmp); tmp = addr[2] | addr[3] << 8; ural_write(sc, RAL_MAC_CSR3, tmp); tmp = addr[4] | addr[5] << 8; ural_write(sc, RAL_MAC_CSR4, tmp); DPRINTF("setting MAC address to %6D\n", addr, ":"); } static void ural_setpromisc(struct ural_softc *sc) { uint32_t tmp; tmp = ural_read(sc, RAL_TXRX_CSR2); tmp &= ~RAL_DROP_NOT_TO_ME; if (sc->sc_ic.ic_promisc == 0) tmp |= RAL_DROP_NOT_TO_ME; ural_write(sc, RAL_TXRX_CSR2, tmp); DPRINTF("%s promiscuous mode\n", sc->sc_ic.ic_promisc ? 
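ural_set_bssid() and ural_set_macaddr() above split a 6-byte 802.11 address into three little-endian 16-bit register values (MAC_CSR5..7 for the BSSID, MAC_CSR2..4 for the local address). A trivial sketch of that packing:

#include <stdint.h>
#include <stdio.h>

static void
addr_to_csrs(const uint8_t addr[6], uint16_t csr[3])
{
	csr[0] = addr[0] | addr[1] << 8;
	csr[1] = addr[2] | addr[3] << 8;
	csr[2] = addr[4] | addr[5] << 8;
}

int
main(void)
{
	const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint16_t csr[3];

	addr_to_csrs(mac, csr);
	printf("0x%04x 0x%04x 0x%04x\n", csr[0], csr[1], csr[2]);
	return (0);
}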
"entering" : "leaving"); } static void ural_update_promisc(struct ieee80211com *ic) { struct ural_softc *sc = ic->ic_softc; RAL_LOCK(sc); if (sc->sc_running) ural_setpromisc(sc); RAL_UNLOCK(sc); } static const char * ural_get_rf(int rev) { switch (rev) { case RAL_RF_2522: return "RT2522"; case RAL_RF_2523: return "RT2523"; case RAL_RF_2524: return "RT2524"; case RAL_RF_2525: return "RT2525"; case RAL_RF_2525E: return "RT2525e"; case RAL_RF_2526: return "RT2526"; case RAL_RF_5222: return "RT5222"; default: return "unknown"; } } static void ural_read_eeprom(struct ural_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; uint16_t val; ural_eeprom_read(sc, RAL_EEPROM_CONFIG0, &val, 2); val = le16toh(val); sc->rf_rev = (val >> 11) & 0x7; sc->hw_radio = (val >> 10) & 0x1; sc->led_mode = (val >> 6) & 0x7; sc->rx_ant = (val >> 4) & 0x3; sc->tx_ant = (val >> 2) & 0x3; sc->nb_ant = val & 0x3; /* read MAC address */ ural_eeprom_read(sc, RAL_EEPROM_ADDRESS, ic->ic_macaddr, 6); /* read default values for BBP registers */ ural_eeprom_read(sc, RAL_EEPROM_BBP_BASE, sc->bbp_prom, 2 * 16); /* read Tx power for all b/g channels */ ural_eeprom_read(sc, RAL_EEPROM_TXPOWER, sc->txpow, 14); } static int ural_bbp_init(struct ural_softc *sc) { int i, ntries; /* wait for BBP to be ready */ for (ntries = 0; ntries < 100; ntries++) { if (ural_bbp_read(sc, RAL_BBP_VERSION) != 0) break; if (ural_pause(sc, hz / 100)) break; } if (ntries == 100) { device_printf(sc->sc_dev, "timeout waiting for BBP\n"); return EIO; } /* initialize BBP registers to default values */ for (i = 0; i < nitems(ural_def_bbp); i++) ural_bbp_write(sc, ural_def_bbp[i].reg, ural_def_bbp[i].val); #if 0 /* initialize BBP registers to values stored in EEPROM */ for (i = 0; i < 16; i++) { if (sc->bbp_prom[i].reg == 0xff) continue; ural_bbp_write(sc, sc->bbp_prom[i].reg, sc->bbp_prom[i].val); } #endif return 0; } static void ural_set_txantenna(struct ural_softc *sc, int antenna) { uint16_t tmp; uint8_t tx; tx = ural_bbp_read(sc, RAL_BBP_TX) & ~RAL_BBP_ANTMASK; if (antenna == 1) tx |= RAL_BBP_ANTA; else if (antenna == 2) tx |= RAL_BBP_ANTB; else tx |= RAL_BBP_DIVERSITY; /* need to force I/Q flip for RF 2525e, 2526 and 5222 */ if (sc->rf_rev == RAL_RF_2525E || sc->rf_rev == RAL_RF_2526 || sc->rf_rev == RAL_RF_5222) tx |= RAL_BBP_FLIPIQ; ural_bbp_write(sc, RAL_BBP_TX, tx); /* update values in PHY_CSR5 and PHY_CSR6 */ tmp = ural_read(sc, RAL_PHY_CSR5) & ~0x7; ural_write(sc, RAL_PHY_CSR5, tmp | (tx & 0x7)); tmp = ural_read(sc, RAL_PHY_CSR6) & ~0x7; ural_write(sc, RAL_PHY_CSR6, tmp | (tx & 0x7)); } static void ural_set_rxantenna(struct ural_softc *sc, int antenna) { uint8_t rx; rx = ural_bbp_read(sc, RAL_BBP_RX) & ~RAL_BBP_ANTMASK; if (antenna == 1) rx |= RAL_BBP_ANTA; else if (antenna == 2) rx |= RAL_BBP_ANTB; else rx |= RAL_BBP_DIVERSITY; /* need to force no I/Q flip for RF 2525e and 2526 */ if (sc->rf_rev == RAL_RF_2525E || sc->rf_rev == RAL_RF_2526) rx &= ~RAL_BBP_FLIPIQ; ural_bbp_write(sc, RAL_BBP_RX, rx); } static void ural_init(struct ural_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); uint16_t tmp; int i, ntries; RAL_LOCK_ASSERT(sc, MA_OWNED); ural_set_testmode(sc); ural_write(sc, 0x308, 0x00f0); /* XXX magic */ ural_stop(sc); /* initialize MAC registers to default values */ for (i = 0; i < nitems(ural_def_mac); i++) ural_write(sc, ural_def_mac[i].reg, ural_def_mac[i].val); /* wait for BBP and RF to wake up (this can take a long time!) 
*/ for (ntries = 0; ntries < 100; ntries++) { tmp = ural_read(sc, RAL_MAC_CSR17); if ((tmp & (RAL_BBP_AWAKE | RAL_RF_AWAKE)) == (RAL_BBP_AWAKE | RAL_RF_AWAKE)) break; if (ural_pause(sc, hz / 100)) break; } if (ntries == 100) { device_printf(sc->sc_dev, "timeout waiting for BBP/RF to wakeup\n"); goto fail; } /* we're ready! */ ural_write(sc, RAL_MAC_CSR1, RAL_HOST_READY); /* set basic rate set (will be updated later) */ ural_write(sc, RAL_TXRX_CSR11, 0x15f); if (ural_bbp_init(sc) != 0) goto fail; ural_set_chan(sc, ic->ic_curchan); /* clear statistic registers (STA_CSR0 to STA_CSR10) */ ural_read_multi(sc, RAL_STA_CSR0, sc->sta, sizeof sc->sta); ural_set_txantenna(sc, sc->tx_ant); ural_set_rxantenna(sc, sc->rx_ant); ural_set_macaddr(sc, vap ? vap->iv_myaddr : ic->ic_macaddr); /* * Allocate Tx and Rx xfer queues. */ ural_setup_tx_list(sc); /* kick Rx */ tmp = RAL_DROP_PHY | RAL_DROP_CRC; if (ic->ic_opmode != IEEE80211_M_MONITOR) { tmp |= RAL_DROP_CTL | RAL_DROP_BAD_VERSION; if (ic->ic_opmode != IEEE80211_M_HOSTAP) tmp |= RAL_DROP_TODS; if (ic->ic_promisc == 0) tmp |= RAL_DROP_NOT_TO_ME; } ural_write(sc, RAL_TXRX_CSR2, tmp); sc->sc_running = 1; usbd_xfer_set_stall(sc->sc_xfer[URAL_BULK_WR]); usbd_transfer_start(sc->sc_xfer[URAL_BULK_RD]); return; fail: ural_stop(sc); } static void ural_stop(struct ural_softc *sc) { RAL_LOCK_ASSERT(sc, MA_OWNED); sc->sc_running = 0; /* * Drain all the transfers, if not already drained: */ RAL_UNLOCK(sc); usbd_transfer_drain(sc->sc_xfer[URAL_BULK_WR]); usbd_transfer_drain(sc->sc_xfer[URAL_BULK_RD]); RAL_LOCK(sc); ural_unsetup_tx_list(sc); /* disable Rx */ ural_write(sc, RAL_TXRX_CSR2, RAL_DISABLE_RX); /* reset ASIC and BBP (but won't reset MAC registers!) */ ural_write(sc, RAL_MAC_CSR1, RAL_RESET_ASIC | RAL_RESET_BBP); /* wait a little */ ural_pause(sc, hz / 10); ural_write(sc, RAL_MAC_CSR1, 0); /* wait a little */ ural_pause(sc, hz / 10); } static int ural_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params) { struct ieee80211com *ic = ni->ni_ic; struct ural_softc *sc = ic->ic_softc; RAL_LOCK(sc); /* prevent management frames from being sent if we're not ready */ if (!sc->sc_running) { RAL_UNLOCK(sc); m_freem(m); return ENETDOWN; } if (sc->tx_nfree < RAL_TX_MINFREE) { RAL_UNLOCK(sc); m_freem(m); return EIO; } if (params == NULL) { /* * Legacy path; interpret frame contents to decide * precisely how to send the frame. */ if (ural_tx_mgt(sc, m, ni) != 0) goto bad; } else { /* * Caller supplied explicit parameters to use in * sending the frame. 
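ural_init() above builds the RX filter word for RAL_TXRX_CSR2: PHY- and CRC-error frames are always dropped, and outside monitor mode the filter additionally drops control frames, bad-version frames, to-DS frames (unless acting as an AP) and frames not addressed to us (unless promiscuous). The decision logic is sketched below; the RAL_DROP_* bit values are placeholders, not the real register definitions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DROP_PHY		0x0001	/* placeholder values */
#define DROP_CRC		0x0002
#define DROP_CTL		0x0004
#define DROP_BAD_VERSION	0x0008
#define DROP_TODS		0x0010
#define DROP_NOT_TO_ME		0x0020

static uint16_t
rx_filter(bool monitor, bool hostap, bool promisc)
{
	uint16_t tmp = DROP_PHY | DROP_CRC;

	if (!monitor) {
		tmp |= DROP_CTL | DROP_BAD_VERSION;
		if (!hostap)
			tmp |= DROP_TODS;
		if (!promisc)
			tmp |= DROP_NOT_TO_ME;
	}
	return (tmp);
}

int
main(void)
{
	printf("sta filter: 0x%04x\n", rx_filter(false, false, false));
	printf("monitor filter: 0x%04x\n", rx_filter(true, false, false));
	return (0);
}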
*/ if (ural_tx_raw(sc, m, ni, params) != 0) goto bad; } RAL_UNLOCK(sc); return 0; bad: RAL_UNLOCK(sc); return EIO; /* XXX */ } static void ural_ratectl_start(struct ural_softc *sc, struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct ural_vap *uvp = URAL_VAP(vap); /* clear statistic registers (STA_CSR0 to STA_CSR10) */ ural_read_multi(sc, RAL_STA_CSR0, sc->sta, sizeof sc->sta); usb_callout_reset(&uvp->ratectl_ch, hz, ural_ratectl_timeout, uvp); } static void ural_ratectl_timeout(void *arg) { struct ural_vap *uvp = arg; struct ieee80211vap *vap = &uvp->vap; struct ieee80211com *ic = vap->iv_ic; ieee80211_runtask(ic, &uvp->ratectl_task); } static void ural_ratectl_task(void *arg, int pending) { struct ural_vap *uvp = arg; struct ieee80211vap *vap = &uvp->vap; struct ural_softc *sc = vap->iv_ic->ic_softc; struct ieee80211_ratectl_tx_stats *txs = &sc->sc_txs; int fail; RAL_LOCK(sc); /* read and clear statistic registers (STA_CSR0 to STA_CSR10) */ ural_read_multi(sc, RAL_STA_CSR0, sc->sta, sizeof(sc->sta)); txs->flags = IEEE80211_RATECTL_TX_STATS_RETRIES; txs->nsuccess = sc->sta[7] + /* TX ok w/o retry */ sc->sta[8]; /* TX ok w/ retry */ fail = sc->sta[9]; /* TX retry-fail count */ txs->nframes = txs->nsuccess + fail; /* XXX fail * maxretry */ txs->nretries = sc->sta[8] + fail; ieee80211_ratectl_tx_update(vap, txs); /* count TX retry-fail as Tx errors */ if_inc_counter(vap->iv_ifp, IFCOUNTER_OERRORS, fail); usb_callout_reset(&uvp->ratectl_ch, hz, ural_ratectl_timeout, uvp); RAL_UNLOCK(sc); } static int ural_pause(struct ural_softc *sc, int timeout) { usb_pause_mtx(&sc->sc_mtx, timeout); return (0); } Index: head/sys/net80211/ieee80211_output.c =================================================================== --- head/sys/net80211/ieee80211_output.c (revision 330687) +++ head/sys/net80211/ieee80211_output.c (revision 330688) @@ -1,3891 +1,3924 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2001 Atsushi Onoe * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include "opt_inet.h" #include "opt_inet6.h" #include "opt_wlan.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef IEEE80211_SUPPORT_SUPERG #include #endif #ifdef IEEE80211_SUPPORT_TDMA #include #endif #include #include #include #if defined(INET) || defined(INET6) #include #endif #ifdef INET #include #include #include #endif #ifdef INET6 #include #endif #include #define ETHER_HEADER_COPY(dst, src) \ memcpy(dst, src, sizeof(struct ether_header)) static int ieee80211_fragment(struct ieee80211vap *, struct mbuf *, u_int hdrsize, u_int ciphdrsize, u_int mtu); static void ieee80211_tx_mgt_cb(struct ieee80211_node *, void *, int); #ifdef IEEE80211_DEBUG /* * Decide if an outbound management frame should be * printed when debugging is enabled. This filters some * of the less interesting frames that come frequently * (e.g. beacons). */ static __inline int doprint(struct ieee80211vap *vap, int subtype) { switch (subtype) { case IEEE80211_FC0_SUBTYPE_PROBE_RESP: return (vap->iv_opmode == IEEE80211_M_IBSS); } return 1; } #endif /* * Transmit a frame to the given destination on the given VAP. * * It's up to the caller to figure out the details of who this * is going to and resolving the node. * * This routine takes care of queuing it for power save, * A-MPDU state stuff, fast-frames state stuff, encapsulation * if required, then passing it up to the driver layer. * * This routine (for now) consumes the mbuf and frees the node * reference; it ideally will return a TX status which reflects * whether the mbuf was consumed or not, so the caller can * free the mbuf (if appropriate) and the node reference (again, * if appropriate.) */ int ieee80211_vap_pkt_send_dest(struct ieee80211vap *vap, struct mbuf *m, struct ieee80211_node *ni) { struct ieee80211com *ic = vap->iv_ic; struct ifnet *ifp = vap->iv_ifp; int mcast; if ((ni->ni_flags & IEEE80211_NODE_PWR_MGT) && (m->m_flags & M_PWR_SAV) == 0) { /* * Station in power save mode; pass the frame * to the 802.11 layer and continue. We'll get * the frame back when the time is right. * XXX lose WDS vap linkage? */ if (ieee80211_pwrsave(ni, m) != 0) if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); ieee80211_free_node(ni); /* * We queued it fine, so tell the upper layer * that we consumed it. */ return (0); } /* calculate priority so drivers can find the tx queue */ if (ieee80211_classify(ni, m)) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_OUTPUT, ni->ni_macaddr, NULL, "%s", "classification failure"); vap->iv_stats.is_tx_classify++; if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); m_freem(m); ieee80211_free_node(ni); /* XXX better status? */ return (0); } /* * Stash the node pointer. Note that we do this after * any call to ieee80211_dwds_mcast because that code * uses any existing value for rcvif to identify the * interface it (might have been) received on. */ m->m_pkthdr.rcvif = (void *)ni; mcast = (m->m_flags & (M_MCAST | M_BCAST)) ? 1: 0; BPF_MTAP(ifp, m); /* 802.3 tx */ /* * Check if A-MPDU tx aggregation is setup or if we * should try to enable it. The sta must be associated * with HT and A-MPDU enabled for use. When the policy * routine decides we should enable A-MPDU we issue an * ADDBA request and wait for a reply. The frame being * encapsulated will go out w/o using A-MPDU, or possibly * it might be collected by the driver and held/retransmit. 
* The default ic_ampdu_enable routine handles staggering * ADDBA requests in case the receiver NAK's us or we are * otherwise unable to establish a BA stream. * * Don't treat group-addressed frames as candidates for aggregation; * net80211 doesn't support 802.11aa-2012 and so group addressed * frames will always have sequence numbers allocated from the NON_QOS * TID. */ if ((ni->ni_flags & IEEE80211_NODE_AMPDU_TX) && (vap->iv_flags_ht & IEEE80211_FHT_AMPDU_TX)) { if ((m->m_flags & M_EAPOL) == 0 && (! mcast)) { int tid = WME_AC_TO_TID(M_WME_GETAC(m)); struct ieee80211_tx_ampdu *tap = &ni->ni_tx_ampdu[tid]; ieee80211_txampdu_count_packet(tap); if (IEEE80211_AMPDU_RUNNING(tap)) { /* * Operational, mark frame for aggregation. * * XXX do tx aggregation here */ m->m_flags |= M_AMPDU_MPDU; } else if (!IEEE80211_AMPDU_REQUESTED(tap) && ic->ic_ampdu_enable(ni, tap)) { /* * Not negotiated yet, request service. */ ieee80211_ampdu_request(ni, tap); /* XXX hold frame for reply? */ } } } #ifdef IEEE80211_SUPPORT_SUPERG /* * Check for AMSDU/FF; queue for aggregation * * Note: we don't bother trying to do fast frames or * A-MSDU encapsulation for 802.3 drivers. Now, we * likely could do it for FF (because it's a magic * atheros tunnel LLC type) but I don't think we're going * to really need to. For A-MSDU we'd have to set the * A-MSDU QoS bit in the wifi header, so we just plain * can't do it. * * Strictly speaking, we could actually /do/ A-MSDU / FF * with A-MPDU together which for certain circumstances * is beneficial (eg A-MSDU of TCK ACKs.) However, * I'll ignore that for now so existing behaviour is maintained. * Later on it would be good to make "amsdu + ampdu" configurable. */ else if (__predict_true((vap->iv_caps & IEEE80211_C_8023ENCAP) == 0)) { if ((! mcast) && ieee80211_amsdu_tx_ok(ni)) { m = ieee80211_amsdu_check(ni, m); if (m == NULL) { /* NB: any ni ref held on stageq */ IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG, "%s: amsdu_check queued frame\n", __func__); return (0); } } else if ((! mcast) && IEEE80211_ATH_CAP(vap, ni, IEEE80211_NODE_FF)) { m = ieee80211_ff_check(ni, m); if (m == NULL) { /* NB: any ni ref held on stageq */ IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG, "%s: ff_check queued frame\n", __func__); return (0); } } } #endif /* IEEE80211_SUPPORT_SUPERG */ /* * Grab the TX lock - serialise the TX process from this * point (where TX state is being checked/modified) * through to driver queue. */ IEEE80211_TX_LOCK(ic); /* * XXX make the encap and transmit code a separate function * so things like the FF (and later A-MSDU) path can just call * it for flushed frames. */ if (__predict_true((vap->iv_caps & IEEE80211_C_8023ENCAP) == 0)) { /* * Encapsulate the packet in prep for transmission. */ m = ieee80211_encap(vap, ni, m); if (m == NULL) { /* NB: stat+msg handled in ieee80211_encap */ IEEE80211_TX_UNLOCK(ic); ieee80211_free_node(ni); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); return (ENOBUFS); } } (void) ieee80211_parent_xmitpkt(ic, m); /* * Unlock at this point - no need to hold it across * ieee80211_free_node() (ie, the comlock) */ IEEE80211_TX_UNLOCK(ic); ic->ic_lastdata = ticks; return (0); } /* * Send the given mbuf through the given vap. * * This consumes the mbuf regardless of whether the transmit * was successful or not. * * This does none of the initial checks that ieee80211_start() * does (eg CAC timeout, interface wakeup) - the caller must * do this first. 
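The comment in ieee80211_vap_pkt_send_dest() above describes the per-frame A-MPDU decision: for unicast, non-EAPOL frames on an HT station with aggregation enabled, either tag the frame for an already-running block-ack session or issue an ADDBA request when the policy routine allows it. A compact sketch of that decision flow, with plain booleans standing in for the node/vap flag and tap state checks:

#include <stdbool.h>
#include <stdio.h>

enum ampdu_action { AMPDU_NONE, AMPDU_TAG_MPDU, AMPDU_REQUEST_ADDBA };

static enum ampdu_action
ampdu_decide(bool mcast, bool eapol, bool running, bool requested,
    bool policy_ok)
{
	if (mcast || eapol)
		return (AMPDU_NONE);		/* never aggregate these */
	if (running)
		return (AMPDU_TAG_MPDU);	/* mark M_AMPDU_MPDU */
	if (!requested && policy_ok)
		return (AMPDU_REQUEST_ADDBA);	/* ieee80211_ampdu_request() */
	return (AMPDU_NONE);
}

int
main(void)
{
	printf("%d\n", ampdu_decide(false, false, true, false, true));
	printf("%d\n", ampdu_decide(false, false, false, false, true));
	return (0);
}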
*/ static int ieee80211_start_pkt(struct ieee80211vap *vap, struct mbuf *m) { #define IS_DWDS(vap) \ (vap->iv_opmode == IEEE80211_M_WDS && \ (vap->iv_flags_ext & IEEE80211_FEXT_WDSLEGACY) == 0) struct ieee80211com *ic = vap->iv_ic; struct ifnet *ifp = vap->iv_ifp; struct ieee80211_node *ni; struct ether_header *eh; /* * Cancel any background scan. */ if (ic->ic_flags & IEEE80211_F_SCAN) ieee80211_cancel_anyscan(vap); /* * Find the node for the destination so we can do * things like power save and fast frames aggregation. * * NB: past this point various code assumes the first * mbuf has the 802.3 header present (and contiguous). */ ni = NULL; if (m->m_len < sizeof(struct ether_header) && (m = m_pullup(m, sizeof(struct ether_header))) == NULL) { IEEE80211_DPRINTF(vap, IEEE80211_MSG_OUTPUT, "discard frame, %s\n", "m_pullup failed"); vap->iv_stats.is_tx_nobuf++; /* XXX */ if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); return (ENOBUFS); } eh = mtod(m, struct ether_header *); if (ETHER_IS_MULTICAST(eh->ether_dhost)) { if (IS_DWDS(vap)) { /* * Only unicast frames from the above go out * DWDS vaps; multicast frames are handled by * dispatching the frame as it comes through * the AP vap (see below). */ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_WDS, eh->ether_dhost, "mcast", "%s", "on DWDS"); vap->iv_stats.is_dwds_mcast++; m_freem(m); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); /* XXX better status? */ return (ENOBUFS); } if (vap->iv_opmode == IEEE80211_M_HOSTAP) { /* * Spam DWDS vap's w/ multicast traffic. */ /* XXX only if dwds in use? */ ieee80211_dwds_mcast(vap, m); } } #ifdef IEEE80211_SUPPORT_MESH if (vap->iv_opmode != IEEE80211_M_MBSS) { #endif ni = ieee80211_find_txnode(vap, eh->ether_dhost); if (ni == NULL) { /* NB: ieee80211_find_txnode does stat+msg */ if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); m_freem(m); /* XXX better status? */ return (ENOBUFS); } if (ni->ni_associd == 0 && (ni->ni_flags & IEEE80211_NODE_ASSOCID)) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_OUTPUT, eh->ether_dhost, NULL, "sta not associated (type 0x%04x)", htons(eh->ether_type)); vap->iv_stats.is_tx_notassoc++; if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); m_freem(m); ieee80211_free_node(ni); /* XXX better status? */ return (ENOBUFS); } #ifdef IEEE80211_SUPPORT_MESH } else { if (!IEEE80211_ADDR_EQ(eh->ether_shost, vap->iv_myaddr)) { /* * Proxy station only if configured. */ if (!ieee80211_mesh_isproxyena(vap)) { IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_OUTPUT | IEEE80211_MSG_MESH, eh->ether_dhost, NULL, "%s", "proxy not enabled"); vap->iv_stats.is_mesh_notproxy++; if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); m_freem(m); /* XXX better status? */ return (ENOBUFS); } IEEE80211_DPRINTF(vap, IEEE80211_MSG_OUTPUT, "forward frame from DS SA(%6D), DA(%6D)\n", eh->ether_shost, ":", eh->ether_dhost, ":"); ieee80211_mesh_proxy_check(vap, eh->ether_shost); } ni = ieee80211_mesh_discover(vap, eh->ether_dhost, m); if (ni == NULL) { /* * NB: ieee80211_mesh_discover holds/disposes * frame (e.g. queueing on path discovery). */ if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); /* XXX better status? */ return (ENOBUFS); } } #endif /* * We've resolved the sender, so attempt to transmit it. */ if (vap->iv_state == IEEE80211_S_SLEEP) { /* * In power save; queue frame and then wakeup device * for transmit. 
*/ ic->ic_lastdata = ticks; if (ieee80211_pwrsave(ni, m) != 0) if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); ieee80211_free_node(ni); ieee80211_new_state(vap, IEEE80211_S_RUN, 0); return (0); } if (ieee80211_vap_pkt_send_dest(vap, m, ni) != 0) return (ENOBUFS); return (0); #undef IS_DWDS } /* * Start method for vap's. All packets from the stack come * through here. We handle common processing of the packets * before dispatching them to the underlying device. * * if_transmit() requires that the mbuf be consumed by this call * regardless of the return condition. */ int ieee80211_vap_transmit(struct ifnet *ifp, struct mbuf *m) { struct ieee80211vap *vap = ifp->if_softc; struct ieee80211com *ic = vap->iv_ic; /* * No data frames go out unless we're running. * Note in particular this covers CAC and CSA * states (though maybe we should check muting * for CSA). */ if (vap->iv_state != IEEE80211_S_RUN && vap->iv_state != IEEE80211_S_SLEEP) { IEEE80211_LOCK(ic); /* re-check under the com lock to avoid races */ if (vap->iv_state != IEEE80211_S_RUN && vap->iv_state != IEEE80211_S_SLEEP) { IEEE80211_DPRINTF(vap, IEEE80211_MSG_OUTPUT, "%s: ignore queue, in %s state\n", __func__, ieee80211_state_name[vap->iv_state]); vap->iv_stats.is_tx_badstate++; IEEE80211_UNLOCK(ic); ifp->if_drv_flags |= IFF_DRV_OACTIVE; m_freem(m); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); return (ENETDOWN); } IEEE80211_UNLOCK(ic); } /* * Sanitize mbuf flags for net80211 use. We cannot * clear M_PWR_SAV or M_MORE_DATA because these may * be set for frames that are re-submitted from the * power save queue. * * NB: This must be done before ieee80211_classify as * it marks EAPOL in frames with M_EAPOL. */ m->m_flags &= ~(M_80211_TX - M_PWR_SAV - M_MORE_DATA); /* * Bump to the packet transmission path. * The mbuf will be consumed here. */ return (ieee80211_start_pkt(vap, m)); } void ieee80211_vap_qflush(struct ifnet *ifp) { /* Empty for now */ } /* * 802.11 raw output routine. * * XXX TODO: this (and other send routines) should correctly * XXX keep the pwr mgmt bit set if it decides to call into the * XXX driver to send a frame whilst the state is SLEEP. * * Otherwise the peer may decide that we're awake and flood us * with traffic we are still too asleep to receive! */ int ieee80211_raw_output(struct ieee80211vap *vap, struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params) { struct ieee80211com *ic = vap->iv_ic; int error; /* * Set node - the caller has taken a reference, so ensure * that the mbuf has the same node value that * it would if it were going via the normal path. */ m->m_pkthdr.rcvif = (void *)ni; /* * Attempt to add bpf transmit parameters. * * For now it's ok to fail; the raw_xmit api still takes * them as an option. * * Later on when ic_raw_xmit() has params removed, * they'll have to be added - so fail the transmit if * they can't be. 
*/ if (params) (void) ieee80211_add_xmit_params(m, params); error = ic->ic_raw_xmit(ni, m, params); if (error) { if_inc_counter(vap->iv_ifp, IFCOUNTER_OERRORS, 1); ieee80211_free_node(ni); } return (error); } static int ieee80211_validate_frame(struct mbuf *m, const struct ieee80211_bpf_params *params) { struct ieee80211_frame *wh; int type; if (m->m_pkthdr.len < sizeof(struct ieee80211_frame_ack)) return (EINVAL); wh = mtod(m, struct ieee80211_frame *); if ((wh->i_fc[0] & IEEE80211_FC0_VERSION_MASK) != IEEE80211_FC0_VERSION_0) return (EINVAL); type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; if (type != IEEE80211_FC0_TYPE_DATA) { if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) != IEEE80211_FC1_DIR_NODS) return (EINVAL); if (type != IEEE80211_FC0_TYPE_MGT && (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) != 0) return (EINVAL); /* XXX skip other field checks? */ } if ((params && (params->ibp_flags & IEEE80211_BPF_CRYPTO) != 0) || (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) != 0) { int subtype; subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; /* * See IEEE Std 802.11-2012, * 8.2.4.1.9 'Protected Frame field' */ /* XXX no support for robust management frames yet. */ if (!(type == IEEE80211_FC0_TYPE_DATA || (type == IEEE80211_FC0_TYPE_MGT && subtype == IEEE80211_FC0_SUBTYPE_AUTH))) return (EINVAL); wh->i_fc[1] |= IEEE80211_FC1_PROTECTED; } if (m->m_pkthdr.len < ieee80211_anyhdrsize(wh)) return (EINVAL); return (0); } /* * 802.11 output routine. This is (currently) used only to * connect bpf write calls to the 802.11 layer for injecting * raw 802.11 frames. */ int ieee80211_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst, struct route *ro) { #define senderr(e) do { error = (e); goto bad;} while (0) const struct ieee80211_bpf_params *params = NULL; struct ieee80211_node *ni = NULL; struct ieee80211vap *vap; struct ieee80211_frame *wh; struct ieee80211com *ic = NULL; int error; int ret; if (ifp->if_drv_flags & IFF_DRV_OACTIVE) { /* * Short-circuit requests if the vap is marked OACTIVE * as this can happen because a packet came down through * ieee80211_start before the vap entered RUN state in * which case it's ok to just drop the frame. This * should not be necessary but callers of if_output don't * check OACTIVE. */ senderr(ENETDOWN); } vap = ifp->if_softc; ic = vap->iv_ic; /* * Hand to the 802.3 code if not tagged as * a raw 802.11 frame. */ if (dst->sa_family != AF_IEEE80211) return vap->iv_output(ifp, m, dst, ro); #ifdef MAC error = mac_ifnet_check_transmit(ifp, m); if (error) senderr(error); #endif if (ifp->if_flags & IFF_MONITOR) senderr(ENETDOWN); if (!IFNET_IS_UP_RUNNING(ifp)) senderr(ENETDOWN); if (vap->iv_state == IEEE80211_S_CAC) { IEEE80211_DPRINTF(vap, IEEE80211_MSG_OUTPUT | IEEE80211_MSG_DOTH, "block %s frame in CAC state\n", "raw data"); vap->iv_stats.is_tx_badstate++; senderr(EIO); /* XXX */ } else if (vap->iv_state == IEEE80211_S_SCAN) senderr(EIO); /* XXX bypass bridge, pfil, carp, etc. */ /* * NB: DLT_IEEE802_11_RADIO identifies the parameters are * present by setting the sa_len field of the sockaddr (yes, * this is a hack). * NB: we assume sa_data is suitably aligned to cast. 
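ieee80211_validate_frame() above sanity-checks injected raw frames before they reach the driver: protocol version must be 0, control and management frames must use the "no DS" direction, and only management frames may carry the more-fragments bit. The sketch below reproduces just the frame-control checks, using the standard 802.11 frame-control bit layout:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FC0_VERSION_MASK	0x03
#define FC0_TYPE_MASK		0x0c
#define FC0_TYPE_MGT		0x00
#define FC0_TYPE_DATA		0x08
#define FC1_DIR_MASK		0x03
#define FC1_DIR_NODS		0x00
#define FC1_MORE_FRAG		0x04

static bool
fc_ok(uint8_t fc0, uint8_t fc1)
{
	if ((fc0 & FC0_VERSION_MASK) != 0)
		return (false);
	if ((fc0 & FC0_TYPE_MASK) != FC0_TYPE_DATA) {
		if ((fc1 & FC1_DIR_MASK) != FC1_DIR_NODS)
			return (false);
		if ((fc0 & FC0_TYPE_MASK) != FC0_TYPE_MGT &&
		    (fc1 & FC1_MORE_FRAG) != 0)
			return (false);
	}
	return (true);
}

int
main(void)
{
	printf("%d\n", fc_ok(0x40, 0x00));	/* probe request: accepted */
	printf("%d\n", fc_ok(0x40, 0x01));	/* mgmt with ToDS: rejected */
	return (0);
}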
*/ if (dst->sa_len != 0) params = (const struct ieee80211_bpf_params *)dst->sa_data; error = ieee80211_validate_frame(m, params); if (error != 0) senderr(error); wh = mtod(m, struct ieee80211_frame *); /* locate destination node */ switch (wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) { case IEEE80211_FC1_DIR_NODS: case IEEE80211_FC1_DIR_FROMDS: ni = ieee80211_find_txnode(vap, wh->i_addr1); break; case IEEE80211_FC1_DIR_TODS: case IEEE80211_FC1_DIR_DSTODS: ni = ieee80211_find_txnode(vap, wh->i_addr3); break; default: senderr(EDOOFUS); } if (ni == NULL) { /* * Permit packets w/ bpf params through regardless * (see below about sa_len). */ if (dst->sa_len == 0) senderr(EHOSTUNREACH); ni = ieee80211_ref_node(vap->iv_bss); } /* * Sanitize mbuf for net80211 flags leaked from above. * * NB: This must be done before ieee80211_classify as * it marks EAPOL in frames with M_EAPOL. */ m->m_flags &= ~M_80211_TX; m->m_flags |= M_ENCAP; /* mark encapsulated */ if (IEEE80211_IS_DATA(wh)) { /* calculate priority so drivers can find the tx queue */ if (ieee80211_classify(ni, m)) senderr(EIO); /* XXX */ /* NB: ieee80211_encap does not include 802.11 header */ IEEE80211_NODE_STAT_ADD(ni, tx_bytes, m->m_pkthdr.len - ieee80211_hdrsize(wh)); } else M_WME_SETAC(m, WME_AC_BE); IEEE80211_NODE_STAT(ni, tx_data); if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { IEEE80211_NODE_STAT(ni, tx_mcast); m->m_flags |= M_MCAST; } else IEEE80211_NODE_STAT(ni, tx_ucast); IEEE80211_TX_LOCK(ic); ret = ieee80211_raw_output(vap, ni, m, params); IEEE80211_TX_UNLOCK(ic); return (ret); bad: if (m != NULL) m_freem(m); if (ni != NULL) ieee80211_free_node(ni); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); return error; #undef senderr } /* * Set the direction field and address fields of an outgoing * frame. Note this should be called early on in constructing * a frame as it sets i_fc[1]; other bits can then be or'd in. 
*/ void ieee80211_send_setup( struct ieee80211_node *ni, struct mbuf *m, int type, int tid, const uint8_t sa[IEEE80211_ADDR_LEN], const uint8_t da[IEEE80211_ADDR_LEN], const uint8_t bssid[IEEE80211_ADDR_LEN]) { #define WH4(wh) ((struct ieee80211_frame_addr4 *)wh) struct ieee80211vap *vap = ni->ni_vap; struct ieee80211_tx_ampdu *tap; struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *); ieee80211_seq seqno; IEEE80211_TX_LOCK_ASSERT(ni->ni_ic); wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | type; if ((type & IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_DATA) { switch (vap->iv_opmode) { case IEEE80211_M_STA: wh->i_fc[1] = IEEE80211_FC1_DIR_TODS; IEEE80211_ADDR_COPY(wh->i_addr1, bssid); IEEE80211_ADDR_COPY(wh->i_addr2, sa); IEEE80211_ADDR_COPY(wh->i_addr3, da); break; case IEEE80211_M_IBSS: case IEEE80211_M_AHDEMO: wh->i_fc[1] = IEEE80211_FC1_DIR_NODS; IEEE80211_ADDR_COPY(wh->i_addr1, da); IEEE80211_ADDR_COPY(wh->i_addr2, sa); IEEE80211_ADDR_COPY(wh->i_addr3, bssid); break; case IEEE80211_M_HOSTAP: wh->i_fc[1] = IEEE80211_FC1_DIR_FROMDS; IEEE80211_ADDR_COPY(wh->i_addr1, da); IEEE80211_ADDR_COPY(wh->i_addr2, bssid); IEEE80211_ADDR_COPY(wh->i_addr3, sa); break; case IEEE80211_M_WDS: wh->i_fc[1] = IEEE80211_FC1_DIR_DSTODS; IEEE80211_ADDR_COPY(wh->i_addr1, da); IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr); IEEE80211_ADDR_COPY(wh->i_addr3, da); IEEE80211_ADDR_COPY(WH4(wh)->i_addr4, sa); break; case IEEE80211_M_MBSS: #ifdef IEEE80211_SUPPORT_MESH if (IEEE80211_IS_MULTICAST(da)) { wh->i_fc[1] = IEEE80211_FC1_DIR_FROMDS; /* XXX next hop */ IEEE80211_ADDR_COPY(wh->i_addr1, da); IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr); } else { wh->i_fc[1] = IEEE80211_FC1_DIR_DSTODS; IEEE80211_ADDR_COPY(wh->i_addr1, da); IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr); IEEE80211_ADDR_COPY(wh->i_addr3, da); IEEE80211_ADDR_COPY(WH4(wh)->i_addr4, sa); } #endif break; case IEEE80211_M_MONITOR: /* NB: to quiet compiler */ break; } } else { wh->i_fc[1] = IEEE80211_FC1_DIR_NODS; IEEE80211_ADDR_COPY(wh->i_addr1, da); IEEE80211_ADDR_COPY(wh->i_addr2, sa); #ifdef IEEE80211_SUPPORT_MESH if (vap->iv_opmode == IEEE80211_M_MBSS) IEEE80211_ADDR_COPY(wh->i_addr3, sa); else #endif IEEE80211_ADDR_COPY(wh->i_addr3, bssid); } *(uint16_t *)&wh->i_dur[0] = 0; /* * XXX TODO: this is what the TX lock is for. * Here we're incrementing sequence numbers, and they * need to be in lock-step with what the driver is doing * both in TX ordering and crypto encap (IV increment.) * * If the driver does seqno itself, then we can skip * assigning sequence numbers here, and we can avoid * requiring the TX lock. */ tap = &ni->ni_tx_ampdu[tid]; if (tid != IEEE80211_NONQOS_TID && IEEE80211_AMPDU_RUNNING(tap)) { m->m_flags |= M_AMPDU_MPDU; /* NB: zero out i_seq field (for s/w encryption etc) */ *(uint16_t *)&wh->i_seq[0] = 0; } else { if (IEEE80211_HAS_SEQ(type & IEEE80211_FC0_TYPE_MASK, type & IEEE80211_FC0_SUBTYPE_MASK)) /* * 802.11-2012 9.3.2.10 - QoS multicast frames * come out of a different seqno space. */ if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { seqno = ni->ni_txseqs[IEEE80211_NONQOS_TID]++; } else { seqno = ni->ni_txseqs[tid]++; } else seqno = 0; *(uint16_t *)&wh->i_seq[0] = htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT); M_SEQNO_SET(m, seqno); } if (IEEE80211_IS_MULTICAST(wh->i_addr1)) m->m_flags |= M_MCAST; #undef WH4 } /* * Send a management frame to the specified node. The node pointer * must have a reference as the pointer will be passed to the driver * and potentially held for a long time. 
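The tail of ieee80211_send_setup() below assigns the sequence-control field: the 12-bit sequence number sits in the upper bits of the 16-bit field (the low nibble is the fragment number), and group-addressed QoS frames draw from the non-QoS TID counter instead of the per-TID one. A sketch of that packing; the non-QoS TID index used here follows the usual net80211 convention of one counter past the 16 QoS TIDs:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SEQ_SHIFT	4	/* fragment number occupies the low 4 bits */
#define NONQOS_TID	16	/* one counter past the 16 QoS TIDs */

static uint16_t
assign_seqctl(uint16_t txseqs[NONQOS_TID + 1], int tid, bool mcast)
{
	uint16_t seqno;

	seqno = mcast ? txseqs[NONQOS_TID]++ : txseqs[tid]++;
	return ((uint16_t)(seqno << SEQ_SHIFT));	/* stored little-endian */
}

int
main(void)
{
	uint16_t txseqs[NONQOS_TID + 1] = { 0 };

	printf("0x%04x\n", assign_seqctl(txseqs, 0, false));	/* 0x0000 */
	printf("0x%04x\n", assign_seqctl(txseqs, 0, false));	/* 0x0010 */
	return (0);
}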
If the frame is successfully * dispatched to the driver, then it is responsible for freeing the * reference (and potentially free'ing up any associated storage); * otherwise deal with reclaiming any reference (on error). */ int ieee80211_mgmt_output(struct ieee80211_node *ni, struct mbuf *m, int type, struct ieee80211_bpf_params *params) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct ieee80211_frame *wh; int ret; KASSERT(ni != NULL, ("null node")); if (vap->iv_state == IEEE80211_S_CAC) { IEEE80211_NOTE(vap, IEEE80211_MSG_OUTPUT | IEEE80211_MSG_DOTH, ni, "block %s frame in CAC state", ieee80211_mgt_subtype_name(type)); vap->iv_stats.is_tx_badstate++; ieee80211_free_node(ni); m_freem(m); return EIO; /* XXX */ } M_PREPEND(m, sizeof(struct ieee80211_frame), M_NOWAIT); if (m == NULL) { ieee80211_free_node(ni); return ENOMEM; } IEEE80211_TX_LOCK(ic); wh = mtod(m, struct ieee80211_frame *); ieee80211_send_setup(ni, m, IEEE80211_FC0_TYPE_MGT | type, IEEE80211_NONQOS_TID, vap->iv_myaddr, ni->ni_macaddr, ni->ni_bssid); if (params->ibp_flags & IEEE80211_BPF_CRYPTO) { IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_AUTH, wh->i_addr1, "encrypting frame (%s)", __func__); wh->i_fc[1] |= IEEE80211_FC1_PROTECTED; } m->m_flags |= M_ENCAP; /* mark encapsulated */ KASSERT(type != IEEE80211_FC0_SUBTYPE_PROBE_RESP, ("probe response?")); M_WME_SETAC(m, params->ibp_pri); #ifdef IEEE80211_DEBUG /* avoid printing too many frames */ if ((ieee80211_msg_debug(vap) && doprint(vap, type)) || ieee80211_msg_dumppkts(vap)) { printf("[%s] send %s on channel %u\n", ether_sprintf(wh->i_addr1), ieee80211_mgt_subtype_name(type), ieee80211_chan2ieee(ic, ic->ic_curchan)); } #endif IEEE80211_NODE_STAT(ni, tx_mgmt); ret = ieee80211_raw_output(vap, ni, m, params); IEEE80211_TX_UNLOCK(ic); return (ret); } static void ieee80211_nulldata_transmitted(struct ieee80211_node *ni, void *arg, int status) { struct ieee80211vap *vap = ni->ni_vap; wakeup(vap); } /* * Send a null data frame to the specified node. If the station * is setup for QoS then a QoS Null Data frame is constructed. * If this is a WDS station then a 4-address frame is constructed. * * NB: the caller is assumed to have setup a node reference * for use; this is necessary to deal with a race condition * when probing for inactive stations. Like ieee80211_mgmt_output * we must cleanup any node reference on error; however we * can safely just unref it as we know it will never be the * last reference to the node. 
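 *
 * A minimal caller sketch (hypothetical; roughly how inactive
 * stations are probed elsewhere in net80211, with the node
 * reference taken explicitly for the call):
 *
 *	(void) ieee80211_send_nulldata(ieee80211_ref_node(ni));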
*/ int ieee80211_send_nulldata(struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct mbuf *m; struct ieee80211_frame *wh; int hdrlen; uint8_t *frm; int ret; if (vap->iv_state == IEEE80211_S_CAC) { IEEE80211_NOTE(vap, IEEE80211_MSG_OUTPUT | IEEE80211_MSG_DOTH, ni, "block %s frame in CAC state", "null data"); ieee80211_unref_node(&ni); vap->iv_stats.is_tx_badstate++; return EIO; /* XXX */ } if (ni->ni_flags & (IEEE80211_NODE_QOS|IEEE80211_NODE_HT)) hdrlen = sizeof(struct ieee80211_qosframe); else hdrlen = sizeof(struct ieee80211_frame); /* NB: only WDS vap's get 4-address frames */ if (vap->iv_opmode == IEEE80211_M_WDS) hdrlen += IEEE80211_ADDR_LEN; if (ic->ic_flags & IEEE80211_F_DATAPAD) hdrlen = roundup(hdrlen, sizeof(uint32_t)); m = ieee80211_getmgtframe(&frm, ic->ic_headroom + hdrlen, 0); if (m == NULL) { /* XXX debug msg */ ieee80211_unref_node(&ni); vap->iv_stats.is_tx_nobuf++; return ENOMEM; } KASSERT(M_LEADINGSPACE(m) >= hdrlen, ("leading space %zd", M_LEADINGSPACE(m))); M_PREPEND(m, hdrlen, M_NOWAIT); if (m == NULL) { /* NB: cannot happen */ ieee80211_free_node(ni); return ENOMEM; } IEEE80211_TX_LOCK(ic); wh = mtod(m, struct ieee80211_frame *); /* NB: a little lie */ if (ni->ni_flags & IEEE80211_NODE_QOS) { const int tid = WME_AC_TO_TID(WME_AC_BE); uint8_t *qos; ieee80211_send_setup(ni, m, IEEE80211_FC0_TYPE_DATA | IEEE80211_FC0_SUBTYPE_QOS_NULL, tid, vap->iv_myaddr, ni->ni_macaddr, ni->ni_bssid); if (vap->iv_opmode == IEEE80211_M_WDS) qos = ((struct ieee80211_qosframe_addr4 *) wh)->i_qos; else qos = ((struct ieee80211_qosframe *) wh)->i_qos; qos[0] = tid & IEEE80211_QOS_TID; if (ic->ic_wme.wme_wmeChanParams.cap_wmeParams[WME_AC_BE].wmep_noackPolicy) qos[0] |= IEEE80211_QOS_ACKPOLICY_NOACK; qos[1] = 0; } else { ieee80211_send_setup(ni, m, IEEE80211_FC0_TYPE_DATA | IEEE80211_FC0_SUBTYPE_NODATA, IEEE80211_NONQOS_TID, vap->iv_myaddr, ni->ni_macaddr, ni->ni_bssid); } if (vap->iv_opmode != IEEE80211_M_WDS) { /* NB: power management bit is never sent by an AP */ if ((ni->ni_flags & IEEE80211_NODE_PWR_MGT) && vap->iv_opmode != IEEE80211_M_HOSTAP) wh->i_fc[1] |= IEEE80211_FC1_PWR_MGT; } if ((ic->ic_flags & IEEE80211_F_SCAN) && (ni->ni_flags & IEEE80211_NODE_PWR_MGT)) { ieee80211_add_callback(m, ieee80211_nulldata_transmitted, NULL); } m->m_len = m->m_pkthdr.len = hdrlen; m->m_flags |= M_ENCAP; /* mark encapsulated */ M_WME_SETAC(m, WME_AC_BE); IEEE80211_NODE_STAT(ni, tx_data); IEEE80211_NOTE(vap, IEEE80211_MSG_DEBUG | IEEE80211_MSG_DUMPPKTS, ni, "send %snull data frame on channel %u, pwr mgt %s", ni->ni_flags & IEEE80211_NODE_QOS ? "QoS " : "", ieee80211_chan2ieee(ic, ic->ic_curchan), wh->i_fc[1] & IEEE80211_FC1_PWR_MGT ? "ena" : "dis"); ret = ieee80211_raw_output(vap, ni, m, NULL); IEEE80211_TX_UNLOCK(ic); return (ret); } /* * Assign priority to a frame based on any vlan tag assigned * to the station and/or any Diffserv setting in an IP header. * Finally, if an ACM policy is setup (in station mode) it's * applied. 
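 *
 * A worked example of the Diffserv mapping done below (assuming the
 * conventional 802.1d UP -> AC table behind TID_TO_WME_AC()):
 *
 *	IP TOS 0xb8 (DSCP EF)  >> 5 = 5 -> WME_AC_VI
 *	IP TOS 0xc0 (DSCP CS6) >> 5 = 6 -> WME_AC_VO
 *	IP TOS 0x20 (DSCP CS1) >> 5 = 1 -> WME_AC_BK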
*/ int ieee80211_classify(struct ieee80211_node *ni, struct mbuf *m) { const struct ether_header *eh = NULL; uint16_t ether_type; int v_wme_ac, d_wme_ac, ac; if (__predict_false(m->m_flags & M_ENCAP)) { struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *); struct llc *llc; int hdrlen, subtype; subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; if (subtype & IEEE80211_FC0_SUBTYPE_NODATA) { ac = WME_AC_BE; goto done; } hdrlen = ieee80211_hdrsize(wh); if (m->m_pkthdr.len < hdrlen + sizeof(*llc)) return 1; llc = (struct llc *)mtodo(m, hdrlen); if (llc->llc_dsap != LLC_SNAP_LSAP || llc->llc_ssap != LLC_SNAP_LSAP || llc->llc_control != LLC_UI || llc->llc_snap.org_code[0] != 0 || llc->llc_snap.org_code[1] != 0 || llc->llc_snap.org_code[2] != 0) return 1; ether_type = llc->llc_snap.ether_type; } else { eh = mtod(m, struct ether_header *); ether_type = eh->ether_type; } /* * Always promote PAE/EAPOL frames to high priority. */ if (ether_type == htons(ETHERTYPE_PAE)) { /* NB: mark so others don't need to check header */ m->m_flags |= M_EAPOL; ac = WME_AC_VO; goto done; } /* * Non-qos traffic goes to BE. */ if ((ni->ni_flags & IEEE80211_NODE_QOS) == 0) { ac = WME_AC_BE; goto done; } /* * If node has a vlan tag then all traffic * to it must have a matching tag. */ v_wme_ac = 0; if (ni->ni_vlan != 0) { if ((m->m_flags & M_VLANTAG) == 0) { IEEE80211_NODE_STAT(ni, tx_novlantag); return 1; } if (EVL_VLANOFTAG(m->m_pkthdr.ether_vtag) != EVL_VLANOFTAG(ni->ni_vlan)) { IEEE80211_NODE_STAT(ni, tx_vlanmismatch); return 1; } /* map vlan priority to AC */ v_wme_ac = TID_TO_WME_AC(EVL_PRIOFTAG(ni->ni_vlan)); } /* XXX m_copydata may be too slow for fast path */ #ifdef INET if (eh && eh->ether_type == htons(ETHERTYPE_IP)) { uint8_t tos; /* * IP frame, map the DSCP bits from the TOS field. */ /* NB: ip header may not be in first mbuf */ m_copydata(m, sizeof(struct ether_header) + offsetof(struct ip, ip_tos), sizeof(tos), &tos); tos >>= 5; /* NB: ECN + low 3 bits of DSCP */ d_wme_ac = TID_TO_WME_AC(tos); } else { #endif /* INET */ #ifdef INET6 if (eh && eh->ether_type == htons(ETHERTYPE_IPV6)) { uint32_t flow; uint8_t tos; /* * IPv6 frame, map the DSCP bits from the traffic class field. */ m_copydata(m, sizeof(struct ether_header) + offsetof(struct ip6_hdr, ip6_flow), sizeof(flow), (caddr_t) &flow); tos = (uint8_t)(ntohl(flow) >> 20); tos >>= 5; /* NB: ECN + low 3 bits of DSCP */ d_wme_ac = TID_TO_WME_AC(tos); } else { #endif /* INET6 */ d_wme_ac = WME_AC_BE; #ifdef INET6 } #endif #ifdef INET } #endif /* * Use highest priority AC. */ if (v_wme_ac > d_wme_ac) ac = v_wme_ac; else ac = d_wme_ac; /* * Apply ACM policy. */ if (ni->ni_vap->iv_opmode == IEEE80211_M_STA) { static const int acmap[4] = { WME_AC_BK, /* WME_AC_BE */ WME_AC_BK, /* WME_AC_BK */ WME_AC_BE, /* WME_AC_VI */ WME_AC_VI, /* WME_AC_VO */ }; struct ieee80211com *ic = ni->ni_ic; while (ac != WME_AC_BK && ic->ic_wme.wme_wmeBssChanParams.cap_wmeParams[ac].wmep_acm) ac = acmap[ac]; } done: M_WME_SETAC(m, ac); return 0; } /* * Insure there is sufficient contiguous space to encapsulate the * 802.11 data frame. If room isn't already there, arrange for it. * Drivers and cipher modules assume we have done the necessary work * and fail rudely if they don't find the space they need. 
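 *
 * The check below boils down to (sketch, using the names from the
 * function body):
 *
 *	needed_space = ic->ic_headroom + hdrsize +
 *	    (key != NULL ? key->wk_cipher->ic_header : 0);
 *	reclaimed = sizeof(struct ether_header) - sizeof(struct llc);
 *	if (M_LEADINGSPACE(m) < needed_space - reclaimed)
 *		(prepend a fresh mbuf and shift the Ethernet header)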
*/ struct mbuf * ieee80211_mbuf_adjust(struct ieee80211vap *vap, int hdrsize, struct ieee80211_key *key, struct mbuf *m) { #define TO_BE_RECLAIMED (sizeof(struct ether_header) - sizeof(struct llc)) int needed_space = vap->iv_ic->ic_headroom + hdrsize; if (key != NULL) { /* XXX belongs in crypto code? */ needed_space += key->wk_cipher->ic_header; /* XXX frags */ /* * When crypto is being done in the host we must insure * the data are writable for the cipher routines; clone * a writable mbuf chain. * XXX handle SWMIC specially */ if (key->wk_flags & (IEEE80211_KEY_SWENCRYPT|IEEE80211_KEY_SWENMIC)) { m = m_unshare(m, M_NOWAIT); if (m == NULL) { IEEE80211_DPRINTF(vap, IEEE80211_MSG_OUTPUT, "%s: cannot get writable mbuf\n", __func__); vap->iv_stats.is_tx_nobuf++; /* XXX new stat */ return NULL; } } } /* * We know we are called just before stripping an Ethernet * header and prepending an LLC header. This means we know * there will be * sizeof(struct ether_header) - sizeof(struct llc) * bytes recovered to which we need additional space for the * 802.11 header and any crypto header. */ /* XXX check trailing space and copy instead? */ if (M_LEADINGSPACE(m) < needed_space - TO_BE_RECLAIMED) { struct mbuf *n = m_gethdr(M_NOWAIT, m->m_type); if (n == NULL) { IEEE80211_DPRINTF(vap, IEEE80211_MSG_OUTPUT, "%s: cannot expand storage\n", __func__); vap->iv_stats.is_tx_nobuf++; m_freem(m); return NULL; } KASSERT(needed_space <= MHLEN, ("not enough room, need %u got %d\n", needed_space, MHLEN)); /* * Setup new mbuf to have leading space to prepend the * 802.11 header and any crypto header bits that are * required (the latter are added when the driver calls * back to ieee80211_crypto_encap to do crypto encapsulation). */ /* NB: must be first 'cuz it clobbers m_data */ m_move_pkthdr(n, m); n->m_len = 0; /* NB: m_gethdr does not set */ n->m_data += needed_space; /* * Pull up Ethernet header to create the expected layout. * We could use m_pullup but that's overkill (i.e. we don't * need the actual data) and it cannot fail so do it inline * for speed. */ /* NB: struct ether_header is known to be contiguous */ n->m_len += sizeof(struct ether_header); m->m_len -= sizeof(struct ether_header); m->m_data += sizeof(struct ether_header); /* * Replace the head of the chain. */ n->m_next = m; m = n; } return m; #undef TO_BE_RECLAIMED } /* * Return the transmit key to use in sending a unicast frame. * If a unicast key is set we use that. When no unicast key is set * we fall back to the default transmit key. */ static __inline struct ieee80211_key * ieee80211_crypto_getucastkey(struct ieee80211vap *vap, struct ieee80211_node *ni) { if (IEEE80211_KEY_UNDEFINED(&ni->ni_ucastkey)) { if (vap->iv_def_txkey == IEEE80211_KEYIX_NONE || IEEE80211_KEY_UNDEFINED(&vap->iv_nw_keys[vap->iv_def_txkey])) return NULL; return &vap->iv_nw_keys[vap->iv_def_txkey]; } else { return &ni->ni_ucastkey; } } /* * Return the transmit key to use in sending a multicast frame. * Multicast traffic always uses the group key which is installed as * the default tx key. */ static __inline struct ieee80211_key * ieee80211_crypto_getmcastkey(struct ieee80211vap *vap, struct ieee80211_node *ni) { if (vap->iv_def_txkey == IEEE80211_KEYIX_NONE || IEEE80211_KEY_UNDEFINED(&vap->iv_nw_keys[vap->iv_def_txkey])) return NULL; return &vap->iv_nw_keys[vap->iv_def_txkey]; } /* * Encapsulate an outbound data frame. The mbuf chain is updated. * If an error is encountered NULL is returned. 
The caller is required * to provide a node reference and pullup the ethernet header in the * first mbuf. * * NB: Packet is assumed to be processed by ieee80211_classify which * marked EAPOL frames w/ M_EAPOL. */ struct mbuf * ieee80211_encap(struct ieee80211vap *vap, struct ieee80211_node *ni, struct mbuf *m) { #define WH4(wh) ((struct ieee80211_frame_addr4 *)(wh)) #define MC01(mc) ((struct ieee80211_meshcntl_ae01 *)mc) struct ieee80211com *ic = ni->ni_ic; #ifdef IEEE80211_SUPPORT_MESH struct ieee80211_mesh_state *ms = vap->iv_mesh; struct ieee80211_meshcntl_ae10 *mc; struct ieee80211_mesh_route *rt = NULL; int dir = -1; #endif struct ether_header eh; struct ieee80211_frame *wh; struct ieee80211_key *key; struct llc *llc; int hdrsize, hdrspace, datalen, addqos, txfrag, is4addr, is_mcast; ieee80211_seq seqno; int meshhdrsize, meshae; uint8_t *qos; int is_amsdu = 0; IEEE80211_TX_LOCK_ASSERT(ic); is_mcast = !! (m->m_flags & (M_MCAST | M_BCAST)); /* * Copy existing Ethernet header to a safe place. The * rest of the code assumes it's ok to strip it when * reorganizing state for the final encapsulation. */ KASSERT(m->m_len >= sizeof(eh), ("no ethernet header!")); ETHER_HEADER_COPY(&eh, mtod(m, caddr_t)); /* * Insure space for additional headers. First identify * transmit key to use in calculating any buffer adjustments * required. This is also used below to do privacy * encapsulation work. Then calculate the 802.11 header * size and any padding required by the driver. * * Note key may be NULL if we fall back to the default * transmit key and that is not set. In that case the * buffer may not be expanded as needed by the cipher * routines, but they will/should discard it. */ if (vap->iv_flags & IEEE80211_F_PRIVACY) { if (vap->iv_opmode == IEEE80211_M_STA || !IEEE80211_IS_MULTICAST(eh.ether_dhost) || (vap->iv_opmode == IEEE80211_M_WDS && (vap->iv_flags_ext & IEEE80211_FEXT_WDSLEGACY))) key = ieee80211_crypto_getucastkey(vap, ni); else key = ieee80211_crypto_getmcastkey(vap, ni); if (key == NULL && (m->m_flags & M_EAPOL) == 0) { IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_CRYPTO, eh.ether_dhost, "no default transmit key (%s) deftxkey %u", __func__, vap->iv_def_txkey); vap->iv_stats.is_tx_nodefkey++; goto bad; } } else key = NULL; /* * XXX Some ap's don't handle QoS-encapsulated EAPOL * frames so suppress use. This may be an issue if other * ap's require all data frames to be QoS-encapsulated * once negotiated in which case we'll need to make this * configurable. * * Don't send multicast QoS frames. * Technically multicast frames can be QoS if all stations in the * BSS are also QoS. * * NB: mesh data frames are QoS, including multicast frames. */ addqos = (((is_mcast == 0) && (ni->ni_flags & (IEEE80211_NODE_QOS|IEEE80211_NODE_HT))) || (vap->iv_opmode == IEEE80211_M_MBSS)) && (m->m_flags & M_EAPOL) == 0; if (addqos) hdrsize = sizeof(struct ieee80211_qosframe); else hdrsize = sizeof(struct ieee80211_frame); #ifdef IEEE80211_SUPPORT_MESH if (vap->iv_opmode == IEEE80211_M_MBSS) { /* * Mesh data frames are encapsulated according to the * rules of Section 11B.8.5 (p.139 of D3.0 spec). 
* o Group Addressed data (aka multicast) originating * at the local sta are sent w/ 3-address format and * address extension mode 00 * o Individually Addressed data (aka unicast) originating * at the local sta are sent w/ 4-address format and * address extension mode 00 * o Group Addressed data forwarded from a non-mesh sta are * sent w/ 3-address format and address extension mode 01 * o Individually Address data from another sta are sent * w/ 4-address format and address extension mode 10 */ is4addr = 0; /* NB: don't use, disable */ if (!IEEE80211_IS_MULTICAST(eh.ether_dhost)) { rt = ieee80211_mesh_rt_find(vap, eh.ether_dhost); KASSERT(rt != NULL, ("route is NULL")); dir = IEEE80211_FC1_DIR_DSTODS; hdrsize += IEEE80211_ADDR_LEN; if (rt->rt_flags & IEEE80211_MESHRT_FLAGS_PROXY) { if (IEEE80211_ADDR_EQ(rt->rt_mesh_gate, vap->iv_myaddr)) { IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_MESH, eh.ether_dhost, "%s", "trying to send to ourself"); goto bad; } meshae = IEEE80211_MESH_AE_10; meshhdrsize = sizeof(struct ieee80211_meshcntl_ae10); } else { meshae = IEEE80211_MESH_AE_00; meshhdrsize = sizeof(struct ieee80211_meshcntl); } } else { dir = IEEE80211_FC1_DIR_FROMDS; if (!IEEE80211_ADDR_EQ(eh.ether_shost, vap->iv_myaddr)) { /* proxy group */ meshae = IEEE80211_MESH_AE_01; meshhdrsize = sizeof(struct ieee80211_meshcntl_ae01); } else { /* group */ meshae = IEEE80211_MESH_AE_00; meshhdrsize = sizeof(struct ieee80211_meshcntl); } } } else { #endif /* * 4-address frames need to be generated for: * o packets sent through a WDS vap (IEEE80211_M_WDS) * o packets sent through a vap marked for relaying * (e.g. a station operating with dynamic WDS) */ is4addr = vap->iv_opmode == IEEE80211_M_WDS || ((vap->iv_flags_ext & IEEE80211_FEXT_4ADDR) && !IEEE80211_ADDR_EQ(eh.ether_shost, vap->iv_myaddr)); if (is4addr) hdrsize += IEEE80211_ADDR_LEN; meshhdrsize = meshae = 0; #ifdef IEEE80211_SUPPORT_MESH } #endif /* * Honor driver DATAPAD requirement. */ if (ic->ic_flags & IEEE80211_F_DATAPAD) hdrspace = roundup(hdrsize, sizeof(uint32_t)); else hdrspace = hdrsize; if (__predict_true((m->m_flags & M_FF) == 0)) { /* * Normal frame. */ m = ieee80211_mbuf_adjust(vap, hdrspace + meshhdrsize, key, m); if (m == NULL) { /* NB: ieee80211_mbuf_adjust handles msgs+statistics */ goto bad; } /* NB: this could be optimized 'cuz of ieee80211_mbuf_adjust */ m_adj(m, sizeof(struct ether_header) - sizeof(struct llc)); llc = mtod(m, struct llc *); llc->llc_dsap = llc->llc_ssap = LLC_SNAP_LSAP; llc->llc_control = LLC_UI; llc->llc_snap.org_code[0] = 0; llc->llc_snap.org_code[1] = 0; llc->llc_snap.org_code[2] = 0; llc->llc_snap.ether_type = eh.ether_type; } else { #ifdef IEEE80211_SUPPORT_SUPERG /* * Aggregated frame. Check if it's for AMSDU or FF. * * XXX TODO: IEEE80211_NODE_AMSDU* isn't implemented * anywhere for some reason. But, since 11n requires * AMSDU RX, we can just assume "11n" == "AMSDU". 
*/ IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG, "%s: called; M_FF\n", __func__); if (ieee80211_amsdu_tx_ok(ni)) { m = ieee80211_amsdu_encap(vap, m, hdrspace + meshhdrsize, key); is_amsdu = 1; } else { m = ieee80211_ff_encap(vap, m, hdrspace + meshhdrsize, key); } if (m == NULL) #endif goto bad; } datalen = m->m_pkthdr.len; /* NB: w/o 802.11 header */ M_PREPEND(m, hdrspace + meshhdrsize, M_NOWAIT); if (m == NULL) { vap->iv_stats.is_tx_nobuf++; goto bad; } wh = mtod(m, struct ieee80211_frame *); wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_DATA; *(uint16_t *)wh->i_dur = 0; qos = NULL; /* NB: quiet compiler */ if (is4addr) { wh->i_fc[1] = IEEE80211_FC1_DIR_DSTODS; IEEE80211_ADDR_COPY(wh->i_addr1, ni->ni_macaddr); IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr); IEEE80211_ADDR_COPY(wh->i_addr3, eh.ether_dhost); IEEE80211_ADDR_COPY(WH4(wh)->i_addr4, eh.ether_shost); } else switch (vap->iv_opmode) { case IEEE80211_M_STA: wh->i_fc[1] = IEEE80211_FC1_DIR_TODS; IEEE80211_ADDR_COPY(wh->i_addr1, ni->ni_bssid); IEEE80211_ADDR_COPY(wh->i_addr2, eh.ether_shost); IEEE80211_ADDR_COPY(wh->i_addr3, eh.ether_dhost); break; case IEEE80211_M_IBSS: case IEEE80211_M_AHDEMO: wh->i_fc[1] = IEEE80211_FC1_DIR_NODS; IEEE80211_ADDR_COPY(wh->i_addr1, eh.ether_dhost); IEEE80211_ADDR_COPY(wh->i_addr2, eh.ether_shost); /* * NB: always use the bssid from iv_bss as the * neighbor's may be stale after an ibss merge */ IEEE80211_ADDR_COPY(wh->i_addr3, vap->iv_bss->ni_bssid); break; case IEEE80211_M_HOSTAP: wh->i_fc[1] = IEEE80211_FC1_DIR_FROMDS; IEEE80211_ADDR_COPY(wh->i_addr1, eh.ether_dhost); IEEE80211_ADDR_COPY(wh->i_addr2, ni->ni_bssid); IEEE80211_ADDR_COPY(wh->i_addr3, eh.ether_shost); break; #ifdef IEEE80211_SUPPORT_MESH case IEEE80211_M_MBSS: /* NB: offset by hdrspace to deal with DATAPAD */ mc = (struct ieee80211_meshcntl_ae10 *) (mtod(m, uint8_t *) + hdrspace); wh->i_fc[1] = dir; switch (meshae) { case IEEE80211_MESH_AE_00: /* no proxy */ mc->mc_flags = 0; if (dir == IEEE80211_FC1_DIR_DSTODS) { /* ucast */ IEEE80211_ADDR_COPY(wh->i_addr1, ni->ni_macaddr); IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr); IEEE80211_ADDR_COPY(wh->i_addr3, eh.ether_dhost); IEEE80211_ADDR_COPY(WH4(wh)->i_addr4, eh.ether_shost); qos =((struct ieee80211_qosframe_addr4 *) wh)->i_qos; } else if (dir == IEEE80211_FC1_DIR_FROMDS) { /* mcast */ IEEE80211_ADDR_COPY(wh->i_addr1, eh.ether_dhost); IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr); IEEE80211_ADDR_COPY(wh->i_addr3, eh.ether_shost); qos = ((struct ieee80211_qosframe *) wh)->i_qos; } break; case IEEE80211_MESH_AE_01: /* mcast, proxy */ wh->i_fc[1] = IEEE80211_FC1_DIR_FROMDS; IEEE80211_ADDR_COPY(wh->i_addr1, eh.ether_dhost); IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr); IEEE80211_ADDR_COPY(wh->i_addr3, vap->iv_myaddr); mc->mc_flags = 1; IEEE80211_ADDR_COPY(MC01(mc)->mc_addr4, eh.ether_shost); qos = ((struct ieee80211_qosframe *) wh)->i_qos; break; case IEEE80211_MESH_AE_10: /* ucast, proxy */ KASSERT(rt != NULL, ("route is NULL")); IEEE80211_ADDR_COPY(wh->i_addr1, rt->rt_nexthop); IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr); IEEE80211_ADDR_COPY(wh->i_addr3, rt->rt_mesh_gate); IEEE80211_ADDR_COPY(WH4(wh)->i_addr4, vap->iv_myaddr); mc->mc_flags = IEEE80211_MESH_AE_10; IEEE80211_ADDR_COPY(mc->mc_addr5, eh.ether_dhost); IEEE80211_ADDR_COPY(mc->mc_addr6, eh.ether_shost); qos = ((struct ieee80211_qosframe_addr4 *) wh)->i_qos; break; default: KASSERT(0, ("meshae %d", meshae)); break; } mc->mc_ttl = ms->ms_ttl; ms->ms_seq++; le32enc(mc->mc_seq, ms->ms_seq); break; #endif 
case IEEE80211_M_WDS: /* NB: is4addr should always be true */ default: goto bad; } if (m->m_flags & M_MORE_DATA) wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA; if (addqos) { int ac, tid; if (is4addr) { qos = ((struct ieee80211_qosframe_addr4 *) wh)->i_qos; /* NB: mesh case handled earlier */ } else if (vap->iv_opmode != IEEE80211_M_MBSS) qos = ((struct ieee80211_qosframe *) wh)->i_qos; ac = M_WME_GETAC(m); /* map from access class/queue to 11e header priorty value */ tid = WME_AC_TO_TID(ac); qos[0] = tid & IEEE80211_QOS_TID; if (ic->ic_wme.wme_wmeChanParams.cap_wmeParams[ac].wmep_noackPolicy) qos[0] |= IEEE80211_QOS_ACKPOLICY_NOACK; #ifdef IEEE80211_SUPPORT_MESH if (vap->iv_opmode == IEEE80211_M_MBSS) qos[1] = IEEE80211_QOS_MC; else #endif qos[1] = 0; wh->i_fc[0] |= IEEE80211_FC0_SUBTYPE_QOS; /* * If this is an A-MSDU then ensure we set the * relevant field. */ if (is_amsdu) qos[0] |= IEEE80211_QOS_AMSDU; /* * XXX TODO TX lock is needed for atomic updates of sequence * numbers. If the driver does it, then don't do it here; * and we don't need the TX lock held. */ if ((m->m_flags & M_AMPDU_MPDU) == 0) { /* * 802.11-2012 9.3.2.10 - * * If this is a multicast frame then we need * to ensure that the sequence number comes from * a separate seqno space and not the TID space. * * Otherwise multicast frames may actually cause * holes in the TX blockack window space and * upset various things. */ if (IEEE80211_IS_MULTICAST(wh->i_addr1)) seqno = ni->ni_txseqs[IEEE80211_NONQOS_TID]++; else seqno = ni->ni_txseqs[tid]++; /* * NB: don't assign a sequence # to potential * aggregates; we expect this happens at the * point the frame comes off any aggregation q * as otherwise we may introduce holes in the * BA sequence space and/or make window accouting * more difficult. * * XXX may want to control this with a driver * capability; this may also change when we pull * aggregation up into net80211 */ seqno = ni->ni_txseqs[tid]++; *(uint16_t *)wh->i_seq = htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT); M_SEQNO_SET(m, seqno); } else { /* NB: zero out i_seq field (for s/w encryption etc) */ *(uint16_t *)wh->i_seq = 0; } } else { /* * XXX TODO TX lock is needed for atomic updates of sequence * numbers. If the driver does it, then don't do it here; * and we don't need the TX lock held. */ seqno = ni->ni_txseqs[IEEE80211_NONQOS_TID]++; *(uint16_t *)wh->i_seq = htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT); M_SEQNO_SET(m, seqno); /* * XXX TODO: we shouldn't allow EAPOL, etc that would * be forced to be non-QoS traffic to be A-MSDU encapsulated. */ if (is_amsdu) printf("%s: XXX ERROR: is_amsdu set; not QoS!\n", __func__); } /* * Check if xmit fragmentation is required. * * If the hardware does fragmentation offload, then don't bother * doing it here. */ if (IEEE80211_CONF_FRAG_OFFLOAD(ic)) txfrag = 0; else txfrag = (m->m_pkthdr.len > vap->iv_fragthreshold && !IEEE80211_IS_MULTICAST(wh->i_addr1) && (vap->iv_caps & IEEE80211_C_TXFRAG) && (m->m_flags & (M_FF | M_AMPDU_MPDU)) == 0); if (key != NULL) { /* * IEEE 802.1X: send EAPOL frames always in the clear. * WPA/WPA2: encrypt EAPOL keys when pairwise keys are set. */ if ((m->m_flags & M_EAPOL) == 0 || ((vap->iv_flags & IEEE80211_F_WPA) && (vap->iv_opmode == IEEE80211_M_STA ? 
!IEEE80211_KEY_UNDEFINED(key) : !IEEE80211_KEY_UNDEFINED(&ni->ni_ucastkey)))) { wh->i_fc[1] |= IEEE80211_FC1_PROTECTED; if (!ieee80211_crypto_enmic(vap, key, m, txfrag)) { IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_OUTPUT, eh.ether_dhost, "%s", "enmic failed, discard frame"); vap->iv_stats.is_crypto_enmicfail++; goto bad; } } } if (txfrag && !ieee80211_fragment(vap, m, hdrsize, key != NULL ? key->wk_cipher->ic_header : 0, vap->iv_fragthreshold)) goto bad; m->m_flags |= M_ENCAP; /* mark encapsulated */ IEEE80211_NODE_STAT(ni, tx_data); if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { IEEE80211_NODE_STAT(ni, tx_mcast); m->m_flags |= M_MCAST; } else IEEE80211_NODE_STAT(ni, tx_ucast); IEEE80211_NODE_STAT_ADD(ni, tx_bytes, datalen); return m; bad: if (m != NULL) m_freem(m); return NULL; #undef WH4 #undef MC01 } void ieee80211_free_mbuf(struct mbuf *m) { struct mbuf *next; if (m == NULL) return; do { next = m->m_nextpkt; m->m_nextpkt = NULL; m_freem(m); } while ((m = next) != NULL); } /* * Fragment the frame according to the specified mtu. * The size of the 802.11 header (w/o padding) is provided * so we don't need to recalculate it. We create a new * mbuf for each fragment and chain it through m_nextpkt; * we might be able to optimize this by reusing the original * packet's mbufs but that is significantly more complicated. */ static int ieee80211_fragment(struct ieee80211vap *vap, struct mbuf *m0, u_int hdrsize, u_int ciphdrsize, u_int mtu) { struct ieee80211com *ic = vap->iv_ic; struct ieee80211_frame *wh, *whf; struct mbuf *m, *prev; u_int totalhdrsize, fragno, fragsize, off, remainder, payload; u_int hdrspace; KASSERT(m0->m_nextpkt == NULL, ("mbuf already chained?")); KASSERT(m0->m_pkthdr.len > mtu, ("pktlen %u mtu %u", m0->m_pkthdr.len, mtu)); /* * Honor driver DATAPAD requirement. */ if (ic->ic_flags & IEEE80211_F_DATAPAD) hdrspace = roundup(hdrsize, sizeof(uint32_t)); else hdrspace = hdrsize; wh = mtod(m0, struct ieee80211_frame *); /* NB: mark the first frag; it will be propagated below */ wh->i_fc[1] |= IEEE80211_FC1_MORE_FRAG; totalhdrsize = hdrspace + ciphdrsize; fragno = 1; off = mtu - ciphdrsize; remainder = m0->m_pkthdr.len - off; prev = m0; do { fragsize = MIN(totalhdrsize + remainder, mtu); m = m_get2(fragsize, M_NOWAIT, MT_DATA, M_PKTHDR); if (m == NULL) goto bad; /* leave room to prepend any cipher header */ m_align(m, fragsize - ciphdrsize); /* * Form the header in the fragment. Note that since * we mark the first fragment with the MORE_FRAG bit * it automatically is propagated to each fragment; we * need only clear it on the last fragment (done below). * NB: frag 1+ dont have Mesh Control field present. 
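 *
 * For reference, the sizing this loop produces (hypothetical
 * numbers: 24 byte 802.11 header, 8 byte cipher header, 1500 byte
 * threshold, 3000 byte frame): the first fragment is trimmed to
 * 1492 bytes, the second carries 1468 bytes of payload and the
 * third the remaining 40, leaving room in each for the cipher
 * header to be prepended later.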
*/ whf = mtod(m, struct ieee80211_frame *); memcpy(whf, wh, hdrsize); #ifdef IEEE80211_SUPPORT_MESH if (vap->iv_opmode == IEEE80211_M_MBSS) { if (IEEE80211_IS_DSTODS(wh)) ((struct ieee80211_qosframe_addr4 *) whf)->i_qos[1] &= ~IEEE80211_QOS_MC; else ((struct ieee80211_qosframe *) whf)->i_qos[1] &= ~IEEE80211_QOS_MC; } #endif *(uint16_t *)&whf->i_seq[0] |= htole16( (fragno & IEEE80211_SEQ_FRAG_MASK) << IEEE80211_SEQ_FRAG_SHIFT); fragno++; payload = fragsize - totalhdrsize; /* NB: destination is known to be contiguous */ m_copydata(m0, off, payload, mtod(m, uint8_t *) + hdrspace); m->m_len = hdrspace + payload; m->m_pkthdr.len = hdrspace + payload; m->m_flags |= M_FRAG; /* chain up the fragment */ prev->m_nextpkt = m; prev = m; /* deduct fragment just formed */ remainder -= payload; off += payload; } while (remainder != 0); /* set the last fragment */ m->m_flags |= M_LASTFRAG; whf->i_fc[1] &= ~IEEE80211_FC1_MORE_FRAG; /* strip first mbuf now that everything has been copied */ m_adj(m0, -(m0->m_pkthdr.len - (mtu - ciphdrsize))); m0->m_flags |= M_FIRSTFRAG | M_FRAG; vap->iv_stats.is_tx_fragframes++; vap->iv_stats.is_tx_frags += fragno-1; return 1; bad: /* reclaim fragments but leave original frame for caller to free */ ieee80211_free_mbuf(m0->m_nextpkt); m0->m_nextpkt = NULL; return 0; } /* * Add a supported rates element id to a frame. */ uint8_t * ieee80211_add_rates(uint8_t *frm, const struct ieee80211_rateset *rs) { int nrates; *frm++ = IEEE80211_ELEMID_RATES; nrates = rs->rs_nrates; if (nrates > IEEE80211_RATE_SIZE) nrates = IEEE80211_RATE_SIZE; *frm++ = nrates; memcpy(frm, rs->rs_rates, nrates); return frm + nrates; } /* * Add an extended supported rates element id to a frame. */ uint8_t * ieee80211_add_xrates(uint8_t *frm, const struct ieee80211_rateset *rs) { /* * Add an extended supported rates element if operating in 11g mode. */ if (rs->rs_nrates > IEEE80211_RATE_SIZE) { int nrates = rs->rs_nrates - IEEE80211_RATE_SIZE; *frm++ = IEEE80211_ELEMID_XRATES; *frm++ = nrates; memcpy(frm, rs->rs_rates + IEEE80211_RATE_SIZE, nrates); frm += nrates; } return frm; } /* * Add an ssid element to a frame. */ uint8_t * ieee80211_add_ssid(uint8_t *frm, const uint8_t *ssid, u_int len) { *frm++ = IEEE80211_ELEMID_SSID; *frm++ = len; memcpy(frm, ssid, len); return frm + len; } /* * Add an erp element to a frame. */ static uint8_t * ieee80211_add_erp(uint8_t *frm, struct ieee80211com *ic) { uint8_t erp; *frm++ = IEEE80211_ELEMID_ERP; *frm++ = 1; erp = 0; if (ic->ic_nonerpsta != 0) erp |= IEEE80211_ERP_NON_ERP_PRESENT; if (ic->ic_flags & IEEE80211_F_USEPROT) erp |= IEEE80211_ERP_USE_PROTECTION; if (ic->ic_flags & IEEE80211_F_USEBARKER) erp |= IEEE80211_ERP_LONG_PREAMBLE; *frm++ = erp; return frm; } /* * Add a CFParams element to a frame. */ static uint8_t * ieee80211_add_cfparms(uint8_t *frm, struct ieee80211com *ic) { #define ADDSHORT(frm, v) do { \ le16enc(frm, v); \ frm += 2; \ } while (0) *frm++ = IEEE80211_ELEMID_CFPARMS; *frm++ = 6; *frm++ = 0; /* CFP count */ *frm++ = 2; /* CFP period */ ADDSHORT(frm, 0); /* CFP MaxDuration (TU) */ ADDSHORT(frm, 0); /* CFP CurRemaining (TU) */ return frm; #undef ADDSHORT } static __inline uint8_t * add_appie(uint8_t *frm, const struct ieee80211_appie *ie) { memcpy(frm, ie->ie_data, ie->ie_len); return frm + ie->ie_len; } static __inline uint8_t * add_ie(uint8_t *frm, const uint8_t *ie) { memcpy(frm, ie, 2 + ie[1]); return frm + 2 + ie[1]; } #define WME_OUI_BYTES 0x00, 0x50, 0xf2 /* * Add a WME information element to a frame. 
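 *
 * On the wire this emits the familiar WMM vendor IE (assuming the
 * usual WME_OUI_TYPE/WME_INFO_OUI_SUBTYPE/WME_VERSION values of
 * 2/0/1):
 *
 *	dd 07 00 50 f2 02 00 01 00
 *	(ELEMID_VENDOR, length 7, OUI 00:50:f2, OUI type 2,
 *	 subtype 0, version 1, QoS info 0)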
*/ uint8_t * ieee80211_add_wme_info(uint8_t *frm, struct ieee80211_wme_state *wme) { static const struct ieee80211_wme_info info = { .wme_id = IEEE80211_ELEMID_VENDOR, .wme_len = sizeof(struct ieee80211_wme_info) - 2, .wme_oui = { WME_OUI_BYTES }, .wme_type = WME_OUI_TYPE, .wme_subtype = WME_INFO_OUI_SUBTYPE, .wme_version = WME_VERSION, .wme_info = 0, }; memcpy(frm, &info, sizeof(info)); return frm + sizeof(info); } /* * Add a WME parameters element to a frame. */ static uint8_t * ieee80211_add_wme_param(uint8_t *frm, struct ieee80211_wme_state *wme) { #define SM(_v, _f) (((_v) << _f##_S) & _f) #define ADDSHORT(frm, v) do { \ le16enc(frm, v); \ frm += 2; \ } while (0) /* NB: this works 'cuz a param has an info at the front */ static const struct ieee80211_wme_info param = { .wme_id = IEEE80211_ELEMID_VENDOR, .wme_len = sizeof(struct ieee80211_wme_param) - 2, .wme_oui = { WME_OUI_BYTES }, .wme_type = WME_OUI_TYPE, .wme_subtype = WME_PARAM_OUI_SUBTYPE, .wme_version = WME_VERSION, }; int i; memcpy(frm, ¶m, sizeof(param)); frm += __offsetof(struct ieee80211_wme_info, wme_info); *frm++ = wme->wme_bssChanParams.cap_info; /* AC info */ *frm++ = 0; /* reserved field */ for (i = 0; i < WME_NUM_AC; i++) { const struct wmeParams *ac = &wme->wme_bssChanParams.cap_wmeParams[i]; *frm++ = SM(i, WME_PARAM_ACI) | SM(ac->wmep_acm, WME_PARAM_ACM) | SM(ac->wmep_aifsn, WME_PARAM_AIFSN) ; *frm++ = SM(ac->wmep_logcwmax, WME_PARAM_LOGCWMAX) | SM(ac->wmep_logcwmin, WME_PARAM_LOGCWMIN) ; ADDSHORT(frm, ac->wmep_txopLimit); } return frm; #undef SM #undef ADDSHORT } #undef WME_OUI_BYTES /* * Add an 11h Power Constraint element to a frame. */ static uint8_t * ieee80211_add_powerconstraint(uint8_t *frm, struct ieee80211vap *vap) { const struct ieee80211_channel *c = vap->iv_bss->ni_chan; /* XXX per-vap tx power limit? */ int8_t limit = vap->iv_ic->ic_txpowlimit / 2; frm[0] = IEEE80211_ELEMID_PWRCNSTR; frm[1] = 1; frm[2] = c->ic_maxregpower > limit ? c->ic_maxregpower - limit : 0; return frm + 3; } /* * Add an 11h Power Capability element to a frame. */ static uint8_t * ieee80211_add_powercapability(uint8_t *frm, const struct ieee80211_channel *c) { frm[0] = IEEE80211_ELEMID_PWRCAP; frm[1] = 2; frm[2] = c->ic_minpower; frm[3] = c->ic_maxpower; return frm + 4; } /* * Add an 11h Supported Channels element to a frame. */ static uint8_t * ieee80211_add_supportedchannels(uint8_t *frm, struct ieee80211com *ic) { static const int ielen = 26; frm[0] = IEEE80211_ELEMID_SUPPCHAN; frm[1] = ielen; /* XXX not correct */ memcpy(frm+2, ic->ic_chan_avail, ielen); return frm + 2 + ielen; } /* * Add an 11h Quiet time element to a frame. */ static uint8_t * ieee80211_add_quiet(uint8_t *frm, struct ieee80211vap *vap, int update) { struct ieee80211_quiet_ie *quiet = (struct ieee80211_quiet_ie *) frm; quiet->quiet_ie = IEEE80211_ELEMID_QUIET; quiet->len = 6; /* * Only update every beacon interval - otherwise probe responses * would update the quiet count value. */ if (update) { if (vap->iv_quiet_count_value == 1) vap->iv_quiet_count_value = vap->iv_quiet_count; else if (vap->iv_quiet_count_value > 1) vap->iv_quiet_count_value--; } if (vap->iv_quiet_count_value == 0) { /* value 0 is reserved as per 802.11h standerd */ vap->iv_quiet_count_value = 1; } quiet->tbttcount = vap->iv_quiet_count_value; quiet->period = vap->iv_quiet_period; quiet->duration = htole16(vap->iv_quiet_duration); quiet->offset = htole16(vap->iv_quiet_offset); return frm + sizeof(*quiet); } /* * Add an 11h Channel Switch Announcement element to a frame. 
* Note that we use the per-vap CSA count to adjust the global * counter so we can use this routine to form probe response * frames and get the current count. */ static uint8_t * ieee80211_add_csa(uint8_t *frm, struct ieee80211vap *vap) { struct ieee80211com *ic = vap->iv_ic; struct ieee80211_csa_ie *csa = (struct ieee80211_csa_ie *) frm; csa->csa_ie = IEEE80211_ELEMID_CSA; csa->csa_len = 3; csa->csa_mode = 1; /* XXX force quiet on channel */ csa->csa_newchan = ieee80211_chan2ieee(ic, ic->ic_csa_newchan); csa->csa_count = ic->ic_csa_count - vap->iv_csa_count; return frm + sizeof(*csa); } /* * Add an 11h country information element to a frame. */ static uint8_t * ieee80211_add_countryie(uint8_t *frm, struct ieee80211com *ic) { if (ic->ic_countryie == NULL || ic->ic_countryie_chan != ic->ic_bsschan) { /* * Handle lazy construction of ie. This is done on * first use and after a channel change that requires * re-calculation. */ if (ic->ic_countryie != NULL) IEEE80211_FREE(ic->ic_countryie, M_80211_NODE_IE); ic->ic_countryie = ieee80211_alloc_countryie(ic); if (ic->ic_countryie == NULL) return frm; ic->ic_countryie_chan = ic->ic_bsschan; } return add_appie(frm, ic->ic_countryie); } uint8_t * ieee80211_add_wpa(uint8_t *frm, const struct ieee80211vap *vap) { if (vap->iv_flags & IEEE80211_F_WPA1 && vap->iv_wpa_ie != NULL) return (add_ie(frm, vap->iv_wpa_ie)); else { /* XXX else complain? */ return (frm); } } uint8_t * ieee80211_add_rsn(uint8_t *frm, const struct ieee80211vap *vap) { if (vap->iv_flags & IEEE80211_F_WPA2 && vap->iv_rsn_ie != NULL) return (add_ie(frm, vap->iv_rsn_ie)); else { /* XXX else complain? */ return (frm); } } uint8_t * ieee80211_add_qos(uint8_t *frm, const struct ieee80211_node *ni) { if (ni->ni_flags & IEEE80211_NODE_QOS) { *frm++ = IEEE80211_ELEMID_QOS; *frm++ = 1; *frm++ = 0; } return (frm); } /* * Send a probe request frame with the specified ssid * and any optional information element data. */ int ieee80211_send_probereq(struct ieee80211_node *ni, const uint8_t sa[IEEE80211_ADDR_LEN], const uint8_t da[IEEE80211_ADDR_LEN], const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t *ssid, size_t ssidlen) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct ieee80211_node *bss; const struct ieee80211_txparam *tp; struct ieee80211_bpf_params params; const struct ieee80211_rateset *rs; struct mbuf *m; uint8_t *frm; int ret; bss = ieee80211_ref_node(vap->iv_bss); if (vap->iv_state == IEEE80211_S_CAC) { IEEE80211_NOTE(vap, IEEE80211_MSG_OUTPUT, ni, "block %s frame in CAC state", "probe request"); vap->iv_stats.is_tx_badstate++; ieee80211_free_node(bss); return EIO; /* XXX */ } /* * Hold a reference on the node so it doesn't go away until after * the xmit is complete all the way in the driver. On error we * will remove our reference. */ IEEE80211_DPRINTF(vap, IEEE80211_MSG_NODE, "ieee80211_ref_node (%s:%u) %p<%s> refcnt %d\n", __func__, __LINE__, ni, ether_sprintf(ni->ni_macaddr), ieee80211_node_refcnt(ni)+1); ieee80211_ref_node(ni); /* * prreq frame format * [tlv] ssid * [tlv] supported rates * [tlv] RSN (optional) * [tlv] extended supported rates * [tlv] HT cap (optional) * [tlv] VHT cap (optional) * [tlv] WPA (optional) * [tlv] user-specified ie's */ m = ieee80211_getmgtframe(&frm, ic->ic_headroom + sizeof(struct ieee80211_frame), 2 + IEEE80211_NWID_LEN + 2 + IEEE80211_RATE_SIZE + sizeof(struct ieee80211_ie_htcap) + sizeof(struct ieee80211_ie_vhtcap) + sizeof(struct ieee80211_ie_htinfo) /* XXX not needed? 
*/ + sizeof(struct ieee80211_ie_wpa) + 2 + (IEEE80211_RATE_MAXSIZE - IEEE80211_RATE_SIZE) + sizeof(struct ieee80211_ie_wpa) + (vap->iv_appie_probereq != NULL ? vap->iv_appie_probereq->ie_len : 0) ); if (m == NULL) { vap->iv_stats.is_tx_nobuf++; ieee80211_free_node(ni); ieee80211_free_node(bss); return ENOMEM; } frm = ieee80211_add_ssid(frm, ssid, ssidlen); rs = ieee80211_get_suprates(ic, ic->ic_curchan); frm = ieee80211_add_rates(frm, rs); frm = ieee80211_add_rsn(frm, vap); frm = ieee80211_add_xrates(frm, rs); /* * Note: we can't use bss; we don't have one yet. * * So, we should announce our capabilities * in this channel mode (2g/5g), not the * channel details itself. */ if ((vap->iv_opmode == IEEE80211_M_IBSS) && (vap->iv_flags_ht & IEEE80211_FHT_HT)) { struct ieee80211_channel *c; /* * Get the HT channel that we should try upgrading to. * If we can do 40MHz then this'll upgrade it appropriately. */ c = ieee80211_ht_adjust_channel(ic, ic->ic_curchan, vap->iv_flags_ht); frm = ieee80211_add_htcap_ch(frm, vap, c); } /* * XXX TODO: need to figure out what/how to update the * VHT channel. */ #if 0 (vap->iv_flags_vht & IEEE80211_FVHT_VHT) { struct ieee80211_channel *c; c = ieee80211_ht_adjust_channel(ic, ic->ic_curchan, vap->iv_flags_ht); c = ieee80211_vht_adjust_channel(ic, c, vap->iv_flags_vht); frm = ieee80211_add_vhtcap_ch(frm, vap, c); } #endif frm = ieee80211_add_wpa(frm, vap); if (vap->iv_appie_probereq != NULL) frm = add_appie(frm, vap->iv_appie_probereq); m->m_pkthdr.len = m->m_len = frm - mtod(m, uint8_t *); KASSERT(M_LEADINGSPACE(m) >= sizeof(struct ieee80211_frame), ("leading space %zd", M_LEADINGSPACE(m))); M_PREPEND(m, sizeof(struct ieee80211_frame), M_NOWAIT); if (m == NULL) { /* NB: cannot happen */ ieee80211_free_node(ni); ieee80211_free_node(bss); return ENOMEM; } IEEE80211_TX_LOCK(ic); ieee80211_send_setup(ni, m, IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_PROBE_REQ, IEEE80211_NONQOS_TID, sa, da, bssid); /* XXX power management? */ m->m_flags |= M_ENCAP; /* mark encapsulated */ M_WME_SETAC(m, WME_AC_BE); IEEE80211_NODE_STAT(ni, tx_probereq); IEEE80211_NODE_STAT(ni, tx_mgmt); IEEE80211_DPRINTF(vap, IEEE80211_MSG_DEBUG | IEEE80211_MSG_DUMPPKTS, "send probe req on channel %u bssid %s sa %6D da %6D ssid \"%.*s\"\n", ieee80211_chan2ieee(ic, ic->ic_curchan), ether_sprintf(bssid), sa, ":", da, ":", ssidlen, ssid); memset(¶ms, 0, sizeof(params)); params.ibp_pri = M_WME_GETAC(m); tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)]; params.ibp_rate0 = tp->mgmtrate; if (IEEE80211_IS_MULTICAST(da)) { params.ibp_flags |= IEEE80211_BPF_NOACK; params.ibp_try0 = 1; } else params.ibp_try0 = tp->maxretry; params.ibp_power = ni->ni_txpower; ret = ieee80211_raw_output(vap, ni, m, ¶ms); IEEE80211_TX_UNLOCK(ic); ieee80211_free_node(bss); return (ret); } /* * Calculate capability information for mgt frames. 
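 *
 * For example, a protected hostap vap on a 2GHz channel with short
 * preamble and short slot time enabled ends up advertising:
 *
 *	IEEE80211_CAPINFO_ESS | IEEE80211_CAPINFO_PRIVACY |
 *	IEEE80211_CAPINFO_SHORT_PREAMBLE |
 *	IEEE80211_CAPINFO_SHORT_SLOTTIME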
*/ uint16_t ieee80211_getcapinfo(struct ieee80211vap *vap, struct ieee80211_channel *chan) { struct ieee80211com *ic = vap->iv_ic; uint16_t capinfo; KASSERT(vap->iv_opmode != IEEE80211_M_STA, ("station mode")); if (vap->iv_opmode == IEEE80211_M_HOSTAP) capinfo = IEEE80211_CAPINFO_ESS; else if (vap->iv_opmode == IEEE80211_M_IBSS) capinfo = IEEE80211_CAPINFO_IBSS; else capinfo = 0; if (vap->iv_flags & IEEE80211_F_PRIVACY) capinfo |= IEEE80211_CAPINFO_PRIVACY; if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) && IEEE80211_IS_CHAN_2GHZ(chan)) capinfo |= IEEE80211_CAPINFO_SHORT_PREAMBLE; if (ic->ic_flags & IEEE80211_F_SHSLOT) capinfo |= IEEE80211_CAPINFO_SHORT_SLOTTIME; if (IEEE80211_IS_CHAN_5GHZ(chan) && (vap->iv_flags & IEEE80211_F_DOTH)) capinfo |= IEEE80211_CAPINFO_SPECTRUM_MGMT; return capinfo; } /* * Send a management frame. The node is for the destination (or ic_bss * when in station mode). Nodes other than ic_bss have their reference * count bumped to reflect our use for an indeterminant time. */ int ieee80211_send_mgmt(struct ieee80211_node *ni, int type, int arg) { #define HTFLAGS (IEEE80211_NODE_HT | IEEE80211_NODE_HTCOMPAT) #define senderr(_x, _v) do { vap->iv_stats._v++; ret = _x; goto bad; } while (0) struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct ieee80211_node *bss = vap->iv_bss; struct ieee80211_bpf_params params; struct mbuf *m; uint8_t *frm; uint16_t capinfo; int has_challenge, is_shared_key, ret, status; KASSERT(ni != NULL, ("null node")); /* * Hold a reference on the node so it doesn't go away until after * the xmit is complete all the way in the driver. On error we * will remove our reference. */ IEEE80211_DPRINTF(vap, IEEE80211_MSG_NODE, "ieee80211_ref_node (%s:%u) %p<%s> refcnt %d\n", __func__, __LINE__, ni, ether_sprintf(ni->ni_macaddr), ieee80211_node_refcnt(ni)+1); ieee80211_ref_node(ni); memset(¶ms, 0, sizeof(params)); switch (type) { case IEEE80211_FC0_SUBTYPE_AUTH: status = arg >> 16; arg &= 0xffff; has_challenge = ((arg == IEEE80211_AUTH_SHARED_CHALLENGE || arg == IEEE80211_AUTH_SHARED_RESPONSE) && ni->ni_challenge != NULL); /* * Deduce whether we're doing open authentication or * shared key authentication. We do the latter if * we're in the middle of a shared key authentication * handshake or if we're initiating an authentication * request and configured to use shared key. */ is_shared_key = has_challenge || arg >= IEEE80211_AUTH_SHARED_RESPONSE || (arg == IEEE80211_AUTH_SHARED_REQUEST && bss->ni_authmode == IEEE80211_AUTH_SHARED); m = ieee80211_getmgtframe(&frm, ic->ic_headroom + sizeof(struct ieee80211_frame), 3 * sizeof(uint16_t) + (has_challenge && status == IEEE80211_STATUS_SUCCESS ? sizeof(uint16_t)+IEEE80211_CHALLENGE_LEN : 0) ); if (m == NULL) senderr(ENOMEM, is_tx_nobuf); ((uint16_t *)frm)[0] = (is_shared_key) ? 
htole16(IEEE80211_AUTH_ALG_SHARED) : htole16(IEEE80211_AUTH_ALG_OPEN); ((uint16_t *)frm)[1] = htole16(arg); /* sequence number */ ((uint16_t *)frm)[2] = htole16(status);/* status */ if (has_challenge && status == IEEE80211_STATUS_SUCCESS) { ((uint16_t *)frm)[3] = htole16((IEEE80211_CHALLENGE_LEN << 8) | IEEE80211_ELEMID_CHALLENGE); memcpy(&((uint16_t *)frm)[4], ni->ni_challenge, IEEE80211_CHALLENGE_LEN); m->m_pkthdr.len = m->m_len = 4 * sizeof(uint16_t) + IEEE80211_CHALLENGE_LEN; if (arg == IEEE80211_AUTH_SHARED_RESPONSE) { IEEE80211_NOTE(vap, IEEE80211_MSG_AUTH, ni, "request encrypt frame (%s)", __func__); /* mark frame for encryption */ params.ibp_flags |= IEEE80211_BPF_CRYPTO; } } else m->m_pkthdr.len = m->m_len = 3 * sizeof(uint16_t); /* XXX not right for shared key */ if (status == IEEE80211_STATUS_SUCCESS) IEEE80211_NODE_STAT(ni, tx_auth); else IEEE80211_NODE_STAT(ni, tx_auth_fail); if (vap->iv_opmode == IEEE80211_M_STA) ieee80211_add_callback(m, ieee80211_tx_mgt_cb, (void *) vap->iv_state); break; case IEEE80211_FC0_SUBTYPE_DEAUTH: IEEE80211_NOTE(vap, IEEE80211_MSG_AUTH, ni, "send station deauthenticate (reason: %d (%s))", arg, ieee80211_reason_to_string(arg)); m = ieee80211_getmgtframe(&frm, ic->ic_headroom + sizeof(struct ieee80211_frame), sizeof(uint16_t)); if (m == NULL) senderr(ENOMEM, is_tx_nobuf); *(uint16_t *)frm = htole16(arg); /* reason */ m->m_pkthdr.len = m->m_len = sizeof(uint16_t); IEEE80211_NODE_STAT(ni, tx_deauth); IEEE80211_NODE_STAT_SET(ni, tx_deauth_code, arg); ieee80211_node_unauthorize(ni); /* port closed */ break; case IEEE80211_FC0_SUBTYPE_ASSOC_REQ: case IEEE80211_FC0_SUBTYPE_REASSOC_REQ: /* * asreq frame format * [2] capability information * [2] listen interval * [6*] current AP address (reassoc only) * [tlv] ssid * [tlv] supported rates * [tlv] extended supported rates * [4] power capability (optional) * [28] supported channels (optional) * [tlv] HT capabilities * [tlv] VHT capabilities * [tlv] WME (optional) * [tlv] Vendor OUI HT capabilities (optional) * [tlv] Atheros capabilities (if negotiated) * [tlv] AppIE's (optional) */ m = ieee80211_getmgtframe(&frm, ic->ic_headroom + sizeof(struct ieee80211_frame), sizeof(uint16_t) + sizeof(uint16_t) + IEEE80211_ADDR_LEN + 2 + IEEE80211_NWID_LEN + 2 + IEEE80211_RATE_SIZE + 2 + (IEEE80211_RATE_MAXSIZE - IEEE80211_RATE_SIZE) + 4 + 2 + 26 + sizeof(struct ieee80211_wme_info) + sizeof(struct ieee80211_ie_htcap) + sizeof(struct ieee80211_ie_vhtcap) + 4 + sizeof(struct ieee80211_ie_htcap) #ifdef IEEE80211_SUPPORT_SUPERG + sizeof(struct ieee80211_ath_ie) #endif + (vap->iv_appie_wpa != NULL ? vap->iv_appie_wpa->ie_len : 0) + (vap->iv_appie_assocreq != NULL ? vap->iv_appie_assocreq->ie_len : 0) ); if (m == NULL) senderr(ENOMEM, is_tx_nobuf); KASSERT(vap->iv_opmode == IEEE80211_M_STA, ("wrong mode %u", vap->iv_opmode)); capinfo = IEEE80211_CAPINFO_ESS; if (vap->iv_flags & IEEE80211_F_PRIVACY) capinfo |= IEEE80211_CAPINFO_PRIVACY; /* * NB: Some 11a AP's reject the request when * short preamble is set. 
*/ if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) && IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) capinfo |= IEEE80211_CAPINFO_SHORT_PREAMBLE; if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan) && (ic->ic_caps & IEEE80211_C_SHSLOT)) capinfo |= IEEE80211_CAPINFO_SHORT_SLOTTIME; if ((ni->ni_capinfo & IEEE80211_CAPINFO_SPECTRUM_MGMT) && (vap->iv_flags & IEEE80211_F_DOTH)) capinfo |= IEEE80211_CAPINFO_SPECTRUM_MGMT; *(uint16_t *)frm = htole16(capinfo); frm += 2; KASSERT(bss->ni_intval != 0, ("beacon interval is zero!")); *(uint16_t *)frm = htole16(howmany(ic->ic_lintval, bss->ni_intval)); frm += 2; if (type == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) { IEEE80211_ADDR_COPY(frm, bss->ni_bssid); frm += IEEE80211_ADDR_LEN; } frm = ieee80211_add_ssid(frm, ni->ni_essid, ni->ni_esslen); frm = ieee80211_add_rates(frm, &ni->ni_rates); frm = ieee80211_add_rsn(frm, vap); frm = ieee80211_add_xrates(frm, &ni->ni_rates); if (capinfo & IEEE80211_CAPINFO_SPECTRUM_MGMT) { frm = ieee80211_add_powercapability(frm, ic->ic_curchan); frm = ieee80211_add_supportedchannels(frm, ic); } /* * Check the channel - we may be using an 11n NIC with an * 11n capable station, but we're configured to be an 11b * channel. */ if ((vap->iv_flags_ht & IEEE80211_FHT_HT) && IEEE80211_IS_CHAN_HT(ni->ni_chan) && ni->ni_ies.htcap_ie != NULL && ni->ni_ies.htcap_ie[0] == IEEE80211_ELEMID_HTCAP) { frm = ieee80211_add_htcap(frm, ni); } if ((vap->iv_flags_vht & IEEE80211_FVHT_VHT) && IEEE80211_IS_CHAN_VHT(ni->ni_chan) && ni->ni_ies.vhtcap_ie != NULL && ni->ni_ies.vhtcap_ie[0] == IEEE80211_ELEMID_VHT_CAP) { frm = ieee80211_add_vhtcap(frm, ni); } frm = ieee80211_add_wpa(frm, vap); if ((ic->ic_flags & IEEE80211_F_WME) && ni->ni_ies.wme_ie != NULL) frm = ieee80211_add_wme_info(frm, &ic->ic_wme); /* * Same deal - only send HT info if we're on an 11n * capable channel. */ if ((vap->iv_flags_ht & IEEE80211_FHT_HT) && IEEE80211_IS_CHAN_HT(ni->ni_chan) && ni->ni_ies.htcap_ie != NULL && ni->ni_ies.htcap_ie[0] == IEEE80211_ELEMID_VENDOR) { frm = ieee80211_add_htcap_vendor(frm, ni); } #ifdef IEEE80211_SUPPORT_SUPERG if (IEEE80211_ATH_CAP(vap, ni, IEEE80211_F_ATHEROS)) { frm = ieee80211_add_ath(frm, IEEE80211_ATH_CAP(vap, ni, IEEE80211_F_ATHEROS), ((vap->iv_flags & IEEE80211_F_WPA) == 0 && ni->ni_authmode != IEEE80211_AUTH_8021X) ? 
vap->iv_def_txkey : IEEE80211_KEYIX_NONE); } #endif /* IEEE80211_SUPPORT_SUPERG */ if (vap->iv_appie_assocreq != NULL) frm = add_appie(frm, vap->iv_appie_assocreq); m->m_pkthdr.len = m->m_len = frm - mtod(m, uint8_t *); ieee80211_add_callback(m, ieee80211_tx_mgt_cb, (void *) vap->iv_state); break; case IEEE80211_FC0_SUBTYPE_ASSOC_RESP: case IEEE80211_FC0_SUBTYPE_REASSOC_RESP: /* * asresp frame format * [2] capability information * [2] status * [2] association ID * [tlv] supported rates * [tlv] extended supported rates * [tlv] HT capabilities (standard, if STA enabled) * [tlv] HT information (standard, if STA enabled) * [tlv] VHT capabilities (standard, if STA enabled) * [tlv] VHT information (standard, if STA enabled) * [tlv] WME (if configured and STA enabled) * [tlv] HT capabilities (vendor OUI, if STA enabled) * [tlv] HT information (vendor OUI, if STA enabled) * [tlv] Atheros capabilities (if STA enabled) * [tlv] AppIE's (optional) */ m = ieee80211_getmgtframe(&frm, ic->ic_headroom + sizeof(struct ieee80211_frame), sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint16_t) + 2 + IEEE80211_RATE_SIZE + 2 + (IEEE80211_RATE_MAXSIZE - IEEE80211_RATE_SIZE) + sizeof(struct ieee80211_ie_htcap) + 4 + sizeof(struct ieee80211_ie_htinfo) + 4 + sizeof(struct ieee80211_ie_vhtcap) + sizeof(struct ieee80211_ie_vht_operation) + sizeof(struct ieee80211_wme_param) #ifdef IEEE80211_SUPPORT_SUPERG + sizeof(struct ieee80211_ath_ie) #endif + (vap->iv_appie_assocresp != NULL ? vap->iv_appie_assocresp->ie_len : 0) ); if (m == NULL) senderr(ENOMEM, is_tx_nobuf); capinfo = ieee80211_getcapinfo(vap, bss->ni_chan); *(uint16_t *)frm = htole16(capinfo); frm += 2; *(uint16_t *)frm = htole16(arg); /* status */ frm += 2; if (arg == IEEE80211_STATUS_SUCCESS) { *(uint16_t *)frm = htole16(ni->ni_associd); IEEE80211_NODE_STAT(ni, tx_assoc); } else IEEE80211_NODE_STAT(ni, tx_assoc_fail); frm += 2; frm = ieee80211_add_rates(frm, &ni->ni_rates); frm = ieee80211_add_xrates(frm, &ni->ni_rates); /* NB: respond according to what we received */ if ((ni->ni_flags & HTFLAGS) == IEEE80211_NODE_HT) { frm = ieee80211_add_htcap(frm, ni); frm = ieee80211_add_htinfo(frm, ni); } if ((vap->iv_flags & IEEE80211_F_WME) && ni->ni_ies.wme_ie != NULL) frm = ieee80211_add_wme_param(frm, &ic->ic_wme); if ((ni->ni_flags & HTFLAGS) == HTFLAGS) { frm = ieee80211_add_htcap_vendor(frm, ni); frm = ieee80211_add_htinfo_vendor(frm, ni); } if (ni->ni_flags & IEEE80211_NODE_VHT) { frm = ieee80211_add_vhtcap(frm, ni); frm = ieee80211_add_vhtinfo(frm, ni); } #ifdef IEEE80211_SUPPORT_SUPERG if (IEEE80211_ATH_CAP(vap, ni, IEEE80211_F_ATHEROS)) frm = ieee80211_add_ath(frm, IEEE80211_ATH_CAP(vap, ni, IEEE80211_F_ATHEROS), ((vap->iv_flags & IEEE80211_F_WPA) == 0 && ni->ni_authmode != IEEE80211_AUTH_8021X) ? 
vap->iv_def_txkey : IEEE80211_KEYIX_NONE); #endif /* IEEE80211_SUPPORT_SUPERG */ if (vap->iv_appie_assocresp != NULL) frm = add_appie(frm, vap->iv_appie_assocresp); m->m_pkthdr.len = m->m_len = frm - mtod(m, uint8_t *); break; case IEEE80211_FC0_SUBTYPE_DISASSOC: IEEE80211_NOTE(vap, IEEE80211_MSG_ASSOC, ni, "send station disassociate (reason: %d (%s))", arg, ieee80211_reason_to_string(arg)); m = ieee80211_getmgtframe(&frm, ic->ic_headroom + sizeof(struct ieee80211_frame), sizeof(uint16_t)); if (m == NULL) senderr(ENOMEM, is_tx_nobuf); *(uint16_t *)frm = htole16(arg); /* reason */ m->m_pkthdr.len = m->m_len = sizeof(uint16_t); IEEE80211_NODE_STAT(ni, tx_disassoc); IEEE80211_NODE_STAT_SET(ni, tx_disassoc_code, arg); break; default: IEEE80211_NOTE(vap, IEEE80211_MSG_ANY, ni, "invalid mgmt frame type %u", type); senderr(EINVAL, is_tx_unknownmgt); /* NOTREACHED */ } /* NB: force non-ProbeResp frames to the highest queue */ params.ibp_pri = WME_AC_VO; params.ibp_rate0 = bss->ni_txparms->mgmtrate; /* NB: we know all frames are unicast */ params.ibp_try0 = bss->ni_txparms->maxretry; params.ibp_power = bss->ni_txpower; return ieee80211_mgmt_output(ni, m, type, ¶ms); bad: ieee80211_free_node(ni); return ret; #undef senderr #undef HTFLAGS } /* * Return an mbuf with a probe response frame in it. * Space is left to prepend and 802.11 header at the * front but it's left to the caller to fill in. */ struct mbuf * ieee80211_alloc_proberesp(struct ieee80211_node *bss, int legacy) { struct ieee80211vap *vap = bss->ni_vap; struct ieee80211com *ic = bss->ni_ic; const struct ieee80211_rateset *rs; struct mbuf *m; uint16_t capinfo; uint8_t *frm; /* * probe response frame format * [8] time stamp * [2] beacon interval * [2] cabability information * [tlv] ssid * [tlv] supported rates * [tlv] parameter set (FH/DS) * [tlv] parameter set (IBSS) * [tlv] country (optional) * [3] power control (optional) * [5] channel switch announcement (CSA) (optional) * [tlv] extended rate phy (ERP) * [tlv] extended supported rates * [tlv] RSN (optional) * [tlv] HT capabilities * [tlv] HT information * [tlv] VHT capabilities * [tlv] VHT information * [tlv] WPA (optional) * [tlv] WME (optional) * [tlv] Vendor OUI HT capabilities (optional) * [tlv] Vendor OUI HT information (optional) * [tlv] Atheros capabilities * [tlv] AppIE's (optional) * [tlv] Mesh ID (MBSS) * [tlv] Mesh Conf (MBSS) */ m = ieee80211_getmgtframe(&frm, ic->ic_headroom + sizeof(struct ieee80211_frame), 8 + sizeof(uint16_t) + sizeof(uint16_t) + 2 + IEEE80211_NWID_LEN + 2 + IEEE80211_RATE_SIZE + 7 /* max(7,3) */ + IEEE80211_COUNTRY_MAX_SIZE + 3 + sizeof(struct ieee80211_csa_ie) + sizeof(struct ieee80211_quiet_ie) + 3 + 2 + (IEEE80211_RATE_MAXSIZE - IEEE80211_RATE_SIZE) + sizeof(struct ieee80211_ie_wpa) + sizeof(struct ieee80211_ie_htcap) + sizeof(struct ieee80211_ie_htinfo) + sizeof(struct ieee80211_ie_wpa) + sizeof(struct ieee80211_wme_param) + 4 + sizeof(struct ieee80211_ie_htcap) + 4 + sizeof(struct ieee80211_ie_htinfo) + sizeof(struct ieee80211_ie_vhtcap) + sizeof(struct ieee80211_ie_vht_operation) #ifdef IEEE80211_SUPPORT_SUPERG + sizeof(struct ieee80211_ath_ie) #endif #ifdef IEEE80211_SUPPORT_MESH + 2 + IEEE80211_MESHID_LEN + sizeof(struct ieee80211_meshconf_ie) #endif + (vap->iv_appie_proberesp != NULL ? 
vap->iv_appie_proberesp->ie_len : 0) ); if (m == NULL) { vap->iv_stats.is_tx_nobuf++; return NULL; } memset(frm, 0, 8); /* timestamp should be filled later */ frm += 8; *(uint16_t *)frm = htole16(bss->ni_intval); frm += 2; capinfo = ieee80211_getcapinfo(vap, bss->ni_chan); *(uint16_t *)frm = htole16(capinfo); frm += 2; frm = ieee80211_add_ssid(frm, bss->ni_essid, bss->ni_esslen); rs = ieee80211_get_suprates(ic, bss->ni_chan); frm = ieee80211_add_rates(frm, rs); if (IEEE80211_IS_CHAN_FHSS(bss->ni_chan)) { *frm++ = IEEE80211_ELEMID_FHPARMS; *frm++ = 5; *frm++ = bss->ni_fhdwell & 0x00ff; *frm++ = (bss->ni_fhdwell >> 8) & 0x00ff; *frm++ = IEEE80211_FH_CHANSET( ieee80211_chan2ieee(ic, bss->ni_chan)); *frm++ = IEEE80211_FH_CHANPAT( ieee80211_chan2ieee(ic, bss->ni_chan)); *frm++ = bss->ni_fhindex; } else { *frm++ = IEEE80211_ELEMID_DSPARMS; *frm++ = 1; *frm++ = ieee80211_chan2ieee(ic, bss->ni_chan); } if (vap->iv_opmode == IEEE80211_M_IBSS) { *frm++ = IEEE80211_ELEMID_IBSSPARMS; *frm++ = 2; *frm++ = 0; *frm++ = 0; /* TODO: ATIM window */ } if ((vap->iv_flags & IEEE80211_F_DOTH) || (vap->iv_flags_ext & IEEE80211_FEXT_DOTD)) frm = ieee80211_add_countryie(frm, ic); if (vap->iv_flags & IEEE80211_F_DOTH) { if (IEEE80211_IS_CHAN_5GHZ(bss->ni_chan)) frm = ieee80211_add_powerconstraint(frm, vap); if (ic->ic_flags & IEEE80211_F_CSAPENDING) frm = ieee80211_add_csa(frm, vap); } if (vap->iv_flags & IEEE80211_F_DOTH) { if (IEEE80211_IS_CHAN_DFS(ic->ic_bsschan) && (vap->iv_flags_ext & IEEE80211_FEXT_DFS)) { if (vap->iv_quiet) frm = ieee80211_add_quiet(frm, vap, 0); } } if (IEEE80211_IS_CHAN_ANYG(bss->ni_chan)) frm = ieee80211_add_erp(frm, ic); frm = ieee80211_add_xrates(frm, rs); frm = ieee80211_add_rsn(frm, vap); /* * NB: legacy 11b clients do not get certain ie's. * The caller identifies such clients by passing * a token in legacy to us. Could expand this to be * any legacy client for stuff like HT ie's. */ if (IEEE80211_IS_CHAN_HT(bss->ni_chan) && legacy != IEEE80211_SEND_LEGACY_11B) { frm = ieee80211_add_htcap(frm, bss); frm = ieee80211_add_htinfo(frm, bss); } if (IEEE80211_IS_CHAN_VHT(bss->ni_chan) && legacy != IEEE80211_SEND_LEGACY_11B) { frm = ieee80211_add_vhtcap(frm, bss); frm = ieee80211_add_vhtinfo(frm, bss); } frm = ieee80211_add_wpa(frm, vap); if (vap->iv_flags & IEEE80211_F_WME) frm = ieee80211_add_wme_param(frm, &ic->ic_wme); if (IEEE80211_IS_CHAN_HT(bss->ni_chan) && (vap->iv_flags_ht & IEEE80211_FHT_HTCOMPAT) && legacy != IEEE80211_SEND_LEGACY_11B) { frm = ieee80211_add_htcap_vendor(frm, bss); frm = ieee80211_add_htinfo_vendor(frm, bss); } #ifdef IEEE80211_SUPPORT_SUPERG if ((vap->iv_flags & IEEE80211_F_ATHEROS) && legacy != IEEE80211_SEND_LEGACY_11B) frm = ieee80211_add_athcaps(frm, bss); #endif if (vap->iv_appie_proberesp != NULL) frm = add_appie(frm, vap->iv_appie_proberesp); #ifdef IEEE80211_SUPPORT_MESH if (vap->iv_opmode == IEEE80211_M_MBSS) { frm = ieee80211_add_meshid(frm, vap); frm = ieee80211_add_meshconf(frm, vap); } #endif m->m_pkthdr.len = m->m_len = frm - mtod(m, uint8_t *); return m; } /* * Send a probe response frame to the specified mac address. * This does not go through the normal mgt frame api so we * can specify the destination address and re-use the bss node * for the sta reference. 
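 *
 * A typical invocation from a probe request input path looks
 * roughly like this ("legacy_client" is illustrative):
 *
 *	ieee80211_send_proberesp(vap, wh->i_addr2,
 *	    legacy_client ? IEEE80211_SEND_LEGACY_11B : 0);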
*/ int ieee80211_send_proberesp(struct ieee80211vap *vap, const uint8_t da[IEEE80211_ADDR_LEN], int legacy) { struct ieee80211_node *bss = vap->iv_bss; struct ieee80211com *ic = vap->iv_ic; struct mbuf *m; int ret; if (vap->iv_state == IEEE80211_S_CAC) { IEEE80211_NOTE(vap, IEEE80211_MSG_OUTPUT, bss, "block %s frame in CAC state", "probe response"); vap->iv_stats.is_tx_badstate++; return EIO; /* XXX */ } /* * Hold a reference on the node so it doesn't go away until after * the xmit is complete all the way in the driver. On error we * will remove our reference. */ IEEE80211_DPRINTF(vap, IEEE80211_MSG_NODE, "ieee80211_ref_node (%s:%u) %p<%s> refcnt %d\n", __func__, __LINE__, bss, ether_sprintf(bss->ni_macaddr), ieee80211_node_refcnt(bss)+1); ieee80211_ref_node(bss); m = ieee80211_alloc_proberesp(bss, legacy); if (m == NULL) { ieee80211_free_node(bss); return ENOMEM; } M_PREPEND(m, sizeof(struct ieee80211_frame), M_NOWAIT); KASSERT(m != NULL, ("no room for header")); IEEE80211_TX_LOCK(ic); ieee80211_send_setup(bss, m, IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_PROBE_RESP, IEEE80211_NONQOS_TID, vap->iv_myaddr, da, bss->ni_bssid); /* XXX power management? */ m->m_flags |= M_ENCAP; /* mark encapsulated */ M_WME_SETAC(m, WME_AC_BE); IEEE80211_DPRINTF(vap, IEEE80211_MSG_DEBUG | IEEE80211_MSG_DUMPPKTS, "send probe resp on channel %u to %s%s\n", ieee80211_chan2ieee(ic, ic->ic_curchan), ether_sprintf(da), legacy ? " " : ""); IEEE80211_NODE_STAT(bss, tx_mgmt); ret = ieee80211_raw_output(vap, bss, m, NULL); IEEE80211_TX_UNLOCK(ic); return (ret); } /* * Allocate and build a RTS (Request To Send) control frame. */ struct mbuf * ieee80211_alloc_rts(struct ieee80211com *ic, const uint8_t ra[IEEE80211_ADDR_LEN], const uint8_t ta[IEEE80211_ADDR_LEN], uint16_t dur) { struct ieee80211_frame_rts *rts; struct mbuf *m; /* XXX honor ic_headroom */ m = m_gethdr(M_NOWAIT, MT_DATA); if (m != NULL) { rts = mtod(m, struct ieee80211_frame_rts *); rts->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_CTL | IEEE80211_FC0_SUBTYPE_RTS; rts->i_fc[1] = IEEE80211_FC1_DIR_NODS; *(u_int16_t *)rts->i_dur = htole16(dur); IEEE80211_ADDR_COPY(rts->i_ra, ra); IEEE80211_ADDR_COPY(rts->i_ta, ta); m->m_pkthdr.len = m->m_len = sizeof(struct ieee80211_frame_rts); } return m; } /* * Allocate and build a CTS (Clear To Send) control frame. */ struct mbuf * ieee80211_alloc_cts(struct ieee80211com *ic, const uint8_t ra[IEEE80211_ADDR_LEN], uint16_t dur) { struct ieee80211_frame_cts *cts; struct mbuf *m; /* XXX honor ic_headroom */ m = m_gethdr(M_NOWAIT, MT_DATA); if (m != NULL) { cts = mtod(m, struct ieee80211_frame_cts *); cts->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_CTL | IEEE80211_FC0_SUBTYPE_CTS; cts->i_fc[1] = IEEE80211_FC1_DIR_NODS; *(u_int16_t *)cts->i_dur = htole16(dur); IEEE80211_ADDR_COPY(cts->i_ra, ra); m->m_pkthdr.len = m->m_len = sizeof(struct ieee80211_frame_cts); } return m; } +/* + * Wrapper for CTS/RTS frame allocation. 
+ */ +struct mbuf * +ieee80211_alloc_prot(struct ieee80211_node *ni, const struct mbuf *m, + uint8_t rate, int prot) +{ + struct ieee80211com *ic = ni->ni_ic; + const struct ieee80211_frame *wh; + struct mbuf *mprot; + uint16_t dur; + int pktlen, isshort; + + KASSERT(prot == IEEE80211_PROT_RTSCTS || + prot == IEEE80211_PROT_CTSONLY, + ("wrong protection type %d", prot)); + + wh = mtod(m, const struct ieee80211_frame *); + pktlen = m->m_pkthdr.len + IEEE80211_CRC_LEN; + isshort = (ic->ic_flags & IEEE80211_F_SHPREAMBLE) != 0; + dur = ieee80211_compute_duration(ic->ic_rt, pktlen, rate, isshort) + + ieee80211_ack_duration(ic->ic_rt, rate, isshort); + + if (prot == IEEE80211_PROT_RTSCTS) { + /* NB: CTS is the same size as an ACK */ + dur += ieee80211_ack_duration(ic->ic_rt, rate, isshort); + mprot = ieee80211_alloc_rts(ic, wh->i_addr1, wh->i_addr2, dur); + } else + mprot = ieee80211_alloc_cts(ic, ni->ni_vap->iv_myaddr, dur); + + return (mprot); +} + static void ieee80211_tx_mgt_timeout(void *arg) { struct ieee80211vap *vap = arg; IEEE80211_LOCK(vap->iv_ic); if (vap->iv_state != IEEE80211_S_INIT && (vap->iv_ic->ic_flags & IEEE80211_F_SCAN) == 0) { /* * NB: it's safe to specify a timeout as the reason here; * it'll only be used in the right state. */ ieee80211_new_state_locked(vap, IEEE80211_S_SCAN, IEEE80211_SCAN_FAIL_TIMEOUT); } IEEE80211_UNLOCK(vap->iv_ic); } /* * This is the callback set on net80211-sourced transmitted * authentication request frames. * * This does a couple of things: * * + If the frame transmitted was a success, it schedules a future * event which will transition the interface to scan. * If a state transition _then_ occurs before that event occurs, * said state transition will cancel this callout. * * + If the frame transmit was a failure, it immediately schedules * the transition back to scan. */ static void ieee80211_tx_mgt_cb(struct ieee80211_node *ni, void *arg, int status) { struct ieee80211vap *vap = ni->ni_vap; enum ieee80211_state ostate = (enum ieee80211_state) arg; /* * Frame transmit completed; arrange timer callback. If * transmit was successful we wait for response. Otherwise * we arrange an immediate callback instead of doing the * callback directly since we don't know what state the driver * is in (e.g. what locks it is holding). This work should * not be too time-critical and not happen too often so the * added overhead is acceptable. * * XXX what happens if !acked but response shows up before callback? */ if (vap->iv_state == ostate) { callout_reset(&vap->iv_mgtsend, status == 0 ? IEEE80211_TRANS_WAIT*hz : 0, ieee80211_tx_mgt_timeout, vap); } } static void ieee80211_beacon_construct(struct mbuf *m, uint8_t *frm, struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211_beacon_offsets *bo = &vap->iv_bcn_off; struct ieee80211com *ic = ni->ni_ic; struct ieee80211_rateset *rs = &ni->ni_rates; uint16_t capinfo; /* * beacon frame format * * TODO: update to 802.11-2012; a lot of stuff has changed; * vendor extensions should be at the end, etc.
* * [8] time stamp * [2] beacon interval * [2] capability information * [tlv] ssid * [tlv] supported rates * [3] parameter set (DS) * [8] CF parameter set (optional) * [tlv] parameter set (IBSS/TIM) * [tlv] country (optional) * [3] power control (optional) * [5] channel switch announcement (CSA) (optional) * XXX TODO: Quiet * XXX TODO: IBSS DFS * XXX TODO: TPC report * [tlv] extended rate phy (ERP) * [tlv] extended supported rates * [tlv] RSN parameters * XXX TODO: BSSLOAD * (XXX EDCA parameter set, QoS capability?) * XXX TODO: AP channel report * * [tlv] HT capabilities * [tlv] HT information * XXX TODO: 20/40 BSS coexistence * Mesh: * XXX TODO: Meshid * XXX TODO: mesh config * XXX TODO: mesh awake window * XXX TODO: beacon timing (mesh, etc) * XXX TODO: MCCAOP Advertisement Overview * XXX TODO: MCCAOP Advertisement * XXX TODO: Mesh channel switch parameters * VHT: * XXX TODO: VHT capabilities * XXX TODO: VHT operation * XXX TODO: VHT transmit power envelope * XXX TODO: channel switch wrapper element * XXX TODO: extended BSS load element * * XXX Vendor-specific OIDs (e.g. Atheros) * [tlv] WPA parameters * [tlv] WME parameters * [tlv] Vendor OUI HT capabilities (optional) * [tlv] Vendor OUI HT information (optional) * [tlv] Atheros capabilities (optional) * [tlv] TDMA parameters (optional) * [tlv] Mesh ID (MBSS) * [tlv] Mesh Conf (MBSS) * [tlv] application data (optional) */ memset(bo, 0, sizeof(*bo)); memset(frm, 0, 8); /* XXX timestamp is set by hardware/driver */ frm += 8; *(uint16_t *)frm = htole16(ni->ni_intval); frm += 2; capinfo = ieee80211_getcapinfo(vap, ni->ni_chan); bo->bo_caps = (uint16_t *)frm; *(uint16_t *)frm = htole16(capinfo); frm += 2; *frm++ = IEEE80211_ELEMID_SSID; if ((vap->iv_flags & IEEE80211_F_HIDESSID) == 0) { *frm++ = ni->ni_esslen; memcpy(frm, ni->ni_essid, ni->ni_esslen); frm += ni->ni_esslen; } else *frm++ = 0; frm = ieee80211_add_rates(frm, rs); if (!IEEE80211_IS_CHAN_FHSS(ni->ni_chan)) { *frm++ = IEEE80211_ELEMID_DSPARMS; *frm++ = 1; *frm++ = ieee80211_chan2ieee(ic, ni->ni_chan); } if (ic->ic_flags & IEEE80211_F_PCF) { bo->bo_cfp = frm; frm = ieee80211_add_cfparms(frm, ic); } bo->bo_tim = frm; if (vap->iv_opmode == IEEE80211_M_IBSS) { *frm++ = IEEE80211_ELEMID_IBSSPARMS; *frm++ = 2; *frm++ = 0; *frm++ = 0; /* TODO: ATIM window */ bo->bo_tim_len = 0; } else if (vap->iv_opmode == IEEE80211_M_HOSTAP || vap->iv_opmode == IEEE80211_M_MBSS) { /* TIM IE is the same for Mesh and Hostap */ struct ieee80211_tim_ie *tie = (struct ieee80211_tim_ie *) frm; tie->tim_ie = IEEE80211_ELEMID_TIM; tie->tim_len = 4; /* length */ tie->tim_count = 0; /* DTIM count */ tie->tim_period = vap->iv_dtim_period; /* DTIM period */ tie->tim_bitctl = 0; /* bitmap control */ tie->tim_bitmap[0] = 0; /* Partial Virtual Bitmap */ frm += sizeof(struct ieee80211_tim_ie); bo->bo_tim_len = 1; } bo->bo_tim_trailer = frm; if ((vap->iv_flags & IEEE80211_F_DOTH) || (vap->iv_flags_ext & IEEE80211_FEXT_DOTD)) frm = ieee80211_add_countryie(frm, ic); if (vap->iv_flags & IEEE80211_F_DOTH) { if (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) frm = ieee80211_add_powerconstraint(frm, vap); bo->bo_csa = frm; if (ic->ic_flags & IEEE80211_F_CSAPENDING) frm = ieee80211_add_csa(frm, vap); } else bo->bo_csa = frm; bo->bo_quiet = NULL; if (vap->iv_flags & IEEE80211_F_DOTH) { if (IEEE80211_IS_CHAN_DFS(ic->ic_bsschan) && (vap->iv_flags_ext & IEEE80211_FEXT_DFS) && (vap->iv_quiet == 1)) { /* * We only insert the quiet IE offset if * the quiet IE is enabled.
Otherwise don't * put it here or we'll just overwrite * some other beacon contents. */ if (vap->iv_quiet) { bo->bo_quiet = frm; frm = ieee80211_add_quiet(frm, vap, 0); } } } if (IEEE80211_IS_CHAN_ANYG(ni->ni_chan)) { bo->bo_erp = frm; frm = ieee80211_add_erp(frm, ic); } frm = ieee80211_add_xrates(frm, rs); frm = ieee80211_add_rsn(frm, vap); if (IEEE80211_IS_CHAN_HT(ni->ni_chan)) { frm = ieee80211_add_htcap(frm, ni); bo->bo_htinfo = frm; frm = ieee80211_add_htinfo(frm, ni); } if (IEEE80211_IS_CHAN_VHT(ni->ni_chan)) { frm = ieee80211_add_vhtcap(frm, ni); bo->bo_vhtinfo = frm; frm = ieee80211_add_vhtinfo(frm, ni); /* Transmit power envelope */ /* Channel switch wrapper element */ /* Extended bss load element */ } frm = ieee80211_add_wpa(frm, vap); if (vap->iv_flags & IEEE80211_F_WME) { bo->bo_wme = frm; frm = ieee80211_add_wme_param(frm, &ic->ic_wme); } if (IEEE80211_IS_CHAN_HT(ni->ni_chan) && (vap->iv_flags_ht & IEEE80211_FHT_HTCOMPAT)) { frm = ieee80211_add_htcap_vendor(frm, ni); frm = ieee80211_add_htinfo_vendor(frm, ni); } #ifdef IEEE80211_SUPPORT_SUPERG if (vap->iv_flags & IEEE80211_F_ATHEROS) { bo->bo_ath = frm; frm = ieee80211_add_athcaps(frm, ni); } #endif #ifdef IEEE80211_SUPPORT_TDMA if (vap->iv_caps & IEEE80211_C_TDMA) { bo->bo_tdma = frm; frm = ieee80211_add_tdma(frm, vap); } #endif if (vap->iv_appie_beacon != NULL) { bo->bo_appie = frm; bo->bo_appie_len = vap->iv_appie_beacon->ie_len; frm = add_appie(frm, vap->iv_appie_beacon); } /* XXX TODO: move meshid/meshconf up to before vendor extensions? */ #ifdef IEEE80211_SUPPORT_MESH if (vap->iv_opmode == IEEE80211_M_MBSS) { frm = ieee80211_add_meshid(frm, vap); bo->bo_meshconf = frm; frm = ieee80211_add_meshconf(frm, vap); } #endif bo->bo_tim_trailer_len = frm - bo->bo_tim_trailer; bo->bo_csa_trailer_len = frm - bo->bo_csa; m->m_pkthdr.len = m->m_len = frm - mtod(m, uint8_t *); } /* * Allocate a beacon frame and fill in the appropriate bits. */ struct mbuf * ieee80211_beacon_alloc(struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct ifnet *ifp = vap->iv_ifp; struct ieee80211_frame *wh; struct mbuf *m; int pktlen; uint8_t *frm; /* * Update the "We're putting the quiet IE in the beacon" state. */ if (vap->iv_quiet == 1) vap->iv_flags_ext |= IEEE80211_FEXT_QUIET_IE; else if (vap->iv_quiet == 0) vap->iv_flags_ext &= ~IEEE80211_FEXT_QUIET_IE; /* * beacon frame format * * Note: This needs updating for 802.11-2012. * * [8] time stamp * [2] beacon interval * [2] capability information * [tlv] ssid * [tlv] supported rates * [3] parameter set (DS) * [8] CF parameter set (optional) * [tlv] parameter set (IBSS/TIM) * [tlv] country (optional) * [3] power control (optional) * [5] channel switch announcement (CSA) (optional) * [tlv] extended rate phy (ERP) * [tlv] extended supported rates * [tlv] RSN parameters * [tlv] HT capabilities * [tlv] HT information * [tlv] VHT capabilities * [tlv] VHT operation * [tlv] Vendor OUI HT capabilities (optional) * [tlv] Vendor OUI HT information (optional) * XXX Vendor-specific OIDs (e.g. Atheros) * [tlv] WPA parameters * [tlv] WME parameters * [tlv] TDMA parameters (optional) * [tlv] Mesh ID (MBSS) * [tlv] Mesh Conf (MBSS) * [tlv] application data (optional) * NB: we allocate the max space required for the TIM bitmap. * XXX how big is this?
*/ pktlen = 8 /* time stamp */ + sizeof(uint16_t) /* beacon interval */ + sizeof(uint16_t) /* capabilities */ + 2 + ni->ni_esslen /* ssid */ + 2 + IEEE80211_RATE_SIZE /* supported rates */ + 2 + 1 /* DS parameters */ + 2 + 6 /* CF parameters */ + 2 + 4 + vap->iv_tim_len /* DTIM/IBSSPARMS */ + IEEE80211_COUNTRY_MAX_SIZE /* country */ + 2 + 1 /* power control */ + sizeof(struct ieee80211_csa_ie) /* CSA */ + sizeof(struct ieee80211_quiet_ie) /* Quiet */ + 2 + 1 /* ERP */ + 2 + (IEEE80211_RATE_MAXSIZE - IEEE80211_RATE_SIZE) + (vap->iv_caps & IEEE80211_C_WPA ? /* WPA 1+2 */ 2*sizeof(struct ieee80211_ie_wpa) : 0) /* XXX conditional? */ + 4+2*sizeof(struct ieee80211_ie_htcap)/* HT caps */ + 4+2*sizeof(struct ieee80211_ie_htinfo)/* HT info */ + sizeof(struct ieee80211_ie_vhtcap)/* VHT caps */ + sizeof(struct ieee80211_ie_vht_operation)/* VHT info */ + (vap->iv_caps & IEEE80211_C_WME ? /* WME */ sizeof(struct ieee80211_wme_param) : 0) #ifdef IEEE80211_SUPPORT_SUPERG + sizeof(struct ieee80211_ath_ie) /* ATH */ #endif #ifdef IEEE80211_SUPPORT_TDMA + (vap->iv_caps & IEEE80211_C_TDMA ? /* TDMA */ sizeof(struct ieee80211_tdma_param) : 0) #endif #ifdef IEEE80211_SUPPORT_MESH + 2 + ni->ni_meshidlen + sizeof(struct ieee80211_meshconf_ie) #endif + IEEE80211_MAX_APPIE ; m = ieee80211_getmgtframe(&frm, ic->ic_headroom + sizeof(struct ieee80211_frame), pktlen); if (m == NULL) { IEEE80211_DPRINTF(vap, IEEE80211_MSG_ANY, "%s: cannot get buf; size %u\n", __func__, pktlen); vap->iv_stats.is_tx_nobuf++; return NULL; } ieee80211_beacon_construct(m, frm, ni); M_PREPEND(m, sizeof(struct ieee80211_frame), M_NOWAIT); KASSERT(m != NULL, ("no space for 802.11 header?")); wh = mtod(m, struct ieee80211_frame *); wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_BEACON; wh->i_fc[1] = IEEE80211_FC1_DIR_NODS; *(uint16_t *)wh->i_dur = 0; IEEE80211_ADDR_COPY(wh->i_addr1, ifp->if_broadcastaddr); IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr); IEEE80211_ADDR_COPY(wh->i_addr3, ni->ni_bssid); *(uint16_t *)wh->i_seq = 0; return m; } /* * Update the dynamic parts of a beacon frame based on the current state. */ int ieee80211_beacon_update(struct ieee80211_node *ni, struct mbuf *m, int mcast) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211_beacon_offsets *bo = &vap->iv_bcn_off; struct ieee80211com *ic = ni->ni_ic; int len_changed = 0; uint16_t capinfo; struct ieee80211_frame *wh; ieee80211_seq seqno; IEEE80211_LOCK(ic); /* * Handle 11h channel change when we've reached the count. * We must recalculate the beacon frame contents to account * for the new channel. Note we do this only for the first * vap that reaches this point; subsequent vaps just update * their beacon state to reflect the recalculated channel. */ if (isset(bo->bo_flags, IEEE80211_BEACON_CSA) && vap->iv_csa_count == ic->ic_csa_count) { vap->iv_csa_count = 0; /* * Effect channel change before reconstructing the beacon * frame contents as many places reference ni_chan. */ if (ic->ic_csa_newchan != NULL) ieee80211_csa_completeswitch(ic); /* * NB: ieee80211_beacon_construct clears all pending * updates in bo_flags so we don't need to explicitly * clear IEEE80211_BEACON_CSA. */ ieee80211_beacon_construct(m, mtod(m, uint8_t*) + sizeof(struct ieee80211_frame), ni); /* XXX do WME aggressive mode processing? */ IEEE80211_UNLOCK(ic); return 1; /* just assume length changed */ } /* * Handle the quiet time element being added and removed. 
* Again, for now we just cheat and reconstruct the whole * beacon - that way the gap is provided as appropriate. * * So, track whether we have already added the IE versus * whether we want to be adding the IE. */ if ((vap->iv_flags_ext & IEEE80211_FEXT_QUIET_IE) && (vap->iv_quiet == 0)) { /* * Quiet time beacon IE enabled, but it's disabled; * recalc */ vap->iv_flags_ext &= ~IEEE80211_FEXT_QUIET_IE; ieee80211_beacon_construct(m, mtod(m, uint8_t*) + sizeof(struct ieee80211_frame), ni); /* XXX do WME aggressive mode processing? */ IEEE80211_UNLOCK(ic); return 1; /* just assume length changed */ } if (((vap->iv_flags_ext & IEEE80211_FEXT_QUIET_IE) == 0) && (vap->iv_quiet == 1)) { /* * Quiet time beacon IE disabled, but it's now enabled; * recalc */ vap->iv_flags_ext |= IEEE80211_FEXT_QUIET_IE; ieee80211_beacon_construct(m, mtod(m, uint8_t*) + sizeof(struct ieee80211_frame), ni); /* XXX do WME aggressive mode processing? */ IEEE80211_UNLOCK(ic); return 1; /* just assume length changed */ } wh = mtod(m, struct ieee80211_frame *); /* * XXX TODO Strictly speaking this should be incremented with the TX * lock held so as to serialise access to the non-qos TID sequence * number space. * * If the driver identifies it does its own TX seqno management then * we can skip this (and still not do the TX seqno.) */ seqno = ni->ni_txseqs[IEEE80211_NONQOS_TID]++; *(uint16_t *)&wh->i_seq[0] = htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT); M_SEQNO_SET(m, seqno); /* XXX faster to recalculate entirely or just changes? */ capinfo = ieee80211_getcapinfo(vap, ni->ni_chan); *bo->bo_caps = htole16(capinfo); if (vap->iv_flags & IEEE80211_F_WME) { struct ieee80211_wme_state *wme = &ic->ic_wme; /* * Check for aggressive mode change. When there is * significant high priority traffic in the BSS * throttle back BE traffic by using conservative * parameters. Otherwise BE uses aggressive params * to optimize performance of legacy/non-QoS traffic. */ if (wme->wme_flags & WME_F_AGGRMODE) { if (wme->wme_hipri_traffic > wme->wme_hipri_switch_thresh) { IEEE80211_DPRINTF(vap, IEEE80211_MSG_WME, "%s: traffic %u, disable aggressive mode\n", __func__, wme->wme_hipri_traffic); wme->wme_flags &= ~WME_F_AGGRMODE; ieee80211_wme_updateparams_locked(vap); wme->wme_hipri_traffic = wme->wme_hipri_switch_hysteresis; } else wme->wme_hipri_traffic = 0; } else { if (wme->wme_hipri_traffic <= wme->wme_hipri_switch_thresh) { IEEE80211_DPRINTF(vap, IEEE80211_MSG_WME, "%s: traffic %u, enable aggressive mode\n", __func__, wme->wme_hipri_traffic); wme->wme_flags |= WME_F_AGGRMODE; ieee80211_wme_updateparams_locked(vap); wme->wme_hipri_traffic = 0; } else wme->wme_hipri_traffic = wme->wme_hipri_switch_hysteresis; } if (isset(bo->bo_flags, IEEE80211_BEACON_WME)) { (void) ieee80211_add_wme_param(bo->bo_wme, wme); clrbit(bo->bo_flags, IEEE80211_BEACON_WME); } } if (isset(bo->bo_flags, IEEE80211_BEACON_HTINFO)) { ieee80211_ht_update_beacon(vap, bo); clrbit(bo->bo_flags, IEEE80211_BEACON_HTINFO); } #ifdef IEEE80211_SUPPORT_TDMA if (vap->iv_caps & IEEE80211_C_TDMA) { /* * NB: the beacon is potentially updated every TBTT. */ ieee80211_tdma_update_beacon(vap, bo); } #endif #ifdef IEEE80211_SUPPORT_MESH if (vap->iv_opmode == IEEE80211_M_MBSS) ieee80211_mesh_update_beacon(vap, bo); #endif if (vap->iv_opmode == IEEE80211_M_HOSTAP || vap->iv_opmode == IEEE80211_M_MBSS) { /* NB: no IBSS support*/ struct ieee80211_tim_ie *tie = (struct ieee80211_tim_ie *) bo->bo_tim; if (isset(bo->bo_flags, IEEE80211_BEACON_TIM)) { u_int timlen, timoff, i; /* * ATIM/DTIM needs updating. 
If it fits in the * current space allocated then just copy in the * new bits. Otherwise we need to move any trailing * data to make room. Note that we know there is * contiguous space because ieee80211_beacon_alloc * ensures there is space in the mbuf to write a * maximal-size virtual bitmap (based on iv_max_aid). */ /* * Calculate the bitmap size and offset, copy any * trailer out of the way, and then copy in the * new bitmap and update the information element. * Note that the tim bitmap must contain at least * one byte and any offset must be even. */ if (vap->iv_ps_pending != 0) { timoff = 128; /* impossibly large */ for (i = 0; i < vap->iv_tim_len; i++) if (vap->iv_tim_bitmap[i]) { timoff = i &~ 1; break; } KASSERT(timoff != 128, ("tim bitmap empty!")); for (i = vap->iv_tim_len-1; i >= timoff; i--) if (vap->iv_tim_bitmap[i]) break; timlen = 1 + (i - timoff); } else { timoff = 0; timlen = 1; } /* * TODO: validate this! */ if (timlen != bo->bo_tim_len) { /* copy up/down trailer */ int adjust = tie->tim_bitmap+timlen - bo->bo_tim_trailer; ovbcopy(bo->bo_tim_trailer, bo->bo_tim_trailer+adjust, bo->bo_tim_trailer_len); bo->bo_tim_trailer += adjust; bo->bo_erp += adjust; bo->bo_htinfo += adjust; bo->bo_vhtinfo += adjust; #ifdef IEEE80211_SUPPORT_SUPERG bo->bo_ath += adjust; #endif #ifdef IEEE80211_SUPPORT_TDMA bo->bo_tdma += adjust; #endif #ifdef IEEE80211_SUPPORT_MESH bo->bo_meshconf += adjust; #endif bo->bo_appie += adjust; bo->bo_wme += adjust; bo->bo_csa += adjust; bo->bo_quiet += adjust; bo->bo_tim_len = timlen; /* update information element */ tie->tim_len = 3 + timlen; tie->tim_bitctl = timoff; len_changed = 1; } memcpy(tie->tim_bitmap, vap->iv_tim_bitmap + timoff, bo->bo_tim_len); clrbit(bo->bo_flags, IEEE80211_BEACON_TIM); IEEE80211_DPRINTF(vap, IEEE80211_MSG_POWER, "%s: TIM updated, pending %u, off %u, len %u\n", __func__, vap->iv_ps_pending, timoff, timlen); } /* count down DTIM period */ if (tie->tim_count == 0) tie->tim_count = tie->tim_period - 1; else tie->tim_count--; /* update state for buffered multicast frames on DTIM */ if (mcast && tie->tim_count == 0) tie->tim_bitctl |= 1; else tie->tim_bitctl &= ~1; if (isset(bo->bo_flags, IEEE80211_BEACON_CSA)) { struct ieee80211_csa_ie *csa = (struct ieee80211_csa_ie *) bo->bo_csa; /* * Insert or update CSA ie. If we're just starting * to count down to the channel switch then we need * to insert the CSA ie. Otherwise we just need to * drop the count. The actual change happens above * when the vap's count reaches the target count. */ if (vap->iv_csa_count == 0) { memmove(&csa[1], csa, bo->bo_csa_trailer_len); bo->bo_erp += sizeof(*csa); bo->bo_htinfo += sizeof(*csa); bo->bo_vhtinfo += sizeof(*csa); bo->bo_wme += sizeof(*csa); #ifdef IEEE80211_SUPPORT_SUPERG bo->bo_ath += sizeof(*csa); #endif #ifdef IEEE80211_SUPPORT_TDMA bo->bo_tdma += sizeof(*csa); #endif #ifdef IEEE80211_SUPPORT_MESH bo->bo_meshconf += sizeof(*csa); #endif bo->bo_appie += sizeof(*csa); bo->bo_csa_trailer_len += sizeof(*csa); bo->bo_quiet += sizeof(*csa); bo->bo_tim_trailer_len += sizeof(*csa); m->m_len += sizeof(*csa); m->m_pkthdr.len += sizeof(*csa); ieee80211_add_csa(bo->bo_csa, vap); } else csa->csa_count--; vap->iv_csa_count++; /* NB: don't clear IEEE80211_BEACON_CSA */ } /* * Only add the quiet time IE if we've enabled it * as appropriate.
*/ if (IEEE80211_IS_CHAN_DFS(ic->ic_bsschan) && (vap->iv_flags_ext & IEEE80211_FEXT_DFS)) { if (vap->iv_quiet && (vap->iv_flags_ext & IEEE80211_FEXT_QUIET_IE)) { ieee80211_add_quiet(bo->bo_quiet, vap, 1); } } if (isset(bo->bo_flags, IEEE80211_BEACON_ERP)) { /* * ERP element needs updating. */ (void) ieee80211_add_erp(bo->bo_erp, ic); clrbit(bo->bo_flags, IEEE80211_BEACON_ERP); } #ifdef IEEE80211_SUPPORT_SUPERG if (isset(bo->bo_flags, IEEE80211_BEACON_ATH)) { ieee80211_add_athcaps(bo->bo_ath, ni); clrbit(bo->bo_flags, IEEE80211_BEACON_ATH); } #endif } if (isset(bo->bo_flags, IEEE80211_BEACON_APPIE)) { const struct ieee80211_appie *aie = vap->iv_appie_beacon; int aielen; uint8_t *frm; aielen = 0; if (aie != NULL) aielen += aie->ie_len; if (aielen != bo->bo_appie_len) { /* copy up/down trailer */ int adjust = aielen - bo->bo_appie_len; ovbcopy(bo->bo_tim_trailer, bo->bo_tim_trailer+adjust, bo->bo_tim_trailer_len); bo->bo_tim_trailer += adjust; bo->bo_appie += adjust; bo->bo_appie_len = aielen; len_changed = 1; } frm = bo->bo_appie; if (aie != NULL) frm = add_appie(frm, aie); clrbit(bo->bo_flags, IEEE80211_BEACON_APPIE); } IEEE80211_UNLOCK(ic); return len_changed; } /* * Do Ethernet-LLC encapsulation for each payload in a fast frame * tunnel encapsulation. The frame is assumed to have an Ethernet * header at the front that must be stripped before prepending the * LLC followed by the Ethernet header passed in (with an Ethernet * type that specifies the payload size). */ struct mbuf * ieee80211_ff_encap1(struct ieee80211vap *vap, struct mbuf *m, const struct ether_header *eh) { struct llc *llc; uint16_t payload; /* XXX optimize by combining m_adj+M_PREPEND */ m_adj(m, sizeof(struct ether_header) - sizeof(struct llc)); llc = mtod(m, struct llc *); llc->llc_dsap = llc->llc_ssap = LLC_SNAP_LSAP; llc->llc_control = LLC_UI; llc->llc_snap.org_code[0] = 0; llc->llc_snap.org_code[1] = 0; llc->llc_snap.org_code[2] = 0; llc->llc_snap.ether_type = eh->ether_type; payload = m->m_pkthdr.len; /* NB: w/o Ethernet header */ M_PREPEND(m, sizeof(struct ether_header), M_NOWAIT); if (m == NULL) { /* XXX cannot happen */ IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG, "%s: no space for ether_header\n", __func__); vap->iv_stats.is_tx_nobuf++; return NULL; } ETHER_HEADER_COPY(mtod(m, void *), eh); mtod(m, struct ether_header *)->ether_type = htons(payload); return m; } /* * Complete an mbuf transmission. * * For now, this simply processes a completed frame after the * driver has completed its transmission and/or retransmission. * It assumes the frame is an 802.11 encapsulated frame. * * Later on it will grow to become the exit path for a given frame * from the driver and, depending upon how it's been encapsulated * and already transmitted, it may end up doing A-MPDU retransmission, * power save requeuing, etc. * * In order for the above to work, the driver entry point to this * must not hold any driver locks. Thus, the driver needs to delay * any actual mbuf completion until it can release said locks. * * This frees the mbuf and if the mbuf has a node reference, * the node reference will be freed.
*/ void ieee80211_tx_complete(struct ieee80211_node *ni, struct mbuf *m, int status) { if (ni != NULL) { struct ifnet *ifp = ni->ni_vap->iv_ifp; if (status == 0) { if_inc_counter(ifp, IFCOUNTER_OBYTES, m->m_pkthdr.len); if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); if (m->m_flags & M_MCAST) if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1); } else if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); if (m->m_flags & M_TXCB) ieee80211_process_callback(ni, m, status); ieee80211_free_node(ni); } m_freem(m); } Index: head/sys/net80211/ieee80211_proto.h =================================================================== --- head/sys/net80211/ieee80211_proto.h (revision 330687) +++ head/sys/net80211/ieee80211_proto.h (revision 330688) @@ -1,440 +1,442 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2001 Atsushi Onoe * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _NET80211_IEEE80211_PROTO_H_ #define _NET80211_IEEE80211_PROTO_H_ /* * 802.11 protocol implementation definitions. */ enum ieee80211_state { IEEE80211_S_INIT = 0, /* default state */ IEEE80211_S_SCAN = 1, /* scanning */ IEEE80211_S_AUTH = 2, /* try to authenticate */ IEEE80211_S_ASSOC = 3, /* try to assoc */ IEEE80211_S_CAC = 4, /* doing channel availability check */ IEEE80211_S_RUN = 5, /* operational (e.g. 
associated) */ IEEE80211_S_CSA = 6, /* channel switch announce pending */ IEEE80211_S_SLEEP = 7, /* power save */ }; #define IEEE80211_S_MAX (IEEE80211_S_SLEEP+1) #define IEEE80211_SEND_MGMT(_ni,_type,_arg) \ ((*(_ni)->ni_ic->ic_send_mgmt)(_ni, _type, _arg)) extern const char *mgt_subtype_name[]; extern const char *ctl_subtype_name[]; extern const char *ieee80211_phymode_name[IEEE80211_MODE_MAX]; extern const int ieee80211_opcap[IEEE80211_OPMODE_MAX]; static __inline const char * ieee80211_mgt_subtype_name(uint8_t subtype) { return mgt_subtype_name[(subtype & IEEE80211_FC0_SUBTYPE_MASK) >> IEEE80211_FC0_SUBTYPE_SHIFT]; } static __inline const char * ieee80211_ctl_subtype_name(uint8_t subtype) { return ctl_subtype_name[(subtype & IEEE80211_FC0_SUBTYPE_MASK) >> IEEE80211_FC0_SUBTYPE_SHIFT]; } const char *ieee80211_reason_to_string(uint16_t); void ieee80211_proto_attach(struct ieee80211com *); void ieee80211_proto_detach(struct ieee80211com *); void ieee80211_proto_vattach(struct ieee80211vap *); void ieee80211_proto_vdetach(struct ieee80211vap *); void ieee80211_promisc(struct ieee80211vap *, bool); void ieee80211_allmulti(struct ieee80211vap *, bool); void ieee80211_syncflag(struct ieee80211vap *, int flag); void ieee80211_syncflag_ht(struct ieee80211vap *, int flag); void ieee80211_syncflag_vht(struct ieee80211vap *, int flag); void ieee80211_syncflag_ext(struct ieee80211vap *, int flag); #define ieee80211_input(ni, m, rssi, nf) \ ((ni)->ni_vap->iv_input(ni, m, NULL, rssi, nf)) int ieee80211_input_all(struct ieee80211com *, struct mbuf *, int, int); int ieee80211_input_mimo(struct ieee80211_node *, struct mbuf *); int ieee80211_input_mimo_all(struct ieee80211com *, struct mbuf *); struct ieee80211_bpf_params; int ieee80211_mgmt_output(struct ieee80211_node *, struct mbuf *, int, struct ieee80211_bpf_params *); int ieee80211_raw_xmit(struct ieee80211_node *, struct mbuf *, const struct ieee80211_bpf_params *); int ieee80211_output(struct ifnet *, struct mbuf *, const struct sockaddr *, struct route *ro); int ieee80211_vap_pkt_send_dest(struct ieee80211vap *, struct mbuf *, struct ieee80211_node *); int ieee80211_raw_output(struct ieee80211vap *, struct ieee80211_node *, struct mbuf *, const struct ieee80211_bpf_params *); void ieee80211_send_setup(struct ieee80211_node *, struct mbuf *, int, int, const uint8_t [IEEE80211_ADDR_LEN], const uint8_t [IEEE80211_ADDR_LEN], const uint8_t [IEEE80211_ADDR_LEN]); int ieee80211_vap_transmit(struct ifnet *ifp, struct mbuf *m); void ieee80211_vap_qflush(struct ifnet *ifp); int ieee80211_send_nulldata(struct ieee80211_node *); int ieee80211_classify(struct ieee80211_node *, struct mbuf *m); struct mbuf *ieee80211_mbuf_adjust(struct ieee80211vap *, int, struct ieee80211_key *, struct mbuf *); struct mbuf *ieee80211_encap(struct ieee80211vap *, struct ieee80211_node *, struct mbuf *); void ieee80211_free_mbuf(struct mbuf *); int ieee80211_send_mgmt(struct ieee80211_node *, int, int); struct ieee80211_appie; int ieee80211_send_probereq(struct ieee80211_node *ni, const uint8_t sa[IEEE80211_ADDR_LEN], const uint8_t da[IEEE80211_ADDR_LEN], const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t *ssid, size_t ssidlen); struct mbuf * ieee80211_ff_encap1(struct ieee80211vap *, struct mbuf *, const struct ether_header *); void ieee80211_tx_complete(struct ieee80211_node *, struct mbuf *, int); /* * The formation of ProbeResponse frames requires guidance to * deal with legacy clients. 
When the client is identified as * "legacy 11b" ieee80211_send_proberesp is passed this token. */ #define IEEE80211_SEND_LEGACY_11B 0x1 /* legacy 11b client */ #define IEEE80211_SEND_LEGACY_11 0x2 /* other legacy client */ #define IEEE80211_SEND_LEGACY 0x3 /* any legacy client */ struct mbuf *ieee80211_alloc_proberesp(struct ieee80211_node *, int); int ieee80211_send_proberesp(struct ieee80211vap *, const uint8_t da[IEEE80211_ADDR_LEN], int); struct mbuf *ieee80211_alloc_rts(struct ieee80211com *ic, const uint8_t [IEEE80211_ADDR_LEN], const uint8_t [IEEE80211_ADDR_LEN], uint16_t); struct mbuf *ieee80211_alloc_cts(struct ieee80211com *, const uint8_t [IEEE80211_ADDR_LEN], uint16_t); +struct mbuf *ieee80211_alloc_prot(struct ieee80211_node *, + const struct mbuf *, uint8_t, int); uint8_t *ieee80211_add_rates(uint8_t *, const struct ieee80211_rateset *); uint8_t *ieee80211_add_xrates(uint8_t *, const struct ieee80211_rateset *); uint8_t *ieee80211_add_ssid(uint8_t *, const uint8_t *, u_int); uint8_t *ieee80211_add_wpa(uint8_t *, const struct ieee80211vap *); uint8_t *ieee80211_add_rsn(uint8_t *, const struct ieee80211vap *); uint8_t *ieee80211_add_qos(uint8_t *, const struct ieee80211_node *); uint16_t ieee80211_getcapinfo(struct ieee80211vap *, struct ieee80211_channel *); struct ieee80211_wme_state; uint8_t * ieee80211_add_wme_info(uint8_t *frm, struct ieee80211_wme_state *wme); void ieee80211_reset_erp(struct ieee80211com *); void ieee80211_set_shortslottime(struct ieee80211com *, int onoff); int ieee80211_iserp_rateset(const struct ieee80211_rateset *); void ieee80211_setbasicrates(struct ieee80211_rateset *, enum ieee80211_phymode); void ieee80211_addbasicrates(struct ieee80211_rateset *, enum ieee80211_phymode); /* * Return the size of the 802.11 header for a management or data frame. */ static __inline int ieee80211_hdrsize(const void *data) { const struct ieee80211_frame *wh = data; int size = sizeof(struct ieee80211_frame); /* NB: we don't handle control frames */ KASSERT((wh->i_fc[0]&IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL, ("%s: control frame", __func__)); if (IEEE80211_IS_DSTODS(wh)) size += IEEE80211_ADDR_LEN; if (IEEE80211_QOS_HAS_SEQ(wh)) size += sizeof(uint16_t); return size; } /* * Like ieee80211_hdrsize, but handles any type of frame. */ static __inline int ieee80211_anyhdrsize(const void *data) { const struct ieee80211_frame *wh = data; if ((wh->i_fc[0]&IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_CTL) { switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) { case IEEE80211_FC0_SUBTYPE_CTS: case IEEE80211_FC0_SUBTYPE_ACK: return sizeof(struct ieee80211_frame_ack); case IEEE80211_FC0_SUBTYPE_BAR: return sizeof(struct ieee80211_frame_bar); } return sizeof(struct ieee80211_frame_min); } else return ieee80211_hdrsize(data); } /* * Template for an in-kernel authenticator. Authenticators * register with the protocol code and are typically loaded * as separate modules as needed. One special authenticator * is xauth; it intercepts requests so that protocols like * WPA can be handled in user space. 
*/ struct ieee80211_authenticator { const char *ia_name; /* printable name */ int (*ia_attach)(struct ieee80211vap *); void (*ia_detach)(struct ieee80211vap *); void (*ia_node_join)(struct ieee80211_node *); void (*ia_node_leave)(struct ieee80211_node *); }; void ieee80211_authenticator_register(int type, const struct ieee80211_authenticator *); void ieee80211_authenticator_unregister(int type); const struct ieee80211_authenticator *ieee80211_authenticator_get(int auth); struct ieee80211req; /* * Template for a MAC ACL policy module. Such modules * register with the protocol code and are passed the sender's * address of each received auth frame for validation. */ struct ieee80211_aclator { const char *iac_name; /* printable name */ int (*iac_attach)(struct ieee80211vap *); void (*iac_detach)(struct ieee80211vap *); int (*iac_check)(struct ieee80211vap *, const struct ieee80211_frame *wh); int (*iac_add)(struct ieee80211vap *, const uint8_t mac[IEEE80211_ADDR_LEN]); int (*iac_remove)(struct ieee80211vap *, const uint8_t mac[IEEE80211_ADDR_LEN]); int (*iac_flush)(struct ieee80211vap *); int (*iac_setpolicy)(struct ieee80211vap *, int); int (*iac_getpolicy)(struct ieee80211vap *); int (*iac_setioctl)(struct ieee80211vap *, struct ieee80211req *); int (*iac_getioctl)(struct ieee80211vap *, struct ieee80211req *); }; void ieee80211_aclator_register(const struct ieee80211_aclator *); void ieee80211_aclator_unregister(const struct ieee80211_aclator *); const struct ieee80211_aclator *ieee80211_aclator_get(const char *name); /* flags for ieee80211_fix_rate() */ #define IEEE80211_F_DOSORT 0x00000001 /* sort rate list */ #define IEEE80211_F_DOFRATE 0x00000002 /* use fixed legacy rate */ #define IEEE80211_F_DONEGO 0x00000004 /* calc negotiated rate */ #define IEEE80211_F_DODEL 0x00000008 /* delete ignore rate */ #define IEEE80211_F_DOBRS 0x00000010 /* check basic rate set */ #define IEEE80211_F_JOIN 0x00000020 /* sta joining our bss */ #define IEEE80211_F_DOFMCS 0x00000040 /* use fixed HT rate */ int ieee80211_fix_rate(struct ieee80211_node *, struct ieee80211_rateset *, int); /* * WME/WMM support.
*/ struct wmeParams { uint8_t wmep_acm; uint8_t wmep_aifsn; uint8_t wmep_logcwmin; /* log2(cwmin) */ uint8_t wmep_logcwmax; /* log2(cwmax) */ uint8_t wmep_txopLimit; uint8_t wmep_noackPolicy; /* 0 (ack), 1 (no ack) */ }; #define IEEE80211_TXOP_TO_US(_txop) ((_txop)<<5) #define IEEE80211_US_TO_TXOP(_us) ((_us)>>5) struct chanAccParams { uint8_t cap_info; /* version of the current set */ struct wmeParams cap_wmeParams[WME_NUM_AC]; }; struct ieee80211_wme_state { u_int wme_flags; #define WME_F_AGGRMODE 0x00000001 /* STATUS: WME aggressive mode */ u_int wme_hipri_traffic; /* VI/VO frames in beacon interval */ u_int wme_hipri_switch_thresh;/* aggressive mode switch thresh */ u_int wme_hipri_switch_hysteresis;/* aggressive mode switch hysteresis */ struct wmeParams wme_params[4]; /* from assoc resp for each AC*/ struct chanAccParams wme_wmeChanParams; /* WME params applied to self */ struct chanAccParams wme_wmeBssChanParams;/* WME params bcast to stations */ struct chanAccParams wme_chanParams; /* params applied to self */ struct chanAccParams wme_bssChanParams; /* params bcast to stations */ int (*wme_update)(struct ieee80211com *); }; void ieee80211_wme_initparams(struct ieee80211vap *); void ieee80211_wme_updateparams(struct ieee80211vap *); void ieee80211_wme_updateparams_locked(struct ieee80211vap *); void ieee80211_wme_vap_getparams(struct ieee80211vap *vap, struct chanAccParams *); void ieee80211_wme_ic_getparams(struct ieee80211com *ic, struct chanAccParams *); int ieee80211_wme_vap_ac_is_noack(struct ieee80211vap *vap, int ac); /* * Return the WME TID from a QoS frame. If no TID * is present return the index for the "non-QoS" entry. */ static __inline uint8_t ieee80211_gettid(const struct ieee80211_frame *wh) { uint8_t tid; if (IEEE80211_QOS_HAS_SEQ(wh)) { if (IEEE80211_IS_DSTODS(wh)) tid = ((const struct ieee80211_qosframe_addr4 *)wh)-> i_qos[0]; else tid = ((const struct ieee80211_qosframe *)wh)->i_qos[0]; tid &= IEEE80211_QOS_TID; } else tid = IEEE80211_NONQOS_TID; return tid; } void ieee80211_waitfor_parent(struct ieee80211com *); void ieee80211_start_locked(struct ieee80211vap *); void ieee80211_init(void *); void ieee80211_start_all(struct ieee80211com *); void ieee80211_stop_locked(struct ieee80211vap *); void ieee80211_stop(struct ieee80211vap *); void ieee80211_stop_all(struct ieee80211com *); void ieee80211_suspend_all(struct ieee80211com *); void ieee80211_resume_all(struct ieee80211com *); void ieee80211_restart_all(struct ieee80211com *); void ieee80211_dturbo_switch(struct ieee80211vap *, int newflags); void ieee80211_swbmiss(void *arg); void ieee80211_beacon_miss(struct ieee80211com *); int ieee80211_new_state(struct ieee80211vap *, enum ieee80211_state, int); int ieee80211_new_state_locked(struct ieee80211vap *, enum ieee80211_state, int); void ieee80211_print_essid(const uint8_t *, int); void ieee80211_dump_pkt(struct ieee80211com *, const uint8_t *, int, int, int); extern const char *ieee80211_opmode_name[]; extern const char *ieee80211_state_name[IEEE80211_S_MAX]; extern const char *ieee80211_wme_acnames[]; /* * Beacon frames constructed by ieee80211_beacon_alloc * have the following structure filled in so drivers * can update the frame later w/ minimal overhead. 
struct ieee80211_beacon_offsets { uint8_t bo_flags[4]; /* update/state flags */ uint16_t *bo_caps; /* capabilities */ uint8_t *bo_cfp; /* start of CFParms element */ uint8_t *bo_tim; /* start of atim/dtim */ uint8_t *bo_wme; /* start of WME parameters */ uint8_t *bo_tdma; /* start of TDMA parameters */ uint8_t *bo_tim_trailer;/* start of fixed-size trailer */ uint16_t bo_tim_len; /* atim/dtim length in bytes */ uint16_t bo_tim_trailer_len;/* tim trailer length in bytes */ uint8_t *bo_erp; /* start of ERP element */ uint8_t *bo_htinfo; /* start of HT info element */ uint8_t *bo_ath; /* start of ATH parameters */ uint8_t *bo_appie; /* start of AppIE element */ uint16_t bo_appie_len; /* AppIE length in bytes */ uint16_t bo_csa_trailer_len; uint8_t *bo_csa; /* start of CSA element */ uint8_t *bo_quiet; /* start of Quiet element */ uint8_t *bo_meshconf; /* start of MESHCONF element */ uint8_t *bo_vhtinfo; /* start of VHT info element (XXX VHTCAP?) */ uint8_t *bo_spare[2]; }; struct mbuf *ieee80211_beacon_alloc(struct ieee80211_node *); /* * Beacon frame updates are signaled through calls to iv_update_beacon * with one of the IEEE80211_BEACON_* tokens defined below. For devices * that construct beacon frames on the host this can trigger a rebuild * or defer the processing. For devices that offload beacon frame * handling this callback can be used to signal a rebuild. The bo_flags * array in the ieee80211_beacon_offsets structure is intended to record * deferred processing requirements; ieee80211_beacon_update uses the * state to optimize work. Since this structure is owned by the driver * and not visible to the 802.11 layer, drivers must supply an iv_update_beacon * callback that marks the flag bits and schedules (as necessary) an update. */ enum { IEEE80211_BEACON_CAPS = 0, /* capabilities */ IEEE80211_BEACON_TIM = 1, /* DTIM/ATIM */ IEEE80211_BEACON_WME = 2, IEEE80211_BEACON_ERP = 3, /* Extended Rate Phy */ IEEE80211_BEACON_HTINFO = 4, /* HT Information */ IEEE80211_BEACON_APPIE = 5, /* Application IE's */ IEEE80211_BEACON_CFP = 6, /* CFParms */ IEEE80211_BEACON_CSA = 7, /* Channel Switch Announcement */ IEEE80211_BEACON_TDMA = 9, /* TDMA Info */ IEEE80211_BEACON_ATH = 10, /* ATH parameters */ IEEE80211_BEACON_MESHCONF = 11, /* Mesh Configuration */ IEEE80211_BEACON_QUIET = 12, /* Quiet time IE */ IEEE80211_BEACON_VHTINFO = 13, /* VHT information */ }; int ieee80211_beacon_update(struct ieee80211_node *, struct mbuf *, int mcast); void ieee80211_csa_startswitch(struct ieee80211com *, struct ieee80211_channel *, int mode, int count); void ieee80211_csa_completeswitch(struct ieee80211com *); void ieee80211_csa_cancelswitch(struct ieee80211com *); void ieee80211_cac_completeswitch(struct ieee80211vap *); /* * Notification methods called from the 802.11 state machine. * Note that while these are defined here, their implementation * is OS-specific.
*/ void ieee80211_notify_node_join(struct ieee80211_node *, int newassoc); void ieee80211_notify_node_leave(struct ieee80211_node *); void ieee80211_notify_scan_done(struct ieee80211vap *); void ieee80211_notify_wds_discover(struct ieee80211_node *); void ieee80211_notify_csa(struct ieee80211com *, const struct ieee80211_channel *, int mode, int count); void ieee80211_notify_radar(struct ieee80211com *, const struct ieee80211_channel *); enum ieee80211_notify_cac_event { IEEE80211_NOTIFY_CAC_START = 0, /* CAC timer started */ IEEE80211_NOTIFY_CAC_STOP = 1, /* CAC intentionally stopped */ IEEE80211_NOTIFY_CAC_RADAR = 2, /* CAC stopped due to radar detection */ IEEE80211_NOTIFY_CAC_EXPIRE = 3, /* CAC expired w/o radar */ }; void ieee80211_notify_cac(struct ieee80211com *, const struct ieee80211_channel *, enum ieee80211_notify_cac_event); void ieee80211_notify_node_deauth(struct ieee80211_node *); void ieee80211_notify_node_auth(struct ieee80211_node *); void ieee80211_notify_country(struct ieee80211vap *, const uint8_t [], const uint8_t cc[2]); void ieee80211_notify_radio(struct ieee80211com *, int); #endif /* _NET80211_IEEE80211_PROTO_H_ */
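For reference, a minimal sketch of how a driver transmit path might use the new ieee80211_alloc_prot() wrapper exported above. The wrapper computes the protection duration and chooses between ieee80211_alloc_rts() and ieee80211_alloc_cts(), so a caller only has to pick the protection mode. The sketch is illustrative and not part of this change: the mydrv_softc type, the mydrv_submit() helper, and the simplified protection policy are assumptions; a real driver applies its own rate selection, error accounting, and descriptor handling.

/* Assumed headers: <net80211/ieee80211_var.h> and <net80211/ieee80211_proto.h>. */
static int
mydrv_tx_start(struct mydrv_softc *sc, struct ieee80211_node *ni,
    struct mbuf *m, uint8_t rate)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct ieee80211vap *vap = ni->ni_vap;
	struct mbuf *mprot;
	int prot;

	/*
	 * Simplified protection policy (an assumption, not net80211's
	 * policy): use RTS/CTS above the RTS threshold, otherwise fall
	 * back to the current ERP protection mode when it is in force.
	 */
	prot = IEEE80211_PROT_NONE;
	if (m->m_pkthdr.len + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
		prot = IEEE80211_PROT_RTSCTS;
	else if (ic->ic_flags & IEEE80211_F_USEPROT)
		prot = ic->ic_protmode;

	if (prot == IEEE80211_PROT_RTSCTS || prot == IEEE80211_PROT_CTSONLY) {
		/* Build the RTS or CTS-to-self frame covering 'm' at 'rate'. */
		mprot = ieee80211_alloc_prot(ni, m, rate, prot);
		if (mprot == NULL) {
			/* XXX should be counted; here we simply go unprotected */
			prot = IEEE80211_PROT_NONE;
		} else
			mydrv_submit(sc, mprot);	/* protection frame first */
	}
	return (mydrv_submit(sc, m));			/* then the data frame */
}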