Page MenuHomeFreeBSD

D49259.id151965.diff
No OneTemporary

D49259.id151965.diff

This file is larger than 256 KB, so syntax highlighting was skipped.
diff --git a/sys/dev/iwx/if_iwx.c b/sys/dev/iwx/if_iwx.c
new file mode 100644
--- /dev/null
+++ b/sys/dev/iwx/if_iwx.c
@@ -0,0 +1,10927 @@
+/* $OpenBSD: if_iwx.c,v 1.175 2023/07/05 15:07:28 stsp Exp $ */
+
+/*-
+ * Copyright (c) 2024 Future Crew, LLC
+ * Author: Mikhail Pchelin <misha@FreeBSD.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * Copyright (c) 2014, 2016 genua gmbh <info@genua.de>
+ * Author: Stefan Sperling <stsp@openbsd.org>
+ * Copyright (c) 2014 Fixup Software Ltd.
+ * Copyright (c) 2017, 2019, 2020 Stefan Sperling <stsp@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*-
+ * Based on BSD-licensed source modules in the Linux iwlwifi driver,
+ * which were used as the reference documentation for this implementation.
+ *
+ ******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 - 2019 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 - 2019 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************
+ */
+
+/*-
+ * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/module.h>
+#include <sys/conf.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/mutex.h>
+#include <sys/proc.h>
+#include <sys/rman.h>
+#include <sys/rwlock.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+#include <sys/systm.h>
+#include <sys/endian.h>
+#include <sys/linker.h>
+#include <sys/firmware.h>
+#include <sys/epoch.h>
+#include <sys/kdb.h>
+
+#include <machine/bus.h>
+#include <machine/endian.h>
+#include <machine/resource.h>
+
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+
+#include <net/bpf.h>
+
+#include <net/if.h>
+#include <net/if_var.h>
+#include <net/if_dl.h>
+#include <net/if_media.h>
+
+#include <netinet/in.h>
+#include <netinet/if_ether.h>
+
+#include <net80211/ieee80211_var.h>
+#include <net80211/ieee80211_radiotap.h>
+#include <net80211/ieee80211_regdomain.h>
+#include <net80211/ieee80211_ratectl.h>
+#include <net80211/ieee80211_vht.h>
+
+int iwx_himark = 224;
+int iwx_lomark = 192;
+
+#define IWX_FBSD_RSP_V3 3
+#define IWX_FBSD_RSP_V4 4
+
+#define DEVNAME(_sc) (device_get_nameunit((_sc)->sc_dev))
+#define IC2IFP(ic) (((struct ieee80211vap *)TAILQ_FIRST(&(ic)->ic_vaps))->iv_ifp)
+
+#define le16_to_cpup(_a_) (le16toh(*(const uint16_t *)(_a_)))
+#define le32_to_cpup(_a_) (le32toh(*(const uint32_t *)(_a_)))
+
+#include <dev/iwx/if_iwxreg.h>
+#include <dev/iwx/if_iwxvar.h>
+
+#include <dev/iwx/if_iwx_debug.h>
+
+#define PCI_VENDOR_INTEL 0x8086
+#define PCI_PRODUCT_INTEL_WL_22500_1 0x2723 /* Wi-Fi 6 AX200 */
+#define PCI_PRODUCT_INTEL_WL_22500_2 0x02f0 /* Wi-Fi 6 AX201 */
+#define PCI_PRODUCT_INTEL_WL_22500_3 0xa0f0 /* Wi-Fi 6 AX201 */
+#define PCI_PRODUCT_INTEL_WL_22500_4 0x34f0 /* Wi-Fi 6 AX201 */
+#define PCI_PRODUCT_INTEL_WL_22500_5 0x06f0 /* Wi-Fi 6 AX201 */
+#define PCI_PRODUCT_INTEL_WL_22500_6 0x43f0 /* Wi-Fi 6 AX201 */
+#define PCI_PRODUCT_INTEL_WL_22500_7 0x3df0 /* Wi-Fi 6 AX201 */
+#define PCI_PRODUCT_INTEL_WL_22500_8 0x4df0 /* Wi-Fi 6 AX201 */
+#define PCI_PRODUCT_INTEL_WL_22500_9 0x2725 /* Wi-Fi 6 AX210 */
+#define PCI_PRODUCT_INTEL_WL_22500_10 0x2726 /* Wi-Fi 6 AX211 */
+#define PCI_PRODUCT_INTEL_WL_22500_11 0x51f0 /* Wi-Fi 6 AX211 */
+#define PCI_PRODUCT_INTEL_WL_22500_12 0x7a70 /* Wi-Fi 6 AX211 */
+#define PCI_PRODUCT_INTEL_WL_22500_13 0x7af0 /* Wi-Fi 6 AX211 */
+#define PCI_PRODUCT_INTEL_WL_22500_14 0x7e40 /* Wi-Fi 6 AX210 */
+#define PCI_PRODUCT_INTEL_WL_22500_15 0x7f70 /* Wi-Fi 6 AX211 */
+#define PCI_PRODUCT_INTEL_WL_22500_16 0x54f0 /* Wi-Fi 6 AX211 */
+#define PCI_PRODUCT_INTEL_WL_22500_17 0x51f1 /* Wi-Fi 6 AX211 */
+
/*
 * PCI device IDs matched by this driver, paired with the marketing
 * name reported in the probe message.
 */
static const struct iwx_devices {
 uint16_t device;
 char *name;
} iwx_devices[] = {
 { PCI_PRODUCT_INTEL_WL_22500_1, "Wi-Fi 6 AX200" },
 { PCI_PRODUCT_INTEL_WL_22500_2, "Wi-Fi 6 AX201" },
 { PCI_PRODUCT_INTEL_WL_22500_3, "Wi-Fi 6 AX201" },
 { PCI_PRODUCT_INTEL_WL_22500_4, "Wi-Fi 6 AX201" },
 { PCI_PRODUCT_INTEL_WL_22500_5, "Wi-Fi 6 AX201" },
 { PCI_PRODUCT_INTEL_WL_22500_6, "Wi-Fi 6 AX201" },
 { PCI_PRODUCT_INTEL_WL_22500_7, "Wi-Fi 6 AX201" },
 { PCI_PRODUCT_INTEL_WL_22500_8, "Wi-Fi 6 AX201" },
 { PCI_PRODUCT_INTEL_WL_22500_9, "Wi-Fi 6 AX210" },
 { PCI_PRODUCT_INTEL_WL_22500_10, "Wi-Fi 6 AX211" },
 { PCI_PRODUCT_INTEL_WL_22500_11, "Wi-Fi 6 AX211" },
 { PCI_PRODUCT_INTEL_WL_22500_12, "Wi-Fi 6 AX211" },
 { PCI_PRODUCT_INTEL_WL_22500_13, "Wi-Fi 6 AX211" },
 { PCI_PRODUCT_INTEL_WL_22500_14, "Wi-Fi 6 AX210" },
 { PCI_PRODUCT_INTEL_WL_22500_15, "Wi-Fi 6 AX211" },
 { PCI_PRODUCT_INTEL_WL_22500_16, "Wi-Fi 6 AX211" },
 { PCI_PRODUCT_INTEL_WL_22500_17, "Wi-Fi 6 AX211" },
};
+
/*
 * NVM channel number list (2.4 GHz followed by 5 GHz channels) for
 * devices without ultra-high-band support.
 */
static const uint8_t iwx_nvm_channels_8000[] = {
 /* 2.4 GHz */
 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
 /* 5 GHz */
 36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
 96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
 149, 153, 157, 161, 165, 169, 173, 177, 181
};
+
/*
 * NVM channel number list for ultra-high-band capable devices: same
 * 2.4/5 GHz set as above, extended with the 6-7 GHz channel numbers.
 */
static const uint8_t iwx_nvm_channels_uhb[] = {
 /* 2.4 GHz */
 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
 /* 5 GHz */
 36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
 96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
 149, 153, 157, 161, 165, 169, 173, 177, 181,
 /* 6-7 GHz */
 1, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45, 49, 53, 57, 61, 65, 69,
 73, 77, 81, 85, 89, 93, 97, 101, 105, 109, 113, 117, 121, 125, 129,
 133, 137, 141, 145, 149, 153, 157, 161, 165, 169, 173, 177, 181, 185,
 189, 193, 197, 201, 205, 209, 213, 217, 221, 225, 229, 233
};
+
+#define IWX_NUM_2GHZ_CHANNELS 14
+#define IWX_NUM_5GHZ_CHANNELS 37
+
/*
 * Rate table mapping rate values to firmware PLCP codes, for legacy
 * and HT rates.  Entries using IWX_RATE_INVM_PLCP or
 * IWX_RATE_HT_SISO_MCS_INV_PLCP appear to mark rates with no valid
 * encoding in that column (legacy-only or HT-only rates).
 */
const struct iwx_rate {
 uint16_t rate;
 uint8_t plcp;
 uint8_t ht_plcp;
} iwx_rates[] = {
 /* Legacy */ /* HT */
 { 2, IWX_RATE_1M_PLCP, IWX_RATE_HT_SISO_MCS_INV_PLCP },
 { 4, IWX_RATE_2M_PLCP, IWX_RATE_HT_SISO_MCS_INV_PLCP },
 { 11, IWX_RATE_5M_PLCP, IWX_RATE_HT_SISO_MCS_INV_PLCP },
 { 22, IWX_RATE_11M_PLCP, IWX_RATE_HT_SISO_MCS_INV_PLCP },
 { 12, IWX_RATE_6M_PLCP, IWX_RATE_HT_SISO_MCS_0_PLCP },
 { 18, IWX_RATE_9M_PLCP, IWX_RATE_HT_SISO_MCS_INV_PLCP },
 { 24, IWX_RATE_12M_PLCP, IWX_RATE_HT_SISO_MCS_1_PLCP },
 { 26, IWX_RATE_INVM_PLCP, IWX_RATE_HT_MIMO2_MCS_8_PLCP },
 { 36, IWX_RATE_18M_PLCP, IWX_RATE_HT_SISO_MCS_2_PLCP },
 { 48, IWX_RATE_24M_PLCP, IWX_RATE_HT_SISO_MCS_3_PLCP },
 { 52, IWX_RATE_INVM_PLCP, IWX_RATE_HT_MIMO2_MCS_9_PLCP },
 { 72, IWX_RATE_36M_PLCP, IWX_RATE_HT_SISO_MCS_4_PLCP },
 { 78, IWX_RATE_INVM_PLCP, IWX_RATE_HT_MIMO2_MCS_10_PLCP },
 { 96, IWX_RATE_48M_PLCP, IWX_RATE_HT_SISO_MCS_5_PLCP },
 { 104, IWX_RATE_INVM_PLCP, IWX_RATE_HT_MIMO2_MCS_11_PLCP },
 { 108, IWX_RATE_54M_PLCP, IWX_RATE_HT_SISO_MCS_6_PLCP },
 { 128, IWX_RATE_INVM_PLCP, IWX_RATE_HT_SISO_MCS_7_PLCP },
 { 156, IWX_RATE_INVM_PLCP, IWX_RATE_HT_MIMO2_MCS_12_PLCP },
 { 208, IWX_RATE_INVM_PLCP, IWX_RATE_HT_MIMO2_MCS_13_PLCP },
 { 234, IWX_RATE_INVM_PLCP, IWX_RATE_HT_MIMO2_MCS_14_PLCP },
 { 260, IWX_RATE_INVM_PLCP, IWX_RATE_HT_MIMO2_MCS_15_PLCP },
};
+#define IWX_RIDX_CCK 0
+#define IWX_RIDX_OFDM 4
+#define IWX_RIDX_MAX (nitems(iwx_rates)-1)
+#define IWX_RIDX_IS_CCK(_i_) ((_i_) < IWX_RIDX_OFDM)
+#define IWX_RIDX_IS_OFDM(_i_) ((_i_) >= IWX_RIDX_OFDM)
+#define IWX_RVAL_IS_OFDM(_i_) ((_i_) >= 12 && (_i_) != 22)
+
/* Convert an MCS index into an iwx_rates[] index. */
/* Indexed by HT MCS number 0-15. */
const int iwx_mcs2ridx[] = {
 IWX_RATE_MCS_0_INDEX,
 IWX_RATE_MCS_1_INDEX,
 IWX_RATE_MCS_2_INDEX,
 IWX_RATE_MCS_3_INDEX,
 IWX_RATE_MCS_4_INDEX,
 IWX_RATE_MCS_5_INDEX,
 IWX_RATE_MCS_6_INDEX,
 IWX_RATE_MCS_7_INDEX,
 IWX_RATE_MCS_8_INDEX,
 IWX_RATE_MCS_9_INDEX,
 IWX_RATE_MCS_10_INDEX,
 IWX_RATE_MCS_11_INDEX,
 IWX_RATE_MCS_12_INDEX,
 IWX_RATE_MCS_13_INDEX,
 IWX_RATE_MCS_14_INDEX,
 IWX_RATE_MCS_15_INDEX,
};
+
+static uint8_t iwx_lookup_cmd_ver(struct iwx_softc *, uint8_t, uint8_t);
+static uint8_t iwx_lookup_notif_ver(struct iwx_softc *, uint8_t, uint8_t);
+static int iwx_store_cscheme(struct iwx_softc *, const uint8_t *, size_t);
+static int iwx_alloc_fw_monitor_block(struct iwx_softc *, uint8_t, uint8_t);
+static int iwx_alloc_fw_monitor(struct iwx_softc *, uint8_t);
+static int iwx_apply_debug_destination(struct iwx_softc *);
+static void iwx_set_ltr(struct iwx_softc *);
+static int iwx_ctxt_info_init(struct iwx_softc *, const struct iwx_fw_sects *);
+static int iwx_ctxt_info_gen3_init(struct iwx_softc *,
+ const struct iwx_fw_sects *);
+static void iwx_ctxt_info_free_fw_img(struct iwx_softc *);
+static void iwx_ctxt_info_free_paging(struct iwx_softc *);
+static int iwx_init_fw_sec(struct iwx_softc *, const struct iwx_fw_sects *,
+ struct iwx_context_info_dram *);
+static void iwx_fw_version_str(char *, size_t, uint32_t, uint32_t, uint32_t);
+static int iwx_firmware_store_section(struct iwx_softc *, enum iwx_ucode_type,
+ const uint8_t *, size_t);
+static int iwx_set_default_calib(struct iwx_softc *, const void *);
+static void iwx_fw_info_free(struct iwx_fw_info *);
+static int iwx_read_firmware(struct iwx_softc *);
+static uint32_t iwx_prph_addr_mask(struct iwx_softc *);
+static uint32_t iwx_read_prph_unlocked(struct iwx_softc *, uint32_t);
+static uint32_t iwx_read_prph(struct iwx_softc *, uint32_t);
+static void iwx_write_prph_unlocked(struct iwx_softc *, uint32_t, uint32_t);
+static void iwx_write_prph(struct iwx_softc *, uint32_t, uint32_t);
+static uint32_t iwx_read_umac_prph(struct iwx_softc *, uint32_t);
+static void iwx_write_umac_prph(struct iwx_softc *, uint32_t, uint32_t);
+static int iwx_read_mem(struct iwx_softc *, uint32_t, void *, int);
+static int iwx_poll_bit(struct iwx_softc *, int, uint32_t, uint32_t, int);
+static int iwx_nic_lock(struct iwx_softc *);
+static void iwx_nic_assert_locked(struct iwx_softc *);
+static void iwx_nic_unlock(struct iwx_softc *);
+static int iwx_set_bits_mask_prph(struct iwx_softc *, uint32_t, uint32_t,
+ uint32_t);
+static int iwx_set_bits_prph(struct iwx_softc *, uint32_t, uint32_t);
+static int iwx_clear_bits_prph(struct iwx_softc *, uint32_t, uint32_t);
+static void iwx_dma_map_addr(void *, bus_dma_segment_t *, int, int);
+static int iwx_dma_contig_alloc(bus_dma_tag_t, struct iwx_dma_info *,
+ bus_size_t, bus_size_t);
+static void iwx_dma_contig_free(struct iwx_dma_info *);
+static int iwx_alloc_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
+static void iwx_disable_rx_dma(struct iwx_softc *);
+static void iwx_reset_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
+static void iwx_free_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
+static int iwx_alloc_tx_ring(struct iwx_softc *, struct iwx_tx_ring *, int);
+static void iwx_reset_tx_ring(struct iwx_softc *, struct iwx_tx_ring *);
+static void iwx_free_tx_ring(struct iwx_softc *, struct iwx_tx_ring *);
+static void iwx_enable_rfkill_int(struct iwx_softc *);
+static int iwx_check_rfkill(struct iwx_softc *);
+static void iwx_enable_interrupts(struct iwx_softc *);
+static void iwx_enable_fwload_interrupt(struct iwx_softc *);
+static void iwx_restore_interrupts(struct iwx_softc *);
+static void iwx_disable_interrupts(struct iwx_softc *);
+static void iwx_ict_reset(struct iwx_softc *);
+static int iwx_set_hw_ready(struct iwx_softc *);
+static int iwx_prepare_card_hw(struct iwx_softc *);
+static int iwx_force_power_gating(struct iwx_softc *);
+static void iwx_apm_config(struct iwx_softc *);
+static int iwx_apm_init(struct iwx_softc *);
+static void iwx_apm_stop(struct iwx_softc *);
+static int iwx_allow_mcast(struct iwx_softc *);
+static void iwx_init_msix_hw(struct iwx_softc *);
+static void iwx_conf_msix_hw(struct iwx_softc *, int);
+static int iwx_clear_persistence_bit(struct iwx_softc *);
+static int iwx_start_hw(struct iwx_softc *);
+static void iwx_stop_device(struct iwx_softc *);
+static void iwx_nic_config(struct iwx_softc *);
+static int iwx_nic_rx_init(struct iwx_softc *);
+static int iwx_nic_init(struct iwx_softc *);
+static int iwx_enable_txq(struct iwx_softc *, int, int, int, int);
+static int iwx_disable_txq(struct iwx_softc *sc, int, int, uint8_t);
+static void iwx_post_alive(struct iwx_softc *);
+static int iwx_schedule_session_protection(struct iwx_softc *,
+ struct iwx_node *, uint32_t);
+static void iwx_unprotect_session(struct iwx_softc *, struct iwx_node *);
+static void iwx_init_channel_map(struct ieee80211com *, int, int *,
+ struct ieee80211_channel[]);
+static int iwx_mimo_enabled(struct iwx_softc *);
+static void iwx_init_reorder_buffer(struct iwx_reorder_buffer *, uint16_t,
+ uint16_t);
+static void iwx_clear_reorder_buffer(struct iwx_softc *, struct iwx_rxba_data *);
+static void iwx_sta_rx_agg(struct iwx_softc *, struct ieee80211_node *, uint8_t,
+ uint16_t, uint16_t, int, int);
+static void iwx_sta_tx_agg_start(struct iwx_softc *,
+ struct ieee80211_node *, uint8_t);
+static void iwx_ba_rx_task(void *, int);
+static void iwx_ba_tx_task(void *, int);
+static void iwx_set_mac_addr_from_csr(struct iwx_softc *, struct iwx_nvm_data *);
+static int iwx_is_valid_mac_addr(const uint8_t *);
+static void iwx_flip_hw_address(uint32_t, uint32_t, uint8_t *);
+static int iwx_nvm_get(struct iwx_softc *);
+static int iwx_load_firmware(struct iwx_softc *);
+static int iwx_start_fw(struct iwx_softc *);
+static int iwx_pnvm_handle_section(struct iwx_softc *, const uint8_t *, size_t);
+static int iwx_pnvm_parse(struct iwx_softc *, const uint8_t *, size_t);
+static void iwx_ctxt_info_gen3_set_pnvm(struct iwx_softc *);
+static int iwx_load_pnvm(struct iwx_softc *);
+static int iwx_send_tx_ant_cfg(struct iwx_softc *, uint8_t);
+static int iwx_send_phy_cfg_cmd(struct iwx_softc *);
+static int iwx_load_ucode_wait_alive(struct iwx_softc *);
+static int iwx_send_dqa_cmd(struct iwx_softc *);
+static int iwx_run_init_mvm_ucode(struct iwx_softc *, int);
+static int iwx_config_ltr(struct iwx_softc *);
+static void iwx_update_rx_desc(struct iwx_softc *, struct iwx_rx_ring *, int, bus_dma_segment_t *);
+static int iwx_rx_addbuf(struct iwx_softc *, int, int);
+static int iwx_rxmq_get_signal_strength(struct iwx_softc *, struct iwx_rx_mpdu_desc *);
+static void iwx_rx_rx_phy_cmd(struct iwx_softc *, struct iwx_rx_packet *,
+ struct iwx_rx_data *);
+static int iwx_get_noise(const struct iwx_statistics_rx_non_phy *);
+static int iwx_rx_hwdecrypt(struct iwx_softc *, struct mbuf *, uint32_t);
+#if 0
+int iwx_ccmp_decap(struct iwx_softc *, struct mbuf *,
+ struct ieee80211_node *, struct ieee80211_rxinfo *);
+#endif
+static void iwx_rx_frame(struct iwx_softc *, struct mbuf *, int, uint32_t,
+ int, int, uint32_t, uint8_t);
+static void iwx_clear_tx_desc(struct iwx_softc *, struct iwx_tx_ring *, int);
+static void iwx_txd_done(struct iwx_softc *, struct iwx_tx_ring *,
+ struct iwx_tx_data *);
+static void iwx_txq_advance(struct iwx_softc *, struct iwx_tx_ring *, uint16_t);
+static void iwx_rx_tx_cmd(struct iwx_softc *, struct iwx_rx_packet *,
+ struct iwx_rx_data *);
+static void iwx_clear_oactive(struct iwx_softc *, struct iwx_tx_ring *);
+static void iwx_rx_bmiss(struct iwx_softc *, struct iwx_rx_packet *,
+ struct iwx_rx_data *);
+static int iwx_binding_cmd(struct iwx_softc *, struct iwx_node *, uint32_t);
+static uint8_t iwx_get_vht_ctrl_pos(struct ieee80211com *, struct ieee80211_channel *);
+static int iwx_phy_ctxt_cmd_uhb_v3_v4(struct iwx_softc *,
+ struct iwx_phy_ctxt *, uint8_t, uint8_t, uint32_t, uint8_t, uint8_t, int);
+#if 0
+static int iwx_phy_ctxt_cmd_v3_v4(struct iwx_softc *, struct iwx_phy_ctxt *,
+ uint8_t, uint8_t, uint32_t, uint8_t, uint8_t, int);
+#endif
+static int iwx_phy_ctxt_cmd(struct iwx_softc *, struct iwx_phy_ctxt *,
+ uint8_t, uint8_t, uint32_t, uint32_t, uint8_t, uint8_t);
+static int iwx_send_cmd(struct iwx_softc *, struct iwx_host_cmd *);
+static int iwx_send_cmd_pdu(struct iwx_softc *, uint32_t, uint32_t, uint16_t,
+ const void *);
+static int iwx_send_cmd_status(struct iwx_softc *, struct iwx_host_cmd *,
+ uint32_t *);
+static int iwx_send_cmd_pdu_status(struct iwx_softc *, uint32_t, uint16_t,
+ const void *, uint32_t *);
+static void iwx_free_resp(struct iwx_softc *, struct iwx_host_cmd *);
+static void iwx_cmd_done(struct iwx_softc *, int, int, int);
+static uint32_t iwx_fw_rateidx_ofdm(uint8_t);
+static uint32_t iwx_fw_rateidx_cck(uint8_t);
+static const struct iwx_rate *iwx_tx_fill_cmd(struct iwx_softc *,
+ struct iwx_node *, struct ieee80211_frame *, uint16_t *, uint32_t *,
+ struct mbuf *);
+static void iwx_tx_update_byte_tbl(struct iwx_softc *, struct iwx_tx_ring *, int,
+ uint16_t, uint16_t);
+static int iwx_tx(struct iwx_softc *, struct mbuf *,
+ struct ieee80211_node *);
+static int iwx_flush_sta_tids(struct iwx_softc *, int, uint16_t);
+static int iwx_drain_sta(struct iwx_softc *sc, struct iwx_node *, int);
+static int iwx_flush_sta(struct iwx_softc *, struct iwx_node *);
+static int iwx_beacon_filter_send_cmd(struct iwx_softc *,
+ struct iwx_beacon_filter_cmd *);
+static int iwx_update_beacon_abort(struct iwx_softc *, struct iwx_node *,
+ int);
+static void iwx_power_build_cmd(struct iwx_softc *, struct iwx_node *,
+ struct iwx_mac_power_cmd *);
+static int iwx_power_mac_update_mode(struct iwx_softc *, struct iwx_node *);
+static int iwx_power_update_device(struct iwx_softc *);
+static int iwx_enable_beacon_filter(struct iwx_softc *, struct iwx_node *);
+static int iwx_disable_beacon_filter(struct iwx_softc *);
+static int iwx_add_sta_cmd(struct iwx_softc *, struct iwx_node *, int);
+static int iwx_rm_sta_cmd(struct iwx_softc *, struct iwx_node *);
+static int iwx_rm_sta(struct iwx_softc *, struct iwx_node *);
+static int iwx_fill_probe_req(struct iwx_softc *,
+ struct iwx_scan_probe_req *);
+static int iwx_config_umac_scan_reduced(struct iwx_softc *);
+static uint16_t iwx_scan_umac_flags_v2(struct iwx_softc *, int);
+static void iwx_scan_umac_dwell_v10(struct iwx_softc *,
+ struct iwx_scan_general_params_v10 *, int);
+static void iwx_scan_umac_fill_general_p_v10(struct iwx_softc *,
+ struct iwx_scan_general_params_v10 *, uint16_t, int);
+static void iwx_scan_umac_fill_ch_p_v6(struct iwx_softc *,
+ struct iwx_scan_channel_params_v6 *, uint32_t, int);
+static int iwx_umac_scan_v14(struct iwx_softc *, int);
+static void iwx_mcc_update(struct iwx_softc *, struct iwx_mcc_chub_notif *);
+static uint8_t iwx_ridx2rate(struct ieee80211_rateset *, int);
+static int iwx_rval2ridx(int);
+static void iwx_ack_rates(struct iwx_softc *, struct iwx_node *, int *,
+ int *);
+static void iwx_mac_ctxt_cmd_common(struct iwx_softc *, struct iwx_node *,
+ struct iwx_mac_ctx_cmd *, uint32_t);
+static void iwx_mac_ctxt_cmd_fill_sta(struct iwx_softc *, struct iwx_node *,
+ struct iwx_mac_data_sta *, int);
+static int iwx_mac_ctxt_cmd(struct iwx_softc *, struct iwx_node *,
+ uint32_t, int);
+static int iwx_clear_statistics(struct iwx_softc *);
+static int iwx_scan(struct iwx_softc *);
+static int iwx_bgscan(struct ieee80211com *);
+static int iwx_enable_mgmt_queue(struct iwx_softc *);
+static int iwx_disable_mgmt_queue(struct iwx_softc *);
+static int iwx_rs_rval2idx(uint8_t);
+static uint16_t iwx_rs_ht_rates(struct iwx_softc *, struct ieee80211_node *,
+ int);
+static uint16_t iwx_rs_vht_rates(struct iwx_softc *, struct ieee80211_node *, int);
+static int iwx_rs_init_v3(struct iwx_softc *, struct iwx_node *);
+static int iwx_rs_init_v4(struct iwx_softc *, struct iwx_node *);
+static int iwx_rs_init(struct iwx_softc *, struct iwx_node *);
+static int iwx_phy_send_rlc(struct iwx_softc *, struct iwx_phy_ctxt *,
+ uint8_t, uint8_t);
+static int iwx_phy_ctxt_update(struct iwx_softc *, struct iwx_phy_ctxt *,
+ struct ieee80211_channel *, uint8_t, uint8_t, uint32_t, uint8_t,
+ uint8_t);
+static int iwx_auth(struct ieee80211vap *, struct iwx_softc *);
+static int iwx_deauth(struct iwx_softc *);
+static int iwx_run(struct ieee80211vap *, struct iwx_softc *);
+static int iwx_run_stop(struct iwx_softc *);
+static struct ieee80211_node * iwx_node_alloc(struct ieee80211vap *,
+ const uint8_t[IEEE80211_ADDR_LEN]);
+#if 0
+int iwx_set_key(struct ieee80211com *, struct ieee80211_node *,
+ struct ieee80211_key *);
+void iwx_setkey_task(void *);
+void iwx_delete_key(struct ieee80211com *,
+ struct ieee80211_node *, struct ieee80211_key *);
+#endif
+static int iwx_newstate(struct ieee80211vap *, enum ieee80211_state, int);
+static void iwx_endscan(struct iwx_softc *);
+static void iwx_fill_sf_command(struct iwx_softc *, struct iwx_sf_cfg_cmd *,
+ struct ieee80211_node *);
+static int iwx_sf_config(struct iwx_softc *, int);
+static int iwx_send_bt_init_conf(struct iwx_softc *);
+static int iwx_send_soc_conf(struct iwx_softc *);
+static int iwx_send_update_mcc_cmd(struct iwx_softc *, const char *);
+static int iwx_send_temp_report_ths_cmd(struct iwx_softc *);
+static int iwx_init_hw(struct iwx_softc *);
+static int iwx_init(struct iwx_softc *);
+static void iwx_stop(struct iwx_softc *);
+static void iwx_watchdog(void *);
+static const char *iwx_desc_lookup(uint32_t);
+static void iwx_nic_error(struct iwx_softc *);
+static void iwx_dump_driver_status(struct iwx_softc *);
+static void iwx_nic_umac_error(struct iwx_softc *);
+static void iwx_rx_mpdu_mq(struct iwx_softc *, struct mbuf *, void *, size_t);
+static int iwx_rx_pkt_valid(struct iwx_rx_packet *);
+static void iwx_rx_pkt(struct iwx_softc *, struct iwx_rx_data *,
+ struct mbuf *);
+static void iwx_notif_intr(struct iwx_softc *);
+#if 0
+/* XXX-THJ - I don't have hardware for this */
+static int iwx_intr(void *);
+#endif
+static void iwx_intr_msix(void *);
+static int iwx_preinit(struct iwx_softc *);
+static void iwx_attach_hook(void *);
+static const struct iwx_device_cfg *iwx_find_device_cfg(struct iwx_softc *);
+static int iwx_probe(device_t);
+static int iwx_attach(device_t);
+static int iwx_detach(device_t);
+
+/* FreeBSD specific glue */
/* Link-layer broadcast address (ff:ff:ff:ff:ff:ff). */
u_int8_t etherbroadcastaddr[ETHER_ADDR_LEN] =
 { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

/* All-zero link-layer address. */
u_int8_t etheranyaddr[ETHER_ADDR_LEN] =
 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+
+#ifdef IWX_DEBUG
+#define DPRINTF(x) do { if (sc->sc_debug == IWX_DEBUG_ANY) { printf x; } } while (0)
+#else
+#define DPRINTF(x) do { ; } while (0)
+#endif
+
+/* FreeBSD specific functions */
+static struct ieee80211vap * iwx_vap_create(struct ieee80211com *,
+ const char[IFNAMSIZ], int, enum ieee80211_opmode, int,
+ const uint8_t[IEEE80211_ADDR_LEN], const uint8_t[IEEE80211_ADDR_LEN]);
+static void iwx_vap_delete(struct ieee80211vap *);
+static void iwx_parent(struct ieee80211com *);
+static void iwx_scan_start(struct ieee80211com *);
+static void iwx_scan_end(struct ieee80211com *);
+static void iwx_update_mcast(struct ieee80211com *ic);
+static void iwx_scan_curchan(struct ieee80211_scan_state *, unsigned long);
+static void iwx_scan_mindwell(struct ieee80211_scan_state *);
+static void iwx_set_channel(struct ieee80211com *);
+static void iwx_endscan_cb(void *, int );
+static int iwx_wme_update(struct ieee80211com *);
+static int iwx_raw_xmit(struct ieee80211_node *, struct mbuf *,
+ const struct ieee80211_bpf_params *);
+static int iwx_transmit(struct ieee80211com *, struct mbuf *);
+static void iwx_start(struct iwx_softc *);
+static int iwx_ampdu_rx_start(struct ieee80211_node *,
+ struct ieee80211_rx_ampdu *, int, int, int);
+static void iwx_ampdu_rx_stop(struct ieee80211_node *,
+ struct ieee80211_rx_ampdu *);
+static int iwx_addba_request(struct ieee80211_node *,
+ struct ieee80211_tx_ampdu *, int, int, int);
+static int iwx_addba_response(struct ieee80211_node *,
+ struct ieee80211_tx_ampdu *, int, int, int);
+static void iwx_key_update_begin(struct ieee80211vap *);
+static void iwx_key_update_end(struct ieee80211vap *);
+static int iwx_key_alloc(struct ieee80211vap *, struct ieee80211_key *,
+ ieee80211_keyix *,ieee80211_keyix *);
+static int iwx_key_set(struct ieee80211vap *, const struct ieee80211_key *);
+static int iwx_key_delete(struct ieee80211vap *,
+ const struct ieee80211_key *);
+static int iwx_suspend(device_t);
+static int iwx_resume(device_t);
+static void iwx_radiotap_attach(struct iwx_softc *);
+
+/* OpenBSD compat defines */
+#define IEEE80211_HTOP0_SCO_SCN 0
+#define IEEE80211_VHTOP0_CHAN_WIDTH_HT 0
+#define IEEE80211_VHTOP0_CHAN_WIDTH_80 1
+
+#define IEEE80211_HT_RATESET_SISO 0
+#define IEEE80211_HT_RATESET_MIMO2 2
+
/* OpenBSD-compat standard rate set tables: 802.11a OFDM rates. */
const struct ieee80211_rateset ieee80211_std_rateset_11a =
 { 8, { 12, 18, 24, 36, 48, 72, 96, 108 } };

/* 802.11b CCK rates. */
const struct ieee80211_rateset ieee80211_std_rateset_11b =
 { 4, { 2, 4, 11, 22 } };

/* 802.11g rates (CCK followed by OFDM). */
const struct ieee80211_rateset ieee80211_std_rateset_11g =
 { 12, { 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108 } };
+
+inline int
+ieee80211_has_addr4(const struct ieee80211_frame *wh)
+{
+ return (wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) ==
+ IEEE80211_FC1_DIR_DSTODS;
+}
+
+static uint8_t
+iwx_lookup_cmd_ver(struct iwx_softc *sc, uint8_t grp, uint8_t cmd)
+{
+ const struct iwx_fw_cmd_version *entry;
+ int i;
+
+ for (i = 0; i < sc->n_cmd_versions; i++) {
+ entry = &sc->cmd_versions[i];
+ if (entry->group == grp && entry->cmd == cmd)
+ return entry->cmd_ver;
+ }
+
+ return IWX_FW_CMD_VER_UNKNOWN;
+}
+
+uint8_t
+iwx_lookup_notif_ver(struct iwx_softc *sc, uint8_t grp, uint8_t cmd)
+{
+ const struct iwx_fw_cmd_version *entry;
+ int i;
+
+ for (i = 0; i < sc->n_cmd_versions; i++) {
+ entry = &sc->cmd_versions[i];
+ if (entry->group == grp && entry->cmd == cmd)
+ return entry->notif_ver;
+ }
+
+ return IWX_FW_CMD_VER_UNKNOWN;
+}
+
+static int
+iwx_store_cscheme(struct iwx_softc *sc, const uint8_t *data, size_t dlen)
+{
+ const struct iwx_fw_cscheme_list *l = (const void *)data;
+
+ if (dlen < sizeof(*l) ||
+ dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
+ return EINVAL;
+
+ /* we don't actually store anything for now, always use s/w crypto */
+
+ return 0;
+}
+
+static int
+iwx_ctxt_info_alloc_dma(struct iwx_softc *sc,
+ const struct iwx_fw_onesect *sec, struct iwx_dma_info *dram)
+{
+ int err = iwx_dma_contig_alloc(sc->sc_dmat, dram, sec->fws_len, 1);
+ if (err) {
+ printf("%s: could not allocate context info DMA memory\n",
+ DEVNAME(sc));
+ return err;
+ }
+
+ memcpy(dram->vaddr, sec->fws_data, sec->fws_len);
+
+ return 0;
+}
+
+static void
+iwx_ctxt_info_free_paging(struct iwx_softc *sc)
+{
+ struct iwx_self_init_dram *dram = &sc->init_dram;
+ int i;
+
+ if (!dram->paging)
+ return;
+
+ /* free paging*/
+ for (i = 0; i < dram->paging_cnt; i++)
+ iwx_dma_contig_free(&dram->paging[i]);
+
+ free(dram->paging, M_DEVBUF);
+ dram->paging_cnt = 0;
+ dram->paging = NULL;
+}
+
+static int
+iwx_get_num_sections(const struct iwx_fw_sects *fws, int start)
+{
+ int i = 0;
+
+ while (start < fws->fw_count &&
+ fws->fw_sect[start].fws_devoff != IWX_CPU1_CPU2_SEPARATOR_SECTION &&
+ fws->fw_sect[start].fws_devoff != IWX_PAGING_SEPARATOR_SECTION) {
+ start++;
+ i++;
+ }
+
+ return i;
+}
+
+/*
+ * Copy the LMAC, UMAC and paging firmware sections into DMA memory and
+ * record their physical addresses in the device context-info structure.
+ *
+ * The section list is laid out as: LMAC sections, separator, UMAC
+ * sections, separator, paging sections; hence the +1/+2 index offsets
+ * below.  On error the partially filled dram->fw/dram->paging arrays
+ * are left for the caller to release (iwx_ctxt_info_free_fw_img() /
+ * iwx_ctxt_info_free_paging()).
+ *
+ * NOTE(review): the driver lock is dropped around the first allocation
+ * even though it uses M_NOWAIT, and the M_WAITOK mallocarray() below
+ * cannot return NULL, so that NULL check is defensive only — confirm
+ * the intended sleep/lock semantics here.
+ */
+static int
+iwx_init_fw_sec(struct iwx_softc *sc, const struct iwx_fw_sects *fws,
+    struct iwx_context_info_dram *ctxt_dram)
+{
+	struct iwx_self_init_dram *dram = &sc->init_dram;
+	int i, ret, fw_cnt = 0;
+
+	KASSERT(dram->paging == NULL, ("iwx_init_fw_sec"));
+
+	dram->lmac_cnt = iwx_get_num_sections(fws, 0);
+	/* add 1 due to separator */
+	dram->umac_cnt = iwx_get_num_sections(fws, dram->lmac_cnt + 1);
+	/* add 2 due to separators */
+	dram->paging_cnt = iwx_get_num_sections(fws,
+	    dram->lmac_cnt + dram->umac_cnt + 2);
+
+	IWX_UNLOCK(sc);
+	dram->fw = mallocarray(dram->umac_cnt + dram->lmac_cnt,
+	    sizeof(*dram->fw), M_DEVBUF, M_ZERO | M_NOWAIT);
+	if (!dram->fw) {
+		printf("%s: could not allocate memory for firmware sections\n",
+		    DEVNAME(sc));
+		IWX_LOCK(sc);
+		return ENOMEM;
+	}
+
+	dram->paging = mallocarray(dram->paging_cnt, sizeof(*dram->paging),
+	    M_DEVBUF, M_ZERO | M_WAITOK);
+	IWX_LOCK(sc);
+	if (!dram->paging) {
+		printf("%s: could not allocate memory for firmware paging\n",
+		    DEVNAME(sc));
+		return ENOMEM;
+	}
+
+	/* initialize lmac sections */
+	for (i = 0; i < dram->lmac_cnt; i++) {
+		ret = iwx_ctxt_info_alloc_dma(sc, &fws->fw_sect[i],
+		    &dram->fw[fw_cnt]);
+		if (ret)
+			return ret;
+		ctxt_dram->lmac_img[i] =
+		    htole64(dram->fw[fw_cnt].paddr);
+		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
+		    "%s: firmware LMAC section %d at 0x%llx size %lld\n",
+		    __func__, i,
+		    (unsigned long long)dram->fw[fw_cnt].paddr,
+		    (unsigned long long)dram->fw[fw_cnt].size);
+		fw_cnt++;
+	}
+
+	/* initialize umac sections */
+	for (i = 0; i < dram->umac_cnt; i++) {
+		/* access FW with +1 to make up for lmac separator */
+		ret = iwx_ctxt_info_alloc_dma(sc,
+		    &fws->fw_sect[fw_cnt + 1], &dram->fw[fw_cnt]);
+		if (ret)
+			return ret;
+		ctxt_dram->umac_img[i] =
+		    htole64(dram->fw[fw_cnt].paddr);
+		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
+		    "%s: firmware UMAC section %d at 0x%llx size %lld\n",
+		    __func__, i,
+		    (unsigned long long)dram->fw[fw_cnt].paddr,
+		    (unsigned long long)dram->fw[fw_cnt].size);
+		fw_cnt++;
+	}
+
+	/*
+	 * Initialize paging.
+	 * Paging memory isn't stored in dram->fw as the umac and lmac - it is
+	 * stored separately.
+	 * This is since the timing of its release is different -
+	 * while fw memory can be released on alive, the paging memory can be
+	 * freed only when the device goes down.
+	 * Given that, the logic here in accessing the fw image is a bit
+	 * different - fw_cnt isn't changing so loop counter is added to it.
+	 */
+	for (i = 0; i < dram->paging_cnt; i++) {
+		/* access FW with +2 to make up for lmac & umac separators */
+		int fw_idx = fw_cnt + i + 2;
+
+		ret = iwx_ctxt_info_alloc_dma(sc,
+		    &fws->fw_sect[fw_idx], &dram->paging[i]);
+		if (ret)
+			return ret;
+
+		ctxt_dram->virtual_img[i] = htole64(dram->paging[i].paddr);
+		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
+		    "%s: firmware paging section %d at 0x%llx size %lld\n",
+		    __func__, i,
+		    (unsigned long long)dram->paging[i].paddr,
+		    (unsigned long long)dram->paging[i].size);
+	}
+
+	return 0;
+}
+
+/*
+ * Format a firmware version string into 'buf'.  The minor number is
+ * printed in hex for major >= 35 to match the vendor driver's format.
+ */
+static void
+iwx_fw_version_str(char *buf, size_t bufsize,
+    uint32_t major, uint32_t minor, uint32_t api)
+{
+	/*
+	 * Starting with major version 35 the Linux driver prints the minor
+	 * version in hexadecimal.
+	 */
+	if (major >= 35)
+		snprintf(buf, bufsize, "%u.%08x.%u", major, minor, api);
+	else
+		snprintf(buf, bufsize, "%u.%u.%u", major, minor, api);
+}
+
+/*
+ * Allocate a DMA buffer for the firmware debug monitor, trying sizes
+ * from 2^max_power down to 2^min_power bytes until one succeeds.
+ * Returns 0 on success (or if a buffer already exists), else an errno.
+ *
+ * Note: 'power' is uint8_t, so min_power must be > 0 or the loop
+ * condition would wrap; the only caller passes min_power == 11.
+ */
+static int
+iwx_alloc_fw_monitor_block(struct iwx_softc *sc, uint8_t max_power,
+    uint8_t min_power)
+{
+	struct iwx_dma_info *fw_mon = &sc->fw_mon;
+	uint32_t size = 0;
+	uint8_t power;
+	/*
+	 * Initialize err so that it is well-defined if the loop body
+	 * never runs (max_power < min_power); previously it was read
+	 * uninitialized in that case.
+	 */
+	int err = ENOMEM;
+
+	/* Nothing to do if a monitor buffer was already allocated. */
+	if (fw_mon->size)
+		return 0;
+
+	for (power = max_power; power >= min_power; power--) {
+		size = (1 << power);
+
+		err = iwx_dma_contig_alloc(sc->sc_dmat, fw_mon, size, 0);
+		if (err)
+			continue;
+
+		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
+		    "%s: allocated 0x%08x bytes for firmware monitor.\n",
+		    DEVNAME(sc), size);
+		break;
+	}
+
+	if (err) {
+		fw_mon->size = 0;
+		return err;
+	}
+
+	/* Report when we had to settle for less than was requested. */
+	if (power != max_power)
+		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
+		    "%s: Sorry - debug buffer is only %luK while you requested %luK\n",
+		    DEVNAME(sc), (unsigned long)(1 << (power - 10)),
+		    (unsigned long)(1 << (max_power - 10)));
+
+	return 0;
+}
+
+/*
+ * Allocate the firmware debug monitor buffer.  A zero max_power selects
+ * the maximum size (2^26 bytes); otherwise the TLV-provided value is
+ * biased by 11 (so the argument encodes a size in KiB powers).  Values
+ * above 26 are rejected as bogus TLV data and silently ignored.
+ */
+static int
+iwx_alloc_fw_monitor(struct iwx_softc *sc, uint8_t max_power)
+{
+	if (!max_power) {
+		/* default max_power is maximum */
+		max_power = 26;
+	} else {
+		max_power += 11;
+	}
+
+	if (max_power > 26) {
+		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
+		    "%s: External buffer size for monitor is too big %d, "
+		    "check the FW TLV\n", DEVNAME(sc), max_power);
+		return 0;
+	}
+
+	if (sc->fw_mon.size)
+		return 0;
+
+	return iwx_alloc_fw_monitor_block(sc, max_power, 11);
+}
+
+/*
+ * Apply the firmware debug-destination TLV (register writes and
+ * monitor-buffer programming).  The implementation is currently
+ * compiled out (#if 0) and the function is a no-op returning 0;
+ * the disabled OpenBSD code is kept for a future port.
+ */
+static int
+iwx_apply_debug_destination(struct iwx_softc *sc)
+{
+#if 0
+	struct iwx_fw_dbg_dest_tlv_v1 *dest_v1;
+	int i, err;
+	uint8_t mon_mode, size_power, base_shift, end_shift;
+	uint32_t base_reg, end_reg;
+
+	dest_v1 = sc->sc_fw.dbg_dest_tlv_v1;
+	mon_mode = dest_v1->monitor_mode;
+	size_power = dest_v1->size_power;
+	base_reg = le32toh(dest_v1->base_reg);
+	end_reg = le32toh(dest_v1->end_reg);
+	base_shift = dest_v1->base_shift;
+	end_shift = dest_v1->end_shift;
+
+	DPRINTF(("%s: applying debug destination %d\n", DEVNAME(sc), mon_mode));
+
+	if (mon_mode == EXTERNAL_MODE) {
+		err = iwx_alloc_fw_monitor(sc, size_power);
+		if (err)
+			return err;
+	}
+
+	if (!iwx_nic_lock(sc))
+		return EBUSY;
+
+	for (i = 0; i < sc->sc_fw.n_dest_reg; i++) {
+		uint32_t addr, val;
+		uint8_t op;
+
+		addr = le32toh(dest_v1->reg_ops[i].addr);
+		val = le32toh(dest_v1->reg_ops[i].val);
+		op = dest_v1->reg_ops[i].op;
+
+		DPRINTF(("%s: op=%u addr=%u val=%u\n", __func__, op, addr, val));
+		switch (op) {
+		case CSR_ASSIGN:
+			IWX_WRITE(sc, addr, val);
+			break;
+		case CSR_SETBIT:
+			IWX_SETBITS(sc, addr, (1 << val));
+			break;
+		case CSR_CLEARBIT:
+			IWX_CLRBITS(sc, addr, (1 << val));
+			break;
+		case PRPH_ASSIGN:
+			iwx_write_prph(sc, addr, val);
+			break;
+		case PRPH_SETBIT:
+			err = iwx_set_bits_prph(sc, addr, (1 << val));
+			if (err)
+				return err;
+			break;
+		case PRPH_CLEARBIT:
+			err = iwx_clear_bits_prph(sc, addr, (1 << val));
+			if (err)
+				return err;
+			break;
+		case PRPH_BLOCKBIT:
+			if (iwx_read_prph(sc, addr) & (1 << val))
+				goto monitor;
+			break;
+		default:
+			DPRINTF(("%s: FW debug - unknown OP %d\n",
+			    DEVNAME(sc), op));
+			break;
+		}
+	}
+
+monitor:
+	if (mon_mode == EXTERNAL_MODE && sc->fw_mon.size) {
+		iwx_write_prph(sc, le32toh(base_reg),
+		    sc->fw_mon.paddr >> base_shift);
+		iwx_write_prph(sc, end_reg,
+		    (sc->fw_mon.paddr + sc->fw_mon.size - 256)
+		    >> end_shift);
+	}
+
+	iwx_nic_unlock(sc);
+	return 0;
+#else
+	return 0;
+#endif
+}
+
+/*
+ * Program the PCIe Latency Tolerance Reporting (LTR) registers.
+ * Discrete devices get the CSR register directly; integrated
+ * 22000-family devices are programmed through the HPM PRPH registers.
+ */
+static void
+iwx_set_ltr(struct iwx_softc *sc)
+{
+	uint32_t ltr_val = IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ |
+	    ((IWX_CSR_LTR_LONG_VAL_AD_SCALE_USEC <<
+	    IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE_SHIFT) &
+	    IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE_MASK) |
+	    ((250 << IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL_SHIFT) &
+	    IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL_MASK) |
+	    IWX_CSR_LTR_LONG_VAL_AD_SNOOP_REQ |
+	    ((IWX_CSR_LTR_LONG_VAL_AD_SCALE_USEC <<
+	    IWX_CSR_LTR_LONG_VAL_AD_SNOOP_SCALE_SHIFT) &
+	    IWX_CSR_LTR_LONG_VAL_AD_SNOOP_SCALE_MASK) |
+	    (250 & IWX_CSR_LTR_LONG_VAL_AD_SNOOP_VAL);
+
+	/*
+	 * To workaround hardware latency issues during the boot process,
+	 * initialize the LTR to ~250 usec (see ltr_val above).
+	 * The firmware initializes this again later (to a smaller value).
+	 */
+	if (!sc->sc_integrated) {
+		IWX_WRITE(sc, IWX_CSR_LTR_LONG_VAL_AD, ltr_val);
+	} else if (sc->sc_integrated &&
+	    sc->sc_device_family == IWX_DEVICE_FAMILY_22000) {
+		iwx_write_prph(sc, IWX_HPM_MAC_LTR_CSR,
+		    IWX_HPM_MAC_LRT_ENABLE_ALL);
+		iwx_write_prph(sc, IWX_HPM_UMAC_LTR, ltr_val);
+	}
+}
+
+/*
+ * Build the (pre-AX210) context-info structure in DMA memory: RX/TX
+ * queue addresses, firmware section addresses, optional debug config —
+ * then point the device at it and kick the firmware self-load.
+ * On any failure the already-copied firmware images are released.
+ * Context info itself is released upon "alive" or load failure.
+ */
+int
+iwx_ctxt_info_init(struct iwx_softc *sc, const struct iwx_fw_sects *fws)
+{
+	struct iwx_context_info *ctxt_info;
+	struct iwx_context_info_rbd_cfg *rx_cfg;
+	uint32_t control_flags = 0;
+	uint64_t paddr;
+	int err;
+
+	ctxt_info = sc->ctxt_info_dma.vaddr;
+	memset(ctxt_info, 0, sizeof(*ctxt_info));
+
+	ctxt_info->version.version = 0;
+	ctxt_info->version.mac_id =
+	    htole16((uint16_t)IWX_READ(sc, IWX_CSR_HW_REV));
+	/* size is in DWs */
+	ctxt_info->version.size = htole16(sizeof(*ctxt_info) / 4);
+
+	/* The RB circular-buffer size field is only 4 bits wide. */
+	KASSERT(IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE) < 0xF,
+	    ("IWX_RX_QUEUE_CB_SIZE exceeds rate table size"));
+
+	control_flags = IWX_CTXT_INFO_TFD_FORMAT_LONG |
+	    (IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE) <<
+	     IWX_CTXT_INFO_RB_CB_SIZE_POS) |
+	    (IWX_CTXT_INFO_RB_SIZE_4K << IWX_CTXT_INFO_RB_SIZE_POS);
+	ctxt_info->control.control_flags = htole32(control_flags);
+
+	/* initialize RX default queue */
+	rx_cfg = &ctxt_info->rbd_cfg;
+	rx_cfg->free_rbd_addr = htole64(sc->rxq.free_desc_dma.paddr);
+	rx_cfg->used_rbd_addr = htole64(sc->rxq.used_desc_dma.paddr);
+	rx_cfg->status_wr_ptr = htole64(sc->rxq.stat_dma.paddr);
+
+	/* initialize TX command queue */
+	ctxt_info->hcmd_cfg.cmd_queue_addr =
+	    htole64(sc->txq[IWX_DQA_CMD_QUEUE].desc_dma.paddr);
+	ctxt_info->hcmd_cfg.cmd_queue_size =
+	    IWX_TFD_QUEUE_CB_SIZE(IWX_TX_RING_COUNT);
+
+	/* allocate ucode sections in dram and set addresses */
+	err = iwx_init_fw_sec(sc, fws, &ctxt_info->dram);
+	if (err) {
+		iwx_ctxt_info_free_fw_img(sc);
+		return err;
+	}
+
+	/* Configure debug, if exists */
+	if (sc->sc_fw.dbg_dest_tlv_v1) {
+#if 1
+		err = iwx_apply_debug_destination(sc);
+		if (err) {
+			iwx_ctxt_info_free_fw_img(sc);
+			return err;
+		}
+#endif
+	}
+
+	/*
+	 * Write the context info DMA base address. The device expects a
+	 * 64-bit address but a simple bus_space_write_8 to this register
+	 * won't work on some devices, such as the AX201.
+	 */
+	paddr = sc->ctxt_info_dma.paddr;
+	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_BA, paddr & 0xffffffff);
+	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_BA + 4, paddr >> 32);
+
+	/* kick FW self load */
+	if (!iwx_nic_lock(sc)) {
+		iwx_ctxt_info_free_fw_img(sc);
+		return EBUSY;
+	}
+
+	iwx_set_ltr(sc);
+	iwx_write_prph(sc, IWX_UREG_CPU_INIT_RUN, 1);
+	iwx_nic_unlock(sc);
+
+	/* Context info will be released upon alive or failure to get one */
+
+	return 0;
+}
+
+/*
+ * AX210+ (gen3) variant of context-info setup.  Requires the firmware
+ * image loader (IML) from the firmware file; builds the PRPH scratch
+ * and gen3 context-info structures, programs their DMA addresses and
+ * the IML address/size, then kicks the firmware self-load through the
+ * UMAC PRPH.  Frees firmware images and the IML buffer on failure.
+ */
+static int
+iwx_ctxt_info_gen3_init(struct iwx_softc *sc, const struct iwx_fw_sects *fws)
+{
+	struct iwx_context_info_gen3 *ctxt_info_gen3;
+	struct iwx_prph_scratch *prph_scratch;
+	struct iwx_prph_scratch_ctrl_cfg *prph_sc_ctrl;
+	uint16_t cb_size;
+	uint32_t control_flags, scratch_size;
+	uint64_t paddr;
+	int err;
+
+	if (sc->sc_fw.iml == NULL || sc->sc_fw.iml_len == 0) {
+		printf("%s: no image loader found in firmware file\n",
+		    DEVNAME(sc));
+		iwx_ctxt_info_free_fw_img(sc);
+		return EINVAL;
+	}
+
+	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->iml_dma,
+	    sc->sc_fw.iml_len, 1);
+	if (err) {
+		printf("%s: could not allocate DMA memory for "
+		    "firmware image loader\n", DEVNAME(sc));
+		iwx_ctxt_info_free_fw_img(sc);
+		return ENOMEM;
+	}
+
+	prph_scratch = sc->prph_scratch_dma.vaddr;
+	memset(prph_scratch, 0, sizeof(*prph_scratch));
+	prph_sc_ctrl = &prph_scratch->ctrl_cfg;
+	prph_sc_ctrl->version.version = 0;
+	prph_sc_ctrl->version.mac_id = htole16(IWX_READ(sc, IWX_CSR_HW_REV));
+	/* size is in DWs */
+	prph_sc_ctrl->version.size = htole16(sizeof(*prph_scratch) / 4);
+
+	control_flags = IWX_PRPH_SCRATCH_RB_SIZE_4K |
+	    IWX_PRPH_SCRATCH_MTR_MODE |
+	    (IWX_PRPH_MTR_FORMAT_256B & IWX_PRPH_SCRATCH_MTR_FORMAT);
+	if (sc->sc_imr_enabled)
+		control_flags |= IWX_PRPH_SCRATCH_IMR_DEBUG_EN;
+	prph_sc_ctrl->control.control_flags = htole32(control_flags);
+
+	/* initialize RX default queue */
+	prph_sc_ctrl->rbd_cfg.free_rbd_addr =
+	    htole64(sc->rxq.free_desc_dma.paddr);
+
+	/* allocate ucode sections in dram and set addresses */
+	err = iwx_init_fw_sec(sc, fws, &prph_scratch->dram);
+	if (err) {
+		iwx_dma_contig_free(&sc->iml_dma);
+		iwx_ctxt_info_free_fw_img(sc);
+		return err;
+	}
+
+	ctxt_info_gen3 = sc->ctxt_info_dma.vaddr;
+	memset(ctxt_info_gen3, 0, sizeof(*ctxt_info_gen3));
+	ctxt_info_gen3->prph_info_base_addr = htole64(sc->prph_info_dma.paddr);
+	ctxt_info_gen3->prph_scratch_base_addr =
+	    htole64(sc->prph_scratch_dma.paddr);
+	scratch_size = sizeof(*prph_scratch);
+	ctxt_info_gen3->prph_scratch_size = htole32(scratch_size);
+	ctxt_info_gen3->cr_head_idx_arr_base_addr =
+	    htole64(sc->rxq.stat_dma.paddr);
+	/* TR/CR tail arrays live in halves/quarters of the prph_info page. */
+	ctxt_info_gen3->tr_tail_idx_arr_base_addr =
+	    htole64(sc->prph_info_dma.paddr + PAGE_SIZE / 2);
+	ctxt_info_gen3->cr_tail_idx_arr_base_addr =
+	    htole64(sc->prph_info_dma.paddr + 3 * PAGE_SIZE / 4);
+	ctxt_info_gen3->mtr_base_addr =
+	    htole64(sc->txq[IWX_DQA_CMD_QUEUE].desc_dma.paddr);
+	ctxt_info_gen3->mcr_base_addr = htole64(sc->rxq.used_desc_dma.paddr);
+	cb_size = IWX_TFD_QUEUE_CB_SIZE(IWX_TX_RING_COUNT);
+	ctxt_info_gen3->mtr_size = htole16(cb_size);
+	cb_size = IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE);
+	ctxt_info_gen3->mcr_size = htole16(cb_size);
+
+	memcpy(sc->iml_dma.vaddr, sc->sc_fw.iml, sc->sc_fw.iml_len);
+
+	paddr = sc->ctxt_info_dma.paddr;
+	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_ADDR, paddr & 0xffffffff);
+	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_ADDR + 4, paddr >> 32);
+
+	paddr = sc->iml_dma.paddr;
+	IWX_WRITE(sc, IWX_CSR_IML_DATA_ADDR, paddr & 0xffffffff);
+	IWX_WRITE(sc, IWX_CSR_IML_DATA_ADDR + 4, paddr >> 32);
+	IWX_WRITE(sc, IWX_CSR_IML_SIZE_ADDR, sc->sc_fw.iml_len);
+
+	IWX_SETBITS(sc, IWX_CSR_CTXT_INFO_BOOT_CTRL,
+	    IWX_CSR_AUTO_FUNC_BOOT_ENA);
+
+	IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
+	    "%s:%d kicking fw to get going\n", __func__, __LINE__);
+
+	/* kick FW self load */
+	if (!iwx_nic_lock(sc)) {
+		iwx_dma_contig_free(&sc->iml_dma);
+		iwx_ctxt_info_free_fw_img(sc);
+		return EBUSY;
+	}
+	iwx_set_ltr(sc);
+	iwx_write_umac_prph(sc, IWX_UREG_CPU_INIT_RUN, 1);
+	iwx_nic_unlock(sc);
+
+	/* Context info will be released upon alive or failure to get one */
+	return 0;
+}
+
+/*
+ * Release the DMA copies of the LMAC/UMAC firmware sections and the
+ * section array.  Paging sections are freed separately by
+ * iwx_ctxt_info_free_paging().  Safe to call repeatedly.
+ */
+static void
+iwx_ctxt_info_free_fw_img(struct iwx_softc *sc)
+{
+	struct iwx_self_init_dram *dram = &sc->init_dram;
+	int i;
+
+	if (!dram->fw)
+		return;
+
+	for (i = 0; i < dram->lmac_cnt + dram->umac_cnt; i++)
+		iwx_dma_contig_free(&dram->fw[i]);
+
+	free(dram->fw, M_DEVBUF);
+	dram->lmac_cnt = 0;
+	dram->umac_cnt = 0;
+	dram->fw = NULL;
+}
+
+/*
+ * Record one firmware section for ucode image 'type'.  The section
+ * blob starts with a 32-bit device load offset followed by the data;
+ * only pointers into the in-memory firmware file are stored, so the
+ * firmware image must stay resident while these are in use.
+ */
+static int
+iwx_firmware_store_section(struct iwx_softc *sc, enum iwx_ucode_type type,
+    const uint8_t *data, size_t dlen)
+{
+	struct iwx_fw_sects *fws;
+	struct iwx_fw_onesect *fwone;
+
+	if (type >= IWX_UCODE_TYPE_MAX)
+		return EINVAL;
+	if (dlen < sizeof(uint32_t))
+		return EINVAL;
+
+	fws = &sc->sc_fw.fw_sects[type];
+	IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
+	    "%s: ucode type %d section %d\n", DEVNAME(sc), type, fws->fw_count);
+	if (fws->fw_count >= IWX_UCODE_SECT_MAX)
+		return EINVAL;
+
+	fwone = &fws->fw_sect[fws->fw_count];
+
+	/* first 32bit are device load offset */
+	memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
+
+	/* rest is data */
+	fwone->fws_data = data + sizeof(uint32_t);
+	fwone->fws_len = dlen - sizeof(uint32_t);
+
+	fws->fw_count++;
+	fws->fw_totlen += fwone->fws_len;
+
+	return 0;
+}
+
+#define IWX_DEFAULT_SCAN_CHANNELS 40
+/* Newer firmware might support more channels. Raise this value if needed. */
+#define IWX_MAX_SCAN_CHANNELS 67 /* as of iwx-cc-a0-62 firmware */
+
+/* On-disk layout of the IWX_UCODE_TLV_DEF_CALIB TLV payload. */
+struct iwx_tlv_calib_data {
+	uint32_t ucode_type;	/* little-endian iwx_ucode_type */
+	struct iwx_tlv_calib_ctrl calib;
+} __packed;
+
+/*
+ * Store the default calibration triggers for one ucode type, parsed
+ * from a DEF_CALIB TLV.  Returns EINVAL if the ucode type is out of
+ * range.  The trigger fields are kept in firmware byte order.
+ */
+static int
+iwx_set_default_calib(struct iwx_softc *sc, const void *data)
+{
+	const struct iwx_tlv_calib_data *def_calib = data;
+	uint32_t ucode_type = le32toh(def_calib->ucode_type);
+
+	if (ucode_type >= IWX_UCODE_TYPE_MAX)
+		return EINVAL;
+
+	sc->sc_default_calib[ucode_type].flow_trigger =
+	    def_calib->calib.flow_trigger;
+	sc->sc_default_calib[ucode_type].event_trigger =
+	    def_calib->calib.event_trigger;
+
+	return 0;
+}
+
+/*
+ * Release the parsed firmware image state: the raw firmware blob, the
+ * section table (which pointed into that blob), and the IML copy.
+ * fw_status is deliberately preserved for the caller to manage.
+ */
+static void
+iwx_fw_info_free(struct iwx_fw_info *fw)
+{
+	free(fw->fw_rawdata, M_DEVBUF);
+	fw->fw_rawdata = NULL;
+	fw->fw_rawsize = 0;
+	/* don't touch fw->fw_status */
+	memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
+	free(fw->iml, M_DEVBUF);
+	fw->iml = NULL;
+	fw->iml_len = 0;
+}
+
+#define IWX_FW_ADDR_CACHE_CONTROL 0xC0000000
+
+/*
+ * Load the firmware file via firmware(9) and parse its TLV stream,
+ * populating sc->sc_fw (sections, capabilities, API flags, version
+ * string, debug config, IML, command versions).  Returns 0 and sets
+ * fw_status to DONE on success; on error frees any partial state and
+ * resets fw_status to NONE.  Idempotent once DONE.
+ */
+static int
+iwx_read_firmware(struct iwx_softc *sc)
+{
+	struct iwx_fw_info *fw = &sc->sc_fw;
+	const struct iwx_tlv_ucode_header *uhdr;
+	struct iwx_ucode_tlv tlv;
+	uint32_t tlv_type;
+	const uint8_t *data;
+	int err = 0;
+	size_t len;
+	const struct firmware *fwp;
+
+	if (fw->fw_status == IWX_FW_STATUS_DONE)
+		return 0;
+
+	fw->fw_status = IWX_FW_STATUS_INPROGRESS;
+	fwp = firmware_get(sc->sc_fwname);
+	sc->fwp = fwp;
+
+	if (fwp == NULL) {
+		printf("%s: could not read firmware %s\n",
+		    DEVNAME(sc), sc->sc_fwname);
+		err = ENOENT;
+		goto out;
+	}
+
+	IWX_DPRINTF(sc, IWX_DEBUG_FW, "%s:%d %s: using firmware %s\n",
+	    __func__, __LINE__, DEVNAME(sc), sc->sc_fwname);
+
+
+	/* Reset capability state before (re)parsing. */
+	sc->sc_capaflags = 0;
+	sc->sc_capa_n_scan_channels = IWX_DEFAULT_SCAN_CHANNELS;
+	memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
+	memset(sc->sc_ucode_api, 0, sizeof(sc->sc_ucode_api));
+	sc->n_cmd_versions = 0;
+
+	/* A TLV image starts with a zero word followed by the magic. */
+	uhdr = (const void *)(fwp->data);
+	if (*(const uint32_t *)fwp->data != 0
+	    || le32toh(uhdr->magic) != IWX_TLV_UCODE_MAGIC) {
+		printf("%s: invalid firmware %s\n",
+		    DEVNAME(sc), sc->sc_fwname);
+		err = EINVAL;
+		goto out;
+	}
+
+	iwx_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver),
+	    IWX_UCODE_MAJOR(le32toh(uhdr->ver)),
+	    IWX_UCODE_MINOR(le32toh(uhdr->ver)),
+	    IWX_UCODE_API(le32toh(uhdr->ver)));
+
+	data = uhdr->data;
+	len = fwp->datasize - sizeof(*uhdr);
+
+	/* Walk the TLV records; each is a header plus tlv_len payload. */
+	while (len >= sizeof(tlv)) {
+		size_t tlv_len;
+		const void *tlv_data;
+
+		memcpy(&tlv, data, sizeof(tlv));
+		tlv_len = le32toh(tlv.length);
+		tlv_type = le32toh(tlv.type);
+
+		len -= sizeof(tlv);
+		data += sizeof(tlv);
+		tlv_data = data;
+
+		if (len < tlv_len) {
+			printf("%s: firmware too short: %zu bytes\n",
+			    DEVNAME(sc), len);
+			err = EINVAL;
+			goto parse_out;
+		}
+
+		switch (tlv_type) {
+		case IWX_UCODE_TLV_PROBE_MAX_LEN:
+			if (tlv_len < sizeof(uint32_t)) {
+				err = EINVAL;
+				goto parse_out;
+			}
+			sc->sc_capa_max_probe_len
+			    = le32toh(*(const uint32_t *)tlv_data);
+			if (sc->sc_capa_max_probe_len >
+			    IWX_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
+				err = EINVAL;
+				goto parse_out;
+			}
+			break;
+		case IWX_UCODE_TLV_PAN:
+			if (tlv_len) {
+				err = EINVAL;
+				goto parse_out;
+			}
+			sc->sc_capaflags |= IWX_UCODE_TLV_FLAGS_PAN;
+			break;
+		case IWX_UCODE_TLV_FLAGS:
+			if (tlv_len < sizeof(uint32_t)) {
+				err = EINVAL;
+				goto parse_out;
+			}
+			/*
+			 * Apparently there can be many flags, but Linux driver
+			 * parses only the first one, and so do we.
+			 *
+			 * XXX: why does this override IWX_UCODE_TLV_PAN?
+			 * Intentional or a bug? Observations from
+			 * current firmware file:
+			 *  1) TLV_PAN is parsed first
+			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
+			 * ==> this resets TLV_PAN to itself... hnnnk
+			 */
+			sc->sc_capaflags = le32toh(*(const uint32_t *)tlv_data);
+			break;
+		case IWX_UCODE_TLV_CSCHEME:
+			err = iwx_store_cscheme(sc, tlv_data, tlv_len);
+			if (err)
+				goto parse_out;
+			break;
+		case IWX_UCODE_TLV_NUM_OF_CPU: {
+			uint32_t num_cpu;
+			if (tlv_len != sizeof(uint32_t)) {
+				err = EINVAL;
+				goto parse_out;
+			}
+			num_cpu = le32toh(*(const uint32_t *)tlv_data);
+			if (num_cpu < 1 || num_cpu > 2) {
+				err = EINVAL;
+				goto parse_out;
+			}
+			break;
+		}
+		case IWX_UCODE_TLV_SEC_RT:
+			err = iwx_firmware_store_section(sc,
+			    IWX_UCODE_TYPE_REGULAR, tlv_data, tlv_len);
+			if (err)
+				goto parse_out;
+			break;
+		case IWX_UCODE_TLV_SEC_INIT:
+			err = iwx_firmware_store_section(sc,
+			    IWX_UCODE_TYPE_INIT, tlv_data, tlv_len);
+			if (err)
+				goto parse_out;
+			break;
+		case IWX_UCODE_TLV_SEC_WOWLAN:
+			err = iwx_firmware_store_section(sc,
+			    IWX_UCODE_TYPE_WOW, tlv_data, tlv_len);
+			if (err)
+				goto parse_out;
+			break;
+		case IWX_UCODE_TLV_DEF_CALIB:
+			if (tlv_len != sizeof(struct iwx_tlv_calib_data)) {
+				err = EINVAL;
+				goto parse_out;
+			}
+			err = iwx_set_default_calib(sc, tlv_data);
+			if (err)
+				goto parse_out;
+			break;
+		case IWX_UCODE_TLV_PHY_SKU:
+			if (tlv_len != sizeof(uint32_t)) {
+				err = EINVAL;
+				goto parse_out;
+			}
+			sc->sc_fw_phy_config = le32toh(*(const uint32_t *)tlv_data);
+			break;
+
+		case IWX_UCODE_TLV_API_CHANGES_SET: {
+			const struct iwx_ucode_api *api;
+			int idx, i;
+			if (tlv_len != sizeof(*api)) {
+				err = EINVAL;
+				goto parse_out;
+			}
+			api = (const struct iwx_ucode_api *)tlv_data;
+			idx = le32toh(api->api_index);
+			if (idx >= howmany(IWX_NUM_UCODE_TLV_API, 32)) {
+				err = EINVAL;
+				goto parse_out;
+			}
+			for (i = 0; i < 32; i++) {
+				if ((le32toh(api->api_flags) & (1 << i)) == 0)
+					continue;
+				setbit(sc->sc_ucode_api, i + (32 * idx));
+			}
+			break;
+		}
+
+		case IWX_UCODE_TLV_ENABLED_CAPABILITIES: {
+			const struct iwx_ucode_capa *capa;
+			int idx, i;
+			if (tlv_len != sizeof(*capa)) {
+				err = EINVAL;
+				goto parse_out;
+			}
+			capa = (const struct iwx_ucode_capa *)tlv_data;
+			idx = le32toh(capa->api_index);
+			/*
+			 * NOTE(review): an out-of-range capability index is
+			 * skipped without setting err — unlike the API case
+			 * above which fails with EINVAL.  Confirm this
+			 * asymmetry is intentional (it matches upstream).
+			 */
+			if (idx >= howmany(IWX_NUM_UCODE_TLV_CAPA, 32)) {
+				goto parse_out;
+			}
+			for (i = 0; i < 32; i++) {
+				if ((le32toh(capa->api_capa) & (1 << i)) == 0)
+					continue;
+				setbit(sc->sc_enabled_capa, i + (32 * idx));
+			}
+			break;
+		}
+
+		case IWX_UCODE_TLV_SDIO_ADMA_ADDR:
+		case IWX_UCODE_TLV_FW_GSCAN_CAPA:
+			/* ignore, not used by current driver */
+			break;
+
+		case IWX_UCODE_TLV_SEC_RT_USNIFFER:
+			err = iwx_firmware_store_section(sc,
+			    IWX_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
+			    tlv_len);
+			if (err)
+				goto parse_out;
+			break;
+
+		case IWX_UCODE_TLV_PAGING:
+			if (tlv_len != sizeof(uint32_t)) {
+				err = EINVAL;
+				goto parse_out;
+			}
+			break;
+
+		case IWX_UCODE_TLV_N_SCAN_CHANNELS:
+			if (tlv_len != sizeof(uint32_t)) {
+				err = EINVAL;
+				goto parse_out;
+			}
+			sc->sc_capa_n_scan_channels =
+			  le32toh(*(const uint32_t *)tlv_data);
+			if (sc->sc_capa_n_scan_channels > IWX_MAX_SCAN_CHANNELS) {
+				err = ERANGE;
+				goto parse_out;
+			}
+			break;
+
+		case IWX_UCODE_TLV_FW_VERSION:
+			if (tlv_len != sizeof(uint32_t) * 3) {
+				err = EINVAL;
+				goto parse_out;
+			}
+
+			iwx_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver),
+			    le32toh(((const uint32_t *)tlv_data)[0]),
+			    le32toh(((const uint32_t *)tlv_data)[1]),
+			    le32toh(((const uint32_t *)tlv_data)[2]));
+			break;
+
+		case IWX_UCODE_TLV_FW_DBG_DEST: {
+			const struct iwx_fw_dbg_dest_tlv_v1 *dest_v1 = NULL;
+
+			fw->dbg_dest_ver = (const uint8_t *)tlv_data;
+			if (*fw->dbg_dest_ver != 0) {
+				err = EINVAL;
+				goto parse_out;
+			}
+
+			if (fw->dbg_dest_tlv_init)
+				break;
+			fw->dbg_dest_tlv_init = true;
+
+			dest_v1 = (const void *)tlv_data;
+			fw->dbg_dest_tlv_v1 = dest_v1;
+			fw->n_dest_reg = tlv_len -
+			    offsetof(struct iwx_fw_dbg_dest_tlv_v1, reg_ops);
+			fw->n_dest_reg /= sizeof(dest_v1->reg_ops[0]);
+			IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
+			    "%s: found debug dest; n_dest_reg=%d\n",
+			    __func__, fw->n_dest_reg);
+			break;
+		}
+
+		case IWX_UCODE_TLV_FW_DBG_CONF: {
+			const struct iwx_fw_dbg_conf_tlv *conf = (const void *)tlv_data;
+
+			if (!fw->dbg_dest_tlv_init ||
+			    conf->id >= nitems(fw->dbg_conf_tlv) ||
+			    fw->dbg_conf_tlv[conf->id] != NULL)
+				break;
+
+			IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
+			    "Found debug configuration: %d\n", conf->id);
+			fw->dbg_conf_tlv[conf->id] = conf;
+			fw->dbg_conf_tlv_len[conf->id] = tlv_len;
+			break;
+		}
+
+		case IWX_UCODE_TLV_UMAC_DEBUG_ADDRS: {
+			const struct iwx_umac_debug_addrs *dbg_ptrs =
+			    (const void *)tlv_data;
+
+			if (tlv_len != sizeof(*dbg_ptrs)) {
+				err = EINVAL;
+				goto parse_out;
+			}
+			if (sc->sc_device_family < IWX_DEVICE_FAMILY_22000)
+				break;
+			sc->sc_uc.uc_umac_error_event_table =
+			    le32toh(dbg_ptrs->error_info_addr) &
+			    ~IWX_FW_ADDR_CACHE_CONTROL;
+			sc->sc_uc.error_event_table_tlv_status |=
+			    IWX_ERROR_EVENT_TABLE_UMAC;
+			break;
+		}
+
+		case IWX_UCODE_TLV_LMAC_DEBUG_ADDRS: {
+			const struct iwx_lmac_debug_addrs *dbg_ptrs =
+			    (const void *)tlv_data;
+
+			if (tlv_len != sizeof(*dbg_ptrs)) {
+				err = EINVAL;
+				goto parse_out;
+			}
+			if (sc->sc_device_family < IWX_DEVICE_FAMILY_22000)
+				break;
+			sc->sc_uc.uc_lmac_error_event_table[0] =
+			    le32toh(dbg_ptrs->error_event_table_ptr) &
+			    ~IWX_FW_ADDR_CACHE_CONTROL;
+			sc->sc_uc.error_event_table_tlv_status |=
+			    IWX_ERROR_EVENT_TABLE_LMAC1;
+			break;
+		}
+
+		case IWX_UCODE_TLV_FW_MEM_SEG:
+			break;
+
+		case IWX_UCODE_TLV_IML:
+			/* A later IML TLV replaces any earlier copy. */
+			if (sc->sc_fw.iml != NULL) {
+				free(fw->iml, M_DEVBUF);
+				fw->iml_len = 0;
+			}
+			/* M_WAITOK cannot fail; NULL check is defensive. */
+			sc->sc_fw.iml = malloc(tlv_len, M_DEVBUF,
+			    M_WAITOK | M_ZERO);
+			if (sc->sc_fw.iml == NULL) {
+				err = ENOMEM;
+				goto parse_out;
+			}
+			memcpy(sc->sc_fw.iml, tlv_data, tlv_len);
+			sc->sc_fw.iml_len = tlv_len;
+			break;
+
+		case IWX_UCODE_TLV_CMD_VERSIONS:
+			/* Round a ragged length down to whole entries. */
+			if (tlv_len % sizeof(struct iwx_fw_cmd_version)) {
+				tlv_len /= sizeof(struct iwx_fw_cmd_version);
+				tlv_len *= sizeof(struct iwx_fw_cmd_version);
+			}
+			if (sc->n_cmd_versions != 0) {
+				err = EINVAL;
+				goto parse_out;
+			}
+			if (tlv_len > sizeof(sc->cmd_versions)) {
+				err = EINVAL;
+				goto parse_out;
+			}
+			memcpy(&sc->cmd_versions[0], tlv_data, tlv_len);
+			sc->n_cmd_versions = tlv_len / sizeof(struct iwx_fw_cmd_version);
+			break;
+
+		case IWX_UCODE_TLV_FW_RECOVERY_INFO:
+			break;
+
+		case IWX_UCODE_TLV_FW_FSEQ_VERSION:
+		case IWX_UCODE_TLV_PHY_INTEGRATION_VERSION:
+		case IWX_UCODE_TLV_FW_NUM_STATIONS:
+		case IWX_UCODE_TLV_FW_NUM_BEACONS:
+			break;
+
+		/* undocumented TLVs found in iwx-cc-a0-46 image */
+		case 58:
+		case 0x1000003:
+		case 0x1000004:
+			break;
+
+		/* undocumented TLVs found in iwx-cc-a0-48 image */
+		case 0x1000000:
+		case 0x1000002:
+			break;
+
+		case IWX_UCODE_TLV_TYPE_DEBUG_INFO:
+		case IWX_UCODE_TLV_TYPE_BUFFER_ALLOCATION:
+		case IWX_UCODE_TLV_TYPE_HCMD:
+		case IWX_UCODE_TLV_TYPE_REGIONS:
+		case IWX_UCODE_TLV_TYPE_TRIGGERS:
+		case IWX_UCODE_TLV_TYPE_CONF_SET:
+		case IWX_UCODE_TLV_SEC_TABLE_ADDR:
+		case IWX_UCODE_TLV_D3_KEK_KCK_ADDR:
+		case IWX_UCODE_TLV_CURRENT_PC:
+			break;
+
+		/* undocumented TLV found in iwx-cc-a0-67 image */
+		case 0x100000b:
+			break;
+
+		/* undocumented TLV found in iwx-ty-a0-gf-a0-73 image */
+		case 0x101:
+			break;
+
+		/* undocumented TLV found in iwx-ty-a0-gf-a0-77 image */
+		case 0x100000c:
+			break;
+
+		/* undocumented TLV found in iwx-ty-a0-gf-a0-89 image */
+		case 69:
+			break;
+
+		default:
+			err = EINVAL;
+			goto parse_out;
+		}
+
+		/*
+		 * Check for size_t overflow and ignore missing padding at
+		 * end of firmware file.
+		 */
+		if (roundup(tlv_len, 4) > len)
+			break;
+
+		/* TLV payloads are padded to 4-byte alignment. */
+		len -= roundup(tlv_len, 4);
+		data += roundup(tlv_len, 4);
+	}
+
+	KASSERT(err == 0, ("unhandled fw parse error"));
+
+parse_out:
+	if (err) {
+		printf("%s: firmware parse error %d, "
+		    "section type %d\n", DEVNAME(sc), err, tlv_type);
+	}
+
+out:
+	if (err) {
+		fw->fw_status = IWX_FW_STATUS_NONE;
+		if (fw->fw_rawdata != NULL)
+			iwx_fw_info_free(fw);
+	} else
+		fw->fw_status = IWX_FW_STATUS_DONE;
+	return err;
+}
+
+/*
+ * Address mask for periphery (PRPH) register accesses; AX210+ devices
+ * use a wider 24-bit address space than older families (20-bit).
+ */
+static uint32_t
+iwx_prph_addr_mask(struct iwx_softc *sc)
+{
+	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
+		return 0x00ffffff;
+	else
+		return 0x000fffff;
+}
+
+/*
+ * Read a PRPH register via the indirect HBUS window.  Caller must
+ * already hold the NIC lock (use iwx_read_prph() otherwise).
+ * The (3 << 24) bits select dword access, per the indirect interface.
+ */
+static uint32_t
+iwx_read_prph_unlocked(struct iwx_softc *sc, uint32_t addr)
+{
+	uint32_t mask = iwx_prph_addr_mask(sc);
+	IWX_WRITE(sc, IWX_HBUS_TARG_PRPH_RADDR, ((addr & mask) | (3 << 24)));
+	IWX_BARRIER_READ_WRITE(sc);
+	return IWX_READ(sc, IWX_HBUS_TARG_PRPH_RDAT);
+}
+
+/* Read a PRPH register, asserting that the NIC lock is held. */
+uint32_t
+iwx_read_prph(struct iwx_softc *sc, uint32_t addr)
+{
+	iwx_nic_assert_locked(sc);
+	return iwx_read_prph_unlocked(sc, addr);
+}
+
+/*
+ * Write a PRPH register via the indirect HBUS window.  Caller must
+ * already hold the NIC lock (use iwx_write_prph() otherwise).
+ */
+static void
+iwx_write_prph_unlocked(struct iwx_softc *sc, uint32_t addr, uint32_t val)
+{
+	uint32_t mask = iwx_prph_addr_mask(sc);
+	IWX_WRITE(sc, IWX_HBUS_TARG_PRPH_WADDR, ((addr & mask) | (3 << 24)));
+	IWX_BARRIER_WRITE(sc);
+	IWX_WRITE(sc, IWX_HBUS_TARG_PRPH_WDAT, val);
+}
+
+/* Write a PRPH register, asserting that the NIC lock is held. */
+static void
+iwx_write_prph(struct iwx_softc *sc, uint32_t addr, uint32_t val)
+{
+	iwx_nic_assert_locked(sc);
+	iwx_write_prph_unlocked(sc, addr, val);
+}
+
+/* Read a UMAC PRPH register (PRPH address plus per-device offset). */
+static uint32_t
+iwx_read_umac_prph(struct iwx_softc *sc, uint32_t addr)
+{
+	return iwx_read_prph(sc, addr + sc->sc_umac_prph_offset);
+}
+
+/* Write a UMAC PRPH register (PRPH address plus per-device offset). */
+static void
+iwx_write_umac_prph(struct iwx_softc *sc, uint32_t addr, uint32_t val)
+{
+	iwx_write_prph(sc, addr + sc->sc_umac_prph_offset, val);
+}
+
+/*
+ * Read 'dwords' 32-bit words of device memory starting at 'addr' into
+ * 'buf', converting from little-endian.  Takes and releases the NIC
+ * lock internally; returns EBUSY if the lock cannot be acquired.
+ */
+static int
+iwx_read_mem(struct iwx_softc *sc, uint32_t addr, void *buf, int dwords)
+{
+	int offs, err = 0;
+	uint32_t *vals = buf;
+
+	if (iwx_nic_lock(sc)) {
+		IWX_WRITE(sc, IWX_HBUS_TARG_MEM_RADDR, addr);
+		/* Reads from RDAT auto-increment the device address. */
+		for (offs = 0; offs < dwords; offs++)
+			vals[offs] = le32toh(IWX_READ(sc, IWX_HBUS_TARG_MEM_RDAT));
+		iwx_nic_unlock(sc);
+	} else {
+		err = EBUSY;
+	}
+	return err;
+}
+
+/*
+ * Busy-wait until (reg & mask) == (bits & mask), polling every 10us
+ * for up to 'timo' microseconds.  Returns 1 on success, 0 on timeout.
+ */
+static int
+iwx_poll_bit(struct iwx_softc *sc, int reg, uint32_t bits, uint32_t mask,
+    int timo)
+{
+	for (;;) {
+		if ((IWX_READ(sc, reg) & mask) == (bits & mask)) {
+			return 1;
+		}
+		if (timo < 10) {
+			return 0;
+		}
+		timo -= 10;
+		DELAY(10);
+	}
+}
+
+/*
+ * Acquire the NIC "MAC access" lock, which keeps the device awake for
+ * register access.  The lock is recursive via sc_nic_locks.  Returns 1
+ * on success, 0 on failure (device did not report clock ready within
+ * 150ms).  Pair every successful call with iwx_nic_unlock().
+ */
+static int
+iwx_nic_lock(struct iwx_softc *sc)
+{
+	if (sc->sc_nic_locks > 0) {
+		iwx_nic_assert_locked(sc);
+		sc->sc_nic_locks++;
+		return 1; /* already locked */
+	}
+
+	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
+	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+
+	/* Give the device a moment to wake up before polling. */
+	DELAY(2);
+
+	if (iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
+	    IWX_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
+	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
+	     | IWX_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 150000)) {
+		sc->sc_nic_locks++;
+		return 1;
+	}
+
+	printf("%s: acquiring device failed\n", DEVNAME(sc));
+	return 0;
+}
+
+/* Panic if the NIC lock is not currently held (programming error). */
+static void
+iwx_nic_assert_locked(struct iwx_softc *sc)
+{
+	if (sc->sc_nic_locks <= 0)
+		panic("%s: nic locks counter %d", DEVNAME(sc), sc->sc_nic_locks);
+}
+
+/*
+ * Release one level of the recursive NIC lock; when the count drops to
+ * zero, clear the MAC access request so the device may sleep again.
+ */
+static void
+iwx_nic_unlock(struct iwx_softc *sc)
+{
+	if (sc->sc_nic_locks > 0) {
+		if (--sc->sc_nic_locks == 0)
+			IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
+			    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+	} else
+		printf("%s: NIC already unlocked\n", DEVNAME(sc));
+}
+
+/*
+ * Read-modify-write a PRPH register: keep only the bits in 'mask',
+ * then OR in 'bits'.  Acquires the NIC lock; returns EBUSY on failure.
+ */
+static int
+iwx_set_bits_mask_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits,
+    uint32_t mask)
+{
+	uint32_t val;
+
+	if (iwx_nic_lock(sc)) {
+		val = iwx_read_prph(sc, reg) & mask;
+		val |= bits;
+		iwx_write_prph(sc, reg, val);
+		iwx_nic_unlock(sc);
+		return 0;
+	}
+	return EBUSY;
+}
+
+/* Set 'bits' in a PRPH register.  Returns 0 or EBUSY. */
+static int
+iwx_set_bits_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits)
+{
+	return iwx_set_bits_mask_prph(sc, reg, bits, ~0);
+}
+
+/* Clear 'bits' in a PRPH register.  Returns 0 or EBUSY. */
+static int
+iwx_clear_bits_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits)
+{
+	return iwx_set_bits_mask_prph(sc, reg, 0, ~bits);
+}
+
+/*
+ * bus_dmamap_load() callback: store the single segment's bus address
+ * into the bus_addr_t pointed to by 'arg'.  The tags used here are
+ * created with nsegments == 1, hence the assertion.
+ */
+static void
+iwx_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
+{
+	if (error != 0)
+		return;
+	KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
+	*(bus_addr_t *)arg = segs[0].ds_addr;
+}
+
+/*
+ * Allocate a physically contiguous, zeroed, coherent DMA buffer of
+ * 'size' bytes with the given alignment, below 4GB.  Fills in 'dma'
+ * (tag, map, vaddr, paddr).  On failure all partial state is released
+ * via iwx_dma_contig_free() and an errno is returned.
+ */
+static int
+iwx_dma_contig_alloc(bus_dma_tag_t tag, struct iwx_dma_info *dma,
+    bus_size_t size, bus_size_t alignment)
+{
+	int error;
+
+	dma->tag = NULL;
+	dma->map = NULL;
+	dma->size = size;
+	dma->vaddr = NULL;
+
+	error = bus_dma_tag_create(tag, alignment,
+	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
+	    1, size, 0, NULL, NULL, &dma->tag);
+	if (error != 0)
+		goto fail;
+
+	error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
+	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map);
+	if (error != 0)
+		goto fail;
+
+	error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
+	    iwx_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT);
+	if (error != 0) {
+		bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
+		dma->vaddr = NULL;
+		goto fail;
+	}
+
+	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
+
+	return 0;
+
+fail:
+	iwx_dma_contig_free(dma);
+	return error;
+}
+
+/*
+ * Release a buffer allocated with iwx_dma_contig_alloc().  Safe to
+ * call on a partially initialized or already freed iwx_dma_info.
+ */
+static void
+iwx_dma_contig_free(struct iwx_dma_info *dma)
+{
+	if (dma->vaddr != NULL) {
+		bus_dmamap_sync(dma->tag, dma->map,
+		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+		bus_dmamap_unload(dma->tag, dma->map);
+		bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
+		dma->vaddr = NULL;
+	}
+	if (dma->tag != NULL) {
+		bus_dma_tag_destroy(dma->tag);
+		dma->tag = NULL;
+	}
+}
+
+/*
+ * Allocate the RX ring: free/used descriptor arrays (layout differs
+ * between AX210+ and older families), the RX status area, a DMA tag
+ * for receive buffers, and one mapped buffer per ring slot.  On any
+ * failure everything allocated so far is released via
+ * iwx_free_rx_ring().
+ */
+static int
+iwx_alloc_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
+{
+	bus_size_t size;
+	int i, err;
+
+	ring->cur = 0;
+
+	/* Allocate RX descriptors (256-byte aligned). */
+	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
+		size = sizeof(struct iwx_rx_transfer_desc);
+	else
+		size = sizeof(uint64_t);
+	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->free_desc_dma,
+	    size * IWX_RX_MQ_RING_COUNT, 256);
+	if (err) {
+		device_printf(sc->sc_dev,
+		    "could not allocate RX ring DMA memory\n");
+		goto fail;
+	}
+	ring->desc = ring->free_desc_dma.vaddr;
+
+	/* Allocate RX status area (16-byte aligned). */
+	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
+		size = sizeof(uint16_t);
+	else
+		size = sizeof(*ring->stat);
+	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma, size, 16);
+	if (err) {
+		device_printf(sc->sc_dev,
+		    "could not allocate RX status DMA memory\n");
+		goto fail;
+	}
+	ring->stat = ring->stat_dma.vaddr;
+
+	/* Allocate the used-descriptor (completion) ring. */
+	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
+		size = sizeof(struct iwx_rx_completion_desc);
+	else
+		size = sizeof(uint32_t);
+	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->used_desc_dma,
+	    size * IWX_RX_MQ_RING_COUNT, 256);
+	if (err) {
+		device_printf(sc->sc_dev,
+		    "could not allocate RX ring DMA memory\n");
+		goto fail;
+	}
+
+	err = bus_dma_tag_create(sc->sc_dmat, 1, 0, BUS_SPACE_MAXADDR_32BIT,
+	    BUS_SPACE_MAXADDR, NULL, NULL, IWX_RBUF_SIZE, 1, IWX_RBUF_SIZE,
+	    0, NULL, NULL, &ring->data_dmat);
+	/*
+	 * Check the tag creation result; previously a failure here was
+	 * ignored and bus_dmamap_create() below would dereference a
+	 * NULL tag.
+	 */
+	if (err) {
+		device_printf(sc->sc_dev,
+		    "could not create RX buf DMA tag\n");
+		goto fail;
+	}
+
+	for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++) {
+		struct iwx_rx_data *data = &ring->data[i];
+
+		memset(data, 0, sizeof(*data));
+		err = bus_dmamap_create(ring->data_dmat, 0, &data->map);
+		if (err) {
+			device_printf(sc->sc_dev,
+			    "could not create RX buf DMA map\n");
+			goto fail;
+		}
+
+		err = iwx_rx_addbuf(sc, IWX_RBUF_SIZE, i);
+		if (err)
+			goto fail;
+	}
+	return 0;
+
+fail:	iwx_free_rx_ring(sc, ring);
+	return err;
+}
+
/*
 * Stop the RX DMA engine and poll (up to 1000 * 10us) for it to report
 * idle.  AX210 and later use the UMAC PRPH register space; older
 * devices use the legacy PRPH registers.  Requires the NIC lock;
 * silently does nothing if the lock cannot be taken.
 */
static void
iwx_disable_rx_dma(struct iwx_softc *sc)
{
	int ntries;

	if (iwx_nic_lock(sc)) {
		if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
			iwx_write_umac_prph(sc, IWX_RFH_RXF_DMA_CFG_GEN3, 0);
		else
			iwx_write_prph(sc, IWX_RFH_RXF_DMA_CFG, 0);
		/* Wait for the DMA-idle status bit. */
		for (ntries = 0; ntries < 1000; ntries++) {
			if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
				if (iwx_read_umac_prph(sc,
				    IWX_RFH_GEN_STATUS_GEN3) & IWX_RXF_DMA_IDLE)
					break;
			} else {
				if (iwx_read_prph(sc, IWX_RFH_GEN_STATUS) &
				    IWX_RXF_DMA_IDLE)
					break;
			}
			DELAY(10);
		}
		iwx_nic_unlock(sc);
	}
}
+
+static void
+iwx_reset_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
+{
+ ring->cur = 0;
+ bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map,
+ BUS_DMASYNC_PREWRITE);
+ if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
+ uint16_t *status = sc->rxq.stat_dma.vaddr;
+ *status = 0;
+ } else
+ memset(ring->stat, 0, sizeof(*ring->stat));
+ bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map,
+ BUS_DMASYNC_POSTWRITE);
+
+}
+
+static void
+iwx_free_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
+{
+ int i;
+
+ iwx_dma_contig_free(&ring->free_desc_dma);
+ iwx_dma_contig_free(&ring->stat_dma);
+ iwx_dma_contig_free(&ring->used_desc_dma);
+
+ for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++) {
+ struct iwx_rx_data *data = &ring->data[i];
+ if (data->m != NULL) {
+ bus_dmamap_sync(ring->data_dmat, data->map,
+ BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(ring->data_dmat, data->map);
+ m_freem(data->m);
+ data->m = NULL;
+ }
+ if (data->map != NULL) {
+ bus_dmamap_destroy(ring->data_dmat, data->map);
+ data->map = NULL;
+ }
+ }
+ if (ring->data_dmat != NULL) {
+ bus_dma_tag_destroy(ring->data_dmat);
+ ring->data_dmat = NULL;
+ }
+}
+
+static int
+iwx_alloc_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring, int qid)
+{
+ bus_addr_t paddr;
+ bus_size_t size;
+ int i, err;
+ size_t bc_tbl_size;
+ bus_size_t bc_align;
+ size_t mapsize;
+
+ ring->qid = qid;
+ ring->queued = 0;
+ ring->cur = 0;
+ ring->cur_hw = 0;
+ ring->tail = 0;
+ ring->tail_hw = 0;
+
+ /* Allocate TX descriptors (256-byte aligned). */
+ size = IWX_TX_RING_COUNT * sizeof(struct iwx_tfh_tfd);
+ err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
+ if (err) {
+ device_printf(sc->sc_dev,
+ "could not allocate TX ring DMA memory\n");
+ goto fail;
+ }
+ ring->desc = ring->desc_dma.vaddr;
+
+ /*
+ * The hardware supports up to 512 Tx rings which is more
+ * than we currently need.
+ *
+ * In DQA mode we use 1 command queue + 1 default queue for
+ * management, control, and non-QoS data frames.
+ * The command is queue sc->txq[0], our default queue is sc->txq[1].
+ *
+ * Tx aggregation requires additional queues, one queue per TID for
+ * which aggregation is enabled. We map TID 0-7 to sc->txq[2:9].
+ * Firmware may assign its own internal IDs for these queues
+ * depending on which TID gets aggregation enabled first.
+ * The driver maintains a table mapping driver-side queue IDs
+ * to firmware-side queue IDs.
+ */
+
+ if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
+ bc_tbl_size = sizeof(struct iwx_gen3_bc_tbl_entry) *
+ IWX_TFD_QUEUE_BC_SIZE_GEN3_AX210;
+ bc_align = 128;
+ } else {
+ bc_tbl_size = sizeof(struct iwx_agn_scd_bc_tbl);
+ bc_align = 64;
+ }
+ err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->bc_tbl, bc_tbl_size,
+ bc_align);
+ if (err) {
+ device_printf(sc->sc_dev,
+ "could not allocate byte count table DMA memory\n");
+ goto fail;
+ }
+
+ size = IWX_TX_RING_COUNT * sizeof(struct iwx_device_cmd);
+ err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size,
+ IWX_FIRST_TB_SIZE_ALIGN);
+ if (err) {
+ device_printf(sc->sc_dev,
+ "could not allocate cmd DMA memory\n");
+ goto fail;
+ }
+ ring->cmd = ring->cmd_dma.vaddr;
+
+ /* FW commands may require more mapped space than packets. */
+ if (qid == IWX_DQA_CMD_QUEUE)
+ mapsize = (sizeof(struct iwx_cmd_header) +
+ IWX_MAX_CMD_PAYLOAD_SIZE);
+ else
+ mapsize = MCLBYTES;
+ err = bus_dma_tag_create(sc->sc_dmat, 1, 0, BUS_SPACE_MAXADDR_32BIT,
+ BUS_SPACE_MAXADDR, NULL, NULL, mapsize, IWX_TFH_NUM_TBS - 2,
+ mapsize, 0, NULL, NULL, &ring->data_dmat);
+
+ paddr = ring->cmd_dma.paddr;
+ for (i = 0; i < IWX_TX_RING_COUNT; i++) {
+ struct iwx_tx_data *data = &ring->data[i];
+
+ data->cmd_paddr = paddr;
+ paddr += sizeof(struct iwx_device_cmd);
+
+ err = bus_dmamap_create(ring->data_dmat, 0, &data->map);
+ if (err) {
+ device_printf(sc->sc_dev,
+ "could not create TX buf DMA map\n");
+ goto fail;
+ }
+ }
+ KASSERT(paddr == ring->cmd_dma.paddr + size, ("bad paddr in txr alloc"));
+ return 0;
+
+fail:
+ return err;
+}
+
+static void
+iwx_reset_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring)
+{
+ int i;
+
+ for (i = 0; i < IWX_TX_RING_COUNT; i++) {
+ struct iwx_tx_data *data = &ring->data[i];
+
+ if (data->m != NULL) {
+ bus_dmamap_sync(ring->data_dmat, data->map,
+ BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(ring->data_dmat, data->map);
+ m_freem(data->m);
+ data->m = NULL;
+ }
+ }
+
+ /* Clear byte count table. */
+ memset(ring->bc_tbl.vaddr, 0, ring->bc_tbl.size);
+
+ /* Clear TX descriptors. */
+ memset(ring->desc, 0, ring->desc_dma.size);
+ bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
+ BUS_DMASYNC_PREWRITE);
+ sc->qfullmsk &= ~(1 << ring->qid);
+ sc->qenablemsk &= ~(1 << ring->qid);
+ for (i = 0; i < nitems(sc->aggqid); i++) {
+ if (sc->aggqid[i] == ring->qid) {
+ sc->aggqid[i] = 0;
+ break;
+ }
+ }
+ ring->queued = 0;
+ ring->cur = 0;
+ ring->cur_hw = 0;
+ ring->tail = 0;
+ ring->tail_hw = 0;
+ ring->tid = 0;
+}
+
+static void
+iwx_free_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring)
+{
+ int i;
+
+ iwx_dma_contig_free(&ring->desc_dma);
+ iwx_dma_contig_free(&ring->cmd_dma);
+ iwx_dma_contig_free(&ring->bc_tbl);
+
+ for (i = 0; i < IWX_TX_RING_COUNT; i++) {
+ struct iwx_tx_data *data = &ring->data[i];
+
+ if (data->m != NULL) {
+ bus_dmamap_sync(ring->data_dmat, data->map,
+ BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(ring->data_dmat, data->map);
+ m_freem(data->m);
+ data->m = NULL;
+ }
+ if (data->map != NULL) {
+ bus_dmamap_destroy(ring->data_dmat, data->map);
+ data->map = NULL;
+ }
+ }
+ if (ring->data_dmat != NULL) {
+ bus_dma_tag_destroy(ring->data_dmat);
+ ring->data_dmat = NULL;
+ }
+}
+
/*
 * Unmask only the RF-kill interrupt cause so a radio switch toggle is
 * still delivered while the device is otherwise quiesced, and allow
 * an RF-kill event to wake the PCIe link.
 */
static void
iwx_enable_rfkill_int(struct iwx_softc *sc)
{
	if (!sc->sc_msix) {
		sc->sc_intmask = IWX_CSR_INT_BIT_RF_KILL;
		IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
	} else {
		/* In MSIX mode a cause is enabled when its mask bit is clear. */
		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
		    sc->sc_fh_init_mask);
		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
		    ~IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL);
		sc->sc_hw_mask = IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL;
	}

	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
	    IWX_CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
}
+
+static int
+iwx_check_rfkill(struct iwx_softc *sc)
+{
+ uint32_t v;
+ int rv;
+
+ /*
+ * "documentation" is not really helpful here:
+ * 27: HW_RF_KILL_SW
+ * Indicates state of (platform's) hardware RF-Kill switch
+ *
+ * But apparently when it's off, it's on ...
+ */
+ v = IWX_READ(sc, IWX_CSR_GP_CNTRL);
+ rv = (v & IWX_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
+ if (rv) {
+ sc->sc_flags |= IWX_FLAG_RFKILL;
+ } else {
+ sc->sc_flags &= ~IWX_FLAG_RFKILL;
+ }
+
+ return rv;
+}
+
+static void
+iwx_enable_interrupts(struct iwx_softc *sc)
+{
+ if (!sc->sc_msix) {
+ sc->sc_intmask = IWX_CSR_INI_SET_MASK;
+ IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
+ } else {
+ /*
+ * fh/hw_mask keeps all the unmasked causes.
+ * Unlike msi, in msix cause is enabled when it is unset.
+ */
+ sc->sc_hw_mask = sc->sc_hw_init_mask;
+ sc->sc_fh_mask = sc->sc_fh_init_mask;
+ IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
+ ~sc->sc_fh_mask);
+ IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
+ ~sc->sc_hw_mask);
+ }
+}
+
+static void
+iwx_enable_fwload_interrupt(struct iwx_softc *sc)
+{
+ if (!sc->sc_msix) {
+ sc->sc_intmask = IWX_CSR_INT_BIT_ALIVE | IWX_CSR_INT_BIT_FH_RX;
+ IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
+ } else {
+ IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
+ ~IWX_MSIX_HW_INT_CAUSES_REG_ALIVE);
+ sc->sc_hw_mask = IWX_MSIX_HW_INT_CAUSES_REG_ALIVE;
+ /*
+ * Leave all the FH causes enabled to get the ALIVE
+ * notification.
+ */
+ IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
+ ~sc->sc_fh_init_mask);
+ sc->sc_fh_mask = sc->sc_fh_init_mask;
+ }
+}
+
/* Re-arm the interrupt mask previously saved in sc_intmask (MSI mode). */
static void
iwx_restore_interrupts(struct iwx_softc *sc)
{
	IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
}
+
+static void
+iwx_disable_interrupts(struct iwx_softc *sc)
+{
+ if (!sc->sc_msix) {
+ IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);
+
+ /* acknowledge all interrupts */
+ IWX_WRITE(sc, IWX_CSR_INT, ~0);
+ IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, ~0);
+ } else {
+ IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
+ sc->sc_fh_init_mask);
+ IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
+ sc->sc_hw_init_mask);
+ }
+}
+
/*
 * Reinitialize the interrupt cause table (ICT): zero it, point the
 * device at it, switch the driver into ICT mode and re-enable
 * interrupts.
 */
static void
iwx_ict_reset(struct iwx_softc *sc)
{
	iwx_disable_interrupts(sc);

	/* Zero the table and restart at the first entry. */
	memset(sc->ict_dma.vaddr, 0, IWX_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT (4KB aligned). */
	IWX_WRITE(sc, IWX_CSR_DRAM_INT_TBL_REG,
	    IWX_CSR_DRAM_INT_TBL_ENABLE
	    | IWX_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | IWX_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | sc->ict_dma.paddr >> IWX_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWX_FLAG_USE_ICT;

	/* Acknowledge all pending interrupts, then unmask. */
	IWX_WRITE(sc, IWX_CSR_INT, ~0);
	iwx_enable_interrupts(sc);
}
+
#define IWX_HW_READY_TIMEOUT 50
/*
 * Assert the NIC_READY handshake bit and poll for the device to
 * confirm it.  Returns non-zero (ready) or zero (timed out).  On
 * success also signals "OS alive" to the device.
 */
static int
iwx_set_hw_ready(struct iwx_softc *sc)
{
	int ready;

	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
	    IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	ready = iwx_poll_bit(sc, IWX_CSR_HW_IF_CONFIG_REG,
	    IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
	    IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
	    IWX_HW_READY_TIMEOUT);
	if (ready)
		IWX_SETBITS(sc, IWX_CSR_MBOX_SET_REG,
		    IWX_CSR_MBOX_SET_REG_OS_ALIVE);

	DPRINTF(("%s: ready=%d\n", __func__, ready));
	return ready;
}
#undef IWX_HW_READY_TIMEOUT
+
/*
 * Bring the device out of low-power state so its registers can be
 * accessed.  Returns 0 once the hardware reports ready, ETIMEDOUT
 * after all retries are exhausted.
 */
static int
iwx_prepare_card_hw(struct iwx_softc *sc)
{
	int t = 0;
	int ntries;

	if (iwx_set_hw_ready(sc))
		return 0;

	IWX_SETBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
	    IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
	DELAY(1000);

	for (ntries = 0; ntries < 10; ntries++) {
		/* If HW is not ready, prepare the conditions to check again */
		IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
		    IWX_CSR_HW_IF_CONFIG_REG_PREPARE);

		/*
		 * NOTE(review): t accumulates across outer iterations, so
		 * only the first pass polls for the full 150ms; later
		 * passes poll once.  This appears to match the upstream
		 * OpenBSD driver -- confirm before changing.
		 */
		do {
			if (iwx_set_hw_ready(sc))
				return 0;
			DELAY(200);
			t += 200;
		} while (t < 150000);
		DELAY(25000);
	}

	return ETIMEDOUT;
}
+
/*
 * Force a power-gating cycle via the HPM general configuration
 * register: force-active, enable power/sleep gating, then release the
 * force bit.  Returns 0 or the first PRPH write error.
 */
static int
iwx_force_power_gating(struct iwx_softc *sc)
{
	int err;

	err = iwx_set_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
	    IWX_HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
	if (err)
		return err;
	DELAY(20);
	err = iwx_set_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
	    IWX_HPM_HIPM_GEN_CFG_CR_PG_EN |
	    IWX_HPM_HIPM_GEN_CFG_CR_SLP_EN);
	if (err)
		return err;
	DELAY(20);
	err = iwx_clear_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
	    IWX_HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
	return err;
}
+
+static void
+iwx_apm_config(struct iwx_softc *sc)
+{
+ uint16_t lctl, cap;
+ int pcie_ptr;
+ int error;
+
+ /*
+ * L0S states have been found to be unstable with our devices
+ * and in newer hardware they are not officially supported at
+ * all, so we must always set the L0S_DISABLED bit.
+ */
+ IWX_SETBITS(sc, IWX_CSR_GIO_REG, IWX_CSR_GIO_REG_VAL_L0S_DISABLED);
+
+ error = pci_find_cap(sc->sc_dev, PCIY_EXPRESS, &pcie_ptr);
+ if (error != 0) {
+ printf("can't fill pcie_ptr\n");
+ return;
+ }
+
+ lctl = pci_read_config(sc->sc_dev, pcie_ptr + PCIER_LINK_CTL,
+ sizeof(lctl));
+#define PCI_PCIE_LCSR_ASPM_L0S 0x00000001
+ sc->sc_pm_support = !(lctl & PCI_PCIE_LCSR_ASPM_L0S);
+#define PCI_PCIE_DCSR2 0x28
+ cap = pci_read_config(sc->sc_dev, pcie_ptr + PCI_PCIE_DCSR2,
+ sizeof(lctl));
+#define PCI_PCIE_DCSR2_LTREN 0x00000400
+ sc->sc_ltr_enabled = (cap & PCI_PCIE_DCSR2_LTREN) ? 1 : 0;
+#define PCI_PCIE_LCSR_ASPM_L1 0x00000002
+ DPRINTF(("%s: L1 %sabled - LTR %sabled\n",
+ DEVNAME(sc),
+ (lctl & PCI_PCIE_LCSR_ASPM_L1) ? "En" : "Dis",
+ sc->sc_ltr_enabled ? "En" : "Dis"));
+#undef PCI_PCIE_LCSR_ASPM_L0S
+#undef PCI_PCIE_DCSR2
+#undef PCI_PCIE_DCSR2_LTREN
+#undef PCI_PCIE_LCSR_ASPM_L1
+}
+
+/*
+ * Start up NIC's basic functionality after it has been reset
+ * e.g. after platform boot or shutdown.
+ * NOTE: This does not load uCode nor start the embedded processor
+ */
static int
iwx_apm_init(struct iwx_softc *sc)
{
	int err = 0;

	/*
	 * Disable L0s without affecting L1;
	 * don't wait for ICH L0s (ICH bug W/A)
	 */
	IWX_SETBITS(sc, IWX_CSR_GIO_CHICKEN_BITS,
	    IWX_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	IWX_SETBITS(sc, IWX_CSR_DBG_HPET_MEM_REG, IWX_CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 */
	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
	    IWX_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	/* Record platform ASPM/LTR capabilities, disable L0s. */
	iwx_apm_config(sc);

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL, IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is supported, e.g. iwx_write_prph()
	 * and accesses to uCode SRAM.
	 */
	if (!iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
		printf("%s: timeout waiting for clock stabilization\n",
		    DEVNAME(sc));
		err = ETIMEDOUT;
		goto out;
	}
 out:
	if (err)
		printf("%s: apm init error %d\n", DEVNAME(sc), err);
	return err;
}
+
/*
 * Stop the adapter's power management: prepare/PME-enable, halt
 * busmaster DMA, and drop back to the uninitialized power state.
 */
static void
iwx_apm_stop(struct iwx_softc *sc)
{
	IWX_SETBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
	    IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
	    IWX_CSR_HW_IF_CONFIG_REG_PREPARE |
	    IWX_CSR_HW_IF_CONFIG_REG_ENABLE_PME);
	DELAY(1000);
	IWX_CLRBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
	    IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
	DELAY(5000);

	/* stop device's busmaster DMA activity */
	IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_STOP_MASTER);

	if (!iwx_poll_bit(sc, IWX_CSR_RESET,
	    IWX_CSR_RESET_REG_FLAG_MASTER_DISABLED,
	    IWX_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
		printf("%s: timeout waiting for master\n", DEVNAME(sc));

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
	    IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
+
/*
 * Program the MSIX IVAR tables (or switch the device to MSI mode)
 * and, when MSIX is in use, snapshot the initial cause masks that
 * iwx_conf_msix_hw() left programmed.
 */
static void
iwx_init_msix_hw(struct iwx_softc *sc)
{
	iwx_conf_msix_hw(sc, 0);

	if (!sc->sc_msix)
		return;

	/* Masks are stored inverted: a clear hardware bit enables a cause. */
	sc->sc_fh_init_mask = ~IWX_READ(sc, IWX_CSR_MSIX_FH_INT_MASK_AD);
	sc->sc_fh_mask = sc->sc_fh_init_mask;
	sc->sc_hw_init_mask = ~IWX_READ(sc, IWX_CSR_MSIX_HW_INT_MASK_AD);
	sc->sc_hw_mask = sc->sc_hw_init_mask;
}
+
+static void
+iwx_conf_msix_hw(struct iwx_softc *sc, int stopped)
+{
+ int vector = 0;
+
+ if (!sc->sc_msix) {
+ /* Newer chips default to MSIX. */
+ if (!stopped && iwx_nic_lock(sc)) {
+ iwx_write_umac_prph(sc, IWX_UREG_CHICK,
+ IWX_UREG_CHICK_MSI_ENABLE);
+ iwx_nic_unlock(sc);
+ }
+ return;
+ }
+
+ if (!stopped && iwx_nic_lock(sc)) {
+ iwx_write_umac_prph(sc, IWX_UREG_CHICK,
+ IWX_UREG_CHICK_MSIX_ENABLE);
+ iwx_nic_unlock(sc);
+ }
+
+ /* Disable all interrupts */
+ IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD, ~0);
+ IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD, ~0);
+
+ /* Map fallback-queue (command/mgmt) to a single vector */
+ IWX_WRITE_1(sc, IWX_CSR_MSIX_RX_IVAR(0),
+ vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
+ /* Map RSS queue (data) to the same vector */
+ IWX_WRITE_1(sc, IWX_CSR_MSIX_RX_IVAR(1),
+ vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
+
+ /* Enable the RX queues cause interrupts */
+ IWX_CLRBITS(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
+ IWX_MSIX_FH_INT_CAUSES_Q0 | IWX_MSIX_FH_INT_CAUSES_Q1);
+
+ /* Map non-RX causes to the same vector */
+ IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_D2S_CH0_NUM),
+ vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
+ IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_D2S_CH1_NUM),
+ vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
+ IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_S2D),
+ vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
+ IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_FH_ERR),
+ vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
+ IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_ALIVE),
+ vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
+ IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_WAKEUP),
+ vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
+ IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_RESET_DONE),
+ vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
+ IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_CT_KILL),
+ vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
+ IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_RF_KILL),
+ vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
+ IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_PERIODIC),
+ vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
+ IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_SW_ERR),
+ vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
+ IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_SCD),
+ vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
+ IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_FH_TX),
+ vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
+ IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_HW_ERR),
+ vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
+ IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_HAP),
+ vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
+
+ /* Enable non-RX causes interrupts */
+ IWX_CLRBITS(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
+ IWX_MSIX_FH_INT_CAUSES_D2S_CH0_NUM |
+ IWX_MSIX_FH_INT_CAUSES_D2S_CH1_NUM |
+ IWX_MSIX_FH_INT_CAUSES_S2D |
+ IWX_MSIX_FH_INT_CAUSES_FH_ERR);
+ IWX_CLRBITS(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
+ IWX_MSIX_HW_INT_CAUSES_REG_ALIVE |
+ IWX_MSIX_HW_INT_CAUSES_REG_WAKEUP |
+ IWX_MSIX_HW_INT_CAUSES_REG_RESET_DONE |
+ IWX_MSIX_HW_INT_CAUSES_REG_CT_KILL |
+ IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL |
+ IWX_MSIX_HW_INT_CAUSES_REG_PERIODIC |
+ IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR |
+ IWX_MSIX_HW_INT_CAUSES_REG_SCD |
+ IWX_MSIX_HW_INT_CAUSES_REG_FH_TX |
+ IWX_MSIX_HW_INT_CAUSES_REG_HW_ERR |
+ IWX_MSIX_HW_INT_CAUSES_REG_HAP);
+}
+
+static int
+iwx_clear_persistence_bit(struct iwx_softc *sc)
+{
+ uint32_t hpm, wprot;
+
+ hpm = iwx_read_prph_unlocked(sc, IWX_HPM_DEBUG);
+ if (hpm != 0xa5a5a5a0 && (hpm & IWX_PERSISTENCE_BIT)) {
+ wprot = iwx_read_prph_unlocked(sc, IWX_PREG_PRPH_WPROT_22000);
+ if (wprot & IWX_PREG_WFPM_ACCESS) {
+ printf("%s: cannot clear persistence bit\n",
+ DEVNAME(sc));
+ return EPERM;
+ }
+ iwx_write_prph_unlocked(sc, IWX_HPM_DEBUG,
+ hpm & ~IWX_PERSISTENCE_BIT);
+ }
+
+ return 0;
+}
+
/*
 * Cold-start the hardware: prepare, software reset, (on integrated
 * 22000 parts) force a power-gating cycle, then bring up APM, MSIX
 * and the RF-kill interrupt.  Returns 0 or an errno.
 */
static int
iwx_start_hw(struct iwx_softc *sc)
{
	int err;

	err = iwx_prepare_card_hw(sc);
	if (err)
		return err;

	if (sc->sc_device_family == IWX_DEVICE_FAMILY_22000) {
		err = iwx_clear_persistence_bit(sc);
		if (err)
			return err;
	}

	/* Reset the entire device */
	IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
	DELAY(5000);

	if (sc->sc_device_family == IWX_DEVICE_FAMILY_22000 &&
	    sc->sc_integrated) {
		IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
		    IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
		DELAY(20);
		if (!iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
		    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
		    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
			printf("%s: timeout waiting for clock stabilization\n",
			    DEVNAME(sc));
			return ETIMEDOUT;
		}

		err = iwx_force_power_gating(sc);
		if (err)
			return err;

		/* Reset the entire device */
		IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
		DELAY(5000);
	}

	err = iwx_apm_init(sc);
	if (err)
		return err;

	/* Program the MSIX IVAR tables and record initial cause masks. */
	iwx_init_msix_hw(sc);

	/* Keep RF-kill armed and sample the current switch state. */
	iwx_enable_rfkill_int(sc);
	iwx_check_rfkill(sc);

	return 0;
}
+
/*
 * Fully stop the device: mask interrupts, quiesce RX/TX DMA, release
 * the wakeup request, stop APM, reset the on-board processor, and
 * reprogram the IVAR tables so the RF-kill interrupt keeps working
 * while the device is down.  Also frees paging and PNVM DMA memory.
 */
static void
iwx_stop_device(struct iwx_softc *sc)
{
	int i;

	iwx_disable_interrupts(sc);
	sc->sc_flags &= ~IWX_FLAG_USE_ICT;

	iwx_disable_rx_dma(sc);
	iwx_reset_rx_ring(sc, &sc->rxq);
	for (i = 0; i < nitems(sc->txq); i++)
		iwx_reset_tx_ring(sc, &sc->txq[i]);
#if 0
	/* XXX-THJ: Tidy up BA state on stop */
	for (i = 0; i < IEEE80211_NUM_TID; i++) {
		struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[i];
		if (ba->ba_state != IEEE80211_BA_AGREED)
			continue;
		ieee80211_delba_request(ic, ni, 0, 1, i);
	}
#endif
	/* Make sure (redundant) we've released our request to stay awake */
	IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	if (sc->sc_nic_locks > 0)
		printf("%s: %d active NIC locks forcefully cleared\n",
		    DEVNAME(sc), sc->sc_nic_locks);
	sc->sc_nic_locks = 0;

	/* Stop the device, and put it in low power state */
	iwx_apm_stop(sc);

	/* Reset the on-board processor. */
	IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
	DELAY(5000);

	/*
	 * Upon stop, the IVAR table gets erased, so msi-x won't
	 * work. This causes a bug in RF-KILL flows, since the interrupt
	 * that enables radio won't fire on the correct irq, and the
	 * driver won't be able to handle the interrupt.
	 * Configure the IVAR table again after reset.
	 */
	iwx_conf_msix_hw(sc, 1);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clear the interrupt again.
	 */
	iwx_disable_interrupts(sc);

	/* Even though we stop the HW we still want the RF kill interrupt. */
	iwx_enable_rfkill_int(sc);
	iwx_check_rfkill(sc);

	iwx_prepare_card_hw(sc);

	iwx_ctxt_info_free_paging(sc);
	iwx_dma_contig_free(&sc->pnvm_dma);
}
+
/*
 * Program MAC step/dash and radio type/step/dash from the firmware
 * PHY configuration into the hardware interface config register via
 * read-modify-write of the relevant bit fields.
 */
static void
iwx_nic_config(struct iwx_softc *sc)
{
	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	uint32_t mask, val, reg_val = 0;

	/* Extract radio configuration fields from the firmware PHY config. */
	radio_cfg_type = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_TYPE) >>
	    IWX_FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_STEP) >>
	    IWX_FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_DASH) >>
	    IWX_FW_PHY_CFG_RADIO_DASH_POS;

	/* MAC revision fields come from the hardware revision register. */
	reg_val |= IWX_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
	    IWX_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= IWX_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
	    IWX_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	mask = IWX_CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
	    IWX_CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
	    IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
	    IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
	    IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
	    IWX_CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
	    IWX_CSR_HW_IF_CONFIG_REG_BIT_MAC_SI;

	/* Read-modify-write only the fields covered by the mask. */
	val = IWX_READ(sc, IWX_CSR_HW_IF_CONFIG_REG);
	val &= ~mask;
	val |= reg_val;
	IWX_WRITE(sc, IWX_CSR_HW_IF_CONFIG_REG, val);
}
+
/* Set the default interrupt coalescing timeout; always succeeds. */
static int
iwx_nic_rx_init(struct iwx_softc *sc)
{
	IWX_WRITE_1(sc, IWX_CSR_INT_COALESCING, IWX_HOST_INT_TIMEOUT_DEF);

	/*
	 * We don't configure the RFH; the firmware will do that.
	 * Rx descriptors are set when firmware sends an ALIVE interrupt.
	 */
	return 0;
}
+
/*
 * Basic NIC bring-up: APM init, MAC/radio configuration (pre-AX210
 * only) and RX coalescing setup.  Returns 0 or an errno.
 * NOTE(review): the iwx_apm_init() return value is ignored here,
 * apparently matching the upstream driver -- confirm before changing.
 */
static int
iwx_nic_init(struct iwx_softc *sc)
{
	int err;

	iwx_apm_init(sc);
	if (sc->sc_device_family < IWX_DEVICE_FAMILY_AX210)
		iwx_nic_config(sc);

	err = iwx_nic_rx_init(sc);
	if (err)
		return err;

	/* Shadow register control value taken from the upstream driver. */
	IWX_SETBITS(sc, IWX_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);

	return 0;
}
+
/* Map ieee80211_edca_ac categories to firmware Tx FIFO. */
const uint8_t iwx_ac_to_tx_fifo[] = {
	IWX_GEN2_EDCA_TX_FIFO_BE,	/* best effort */
	IWX_GEN2_EDCA_TX_FIFO_BK,	/* background */
	IWX_GEN2_EDCA_TX_FIFO_VI,	/* video */
	IWX_GEN2_EDCA_TX_FIFO_VO,	/* voice */
};
+
/*
 * Ask firmware to enable a TX queue for the given station/TID, using
 * whichever SCD_QUEUE_CONFIG command version the firmware supports
 * (legacy v0 or v3).  The firmware's reply must confirm our fixed
 * queue ID and write pointer, since this driver does not support
 * dynamic queue ID assignment.  Returns 0 or an errno.
 */
static int
iwx_enable_txq(struct iwx_softc *sc, int sta_id, int qid, int tid,
    int num_slots)
{
	struct iwx_rx_packet *pkt;
	struct iwx_tx_queue_cfg_rsp *resp;
	struct iwx_tx_queue_cfg_cmd cmd_v0;
	struct iwx_scd_queue_cfg_cmd cmd_v3;
	struct iwx_host_cmd hcmd = {
		.flags = IWX_CMD_WANT_RESP,
		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
	};
	struct iwx_tx_ring *ring = &sc->txq[qid];
	int err, fwqid, cmd_ver;
	uint32_t wr_idx;
	size_t resp_len;

	DPRINTF(("%s: tid=%i\n", __func__, tid));
	DPRINTF(("%s: qid=%i\n", __func__, qid));
	/* Start from a clean ring before handing it to firmware. */
	iwx_reset_tx_ring(sc, ring);

	cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
	    IWX_SCD_QUEUE_CONFIG_CMD);
	if (cmd_ver == 0 || cmd_ver == IWX_FW_CMD_VER_UNKNOWN) {
		/* Legacy command layout. */
		memset(&cmd_v0, 0, sizeof(cmd_v0));
		cmd_v0.sta_id = sta_id;
		cmd_v0.tid = tid;
		cmd_v0.flags = htole16(IWX_TX_QUEUE_CFG_ENABLE_QUEUE);
		cmd_v0.cb_size = htole32(IWX_TFD_QUEUE_CB_SIZE(num_slots));
		cmd_v0.byte_cnt_addr = htole64(ring->bc_tbl.paddr);
		cmd_v0.tfdq_addr = htole64(ring->desc_dma.paddr);
		hcmd.id = IWX_SCD_QUEUE_CFG;
		hcmd.data[0] = &cmd_v0;
		hcmd.len[0] = sizeof(cmd_v0);
	} else if (cmd_ver == 3) {
		/* v3 layout: explicit ADD operation with a station mask. */
		memset(&cmd_v3, 0, sizeof(cmd_v3));
		cmd_v3.operation = htole32(IWX_SCD_QUEUE_ADD);
		cmd_v3.u.add.tfdq_dram_addr = htole64(ring->desc_dma.paddr);
		cmd_v3.u.add.bc_dram_addr = htole64(ring->bc_tbl.paddr);
		cmd_v3.u.add.cb_size = htole32(IWX_TFD_QUEUE_CB_SIZE(num_slots));
		cmd_v3.u.add.flags = htole32(0);
		cmd_v3.u.add.sta_mask = htole32(1 << sta_id);
		cmd_v3.u.add.tid = tid;
		hcmd.id = IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
		    IWX_SCD_QUEUE_CONFIG_CMD);
		hcmd.data[0] = &cmd_v3;
		hcmd.len[0] = sizeof(cmd_v3);
	} else {
		printf("%s: unsupported SCD_QUEUE_CFG command version %d\n",
		    DEVNAME(sc), cmd_ver);
		return ENOTSUP;
	}

	err = iwx_send_cmd(sc, &hcmd);
	if (err)
		return err;

	pkt = hcmd.resp_pkt;
	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
		err = EIO;
		goto out;
	}

	resp_len = iwx_rx_packet_payload_len(pkt);
	if (resp_len != sizeof(*resp)) {
		err = EIO;
		goto out;
	}

	resp = (void *)pkt->data;
	fwqid = le16toh(resp->queue_number);
	wr_idx = le16toh(resp->write_pointer);

	/* Unlike iwlwifi, we do not support dynamic queue ID assignment. */
	if (fwqid != qid) {
		DPRINTF(("%s: === fwqid != qid\n", __func__));
		err = EIO;
		goto out;
	}

	/* The firmware's write pointer must match our (reset) position. */
	if (wr_idx != ring->cur_hw) {
		DPRINTF(("%s: === (wr_idx != ring->cur_hw)\n", __func__));
		err = EIO;
		goto out;
	}

	sc->qenablemsk |= (1 << qid);
	ring->tid = tid;
out:
	iwx_free_resp(sc, &hcmd);
	return err;
}
+
/*
 * Ask firmware to disable/remove a TX queue for the given station and
 * TID, using whichever SCD_QUEUE_CONFIG command version the firmware
 * supports, then reset the local ring state.  Returns 0 or an errno.
 */
static int
iwx_disable_txq(struct iwx_softc *sc, int sta_id, int qid, uint8_t tid)
{
	struct iwx_rx_packet *pkt;
	struct iwx_tx_queue_cfg_rsp *resp;
	struct iwx_tx_queue_cfg_cmd cmd_v0;
	struct iwx_scd_queue_cfg_cmd cmd_v3;
	struct iwx_host_cmd hcmd = {
		.flags = IWX_CMD_WANT_RESP,
		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
	};
	struct iwx_tx_ring *ring = &sc->txq[qid];
	int err, cmd_ver;

	cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
	    IWX_SCD_QUEUE_CONFIG_CMD);
	if (cmd_ver == 0 || cmd_ver == IWX_FW_CMD_VER_UNKNOWN) {
		/* Legacy layout: zeroed addresses with the enable flag clear. */
		memset(&cmd_v0, 0, sizeof(cmd_v0));
		cmd_v0.sta_id = sta_id;
		cmd_v0.tid = tid;
		cmd_v0.flags = htole16(0); /* clear "queue enabled" flag */
		cmd_v0.cb_size = htole32(0);
		cmd_v0.byte_cnt_addr = htole64(0);
		cmd_v0.tfdq_addr = htole64(0);
		hcmd.id = IWX_SCD_QUEUE_CFG;
		hcmd.data[0] = &cmd_v0;
		hcmd.len[0] = sizeof(cmd_v0);
	} else if (cmd_ver == 3) {
		/* v3 layout: explicit REMOVE operation. */
		memset(&cmd_v3, 0, sizeof(cmd_v3));
		cmd_v3.operation = htole32(IWX_SCD_QUEUE_REMOVE);
		cmd_v3.u.remove.sta_mask = htole32(1 << sta_id);
		cmd_v3.u.remove.tid = tid;
		hcmd.id = IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
		    IWX_SCD_QUEUE_CONFIG_CMD);
		hcmd.data[0] = &cmd_v3;
		hcmd.len[0] = sizeof(cmd_v3);
	} else {
		printf("%s: unsupported SCD_QUEUE_CFG command version %d\n",
		    DEVNAME(sc), cmd_ver);
		return ENOTSUP;
	}

	err = iwx_send_cmd(sc, &hcmd);
	if (err)
		return err;

	pkt = hcmd.resp_pkt;
	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
		err = EIO;
		goto out;
	}

	sc->qenablemsk &= ~(1 << qid);
	iwx_reset_tx_ring(sc, ring);
out:
	iwx_free_resp(sc, &hcmd);
	return err;
}
+
+static void
+iwx_post_alive(struct iwx_softc *sc)
+{
+ int txcmd_ver;
+
+ iwx_ict_reset(sc);
+
+ txcmd_ver = iwx_lookup_notif_ver(sc, IWX_LONG_GROUP, IWX_TX_CMD) ;
+ if (txcmd_ver != IWX_FW_CMD_VER_UNKNOWN && txcmd_ver > 6)
+ sc->sc_rate_n_flags_version = 2;
+ else
+ sc->sc_rate_n_flags_version = 1;
+
+ txcmd_ver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP, IWX_TX_CMD);
+}
+
+static int
+iwx_schedule_session_protection(struct iwx_softc *sc, struct iwx_node *in,
+ uint32_t duration_tu)
+{
+
+ struct iwx_session_prot_cmd cmd = {
+ .id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
+ in->in_color)),
+ .action = htole32(IWX_FW_CTXT_ACTION_ADD),
+ .conf_id = htole32(IWX_SESSION_PROTECT_CONF_ASSOC),
+ .duration_tu = htole32(duration_tu),
+ };
+ uint32_t cmd_id;
+ int err;
+
+ cmd_id = iwx_cmd_id(IWX_SESSION_PROTECTION_CMD, IWX_MAC_CONF_GROUP, 0);
+ err = iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
+ if (!err)
+ sc->sc_flags |= IWX_FLAG_TE_ACTIVE;
+ return err;
+}
+
+static void
+iwx_unprotect_session(struct iwx_softc *sc, struct iwx_node *in)
+{
+ struct iwx_session_prot_cmd cmd = {
+ .id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
+ in->in_color)),
+ .action = htole32(IWX_FW_CTXT_ACTION_REMOVE),
+ .conf_id = htole32(IWX_SESSION_PROTECT_CONF_ASSOC),
+ .duration_tu = 0,
+ };
+ uint32_t cmd_id;
+
+ /* Do nothing if the time event has already ended. */
+ if ((sc->sc_flags & IWX_FLAG_TE_ACTIVE) == 0)
+ return;
+
+ cmd_id = iwx_cmd_id(IWX_SESSION_PROTECTION_CMD, IWX_MAC_CONF_GROUP, 0);
+ if (iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd) == 0)
+ sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
+}
+
+/*
+ * NVM read access and content parsing. We do not support
+ * external NVM or writing NVM.
+ */
+
+static uint8_t
+iwx_fw_valid_tx_ant(struct iwx_softc *sc)
+{
+ uint8_t tx_ant;
+
+ tx_ant = ((sc->sc_fw_phy_config & IWX_FW_PHY_CFG_TX_CHAIN)
+ >> IWX_FW_PHY_CFG_TX_CHAIN_POS);
+
+ if (sc->sc_nvm.valid_tx_ant)
+ tx_ant &= sc->sc_nvm.valid_tx_ant;
+
+ return tx_ant;
+}
+
+static uint8_t
+iwx_fw_valid_rx_ant(struct iwx_softc *sc)
+{
+ uint8_t rx_ant;
+
+ rx_ant = ((sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RX_CHAIN)
+ >> IWX_FW_PHY_CFG_RX_CHAIN_POS);
+
+ if (sc->sc_nvm.valid_rx_ant)
+ rx_ant &= sc->sc_nvm.valid_rx_ant;
+
+ return rx_ant;
+}
+
/*
 * Build the net80211 channel list from the firmware's regulatory
 * channel profile: collect valid 2 GHz and 5 GHz channel numbers and
 * register them with the appropriate PHY modes.  6 GHz channels are
 * skipped (net80211 cannot handle their numbering yet).
 */
static void
iwx_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
    struct ieee80211_channel chans[])
{
	struct iwx_softc *sc = ic->ic_softc;
	struct iwx_nvm_data *data = &sc->sc_nvm;
	int ch_idx;
	uint32_t ch_flags;
	int nchan;
	const uint8_t *nvm_channels;
	/* Profile length depends on the NVM-info response version. */
	int nchan_profile = sc->sc_rsp_vers == IWX_FBSD_RSP_V4 ?
	    IWX_NUM_CHANNELS: IWX_NUM_CHANNELS_V1;
	uint8_t bands[IEEE80211_MODE_BYTES];
	uint8_t channel_list_2ghz[IEEE80211_CHAN_MAX];
	uint8_t channel_list_5ghz[IEEE80211_CHAN_MAX];
	int channels_2ghz = 0;
	int channels_5ghz = 0;
	memset(channel_list_2ghz, 0, sizeof(channel_list_2ghz));
	memset(channel_list_5ghz, 0, sizeof(channel_list_5ghz));

	/* Pick the channel number table matching the hardware. */
	if (sc->sc_uhb_supported) {
		nchan = nitems(iwx_nvm_channels_uhb);
		nvm_channels = iwx_nvm_channels_uhb;
	} else {
		nchan = nitems(iwx_nvm_channels_8000);
		nvm_channels = iwx_nvm_channels_8000;
	}

	for (ch_idx = 0; ch_idx < nchan && ch_idx < nchan_profile; ch_idx++) {
		/* Flags are 32-bit in the v4 response, 16-bit in v3. */
		if (sc->sc_rsp_vers == IWX_FBSD_RSP_V4) {
			ch_flags = le32_to_cpup(
			    sc->sc_rsp_info.rsp_v4.regulatory.channel_profile + ch_idx);
		} else {
			ch_flags = le16_to_cpup(
			    sc->sc_rsp_info.rsp_v3.regulatory.channel_profile + ch_idx);
		}

		/* net80211 cannot handle 6 GHz channel numbers yet */
		if (ch_idx >= IWX_NUM_2GHZ_CHANNELS + IWX_NUM_5GHZ_CHANNELS)
			break;

		if (!(ch_flags & IWX_NVM_CHANNEL_VALID))
			continue;
		else {
			/* Sort valid channels into the per-band lists. */
			if (ch_idx < IWX_NUM_2GHZ_CHANNELS) {
				channel_list_2ghz[channels_2ghz++] = nvm_channels[ch_idx];
			} else if ((ch_idx >= IWX_NUM_2GHZ_CHANNELS) &&
			    (ch_idx < IWX_NUM_2GHZ_CHANNELS + IWX_NUM_5GHZ_CHANNELS)) {
				channel_list_5ghz[channels_5ghz++] = nvm_channels[ch_idx];
			} else
				panic("unsupported channel band");
		}
	}

	/* 1-13: 11b/g channels. */
	memset(bands, 0, sizeof(bands));
	setbit(bands, IEEE80211_MODE_11B);
	setbit(bands, IEEE80211_MODE_11G);
	setbit(bands, IEEE80211_MODE_11NG);

	ieee80211_add_channel_list_2ghz(ic->ic_channels, maxchans, nchans,
	    channel_list_2ghz,
	    channels_2ghz, bands, 0);

	/* 5 GHz channels only when the SKU enables the band. */
	if (data->sku_cap_band_52GHz_enable) {
		memset(bands, 0, sizeof(bands));
		setbit(bands, IEEE80211_MODE_11A);
		setbit(bands, IEEE80211_MODE_11NA);
		setbit(bands, IEEE80211_MODE_VHT_5GHZ);

		DPRINTF(("%s: will add 5ghz chans\n", __func__));
		ieee80211_add_channel_list_5ghz(ic->ic_channels, maxchans, nchans,
		    channel_list_5ghz,
		    channels_5ghz, bands,
		    0);
	}
}
+
+/* Return non-zero if MIMO is enabled (i.e. not disabled by the NVM SKU). */
+static int
+iwx_mimo_enabled(struct iwx_softc *sc)
+{
+
+	return !sc->sc_nvm.sku_cap_mimo_disable;
+}
+
+/*
+ * Reset an RX block-ack reorder buffer for a new aggregation session,
+ * seeding the head sequence number and window size and clearing all
+ * bookkeeping state.
+ */
+static void
+iwx_init_reorder_buffer(struct iwx_reorder_buffer *reorder_buf,
+    uint16_t ssn, uint16_t buf_size)
+{
+	reorder_buf->head_sn = ssn;
+	reorder_buf->num_stored = 0;
+	reorder_buf->buf_size = buf_size;
+	reorder_buf->last_amsdu = 0;
+	reorder_buf->last_sub_index = 0;
+	reorder_buf->removed = 0;
+	reorder_buf->valid = 0;
+	reorder_buf->consec_oldsn_drops = 0;
+	reorder_buf->consec_oldsn_ampdu_gp2 = 0;
+	reorder_buf->consec_oldsn_prev_drop = 0;
+}
+
+/*
+ * Tear down an RX block-ack session's reorder buffer: mark it removed
+ * and invalidate the BAID so iwx_find_rxba_data() skips this slot.
+ */
+static void
+iwx_clear_reorder_buffer(struct iwx_softc *sc, struct iwx_rxba_data *rxba)
+{
+	struct iwx_reorder_buffer *reorder_buf = &rxba->reorder_buf;
+
+	reorder_buf->removed = 1;
+	rxba->baid = IWX_RX_REORDER_DATA_INVALID_BAID;
+}
+
+#define IWX_MAX_RX_BA_SESSIONS 16
+
+/*
+ * Look up the active RX block-ack session state for a TID.
+ * Slots with an invalid BAID are unused; returns NULL if no active
+ * session exists for the TID.
+ */
+static struct iwx_rxba_data *
+iwx_find_rxba_data(struct iwx_softc *sc, uint8_t tid)
+{
+	int i;
+
+	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
+		if (sc->sc_rxba_data[i].baid ==
+		    IWX_RX_REORDER_DATA_INVALID_BAID)
+			continue;
+		if (sc->sc_rxba_data[i].tid == tid)
+			return &sc->sc_rxba_data[i];
+	}
+
+	return NULL;
+}
+
+/*
+ * Send an RX BAID allocation/removal command to the firmware for one
+ * block-ack session.
+ *
+ * On start, the firmware picks the BAID and returns it via the command
+ * status word; it is passed back through *baid.  On stop, the existing
+ * session for the TID is looked up and removed (command layout differs
+ * between v1 and later versions of the command).
+ *
+ * Returns 0 on success, ENOENT if stopping a session that does not
+ * exist, ERANGE if the firmware returned an out-of-range BAID, or an
+ * error from command submission.
+ */
+static int
+iwx_sta_rx_agg_baid_cfg_cmd(struct iwx_softc *sc, struct ieee80211_node *ni,
+    uint8_t tid, uint16_t ssn, uint16_t winsize, int timeout_val, int start,
+    uint8_t *baid)
+{
+	struct iwx_rx_baid_cfg_cmd cmd;
+	uint32_t new_baid = 0;
+	int err;
+
+	IWX_ASSERT_LOCKED(sc);
+
+	memset(&cmd, 0, sizeof(cmd));
+
+	if (start) {
+		cmd.action = IWX_RX_BAID_ACTION_ADD;
+		cmd.alloc.sta_id_mask = htole32(1 << IWX_STATION_ID);
+		cmd.alloc.tid = tid;
+		cmd.alloc.ssn = htole16(ssn);
+		cmd.alloc.win_size = htole16(winsize);
+	} else {
+		struct iwx_rxba_data *rxba;
+
+		rxba = iwx_find_rxba_data(sc, tid);
+		if (rxba == NULL)
+			return ENOENT;
+		*baid = rxba->baid;
+
+		cmd.action = IWX_RX_BAID_ACTION_REMOVE;
+		/* v1 removes by BAID; later versions remove by sta/tid. */
+		if (iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
+		    IWX_RX_BAID_ALLOCATION_CONFIG_CMD) == 1) {
+			cmd.remove_v1.baid = rxba->baid;
+		} else {
+			cmd.remove.sta_id_mask = htole32(1 << IWX_STATION_ID);
+			cmd.remove.tid = tid;
+		}
+	}
+
+	err = iwx_send_cmd_pdu_status(sc, IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
+	    IWX_RX_BAID_ALLOCATION_CONFIG_CMD), sizeof(cmd), &cmd, &new_baid);
+	if (err)
+		return err;
+
+	if (start) {
+		/* Firmware-chosen BAID must index into sc_rxba_data[]. */
+		if (new_baid >= nitems(sc->sc_rxba_data))
+			return ERANGE;
+		*baid = new_baid;
+	}
+
+	return 0;
+}
+
+/*
+ * Start or stop an RX block-ack aggregation session for a TID and keep
+ * the driver-side session state (sc_rxba_data[], session count) in sync
+ * with the firmware.  Deaggregation itself is done in hardware; only
+ * hardware supporting BAID_ML is handled (older parts panic here).
+ */
+static void
+iwx_sta_rx_agg(struct iwx_softc *sc, struct ieee80211_node *ni, uint8_t tid,
+    uint16_t ssn, uint16_t winsize, int timeout_val, int start)
+{
+	int err;
+	struct iwx_rxba_data *rxba = NULL;
+	uint8_t baid = 0;
+
+	/* Refuse to start more sessions than the hardware supports. */
+	if (start && sc->sc_rx_ba_sessions >= IWX_MAX_RX_BA_SESSIONS) {
+		return;
+	}
+
+	if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_BAID_ML_SUPPORT)) {
+		err = iwx_sta_rx_agg_baid_cfg_cmd(sc, ni, tid, ssn, winsize,
+		    timeout_val, start, &baid);
+	} else {
+		panic("sta_rx_agg unsupported hw");
+	}
+	if (err) {
+		DPRINTF(("%s: iwx_sta_rx_agg_sta err=%i\n", __func__, err));
+		return;
+	} else
+		DPRINTF(("%s: iwx_sta_rx_agg_sta success\n", __func__));
+
+	/* baid was set by the command path above (start or stop). */
+	rxba = &sc->sc_rxba_data[baid];
+
+	/* Deaggregation is done in hardware. */
+	if (start) {
+		/* Slot already in use: nothing to (re)initialize. */
+		if (rxba->baid != IWX_RX_REORDER_DATA_INVALID_BAID) {
+			return;
+		}
+		rxba->sta_id = IWX_STATION_ID;
+		rxba->tid = tid;
+		rxba->baid = baid;
+		rxba->timeout = timeout_val;
+		getmicrouptime(&rxba->last_rx);
+		iwx_init_reorder_buffer(&rxba->reorder_buf, ssn,
+		    winsize);
+		/*
+		 * NOTE(review): returning here skips the session-count
+		 * increment below whenever a timeout is set — confirm
+		 * that is intentional.
+		 */
+		if (timeout_val != 0) {
+			DPRINTF(("%s: timeout_val != 0\n", __func__));
+			return;
+		}
+	} else
+		iwx_clear_reorder_buffer(sc, rxba);
+
+	if (start) {
+		sc->sc_rx_ba_sessions++;
+	} else if (sc->sc_rx_ba_sessions > 0)
+		sc->sc_rx_ba_sessions--;
+}
+
+/*
+ * Start a TX aggregation session for a TID by enabling a Tx queue for
+ * it; firmware handles the actual Block Ack session setup and teardown.
+ * The chosen queue id is cached in sc->aggqid[tid] for later TX use.
+ */
+static void
+iwx_sta_tx_agg_start(struct iwx_softc *sc, struct ieee80211_node *ni,
+    uint8_t tid)
+{
+	int err, qid;
+
+	qid = sc->aggqid[tid];
+	if (qid == 0) {
+		/* Firmware should pick the next unused Tx queue. */
+		qid = fls(sc->qenablemsk);
+	}
+
+	DPRINTF(("%s: qid=%i\n", __func__, qid));
+
+	/*
+	 * Simply enable the queue.
+	 * Firmware handles Tx Ba session setup and teardown.
+	 */
+	if ((sc->qenablemsk & (1 << qid)) == 0) {
+		if (!iwx_nic_lock(sc)) {
+			return;
+		}
+		err = iwx_enable_txq(sc, IWX_STATION_ID, qid, tid,
+		    IWX_TX_RING_COUNT);
+		iwx_nic_unlock(sc);
+		if (err) {
+			printf("%s: could not enable Tx queue %d "
+			    "(error %d)\n", DEVNAME(sc), qid, err);
+			return;
+		}
+	}
+	/*
+	 * NOTE(review): plain assignment clobbers any other txa_flags
+	 * bits; '|= IEEE80211_AGGR_RUNNING' may be intended — confirm.
+	 */
+	ni->ni_tx_ampdu[tid].txa_flags = IEEE80211_AGGR_RUNNING;
+	DPRINTF(("%s: will set sc->aggqid[%i]=%i\n", __func__, tid, qid));
+	sc->aggqid[tid] = qid;
+}
+
+/*
+ * Deferred task: process pending RX block-ack session start/stop
+ * requests recorded in sc->ba_rx.{start,stop}_tidmask by the ampdu
+ * callbacks, under the driver lock.
+ */
+static void
+iwx_ba_rx_task(void *arg, int npending __unused)
+{
+	struct iwx_softc *sc = arg;
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+	struct ieee80211_node *ni = vap->iv_bss;
+	int tid;
+
+	IWX_LOCK(sc);
+	for (tid = 0; tid < IWX_MAX_TID_COUNT; tid++) {
+		if (sc->sc_flags & IWX_FLAG_SHUTDOWN)
+			break;
+		if (sc->ba_rx.start_tidmask & (1 << tid)) {
+			struct iwx_rx_ba *ba = &sc->ni_rx_ba[tid];
+			DPRINTF(("%s: ba->ba_flags=%x\n", __func__,
+			    ba->ba_flags));
+			/*
+			 * NOTE(review): 'break' abandons all remaining
+			 * TIDs when one is already done; 'continue'
+			 * looks intended — confirm.
+			 */
+			if (ba->ba_flags == IWX_BA_DONE) {
+				DPRINTF(("%s: ampdu for tid %i already added\n",
+				    __func__, tid));
+				break;
+			}
+
+			DPRINTF(("%s: ampdu rx start for tid %i\n", __func__,
+			    tid));
+			iwx_sta_rx_agg(sc, ni, tid, ba->ba_winstart,
+			    ba->ba_winsize, ba->ba_timeout_val, 1);
+			sc->ba_rx.start_tidmask &= ~(1 << tid);
+			ba->ba_flags = IWX_BA_DONE;
+		} else if (sc->ba_rx.stop_tidmask & (1 << tid)) {
+			iwx_sta_rx_agg(sc, ni, tid, 0, 0, 0, 0);
+			sc->ba_rx.stop_tidmask &= ~(1 << tid);
+		}
+	}
+	IWX_UNLOCK(sc);
+}
+
+/*
+ * Deferred task: process pending TX aggregation start requests recorded
+ * in sc->ba_tx.start_tidmask, under the driver lock.  Sets
+ * IWX_FLAG_AMPDUTX once any TX aggregation session has been started.
+ */
+static void
+iwx_ba_tx_task(void *arg, int npending __unused)
+{
+	struct iwx_softc *sc = arg;
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+	struct ieee80211_node *ni = vap->iv_bss;
+	int tid;
+
+	IWX_LOCK(sc);
+	for (tid = 0; tid < IWX_MAX_TID_COUNT; tid++) {
+		if (sc->sc_flags & IWX_FLAG_SHUTDOWN)
+			break;
+		if (sc->ba_tx.start_tidmask & (1 << tid)) {
+			DPRINTF(("%s: ampdu tx start for tid %i\n", __func__,
+			    tid));
+			iwx_sta_tx_agg_start(sc, ni, tid);
+			sc->ba_tx.start_tidmask &= ~(1 << tid);
+			sc->sc_flags |= IWX_FLAG_AMPDUTX;
+		}
+	}
+
+	IWX_UNLOCK(sc);
+}
+
+/*
+ * Read the device MAC address from CSR registers into data->hw_addr.
+ * The OEM-fused "strap" address is preferred; if it is not valid, fall
+ * back to the address stored in OTP.  Leaves hw_addr zeroed if the NIC
+ * cannot be locked.
+ */
+static void
+iwx_set_mac_addr_from_csr(struct iwx_softc *sc, struct iwx_nvm_data *data)
+{
+	uint32_t mac_addr0, mac_addr1;
+
+	memset(data->hw_addr, 0, sizeof(data->hw_addr));
+
+	if (!iwx_nic_lock(sc))
+		return;
+
+	/* Force little-endian so iwx_flip_hw_address sees a fixed layout. */
+	mac_addr0 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR0_STRAP(sc)));
+	mac_addr1 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR1_STRAP(sc)));
+
+	iwx_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
+
+	/* If OEM fused a valid address, use it instead of the one in OTP. */
+	if (iwx_is_valid_mac_addr(data->hw_addr)) {
+		iwx_nic_unlock(sc);
+		return;
+	}
+
+	mac_addr0 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR0_OTP(sc)));
+	mac_addr1 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR1_OTP(sc)));
+
+	iwx_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
+
+	iwx_nic_unlock(sc);
+}
+
+/*
+ * Return non-zero if addr is a usable unicast MAC address: not the
+ * Intel reserved placeholder, not broadcast, not all-zero, and not
+ * multicast.
+ */
+static int
+iwx_is_valid_mac_addr(const uint8_t *addr)
+{
+	static const uint8_t reserved_mac[] = {
+		0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
+	};
+
+	return (memcmp(reserved_mac, addr, ETHER_ADDR_LEN) != 0 &&
+	    memcmp(etherbroadcastaddr, addr, sizeof(etherbroadcastaddr)) != 0 &&
+	    memcmp(etheranyaddr, addr, sizeof(etheranyaddr)) != 0 &&
+	    !ETHER_IS_MULTICAST(addr));
+}
+
+/*
+ * Assemble a 6-byte MAC address from the two CSR MAC-address words.
+ * The registers store the address byte-reversed within each word, so
+ * bytes are copied out in reverse order: 4 bytes from mac_addr0 and
+ * the low 2 bytes from mac_addr1.
+ */
+static void
+iwx_flip_hw_address(uint32_t mac_addr0, uint32_t mac_addr1, uint8_t *dest)
+{
+	const uint8_t *hw_addr;
+
+	hw_addr = (const uint8_t *)&mac_addr0;
+	dest[0] = hw_addr[3];
+	dest[1] = hw_addr[2];
+	dest[2] = hw_addr[1];
+	dest[3] = hw_addr[0];
+
+	hw_addr = (const uint8_t *)&mac_addr1;
+	dest[4] = hw_addr[1];
+	dest[5] = hw_addr[0];
+}
+
+/*
+ * Query NVM contents from the firmware via IWX_NVM_GET_INFO and fill in
+ * sc->sc_nvm: MAC address (read from CSR, not from the response), SKU
+ * capability bits, valid antenna masks and LAR state.  The raw response
+ * is also cached in sc->sc_rsp_info (tagged v3/v4) for later channel
+ * map parsing.  Returns 0 on success or an errno.
+ */
+static int
+iwx_nvm_get(struct iwx_softc *sc)
+{
+	struct iwx_nvm_get_info cmd = {};
+	struct iwx_nvm_data *nvm = &sc->sc_nvm;
+	struct iwx_host_cmd hcmd = {
+		.flags = IWX_CMD_WANT_RESP | IWX_CMD_SEND_IN_RFKILL,
+		.data = { &cmd, },
+		.len = { sizeof(cmd) },
+		.id = IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
+		    IWX_NVM_GET_INFO)
+	};
+	int err = 0;
+	uint32_t mac_flags;
+	/*
+	 * All the values in iwx_nvm_get_info_rsp v4 are the same as
+	 * in v3, except for the channel profile part of the
+	 * regulatory. So we can just access the new struct, with the
+	 * exception of the latter.
+	 */
+	struct iwx_nvm_get_info_rsp *rsp;
+	struct iwx_nvm_get_info_rsp_v3 *rsp_v3;
+	int v4 = isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_REGULATORY_NVM_INFO);
+	size_t resp_len = v4 ? sizeof(*rsp) : sizeof(*rsp_v3);
+
+	hcmd.resp_pkt_len = sizeof(struct iwx_rx_packet) + resp_len;
+	err = iwx_send_cmd(sc, &hcmd);
+	if (err) {
+		printf("%s: failed to send cmd (error %d)", __func__, err);
+		return err;
+	}
+
+	/* The response must match the size expected for this API version. */
+	if (iwx_rx_packet_payload_len(hcmd.resp_pkt) != resp_len) {
+		printf("%s: iwx_rx_packet_payload_len=%d\n", __func__,
+		    iwx_rx_packet_payload_len(hcmd.resp_pkt));
+		printf("%s: resp_len=%zu\n", __func__, resp_len);
+		err = EIO;
+		goto out;
+	}
+
+	memset(nvm, 0, sizeof(*nvm));
+
+	/* MAC address comes from CSR registers, not the NVM response. */
+	iwx_set_mac_addr_from_csr(sc, nvm);
+	if (!iwx_is_valid_mac_addr(nvm->hw_addr)) {
+		printf("%s: no valid mac address was found\n", DEVNAME(sc));
+		err = EINVAL;
+		goto out;
+	}
+
+	rsp = (void *)hcmd.resp_pkt->data;
+
+	/* Initialize general data */
+	nvm->nvm_version = le16toh(rsp->general.nvm_version);
+	nvm->n_hw_addrs = rsp->general.n_hw_addrs;
+
+	/* Initialize MAC sku data */
+	mac_flags = le32toh(rsp->mac_sku.mac_sku_flags);
+	nvm->sku_cap_11ac_enable =
+	    !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11AC_ENABLED);
+	nvm->sku_cap_11n_enable =
+	    !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11N_ENABLED);
+	nvm->sku_cap_11ax_enable =
+	    !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11AX_ENABLED);
+	nvm->sku_cap_band_24GHz_enable =
+	    !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED);
+	nvm->sku_cap_band_52GHz_enable =
+	    !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED);
+	nvm->sku_cap_mimo_disable =
+	    !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_MIMO_DISABLED);
+
+	/* Initialize PHY sku data */
+	nvm->valid_tx_ant = (uint8_t)le32toh(rsp->phy_sku.tx_chains);
+	nvm->valid_rx_ant = (uint8_t)le32toh(rsp->phy_sku.rx_chains);
+
+	/* Location-aware regulatory needs both NVM and firmware support. */
+	if (le32toh(rsp->regulatory.lar_enabled) &&
+	    isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_LAR_SUPPORT)) {
+		nvm->lar_enabled = 1;
+	}
+
+	/* Cache the raw response for iwx_init_channel_map(). */
+	memcpy(&sc->sc_rsp_info, rsp, resp_len);
+	if (v4) {
+		sc->sc_rsp_vers = IWX_FBSD_RSP_V4;
+	} else {
+		sc->sc_rsp_vers = IWX_FBSD_RSP_V3;
+	}
+out:
+	iwx_free_resp(sc, &hcmd);
+	return err;
+}
+
+/*
+ * Hand the regular firmware image to the device via the context-info
+ * mechanism (gen3 for AX210 and later) and sleep until the firmware
+ * signals "alive" (sc_uc.uc_ok, woken from the interrupt path) or one
+ * second elapses.  Loader DMA resources are freed in all cases.
+ * Returns 0 on success, EINVAL or the msleep error on failure.
+ */
+static int
+iwx_load_firmware(struct iwx_softc *sc)
+{
+	struct iwx_fw_sects *fws;
+	int err;
+
+	IWX_ASSERT_LOCKED(sc)
+
+	sc->sc_uc.uc_intr = 0;
+	sc->sc_uc.uc_ok = 0;
+
+	fws = &sc->sc_fw.fw_sects[IWX_UCODE_TYPE_REGULAR];
+	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
+		err = iwx_ctxt_info_gen3_init(sc, fws);
+	else
+		err = iwx_ctxt_info_init(sc, fws);
+	if (err) {
+		printf("%s: could not init context info\n", DEVNAME(sc));
+		return err;
+	}
+
+	/* wait for the firmware to load */
+	err = msleep(&sc->sc_uc, &sc->sc_mtx, 0, "iwxuc", hz);
+	if (err || !sc->sc_uc.uc_ok) {
+		printf("%s: could not load firmware, %d\n", DEVNAME(sc), err);
+		iwx_ctxt_info_free_paging(sc);
+	}
+
+	/* Loader scratch is no longer needed once the attempt completes. */
+	iwx_dma_contig_free(&sc->iml_dma);
+	iwx_ctxt_info_free_fw_img(sc);
+
+	if (!sc->sc_uc.uc_ok)
+		return EINVAL;
+
+	return err;
+}
+
+/*
+ * Prepare the device for firmware load: ack and disable interrupts,
+ * clear the rfkill handshake bits, initialize the NIC, then enable the
+ * firmware-load interrupt and start the actual load.
+ */
+static int
+iwx_start_fw(struct iwx_softc *sc)
+{
+	int err;
+
+	IWX_WRITE(sc, IWX_CSR_INT, ~0);
+
+	iwx_disable_interrupts(sc);
+
+	/* make sure rfkill handshake bits are cleared */
+	IWX_WRITE(sc, IWX_CSR_UCODE_DRV_GP1_CLR, IWX_CSR_UCODE_SW_BIT_RFKILL);
+	IWX_WRITE(sc, IWX_CSR_UCODE_DRV_GP1_CLR,
+	    IWX_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+
+	/* clear (again), then enable firmware load interrupt */
+	IWX_WRITE(sc, IWX_CSR_INT, ~0);
+
+	err = iwx_nic_init(sc);
+	if (err) {
+		printf("%s: unable to init nic\n", DEVNAME(sc));
+		return err;
+	}
+
+	iwx_enable_fwload_interrupt(sc);
+
+	return iwx_load_firmware(sc);
+}
+
+/*
+ * Parse one PNVM SKU section (a TLV stream) and, if its HW_TYPE TLV
+ * matches this device's MAC type and RF id, concatenate all SEC_RT
+ * payloads into a DMA buffer handed to the firmware loader context.
+ * Parsing stops at the next PNVM_SKU TLV.  Records the section's
+ * version hash in sc->sc_pnvm_ver.
+ *
+ * Returns 0 on success, ENOENT if the section does not match this
+ * hardware or contains no data, EINVAL/ENOMEM on parse or allocation
+ * failure.
+ */
+static int
+iwx_pnvm_handle_section(struct iwx_softc *sc, const uint8_t *data,
+    size_t len)
+{
+	const struct iwx_ucode_tlv *tlv;
+	uint32_t sha1 = 0;
+	uint16_t mac_type = 0, rf_id = 0;
+	uint8_t *pnvm_data = NULL, *tmp;
+	int hw_match = 0;
+	uint32_t size = 0;
+	int err;
+
+	while (len >= sizeof(*tlv)) {
+		uint32_t tlv_len, tlv_type;
+
+		len -= sizeof(*tlv);
+		tlv = (const void *)data;
+
+		tlv_len = le32toh(tlv->length);
+		tlv_type = le32toh(tlv->type);
+
+		if (len < tlv_len) {
+			printf("%s: invalid TLV len: %zd/%u\n",
+			    DEVNAME(sc), len, tlv_len);
+			err = EINVAL;
+			goto out;
+		}
+
+		data += sizeof(*tlv);
+
+		switch (tlv_type) {
+		case IWX_UCODE_TLV_PNVM_VERSION:
+			if (tlv_len < sizeof(uint32_t))
+				break;
+
+			sha1 = le32_to_cpup((const uint32_t *)data);
+			break;
+		case IWX_UCODE_TLV_HW_TYPE:
+			if (tlv_len < 2 * sizeof(uint16_t))
+				break;
+
+			/* Only the first matching HW_TYPE TLV counts. */
+			if (hw_match)
+				break;
+
+			mac_type = le16_to_cpup((const uint16_t *)data);
+			rf_id = le16_to_cpup((const uint16_t *)(data +
+			    sizeof(uint16_t)));
+
+			if (mac_type == IWX_CSR_HW_REV_TYPE(sc->sc_hw_rev) &&
+			    rf_id == IWX_CSR_HW_RFID_TYPE(sc->sc_hw_rf_id))
+				hw_match = 1;
+			break;
+		case IWX_UCODE_TLV_SEC_RT: {
+			const struct iwx_pnvm_section *section;
+			uint32_t data_len;
+
+			section = (const void *)data;
+			data_len = tlv_len - sizeof(*section);
+
+			/* TODO: remove, this is a deprecated separator */
+			if (le32_to_cpup((const uint32_t *)data) == 0xddddeeee)
+				break;
+
+			/* Grow the accumulated image by this payload. */
+			tmp = malloc(size + data_len, M_DEVBUF,
+			    M_WAITOK | M_ZERO);
+			/* NOTE(review): M_WAITOK never returns NULL; check is dead. */
+			if (tmp == NULL) {
+				err = ENOMEM;
+				goto out;
+			}
+			// XXX:misha pnvm_data is NULL and size is 0 at first pass
+			memcpy(tmp, pnvm_data, size);
+			memcpy(tmp + size, section->data, data_len);
+			free(pnvm_data, M_DEVBUF);
+			pnvm_data = tmp;
+			size += data_len;
+			break;
+		}
+		case IWX_UCODE_TLV_PNVM_SKU:
+			/* New PNVM section started, stop parsing. */
+			goto done;
+		default:
+			break;
+		}
+
+		/* TLVs are padded to 4-byte alignment. */
+		if (roundup(tlv_len, 4) > len)
+			break;
+		len -= roundup(tlv_len, 4);
+		data += roundup(tlv_len, 4);
+	}
+done:
+	if (!hw_match || size == 0) {
+		err = ENOENT;
+		goto out;
+	}
+
+	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->pnvm_dma, size, 1);
+	if (err) {
+		printf("%s: could not allocate DMA memory for PNVM\n",
+		    DEVNAME(sc));
+		err = ENOMEM;
+		goto out;
+	}
+	memcpy(sc->pnvm_dma.vaddr, pnvm_data, size);
+	iwx_ctxt_info_gen3_set_pnvm(sc);
+	sc->sc_pnvm_ver = sha1;
+out:
+	free(pnvm_data, M_DEVBUF);
+	return err;
+}
+
+/*
+ * Walk the top-level PNVM TLV stream looking for a PNVM_SKU TLV whose
+ * three-word SKU id matches this device, then hand the rest of the
+ * stream to iwx_pnvm_handle_section().  Returns 0 once a matching
+ * section has been applied, EINVAL on a malformed TLV, ENOENT if no
+ * section matches.
+ */
+static int
+iwx_pnvm_parse(struct iwx_softc *sc, const uint8_t *data, size_t len)
+{
+	const struct iwx_ucode_tlv *tlv;
+
+	while (len >= sizeof(*tlv)) {
+		uint32_t tlv_len, tlv_type;
+
+		len -= sizeof(*tlv);
+		tlv = (const void *)data;
+
+		tlv_len = le32toh(tlv->length);
+		tlv_type = le32toh(tlv->type);
+
+		if (len < tlv_len || roundup(tlv_len, 4) > len)
+			return EINVAL;
+
+		if (tlv_type == IWX_UCODE_TLV_PNVM_SKU) {
+			const struct iwx_sku_id *sku_id =
+			    (const void *)(data + sizeof(*tlv));
+
+			data += sizeof(*tlv) + roundup(tlv_len, 4);
+			len -= roundup(tlv_len, 4);
+
+			if (sc->sc_sku_id[0] == le32toh(sku_id->data[0]) &&
+			    sc->sc_sku_id[1] == le32toh(sku_id->data[1]) &&
+			    sc->sc_sku_id[2] == le32toh(sku_id->data[2]) &&
+			    iwx_pnvm_handle_section(sc, data, len) == 0)
+				return 0;
+		} else {
+			data += sizeof(*tlv) + roundup(tlv_len, 4);
+			len -= roundup(tlv_len, 4);
+		}
+	}
+
+	return ENOENT;
+}
+
+/* Make AX210 firmware loading context point at PNVM image in DMA memory. */
+static void
+iwx_ctxt_info_gen3_set_pnvm(struct iwx_softc *sc)
+{
+	struct iwx_prph_scratch *prph_scratch;
+	struct iwx_prph_scratch_ctrl_cfg *prph_sc_ctrl;
+
+	prph_scratch = sc->prph_scratch_dma.vaddr;
+	prph_sc_ctrl = &prph_scratch->ctrl_cfg;
+
+	/* Publish the PNVM buffer's bus address and size to the firmware. */
+	prph_sc_ctrl->pnvm_cfg.pnvm_base_addr = htole64(sc->pnvm_dma.paddr);
+	prph_sc_ctrl->pnvm_cfg.pnvm_size = htole32(sc->pnvm_dma.size);
+
+	/* Flush CPU writes before the device reads the buffer. */
+	bus_dmamap_sync(sc->sc_dmat, sc->pnvm_dma.map, BUS_DMASYNC_PREWRITE);
+}
+
+/*
+ * Load platform-NVM (non-volatile-memory) data from the filesystem.
+ * This data apparently contains regulatory information and affects device
+ * channel configuration.
+ * The SKU of AX210 devices tells us which PNVM file section is needed.
+ * Pre-AX210 devices store NVM data onboard.
+ */
+static int
+iwx_load_pnvm(struct iwx_softc *sc)
+{
+	const int wait_flags = IWX_PNVM_COMPLETE;
+	int err = 0;
+	const struct firmware *pnvm;
+
+	/* An all-zero SKU id means this device needs no PNVM. */
+	if (sc->sc_sku_id[0] == 0 &&
+	    sc->sc_sku_id[1] == 0 &&
+	    sc->sc_sku_id[2] == 0)
+		return 0;
+
+	if (sc->sc_pnvm_name) {
+		if (sc->pnvm_dma.vaddr == NULL) {
+			/* firmware_get() may sleep; drop the driver lock. */
+			IWX_UNLOCK(sc);
+			pnvm = firmware_get(sc->sc_pnvm_name);
+			if (pnvm == NULL) {
+				/*
+				 * NOTE(review): err is still 0 here, so
+				 * the "(error %d)" printed below is
+				 * always 0 — confirm intent.
+				 */
+				printf("%s: could not read %s (error %d)\n",
+				    DEVNAME(sc), sc->sc_pnvm_name, err);
+				IWX_LOCK(sc);
+				return EINVAL;
+			}
+
+			err = iwx_pnvm_parse(sc, pnvm->data, pnvm->datasize);
+			IWX_LOCK(sc);
+			if (err && err != ENOENT) {
+				return EINVAL;
+			}
+		} else
+			/* PNVM already parsed; just re-point the context. */
+			iwx_ctxt_info_gen3_set_pnvm(sc);
+	}
+
+	if (!iwx_nic_lock(sc)) {
+		return EBUSY;
+	}
+
+	/*
+	 * If we don't have a platform NVM file simply ask firmware
+	 * to proceed without it.
+	 */
+
+	iwx_write_umac_prph(sc, IWX_UREG_DOORBELL_TO_ISR6,
+	    IWX_UREG_DOORBELL_TO_ISR6_PNVM);
+
+	/* Wait for the pnvm complete notification from firmware. */
+	while ((sc->sc_init_complete & wait_flags) != wait_flags) {
+		err = msleep(&sc->sc_init_complete, &sc->sc_mtx, 0, "iwxinit", 2 * hz);
+		if (err)
+			break;
+	}
+
+	iwx_nic_unlock(sc);
+
+	return err;
+}
+
+/* Tell the firmware which TX antennas are valid for use. */
+static int
+iwx_send_tx_ant_cfg(struct iwx_softc *sc, uint8_t valid_tx_ant)
+{
+	struct iwx_tx_ant_cfg_cmd tx_ant_cmd = {
+		.valid = htole32(valid_tx_ant),
+	};
+
+	return iwx_send_cmd_pdu(sc, IWX_TX_ANT_CONFIGURATION_CMD,
+	    0, sizeof(tx_ant_cmd), &tx_ant_cmd);
+}
+
+/*
+ * Send the PHY configuration and default calibration triggers for the
+ * regular firmware image to the device.
+ */
+static int
+iwx_send_phy_cfg_cmd(struct iwx_softc *sc)
+{
+	struct iwx_phy_cfg_cmd phy_cfg_cmd;
+
+	phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
+	phy_cfg_cmd.calib_control.event_trigger =
+	    sc->sc_default_calib[IWX_UCODE_TYPE_REGULAR].event_trigger;
+	phy_cfg_cmd.calib_control.flow_trigger =
+	    sc->sc_default_calib[IWX_UCODE_TYPE_REGULAR].flow_trigger;
+
+	return iwx_send_cmd_pdu(sc, IWX_PHY_CONFIGURATION_CMD, 0,
+	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
+}
+
+/* Enable dynamic queue allocation, designating the command queue. */
+static int
+iwx_send_dqa_cmd(struct iwx_softc *sc)
+{
+	struct iwx_dqa_enable_cmd dqa_cmd = {
+		.cmd_queue = htole32(IWX_DQA_CMD_QUEUE),
+	};
+	uint32_t cmd_id;
+
+	cmd_id = iwx_cmd_id(IWX_DQA_ENABLE_CMD, IWX_DATA_PATH_GROUP, 0);
+	return iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
+}
+
+/*
+ * Full firmware bring-up sequence: read the firmware image from the
+ * filesystem (lock dropped around the sleeping read), start it on the
+ * device, load PNVM data on AX210+ parts, then run post-alive setup.
+ * Returns 0 on success or the first error encountered.
+ */
+static int
+iwx_load_ucode_wait_alive(struct iwx_softc *sc)
+{
+	int err;
+
+	IWX_UNLOCK(sc);
+	err = iwx_read_firmware(sc);
+	IWX_LOCK(sc);
+	if (err)
+		return err;
+
+	err = iwx_start_fw(sc);
+	if (err)
+		return err;
+
+	/* Pre-AX210 devices store NVM data onboard; no PNVM needed. */
+	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
+		err = iwx_load_pnvm(sc);
+		if (err)
+			return err;
+	}
+
+	iwx_post_alive(sc);
+
+	return 0;
+}
+
+/*
+ * Run the firmware's init flow: load the ucode, send the extended init
+ * config and NVM-access-complete commands, and wait for the firmware's
+ * init-complete notification.  When readnvm is set, also fetch NVM data
+ * and install the MAC address into net80211.  Returns 0 on success or
+ * an errno.
+ */
+static int
+iwx_run_init_mvm_ucode(struct iwx_softc *sc, int readnvm)
+{
+	const int wait_flags = IWX_INIT_COMPLETE;
+	struct iwx_nvm_access_complete_cmd nvm_complete = {};
+	struct iwx_init_extended_cfg_cmd init_cfg = {
+		.init_flags = htole32(IWX_INIT_NVM),
+	};
+
+	int err;
+
+	/* With rfkill asserted we may only proceed to read the NVM. */
+	if ((sc->sc_flags & IWX_FLAG_RFKILL) && !readnvm) {
+		printf("%s: radio is disabled by hardware switch\n",
+		    DEVNAME(sc));
+		return EPERM;
+	}
+
+	sc->sc_init_complete = 0;
+	err = iwx_load_ucode_wait_alive(sc);
+	if (err) {
+		printf("%s: failed to load init firmware\n", DEVNAME(sc));
+		return err;
+	} else {
+		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
+		    "%s: successfully loaded init firmware\n", __func__);
+	}
+
+	/*
+	 * Send init config command to mark that we are sending NVM
+	 * access commands
+	 */
+	err = iwx_send_cmd_pdu(sc, IWX_WIDE_ID(IWX_SYSTEM_GROUP,
+	    IWX_INIT_EXTENDED_CFG_CMD), 0, sizeof(init_cfg), &init_cfg);
+	if (err) {
+		printf("%s: IWX_INIT_EXTENDED_CFG_CMD error=%d\n", __func__,
+		    err);
+		return err;
+	}
+
+	err = iwx_send_cmd_pdu(sc, IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
+	    IWX_NVM_ACCESS_COMPLETE), 0, sizeof(nvm_complete), &nvm_complete);
+	if (err) {
+		return err;
+	}
+
+	/* Wait for the init complete notification from the firmware. */
+	while ((sc->sc_init_complete & wait_flags) != wait_flags) {
+		err = msleep(&sc->sc_init_complete, &sc->sc_mtx, 0, "iwxinit", 2 * hz);
+		if (err) {
+			DPRINTF(("%s: will return err=%d\n", __func__, err));
+			return err;
+		} else {
+			DPRINTF(("%s: sc_init_complete == IWX_INIT_COMPLETE\n",
+			    __func__));
+		}
+	}
+
+	if (readnvm) {
+		err = iwx_nvm_get(sc);
+		DPRINTF(("%s: err=%d\n", __func__, err));
+		if (err) {
+			printf("%s: failed to read nvm (error %d)\n",
+			    DEVNAME(sc), err);
+			return err;
+		} else {
+			DPRINTF(("%s: successfully read nvm\n", DEVNAME(sc)));
+		}
+		IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->sc_nvm.hw_addr);
+	}
+	return 0;
+}
+
+/*
+ * Enable Latency Tolerance Reporting in the firmware, if LTR was
+ * detected as enabled on the PCIe link; no-op otherwise.
+ */
+static int
+iwx_config_ltr(struct iwx_softc *sc)
+{
+	struct iwx_ltr_config_cmd cmd = {
+		.flags = htole32(IWX_LTR_CFG_FLAG_FEATURE_ENABLE),
+	};
+
+	if (!sc->sc_ltr_enabled)
+		return 0;
+
+	return iwx_send_cmd_pdu(sc, IWX_LTR_CONFIG, 0, sizeof(cmd), &cmd);
+}
+
+/*
+ * Write an RX free-buffer descriptor for ring slot idx pointing at the
+ * freshly loaded DMA segment.  AX210+ uses a transfer descriptor
+ * carrying a buffer id and 64-bit address; older devices use a bare
+ * array of 64-bit addresses.
+ */
+static void
+iwx_update_rx_desc(struct iwx_softc *sc, struct iwx_rx_ring *ring, int idx,
+    bus_dma_segment_t *seg)
+{
+	struct iwx_rx_data *data = &ring->data[idx];
+
+	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
+		struct iwx_rx_transfer_desc *desc = ring->desc;
+		desc[idx].rbid = htole16(idx & 0xffff);
+		desc[idx].addr = htole64((*seg).ds_addr);
+		bus_dmamap_sync(ring->data_dmat, data->map,
+		    BUS_DMASYNC_PREWRITE);
+	} else {
+		((uint64_t *)ring->desc)[idx] =
+		    htole64((*seg).ds_addr);
+		bus_dmamap_sync(ring->data_dmat, data->map,
+		    BUS_DMASYNC_PREWRITE);
+	}
+}
+
+/*
+ * Allocate and DMA-map a new receive mbuf cluster for RX ring slot idx
+ * and publish its address via iwx_update_rx_desc().  If the slot
+ * already held an mbuf its mapping is torn down first; failing to load
+ * a replacement in that case is fatal (the ring slot would be left
+ * without a buffer).  Returns 0 or ENOBUFS/bus_dma error.
+ */
+static int
+iwx_rx_addbuf(struct iwx_softc *sc, int size, int idx)
+{
+	struct iwx_rx_ring *ring = &sc->rxq;
+	struct iwx_rx_data *data = &ring->data[idx];
+	struct mbuf *m;
+	int err;
+	int fatal = 0;
+	bus_dma_segment_t seg;
+	int nsegs;
+
+	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWX_RBUF_SIZE);
+	if (m == NULL)
+		return ENOBUFS;
+
+	if (data->m != NULL) {
+		bus_dmamap_unload(ring->data_dmat, data->map);
+		fatal = 1;
+	}
+
+	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
+	err = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, &seg,
+	    &nsegs, BUS_DMA_NOWAIT);
+	if (err) {
+		/* XXX */
+		if (fatal)
+			panic("could not load RX mbuf");
+		m_freem(m);
+		return err;
+	}
+	data->m = m;
+	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
+
+	/* Update RX descriptor. */
+	iwx_update_rx_desc(sc, ring, idx, &seg);
+	return 0;
+}
+
+/*
+ * Extract the per-chain energy readings from an RX MPDU descriptor
+ * (v3 layout on AX210+, v1 otherwise) and return the stronger one as a
+ * negative dBm value; a raw reading of 0 maps to the floor (-256).
+ */
+static int
+iwx_rxmq_get_signal_strength(struct iwx_softc *sc,
+    struct iwx_rx_mpdu_desc *desc)
+{
+	int energy_a, energy_b;
+
+	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
+		energy_a = desc->v3.energy_a;
+		energy_b = desc->v3.energy_b;
+	} else {
+		energy_a = desc->v1.energy_a;
+		energy_b = desc->v1.energy_b;
+	}
+	energy_a = energy_a ? -energy_a : -256;
+	energy_b = energy_b ? -energy_b : -256;
+	return MAX(energy_a, energy_b);
+}
+
+/*
+ * Handle an RX PHY info notification: cache the PHY data so it can be
+ * paired with the matching frame notification that follows.
+ */
+static void
+iwx_rx_rx_phy_cmd(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
+    struct iwx_rx_data *data)
+{
+	struct iwx_rx_phy_info *phy_info = (void *)pkt->data;
+	struct iwx_cmd_header *cmd_hdr = &pkt->hdr;
+	int qid = cmd_hdr->qid;
+	struct iwx_tx_ring *ring = &sc->txq[qid];
+
+	/*
+	 * NOTE(review): this syncs a TX ring's dmat with the RX data map
+	 * and uses PREREAD after the CPU read — verify the intended
+	 * map/direction (POSTREAD on the RX ring looks more natural).
+	 */
+	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
+	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
+}
+
+/*
+ * Retrieve the average noise (in dBm) among receivers.
+ */
+static int
+iwx_get_noise(const struct iwx_statistics_rx_non_phy *stats)
+{
+	int i, total, nbant, noise;
+
+	total = nbant = noise = 0;
+	/* Average the non-zero beacon-silence readings of up to 3 chains. */
+	for (i = 0; i < 3; i++) {
+		noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
+		if (noise) {
+			total += noise;
+			nbant++;
+		}
+	}
+
+	/* There should be at least one antenna but check anyway. */
+	return (nbant == 0) ? -127 : (total / nbant) - 107;
+}
+
+/*
+ * NOTE(review): the CCMP decapsulation/replay-check helper below is
+ * compiled out (#if 0) — it still uses OpenBSD net80211 APIs
+ * (ieee80211_get_rxkey, k_rsc, ieee80211_inputm) that have no direct
+ * FreeBSD equivalents.  Kept for reference until hardware decryption
+ * support is ported.
+ */
+#if 0
+int
+iwx_ccmp_decap(struct iwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni,
+    struct ieee80211_rxinfo *rxi)
+{
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct ieee80211_key *k;
+	struct ieee80211_frame *wh;
+	uint64_t pn, *prsc;
+	uint8_t *ivp;
+	uint8_t tid;
+	int hdrlen, hasqos;
+
+	wh = mtod(m, struct ieee80211_frame *);
+	hdrlen = ieee80211_get_hdrlen(wh);
+	ivp = (uint8_t *)wh + hdrlen;
+
+	/* find key for decryption */
+	k = ieee80211_get_rxkey(ic, m, ni);
+	if (k == NULL || k->k_cipher != IEEE80211_CIPHER_CCMP)
+		return 1;
+
+	/* Check that ExtIV bit is be set. */
+	if (!(ivp[3] & IEEE80211_WEP_EXTIV))
+		return 1;
+
+	hasqos = ieee80211_has_qos(wh);
+	tid = hasqos ? ieee80211_get_qos(wh) & IEEE80211_QOS_TID : 0;
+	prsc = &k->k_rsc[tid];
+
+	/* Extract the 48-bit PN from the CCMP header. */
+	pn = (uint64_t)ivp[0] |
+	    (uint64_t)ivp[1] << 8 |
+	    (uint64_t)ivp[4] << 16 |
+	    (uint64_t)ivp[5] << 24 |
+	    (uint64_t)ivp[6] << 32 |
+	    (uint64_t)ivp[7] << 40;
+	if (rxi->rxi_flags & IEEE80211_RXI_HWDEC_SAME_PN) {
+		if (pn < *prsc) {
+			ic->ic_stats.is_ccmp_replays++;
+			return 1;
+		}
+	} else if (pn <= *prsc) {
+		ic->ic_stats.is_ccmp_replays++;
+		return 1;
+	}
+	/* Last seen packet number is updated in ieee80211_inputm(). */
+
+	/*
+	 * Some firmware versions strip the MIC, and some don't. It is not
+	 * clear which of the capability flags could tell us what to expect.
+	 * For now, keep things simple and just leave the MIC in place if
+	 * it is present.
+	 *
+	 * The IV will be stripped by ieee80211_inputm().
+	 */
+	return 0;
+}
+#endif
+
+/*
+ * Validate hardware decryption status of a received frame.  For
+ * protected data/management frames, require the cipher to be CCMP and
+ * the decrypt-done and MIC-OK status bits to be set.  Control frames
+ * and QoS null frames are passed through.  Returns 0 if the frame may
+ * be accepted, 1 if it should be dropped.
+ */
+static int
+iwx_rx_hwdecrypt(struct iwx_softc *sc, struct mbuf *m, uint32_t rx_pkt_status)
+{
+	struct ieee80211_frame *wh;
+	int ret = 0;
+	uint8_t type, subtype;
+
+	wh = mtod(m, struct ieee80211_frame *);
+
+	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
+	if (type == IEEE80211_FC0_TYPE_CTL) {
+		return 0;
+	}
+
+	/*
+	 * NOTE(review): bitwise AND with SUBTYPE_NODATA matches every
+	 * subtype that has that bit set, not only null-data frames —
+	 * confirm an exact comparison isn't intended.
+	 */
+	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
+	if (IEEE80211_QOS_HAS_SEQ(wh) && (subtype & IEEE80211_FC0_SUBTYPE_NODATA)) {
+		return 0;
+	}
+
+
+	if (((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) !=
+	    IEEE80211_FC0_TYPE_CTL)
+	    && (wh->i_fc[1] & IEEE80211_FC1_PROTECTED)) {
+		/* Only CCMP-protected frames are accepted. */
+		if ((rx_pkt_status & IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK) !=
+		    IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
+			DPRINTF(("%s: not IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC\n", __func__));
+			ret = 1;
+			goto out;
+		}
+		/* Check whether decryption was successful or not. */
+		if ((rx_pkt_status &
+		    (IWX_RX_MPDU_RES_STATUS_DEC_DONE |
+		    IWX_RX_MPDU_RES_STATUS_MIC_OK)) !=
+		    (IWX_RX_MPDU_RES_STATUS_DEC_DONE |
+		    IWX_RX_MPDU_RES_STATUS_MIC_OK)) {
+			DPRINTF(("%s: not IWX_RX_MPDU_RES_STATUS_MIC_OK\n", __func__));
+			ret = 1;
+			goto out;
+		}
+	}
+ out:
+	return ret;
+}
+
+/*
+ * Deliver one received frame to net80211.  Maps the hardware channel
+ * index to an ic_channels[] entry, fills the radiotap RX header when a
+ * sniffer is active (decoding the rate for both rate_n_flags API
+ * versions), then hands the mbuf to ieee80211_input_mimo{,_all}() with
+ * the driver lock dropped around the call.
+ */
+static void
+iwx_rx_frame(struct iwx_softc *sc, struct mbuf *m, int chanidx,
+    uint32_t rx_pkt_status, int is_shortpre, int rate_n_flags,
+    uint32_t device_timestamp, uint8_t rssi)
+{
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+	struct ieee80211_frame *wh;
+	struct ieee80211_node *ni;
+
+	/*
+	 * We need to turn the hardware provided channel index into a channel
+	 * and then find it in our ic_channels array
+	 */
+	if (chanidx < 0 || chanidx >= nitems(ic->ic_channels)) {
+		/*
+		 * OpenBSD points this at the ibss chan, which it defaults to
+		 * channel 1 and then never touches again. Skip a step.
+		 */
+		printf("iwx: %s:%d controlling chanidx to 1 (%d)\n", __func__, __LINE__, chanidx);
+		chanidx = 1;
+	}
+
+	/* Translate IEEE channel number to an ic_channels[] index. */
+	int channel = chanidx;
+	for (int i = 0; i < ic->ic_nchans; i++) {
+		if (ic->ic_channels[i].ic_ieee == channel) {
+			chanidx = i;
+		}
+	}
+	ic->ic_curchan = &ic->ic_channels[chanidx];
+
+	wh = mtod(m, struct ieee80211_frame *);
+	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
+
+#if 0	/* XXX hw decrypt */
+	if ((rxi->rxi_flags & IEEE80211_RXI_HWDEC) &&
+	    iwx_ccmp_decap(sc, m, ni, rxi) != 0) {
+		m_freem(m);
+		ieee80211_release_node(ic, ni);
+		return;
+	}
+#endif
+	if (ieee80211_radiotap_active_vap(vap)) {
+		struct iwx_rx_radiotap_header *tap = &sc->sc_rxtap;
+		uint16_t chan_flags;
+		int have_legacy_rate = 1;
+		uint8_t mcs, rate;
+
+		tap->wr_flags = 0;
+		if (is_shortpre)
+			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
+		tap->wr_chan_freq =
+		    htole16(ic->ic_channels[chanidx].ic_freq);
+		chan_flags = ic->ic_channels[chanidx].ic_flags;
+#if 0
+		if (ic->ic_curmode != IEEE80211_MODE_11N &&
+		    ic->ic_curmode != IEEE80211_MODE_11AC) {
+			chan_flags &= ~IEEE80211_CHAN_HT;
+			chan_flags &= ~IEEE80211_CHAN_40MHZ;
+		}
+		if (ic->ic_curmode != IEEE80211_MODE_11AC)
+			chan_flags &= ~IEEE80211_CHAN_VHT;
+#else
+		chan_flags &= ~IEEE80211_CHAN_HT;
+#endif
+		tap->wr_chan_flags = htole16(chan_flags);
+		tap->wr_dbm_antsignal = rssi;
+		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
+		tap->wr_tsft = device_timestamp;
+
+		/* rate_n_flags v2+ and v1 encode rate info differently. */
+		if (sc->sc_rate_n_flags_version >= 2) {
+			uint32_t mod_type = (rate_n_flags &
+			    IWX_RATE_MCS_MOD_TYPE_MSK);
+			const struct ieee80211_rateset *rs = NULL;
+			uint32_t ridx;
+			have_legacy_rate = (mod_type == IWX_RATE_MCS_CCK_MSK ||
+			    mod_type == IWX_RATE_MCS_LEGACY_OFDM_MSK);
+			mcs = (rate_n_flags & IWX_RATE_HT_MCS_CODE_MSK);
+			ridx = (rate_n_flags & IWX_RATE_LEGACY_RATE_MSK);
+			if (mod_type == IWX_RATE_MCS_CCK_MSK)
+				rs = &ieee80211_std_rateset_11b;
+			else if (mod_type == IWX_RATE_MCS_LEGACY_OFDM_MSK)
+				rs = &ieee80211_std_rateset_11a;
+			if (rs && ridx < rs->rs_nrates) {
+				rate = (rs->rs_rates[ridx] &
+				    IEEE80211_RATE_VAL);
+			} else
+				rate = 0;
+		} else {
+			have_legacy_rate = ((rate_n_flags &
+			    (IWX_RATE_MCS_HT_MSK_V1 |
+			    IWX_RATE_MCS_VHT_MSK_V1)) == 0);
+			mcs = (rate_n_flags &
+			    (IWX_RATE_HT_MCS_RATE_CODE_MSK_V1 |
+			    IWX_RATE_HT_MCS_NSS_MSK_V1));
+			rate = (rate_n_flags & IWX_RATE_LEGACY_RATE_MSK_V1);
+		}
+		if (!have_legacy_rate) {
+			tap->wr_rate = (0x80 | mcs);
+		} else {
+			switch (rate) {
+			/* CCK rates. */
+			case  10: tap->wr_rate =   2; break;
+			case  20: tap->wr_rate =   4; break;
+			case  55: tap->wr_rate =  11; break;
+			case 110: tap->wr_rate =  22; break;
+			/* OFDM rates. */
+			case 0xd: tap->wr_rate = 12; break;
+			case 0xf: tap->wr_rate = 18; break;
+			case 0x5: tap->wr_rate = 24; break;
+			case 0x7: tap->wr_rate = 36; break;
+			case 0x9: tap->wr_rate = 48; break;
+			case 0xb: tap->wr_rate = 72; break;
+			case 0x1: tap->wr_rate = 96; break;
+			case 0x3: tap->wr_rate = 108; break;
+			/* Unknown rate: should not happen. */
+			default:  tap->wr_rate = 0;
+			}
+			// XXX hack - this needs rebased with the new rate stuff anyway
+			tap->wr_rate = rate;
+		}
+	}
+
+	/* net80211 input may recurse into the driver; drop our lock. */
+	IWX_UNLOCK(sc);
+	if (ni == NULL) {
+		if (ieee80211_input_mimo_all(ic, m) == -1)
+			printf("%s:%d input_all returned -1\n", __func__, __LINE__);
+	} else {
+
+		if (ieee80211_input_mimo(ni, m) == -1)
+			printf("%s:%d input_all returned -1\n", __func__, __LINE__);
+		ieee80211_free_node(ni);
+	}
+	IWX_LOCK(sc);
+}
+
+static void
+iwx_rx_mpdu_mq(struct iwx_softc *sc, struct mbuf *m, void *pktdata,
+ size_t maxlen)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+ struct ieee80211_node *ni = vap->iv_bss;
+ struct ieee80211_key *k;
+ struct ieee80211_rx_stats rxs;
+ struct iwx_rx_mpdu_desc *desc;
+ uint32_t len, hdrlen, rate_n_flags, device_timestamp;
+ int rssi;
+ uint8_t chanidx;
+ uint16_t phy_info;
+ size_t desc_size;
+ int pad = 0;
+
+ if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
+ desc_size = sizeof(*desc);
+ else
+ desc_size = IWX_RX_DESC_SIZE_V1;
+
+ if (maxlen < desc_size) {
+ m_freem(m);
+ return; /* drop */
+ }
+
+ desc = (struct iwx_rx_mpdu_desc *)pktdata;
+
+ if (!(desc->status & htole16(IWX_RX_MPDU_RES_STATUS_CRC_OK)) ||
+ !(desc->status & htole16(IWX_RX_MPDU_RES_STATUS_OVERRUN_OK))) {
+ printf("%s: Bad CRC or FIFO: 0x%08X\n", __func__, desc->status);
+ m_freem(m);
+ return; /* drop */
+ }
+
+ len = le16toh(desc->mpdu_len);
+ if (ic->ic_opmode == IEEE80211_M_MONITOR) {
+ /* Allow control frames in monitor mode. */
+ if (len < sizeof(struct ieee80211_frame_cts)) {
+ m_freem(m);
+ return;
+ }
+
+ } else if (len < sizeof(struct ieee80211_frame)) {
+ m_freem(m);
+ return;
+ }
+ if (len > maxlen - desc_size) {
+ m_freem(m);
+ return;
+ }
+
+ // TODO: arithmetic on a pointer to void is a GNU extension
+ m->m_data = (char *)pktdata + desc_size;
+ m->m_pkthdr.len = m->m_len = len;
+
+ /* Account for padding following the frame header. */
+ if (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_PAD) {
+ struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
+ int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
+ if (type == IEEE80211_FC0_TYPE_CTL) {
+ switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
+ case IEEE80211_FC0_SUBTYPE_CTS:
+ hdrlen = sizeof(struct ieee80211_frame_cts);
+ break;
+ case IEEE80211_FC0_SUBTYPE_ACK:
+ hdrlen = sizeof(struct ieee80211_frame_ack);
+ break;
+ default:
+ hdrlen = sizeof(struct ieee80211_frame_min);
+ break;
+ }
+ } else
+ hdrlen = ieee80211_hdrsize(wh);
+
+ if ((le16toh(desc->status) &
+ IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
+ IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
+ // CCMP header length
+ hdrlen += 8;
+ }
+
+ memmove(m->m_data + 2, m->m_data, hdrlen);
+ m_adj(m, 2);
+
+ }
+
+ if ((le16toh(desc->status) &
+ IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
+ IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
+ pad = 1;
+ }
+
+// /*
+// * Hardware de-aggregates A-MSDUs and copies the same MAC header
+// * in place for each subframe. But it leaves the 'A-MSDU present'
+// * bit set in the frame header. We need to clear this bit ourselves.
+// * (XXX This workaround is not required on AX200/AX201 devices that
+// * have been tested by me, but it's unclear when this problem was
+// * fixed in the hardware. It definitely affects the 9k generation.
+// * Leaving this in place for now since some 9k/AX200 hybrids seem
+// * to exist that we may eventually add support for.)
+// *
+// * And we must allow the same CCMP PN for subframes following the
+// * first subframe. Otherwise they would be discarded as replays.
+// */
+ if (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_AMSDU) {
+ DPRINTF(("%s: === IWX_RX_MPDU_MFLG2_AMSDU\n", __func__));
+// struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
+// uint8_t subframe_idx = (desc->amsdu_info &
+// IWX_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK);
+// if (subframe_idx > 0)
+// rxi.rxi_flags |= IEEE80211_RXI_HWDEC_SAME_PN;
+// if (ieee80211_has_qos(wh) && ieee80211_has_addr4(wh) &&
+// m->m_len >= sizeof(struct ieee80211_qosframe_addr4)) {
+// struct ieee80211_qosframe_addr4 *qwh4 = mtod(m,
+// struct ieee80211_qosframe_addr4 *);
+// qwh4->i_qos[0] &= htole16(~IEEE80211_QOS_AMSDU);
+// } else if (ieee80211_has_qos(wh) &&
+// m->m_len >= sizeof(struct ieee80211_qosframe)) {
+// struct ieee80211_qosframe *qwh = mtod(m,
+// struct ieee80211_qosframe *);
+// qwh->i_qos[0] &= htole16(~IEEE80211_QOS_AMSDU);
+// }
+ }
+
+ /*
+ * Verify decryption before duplicate detection. The latter uses
+ * the TID supplied in QoS frame headers and this TID is implicitly
+ * verified as part of the CCMP nonce.
+ */
+ k = ieee80211_crypto_get_txkey(ni, m);
+ if (k != NULL &&
+ (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_AES_CCM) &&
+ iwx_rx_hwdecrypt(sc, m, le16toh(desc->status)/*, &rxi*/)) {
+ DPRINTF(("%s: iwx_rx_hwdecrypt failed\n", __func__));
+ m_freem(m);
+ return;
+ }
+
+ if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
+ rate_n_flags = le32toh(desc->v3.rate_n_flags);
+ chanidx = desc->v3.channel;
+ device_timestamp = le32toh(desc->v3.gp2_on_air_rise);
+ } else {
+ rate_n_flags = le32toh(desc->v1.rate_n_flags);
+ chanidx = desc->v1.channel;
+ device_timestamp = le32toh(desc->v1.gp2_on_air_rise);
+ }
+
+ phy_info = le16toh(desc->phy_info);
+
+ rssi = iwx_rxmq_get_signal_strength(sc, desc);
+ rssi = (0 - IWX_MIN_DBM) + rssi; /* normalize */
+ rssi = MIN(rssi, (IWX_MAX_DBM - IWX_MIN_DBM)); /* clip to max. 100% */
+
+ memset(&rxs, 0, sizeof(rxs));
+ rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
+ rxs.r_flags |= IEEE80211_R_BAND;
+ rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
+ rxs.r_flags |= IEEE80211_R_RSSI | IEEE80211_R_C_RSSI;
+ rxs.r_flags |= IEEE80211_R_TSF32 | IEEE80211_R_TSF_START;
+
+ rxs.c_ieee = chanidx;
+ rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee,
+ chanidx <= 14 ? IEEE80211_CHAN_2GHZ : IEEE80211_CHAN_5GHZ);
+ rxs.c_band = chanidx <= 14 ? IEEE80211_CHAN_2GHZ : IEEE80211_CHAN_5GHZ;
+ rxs.c_rx_tsf = device_timestamp;
+ rxs.c_chain = chanidx;
+
+ /* rssi is in 1/2db units */
+ rxs.c_rssi = rssi * 2;
+ rxs.c_nf = sc->sc_noise;
+
+ if (pad) {
+ rxs.c_pktflags |= IEEE80211_RX_F_DECRYPTED;
+ rxs.c_pktflags |= IEEE80211_RX_F_IV_STRIP;
+ }
+
+ if (ieee80211_add_rx_params(m, &rxs) == 0) {
+ printf("%s: ieee80211_add_rx_params failed\n", __func__);
+ return;
+ }
+
+ ieee80211_add_rx_params(m, &rxs);
+
+#if 0
+ if (iwx_rx_reorder(sc, m, chanidx, desc,
+ (phy_info & IWX_RX_MPDU_PHY_SHORT_PREAMBLE),
+ rate_n_flags, device_timestamp, &rxi, ml))
+ return;
+#endif
+
+ if (pad) {
+#define TRIM 8
+ struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
+ hdrlen = ieee80211_hdrsize(wh);
+ memmove(m->m_data + TRIM, m->m_data, hdrlen);
+ m_adj(m, TRIM);
+#undef TRIM
+ }
+
+ iwx_rx_frame(sc, m, chanidx, le16toh(desc->status),
+ (phy_info & IWX_RX_MPDU_PHY_SHORT_PREAMBLE),
+ rate_n_flags, device_timestamp, rssi);
+}
+
/*
 * Reset the data TBs (transfer buffers) of the TX descriptor at 'idx'
 * so the hardware cannot re-use stale DMA addresses or lengths.
 */
static void
iwx_clear_tx_desc(struct iwx_softc *sc, struct iwx_tx_ring *ring, int idx)
{
	struct iwx_tfh_tfd *desc = &ring->desc[idx];
	/* Low 5 bits of num_tbs hold the TB count. */
	uint8_t num_tbs = le16toh(desc->num_tbs) & 0x1f;
	int i;

	/* First TB is never cleared - it is bidirectional DMA data. */
	for (i = 1; i < num_tbs; i++) {
		struct iwx_tfh_tb *tb = &desc->tbs[i];
		memset(tb, 0, sizeof(*tb));
	}
	desc->num_tbs = htole16(1);

	/* Flush the cleared descriptor to the device. */
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);
}
+
/*
 * Complete a single transmitted frame: unload its DMA map and hand the
 * mbuf back to net80211. Always reported with status 0 here; error
 * accounting is done by the caller (see iwx_rx_tx_cmd()).
 */
static void
iwx_txd_done(struct iwx_softc *sc, struct iwx_tx_ring *ring,
    struct iwx_tx_data *txd)
{
	bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(ring->data_dmat, txd->map);

	/* ieee80211_tx_complete() frees the mbuf and drops the node ref. */
	ieee80211_tx_complete(&txd->in->in_ni, txd->m, 0);
	txd->m = NULL;
	txd->in = NULL;
}
+
/*
 * Reclaim completed TX descriptors up to (but not including) hardware
 * ring index 'idx': complete each frame's mbuf and shrink the queued
 * count. Software and hardware ring sizes may differ, so both tail
 * pointers advance with their own modulus.
 */
static void
iwx_txq_advance(struct iwx_softc *sc, struct iwx_tx_ring *ring, uint16_t idx)
{
	struct iwx_tx_data *txd;

	while (ring->tail_hw != idx) {
		txd = &ring->data[ring->tail];
		if (txd->m != NULL) {
			iwx_clear_tx_desc(sc, ring, ring->tail);
			/* Zero the byte-count table entry for this slot. */
			iwx_tx_update_byte_tbl(sc, ring, ring->tail, 0, 0);
			iwx_txd_done(sc, ring, txd);
			ring->queued--;
			if (ring->queued < 0)
				panic("caught negative queue count");
		}
		ring->tail = (ring->tail + 1) % IWX_TX_RING_COUNT;
		ring->tail_hw = (ring->tail_hw + 1) % sc->max_tfd_queue_size;
	}
}
+
/*
 * Handle a TX command response from firmware: update interface packet
 * counters for the completed frame and reclaim all Tx descriptors up
 * to the SSN index reported by firmware.
 */
static void
iwx_rx_tx_cmd(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
    struct iwx_rx_data *data)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = IC2IFP(ic);
	struct iwx_cmd_header *cmd_hdr = &pkt->hdr;
	int qid = cmd_hdr->qid, status, txfail;
	struct iwx_tx_ring *ring = &sc->txq[qid];
	struct iwx_tx_resp *tx_resp = (void *)pkt->data;
	uint32_t ssn;
	uint32_t len = iwx_rx_packet_len(pkt);
	int idx = cmd_hdr->idx;
	struct iwx_tx_data *txd = &ring->data[idx];
	struct mbuf *m = txd->m;

	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);

	/* Sanity checks. */
	if (sizeof(*tx_resp) > len)
		return;
	/* Non-aggregation queues report exactly one frame per response. */
	if (qid < IWX_FIRST_AGG_TX_QUEUE && tx_resp->frame_count > 1)
		return;
	/* Agg responses append a per-frame status array plus the SSN. */
	if (qid >= IWX_FIRST_AGG_TX_QUEUE && sizeof(*tx_resp) + sizeof(ssn) +
	    tx_resp->frame_count * sizeof(tx_resp->status) > len)
		return;

	sc->sc_tx_timer[qid] = 0;

	if (tx_resp->frame_count > 1) /* A-MPDU */
		return;

	status = le16toh(tx_resp->status.status) & IWX_TX_STATUS_MSK;
	txfail = (status != IWX_TX_STATUS_SUCCESS &&
	    status != IWX_TX_STATUS_DIRECT_DONE);

#ifdef __not_yet__
	/* TODO: Replace accounting below with ieee80211_tx_complete() */
	ieee80211_tx_complete(&in->in_ni, m, txfail);
#else
	if (txfail)
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	else {
		if_inc_counter(ifp, IFCOUNTER_OBYTES, m->m_pkthdr.len);
		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		if (m->m_flags & M_MCAST)
			if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
	}
#endif
	/*
	 * On hardware supported by iwx(4) the SSN counter corresponds
	 * to a Tx ring index rather than a sequence number.
	 * Frames up to this index (non-inclusive) can now be freed.
	 */
	memcpy(&ssn, &tx_resp->status + tx_resp->frame_count, sizeof(ssn));
	ssn = le32toh(ssn);
	if (ssn < sc->max_tfd_queue_size) {
		iwx_txq_advance(sc, ring, ssn);
		iwx_clear_oactive(sc, ring);
	}
}
+
/*
 * Clear the queue-full state for 'ring' once it drains below the low
 * watermark and, when no queue remains full, restart transmission.
 */
static void
iwx_clear_oactive(struct iwx_softc *sc, struct iwx_tx_ring *ring)
{
	if (ring->queued < iwx_lomark) {
		sc->qfullmsk &= ~(1 << ring->qid);
		if (sc->qfullmsk == 0 /* && ifq_is_oactive(&ifp->if_snd) */) {
			/*
			 * Well, we're in interrupt context, but then again
			 * I guess net80211 does all sorts of stunts in
			 * interrupt context, so maybe this is no biggie.
			 */
			iwx_start(sc);
		}
	}
}
+
+static void
+iwx_rx_compressed_ba(struct iwx_softc *sc, struct iwx_rx_packet *pkt)
+{
+ struct iwx_compressed_ba_notif *ba_res = (void *)pkt->data;
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+ struct iwx_node *in = IWX_NODE(vap->iv_bss);
+ struct ieee80211_node *ni = &in->in_ni;
+ struct iwx_tx_ring *ring;
+ uint16_t i, tfd_cnt, ra_tid_cnt, idx;
+ int qid;
+
+// if (ic->ic_state != IEEE80211_S_RUN)
+// return;
+
+ if (iwx_rx_packet_payload_len(pkt) < sizeof(*ba_res))
+ return;
+
+ if (ba_res->sta_id != IWX_STATION_ID)
+ return;
+
+ in = (void *)ni;
+
+ tfd_cnt = le16toh(ba_res->tfd_cnt);
+ ra_tid_cnt = le16toh(ba_res->ra_tid_cnt);
+ if (!tfd_cnt || iwx_rx_packet_payload_len(pkt) < (sizeof(*ba_res) +
+ sizeof(ba_res->ra_tid[0]) * ra_tid_cnt +
+ sizeof(ba_res->tfd[0]) * tfd_cnt))
+ return;
+
+ for (i = 0; i < tfd_cnt; i++) {
+ struct iwx_compressed_ba_tfd *ba_tfd = &ba_res->tfd[i];
+ uint8_t tid;
+
+ tid = ba_tfd->tid;
+ if (tid >= nitems(sc->aggqid))
+ continue;
+
+ qid = sc->aggqid[tid];
+ if (qid != htole16(ba_tfd->q_num))
+ continue;
+
+ ring = &sc->txq[qid];
+
+#if 0
+ ba = &ni->ni_tx_ba[tid];
+ if (ba->ba_state != IEEE80211_BA_AGREED)
+ continue;
+#endif
+ idx = le16toh(ba_tfd->tfd_index);
+ sc->sc_tx_timer[qid] = 0;
+ iwx_txq_advance(sc, ring, idx);
+ iwx_clear_oactive(sc, ring);
+ }
+}
+
/*
 * Handle a missed-beacons notification from firmware: inform net80211
 * once the count of consecutively missed beacons exceeds the VAP's
 * beacon-miss threshold.
 */
static void
iwx_rx_bmiss(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
    struct iwx_rx_data *data)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwx_missed_beacons_notif *mbn = (void *)pkt->data;
	uint32_t missed;

	/* Only meaningful while associated in station mode. */
	if ((ic->ic_opmode != IEEE80211_M_STA) ||
	    (vap->iv_state != IEEE80211_S_RUN))
		return;

	bus_dmamap_sync(sc->rxq.data_dmat, data->map,
	    BUS_DMASYNC_POSTREAD);

	missed = le32toh(mbn->consec_missed_beacons_since_last_rx);
	if (missed > vap->iv_bmissthreshold) {
		/* net80211 decides whether to probe or disassociate. */
		ieee80211_beacon_miss(ic);
	}

}
+
/*
 * Add or remove the binding between our MAC context and its PHY
 * context (channel) via the firmware BINDING_CONTEXT command.
 * Returns 0 on success or an errno.
 */
static int
iwx_binding_cmd(struct iwx_softc *sc, struct iwx_node *in, uint32_t action)
{
	struct iwx_binding_cmd cmd;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwx_vap *ivp = IWX_VAP(vap);
	struct iwx_phy_ctxt *phyctxt = ivp->phy_ctxt;
	uint32_t mac_id = IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color);
	int i, err, active = (sc->sc_flags & IWX_FLAG_BINDING_ACTIVE);
	uint32_t status;

	/* Driver state must agree with the requested transition. */
	if (action == IWX_FW_CTXT_ACTION_ADD && active)
		panic("binding already added");
	if (action == IWX_FW_CTXT_ACTION_REMOVE && !active)
		panic("binding already removed");

	if (phyctxt == NULL) /* XXX race with iwx_stop() */
		return EINVAL;

	memset(&cmd, 0, sizeof(cmd));

	cmd.id_and_color
	    = htole32(IWX_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
	cmd.action = htole32(action);
	cmd.phy = htole32(IWX_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));

	/* Bind a single MAC; mark the remaining slots invalid. */
	cmd.macs[0] = htole32(mac_id);
	for (i = 1; i < IWX_MAX_MACS_IN_BINDING; i++)
		cmd.macs[i] = htole32(IWX_FW_CTXT_INVALID);

	/* Without CDB support every binding lives on the 2.4GHz LMAC. */
	if (IEEE80211_IS_CHAN_2GHZ(phyctxt->channel) ||
	    !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT))
		cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX);
	else
		cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX);

	status = 0;
	err = iwx_send_cmd_pdu_status(sc, IWX_BINDING_CONTEXT_CMD, sizeof(cmd),
	    &cmd, &status);
	/* Command was delivered but firmware reported a failure. */
	if (err == 0 && status != 0)
		err = EIO;

	return err;
}
+
+static uint8_t
+iwx_get_vht_ctrl_pos(struct ieee80211com *ic, struct ieee80211_channel *chan)
+{
+ int ctlchan = ieee80211_chan2ieee(ic, chan);
+ int midpoint = chan->ic_vht_ch_freq1;
+
+ /*
+ * The FW is expected to check the control channel position only
+ * when in HT/VHT and the channel width is not 20MHz. Return
+ * this value as the default one:
+ */
+ uint8_t pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
+
+ switch (ctlchan - midpoint) {
+ case -6:
+ pos = IWX_PHY_VHT_CTRL_POS_2_BELOW;
+ break;
+ case -2:
+ pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
+ break;
+ case 2:
+ pos = IWX_PHY_VHT_CTRL_POS_1_ABOVE;
+ break;
+ case 6:
+ pos = IWX_PHY_VHT_CTRL_POS_2_ABOVE;
+ break;
+ default:
+ break;
+ }
+
+ return pos;
+}
+
/*
 * Build and send a PHY context command using the "ultra high band"
 * (enlarged fw_channel_info) command layout, API versions 3 and 4.
 * Returns 0 on success or an errno from command submission.
 */
static int
iwx_phy_ctxt_cmd_uhb_v3_v4(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
    uint8_t chains_static, uint8_t chains_dynamic, uint32_t action, uint8_t sco,
    uint8_t vht_chan_width, int cmdver)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_phy_context_cmd_uhb cmd;
	uint8_t active_cnt, idle_cnt;
	struct ieee80211_channel *chan = ctxt->channel;

	memset(&cmd, 0, sizeof(cmd));
	cmd.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(ctxt->id,
	    ctxt->color));
	cmd.action = htole32(action);

	/* Without CDB support all contexts live on the 2.4GHz LMAC. */
	if (IEEE80211_IS_CHAN_2GHZ(chan) ||
	    !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT))
		cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX);
	else
		cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX);

	cmd.ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
	    IWX_PHY_BAND_24 : IWX_PHY_BAND_5;
	cmd.ci.channel = htole32(ieee80211_chan2ieee(ic, chan));

	/* Channel width and control channel position. */
	if (IEEE80211_IS_CHAN_VHT80(chan)) {
		cmd.ci.ctrl_pos = iwx_get_vht_ctrl_pos(ic, chan);
		cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE80;
	} else if (IEEE80211_IS_CHAN_HT40(chan)) {
		cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40;
		if (IEEE80211_IS_CHAN_HT40D(chan))
			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_ABOVE;
		else
			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
	} else {
		cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
		cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
	}

	/*
	 * Newer firmware (cmd v4, or RLC_CONFIG_CMD v2) configures Rx
	 * chains via a separate command; only fill rxchain_info for
	 * older firmware.
	 */
	if (cmdver < 4 && iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
	    IWX_RLC_CONFIG_CMD) != 2) {
		idle_cnt = chains_static;
		active_cnt = chains_dynamic;
		cmd.rxchain_info = htole32(iwx_fw_valid_rx_ant(sc) <<
		    IWX_PHY_RX_CHAIN_VALID_POS);
		cmd.rxchain_info |= htole32(idle_cnt <<
		    IWX_PHY_RX_CHAIN_CNT_POS);
		cmd.rxchain_info |= htole32(active_cnt <<
		    IWX_PHY_RX_CHAIN_MIMO_CNT_POS);
	}

	return iwx_send_cmd_pdu(sc, IWX_PHY_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
}
+
#if 0
/*
 * Legacy (non-UHB) variant of the PHY context command, kept disabled
 * for reference. Only the UHB layout is currently wired up; see
 * iwx_phy_ctxt_cmd_uhb_v3_v4() and the dispatch in iwx_phy_ctxt_cmd().
 */
int
iwx_phy_ctxt_cmd_v3_v4(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
    uint8_t chains_static, uint8_t chains_dynamic, uint32_t action, uint8_t sco,
    uint8_t vht_chan_width, int cmdver)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_phy_context_cmd cmd;
	uint8_t active_cnt, idle_cnt;
	struct ieee80211_channel *chan = ctxt->channel;

	memset(&cmd, 0, sizeof(cmd));
	cmd.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(ctxt->id,
	    ctxt->color));
	cmd.action = htole32(action);

	if (IEEE80211_IS_CHAN_2GHZ(ctxt->channel) ||
	    !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT))
		cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX);
	else
		cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX);

	cmd.ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
	    IWX_PHY_BAND_24 : IWX_PHY_BAND_5;
	cmd.ci.channel = ieee80211_chan2ieee(ic, chan);
	if (vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80) {
		cmd.ci.ctrl_pos = iwx_get_vht_ctrl_pos(ic, chan);
		cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE80;
	} else if (chan->ic_flags & IEEE80211_CHAN_40MHZ) {
		if (sco == IEEE80211_HTOP0_SCO_SCA) {
			/* secondary chan above -> control chan below */
			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40;
		} else if (sco == IEEE80211_HTOP0_SCO_SCB) {
			/* secondary chan below -> control chan above */
			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_ABOVE;
			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40;
		} else {
			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
		}
	} else {
		cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
		cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
	}

	if (cmdver < 4 && iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
	    IWX_RLC_CONFIG_CMD) != 2) {
		idle_cnt = chains_static;
		active_cnt = chains_dynamic;
		cmd.rxchain_info = htole32(iwx_fw_valid_rx_ant(sc) <<
		    IWX_PHY_RX_CHAIN_VALID_POS);
		cmd.rxchain_info |= htole32(idle_cnt <<
		    IWX_PHY_RX_CHAIN_CNT_POS);
		cmd.rxchain_info |= htole32(active_cnt <<
		    IWX_PHY_RX_CHAIN_MIMO_CNT_POS);
	}

	return iwx_send_cmd_pdu(sc, IWX_PHY_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
}
#endif
+
/*
 * Dispatch a PHY context command to firmware. Only command versions
 * 3/4 with the UHB (enlarged channel-info) layout are supported here;
 * older hardware deliberately panics below until support is added.
 */
static int
iwx_phy_ctxt_cmd(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
    uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
    uint32_t apply_time, uint8_t sco, uint8_t vht_chan_width)
{
	int cmdver;

	cmdver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP, IWX_PHY_CONTEXT_CMD);
	if (cmdver != 3 && cmdver != 4) {
		printf("%s: firmware does not support phy-context-cmd v3/v4\n",
		    DEVNAME(sc));
		return ENOTSUP;
	}

	/*
	 * Intel increased the size of the fw_channel_info struct and neglected
	 * to bump the phy_context_cmd struct, which contains an fw_channel_info
	 * member in the middle.
	 * To keep things simple we use a separate function to handle the larger
	 * variant of the phy context command.
	 */
	if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS)) {
		return iwx_phy_ctxt_cmd_uhb_v3_v4(sc, ctxt, chains_static,
		    chains_dynamic, action, sco, vht_chan_width, cmdver);
	} else
		panic("Unsupported old hardware contact thj@");

#if 0
	return iwx_phy_ctxt_cmd_v3_v4(sc, ctxt, chains_static, chains_dynamic,
	    action, sco, vht_chan_width, cmdver);
#endif
}
+
+static int
+iwx_send_cmd(struct iwx_softc *sc, struct iwx_host_cmd *hcmd)
+{
+#ifdef IWX_DEBUG
+ iwx_bbl_add_entry(hcmd->id, IWX_BBL_CMD_TX);
+#endif
+ struct iwx_tx_ring *ring = &sc->txq[IWX_DQA_CMD_QUEUE];
+ struct iwx_tfh_tfd *desc;
+ struct iwx_tx_data *txdata;
+ struct iwx_device_cmd *cmd;
+ struct mbuf *m;
+ bus_addr_t paddr;
+ uint64_t addr;
+ int err = 0, i, paylen, off/*, s*/;
+ int idx, code, async, group_id;
+ size_t hdrlen, datasz;
+ uint8_t *data;
+ int generation = sc->sc_generation;
+ bus_dma_segment_t seg[10];
+ int nsegs;
+
+ code = hcmd->id;
+ async = hcmd->flags & IWX_CMD_ASYNC;
+ idx = ring->cur;
+
+ for (i = 0, paylen = 0; i < nitems(hcmd->len); i++) {
+ paylen += hcmd->len[i];
+ }
+
+ /* If this command waits for a response, allocate response buffer. */
+ hcmd->resp_pkt = NULL;
+ if (hcmd->flags & IWX_CMD_WANT_RESP) {
+ uint8_t *resp_buf;
+ KASSERT(!async, ("async command want response"));
+ KASSERT(hcmd->resp_pkt_len >= sizeof(struct iwx_rx_packet),
+ ("wrong pkt len 1"));
+ KASSERT(hcmd->resp_pkt_len <= IWX_CMD_RESP_MAX,
+ ("wrong pkt len 2"));
+ if (sc->sc_cmd_resp_pkt[idx] != NULL)
+ return ENOSPC;
+ resp_buf = malloc(hcmd->resp_pkt_len, M_DEVBUF,
+ M_NOWAIT | M_ZERO);
+ if (resp_buf == NULL)
+ return ENOMEM;
+ sc->sc_cmd_resp_pkt[idx] = resp_buf;
+ sc->sc_cmd_resp_len[idx] = hcmd->resp_pkt_len;
+ } else {
+ sc->sc_cmd_resp_pkt[idx] = NULL;
+ }
+
+ desc = &ring->desc[idx];
+ txdata = &ring->data[idx];
+
+ /*
+ * XXX Intel inside (tm)
+ * Firmware API versions >= 50 reject old-style commands in
+ * group 0 with a "BAD_COMMAND" firmware error. We must pretend
+ * that such commands were in the LONG_GROUP instead in order
+ * for firmware to accept them.
+ */
+ if (iwx_cmd_groupid(code) == 0) {
+ code = IWX_WIDE_ID(IWX_LONG_GROUP, code);
+ txdata->flags |= IWX_TXDATA_FLAG_CMD_IS_NARROW;
+ } else
+ txdata->flags &= ~IWX_TXDATA_FLAG_CMD_IS_NARROW;
+
+ group_id = iwx_cmd_groupid(code);
+
+ hdrlen = sizeof(cmd->hdr_wide);
+ datasz = sizeof(cmd->data_wide);
+
+ if (paylen > datasz) {
+ /* Command is too large to fit in pre-allocated space. */
+ size_t totlen = hdrlen + paylen;
+ if (paylen > IWX_MAX_CMD_PAYLOAD_SIZE) {
+ printf("%s: firmware command too long (%zd bytes)\n",
+ DEVNAME(sc), totlen);
+ err = EINVAL;
+ goto out;
+ }
+ if (totlen > IWX_RBUF_SIZE)
+ panic("totlen > IWX_RBUF_SIZE");
+ m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWX_RBUF_SIZE);
+ if (m == NULL) {
+ printf("%s: could not get fw cmd mbuf (%i bytes)\n",
+ DEVNAME(sc), IWX_RBUF_SIZE);
+ err = ENOMEM;
+ goto out;
+ }
+ m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
+ err = bus_dmamap_load_mbuf_sg(ring->data_dmat, txdata->map, m,
+ seg, &nsegs, BUS_DMA_NOWAIT);
+ if (nsegs > 20)
+ panic("nsegs > 20");
+ DPRINTF(("%s: nsegs=%i\n", __func__, nsegs));
+ if (err) {
+ printf("%s: could not load fw cmd mbuf (%zd bytes)\n",
+ DEVNAME(sc), totlen);
+ m_freem(m);
+ goto out;
+ }
+ txdata->m = m; /* mbuf will be freed in iwx_cmd_done() */
+ cmd = mtod(m, struct iwx_device_cmd *);
+ paddr = seg[0].ds_addr;
+ } else {
+ cmd = &ring->cmd[idx];
+ paddr = txdata->cmd_paddr;
+ }
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->hdr_wide.opcode = iwx_cmd_opcode(code);
+ cmd->hdr_wide.group_id = group_id;
+ cmd->hdr_wide.qid = ring->qid;
+ cmd->hdr_wide.idx = idx;
+ cmd->hdr_wide.length = htole16(paylen);
+ cmd->hdr_wide.version = iwx_cmd_version(code);
+ data = cmd->data_wide;
+
+ for (i = 0, off = 0; i < nitems(hcmd->data); i++) {
+ if (hcmd->len[i] == 0)
+ continue;
+ memcpy(data + off, hcmd->data[i], hcmd->len[i]);
+ off += hcmd->len[i];
+ }
+ KASSERT(off == paylen, ("off %d != paylen %d", off, paylen));
+
+ desc->tbs[0].tb_len = htole16(MIN(hdrlen + paylen, IWX_FIRST_TB_SIZE));
+ addr = htole64(paddr);
+ memcpy(&desc->tbs[0].addr, &addr, sizeof(addr));
+ if (hdrlen + paylen > IWX_FIRST_TB_SIZE) {
+ DPRINTF(("%s: hdrlen=%zu paylen=%d\n", __func__, hdrlen,
+ paylen));
+ desc->tbs[1].tb_len = htole16(hdrlen + paylen -
+ IWX_FIRST_TB_SIZE);
+ addr = htole64(paddr + IWX_FIRST_TB_SIZE);
+ memcpy(&desc->tbs[1].addr, &addr, sizeof(addr));
+ desc->num_tbs = htole16(2);
+ } else
+ desc->num_tbs = htole16(1);
+
+ if (paylen > datasz) {
+ bus_dmamap_sync(ring->data_dmat, txdata->map,
+ BUS_DMASYNC_PREWRITE);
+ } else {
+ bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
+ BUS_DMASYNC_PREWRITE);
+ }
+ bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
+ BUS_DMASYNC_PREWRITE);
+
+ /* Kick command ring. */
+ ring->queued++;
+ ring->cur = (ring->cur + 1) % IWX_TX_RING_COUNT;
+ ring->cur_hw = (ring->cur_hw + 1) % sc->max_tfd_queue_size;
+ DPRINTF(("%s: ring->cur_hw=%i\n", __func__, ring->cur_hw));
+ IWX_WRITE(sc, IWX_HBUS_TARG_WRPTR, ring->qid << 16 | ring->cur_hw);
+
+ if (!async) {
+ err = msleep(desc, &sc->sc_mtx, PCATCH, "iwxcmd", hz);
+ if (err == 0) {
+ /* if hardware is no longer up, return error */
+ if (generation != sc->sc_generation) {
+ err = ENXIO;
+ goto out;
+ }
+
+ /* Response buffer will be freed in iwx_free_resp(). */
+ hcmd->resp_pkt = (void *)sc->sc_cmd_resp_pkt[idx];
+ sc->sc_cmd_resp_pkt[idx] = NULL;
+ } else if (generation == sc->sc_generation) {
+ free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF);
+ sc->sc_cmd_resp_pkt[idx] = NULL;
+ }
+ }
+out:
+ return err;
+}
+
+static int
+iwx_send_cmd_pdu(struct iwx_softc *sc, uint32_t id, uint32_t flags,
+ uint16_t len, const void *data)
+{
+ struct iwx_host_cmd cmd = {
+ .id = id,
+ .len = { len, },
+ .data = { data, },
+ .flags = flags,
+ };
+
+ return iwx_send_cmd(sc, &cmd);
+}
+
+static int
+iwx_send_cmd_status(struct iwx_softc *sc, struct iwx_host_cmd *cmd,
+ uint32_t *status)
+{
+ struct iwx_rx_packet *pkt;
+ struct iwx_cmd_response *resp;
+ int err, resp_len;
+
+ KASSERT(((cmd->flags & IWX_CMD_WANT_RESP) == 0), ("IWX_CMD_WANT_RESP"));
+ cmd->flags |= IWX_CMD_WANT_RESP;
+ cmd->resp_pkt_len = sizeof(*pkt) + sizeof(*resp);
+
+ err = iwx_send_cmd(sc, cmd);
+ if (err)
+ return err;
+
+ pkt = cmd->resp_pkt;
+ if (pkt == NULL || (pkt->hdr.flags & IWX_CMD_FAILED_MSK))
+ return EIO;
+
+ resp_len = iwx_rx_packet_payload_len(pkt);
+ if (resp_len != sizeof(*resp)) {
+ iwx_free_resp(sc, cmd);
+ return EIO;
+ }
+
+ resp = (void *)pkt->data;
+ *status = le32toh(resp->status);
+ iwx_free_resp(sc, cmd);
+ return err;
+}
+
+static int
+iwx_send_cmd_pdu_status(struct iwx_softc *sc, uint32_t id, uint16_t len,
+ const void *data, uint32_t *status)
+{
+ struct iwx_host_cmd cmd = {
+ .id = id,
+ .len = { len, },
+ .data = { data, },
+ };
+
+ return iwx_send_cmd_status(sc, &cmd, status);
+}
+
+static void
+iwx_free_resp(struct iwx_softc *sc, struct iwx_host_cmd *hcmd)
+{
+ KASSERT((hcmd->flags & (IWX_CMD_WANT_RESP)) == IWX_CMD_WANT_RESP,
+ ("hcmd flags !IWX_CMD_WANT_RESP"));
+ free(hcmd->resp_pkt, M_DEVBUF);
+ hcmd->resp_pkt = NULL;
+}
+
/*
 * Firmware acknowledged a host command: tear down the command's DMA
 * mapping (when it used an on-the-fly mbuf), wake up the sleeping
 * issuer in iwx_send_cmd(), and decrement the queued count.
 */
static void
iwx_cmd_done(struct iwx_softc *sc, int qid, int idx, int code)
{
	struct iwx_tx_ring *ring = &sc->txq[IWX_DQA_CMD_QUEUE];
	struct iwx_tx_data *data;

	if (qid != IWX_DQA_CMD_QUEUE) {
		return; /* Not a command ack. */
	}

	data = &ring->data[idx];

	/* Large commands were copied into an mbuf; release it now. */
	if (data->m != NULL) {
		bus_dmamap_sync(ring->data_dmat, data->map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ring->data_dmat, data->map);
		m_freem(data->m);
		data->m = NULL;
	}
	/* iwx_send_cmd() sleeps on the descriptor address. */
	wakeup(&ring->desc[idx]);

	DPRINTF(("%s: command 0x%x done\n", __func__, code));
	if (ring->queued == 0) {
		DPRINTF(("%s: unexpected firmware response to command 0x%x\n",
		    DEVNAME(sc), code));
	} else if (ring->queued > 0)
		ring->queued--;
}
+
+static uint32_t
+iwx_fw_rateidx_ofdm(uint8_t rval)
+{
+ /* Firmware expects indices which match our 11a rate set. */
+ const struct ieee80211_rateset *rs = &ieee80211_std_rateset_11a;
+ int i;
+
+ for (i = 0; i < rs->rs_nrates; i++) {
+ if ((rs->rs_rates[i] & IEEE80211_RATE_VAL) == rval)
+ return i;
+ }
+
+ return 0;
+}
+
+static uint32_t
+iwx_fw_rateidx_cck(uint8_t rval)
+{
+ /* Firmware expects indices which match our 11b rate set. */
+ const struct ieee80211_rateset *rs = &ieee80211_std_rateset_11b;
+ int i;
+
+ for (i = 0; i < rs->rs_nrates; i++) {
+ if ((rs->rs_rates[i] & IEEE80211_RATE_VAL) == rval)
+ return i;
+ }
+
+ return 0;
+}
+
+static int
+iwx_min_basic_rate(struct ieee80211com *ic)
+{
+ struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+ struct ieee80211_node *ni = vap->iv_bss;
+ struct ieee80211_rateset *rs = &ni->ni_rates;
+ struct ieee80211_channel *c = ni->ni_chan;
+ int i, min, rval;
+
+ min = -1;
+
+ if (c == IEEE80211_CHAN_ANYC) {
+ printf("%s: channel is IEEE80211_CHAN_ANYC\n", __func__);
+ return -1;
+ }
+
+ for (i = 0; i < rs->rs_nrates; i++) {
+ if ((rs->rs_rates[i] & IEEE80211_RATE_BASIC) == 0)
+ continue;
+ rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
+ if (min == -1)
+ min = rval;
+ else if (rval < min)
+ min = rval;
+ }
+
+ /* Default to 1 Mbit/s on 2GHz and 6 Mbit/s on 5GHz. */
+ if (min == -1)
+ min = IEEE80211_IS_CHAN_2GHZ(c) ? 2 : 12;
+
+ return min;
+}
+
/*
 * Determine the Tx command flags and Tx rate+flags to use.
 * Return the selected Tx rate.
 */
static const struct iwx_rate *
iwx_tx_fill_cmd(struct iwx_softc *sc, struct iwx_node *in,
    struct ieee80211_frame *wh, uint16_t *flags, uint32_t *rate_n_flags,
    struct mbuf *m)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211_rateset *rs = &ni->ni_rates;
	const struct iwx_rate *rinfo = NULL;
	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	int ridx = iwx_min_basic_rate(ic);
	int min_ridx, rate_flags;
	uint8_t rval;

	/* We're in the process of clearing the node, no channel already */
	if (ridx == -1)
		return NULL;

	min_ridx = iwx_rval2ridx(ridx);

	*flags = 0;

	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
	    type != IEEE80211_FC0_TYPE_DATA) {
		/* for non-data, use the lowest supported rate */
		ridx = min_ridx;
		*flags |= IWX_TX_FLAGS_CMD_RATE;
	} else if (ni->ni_flags & IEEE80211_NODE_HT) {
		/* Map the node's current MCS to a driver rate index. */
		ridx = iwx_mcs2ridx[ieee80211_node_get_txrate_dot11rate(ni)
		    & ~IEEE80211_RATE_MCS];
	} else {
		/* Legacy rate: clamp to the minimum basic rate. */
		rval = (rs->rs_rates[ieee80211_node_get_txrate_dot11rate(ni)]
		    & IEEE80211_RATE_VAL);
		ridx = iwx_rval2ridx(rval);
		if (ridx < min_ridx)
			ridx = min_ridx;
	}

	/* Prioritize EAPOL frames so key exchanges complete promptly. */
	if (m->m_flags & M_EAPOL)
		*flags |= IWX_TX_FLAGS_HIGH_PRI;

	rinfo = &iwx_rates[ridx];

	/*
	 * Do not fill rate_n_flags if firmware controls the Tx rate.
	 * For data frames we rely on Tx rate scaling in firmware by default.
	 */
	if ((*flags & IWX_TX_FLAGS_CMD_RATE) == 0) {
		*rate_n_flags = 0;
		return rinfo;
	}

	/*
	 * Forcing a CCK/OFDM legacy rate is important for management frames.
	 * Association will only succeed if we do this correctly.
	 */

	DPRINTF(("%s: min_ridx=%i\n", __func__, min_ridx));
	DPRINTF(("%s: ridx=%i\n", __func__, ridx));
	rate_flags = IWX_RATE_MCS_ANT_A_MSK;
	if (IWX_RIDX_IS_CCK(ridx)) {
		/* rate_n_flags encoding changed with API version 2. */
		if (sc->sc_rate_n_flags_version >= 2)
			rate_flags |= IWX_RATE_MCS_CCK_MSK;
		else
			rate_flags |= IWX_RATE_MCS_CCK_MSK_V1;
	} else if (sc->sc_rate_n_flags_version >= 2)
		rate_flags |= IWX_RATE_MCS_LEGACY_OFDM_MSK;

	rval = (rs->rs_rates[ieee80211_node_get_txrate_dot11rate(ni)]
	    & IEEE80211_RATE_VAL);
	DPRINTF(("%s: rval=%i\n", __func__, rval));
	if (sc->sc_rate_n_flags_version >= 2) {
		/* v2 encodes the rate as an index into the std rate sets. */
		if (rate_flags & IWX_RATE_MCS_LEGACY_OFDM_MSK) {
			rate_flags |= (iwx_fw_rateidx_ofdm(rval) &
			    IWX_RATE_LEGACY_RATE_MSK);
		} else {
			rate_flags |= (iwx_fw_rateidx_cck(rval) &
			    IWX_RATE_LEGACY_RATE_MSK);
		}
	} else
		rate_flags |= rinfo->plcp;

	*rate_n_flags = rate_flags;
	IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d flags=0x%x\n",
	    __func__, __LINE__,*flags);
	IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d rate_n_flags=0x%x\n",
	    __func__, __LINE__, *rate_n_flags);

	return rinfo;
}
+
/*
 * Update the TX byte-count table entry for the TFD at 'idx', which
 * tells the firmware how many bytes (and how many 64-byte TFD chunks)
 * to fetch for that descriptor. The entry layout differs before and
 * after the AX210 device generation.
 */
static void
iwx_tx_update_byte_tbl(struct iwx_softc *sc, struct iwx_tx_ring *txq,
    int idx, uint16_t byte_cnt, uint16_t num_tbs)
{
	uint8_t filled_tfd_size, num_fetch_chunks;
	uint16_t len = byte_cnt;
	uint16_t bc_ent;

	filled_tfd_size = offsetof(struct iwx_tfh_tfd, tbs) +
	    num_tbs * sizeof(struct iwx_tfh_tb);
	/*
	 * filled_tfd_size contains the number of filled bytes in the TFD.
	 * Dividing it by 64 will give the number of chunks to fetch
	 * to SRAM- 0 for one chunk, 1 for 2 and so on.
	 * If, for example, TFD contains only 3 TBs then 32 bytes
	 * of the TFD are used, and only one chunk of 64 bytes should
	 * be fetched
	 */
	num_fetch_chunks = howmany(filled_tfd_size, 64) - 1;

	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
		struct iwx_gen3_bc_tbl_entry *scd_bc_tbl = txq->bc_tbl.vaddr;
		/* Starting from AX210, the HW expects bytes */
		bc_ent = htole16(len | (num_fetch_chunks << 14));
		scd_bc_tbl[idx].tfd_offset = bc_ent;
	} else {
		struct iwx_agn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.vaddr;
		/* Before AX210, the HW expects DW */
		len = howmany(len, 4);
		bc_ent = htole16(len | (num_fetch_chunks << 12));
		scd_bc_tbl->tfd_offset[idx] = bc_ent;
	}

	/* Make the updated table entry visible to the device. */
	bus_dmamap_sync(sc->sc_dmat, txq->bc_tbl.map, BUS_DMASYNC_PREWRITE);
}
+
+/*
+ * Transmit one 802.11 frame on the appropriate hardware queue.
+ *
+ * Builds a gen2/gen3 TX command and a TFH TFD descriptor, maps the mbuf
+ * for DMA, updates the scheduler byte-count table and advances the HW
+ * write pointer.
+ *
+ * Fix vs. previous revision: the crypto key lookup/encap now happens
+ * BEFORE the radiotap tap.  Previously 'k' was still NULL when the tap
+ * checked it, so IEEE80211_RADIOTAP_F_WEP could never be reported and
+ * the tap saw the pre-encap header.  This also matches the ordering
+ * used by the iwm(4) driver.
+ *
+ * NOTE(review): some error paths free the mbuf (crypto and DMA mapping
+ * failures) while the iwx_tx_fill_cmd() failure path does not -- confirm
+ * against the caller's mbuf-ownership convention to rule out leaks or
+ * double-frees.
+ */
+static int
+iwx_tx(struct iwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
+{
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+	struct iwx_node *in = (void *)ni;
+	struct iwx_tx_ring *ring;
+	struct iwx_tx_data *data;
+	struct iwx_tfh_tfd *desc;
+	struct iwx_device_cmd *cmd;
+	struct ieee80211_frame *wh;
+	struct ieee80211_key *k = NULL;
+	const struct iwx_rate *rinfo;
+	uint64_t paddr;
+	u_int hdrlen;
+	uint32_t rate_n_flags;
+	uint16_t num_tbs, flags, offload_assist = 0;
+	uint8_t type, subtype;
+	int i, totlen, err, pad, qid;
+#define IWM_MAX_SCATTER 20	/* XXX name inherited from iwm(4) */
+	bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
+	int nsegs;
+	struct mbuf *m1;
+	size_t txcmd_size;
+
+	wh = mtod(m, struct ieee80211_frame *);
+	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
+	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
+	hdrlen = ieee80211_anyhdrsize(wh);
+
+	qid = sc->first_data_qid;
+
+	/* Put QoS frames on the data queue which maps to their TID. */
+	if (IEEE80211_QOS_HAS_SEQ(wh) && (sc->sc_flags & IWX_FLAG_AMPDUTX)) {
+		uint16_t qos = ieee80211_gettid(wh);
+		uint8_t tid = qos & IEEE80211_QOS_TID;
+#if 0
+		/*
+		 * XXX-THJ: TODO when we enable ba we need to manage the
+		 * mappings
+		 */
+		struct ieee80211_tx_ba *ba;
+		ba = &ni->ni_tx_ba[tid];
+
+		if (!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
+		    type == IEEE80211_FC0_TYPE_DATA &&
+		    subtype != IEEE80211_FC0_SUBTYPE_NODATA &&
+		    subtype != IEEE80211_FC0_SUBTYPE_BAR &&
+		    sc->aggqid[tid] != 0 /*&&
+		    ba->ba_state == IEEE80211_BA_AGREED*/) {
+			qid = sc->aggqid[tid];
+#else
+		if (!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
+		    type == IEEE80211_FC0_TYPE_DATA &&
+		    subtype != IEEE80211_FC0_SUBTYPE_NODATA &&
+		    sc->aggqid[tid] != 0) {
+			qid = sc->aggqid[tid];
+#endif
+		}
+	}
+
+	ring = &sc->txq[qid];
+	desc = &ring->desc[ring->cur];
+	memset(desc, 0, sizeof(*desc));
+	data = &ring->data[ring->cur];
+
+	cmd = &ring->cmd[ring->cur];
+	cmd->hdr.code = IWX_TX_CMD;
+	cmd->hdr.flags = 0;
+	cmd->hdr.qid = ring->qid;
+	cmd->hdr.idx = ring->cur;
+
+	rinfo = iwx_tx_fill_cmd(sc, in, wh, &flags, &rate_n_flags, m);
+	if (rinfo == NULL)
+		return EINVAL;
+
+	/*
+	 * Handle encryption before the radiotap tap so the tap sees the
+	 * key and the final 802.11 header.  For CCMP the hardware does
+	 * the encryption; we only advance the packet number here.  Other
+	 * ciphers are encapsulated in software and hardware encryption
+	 * is disabled for the frame.
+	 */
+	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
+		k = ieee80211_crypto_get_txkey(ni, m);
+		if (k == NULL) {
+			printf("%s: k is NULL!\n", __func__);
+			m_freem(m);
+			return (ENOBUFS);
+		} else if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_AES_CCM) {
+			k->wk_keytsc++;
+		} else {
+			k->wk_cipher->ic_encap(k, m);
+
+			/* 802.11 headers may have moved */
+			wh = mtod(m, struct ieee80211_frame *);
+			flags |= IWX_TX_FLAGS_ENCRYPT_DIS;
+		}
+	} else
+		flags |= IWX_TX_FLAGS_ENCRYPT_DIS;
+
+	if (ieee80211_radiotap_active_vap(vap)) {
+		struct iwx_tx_radiotap_header *tap = &sc->sc_txtap;
+
+		tap->wt_flags = 0;
+		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
+		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
+		tap->wt_rate = rinfo->rate;
+		if (k != NULL)
+			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
+		ieee80211_radiotap_tx(vap, m);
+	}
+
+	totlen = m->m_pkthdr.len;
+
+	if (hdrlen & 3) {
+		/* First segment length must be a multiple of 4. */
+		pad = 4 - (hdrlen & 3);
+		offload_assist |= IWX_TX_CMD_OFFLD_PAD;
+	} else
+		pad = 0;
+
+	/* Fill the family-specific TX command; note the differing field
+	 * widths of 'offload_assist' and 'flags' between gen2 and gen3. */
+	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
+		struct iwx_tx_cmd_gen3 *tx = (void *)cmd->data;
+		memset(tx, 0, sizeof(*tx));
+		tx->len = htole16(totlen);
+		tx->offload_assist = htole32(offload_assist);
+		tx->flags = htole16(flags);
+		tx->rate_n_flags = htole32(rate_n_flags);
+		memcpy(tx->hdr, wh, hdrlen);
+		txcmd_size = sizeof(*tx);
+	} else {
+		struct iwx_tx_cmd_gen2 *tx = (void *)cmd->data;
+		memset(tx, 0, sizeof(*tx));
+		tx->len = htole16(totlen);
+		tx->offload_assist = htole16(offload_assist);
+		tx->flags = htole32(flags);
+		tx->rate_n_flags = htole32(rate_n_flags);
+		memcpy(tx->hdr, wh, hdrlen);
+		txcmd_size = sizeof(*tx);
+	}
+#if IWX_DEBUG
+	iwx_bbl_add_entry(totlen, IWX_BBL_PKT_TX);
+#endif
+
+	/* Trim 802.11 header. */
+	m_adj(m, hdrlen);
+
+	err = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, segs,
+	    &nsegs, BUS_DMA_NOWAIT);
+	if (err && err != EFBIG) {
+		printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc), err);
+		m_freem(m);
+		return err;
+	}
+	if (err) {
+		/* Too many DMA segments, linearize mbuf. */
+		m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
+		if (m1 == NULL) {
+			printf("%s: could not defrag mbufs\n", __func__);
+			m_freem(m);
+			return (ENOBUFS);
+		}
+		m = m1;
+		err = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
+		    segs, &nsegs, BUS_DMA_NOWAIT);
+		if (err) {
+			printf("%s: can't map mbuf (error %d)\n", __func__,
+			    err);
+			m_freem(m);
+			return (err);
+		}
+	}
+	data->m = m;
+	data->in = in;
+
+	/* Fill TX descriptor: TB0/TB1 cover the command, rest is payload. */
+	num_tbs = 2 + nsegs;
+	desc->num_tbs = htole16(num_tbs);
+
+	desc->tbs[0].tb_len = htole16(IWX_FIRST_TB_SIZE);
+	paddr = htole64(data->cmd_paddr);
+	memcpy(&desc->tbs[0].addr, &paddr, sizeof(paddr));
+	if (data->cmd_paddr >> 32 != (data->cmd_paddr + le32toh(desc->tbs[0].tb_len)) >> 32)
+		DPRINTF(("%s: TB0 crosses 32bit boundary\n", __func__));
+	desc->tbs[1].tb_len = htole16(sizeof(struct iwx_cmd_header) +
+	    txcmd_size + hdrlen + pad - IWX_FIRST_TB_SIZE);
+	paddr = htole64(data->cmd_paddr + IWX_FIRST_TB_SIZE);
+	memcpy(&desc->tbs[1].addr, &paddr, sizeof(paddr));
+
+	if (data->cmd_paddr >> 32 != (data->cmd_paddr + le32toh(desc->tbs[1].tb_len)) >> 32)
+		DPRINTF(("%s: TB1 crosses 32bit boundary\n", __func__));
+
+	/* Other DMA segments are for data payload. */
+	for (i = 0; i < nsegs; i++) {
+		seg = &segs[i];
+		desc->tbs[i + 2].tb_len = htole16(seg->ds_len);
+		paddr = htole64(seg->ds_addr);
+		memcpy(&desc->tbs[i + 2].addr, &paddr, sizeof(paddr));
+		if (data->cmd_paddr >> 32 != (data->cmd_paddr + le32toh(desc->tbs[i + 2].tb_len)) >> 32)
+			DPRINTF(("%s: TB%d crosses 32bit boundary\n", __func__, i + 2));
+	}
+
+	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE);
+	bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
+	    BUS_DMASYNC_PREWRITE);
+	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
+	    BUS_DMASYNC_PREWRITE);
+
+	iwx_tx_update_byte_tbl(sc, ring, ring->cur, totlen, num_tbs);
+
+	/* Kick TX ring. */
+	ring->cur = (ring->cur + 1) % IWX_TX_RING_COUNT;
+	ring->cur_hw = (ring->cur_hw + 1) % sc->max_tfd_queue_size;
+	IWX_WRITE(sc, IWX_HBUS_TARG_WRPTR, ring->qid << 16 | ring->cur_hw);
+
+	/* Mark TX ring as full if we reach a certain threshold. */
+	if (++ring->queued > iwx_himark) {
+		sc->qfullmsk |= 1 << ring->qid;
+	}
+
+	sc->sc_tx_timer[ring->qid] = 15;
+
+	return 0;
+}
+
+/*
+ * Ask the firmware to flush pending frames for the given station on the
+ * TIDs in 'tids', then advance the affected TX rings to the read
+ * pointers the firmware reports back.  Some firmware versions send no
+ * response payload; that is treated as success.
+ *
+ * Returns 0 on success or EIO on a malformed/failed response.
+ */
+static int
+iwx_flush_sta_tids(struct iwx_softc *sc, int sta_id, uint16_t tids)
+{
+	struct iwx_rx_packet *pkt;
+	struct iwx_tx_path_flush_cmd_rsp *resp;
+	struct iwx_tx_path_flush_cmd flush_cmd = {
+		.sta_id = htole32(sta_id),
+		.tid_mask = htole16(tids),
+	};
+	struct iwx_host_cmd hcmd = {
+		.id = IWX_TXPATH_FLUSH,
+		.len = { sizeof(flush_cmd), },
+		.data = { &flush_cmd, },
+		.flags = IWX_CMD_WANT_RESP,
+		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
+	};
+	int err, resp_len, i, num_flushed_queues;
+
+	err = iwx_send_cmd(sc, &hcmd);
+	if (err)
+		return err;
+
+	pkt = hcmd.resp_pkt;
+	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
+		err = EIO;
+		goto out;
+	}
+
+	resp_len = iwx_rx_packet_payload_len(pkt);
+	/* Some firmware versions don't provide a response. */
+	if (resp_len == 0)
+		goto out;
+	else if (resp_len != sizeof(*resp)) {
+		err = EIO;
+		goto out;
+	}
+
+	resp = (void *)pkt->data;
+
+	/* The response must be for the station we asked about. */
+	if (le16toh(resp->sta_id) != sta_id) {
+		err = EIO;
+		goto out;
+	}
+
+	num_flushed_queues = le16toh(resp->num_flushed_queues);
+	if (num_flushed_queues > IWX_TX_FLUSH_QUEUE_RSP) {
+		err = EIO;
+		goto out;
+	}
+
+	/*
+	 * For each queue the firmware flushed, reclaim driver-side ring
+	 * entries up to the post-flush read pointer.  Skip entries whose
+	 * queue number or TID does not match our bookkeeping.
+	 */
+	for (i = 0; i < num_flushed_queues; i++) {
+		struct iwx_flush_queue_info *queue_info = &resp->queues[i];
+		uint16_t tid = le16toh(queue_info->tid);
+		uint16_t read_after = le16toh(queue_info->read_after_flush);
+		uint16_t qid = le16toh(queue_info->queue_num);
+		struct iwx_tx_ring *txq;
+
+		if (qid >= nitems(sc->txq))
+			continue;
+
+		txq = &sc->txq[qid];
+		if (tid != txq->tid)
+			continue;
+
+		iwx_txq_advance(sc, txq, read_after);
+	}
+out:
+	iwx_free_resp(sc, &hcmd);
+	return err;
+}
+
+#define IWX_FLUSH_WAIT_MS	2000
+
+/*
+ * Toggle the firmware's "drain" flag for our station entry.  While the
+ * flag is set the firmware stops scheduling new frames for the station,
+ * which allows pending frames to be flushed.  Returns 0 on success.
+ */
+static int
+iwx_drain_sta(struct iwx_softc *sc, struct iwx_node* in, int drain)
+{
+	struct iwx_add_sta_cmd cmd;
+	uint32_t status = IWX_ADD_STA_SUCCESS;
+	int err;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.mac_id_n_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
+	    in->in_color));
+	cmd.sta_id = IWX_STATION_ID;
+	cmd.add_modify = IWX_STA_MODE_MODIFY;
+	cmd.station_flags = drain ? htole32(IWX_STA_FLG_DRAIN_FLOW) : 0;
+	cmd.station_flags_msk = htole32(IWX_STA_FLG_DRAIN_FLOW);
+
+	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA,
+	    sizeof(cmd), &cmd, &status);
+	if (err) {
+		printf("%s: could not update sta (error %d)\n",
+		    DEVNAME(sc), err);
+		return err;
+	}
+
+	if ((status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS) {
+		err = EIO;
+		printf("%s: Couldn't %s draining for station\n",
+		    DEVNAME(sc), drain ? "enable" : "disable");
+	}
+
+	return err;
+}
+
+/*
+ * Flush all pending TX frames for our station: enable draining, flush
+ * every TID, then disable draining again.  IWX_FLAG_TXFLUSH is set for
+ * the duration so other paths know a flush is in progress.
+ * Called with the softc lock held.
+ */
+static int
+iwx_flush_sta(struct iwx_softc *sc, struct iwx_node *in)
+{
+	int err;
+
+	IWX_ASSERT_LOCKED(sc);
+
+	sc->sc_flags |= IWX_FLAG_TXFLUSH;
+
+	err = iwx_drain_sta(sc, in, 1);
+	if (err)
+		goto done;
+
+	/* 0xffff: flush every TID. */
+	err = iwx_flush_sta_tids(sc, IWX_STATION_ID, 0xffff);
+	if (err) {
+		printf("%s: could not flush Tx path (error %d)\n",
+		    DEVNAME(sc), err);
+		goto done;
+	}
+
+	/*
+	 * XXX-THJ: iwx_wait_tx_queues_empty() was called here, but it was
+	 * a no-op in the FreeBSD driver and it has been replaced in
+	 * OpenBSD.
+	 */
+
+	err = iwx_drain_sta(sc, in, 0);
+done:
+	sc->sc_flags &= ~IWX_FLAG_TXFLUSH;
+	return err;
+}
+
+#define IWX_POWER_KEEP_ALIVE_PERIOD_SEC 25
+
+/*
+ * Push a beacon-filtering configuration to the firmware.
+ */
+static int
+iwx_beacon_filter_send_cmd(struct iwx_softc *sc,
+    struct iwx_beacon_filter_cmd *cmd)
+{
+	int err;
+
+	err = iwx_send_cmd_pdu(sc, IWX_REPLY_BEACON_FILTERING_CMD,
+	    0, sizeof(*cmd), cmd);
+	return err;
+}
+
+/*
+ * Enable or disable beacon-abort in the firmware's beacon filter.
+ * Does nothing unless beacon filtering is currently enabled; on
+ * success records the new state in sc->sc_bf.ba_enabled.
+ */
+static int
+iwx_update_beacon_abort(struct iwx_softc *sc, struct iwx_node *in, int enable)
+{
+	struct iwx_beacon_filter_cmd cmd = {
+		IWX_BF_CMD_CONFIG_DEFAULTS,
+		.bf_enable_beacon_filter = htole32(1),
+		.ba_enable_beacon_abort = htole32(enable),
+	};
+
+	if (!sc->sc_bf.bf_enabled)
+		return 0;
+
+	sc->sc_bf.ba_enabled = enable;
+	return iwx_beacon_filter_send_cmd(sc, &cmd);
+}
+
+/*
+ * Fill in a MAC power-management command for the given node.
+ * Computes the keep-alive interval from the DTIM period and enables
+ * power save except in monitor mode.
+ */
+static void
+iwx_power_build_cmd(struct iwx_softc *sc, struct iwx_node *in,
+    struct iwx_mac_power_cmd *cmd)
+{
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct ieee80211_node *ni = &in->in_ni;
+	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+	int dtim_period, dtim_msec, keep_alive;
+
+	cmd->id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
+	    in->in_color));
+	/* Fall back to a DTIM period of 1 if the vap has none yet. */
+	if (vap->iv_dtim_period)
+		dtim_period = vap->iv_dtim_period;
+	else
+		dtim_period = 1;
+
+	/*
+	 * Regardless of power management state the driver must set
+	 * keep alive period. FW will use it for sending keep alive NDPs
+	 * immediately after association. Check that keep alive period
+	 * is at least 3 * DTIM.
+	 */
+	dtim_msec = dtim_period * ni->ni_intval;
+	keep_alive = MAX(3 * dtim_msec, 1000 * IWX_POWER_KEEP_ALIVE_PERIOD_SEC);
+	/* Round up to whole seconds; the firmware field is in seconds. */
+	keep_alive = roundup(keep_alive, 1000) / 1000;
+	cmd->keep_alive_seconds = htole16(keep_alive);
+
+	if (ic->ic_opmode != IEEE80211_M_MONITOR)
+		cmd->flags = htole16(IWX_POWER_FLAGS_POWER_SAVE_ENA_MSK);
+}
+
+/*
+ * Send the current MAC power-management settings to the firmware and
+ * synchronize the beacon-abort state with the power-management flag.
+ */
+static int
+iwx_power_mac_update_mode(struct iwx_softc *sc, struct iwx_node *in)
+{
+	struct iwx_mac_power_cmd cmd;
+	int ba_enable, err;
+
+	memset(&cmd, 0, sizeof(cmd));
+	iwx_power_build_cmd(sc, in, &cmd);
+
+	err = iwx_send_cmd_pdu(sc, IWX_MAC_PM_POWER_TABLE, 0,
+	    sizeof(cmd), &cmd);
+	if (err)
+		return err;
+
+	ba_enable = (cmd.flags &
+	    htole16(IWX_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)) != 0;
+	return iwx_update_beacon_abort(sc, in, ba_enable);
+}
+
+/*
+ * Send the device-wide power-save configuration to the firmware.
+ * Power save is enabled except in monitor mode.
+ */
+static int
+iwx_power_update_device(struct iwx_softc *sc)
+{
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct iwx_device_power_cmd cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+	if (ic->ic_opmode != IEEE80211_M_MONITOR)
+		cmd.flags = htole16(IWX_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
+
+	return iwx_send_cmd_pdu(sc, IWX_POWER_TABLE_CMD, 0, sizeof(cmd),
+	    &cmd);
+}
+
+/*
+ * Enable beacon filtering in the firmware, preserving the current
+ * beacon-abort setting.  Records success in sc->sc_bf.bf_enabled.
+ */
+static int
+iwx_enable_beacon_filter(struct iwx_softc *sc, struct iwx_node *in)
+{
+	struct iwx_beacon_filter_cmd cmd = {
+		IWX_BF_CMD_CONFIG_DEFAULTS,
+		.bf_enable_beacon_filter = htole32(1),
+		.ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled),
+	};
+	int err;
+
+	err = iwx_beacon_filter_send_cmd(sc, &cmd);
+	if (err != 0)
+		return err;
+
+	sc->sc_bf.bf_enabled = 1;
+	return 0;
+}
+
+/*
+ * Disable beacon filtering by sending an all-zero configuration.
+ * Records success in sc->sc_bf.bf_enabled.
+ */
+static int
+iwx_disable_beacon_filter(struct iwx_softc *sc)
+{
+	struct iwx_beacon_filter_cmd cmd;
+	int err;
+
+	memset(&cmd, 0, sizeof(cmd));
+	err = iwx_beacon_filter_send_cmd(sc, &cmd);
+	if (err != 0)
+		return err;
+
+	sc->sc_bf.bf_enabled = 0;
+	return 0;
+}
+
+/*
+ * Add our station entry to the firmware, or modify it if 'update' is
+ * set.  Builds the HT/VHT aggregation-size, channel-width (FAT), MIMO
+ * and MPDU-density station flags from the node's negotiated
+ * capabilities.  Returns 0 on success, EIO if the firmware reports a
+ * non-success status.
+ */
+static int
+iwx_add_sta_cmd(struct iwx_softc *sc, struct iwx_node *in, int update)
+{
+	struct iwx_add_sta_cmd add_sta_cmd;
+	int err, i;
+	uint32_t status, aggsize;
+	const uint32_t max_aggsize = (IWX_STA_FLG_MAX_AGG_SIZE_64K >>
+	    IWX_STA_FLG_MAX_AGG_SIZE_SHIFT);
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct ieee80211_node *ni = &in->in_ni;
+	struct ieee80211_htrateset *htrs = &ni->ni_htrates;
+
+	if (!update && (sc->sc_flags & IWX_FLAG_STA_ACTIVE))
+		panic("STA already added");
+
+	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
+
+	/* Monitor mode uses a dedicated station id and type. */
+	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
+		add_sta_cmd.sta_id = IWX_MONITOR_STA_ID;
+		add_sta_cmd.station_type = IWX_STA_GENERAL_PURPOSE;
+	} else {
+		add_sta_cmd.sta_id = IWX_STATION_ID;
+		add_sta_cmd.station_type = IWX_STA_LINK;
+	}
+	add_sta_cmd.mac_id_n_color
+	    = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
+	/* The peer MAC address is only set when first adding the entry. */
+	if (!update) {
+		if (ic->ic_opmode == IEEE80211_M_MONITOR)
+			IEEE80211_ADDR_COPY(&add_sta_cmd.addr,
+			    etheranyaddr);
+		else
+			IEEE80211_ADDR_COPY(&add_sta_cmd.addr,
+			    in->in_macaddr);
+	}
+	DPRINTF(("%s: add_sta_cmd.addr=%s\n", __func__,
+	    ether_sprintf(add_sta_cmd.addr)));
+	add_sta_cmd.add_modify = update ? 1 : 0;
+	add_sta_cmd.station_flags_msk
+	    |= htole32(IWX_STA_FLG_FAT_EN_MSK | IWX_STA_FLG_MIMO_EN_MSK);
+
+	if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
+		add_sta_cmd.station_flags_msk
+		    |= htole32(IWX_STA_FLG_MAX_AGG_SIZE_MSK |
+		    IWX_STA_FLG_AGG_MPDU_DENS_MSK);
+
+		/*
+		 * Enable 2-stream MIMO if our hardware allows it and the
+		 * peer is VHT or advertises any HT MCS above rate 7
+		 * (i.e. a second spatial stream).
+		 */
+		if (iwx_mimo_enabled(sc)) {
+			if (ni->ni_flags & IEEE80211_NODE_VHT) {
+				add_sta_cmd.station_flags |=
+				    htole32(IWX_STA_FLG_MIMO_EN_MIMO2);
+			} else {
+				int hasmimo = 0;
+				for (i = 0; i < htrs->rs_nrates; i++) {
+					if (htrs->rs_rates[i] > 7) {
+						hasmimo = 1;
+						break;
+					}
+				}
+				if (hasmimo) {
+					add_sta_cmd.station_flags |=
+					    htole32(IWX_STA_FLG_MIMO_EN_MIMO2);
+				}
+			}
+		}
+
+		if (ni->ni_flags & IEEE80211_NODE_HT &&
+		    IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
+			add_sta_cmd.station_flags |= htole32(
+			    IWX_STA_FLG_FAT_EN_40MHZ);
+		}
+
+
+		if (ni->ni_flags & IEEE80211_NODE_VHT) {
+			if (IEEE80211_IS_CHAN_VHT80(ni->ni_chan)) {
+				add_sta_cmd.station_flags |= htole32(
+				    IWX_STA_FLG_FAT_EN_80MHZ);
+			}
+			/* XXX-misha: TODO get real ampdu size */
+			aggsize = max_aggsize;
+		} else {
+			/* HT: take the max RX A-MPDU size from ni_htparam. */
+			aggsize = _IEEE80211_MASKSHIFT(le16toh(ni->ni_htparam),
+			    IEEE80211_HTCAP_MAXRXAMPDU);
+		}
+
+		if (aggsize > max_aggsize)
+			aggsize = max_aggsize;
+		add_sta_cmd.station_flags |= htole32((aggsize <<
+		    IWX_STA_FLG_MAX_AGG_SIZE_SHIFT) &
+		    IWX_STA_FLG_MAX_AGG_SIZE_MSK);
+
+		/* Map the peer's minimum MPDU start spacing to FW flags. */
+		switch (_IEEE80211_MASKSHIFT(le16toh(ni->ni_htparam),
+		    IEEE80211_HTCAP_MPDUDENSITY)) {
+		case IEEE80211_HTCAP_MPDUDENSITY_2:
+			add_sta_cmd.station_flags
+			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_2US);
+			break;
+		case IEEE80211_HTCAP_MPDUDENSITY_4:
+			add_sta_cmd.station_flags
+			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_4US);
+			break;
+		case IEEE80211_HTCAP_MPDUDENSITY_8:
+			add_sta_cmd.station_flags
+			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_8US);
+			break;
+		case IEEE80211_HTCAP_MPDUDENSITY_16:
+			add_sta_cmd.station_flags
+			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_16US);
+			break;
+		default:
+			break;
+		}
+	}
+
+	status = IWX_ADD_STA_SUCCESS;
+	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA, sizeof(add_sta_cmd),
+	    &add_sta_cmd, &status);
+	if (!err && (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS)
+		err = EIO;
+
+	return err;
+}
+
+/*
+ * Remove our station entry from the firmware.  Panics if no station is
+ * currently active.
+ */
+static int
+iwx_rm_sta_cmd(struct iwx_softc *sc, struct iwx_node *in)
+{
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct iwx_rm_sta_cmd rm_sta_cmd;
+
+	if ((sc->sc_flags & IWX_FLAG_STA_ACTIVE) == 0)
+		panic("sta already removed");
+
+	memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
+	rm_sta_cmd.sta_id = (ic->ic_opmode == IEEE80211_M_MONITOR) ?
+	    IWX_MONITOR_STA_ID : IWX_STATION_ID;
+
+	return iwx_send_cmd_pdu(sc, IWX_REMOVE_STA, 0, sizeof(rm_sta_cmd),
+	    &rm_sta_cmd);
+}
+
+/*
+ * Tear down our station: flush its TX path, disable its TX queues
+ * (required before removal with the new SCD_QUEUE_CONFIG API), remove
+ * the firmware station entry, and reset all driver-side block-ack and
+ * aggregation-queue state.
+ */
+static int
+iwx_rm_sta(struct iwx_softc *sc, struct iwx_node *in)
+{
+	int err, i, cmd_ver;
+
+	err = iwx_flush_sta(sc, in);
+	if (err) {
+		printf("%s: could not flush Tx path (error %d)\n",
+		    DEVNAME(sc), err);
+		return err;
+	}
+
+	/*
+	 * New SCD_QUEUE_CONFIG API requires explicit queue removal
+	 * before a station gets removed.
+	 */
+	cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
+	    IWX_SCD_QUEUE_CONFIG_CMD);
+	if (cmd_ver != 0 && cmd_ver != IWX_FW_CMD_VER_UNKNOWN) {
+		err = iwx_disable_mgmt_queue(sc);
+		if (err)
+			return err;
+		/* Disable every aggregation queue that is still enabled. */
+		for (i = IWX_FIRST_AGG_TX_QUEUE;
+		    i < IWX_LAST_AGG_TX_QUEUE; i++) {
+			struct iwx_tx_ring *ring = &sc->txq[i];
+			if ((sc->qenablemsk & (1 << i)) == 0)
+				continue;
+			err = iwx_disable_txq(sc, IWX_STATION_ID,
+			    ring->qid, ring->tid);
+			if (err) {
+				printf("%s: could not disable Tx queue %d "
+				    "(error %d)\n", DEVNAME(sc), ring->qid,
+				    err);
+				return err;
+			}
+		}
+	}
+
+	err = iwx_rm_sta_cmd(sc, in);
+	if (err) {
+		printf("%s: could not remove STA (error %d)\n",
+		    DEVNAME(sc), err);
+		return err;
+	}
+
+	in->in_flags = 0;
+
+	/* Reset all block-ack session bookkeeping. */
+	sc->sc_rx_ba_sessions = 0;
+	sc->ba_rx.start_tidmask = 0;
+	sc->ba_rx.stop_tidmask = 0;
+	memset(sc->aggqid, 0, sizeof(sc->aggqid));
+	sc->ba_tx.start_tidmask = 0;
+	sc->ba_tx.stop_tidmask = 0;
+	for (i = IWX_FIRST_AGG_TX_QUEUE; i < IWX_LAST_AGG_TX_QUEUE; i++)
+		sc->qenablemsk &= ~(1 << i);
+
+#if 0
+	for (i = 0; i < IEEE80211_NUM_TID; i++) {
+		struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[i];
+		if (ba->ba_state != IEEE80211_BA_AGREED)
+			continue;
+		ieee80211_delba_request(ic, ni, 0, 1, i);
+	}
+#endif
+	/* Clear ampdu rx state (GOS-1525) */
+	for (i = 0; i < IWX_MAX_TID_COUNT; i++) {
+		struct iwx_rx_ba *ba = &sc->ni_rx_ba[i];
+		ba->ba_flags = 0;
+	}
+
+	return 0;
+}
+
+/*
+ * Fill the UMAC scan channel configuration array from the net80211
+ * scan channel list, using the v2 layout when the firmware supports
+ * extended channel config.  Returns the number of channels written
+ * (capped at the firmware's supported channel count).
+ *
+ * NOTE(review): the n_ssids parameter is currently unused here;
+ * channel_cfg_flags is applied to every channel unchanged.
+ */
+static uint8_t
+iwx_umac_scan_fill_channels(struct iwx_softc *sc,
+    struct iwx_scan_channel_cfg_umac *chan, size_t chan_nitems,
+    int n_ssids, uint32_t channel_cfg_flags)
+{
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct ieee80211_scan_state *ss = ic->ic_scan;
+	struct ieee80211_channel *c;
+	uint8_t nchan;
+	int j;
+
+	for (nchan = j = 0;
+	    j < ss->ss_last &&
+	    nchan < sc->sc_capa_n_scan_channels;
+	    j++) {
+		uint8_t channel_num;
+
+		c = ss->ss_chans[j];
+		channel_num = ieee80211_mhz2ieee(c->ic_freq, 0);
+		if (isset(sc->sc_ucode_api,
+		    IWX_UCODE_TLV_API_SCAN_EXT_CHAN_VER)) {
+			chan->v2.channel_num = channel_num;
+			if (IEEE80211_IS_CHAN_2GHZ(c))
+				chan->v2.band = IWX_PHY_BAND_24;
+			else
+				chan->v2.band = IWX_PHY_BAND_5;
+			chan->v2.iter_count = 1;
+			chan->v2.iter_interval = 0;
+		} else {
+			chan->v1.channel_num = channel_num;
+			chan->v1.iter_count = 1;
+			chan->v1.iter_interval = htole16(0);
+		}
+		chan->flags |= htole32(channel_cfg_flags);
+		chan++;
+		nchan++;
+	}
+
+	return nchan;
+}
+
+/*
+ * Build the probe request template the firmware transmits during active
+ * scans: an 802.11 header with an empty SSID element (the firmware
+ * inserts SSIDs), per-band rate/DS IEs, and common HT IEs.  Offsets and
+ * lengths of each section are recorded for the firmware.
+ *
+ * Returns 0 on success or ENOBUFS if the template buffer is too small.
+ *
+ * Fix vs. previous revision: the VHT branch subtracted 'frm - pos' from
+ * 'remain' where 'pos' still pointed at the start of the 5 GHz band
+ * data, double-counting the rate IEs already subtracted above and
+ * shrinking 'remain' too fast.  Only the VHT IE length is subtracted
+ * now.
+ *
+ * NOTE(review): the i_addr2 assignment guards against a NULL vap, but
+ * later code dereferences vap unconditionally -- confirm whether vap
+ * can actually be NULL here and make the handling consistent.
+ */
+static int
+iwx_fill_probe_req(struct iwx_softc *sc, struct iwx_scan_probe_req *preq)
+{
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+	struct ieee80211_frame *wh = (struct ieee80211_frame *)preq->buf;
+	struct ieee80211_rateset *rs;
+	size_t remain = sizeof(preq->buf);
+	uint8_t *frm, *pos, *vht_start;
+
+	memset(preq, 0, sizeof(*preq));
+
+	if (remain < sizeof(*wh) + 2)
+		return ENOBUFS;
+
+	/*
+	 * Build a probe request frame. Most of the following code is a
+	 * copy & paste of what is done in net80211.
+	 */
+	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
+	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
+	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
+	IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr);
+	IEEE80211_ADDR_COPY(wh->i_addr2, vap ? vap->iv_myaddr : ic->ic_macaddr);
+	IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr);
+	*(uint16_t *)&wh->i_dur[0] = 0;	/* filled by HW */
+	*(uint16_t *)&wh->i_seq[0] = 0;	/* filled by HW */
+
+	frm = (uint8_t *)(wh + 1);
+	*frm++ = IEEE80211_ELEMID_SSID;
+	*frm++ = 0;
+	/* hardware inserts SSID */
+
+	/* Tell the firmware where the MAC header is. */
+	preq->mac_header.offset = 0;
+	preq->mac_header.len = htole16(frm - (uint8_t *)wh);
+	remain -= frm - (uint8_t *)wh;
+
+	/* Fill in 2GHz IEs and tell firmware where they are. */
+	rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
+	if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
+		if (remain < 4 + rs->rs_nrates)
+			return ENOBUFS;
+	} else if (remain < 2 + rs->rs_nrates)
+		return ENOBUFS;
+	preq->band_data[0].offset = htole16(frm - (uint8_t *)wh);
+	pos = frm;
+	frm = ieee80211_add_rates(frm, rs);
+	if (rs->rs_nrates > IEEE80211_RATE_SIZE)
+		frm = ieee80211_add_xrates(frm, rs);
+	remain -= frm - pos;
+
+	if (isset(sc->sc_enabled_capa,
+	    IWX_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) {
+		if (remain < 3)
+			return ENOBUFS;
+		*frm++ = IEEE80211_ELEMID_DSPARMS;
+		*frm++ = 1;
+		*frm++ = 0;
+		remain -= 3;
+	}
+	preq->band_data[0].len = htole16(frm - pos);
+
+	if (sc->sc_nvm.sku_cap_band_52GHz_enable) {
+		/* Fill in 5GHz IEs. */
+		rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
+		if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
+			if (remain < 4 + rs->rs_nrates)
+				return ENOBUFS;
+		} else if (remain < 2 + rs->rs_nrates)
+			return ENOBUFS;
+		preq->band_data[1].offset = htole16(frm - (uint8_t *)wh);
+		pos = frm;
+		frm = ieee80211_add_rates(frm, rs);
+		if (rs->rs_nrates > IEEE80211_RATE_SIZE)
+			frm = ieee80211_add_xrates(frm, rs);
+		remain -= frm - pos;
+		if (vap->iv_vht_flags & IEEE80211_FVHT_VHT) {
+			if (remain < 14)
+				return ENOBUFS;
+			vht_start = frm;
+			frm = ieee80211_add_vhtcap(frm, vap->iv_bss);
+			/*
+			 * Only the VHT IE itself is new; the rate IEs
+			 * between 'pos' and 'vht_start' were already
+			 * subtracted from 'remain' above.
+			 */
+			remain -= frm - vht_start;
+			/* band_data[1].len covers rates + VHT IE. */
+			preq->band_data[1].len = htole16(frm - pos);
+		}
+	}
+
+	/* Send 11n IEs on both 2GHz and 5GHz bands. */
+	preq->common_data.offset = htole16(frm - (uint8_t *)wh);
+	pos = frm;
+	if (vap->iv_flags_ht & IEEE80211_FHT_HT) {
+		if (remain < 28)
+			return ENOBUFS;
+		frm = ieee80211_add_htcap(frm, vap->iv_bss);
+		/* XXX add WME info? */
+		remain -= frm - pos;
+	}
+
+	preq->common_data.len = htole16(frm - pos);
+
+	return 0;
+}
+
+/*
+ * Send the reduced UMAC scan configuration (TX/RX chain masks only) to
+ * the firmware.  Requires firmware support for the reduced config TLV;
+ * returns ENOTSUP otherwise.
+ */
+static int
+iwx_config_umac_scan_reduced(struct iwx_softc *sc)
+{
+	struct iwx_scan_config scan_cfg;
+	struct iwx_host_cmd hcmd = {
+		.id = iwx_cmd_id(IWX_SCAN_CFG_CMD, IWX_LONG_GROUP, 0),
+		.len[0] = sizeof(scan_cfg),
+		.data[0] = &scan_cfg,
+		.flags = 0,
+	};
+	int cmdver;
+
+	if (!isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_REDUCED_SCAN_CONFIG)) {
+		printf("%s: firmware does not support reduced scan config\n",
+		    DEVNAME(sc));
+		return ENOTSUP;
+	}
+
+	memset(&scan_cfg, 0, sizeof(scan_cfg));
+
+	/*
+	 * SCAN_CFG version >= 5 implies that the broadcast
+	 * STA ID field is deprecated.
+	 */
+	cmdver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP, IWX_SCAN_CFG_CMD);
+	if (cmdver == IWX_FW_CMD_VER_UNKNOWN || cmdver < 5)
+		scan_cfg.bcast_sta_id = 0xff;
+
+	scan_cfg.tx_chains = htole32(iwx_fw_valid_tx_ant(sc));
+	scan_cfg.rx_chains = htole32(iwx_fw_valid_rx_ant(sc));
+
+	return iwx_send_cmd(sc, &hcmd);
+}
+
+/*
+ * Compute the v2 UMAC scan general flags.  A scan with no SSIDs is
+ * forced passive; all scans pass results up and use adaptive dwell.
+ * The bgscan argument is currently not consulted.
+ */
+static uint16_t
+iwx_scan_umac_flags_v2(struct iwx_softc *sc, int bgscan)
+{
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct ieee80211_scan_state *ss = ic->ic_scan;
+	uint16_t flags;
+
+	flags = IWX_UMAC_SCAN_GEN_FLAGS_V2_PASS_ALL |
+	    IWX_UMAC_SCAN_GEN_FLAGS_V2_NTFY_ITER_COMPLETE |
+	    IWX_UMAC_SCAN_GEN_FLAGS_V2_ADAPTIVE_DWELL;
+
+	if (ss->ss_nssid == 0) {
+		DPRINTF(("%s: Passive scan started\n", __func__));
+		flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE;
+	}
+
+	return flags;
+}
+
+#define IWX_SCAN_DWELL_ACTIVE 10
+#define IWX_SCAN_DWELL_PASSIVE 110
+
+/* adaptive dwell max budget time [TU] for full scan */
+#define IWX_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN 300
+/* adaptive dwell max budget time [TU] for directed scan */
+#define IWX_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN 100
+/* adaptive dwell default high band APs number */
+#define IWX_SCAN_ADWELL_DEFAULT_HB_N_APS 8
+/* adaptive dwell default low band APs number */
+#define IWX_SCAN_ADWELL_DEFAULT_LB_N_APS 2
+/* adaptive dwell default APs number in social channels (1, 6, 11) */
+#define IWX_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL 10
+/* adaptive dwell number of APs override for p2p friendly GO channels */
+#define IWX_SCAN_ADWELL_N_APS_GO_FRIENDLY 10
+/* adaptive dwell number of APs override for social channels */
+#define IWX_SCAN_ADWELL_N_APS_SOCIAL_CHS 2
+
+/*
+ * Fill the adaptive-dwell section of the v10 scan general parameters:
+ * default AP counts per band, the dwell-time budget (smaller for
+ * background scans), per-LMAC out-of-channel/suspend times, and the
+ * active/passive dwell times.
+ *
+ * Fix vs. previous revision: max_out_time/suspend_time were assigned
+ * htole32() values and then converted with htole32() again at the
+ * store, which is a double byte-swap that would write host-order
+ * values on big-endian hosts.  The locals now stay in host order and
+ * are converted exactly once.
+ */
+static void
+iwx_scan_umac_dwell_v10(struct iwx_softc *sc,
+    struct iwx_scan_general_params_v10 *general_params, int bgscan)
+{
+	uint32_t suspend_time, max_out_time;
+	uint8_t active_dwell, passive_dwell;
+
+	active_dwell = IWX_SCAN_DWELL_ACTIVE;
+	passive_dwell = IWX_SCAN_DWELL_PASSIVE;
+
+	general_params->adwell_default_social_chn =
+	    IWX_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL;
+	general_params->adwell_default_2g = IWX_SCAN_ADWELL_DEFAULT_LB_N_APS;
+	general_params->adwell_default_5g = IWX_SCAN_ADWELL_DEFAULT_HB_N_APS;
+
+	/* Directed (background) scans get a smaller dwell budget. */
+	if (bgscan)
+		general_params->adwell_max_budget =
+		    htole16(IWX_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
+	else
+		general_params->adwell_max_budget =
+		    htole16(IWX_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN);
+
+	general_params->scan_priority = htole32(IWX_SCAN_PRIORITY_EXT_6);
+	/* Host byte order here; converted exactly once at the stores below. */
+	if (bgscan) {
+		max_out_time = 120;
+		suspend_time = 120;
+	} else {
+		max_out_time = 0;
+		suspend_time = 0;
+	}
+	general_params->max_out_of_time[IWX_SCAN_LB_LMAC_IDX] =
+	    htole32(max_out_time);
+	general_params->suspend_time[IWX_SCAN_LB_LMAC_IDX] =
+	    htole32(suspend_time);
+	general_params->max_out_of_time[IWX_SCAN_HB_LMAC_IDX] =
+	    htole32(max_out_time);
+	general_params->suspend_time[IWX_SCAN_HB_LMAC_IDX] =
+	    htole32(suspend_time);
+
+	general_params->active_dwell[IWX_SCAN_LB_LMAC_IDX] = active_dwell;
+	general_params->passive_dwell[IWX_SCAN_LB_LMAC_IDX] = passive_dwell;
+	general_params->active_dwell[IWX_SCAN_HB_LMAC_IDX] = active_dwell;
+	general_params->passive_dwell[IWX_SCAN_HB_LMAC_IDX] = passive_dwell;
+}
+
+/*
+ * Fill the v10 scan general parameters: dwell settings, general flags,
+ * and fragment counts when fragmented scanning is requested per LMAC.
+ */
+static void
+iwx_scan_umac_fill_general_p_v10(struct iwx_softc *sc,
+    struct iwx_scan_general_params_v10 *gp, uint16_t gen_flags, int bgscan)
+{
+	iwx_scan_umac_dwell_v10(sc, gp, bgscan);
+
+	gp->flags = htole16(gen_flags);
+
+	/* Three fragments per LMAC when fragmented scan is requested. */
+	if (gen_flags & IWX_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1)
+		gp->num_of_fragments[IWX_SCAN_LB_LMAC_IDX] = 3;
+	if (gen_flags & IWX_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC2)
+		gp->num_of_fragments[IWX_SCAN_HB_LMAC_IDX] = 3;
+
+	gp->scan_start_mac_id = 0;
+}
+
+/*
+ * Fill the v6 scan channel parameters: channel ordering flag, the
+ * per-channel configuration array, and the adaptive-dwell AP-count
+ * overrides for P2P-GO-friendly and social channels.
+ */
+static void
+iwx_scan_umac_fill_ch_p_v6(struct iwx_softc *sc,
+    struct iwx_scan_channel_params_v6 *cp, uint32_t channel_cfg_flags,
+    int n_ssid)
+{
+	cp->flags = IWX_SCAN_CHANNEL_FLAG_ENABLE_CHAN_ORDER;
+
+	cp->count = iwx_umac_scan_fill_channels(sc, cp->channel_config,
+	    nitems(cp->channel_config), n_ssid, channel_cfg_flags);
+
+	cp->n_aps_override[0] = IWX_SCAN_ADWELL_N_APS_GO_FRIENDLY;
+	cp->n_aps_override[1] = IWX_SCAN_ADWELL_N_APS_SOCIAL_CHS;
+}
+
+/*
+ * Start a v14 UMAC scan.  Builds general, periodic, probe and channel
+ * parameters, copies the requested SSIDs for directed scanning, and
+ * sends the command (asynchronously for background scans).
+ * Called with the softc lock held.
+ */
+static int
+iwx_umac_scan_v14(struct iwx_softc *sc, int bgscan)
+{
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct ieee80211_scan_state *ss = ic->ic_scan;
+	struct iwx_host_cmd hcmd = {
+		.id = iwx_cmd_id(IWX_SCAN_REQ_UMAC, IWX_LONG_GROUP, 0),
+		.len = { 0, },
+		.data = { NULL, },
+		.flags = 0,
+	};
+	struct iwx_scan_req_umac_v14 *cmd = &sc->sc_umac_v14_cmd;
+	struct iwx_scan_req_params_v14 *scan_p;
+	int err, async = bgscan, n_ssid = 0;
+	uint16_t gen_flags;
+	uint32_t bitmap_ssid = 0;
+
+	IWX_ASSERT_LOCKED(sc);
+
+	bzero(cmd, sizeof(struct iwx_scan_req_umac_v14));
+
+	scan_p = &cmd->scan_params;
+
+	cmd->ooc_priority = htole32(IWX_SCAN_PRIORITY_EXT_6);
+	cmd->uid = htole32(0);
+
+	gen_flags = iwx_scan_umac_flags_v2(sc, bgscan);
+	iwx_scan_umac_fill_general_p_v10(sc, &scan_p->general_params,
+	    gen_flags, bgscan);
+
+	/* Single scan iteration, no repeat interval. */
+	scan_p->periodic_params.schedule[0].interval = htole16(0);
+	scan_p->periodic_params.schedule[0].iter_count = 1;
+
+	err = iwx_fill_probe_req(sc, &scan_p->probe_params.preq);
+	if (err) {
+		printf("%s: iwx_fill_probe_req failed (error %d)\n", __func__,
+		    err);
+		return err;
+	}
+
+	/* Copy the requested SSIDs and build the directed-scan bitmap. */
+	for (int i=0; i < ss->ss_nssid; i++) {
+		scan_p->probe_params.direct_scan[i].id = IEEE80211_ELEMID_SSID;
+		scan_p->probe_params.direct_scan[i].len =
+		    MIN(ss->ss_ssid[i].len, IEEE80211_NWID_LEN);
+		DPRINTF(("%s: Active scan started for ssid ", __func__));
+		memcpy(scan_p->probe_params.direct_scan[i].ssid,
+		    ss->ss_ssid[i].ssid, ss->ss_ssid[i].len);
+		n_ssid++;
+		bitmap_ssid |= (1 << i);
+	}
+	DPRINTF(("%s: bitmap_ssid=0x%x\n", __func__, bitmap_ssid));
+
+	iwx_scan_umac_fill_ch_p_v6(sc, &scan_p->channel_params, bitmap_ssid,
+	    n_ssid);
+
+	hcmd.len[0] = sizeof(*cmd);
+	hcmd.data[0] = (void *)cmd;
+	hcmd.flags |= async ? IWX_CMD_ASYNC : 0;
+
+	err = iwx_send_cmd(sc, &hcmd);
+	return err;
+}
+
+/*
+ * Handle a firmware MCC (regulatory domain) change notification by
+ * logging the detected alpha2 country code.
+ */
+static void
+iwx_mcc_update(struct iwx_softc *sc, struct iwx_mcc_chub_notif *notif)
+{
+	uint16_t mcc = le16toh(notif->mcc);
+	char alpha2[3];
+
+	snprintf(alpha2, sizeof(alpha2), "%c%c",
+	    (mcc & 0xff00) >> 8, mcc & 0xff);
+
+	IWX_DPRINTF(sc, IWX_DEBUG_FW, "%s: firmware has detected regulatory domain '%s' "
+	    "(0x%x)\n", DEVNAME(sc), alpha2, mcc);
+
+	/* TODO: Schedule a task to send MCC_UPDATE_CMD? */
+}
+
+/*
+ * Look up the rate-set entry (including its BASIC flag) matching the
+ * hardware rate table index 'ridx'.  Returns 0 if the node's rate set
+ * does not contain that rate.
+ */
+uint8_t
+iwx_ridx2rate(struct ieee80211_rateset *rs, int ridx)
+{
+	const uint8_t target = iwx_rates[ridx].rate;
+	int i;
+
+	for (i = 0; i < rs->rs_nrates; i++) {
+		if ((rs->rs_rates[i] & IEEE80211_RATE_VAL) == target)
+			return rs->rs_rates[i];
+	}
+
+	return 0;
+}
+
+/*
+ * Map an 802.11 rate value (in 500 kbps units) to its index in the
+ * hardware rate table, skipping invalid PLCP entries.  Returns
+ * nitems(iwx_rates) if no entry matches.
+ */
+static int
+iwx_rval2ridx(int rval)
+{
+	int ridx;
+
+	for (ridx = 0; ridx < nitems(iwx_rates); ridx++) {
+		if (iwx_rates[ridx].plcp == IWX_RATE_INVM_PLCP)
+			continue;
+		if (iwx_rates[ridx].rate == rval)
+			return ridx;
+	}
+
+	return ridx;
+}
+
+/*
+ * Compute the CCK and OFDM basic-rate ("ACK rate") bitmaps for the
+ * firmware MAC context from the node's negotiated basic rates,
+ * augmented with the mandatory lower rates required by 802.11-2007
+ * section 9.6 (see the detailed discussion inline below).
+ */
+static void
+iwx_ack_rates(struct iwx_softc *sc, struct iwx_node *in, int *cck_rates,
+    int *ofdm_rates)
+{
+	struct ieee80211_node *ni = &in->in_ni;
+	struct ieee80211_rateset *rs = &ni->ni_rates;
+	int lowest_present_ofdm = -1;
+	int lowest_present_cck = -1;
+	uint8_t cck = 0;
+	uint8_t ofdm = 0;
+	int i;
+
+	/* CCK rates only exist on 2GHz channels. */
+	if (ni->ni_chan == IEEE80211_CHAN_ANYC ||
+	    IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) {
+		for (i = IWX_FIRST_CCK_RATE; i < IWX_FIRST_OFDM_RATE; i++) {
+			if ((iwx_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
+				continue;
+			cck |= (1 << i);
+			if (lowest_present_cck == -1 || lowest_present_cck > i)
+				lowest_present_cck = i;
+		}
+	}
+	for (i = IWX_FIRST_OFDM_RATE; i <= IWX_LAST_NON_HT_RATE; i++) {
+		if ((iwx_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
+			continue;
+		ofdm |= (1 << (i - IWX_FIRST_OFDM_RATE));
+		if (lowest_present_ofdm == -1 || lowest_present_ofdm > i)
+			lowest_present_ofdm = i;
+	}
+
+	/*
+	 * Now we've got the basic rates as bitmaps in the ofdm and cck
+	 * variables. This isn't sufficient though, as there might not
+	 * be all the right rates in the bitmap. E.g. if the only basic
+	 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
+	 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
+	 *
+	 * [...] a STA responding to a received frame shall transmit
+	 * its Control Response frame [...] at the highest rate in the
+	 * BSSBasicRateSet parameter that is less than or equal to the
+	 * rate of the immediately previous frame in the frame exchange
+	 * sequence ([...]) and that is of the same modulation class
+	 * ([...]) as the received frame. If no rate contained in the
+	 * BSSBasicRateSet parameter meets these conditions, then the
+	 * control frame sent in response to a received frame shall be
+	 * transmitted at the highest mandatory rate of the PHY that is
+	 * less than or equal to the rate of the received frame, and
+	 * that is of the same modulation class as the received frame.
+	 *
+	 * As a consequence, we need to add all mandatory rates that are
+	 * lower than all of the basic rates to these bitmaps.
+	 */
+
+	if (IWX_RATE_24M_INDEX < lowest_present_ofdm)
+		ofdm |= IWX_RATE_BIT_MSK(24) >> IWX_FIRST_OFDM_RATE;
+	if (IWX_RATE_12M_INDEX < lowest_present_ofdm)
+		ofdm |= IWX_RATE_BIT_MSK(12) >> IWX_FIRST_OFDM_RATE;
+	/* 6M already there or needed so always add */
+	ofdm |= IWX_RATE_BIT_MSK(6) >> IWX_FIRST_OFDM_RATE;
+
+	/*
+	 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
+	 * Note, however:
+	 *  - if no CCK rates are basic, it must be ERP since there must
+	 *    be some basic rates at all, so they're OFDM => ERP PHY
+	 *    (or we're in 5 GHz, and the cck bitmap will never be used)
+	 *  - if 11M is a basic rate, it must be ERP as well, so add 5.5M
+	 *  - if 5.5M is basic, 1M and 2M are mandatory
+	 *  - if 2M is basic, 1M is mandatory
+	 *  - if 1M is basic, that's the only valid ACK rate.
+	 * As a consequence, it's not as complicated as it sounds, just add
+	 * any lower rates to the ACK rate bitmap.
+	 */
+	if (IWX_RATE_11M_INDEX < lowest_present_cck)
+		cck |= IWX_RATE_BIT_MSK(11) >> IWX_FIRST_CCK_RATE;
+	if (IWX_RATE_5M_INDEX < lowest_present_cck)
+		cck |= IWX_RATE_BIT_MSK(5) >> IWX_FIRST_CCK_RATE;
+	if (IWX_RATE_2M_INDEX < lowest_present_cck)
+		cck |= IWX_RATE_BIT_MSK(2) >> IWX_FIRST_CCK_RATE;
+	/* 1M already there or needed so always add */
+	cck |= IWX_RATE_BIT_MSK(1) >> IWX_FIRST_CCK_RATE;
+
+	*cck_rates = cck;
+	*ofdm_rates = ofdm;
+}
+
+/*
+ * Fill in the parts of a MAC context command common to all MAC types:
+ * id/color, action, MAC/BSSID addresses, ACK rate bitmaps, preamble and
+ * slot flags, per-AC EDCA parameters, and HT/ERP protection flags.
+ *
+ * For IWX_FW_CTXT_ACTION_REMOVE only the id/color and action fields are
+ * required, so the function returns early in that case.
+ */
+static void
+iwx_mac_ctxt_cmd_common(struct iwx_softc *sc, struct iwx_node *in,
+    struct iwx_mac_ctx_cmd *cmd, uint32_t action)
+{
+#define IWX_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+	struct ieee80211_node *ni = vap->iv_bss;
+	int cck_ack_rates, ofdm_ack_rates;
+
+	cmd->id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
+	    in->in_color));
+	cmd->action = htole32(action);
+
+	/* Removal needs nothing beyond id/color and action. */
+	if (action == IWX_FW_CTXT_ACTION_REMOVE)
+		return;
+
+	if (ic->ic_opmode == IEEE80211_M_MONITOR)
+		cmd->mac_type = htole32(IWX_FW_MAC_TYPE_LISTENER);
+	else if (ic->ic_opmode == IEEE80211_M_STA)
+		cmd->mac_type = htole32(IWX_FW_MAC_TYPE_BSS_STA);
+	else
+		panic("unsupported operating mode %d", ic->ic_opmode);
+	cmd->tsf_id = htole32(IWX_TSF_ID_A);
+
+	IEEE80211_ADDR_COPY(cmd->node_addr, vap->iv_myaddr);
+	DPRINTF(("%s: cmd->node_addr=%s\n", __func__,
+	    ether_sprintf(cmd->node_addr)));
+	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
+		/* Monitor mode listens to everything; no real BSSID. */
+		IEEE80211_ADDR_COPY(cmd->bssid_addr, etherbroadcastaddr);
+		return;
+	}
+
+	IEEE80211_ADDR_COPY(cmd->bssid_addr, in->in_macaddr);
+	DPRINTF(("%s: cmd->bssid_addr=%s\n", __func__,
+	    ether_sprintf(cmd->bssid_addr)));
+	iwx_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
+	cmd->cck_rates = htole32(cck_ack_rates);
+	cmd->ofdm_rates = htole32(ofdm_ack_rates);
+
+	cmd->cck_short_preamble
+	    = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
+	      ? IWX_MAC_FLG_SHORT_PREAMBLE : 0);
+	cmd->short_slot
+	    = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
+	      ? IWX_MAC_FLG_SHORT_SLOT : 0);
+
+	struct chanAccParams chp;
+	ieee80211_wme_vap_getparams(vap, &chp);
+
+	for (int i = 0; i < WME_NUM_AC; i++) {
+		int txf = iwx_ac_to_tx_fifo[i];
+
+		/*
+		 * EDCA parameters live in little-endian 16-bit firmware
+		 * fields; byte-swap cw_min/cw_max like edca_txop.
+		 */
+		cmd->ac[txf].cw_min =
+		    htole16(IWX_EXP2(chp.cap_wmeParams[i].wmep_logcwmin));
+		cmd->ac[txf].cw_max =
+		    htole16(IWX_EXP2(chp.cap_wmeParams[i].wmep_logcwmax));
+		cmd->ac[txf].aifsn = chp.cap_wmeParams[i].wmep_aifsn;
+		cmd->ac[txf].fifos_mask = (1 << txf);
+		/* TXOP limit is expressed in units of 32 microseconds. */
+		cmd->ac[txf].edca_txop =
+		    htole16(chp.cap_wmeParams[i].wmep_txopLimit * 32);
+	}
+
+	if (ni->ni_flags & IEEE80211_NODE_QOS) {
+		DPRINTF(("%s: === IEEE80211_NODE_QOS\n", __func__));
+		cmd->qos_flags |= htole32(IWX_MAC_QOS_FLG_UPDATE_EDCA);
+	}
+
+	if (ni->ni_flags & IEEE80211_NODE_HT) {
+		switch (vap->iv_curhtprotmode) {
+		case IEEE80211_HTINFO_OPMODE_PURE:
+			break;
+		case IEEE80211_HTINFO_OPMODE_PROTOPT:
+		case IEEE80211_HTINFO_OPMODE_MIXED:
+			cmd->protection_flags |=
+			    htole32(IWX_MAC_PROT_FLG_HT_PROT |
+			    IWX_MAC_PROT_FLG_FAT_PROT);
+			break;
+		case IEEE80211_HTINFO_OPMODE_HT20PR:
+			/* Protect only when a secondary channel is in use. */
+			if (in->in_phyctxt &&
+			    (in->in_phyctxt->sco == IEEE80211_HTINFO_2NDCHAN_ABOVE ||
+			    in->in_phyctxt->sco == IEEE80211_HTINFO_2NDCHAN_BELOW)) {
+				cmd->protection_flags |=
+				    htole32(IWX_MAC_PROT_FLG_HT_PROT |
+				    IWX_MAC_PROT_FLG_FAT_PROT);
+			}
+			break;
+		default:
+			break;
+		}
+		cmd->qos_flags |= htole32(IWX_MAC_QOS_FLG_TGN);
+		DPRINTF(("%s: === IWX_MAC_QOS_FLG_TGN\n", __func__));
+	}
+
+	if (ic->ic_flags & IEEE80211_F_USEPROT)
+		cmd->protection_flags |= htole32(IWX_MAC_PROT_FLG_TGG_PROTECT);
+	cmd->filter_flags = htole32(IWX_MAC_FILTER_ACCEPT_GRP);
+#undef IWX_EXP2
+}
+
+/*
+ * Fill in the station-specific portion of a MAC context command:
+ * association state, DTIM timing derived from the last beacon TSF,
+ * beacon interval, and association ID.
+ */
+static void
+iwx_mac_ctxt_cmd_fill_sta(struct iwx_softc *sc, struct iwx_node *in,
+ struct iwx_mac_data_sta *sta, int assoc)
+{
+ struct ieee80211_node *ni = &in->in_ni;
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+ uint32_t dtim_off;
+ uint64_t tsf;
+ int dtim_period;
+
+ /* Offset of the next DTIM relative to the beacon timestamp, in usec. */
+ dtim_off = ni->ni_dtim_count * ni->ni_intval * IEEE80211_DUR_TU;
+ tsf = le64toh(ni->ni_tstamp.tsf);
+ dtim_period = vap->iv_dtim_period;
+
+ sta->is_assoc = htole32(assoc);
+
+ if (assoc) {
+ sta->dtim_time = htole32(tsf + dtim_off);
+ sta->dtim_tsf = htole64(tsf + dtim_off);
+ // XXX: unset in iwm
+ sta->assoc_beacon_arrive_time = 0;
+ }
+ sta->bi = htole32(ni->ni_intval);
+ sta->dtim_interval = htole32(ni->ni_intval * dtim_period);
+ sta->data_policy = htole32(0);
+ sta->listen_interval = htole32(10);
+ sta->assoc_id = htole32(ni->ni_associd);
+}
+
+/*
+ * Build and send an IWX_MAC_CONTEXT_CMD to add, modify, or remove the
+ * firmware MAC context.  'assoc' indicates whether the station is
+ * associated; when not associated (or no AID yet) beacons are allowed
+ * through so net80211 can complete association.
+ *
+ * Returns 0 on success or an errno from iwx_send_cmd_pdu().
+ */
+static int
+iwx_mac_ctxt_cmd(struct iwx_softc *sc, struct iwx_node *in, uint32_t action,
+ int assoc)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211_node *ni = &in->in_ni;
+ struct iwx_mac_ctx_cmd cmd;
+ int active = (sc->sc_flags & IWX_FLAG_MAC_ACTIVE);
+
+ /* Catch driver state-machine bugs: add/remove must alternate. */
+ if (action == IWX_FW_CTXT_ACTION_ADD && active)
+ panic("MAC already added");
+ if (action == IWX_FW_CTXT_ACTION_REMOVE && !active)
+ panic("MAC already removed");
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ iwx_mac_ctxt_cmd_common(sc, in, &cmd, action);
+
+ if (action == IWX_FW_CTXT_ACTION_REMOVE) {
+ return iwx_send_cmd_pdu(sc, IWX_MAC_CONTEXT_CMD, 0,
+ sizeof(cmd), &cmd);
+ }
+
+ if (ic->ic_opmode == IEEE80211_M_MONITOR) {
+ /* Monitor mode: accept everything the firmware can pass up. */
+ cmd.filter_flags |= htole32(IWX_MAC_FILTER_IN_PROMISC |
+ IWX_MAC_FILTER_IN_CONTROL_AND_MGMT |
+ IWX_MAC_FILTER_ACCEPT_GRP |
+ IWX_MAC_FILTER_IN_BEACON |
+ IWX_MAC_FILTER_IN_PROBE_REQUEST |
+ IWX_MAC_FILTER_IN_CRC32);
+ // XXX: dtim period is in vap
+ } else if (!assoc || !ni->ni_associd /*|| !ni->ni_dtimperiod*/) {
+ /*
+ * Allow beacons to pass through as long as we are not
+ * associated or we do not have dtim period information.
+ */
+ cmd.filter_flags |= htole32(IWX_MAC_FILTER_IN_BEACON);
+ }
+ iwx_mac_ctxt_cmd_fill_sta(sc, in, &cmd.sta, assoc);
+ return iwx_send_cmd_pdu(sc, IWX_MAC_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
+}
+
+/*
+ * Ask the firmware to clear its statistics counters.  The command is
+ * issued synchronously (IWX_CMD_WANT_RESP) and the response packet is
+ * discarded; only the error status matters to the caller.
+ */
+static int
+iwx_clear_statistics(struct iwx_softc *sc)
+{
+ struct iwx_statistics_cmd scmd = {
+ .flags = htole32(IWX_STATISTICS_FLG_CLEAR)
+ };
+ struct iwx_host_cmd cmd = {
+ .id = IWX_STATISTICS_CMD,
+ .len[0] = sizeof(scmd),
+ .data[0] = &scmd,
+ .flags = IWX_CMD_WANT_RESP,
+ .resp_pkt_len = sizeof(struct iwx_notif_statistics),
+ };
+ int err;
+
+ err = iwx_send_cmd(sc, &cmd);
+ if (err)
+ return err;
+
+ /* Response buffer must be released even though it is unused. */
+ iwx_free_resp(sc, &cmd);
+ return 0;
+}
+
+/*
+ * Start a foreground (non-background) uMAC scan.
+ * Returns 0 on success or the error from iwx_umac_scan_v14().
+ */
+static int
+iwx_scan(struct iwx_softc *sc)
+{
+	int ret;
+
+	ret = iwx_umac_scan_v14(sc, 0);
+	if (ret != 0)
+		printf("%s: could not initiate scan\n", DEVNAME(sc));
+	return ret;
+}
+
+/*
+ * net80211 background-scan entry point: start a background uMAC scan.
+ * Returns 0 on success or the error from iwx_umac_scan_v14().
+ */
+static int
+iwx_bgscan(struct ieee80211com *ic)
+{
+	struct iwx_softc *sc = ic->ic_softc;
+	int ret;
+
+	ret = iwx_umac_scan_v14(sc, 1);
+	if (ret != 0)
+		printf("%s: could not initiate scan\n", DEVNAME(sc));
+	return ret;
+}
+
+/*
+ * Enable the Tx queue used for management and non-QoS frames (the
+ * "MGMT" TID).  Sets sc->first_data_qid to the queue id chosen.
+ * Returns 0 on success or the error from iwx_enable_txq().
+ */
+static int
+iwx_enable_mgmt_queue(struct iwx_softc *sc)
+{
+ int err;
+
+ sc->first_data_qid = IWX_DQA_CMD_QUEUE + 1;
+
+ /*
+ * Non-QoS frames use the "MGMT" TID and queue.
+ * Other TIDs and data queues are reserved for QoS data frames.
+ */
+ err = iwx_enable_txq(sc, IWX_STATION_ID, sc->first_data_qid,
+ IWX_MGMT_TID, IWX_TX_RING_COUNT);
+ if (err) {
+ printf("%s: could not enable Tx queue %d (error %d)\n",
+ DEVNAME(sc), sc->first_data_qid, err);
+ return err;
+ }
+
+ return 0;
+}
+
+/*
+ * Disable the management Tx queue.  Only needed with the old
+ * SCD_QUEUE_CFG command; newer firmware removes queues implicitly,
+ * in which case this is a no-op returning 0.
+ */
+static int
+iwx_disable_mgmt_queue(struct iwx_softc *sc)
+{
+ int err, cmd_ver;
+
+ /* Explicit removal is only required with old SCD_QUEUE_CFG command. */
+ cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
+ IWX_SCD_QUEUE_CONFIG_CMD);
+ if (cmd_ver == 0 || cmd_ver == IWX_FW_CMD_VER_UNKNOWN)
+ return 0;
+
+ sc->first_data_qid = IWX_DQA_CMD_QUEUE + 1;
+
+ err = iwx_disable_txq(sc, IWX_STATION_ID, sc->first_data_qid,
+ IWX_MGMT_TID);
+ if (err) {
+ printf("%s: could not disable Tx queue %d (error %d)\n",
+ DEVNAME(sc), sc->first_data_qid, err);
+ return err;
+ }
+
+ return 0;
+}
+
+/*
+ * Map a net80211 rate value (in 500 kbps units) to the index the
+ * firmware expects, i.e. the rate's position in the standard 11g
+ * rate set.  Returns -1 if the rate is not part of that set.
+ */
+static int
+iwx_rs_rval2idx(uint8_t rval)
+{
+	const struct ieee80211_rateset *std = &ieee80211_std_rateset_11g;
+	int idx;
+
+	for (idx = 0; idx < std->rs_nrates; idx++) {
+		if ((std->rs_rates[idx] & IEEE80211_RATE_VAL) == rval)
+			return idx;
+	}
+	return -1;
+}
+
+/*
+ * Build the firmware HT MCS bitmap for the given rate-set index:
+ * MCS 0-7 for SISO, or MCS 8-15 (shifted down to bits 0-7) for MIMO2.
+ * Panics on any other rate-set index, which would be a driver bug.
+ */
+static uint16_t
+iwx_rs_ht_rates(struct iwx_softc *sc, struct ieee80211_node *ni, int rsidx)
+{
+	uint16_t htrates = 0;
+	struct ieee80211_htrateset *htrs = &ni->ni_htrates;
+	int i;
+
+	if (rsidx == IEEE80211_HT_RATESET_SISO) {
+		for (i = 0; i < htrs->rs_nrates; i++) {
+			if (htrs->rs_rates[i] <= 7)
+				htrates |= (1 << htrs->rs_rates[i]);
+		}
+	} else if (rsidx == IEEE80211_HT_RATESET_MIMO2) {
+		for (i = 0; i < htrs->rs_nrates; i++) {
+			if (htrs->rs_rates[i] > 7 && htrs->rs_rates[i] <= 15)
+				htrates |= (1 << (htrs->rs_rates[i] - 8));
+		}
+	} else {
+		/* panic() takes a format string, not a parenthesized expr. */
+		panic("%s: unsupported rate set index %d", __func__, rsidx);
+	}
+
+	IWX_DPRINTF(sc, IWX_DEBUG_TXRATE,
+	    "%s:%d rsidx=%i htrates=0x%x\n", __func__, __LINE__, rsidx, htrates);
+
+	return htrates;
+}
+
+/*
+ * Build the firmware VHT MCS bitmap for the given number of spatial
+ * streams (1 or 2), derived from the peer's advertised tx_mcs_map.
+ * Returns 0 when the peer does not support that stream count
+ * (max_mcs stays -1, so (1 << 0) - 1 == 0).
+ */
+uint16_t
+iwx_rs_vht_rates(struct iwx_softc *sc, struct ieee80211_node *ni, int num_ss)
+{
+ uint16_t rx_mcs;
+ int max_mcs = -1;
+/* Extract the 2-bit MCS support field for spatial stream 'n'. */
+#define IEEE80211_VHT_MCS_FOR_SS_MASK(n) (0x3 << (2*((n)-1)))
+#define IEEE80211_VHT_MCS_FOR_SS_SHIFT(n) (2*((n)-1))
+ rx_mcs = (ni->ni_vht_mcsinfo.tx_mcs_map &
+ IEEE80211_VHT_MCS_FOR_SS_MASK(num_ss)) >>
+ IEEE80211_VHT_MCS_FOR_SS_SHIFT(num_ss);
+
+ switch (rx_mcs) {
+ case IEEE80211_VHT_MCS_NOT_SUPPORTED:
+ break;
+ case IEEE80211_VHT_MCS_SUPPORT_0_7:
+ max_mcs = 7;
+ break;
+ case IEEE80211_VHT_MCS_SUPPORT_0_8:
+ max_mcs = 8;
+ break;
+ case IEEE80211_VHT_MCS_SUPPORT_0_9:
+ /* Disable VHT MCS 9 for 20MHz-only stations. */
+ if ((ni->ni_htcap & IEEE80211_HTCAP_CHWIDTH40) == 0)
+ max_mcs = 8;
+ else
+ max_mcs = 9;
+ break;
+ default:
+ /* Should not happen; Values above cover the possible range. */
+ panic("invalid VHT Rx MCS value %u", rx_mcs);
+ }
+
+ return ((1 << (max_mcs + 1)) - 1);
+}
+
+/*
+ * Configure firmware rate scaling using version 3 of the
+ * TLC_MNG_CONFIG command.  Currently disabled (#if 1 panics) because
+ * this path is untested on FreeBSD; the #else branch is kept as a
+ * reference port of the OpenBSD code.
+ */
+static int
+iwx_rs_init_v3(struct iwx_softc *sc, struct iwx_node *in)
+{
+#if 1
+ panic("iwx: Trying to init rate set on untested version");
+#else
+ struct ieee80211_node *ni = &in->in_ni;
+ struct ieee80211_rateset *rs = &ni->ni_rates;
+ struct iwx_tlc_config_cmd_v3 cfg_cmd;
+ uint32_t cmd_id;
+ int i;
+ size_t cmd_size = sizeof(cfg_cmd);
+
+ memset(&cfg_cmd, 0, sizeof(cfg_cmd));
+
+ /* Translate the legacy rate set into firmware rate indices. */
+ for (i = 0; i < rs->rs_nrates; i++) {
+ uint8_t rval = rs->rs_rates[i] & IEEE80211_RATE_VAL;
+ int idx = iwx_rs_rval2idx(rval);
+ if (idx == -1)
+ return EINVAL;
+ cfg_cmd.non_ht_rates |= (1 << idx);
+ }
+
+ if (ni->ni_flags & IEEE80211_NODE_VHT) {
+ cfg_cmd.mode = IWX_TLC_MNG_MODE_VHT;
+ cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80] =
+ htole16(iwx_rs_vht_rates(sc, ni, 1));
+ cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80] =
+ htole16(iwx_rs_vht_rates(sc, ni, 2));
+ } else if (ni->ni_flags & IEEE80211_NODE_HT) {
+ cfg_cmd.mode = IWX_TLC_MNG_MODE_HT;
+ cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80] =
+ htole16(iwx_rs_ht_rates(sc, ni,
+ IEEE80211_HT_RATESET_SISO));
+ cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80] =
+ htole16(iwx_rs_ht_rates(sc, ni,
+ IEEE80211_HT_RATESET_MIMO2));
+ } else
+ cfg_cmd.mode = IWX_TLC_MNG_MODE_NON_HT;
+
+ cfg_cmd.sta_id = IWX_STATION_ID;
+ if (in->in_phyctxt->vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80)
+ cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_80MHZ;
+ else if (in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCA ||
+ in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCB)
+ cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_40MHZ;
+ else
+ cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_20MHZ;
+ cfg_cmd.chains = IWX_TLC_MNG_CHAIN_A_MSK | IWX_TLC_MNG_CHAIN_B_MSK;
+ /* Max A-MSDU length: 3895 bytes for VHT, 3839 for HT/legacy. */
+ if (ni->ni_flags & IEEE80211_NODE_VHT)
+ cfg_cmd.max_mpdu_len = htole16(3895);
+ else
+ cfg_cmd.max_mpdu_len = htole16(3839);
+ if (ni->ni_flags & IEEE80211_NODE_HT) {
+ if (ieee80211_node_supports_ht_sgi20(ni)) {
+ cfg_cmd.sgi_ch_width_supp |= (1 <<
+ IWX_TLC_MNG_CH_WIDTH_20MHZ);
+ }
+ if (ieee80211_node_supports_ht_sgi40(ni)) {
+ cfg_cmd.sgi_ch_width_supp |= (1 <<
+ IWX_TLC_MNG_CH_WIDTH_40MHZ);
+ }
+ }
+ if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
+ ieee80211_node_supports_vht_sgi80(ni))
+ cfg_cmd.sgi_ch_width_supp |= (1 << IWX_TLC_MNG_CH_WIDTH_80MHZ);
+
+ cmd_id = iwx_cmd_id(IWX_TLC_MNG_CONFIG_CMD, IWX_DATA_PATH_GROUP, 0);
+ return iwx_send_cmd_pdu(sc, cmd_id, IWX_CMD_ASYNC, cmd_size, &cfg_cmd);
+#endif
+}
+
+/*
+ * Configure firmware rate scaling using version 4 of the
+ * TLC_MNG_CONFIG command: legacy/HT/VHT rate bitmaps, station id,
+ * channel width, chain mask, max MPDU length, and short-GI support.
+ * Sent asynchronously; returns 0 or an errno.
+ */
+static int
+iwx_rs_init_v4(struct iwx_softc *sc, struct iwx_node *in)
+{
+ struct ieee80211_node *ni = &in->in_ni;
+ struct ieee80211_rateset *rs = &ni->ni_rates;
+ struct ieee80211_htrateset *htrs = &ni->ni_htrates;
+ struct iwx_tlc_config_cmd_v4 cfg_cmd;
+ uint32_t cmd_id;
+ int i;
+ int sgi80 = 0;
+ size_t cmd_size = sizeof(cfg_cmd);
+
+ memset(&cfg_cmd, 0, sizeof(cfg_cmd));
+
+ /* Translate the legacy rate set into firmware rate indices. */
+ for (i = 0; i < rs->rs_nrates; i++) {
+ uint8_t rval = rs->rs_rates[i] & IEEE80211_RATE_VAL;
+ int idx = iwx_rs_rval2idx(rval);
+ if (idx == -1)
+ return EINVAL;
+ cfg_cmd.non_ht_rates |= (1 << idx);
+ }
+ for (i = 0; i < htrs->rs_nrates; i++) {
+ DPRINTF(("%s: htrate=%i\n", __func__, htrs->rs_rates[i]));
+ }
+
+ if (ni->ni_flags & IEEE80211_NODE_VHT) {
+ cfg_cmd.mode = IWX_TLC_MNG_MODE_VHT;
+ cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80] =
+ htole16(iwx_rs_vht_rates(sc, ni, 1));
+ cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80] =
+ htole16(iwx_rs_vht_rates(sc, ni, 2));
+
+ IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d SISO=0x%x\n",
+ __func__, __LINE__,
+ cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80]);
+ IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d MIMO2=0x%x\n",
+ __func__, __LINE__,
+ cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80]);
+ } else if (ni->ni_flags & IEEE80211_NODE_HT) {
+ cfg_cmd.mode = IWX_TLC_MNG_MODE_HT;
+ cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80] =
+ htole16(iwx_rs_ht_rates(sc, ni,
+ IEEE80211_HT_RATESET_SISO));
+ cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80] =
+ htole16(iwx_rs_ht_rates(sc, ni,
+ IEEE80211_HT_RATESET_MIMO2));
+
+ IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d SISO=0x%x\n",
+ __func__, __LINE__,
+ cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80]);
+ IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d MIMO2=0x%x\n",
+ __func__, __LINE__,
+ cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80]);
+ } else
+ cfg_cmd.mode = IWX_TLC_MNG_MODE_NON_HT;
+
+ cfg_cmd.sta_id = IWX_STATION_ID;
+#if 0
+ if (in->in_phyctxt->vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80)
+ cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_80MHZ;
+ else if (in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCA ||
+ in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCB)
+ cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_40MHZ;
+ else
+ cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_20MHZ;
+#endif
+ /* Derive the maximum channel width from the node's channel flags. */
+ if (IEEE80211_IS_CHAN_VHT80(in->in_ni.ni_chan)) {
+ cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_80MHZ;
+ } else if (IEEE80211_IS_CHAN_HT40(in->in_ni.ni_chan)) {
+ cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_40MHZ;
+ } else {
+ cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_20MHZ;
+ }
+
+ cfg_cmd.chains = IWX_TLC_MNG_CHAIN_A_MSK | IWX_TLC_MNG_CHAIN_B_MSK;
+ /* Max A-MSDU length: 3895 bytes for VHT, 3839 for HT/legacy. */
+ if (ni->ni_flags & IEEE80211_NODE_VHT)
+ cfg_cmd.max_mpdu_len = htole16(3895);
+ else
+ cfg_cmd.max_mpdu_len = htole16(3839);
+ if (ni->ni_flags & IEEE80211_NODE_HT) {
+ if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20) {
+ cfg_cmd.sgi_ch_width_supp |= (1 <<
+ IWX_TLC_MNG_CH_WIDTH_20MHZ);
+ }
+ if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40) {
+ cfg_cmd.sgi_ch_width_supp |= (1 <<
+ IWX_TLC_MNG_CH_WIDTH_40MHZ);
+ }
+ }
+ sgi80 = _IEEE80211_MASKSHIFT(ni->ni_vhtcap,
+ IEEE80211_VHTCAP_SHORT_GI_80);
+ if ((ni->ni_flags & IEEE80211_NODE_VHT) && sgi80) {
+ cfg_cmd.sgi_ch_width_supp |= (1 << IWX_TLC_MNG_CH_WIDTH_80MHZ);
+ }
+
+ cmd_id = iwx_cmd_id(IWX_TLC_MNG_CONFIG_CMD, IWX_DATA_PATH_GROUP, 0);
+ return iwx_send_cmd_pdu(sc, cmd_id, IWX_CMD_ASYNC, cmd_size, &cfg_cmd);
+}
+
+/*
+ * Initialize firmware rate scaling for the station, dispatching on the
+ * TLC_MNG_CONFIG command version advertised by the firmware.
+ */
+static int
+iwx_rs_init(struct iwx_softc *sc, struct iwx_node *in)
+{
+	if (iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
+	    IWX_TLC_MNG_CONFIG_CMD) == 4)
+		return iwx_rs_init_v4(sc, in);
+	return iwx_rs_init_v3(sc, in);
+}
+
+/*
+ * Handle a TLC_MNG_UPDATE notification from the firmware: decode the
+ * rate the firmware selected (HT MCS or legacy) and record it on the
+ * BSS node so net80211 reports the current Tx rate.  The rate_n_flags
+ * encoding differs between notification versions (>= 3 uses "ver2"
+ * layout).
+ */
+static void
+iwx_rs_update(struct iwx_softc *sc, struct iwx_tlc_update_notif *notif)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+ struct ieee80211_node *ni = (void *)vap->iv_bss;
+
+ struct ieee80211_rateset *rs = &ni->ni_rates;
+ uint32_t rate_n_flags;
+ uint8_t plcp, rval;
+ int i, cmd_ver, rate_n_flags_ver2 = 0;
+
+ /* Ignore notifications for other stations or without a rate. */
+ if (notif->sta_id != IWX_STATION_ID ||
+ (le32toh(notif->flags) & IWX_TLC_NOTIF_FLAG_RATE) == 0)
+ return;
+
+ rate_n_flags = le32toh(notif->rate);
+
+ cmd_ver = iwx_lookup_notif_ver(sc, IWX_DATA_PATH_GROUP,
+ IWX_TLC_MNG_UPDATE_NOTIF);
+ if (cmd_ver != IWX_FW_CMD_VER_UNKNOWN && cmd_ver >= 3)
+ rate_n_flags_ver2 = 1;
+ else
+ panic("hey look at that!");
+
+ /* HT MCS rates: store the MCS index and return early. */
+ if (rate_n_flags_ver2) {
+ uint32_t mod_type = (rate_n_flags & IWX_RATE_MCS_MOD_TYPE_MSK);
+ if (mod_type == IWX_RATE_MCS_HT_MSK) {
+ ieee80211_node_set_txrate_dot11rate(ni,
+ IWX_RATE_HT_MCS_INDEX(rate_n_flags));
+ IWX_DPRINTF(sc, IWX_DEBUG_TXRATE,
+ "%s:%d new MCS idx: %d rate_n_flags: %x\n",
+ __func__, __LINE__,
+ ieee80211_node_get_txrate_dot11rate(ni), rate_n_flags);
+ return;
+ }
+ } else {
+ if (rate_n_flags & IWX_RATE_MCS_HT_MSK_V1) {
+ ieee80211_node_set_txrate_dot11rate(ni,
+ rate_n_flags & (IWX_RATE_HT_MCS_RATE_CODE_MSK_V1 |
+ IWX_RATE_HT_MCS_NSS_MSK_V1));
+
+ IWX_DPRINTF(sc, IWX_DEBUG_TXRATE,
+ "%s:%d new MCS idx: %d rate_n_flags: %x\n",
+ __func__, __LINE__,
+ ieee80211_node_get_txrate_dot11rate(ni), rate_n_flags);
+ return;
+ }
+ }
+
+ /* Legacy rates: recover the rate value in 500 kbps units. */
+ if (rate_n_flags_ver2) {
+ /* NOTE: this 'rs' intentionally shadows the node rate set. */
+ const struct ieee80211_rateset *rs;
+ uint32_t ridx = (rate_n_flags & IWX_RATE_LEGACY_RATE_MSK);
+ if (rate_n_flags & IWX_RATE_MCS_LEGACY_OFDM_MSK)
+ rs = &ieee80211_std_rateset_11a;
+ else
+ rs = &ieee80211_std_rateset_11b;
+ if (ridx < rs->rs_nrates)
+ rval = (rs->rs_rates[ridx] & IEEE80211_RATE_VAL);
+ else
+ rval = 0;
+ } else {
+ /* Old layout carries the PLCP code; map it back to a rate. */
+ plcp = (rate_n_flags & IWX_RATE_LEGACY_RATE_MSK_V1);
+
+ rval = 0;
+ for (i = IWX_RATE_1M_INDEX; i < nitems(iwx_rates); i++) {
+ if (iwx_rates[i].plcp == plcp) {
+ rval = iwx_rates[i].rate;
+ break;
+ }
+ }
+ }
+
+ /* Find the matching index in the node's own rate set. */
+ if (rval) {
+ uint8_t rv;
+ for (i = 0; i < rs->rs_nrates; i++) {
+ rv = rs->rs_rates[i] & IEEE80211_RATE_VAL;
+ if (rv == rval) {
+ ieee80211_node_set_txrate_dot11rate(ni, i);
+ break;
+ }
+ }
+ IWX_DPRINTF(sc, IWX_DEBUG_TXRATE,
+ "%s:%d new rate %d\n", __func__, __LINE__,
+ ieee80211_node_get_txrate_dot11rate(ni));
+ }
+}
+
+/*
+ * Send an RLC_CONFIG command (version 2) describing the Rx chain
+ * configuration for the given PHY context: valid antenna mask plus
+ * idle (static) and active (dynamic/MIMO) chain counts.
+ */
+static int
+iwx_phy_send_rlc(struct iwx_softc *sc, struct iwx_phy_ctxt *phyctxt,
+ uint8_t chains_static, uint8_t chains_dynamic)
+{
+ struct iwx_rlc_config_cmd cmd;
+ uint32_t cmd_id;
+ uint8_t active_cnt, idle_cnt;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ idle_cnt = chains_static;
+ active_cnt = chains_dynamic;
+
+ cmd.phy_id = htole32(phyctxt->id);
+ cmd.rlc.rx_chain_info = htole32(iwx_fw_valid_rx_ant(sc) <<
+ IWX_PHY_RX_CHAIN_VALID_POS);
+ cmd.rlc.rx_chain_info |= htole32(idle_cnt << IWX_PHY_RX_CHAIN_CNT_POS);
+ cmd.rlc.rx_chain_info |= htole32(active_cnt <<
+ IWX_PHY_RX_CHAIN_MIMO_CNT_POS);
+
+ cmd_id = iwx_cmd_id(IWX_RLC_CONFIG_CMD, IWX_DATA_PATH_GROUP, 2);
+ return iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
+}
+
+/*
+ * Move a PHY context to a new channel/width configuration.  Firmware
+ * with the BINDING_CDB capability cannot modify a context across a
+ * band change, so in that case the context is removed and re-added;
+ * otherwise a MODIFY suffices.  Finally, if the firmware uses
+ * RLC_CONFIG_CMD v2, the Rx chain configuration is sent separately.
+ *
+ * Returns 0 on success or an errno from the firmware commands.
+ */
+static int
+iwx_phy_ctxt_update(struct iwx_softc *sc, struct iwx_phy_ctxt *phyctxt,
+ struct ieee80211_channel *chan, uint8_t chains_static,
+ uint8_t chains_dynamic, uint32_t apply_time, uint8_t sco,
+ uint8_t vht_chan_width)
+{
+ uint16_t band_flags = (IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_5GHZ);
+ int err;
+
+ if (chan == IEEE80211_CHAN_ANYC) {
+ printf("%s: GOS-3833: IEEE80211_CHAN_ANYC triggered\n",
+ DEVNAME(sc));
+ return EIO;
+ }
+
+ /* Band change with CDB firmware: remove and re-add the context. */
+ if (isset(sc->sc_enabled_capa,
+ IWX_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT) &&
+ (phyctxt->channel->ic_flags & band_flags) !=
+ (chan->ic_flags & band_flags)) {
+ err = iwx_phy_ctxt_cmd(sc, phyctxt, chains_static,
+ chains_dynamic, IWX_FW_CTXT_ACTION_REMOVE, apply_time, sco,
+ vht_chan_width);
+ if (err) {
+ printf("%s: could not remove PHY context "
+ "(error %d)\n", DEVNAME(sc), err);
+ return err;
+ }
+ phyctxt->channel = chan;
+ err = iwx_phy_ctxt_cmd(sc, phyctxt, chains_static,
+ chains_dynamic, IWX_FW_CTXT_ACTION_ADD, apply_time, sco,
+ vht_chan_width);
+ if (err) {
+ printf("%s: could not add PHY context "
+ "(error %d)\n", DEVNAME(sc), err);
+ return err;
+ }
+ } else {
+ phyctxt->channel = chan;
+ err = iwx_phy_ctxt_cmd(sc, phyctxt, chains_static,
+ chains_dynamic, IWX_FW_CTXT_ACTION_MODIFY, apply_time, sco,
+ vht_chan_width);
+ if (err) {
+ printf("%s: could not update PHY context (error %d)\n",
+ DEVNAME(sc), err);
+ return err;
+ }
+ }
+
+ /* Record the new secondary-channel offset and VHT width. */
+ phyctxt->sco = sco;
+ phyctxt->vht_chan_width = vht_chan_width;
+
+ DPRINTF(("%s: phyctxt->channel->ic_ieee=%d\n", __func__,
+ phyctxt->channel->ic_ieee));
+ DPRINTF(("%s: phyctxt->sco=%d\n", __func__, phyctxt->sco));
+ DPRINTF(("%s: phyctxt->vht_chan_width=%d\n", __func__,
+ phyctxt->vht_chan_width));
+
+ if (iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
+ IWX_RLC_CONFIG_CMD) == 2)
+ return iwx_phy_send_rlc(sc, phyctxt,
+ chains_static, chains_dynamic);
+
+ return 0;
+}
+
+/*
+ * Bring the firmware into the AUTH state: set up the PHY context, add
+ * the MAC context, binding, and station, enable the management queue,
+ * clear statistics, and finally start session protection so the
+ * firmware stays on channel during association.
+ *
+ * On failure the already-established firmware state is unwound via
+ * the goto labels, but only if no device reset (sc_generation change)
+ * happened in between.
+ */
+static int
+iwx_auth(struct ieee80211vap *vap, struct iwx_softc *sc)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct iwx_node *in;
+ struct iwx_vap *ivp = IWX_VAP(vap);
+ struct ieee80211_node *ni;
+ uint32_t duration;
+ int generation = sc->sc_generation, err;
+
+ IWX_ASSERT_LOCKED(sc);
+
+ ni = ieee80211_ref_node(vap->iv_bss);
+ in = IWX_NODE(ni);
+
+ if (ic->ic_opmode == IEEE80211_M_MONITOR) {
+ err = iwx_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
+ ic->ic_bsschan, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
+ IEEE80211_VHTOP0_CHAN_WIDTH_HT);
+ if (err)
+ return err;
+ } else {
+ err = iwx_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
+ in->in_ni.ni_chan, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
+ IEEE80211_VHTOP0_CHAN_WIDTH_HT);
+ if (err)
+ return err;
+ }
+ ivp->phy_ctxt = &sc->sc_phyctxt[0];
+ IEEE80211_ADDR_COPY(in->in_macaddr, in->in_ni.ni_macaddr);
+ DPRINTF(("%s: in-in_macaddr=%s\n", __func__,
+ ether_sprintf(in->in_macaddr)));
+
+ err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_ADD, 0);
+ if (err) {
+ printf("%s: could not add MAC context (error %d)\n",
+ DEVNAME(sc), err);
+ return err;
+ }
+ sc->sc_flags |= IWX_FLAG_MAC_ACTIVE;
+
+ err = iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_ADD);
+ if (err) {
+ printf("%s: could not add binding (error %d)\n",
+ DEVNAME(sc), err);
+ goto rm_mac_ctxt;
+ }
+ sc->sc_flags |= IWX_FLAG_BINDING_ACTIVE;
+
+ err = iwx_add_sta_cmd(sc, in, 0);
+ if (err) {
+ printf("%s: could not add sta (error %d)\n",
+ DEVNAME(sc), err);
+ goto rm_binding;
+ }
+ sc->sc_flags |= IWX_FLAG_STA_ACTIVE;
+
+ if (ic->ic_opmode == IEEE80211_M_MONITOR) {
+ /* Monitor mode uses a dedicated injection queue; done here. */
+ err = iwx_enable_txq(sc, IWX_MONITOR_STA_ID,
+ IWX_DQA_INJECT_MONITOR_QUEUE, IWX_MGMT_TID,
+ IWX_TX_RING_COUNT);
+ if (err)
+ goto rm_sta;
+ return 0;
+ }
+
+ err = iwx_enable_mgmt_queue(sc);
+ if (err)
+ goto rm_sta;
+
+ err = iwx_clear_statistics(sc);
+ if (err)
+ goto rm_mgmt_queue;
+
+ /*
+ * Prevent the FW from wandering off channel during association
+ * by "protecting" the session with a time event.
+ */
+ if (in->in_ni.ni_intval)
+ duration = in->in_ni.ni_intval * 9;
+ else
+ duration = 900;
+ return iwx_schedule_session_protection(sc, in, duration);
+
+rm_mgmt_queue:
+ if (generation == sc->sc_generation)
+ iwx_disable_mgmt_queue(sc);
+rm_sta:
+ if (generation == sc->sc_generation) {
+ iwx_rm_sta_cmd(sc, in);
+ sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
+ }
+rm_binding:
+ if (generation == sc->sc_generation) {
+ iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE);
+ sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
+ }
+rm_mac_ctxt:
+ if (generation == sc->sc_generation) {
+ iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE, 0);
+ sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
+ }
+ return err;
+}
+
+/*
+ * Tear down firmware state established by iwx_auth(): cancel session
+ * protection, then remove the station, binding, and MAC context (in
+ * that order) if each is active, and park the now-unused PHY context
+ * on a default channel.
+ */
+static int
+iwx_deauth(struct iwx_softc *sc)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+ struct iwx_node *in = IWX_NODE(vap->iv_bss);
+ int err;
+
+ IWX_ASSERT_LOCKED(sc);
+
+ iwx_unprotect_session(sc, in);
+
+ if (sc->sc_flags & IWX_FLAG_STA_ACTIVE) {
+ err = iwx_rm_sta(sc, in);
+ if (err)
+ return err;
+ sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
+ }
+
+ if (sc->sc_flags & IWX_FLAG_BINDING_ACTIVE) {
+ err = iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE);
+ if (err) {
+ printf("%s: could not remove binding (error %d)\n",
+ DEVNAME(sc), err);
+ return err;
+ }
+ sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
+ }
+
+ DPRINTF(("%s: IWX_FLAG_MAC_ACTIVE=%d\n", __func__, sc->sc_flags &
+ IWX_FLAG_MAC_ACTIVE));
+ if (sc->sc_flags & IWX_FLAG_MAC_ACTIVE) {
+ err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE, 0);
+ if (err) {
+ printf("%s: could not remove MAC context (error %d)\n",
+ DEVNAME(sc), err);
+ return err;
+ }
+ sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
+ }
+
+ /* Move unused PHY context to a default channel. */
+ //TODO uncommented in obsd, but stays on the way of auth->auth
+ err = iwx_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
+ &ic->ic_channels[1], 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
+ IEEE80211_VHTOP0_CHAN_WIDTH_HT);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/*
+ * Transition to the RUN state after association: update the PHY
+ * context for the negotiated HT/VHT channel width, refresh the
+ * station and MAC contexts with the assigned AID, configure smart-FIFO
+ * and multicast, apply power settings, and initialize firmware rate
+ * scaling (except in monitor mode).
+ */
+static int
+iwx_run(struct ieee80211vap *vap, struct iwx_softc *sc)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct iwx_node *in = IWX_NODE(vap->iv_bss);
+ struct ieee80211_node *ni = &in->in_ni;
+ struct iwx_vap *ivp = IWX_VAP(vap);
+ int err;
+
+ IWX_ASSERT_LOCKED(sc);
+
+ if (ni->ni_flags & IEEE80211_NODE_HT) {
+ /* Use two chains when MIMO is available. */
+ uint8_t chains = iwx_mimo_enabled(sc) ? 2 : 1;
+ uint8_t sco, vht_chan_width;
+ sco = IEEE80211_HTOP0_SCO_SCN;
+ if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
+ IEEE80211_IS_CHAN_VHT80(ni->ni_chan))
+ vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_80;
+ else
+ vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_HT;
+ err = iwx_phy_ctxt_update(sc, ivp->phy_ctxt,
+ ivp->phy_ctxt->channel, chains, chains,
+ 0, sco, vht_chan_width);
+ if (err) {
+ printf("%s: failed to update PHY\n", DEVNAME(sc));
+ return err;
+ }
+ }
+
+ /* Update STA again to apply HT and VHT settings. */
+ err = iwx_add_sta_cmd(sc, in, 1);
+ if (err) {
+ printf("%s: could not update STA (error %d)\n",
+ DEVNAME(sc), err);
+ return err;
+ }
+
+ /* We have now been assigned an associd by the AP. */
+ err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_MODIFY, 1);
+ if (err) {
+ printf("%s: failed to update MAC\n", DEVNAME(sc));
+ return err;
+ }
+
+ err = iwx_sf_config(sc, IWX_SF_FULL_ON);
+ if (err) {
+ printf("%s: could not set sf full on (error %d)\n",
+ DEVNAME(sc), err);
+ return err;
+ }
+
+ err = iwx_allow_mcast(sc);
+ if (err) {
+ printf("%s: could not allow mcast (error %d)\n",
+ DEVNAME(sc), err);
+ return err;
+ }
+
+ err = iwx_power_update_device(sc);
+ if (err) {
+ printf("%s: could not send power command (error %d)\n",
+ DEVNAME(sc), err);
+ return err;
+ }
+#ifdef notyet
+ /*
+ * Disabled for now. Default beacon filter settings
+ * prevent net80211 from getting ERP and HT protection
+ * updates from beacons.
+ */
+ err = iwx_enable_beacon_filter(sc, in);
+ if (err) {
+ printf("%s: could not enable beacon filter\n",
+ DEVNAME(sc));
+ return err;
+ }
+#endif
+ err = iwx_power_mac_update_mode(sc, in);
+ if (err) {
+ printf("%s: could not update MAC power (error %d)\n",
+ DEVNAME(sc), err);
+ return err;
+ }
+
+ if (ic->ic_opmode == IEEE80211_M_MONITOR)
+ return 0;
+
+ err = iwx_rs_init(sc, in);
+ if (err) {
+ printf("%s: could not init rate scaling (error %d)\n",
+ DEVNAME(sc), err);
+ return err;
+ }
+
+ return 0;
+}
+
+/*
+ * Leave the RUN state: flush the station's Tx path, tear down any Rx
+ * block-ack sessions, switch smart-FIFO off, disable beacon filtering,
+ * and mark the station as disassociated in the MAC context.
+ */
+static int
+iwx_run_stop(struct iwx_softc *sc)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+ struct iwx_node *in = IWX_NODE(vap->iv_bss);
+ struct ieee80211_node *ni = &in->in_ni;
+ int err, i;
+
+ IWX_ASSERT_LOCKED(sc);
+
+ err = iwx_flush_sta(sc, in);
+ if (err) {
+ printf("%s: could not flush Tx path (error %d)\n",
+ DEVNAME(sc), err);
+ return err;
+ }
+
+ /*
+ * Stop Rx BA sessions now. We cannot rely on the BA task
+ * for this when moving out of RUN state since it runs in a
+ * separate thread.
+ * Note that in->in_ni (struct ieee80211_node) already represents
+ * our new access point in case we are roaming between APs.
+ * This means we cannot rely on struct ieee802111_node to tell
+ * us which BA sessions exist.
+ */
+ // TODO agg
+ for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
+ struct iwx_rxba_data *rxba = &sc->sc_rxba_data[i];
+ if (rxba->baid == IWX_RX_REORDER_DATA_INVALID_BAID)
+ continue;
+ iwx_sta_rx_agg(sc, ni, rxba->tid, 0, 0, 0, 0);
+ }
+
+ err = iwx_sf_config(sc, IWX_SF_INIT_OFF);
+ if (err)
+ return err;
+
+ err = iwx_disable_beacon_filter(sc);
+ if (err) {
+ printf("%s: could not disable beacon filter (error %d)\n",
+ DEVNAME(sc), err);
+ return err;
+ }
+
+ /* Mark station as disassociated. */
+ err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_MODIFY, 0);
+ if (err) {
+ printf("%s: failed to update MAC\n", DEVNAME(sc));
+ return err;
+ }
+
+ return 0;
+}
+
+/*
+ * net80211 node allocation hook: allocate a zeroed node large enough
+ * for the driver's iwx_node extension.  May return NULL (M_NOWAIT).
+ */
+static struct ieee80211_node *
+iwx_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
+{
+	return malloc(sizeof(struct iwx_node), M_80211_NODE,
+	    M_NOWAIT | M_ZERO);
+}
+
+#if 0
+/*
+ * Disabled OpenBSD-derived hardware crypto support (not yet ported to
+ * the FreeBSD net80211 key API).  Queues a CCMP key installation for
+ * iwx_setkey_task(); other ciphers fall back to software crypto.
+ */
+int
+iwx_set_key(struct ieee80211com *ic, struct ieee80211_node *ni,
+ struct ieee80211_key *k)
+{
+ struct iwx_softc *sc = ic->ic_softc;
+ struct iwx_node *in = (void *)ni;
+ struct iwx_setkey_task_arg *a;
+ int err;
+
+ if (k->k_cipher != IEEE80211_CIPHER_CCMP) {
+ /* Fallback to software crypto for other ciphers. */
+ err = ieee80211_set_key(ic, ni, k);
+ if (!err && in != NULL && (k->k_flags & IEEE80211_KEY_GROUP))
+ in->in_flags |= IWX_NODE_FLAG_HAVE_GROUP_KEY;
+ return err;
+ }
+
+ if (sc->setkey_nkeys >= nitems(sc->setkey_arg))
+ return ENOSPC;
+
+ /* Enqueue into the circular setkey argument buffer. */
+ a = &sc->setkey_arg[sc->setkey_cur];
+ a->sta_id = IWX_STATION_ID;
+ a->ni = ni;
+ a->k = k;
+ sc->setkey_cur = (sc->setkey_cur + 1) % nitems(sc->setkey_arg);
+ sc->setkey_nkeys++;
+ iwx_add_task(sc, systq, &sc->setkey_task);
+ return EBUSY;
+}
+
+/*
+ * Disabled OpenBSD-derived helper: install a CCMP key into the
+ * firmware via IWX_ADD_STA_KEY.  On failure the association is torn
+ * down; once both pairwise and group keys are installed the 802.1X
+ * port is marked valid.
+ */
+int
+iwx_add_sta_key(struct iwx_softc *sc, int sta_id, struct ieee80211_node *ni,
+ struct ieee80211_key *k)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct iwx_node *in = (void *)ni;
+ struct iwx_add_sta_key_cmd cmd;
+ uint32_t status;
+ const int want_keymask = (IWX_NODE_FLAG_HAVE_PAIRWISE_KEY |
+ IWX_NODE_FLAG_HAVE_GROUP_KEY);
+ int err;
+
+ /*
+ * Keys are stored in 'ni' so 'k' is valid if 'ni' is valid.
+ * Currently we only implement station mode where 'ni' is always
+ * ic->ic_bss so there is no need to validate arguments beyond this:
+ */
+ KASSERT(ni == ic->ic_bss);
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.common.key_flags = htole16(IWX_STA_KEY_FLG_CCM |
+ IWX_STA_KEY_FLG_WEP_KEY_MAP |
+ ((k->k_id << IWX_STA_KEY_FLG_KEYID_POS) &
+ IWX_STA_KEY_FLG_KEYID_MSK));
+ /* Group keys go into key slot 1, pairwise keys into slot 0. */
+ if (k->k_flags & IEEE80211_KEY_GROUP) {
+ cmd.common.key_offset = 1;
+ cmd.common.key_flags |= htole16(IWX_STA_KEY_MULTICAST);
+ } else
+ cmd.common.key_offset = 0;
+
+ memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
+ cmd.common.sta_id = sta_id;
+
+ cmd.transmit_seq_cnt = htole64(k->k_tsc);
+
+ status = IWX_ADD_STA_SUCCESS;
+ err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA_KEY, sizeof(cmd), &cmd,
+ &status);
+ if (sc->sc_flags & IWX_FLAG_SHUTDOWN)
+ return ECANCELED;
+ if (!err && (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS)
+ err = EIO;
+ if (err) {
+ /* Key installation failed; drop the association. */
+ IEEE80211_SEND_MGMT(ic, ni, IEEE80211_FC0_SUBTYPE_DEAUTH,
+ IEEE80211_REASON_AUTH_LEAVE);
+ ieee80211_new_state(ic, IEEE80211_S_SCAN, -1);
+ return err;
+ }
+
+ if (k->k_flags & IEEE80211_KEY_GROUP)
+ in->in_flags |= IWX_NODE_FLAG_HAVE_GROUP_KEY;
+ else
+ in->in_flags |= IWX_NODE_FLAG_HAVE_PAIRWISE_KEY;
+
+ /* Both keys installed: open the 802.1X port. */
+ if ((in->in_flags & want_keymask) == want_keymask) {
+ DPRINTF(("marking port %s valid\n",
+ ether_sprintf(ni->ni_macaddr)));
+ ni->ni_port_valid = 1;
+ ieee80211_set_link_state(ic, LINK_STATE_UP);
+ }
+
+ return 0;
+}
+
+/*
+ * Deferred task that drains the setkey ring (sc->setkey_arg) in FIFO
+ * order, installing each queued key via iwx_add_sta_key(). Processing
+ * stops at the first error or when the driver is shutting down; the
+ * loop still releases the task reference on exit.
+ */
+void
+iwx_setkey_task(void *arg)
+{
+ struct iwx_softc *sc = arg;
+ struct iwx_setkey_task_arg *a;
+ int err = 0, s = splnet();
+
+ while (sc->setkey_nkeys > 0) {
+ if (err || (sc->sc_flags & IWX_FLAG_SHUTDOWN))
+ break;
+ a = &sc->setkey_arg[sc->setkey_tail];
+ err = iwx_add_sta_key(sc, a->sta_id, a->ni, a->k);
+ /* Clear the consumed slot before advancing the tail index. */
+ a->sta_id = 0;
+ a->ni = NULL;
+ a->k = NULL;
+ sc->setkey_tail = (sc->setkey_tail + 1) %
+ nitems(sc->setkey_arg);
+ sc->setkey_nkeys--;
+ }
+
+ refcnt_rele_wake(&sc->task_refs);
+ splx(s);
+}
+
+/*
+ * Remove a CCMP key from the firmware. Non-CCMP ciphers were handled
+ * by net80211 software crypto, so deletion is delegated back to it.
+ * The command is sent with IWX_CMD_ASYNC; no completion is awaited.
+ */
+void
+iwx_delete_key(struct ieee80211com *ic, struct ieee80211_node *ni,
+ struct ieee80211_key *k)
+{
+ struct iwx_softc *sc = ic->ic_softc;
+ struct iwx_add_sta_key_cmd cmd;
+
+ if (k->k_cipher != IEEE80211_CIPHER_CCMP) {
+ /* Fallback to software crypto for other ciphers. */
+ ieee80211_delete_key(ic, ni, k);
+ return;
+ }
+
+ /* Nothing to do if the firmware station is already gone. */
+ if ((sc->sc_flags & IWX_FLAG_STA_ACTIVE) == 0)
+ return;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.common.key_flags = htole16(IWX_STA_KEY_NOT_VALID |
+ IWX_STA_KEY_FLG_NO_ENC | IWX_STA_KEY_FLG_WEP_KEY_MAP |
+ ((k->k_id << IWX_STA_KEY_FLG_KEYID_POS) &
+ IWX_STA_KEY_FLG_KEYID_MSK));
+ memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
+ /* Same offset convention as iwx_add_sta_key: group=1, pairwise=0. */
+ if (k->k_flags & IEEE80211_KEY_GROUP)
+ cmd.common.key_offset = 1;
+ else
+ cmd.common.key_offset = 0;
+ cmd.common.sta_id = IWX_STATION_ID;
+
+ iwx_send_cmd_pdu(sc, IWX_ADD_STA_KEY, IWX_CMD_ASYNC, sizeof(cmd), &cmd);
+}
+#endif
+
+/*
+ * Drive the firmware through an 802.11 state transition.
+ * Called from iwx_newstate() with the IC lock dropped; all firmware
+ * commands run under the driver lock. When moving backwards (or past
+ * RUN) firmware state is torn down in reverse order of establishment.
+ * Returns 0 on success or an errno value.
+ */
+static int
+iwx_newstate_sub(struct ieee80211vap *vap, enum ieee80211_state nstate)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	struct iwx_softc *sc = ic->ic_softc;
+	enum ieee80211_state ostate = vap->iv_state;
+	int err = 0;
+
+	IWX_LOCK(sc);
+
+	if (nstate <= ostate || nstate > IEEE80211_S_RUN) {
+		switch (ostate) {
+		case IEEE80211_S_RUN:
+			err = iwx_run_stop(sc);
+			if (err)
+				goto out;
+			/* FALLTHROUGH */
+		case IEEE80211_S_ASSOC:
+		case IEEE80211_S_AUTH:
+			if (nstate <= IEEE80211_S_AUTH) {
+				err = iwx_deauth(sc);
+				if (err)
+					goto out;
+			}
+			/* FALLTHROUGH */
+		case IEEE80211_S_SCAN:
+		case IEEE80211_S_INIT:
+		default:
+			break;
+		}
+	}
+
+	switch (nstate) {
+	case IEEE80211_S_AUTH:
+		err = iwx_auth(vap, sc);
+		break;
+
+	case IEEE80211_S_RUN:
+		err = iwx_run(vap, sc);
+		break;
+
+	case IEEE80211_S_INIT:
+	case IEEE80211_S_SCAN:
+	case IEEE80211_S_ASSOC:
+	default:
+		/* No firmware action needed for these targets. */
+		break;
+	}
+
+out:
+	IWX_UNLOCK(sc);
+
+	return (err);
+}
+
+/*
+ * net80211 vap state-change hook. Runs the firmware transition with
+ * the IC lock dropped, then chains to net80211's saved handler.
+ */
+static int
+iwx_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
+{
+	struct iwx_vap *ivp = IWX_VAP(vap);
+	struct ieee80211com *ic = vap->iv_ic;
+	enum ieee80211_state ostate = vap->iv_state;
+	int ret;
+
+	/*
+	 * Ignore transitions to the current state, except SCAN -> SCAN
+	 * (which triggers another scan iteration) and AUTH -> AUTH
+	 * (needed to support band-steering).
+	 */
+	if (nstate == ostate && nstate != IEEE80211_S_SCAN &&
+	    nstate != IEEE80211_S_AUTH)
+		return 0;
+
+	IEEE80211_UNLOCK(ic);
+	ret = iwx_newstate_sub(vap, nstate);
+	IEEE80211_LOCK(ic);
+	if (ret == 0)
+		ret = ivp->iv_newstate(vap, nstate, arg);
+
+	return (ret);
+}
+
+/*
+ * Finish a firmware scan: clear the scan flags, notify net80211 that
+ * scanning is done, and wake any thread sleeping in iwx_newstate.
+ */
+static void
+iwx_endscan(struct iwx_softc *sc)
+{
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+	const int scanbits = IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN;
+
+	if ((sc->sc_flags & scanbits) == 0)
+		return;
+	sc->sc_flags &= ~scanbits;
+
+	ieee80211_scan_done(vap);
+	wakeup(&vap->iv_state); /* wake up iwx_newstate */
+}
+
+/*
+ * Aging and idle timeouts for the different possible scenarios
+ * in default configuration
+ */
+static const uint32_t
+iwx_sf_full_timeout_def[IWX_SF_NUM_SCENARIO][IWX_SF_NUM_TIMEOUT_TYPES] = {
+ { /* single unicast: aging, idle */
+ htole32(IWX_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
+ htole32(IWX_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
+ },
+ { /* aggregated unicast */
+ htole32(IWX_SF_AGG_UNICAST_AGING_TIMER_DEF),
+ htole32(IWX_SF_AGG_UNICAST_IDLE_TIMER_DEF)
+ },
+ { /* multicast */
+ htole32(IWX_SF_MCAST_AGING_TIMER_DEF),
+ htole32(IWX_SF_MCAST_IDLE_TIMER_DEF)
+ },
+ { /* block-ack */
+ htole32(IWX_SF_BA_AGING_TIMER_DEF),
+ htole32(IWX_SF_BA_IDLE_TIMER_DEF)
+ },
+ { /* TX re-attempt */
+ htole32(IWX_SF_TX_RE_AGING_TIMER_DEF),
+ htole32(IWX_SF_TX_RE_IDLE_TIMER_DEF)
+ },
+};
+
+/*
+ * Aging and idle timeouts for the different possible scenarios
+ * in single BSS MAC configuration.
+ */
+static const uint32_t
+iwx_sf_full_timeout[IWX_SF_NUM_SCENARIO][IWX_SF_NUM_TIMEOUT_TYPES] = {
+ { /* single unicast: aging, idle */
+ htole32(IWX_SF_SINGLE_UNICAST_AGING_TIMER),
+ htole32(IWX_SF_SINGLE_UNICAST_IDLE_TIMER)
+ },
+ { /* aggregated unicast */
+ htole32(IWX_SF_AGG_UNICAST_AGING_TIMER),
+ htole32(IWX_SF_AGG_UNICAST_IDLE_TIMER)
+ },
+ { /* multicast */
+ htole32(IWX_SF_MCAST_AGING_TIMER),
+ htole32(IWX_SF_MCAST_IDLE_TIMER)
+ },
+ { /* block-ack */
+ htole32(IWX_SF_BA_AGING_TIMER),
+ htole32(IWX_SF_BA_IDLE_TIMER)
+ },
+ { /* TX re-attempt */
+ htole32(IWX_SF_TX_RE_AGING_TIMER),
+ htole32(IWX_SF_TX_RE_IDLE_TIMER)
+ },
+};
+
+/*
+ * Populate a smart-fifo configuration command. With a node ('ni') the
+ * FULL_ON watermark is derived from the AP's HT rate set (MIMO vs.
+ * SISO vs. legacy); without one the unassociated defaults are used.
+ */
+static void
+iwx_fill_sf_command(struct iwx_softc *sc, struct iwx_sf_cfg_cmd *sf_cmd,
+    struct ieee80211_node *ni)
+{
+	int i, j, watermark;
+
+	sf_cmd->watermark[IWX_SF_LONG_DELAY_ON] = htole32(IWX_SF_W_MARK_SCAN);
+
+	if (ni == NULL) {
+		/* Default watermark value for unassociated mode. */
+		watermark = IWX_SF_W_MARK_MIMO2;
+	} else if ((ni->ni_flags & IEEE80211_NODE_HT) == 0) {
+		watermark = IWX_SF_W_MARK_LEGACY;
+	} else {
+		/*
+		 * HT rate indices above 7 indicate MIMO capability
+		 * (a second spatial stream) — TODO confirm mapping.
+		 */
+		struct ieee80211_htrateset *htrs = &ni->ni_htrates;
+		int mimo = 0;
+
+		for (i = 0; i < htrs->rs_nrates; i++) {
+			if (htrs->rs_rates[i] > 7) {
+				mimo = 1;
+				break;
+			}
+		}
+		watermark = mimo ? IWX_SF_W_MARK_MIMO2 : IWX_SF_W_MARK_SISO;
+	}
+	sf_cmd->watermark[IWX_SF_FULL_ON] = htole32(watermark);
+
+	/* Long-delay timeouts use one fixed aging value for all cases. */
+	for (i = 0; i < IWX_SF_NUM_SCENARIO; i++) {
+		for (j = 0; j < IWX_SF_NUM_TIMEOUT_TYPES; j++) {
+			sf_cmd->long_delay_timeouts[i][j] =
+			    htole32(IWX_SF_LONG_DELAY_AGING_TIMER);
+		}
+	}
+
+	if (ni != NULL) {
+		memcpy(sf_cmd->full_on_timeouts, iwx_sf_full_timeout,
+		    sizeof(iwx_sf_full_timeout));
+	} else {
+		memcpy(sf_cmd->full_on_timeouts, iwx_sf_full_timeout_def,
+		    sizeof(iwx_sf_full_timeout_def));
+	}
+}
+
+/*
+ * Send a smart-fifo state change to the firmware.
+ * Returns 0 on success, EINVAL for an unknown state, or the command
+ * submission error.
+ */
+static int
+iwx_sf_config(struct iwx_softc *sc, int new_state)
+{
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+	struct iwx_sf_cfg_cmd sf_cmd = {
+		.state = htole32(new_state),
+	};
+
+	switch (new_state) {
+	case IWX_SF_UNINIT:
+	case IWX_SF_INIT_OFF:
+		/* Not associated: fill with default (no-node) values. */
+		iwx_fill_sf_command(sc, &sf_cmd, NULL);
+		break;
+	case IWX_SF_FULL_ON:
+		iwx_fill_sf_command(sc, &sf_cmd, vap->iv_bss);
+		break;
+	default:
+		return EINVAL;
+	}
+
+	return iwx_send_cmd_pdu(sc, IWX_REPLY_SF_CFG_CMD, IWX_CMD_ASYNC,
+	    sizeof(sf_cmd), &sf_cmd);
+}
+
+/*
+ * Configure Bluetooth coexistence: "network wide" mode with the
+ * SYNC2SCO and high-band-retention modules enabled.
+ */
+static int
+iwx_send_bt_init_conf(struct iwx_softc *sc)
+{
+	struct iwx_bt_coex_cmd bt_cmd;
+
+	memset(&bt_cmd, 0, sizeof(bt_cmd));
+	bt_cmd.mode = htole32(IWX_BT_COEX_NW);
+	bt_cmd.enabled_modules = BT_COEX_SYNC2SCO_ENABLED |
+	    BT_COEX_HIGH_BAND_RET;
+
+	return iwx_send_cmd_pdu(sc, IWX_BT_CONFIG, 0, sizeof(bt_cmd),
+	    &bt_cmd);
+}
+
+/*
+ * Send IWX_SOC_CONFIGURATION_CMD with the SoC crystal latency and
+ * flags (discrete vs. integrated part, LTR apply-delay, low-latency
+ * crystal). Returns 0 on success or the command submission error.
+ */
+static int
+iwx_send_soc_conf(struct iwx_softc *sc)
+{
+ struct iwx_soc_configuration_cmd cmd;
+ int err;
+ uint32_t cmd_id, flags = 0;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ /*
+ * In VER_1 of this command, the discrete value is considered
+ * an integer; In VER_2, it's a bitmask. Since we have only 2
+ * values in VER_1, this is backwards-compatible with VER_2,
+ * as long as we don't set any other flag bits.
+ */
+ if (!sc->sc_integrated) { /* VER_1 */
+ flags = IWX_SOC_CONFIG_CMD_FLAGS_DISCRETE;
+ } else { /* VER_2 */
+ uint8_t scan_cmd_ver;
+ if (sc->sc_ltr_delay != IWX_SOC_FLAGS_LTR_APPLY_DELAY_NONE)
+ flags |= (sc->sc_ltr_delay &
+ IWX_SOC_FLAGS_LTR_APPLY_DELAY_MASK);
+ /* Low-latency flag only understood by scan cmd version >= 2. */
+ scan_cmd_ver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP,
+ IWX_SCAN_REQ_UMAC);
+ if (scan_cmd_ver != IWX_FW_CMD_VER_UNKNOWN &&
+ scan_cmd_ver >= 2 && sc->sc_low_latency_xtal)
+ flags |= IWX_SOC_CONFIG_CMD_FLAGS_LOW_LATENCY;
+ }
+ cmd.flags = htole32(flags);
+
+ cmd.latency = htole32(sc->sc_xtal_latency);
+
+ cmd_id = iwx_cmd_id(IWX_SOC_CONFIGURATION_CMD, IWX_SYSTEM_GROUP, 0);
+ err = iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
+ if (err)
+ printf("%s: failed to set soc latency: %d\n", DEVNAME(sc), err);
+ return err;
+}
+
+/*
+ * Send an MCC (mobile country code) update for regulatory domain
+ * 'alpha2' and validate the synchronous response. The response must
+ * be a struct iwx_mcc_update_resp followed by exactly 'n_channels'
+ * channel entries; any size mismatch is treated as EIO.
+ */
+static int
+iwx_send_update_mcc_cmd(struct iwx_softc *sc, const char *alpha2)
+{
+ struct iwx_mcc_update_cmd mcc_cmd;
+ struct iwx_host_cmd hcmd = {
+ .id = IWX_MCC_UPDATE_CMD,
+ .flags = IWX_CMD_WANT_RESP,
+ .data = { &mcc_cmd },
+ };
+ struct iwx_rx_packet *pkt;
+ struct iwx_mcc_update_resp *resp;
+ size_t resp_len;
+ int err;
+
+ memset(&mcc_cmd, 0, sizeof(mcc_cmd));
+ /* Two ASCII letters packed big-endian style into a 16-bit code. */
+ mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
+ if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
+ isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_LAR_MULTI_MCC))
+ mcc_cmd.source_id = IWX_MCC_SOURCE_GET_CURRENT;
+ else
+ mcc_cmd.source_id = IWX_MCC_SOURCE_OLD_FW;
+
+ hcmd.len[0] = sizeof(struct iwx_mcc_update_cmd);
+ hcmd.resp_pkt_len = IWX_CMD_RESP_MAX;
+
+ err = iwx_send_cmd(sc, &hcmd);
+ if (err)
+ return err;
+
+ pkt = hcmd.resp_pkt;
+ if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
+ err = EIO;
+ goto out;
+ }
+
+ resp_len = iwx_rx_packet_payload_len(pkt);
+ if (resp_len < sizeof(*resp)) {
+ err = EIO;
+ goto out;
+ }
+
+ resp = (void *)pkt->data;
+ /* Payload must match header plus the advertised channel array. */
+ if (resp_len != sizeof(*resp) +
+ resp->n_channels * sizeof(resp->channels[0])) {
+ err = EIO;
+ goto out;
+ }
+
+ DPRINTF(("MCC status=0x%x mcc=0x%x cap=0x%x time=0x%x geo_info=0x%x source_id=0x%d n_channels=%u\n",
+ resp->status, resp->mcc, resp->cap, resp->time, resp->geo_info, resp->source_id, resp->n_channels));
+
+out:
+ /* Response buffer is owned by the command layer; release it. */
+ iwx_free_resp(sc, &hcmd);
+
+ return err;
+}
+
+/*
+ * Hand critical-temperature-kill and TX backoff responsibility over
+ * to the firmware by sending an empty temperature reporting command
+ * at init time.
+ */
+static int
+iwx_send_temp_report_ths_cmd(struct iwx_softc *sc)
+{
+	struct iwx_temp_report_ths_cmd cmd;
+	uint32_t cmdid;
+	int ret;
+
+	memset(&cmd, 0, sizeof(cmd));
+
+	cmdid = IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
+	    IWX_TEMP_REPORTING_THRESHOLDS_CMD);
+	ret = iwx_send_cmd_pdu(sc, cmdid, 0, sizeof(cmd), &cmd);
+	if (ret != 0)
+		printf("%s: TEMP_REPORT_THS_CMD command failed (error %d)\n",
+		    DEVNAME(sc), ret);
+
+	return ret;
+}
+
+/*
+ * Post-firmware-load hardware initialization: TX antenna and PHY
+ * config, BT coexistence, SoC latency, DQA, PHY contexts, LTR,
+ * thermal reporting, device power, LAR regulatory and scan config.
+ *
+ * The NIC lock is held for the whole sequence; every failure path
+ * after iwx_nic_lock() must exit through the 'err' label so the lock
+ * is released. (The BT-coex, SoC-config and DQA paths previously
+ * returned directly and leaked the lock.)
+ * Returns 0 on success or an errno value.
+ */
+static int
+iwx_init_hw(struct iwx_softc *sc)
+{
+	struct ieee80211com *ic = &sc->sc_ic;
+	int err = 0, i;
+
+	err = iwx_run_init_mvm_ucode(sc, 0);
+	if (err)
+		return err;
+
+	if (!iwx_nic_lock(sc))
+		return EBUSY;
+
+	err = iwx_send_tx_ant_cfg(sc, iwx_fw_valid_tx_ant(sc));
+	if (err) {
+		printf("%s: could not init tx ant config (error %d)\n",
+		    DEVNAME(sc), err);
+		goto err;
+	}
+
+	if (sc->sc_tx_with_siso_diversity) {
+		err = iwx_send_phy_cfg_cmd(sc);
+		if (err) {
+			printf("%s: could not send phy config (error %d)\n",
+			    DEVNAME(sc), err);
+			goto err;
+		}
+	}
+
+	err = iwx_send_bt_init_conf(sc);
+	if (err) {
+		printf("%s: could not init bt coex (error %d)\n",
+		    DEVNAME(sc), err);
+		goto err;
+	}
+
+	err = iwx_send_soc_conf(sc);
+	if (err) {
+		printf("%s: iwx_send_soc_conf failed\n", __func__);
+		goto err;
+	}
+
+	if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_DQA_SUPPORT)) {
+		err = iwx_send_dqa_cmd(sc);
+		if (err) {
+			printf("%s: IWX_UCODE_TLV_CAPA_DQA_SUPPORT "
+			    "failed (error %d)\n", __func__, err);
+			goto err;
+		}
+	}
+
+	// TODO phyctxt
+	for (i = 0; i < IWX_NUM_PHY_CTX; i++) {
+		/*
+		 * The channel used here isn't relevant as it's
+		 * going to be overwritten in the other flows.
+		 * For now use the first channel we have.
+		 */
+		sc->sc_phyctxt[i].id = i;
+		sc->sc_phyctxt[i].channel = &ic->ic_channels[1];
+		err = iwx_phy_ctxt_cmd(sc, &sc->sc_phyctxt[i], 1, 1,
+		    IWX_FW_CTXT_ACTION_ADD, 0, 0, 0);
+		if (err) {
+			printf("%s: could not add phy context %d (error %d)\n",
+			    DEVNAME(sc), i, err);
+			goto err;
+		}
+		if (iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
+		    IWX_RLC_CONFIG_CMD) == 2) {
+			err = iwx_phy_send_rlc(sc, &sc->sc_phyctxt[i], 1, 1);
+			if (err) {
+				printf("%s: could not configure RLC for PHY "
+				    "%d (error %d)\n", DEVNAME(sc), i, err);
+				goto err;
+			}
+		}
+	}
+
+	/* LTR failure is logged but not fatal. */
+	err = iwx_config_ltr(sc);
+	if (err) {
+		printf("%s: PCIe LTR configuration failed (error %d)\n",
+		    DEVNAME(sc), err);
+	}
+
+	if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CT_KILL_BY_FW)) {
+		err = iwx_send_temp_report_ths_cmd(sc);
+		if (err) {
+			printf("%s: iwx_send_temp_report_ths_cmd failed\n",
+			    __func__);
+			goto err;
+		}
+	}
+
+	err = iwx_power_update_device(sc);
+	if (err) {
+		printf("%s: could not send power command (error %d)\n",
+		    DEVNAME(sc), err);
+		goto err;
+	}
+
+	if (sc->sc_nvm.lar_enabled) {
+		/* "ZZ" requests the firmware's current/default MCC. */
+		err = iwx_send_update_mcc_cmd(sc, "ZZ");
+		if (err) {
+			printf("%s: could not init LAR (error %d)\n",
+			    DEVNAME(sc), err);
+			goto err;
+		}
+	}
+
+	err = iwx_config_umac_scan_reduced(sc);
+	if (err) {
+		printf("%s: could not configure scan (error %d)\n",
+		    DEVNAME(sc), err);
+		goto err;
+	}
+
+	err = iwx_disable_beacon_filter(sc);
+	if (err) {
+		printf("%s: could not disable beacon filter (error %d)\n",
+		    DEVNAME(sc), err);
+		goto err;
+	}
+
+err:
+	iwx_nic_unlock(sc);
+	return err;
+}
+
+/*
+ * Program the firmware multicast filter: accept our own frames and
+ * pass all multicast (pass_all) for the current BSSID.
+ * Returns 0 on success, ENOMEM, or the command submission error.
+ */
+static int
+iwx_allow_mcast(struct iwx_softc *sc)
+{
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+	struct iwx_node *in = IWX_NODE(vap->iv_bss);
+	struct iwx_mcast_filter_cmd *cmd;
+	size_t cmdlen;
+	int ret;
+
+	/* Command length must be 32-bit aligned. */
+	cmdlen = roundup(sizeof(*cmd), 4);
+	cmd = malloc(cmdlen, M_DEVBUF, M_NOWAIT | M_ZERO);
+	if (cmd == NULL)
+		return ENOMEM;
+
+	cmd->filter_own = 1;
+	cmd->port_id = 0;
+	cmd->count = 0;
+	cmd->pass_all = 1;
+	IEEE80211_ADDR_COPY(cmd->bssid, in->in_macaddr);
+
+	ret = iwx_send_cmd_pdu(sc, IWX_MCAST_FILTER_CMD, 0, cmdlen, cmd);
+	free(cmd, M_DEVBUF);
+	return ret;
+}
+
+/*
+ * Bring the device up: start the hardware, run firmware init, and
+ * arm the watchdog. Returns 0 on success or an errno value.
+ */
+static int
+iwx_init(struct iwx_softc *sc)
+{
+	int ret, generation;
+
+	generation = ++sc->sc_generation;
+
+	iwx_preinit(sc);
+
+	ret = iwx_start_hw(sc);
+	if (ret != 0) {
+		printf("%s: iwx_start_hw failed\n", __func__);
+		return ret;
+	}
+
+	ret = iwx_init_hw(sc);
+	if (ret != 0) {
+		/* Only stop the device if nobody else restarted it. */
+		if (generation == sc->sc_generation)
+			iwx_stop_device(sc);
+		printf("%s: iwx_init_hw failed (error %d)\n", __func__, ret);
+		return ret;
+	}
+
+	sc->sc_flags |= IWX_FLAG_HW_INITED;
+	callout_reset(&sc->watchdog_to, hz, iwx_watchdog, sc);
+
+	return 0;
+}
+
+/*
+ * Drain the driver send queue into the firmware TX path. Stops as
+ * soon as any TX queue is full (qfullmsk) or the queue is empty;
+ * frames that fail to transmit count as output errors.
+ */
+static void
+iwx_start(struct iwx_softc *sc)
+{
+	struct ieee80211_node *ni;
+	struct mbuf *m;
+
+	for (;;) {
+		if (sc->qfullmsk != 0)
+			break;
+		m = mbufq_dequeue(&sc->sc_snd);
+		if (m == NULL)
+			break;
+		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
+		if (iwx_tx(sc, m, ni) != 0)
+			if_inc_counter(ni->ni_vap->iv_ifp,
+			    IFCOUNTER_OERRORS, 1);
+	}
+}
+
+/*
+ * Stop the device and reset all driver soft state: bump the
+ * generation counter, drop the PHY context, clear every run-time
+ * flag and all block-ack session bookkeeping.
+ */
+static void
+iwx_stop(struct iwx_softc *sc)
+{
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+	struct iwx_vap *ivp = IWX_VAP(vap);
+
+	iwx_stop_device(sc);
+
+	/* Reset soft state. */
+	sc->sc_generation++;
+	ivp->phy_ctxt = NULL;
+
+	sc->sc_flags &= ~(IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN |
+	    IWX_FLAG_MAC_ACTIVE | IWX_FLAG_BINDING_ACTIVE |
+	    IWX_FLAG_STA_ACTIVE | IWX_FLAG_TE_ACTIVE |
+	    IWX_FLAG_HW_ERR | IWX_FLAG_SHUTDOWN | IWX_FLAG_TXFLUSH);
+
+	sc->sc_rx_ba_sessions = 0;
+	sc->ba_rx.start_tidmask = 0;
+	sc->ba_rx.stop_tidmask = 0;
+	memset(sc->aggqid, 0, sizeof(sc->aggqid));
+	sc->ba_tx.start_tidmask = 0;
+	sc->ba_tx.stop_tidmask = 0;
+}
+
+/*
+ * Per-second watchdog. Each TX queue has its own timer because Tx
+ * aggregation queues can get "stuck" while other queues keep working
+ * (the Linux driver uses a similar workaround). When a timer expires
+ * the firmware error log and driver status are dumped and a full
+ * restart is requested; otherwise the callout is re-armed.
+ */
+static void
+iwx_watchdog(void *arg)
+{
+	struct iwx_softc *sc = arg;
+	struct ieee80211com *ic = &sc->sc_ic;
+	int i;
+
+	for (i = 0; i < nitems(sc->sc_tx_timer); i++) {
+		if (sc->sc_tx_timer[i] == 0)
+			continue;
+		if (--sc->sc_tx_timer[i] > 0)
+			continue;
+
+		printf("%s: device timeout\n", DEVNAME(sc));
+		if (sc->sc_debug)
+			iwx_bbl_print_log();
+		iwx_nic_error(sc);
+		iwx_dump_driver_status(sc);
+		ieee80211_restart_all(ic);
+		return;
+	}
+
+	callout_reset(&sc->watchdog_to, hz, iwx_watchdog, sc);
+}
+
+/*
+ * Note: This structure is read from the device with IO accesses,
+ * and the reading already does the endian conversion. As it is
+ * read with uint32_t-sized accesses, any members with a different size
+ * need to be ordered correctly though!
+ */
+/* LMAC error log layout; read from device memory with iwx_read_mem()
+ * in iwx_nic_error(). */
+struct iwx_error_event_table {
+ uint32_t valid; /* (nonzero) valid, (0) log is empty */
+ uint32_t error_id; /* type of error */
+ uint32_t trm_hw_status0; /* TRM HW status */
+ uint32_t trm_hw_status1; /* TRM HW status */
+ uint32_t blink2; /* branch link */
+ uint32_t ilink1; /* interrupt link */
+ uint32_t ilink2; /* interrupt link */
+ uint32_t data1; /* error-specific data */
+ uint32_t data2; /* error-specific data */
+ uint32_t data3; /* error-specific data */
+ uint32_t bcon_time; /* beacon timer */
+ uint32_t tsf_low; /* network timestamp function timer */
+ uint32_t tsf_hi; /* network timestamp function timer */
+ uint32_t gp1; /* GP1 timer register */
+ uint32_t gp2; /* GP2 timer register */
+ uint32_t fw_rev_type; /* firmware revision type */
+ uint32_t major; /* uCode version major */
+ uint32_t minor; /* uCode version minor */
+ uint32_t hw_ver; /* HW Silicon version */
+ uint32_t brd_ver; /* HW board version */
+ uint32_t log_pc; /* log program counter */
+ uint32_t frame_ptr; /* frame pointer */
+ uint32_t stack_ptr; /* stack pointer */
+ uint32_t hcmd; /* last host command header */
+ uint32_t isr0; /* isr status register LMPM_NIC_ISR0:
+ * rxtx_flag */
+ uint32_t isr1; /* isr status register LMPM_NIC_ISR1:
+ * host_flag */
+ uint32_t isr2; /* isr status register LMPM_NIC_ISR2:
+ * enc_flag */
+ uint32_t isr3; /* isr status register LMPM_NIC_ISR3:
+ * time_flag */
+ uint32_t isr4; /* isr status register LMPM_NIC_ISR4:
+ * wico interrupt */
+ uint32_t last_cmd_id; /* last HCMD id handled by the firmware */
+ uint32_t wait_event; /* wait event() caller address */
+ uint32_t l2p_control; /* L2pControlField */
+ uint32_t l2p_duration; /* L2pDurationField */
+ uint32_t l2p_mhvalid; /* L2pMhValidBits */
+ uint32_t l2p_addr_match; /* L2pAddrMatchStat */
+ uint32_t lmpm_pmg_sel; /* indicate which clocks are turned on
+ * (LMPM_PMG_SEL) */
+ uint32_t u_timestamp; /* indicate when the date and time of the
+ * compilation */
+ uint32_t flow_handler; /* FH read/write pointers, RX credit */
+} __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
+
+/*
+ * UMAC error struct - relevant starting from family 8000 chip.
+ * Note: This structure is read from the device with IO accesses,
+ * and the reading already does the endian conversion. As it is
+ * read with u32-sized accesses, any members with a different size
+ * need to be ordered correctly though!
+ */
+/* UMAC error log layout; read from device memory with iwx_read_mem()
+ * in iwx_nic_umac_error(). */
+struct iwx_umac_error_event_table {
+ uint32_t valid; /* (nonzero) valid, (0) log is empty */
+ uint32_t error_id; /* type of error */
+ uint32_t blink1; /* branch link */
+ uint32_t blink2; /* branch link */
+ uint32_t ilink1; /* interrupt link */
+ uint32_t ilink2; /* interrupt link */
+ uint32_t data1; /* error-specific data */
+ uint32_t data2; /* error-specific data */
+ uint32_t data3; /* error-specific data */
+ uint32_t umac_major;
+ uint32_t umac_minor;
+ uint32_t frame_pointer; /* core register 27*/
+ uint32_t stack_pointer; /* core register 28 */
+ uint32_t cmd_header; /* latest host cmd sent to UMAC */
+ uint32_t nic_isr_pref; /* ISR status register */
+} __packed;
+
+/* Error log geometry: one leading 'valid' word, then elements of
+ * seven 32-bit words each — presumably matching the firmware log
+ * layout; used only for the validity heuristic below. */
+#define ERROR_START_OFFSET (1 * sizeof(uint32_t))
+#define ERROR_ELEM_SIZE (7 * sizeof(uint32_t))
+
+/*
+ * Read and dump the UMAC error event table from device memory.
+ * The table address was captured from the firmware ALIVE response.
+ */
+static void
+iwx_nic_umac_error(struct iwx_softc *sc)
+{
+ struct iwx_umac_error_event_table table;
+ uint32_t base;
+
+ base = sc->sc_uc.uc_umac_error_event_table;
+
+ /* Addresses below 0x400000 cannot be a valid log pointer. */
+ if (base < 0x400000) {
+ printf("%s: Invalid error log pointer 0x%08x\n",
+ DEVNAME(sc), base);
+ return;
+ }
+
+ /* iwx_read_mem() takes a word count, not a byte count. */
+ if (iwx_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
+ printf("%s: reading errlog failed\n", DEVNAME(sc));
+ return;
+ }
+
+ if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
+ printf("%s: Start UMAC Error Log Dump:\n", DEVNAME(sc));
+ printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
+ sc->sc_flags, table.valid);
+ }
+
+ printf("%s: 0x%08X | %s\n", DEVNAME(sc), table.error_id,
+ iwx_desc_lookup(table.error_id));
+ printf("%s: 0x%08X | umac branchlink1\n", DEVNAME(sc), table.blink1);
+ printf("%s: 0x%08X | umac branchlink2\n", DEVNAME(sc), table.blink2);
+ printf("%s: 0x%08X | umac interruptlink1\n", DEVNAME(sc), table.ilink1);
+ printf("%s: 0x%08X | umac interruptlink2\n", DEVNAME(sc), table.ilink2);
+ printf("%s: 0x%08X | umac data1\n", DEVNAME(sc), table.data1);
+ printf("%s: 0x%08X | umac data2\n", DEVNAME(sc), table.data2);
+ printf("%s: 0x%08X | umac data3\n", DEVNAME(sc), table.data3);
+ printf("%s: 0x%08X | umac major\n", DEVNAME(sc), table.umac_major);
+ printf("%s: 0x%08X | umac minor\n", DEVNAME(sc), table.umac_minor);
+ printf("%s: 0x%08X | frame pointer\n", DEVNAME(sc),
+ table.frame_pointer);
+ printf("%s: 0x%08X | stack pointer\n", DEVNAME(sc),
+ table.stack_pointer);
+ printf("%s: 0x%08X | last host cmd\n", DEVNAME(sc), table.cmd_header);
+ printf("%s: 0x%08X | isr status reg\n", DEVNAME(sc),
+ table.nic_isr_pref);
+}
+
+/* Top nibble of an error id encodes the originating CPU; it is
+ * masked off before table lookup. */
+#define IWX_FW_SYSASSERT_CPU_MASK 0xf0000000
+/* Firmware error ids and their symbolic names. The final
+ * "ADVANCED_SYSASSERT" entry is the catch-all for unknown ids
+ * (see iwx_desc_lookup()). */
+static struct {
+ const char *name;
+ uint8_t num;
+} advanced_lookup[] = {
+ { "NMI_INTERRUPT_WDG", 0x34 },
+ { "SYSASSERT", 0x35 },
+ { "UCODE_VERSION_MISMATCH", 0x37 },
+ { "BAD_COMMAND", 0x38 },
+ { "BAD_COMMAND", 0x39 },
+ { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
+ { "FATAL_ERROR", 0x3D },
+ { "NMI_TRM_HW_ERR", 0x46 },
+ { "NMI_INTERRUPT_TRM", 0x4C },
+ { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
+ { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
+ { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
+ { "NMI_INTERRUPT_HOST", 0x66 },
+ { "NMI_INTERRUPT_LMAC_FATAL", 0x70 },
+ { "NMI_INTERRUPT_UMAC_FATAL", 0x71 },
+ { "NMI_INTERRUPT_OTHER_LMAC_FATAL", 0x73 },
+ { "NMI_INTERRUPT_ACTION_PT", 0x7C },
+ { "NMI_INTERRUPT_UNKNOWN", 0x84 },
+ { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
+ { "ADVANCED_SYSASSERT", 0 },
+};
+
+/*
+ * Map a firmware error id to its symbolic name. The CPU bits
+ * (IWX_FW_SYSASSERT_CPU_MASK) are stripped before comparison;
+ * unknown ids fall through to the final "ADVANCED_SYSASSERT" entry.
+ */
+static const char *
+iwx_desc_lookup(uint32_t num)
+{
+	uint32_t id = num & ~IWX_FW_SYSASSERT_CPU_MASK;
+	int i;
+
+	for (i = 0; i < nitems(advanced_lookup) - 1; i++) {
+		if (advanced_lookup[i].num == id)
+			return advanced_lookup[i].name;
+	}
+
+	/* Fell off the end: the last entry is the catch-all. */
+	return advanced_lookup[i].name;
+}
+
+/*
+ * Support for dumping the error log seemed like a good idea ...
+ * but it's mostly hex junk and the only sensible thing is the
+ * hw/ucode revision (which we know anyway). Since it's here,
+ * I'll just leave it in, just in case e.g. the Intel guys want to
+ * help us decipher some "ADVANCED_SYSASSERT" later.
+ */
+static void
+iwx_nic_error(struct iwx_softc *sc)
+{
+	struct iwx_error_event_table table;
+	uint32_t base;
+
+	printf("%s: dumping device error log\n", DEVNAME(sc));
+	base = sc->sc_uc.uc_lmac_error_event_table[0];
+	/* Addresses below 0x400000 cannot be a valid log pointer. */
+	if (base < 0x400000) {
+		printf("%s: Invalid error log pointer 0x%08x\n",
+		    DEVNAME(sc), base);
+		return;
+	}
+
+	/* iwx_read_mem() takes a word count, not a byte count. */
+	if (iwx_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
+		printf("%s: reading errlog failed\n", DEVNAME(sc));
+		return;
+	}
+
+	if (!table.valid) {
+		printf("%s: errlog not found, skipping\n", DEVNAME(sc));
+		return;
+	}
+
+	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
+		printf("%s: Start Error Log Dump:\n", DEVNAME(sc));
+		printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
+		    sc->sc_flags, table.valid);
+	}
+
+	printf("%s: 0x%08X | %-28s\n", DEVNAME(sc), table.error_id,
+	    iwx_desc_lookup(table.error_id));
+	printf("%s: %08X | trm_hw_status0\n", DEVNAME(sc),
+	    table.trm_hw_status0);
+	printf("%s: %08X | trm_hw_status1\n", DEVNAME(sc),
+	    table.trm_hw_status1);
+	printf("%s: %08X | branchlink2\n", DEVNAME(sc), table.blink2);
+	printf("%s: %08X | interruptlink1\n", DEVNAME(sc), table.ilink1);
+	printf("%s: %08X | interruptlink2\n", DEVNAME(sc), table.ilink2);
+	printf("%s: %08X | data1\n", DEVNAME(sc), table.data1);
+	printf("%s: %08X | data2\n", DEVNAME(sc), table.data2);
+	printf("%s: %08X | data3\n", DEVNAME(sc), table.data3);
+	printf("%s: %08X | beacon time\n", DEVNAME(sc), table.bcon_time);
+	printf("%s: %08X | tsf low\n", DEVNAME(sc), table.tsf_low);
+	printf("%s: %08X | tsf hi\n", DEVNAME(sc), table.tsf_hi);
+	printf("%s: %08X | time gp1\n", DEVNAME(sc), table.gp1);
+	printf("%s: %08X | time gp2\n", DEVNAME(sc), table.gp2);
+	printf("%s: %08X | uCode revision type\n", DEVNAME(sc),
+	    table.fw_rev_type);
+	printf("%s: %08X | uCode version major\n", DEVNAME(sc),
+	    table.major);
+	printf("%s: %08X | uCode version minor\n", DEVNAME(sc),
+	    table.minor);
+	printf("%s: %08X | hw version\n", DEVNAME(sc), table.hw_ver);
+	printf("%s: %08X | board version\n", DEVNAME(sc), table.brd_ver);
+	printf("%s: %08X | hcmd\n", DEVNAME(sc), table.hcmd);
+	printf("%s: %08X | isr0\n", DEVNAME(sc), table.isr0);
+	printf("%s: %08X | isr1\n", DEVNAME(sc), table.isr1);
+	printf("%s: %08X | isr2\n", DEVNAME(sc), table.isr2);
+	printf("%s: %08X | isr3\n", DEVNAME(sc), table.isr3);
+	printf("%s: %08X | isr4\n", DEVNAME(sc), table.isr4);
+	printf("%s: %08X | last cmd Id\n", DEVNAME(sc), table.last_cmd_id);
+	printf("%s: %08X | wait_event\n", DEVNAME(sc), table.wait_event);
+	printf("%s: %08X | l2p_control\n", DEVNAME(sc), table.l2p_control);
+	printf("%s: %08X | l2p_duration\n", DEVNAME(sc), table.l2p_duration);
+	printf("%s: %08X | l2p_mhvalid\n", DEVNAME(sc), table.l2p_mhvalid);
+	printf("%s: %08X | l2p_addr_match\n", DEVNAME(sc), table.l2p_addr_match);
+	printf("%s: %08X | lmpm_pmg_sel\n", DEVNAME(sc), table.lmpm_pmg_sel);
+	printf("%s: %08X | timestamp\n", DEVNAME(sc), table.u_timestamp);
+	printf("%s: %08X | flow_handler\n", DEVNAME(sc), table.flow_handler);
+
+	/* The UMAC keeps its own error table; dump it too if present. */
+	if (sc->sc_uc.uc_umac_error_event_table)
+		iwx_nic_umac_error(sc);
+}
+
+/*
+ * Print a one-line summary of every TX ring, the RX ring position,
+ * and the current 802.11 state. Used from error/timeout paths.
+ */
+static void
+iwx_dump_driver_status(struct iwx_softc *sc)
+{
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+	enum ieee80211_state state = vap->iv_state;
+	struct iwx_tx_ring *ring;
+	int i;
+
+	printf("driver status:\n");
+	for (i = 0; i < nitems(sc->txq); i++) {
+		ring = &sc->txq[i];
+		printf(" tx ring %2d: qid=%-2d cur=%-3d "
+		    "cur_hw=%-3d queued=%-3d\n",
+		    i, ring->qid, ring->cur, ring->cur_hw,
+		    ring->queued);
+	}
+	printf(" rx ring: cur=%d\n", sc->rxq.cur);
+	printf(" 802.11 state %s\n", ieee80211_state_name[state]);
+}
+
+/*
+ * Sync the RX DMA map for CPU reads and point _var_ at the payload
+ * immediately following the packet header _pkt_.
+ * NOTE(review): relies on 'sc' and 'data' being in scope at the
+ * expansion site.
+ */
+#define SYNC_RESP_STRUCT(_var_, _pkt_) \
+do { \
+ bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); \
+ _var_ = (void *)((_pkt_)+1); \
+} while (/*CONSTCOND*/0)
+
+/*
+ * A received packet is valid unless its qid/idx/code are all zero or
+ * its length field carries the "frame invalid" marker.
+ */
+static int
+iwx_rx_pkt_valid(struct iwx_rx_packet *pkt)
+{
+	int qid = pkt->hdr.qid & ~0x80;
+	int idx = pkt->hdr.idx;
+	int code = IWX_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
+
+	if (qid == 0 && idx == 0 && code == 0)
+		return 0;
+	if (pkt->len_n_flags == htole32(IWX_FH_RSCSR_FRAME_INVALID))
+		return 0;
+	return 1;
+}
+
+static void
+iwx_rx_pkt(struct iwx_softc *sc, struct iwx_rx_data *data, struct mbuf *ml)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct iwx_rx_packet *pkt, *nextpkt;
+ uint32_t offset = 0, nextoff = 0, nmpdu = 0, len;
+ struct mbuf *m0, *m;
+ const size_t minsz = sizeof(pkt->len_n_flags) + sizeof(pkt->hdr);
+ int qid, idx, code, handled = 1;
+
+ m0 = data->m;
+ while (m0 && offset + minsz < IWX_RBUF_SIZE) {
+ pkt = (struct iwx_rx_packet *)(m0->m_data + offset);
+ qid = pkt->hdr.qid;
+ idx = pkt->hdr.idx;
+ code = IWX_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
+
+ if (!iwx_rx_pkt_valid(pkt))
+ break;
+#ifdef IWX_DEBUG
+ iwx_bbl_add_entry(pkt->hdr.code, IWX_BBL_CMD_RX);
+#endif
+ /*
+ * XXX Intel inside (tm)
+ * Any commands in the LONG_GROUP could actually be in the
+ * LEGACY group. Firmware API versions >= 50 reject commands
+ * in group 0, forcing us to use this hack.
+ */
+ if (iwx_cmd_groupid(code) == IWX_LONG_GROUP) {
+ struct iwx_tx_ring *ring = &sc->txq[qid];
+ struct iwx_tx_data *txdata = &ring->data[idx];
+ if (txdata->flags & IWX_TXDATA_FLAG_CMD_IS_NARROW)
+ code = iwx_cmd_opcode(code);
+ }
+
+ len = sizeof(pkt->len_n_flags) + iwx_rx_packet_len(pkt);
+ if (len < minsz || len > (IWX_RBUF_SIZE - offset))
+ break;
+
+ // TODO ???
+ if (code == IWX_REPLY_RX_MPDU_CMD && ++nmpdu == 1) {
+ /* Take mbuf m0 off the RX ring. */
+ if (iwx_rx_addbuf(sc, IWX_RBUF_SIZE, sc->rxq.cur)) {
+ break;
+ }
+ KASSERT((data->m != m0), ("%s: data->m != m0", __func__));
+ }
+
+ switch (code) {
+ case IWX_REPLY_RX_PHY_CMD:
+ /* XXX-THJ: I've not managed to hit this path in testing */
+ iwx_rx_rx_phy_cmd(sc, pkt, data);
+ break;
+
+ case IWX_REPLY_RX_MPDU_CMD: {
+ size_t maxlen = IWX_RBUF_SIZE - offset - minsz;
+ nextoff = offset +
+ roundup(len, IWX_FH_RSCSR_FRAME_ALIGN);
+ nextpkt = (struct iwx_rx_packet *)
+ (m0->m_data + nextoff);
+ /* AX210 devices ship only one packet per Rx buffer. */
+ if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210 ||
+ nextoff + minsz >= IWX_RBUF_SIZE ||
+ !iwx_rx_pkt_valid(nextpkt)) {
+ /* No need to copy last frame in buffer. */
+ if (offset > 0)
+ m_adj(m0, offset);
+ iwx_rx_mpdu_mq(sc, m0, pkt->data, maxlen);
+ m0 = NULL; /* stack owns m0 now; abort loop */
+ } else {
+ /*
+ * Create an mbuf which points to the current
+ * packet. Always copy from offset zero to
+ * preserve m_pkthdr.
+ */
+ m = m_copym(m0, 0, M_COPYALL, M_NOWAIT);
+ if (m == NULL) {
+ m_freem(m0);
+ m0 = NULL;
+ break;
+ }
+ m_adj(m, offset);
+ iwx_rx_mpdu_mq(sc, m, pkt->data, maxlen);
+ }
+ break;
+ }
+
+// case IWX_BAR_FRAME_RELEASE:
+// iwx_rx_bar_frame_release(sc, pkt, ml);
+// break;
+//
+ case IWX_TX_CMD:
+ iwx_rx_tx_cmd(sc, pkt, data);
+ break;
+
+ case IWX_BA_NOTIF:
+ iwx_rx_compressed_ba(sc, pkt);
+ break;
+
+ case IWX_MISSED_BEACONS_NOTIFICATION:
+ iwx_rx_bmiss(sc, pkt, data);
+ DPRINTF(("%s: IWX_MISSED_BEACONS_NOTIFICATION\n",
+ __func__));
+ ieee80211_beacon_miss(ic);
+ break;
+
+ case IWX_MFUART_LOAD_NOTIFICATION:
+ break;
+
+ case IWX_ALIVE: {
+ struct iwx_alive_resp_v4 *resp4;
+ struct iwx_alive_resp_v5 *resp5;
+ struct iwx_alive_resp_v6 *resp6;
+
+ DPRINTF(("%s: firmware alive\n", __func__));
+ sc->sc_uc.uc_ok = 0;
+
+ /*
+ * For v5 and above, we can check the version, for older
+ * versions we need to check the size.
+ */
+ if (iwx_lookup_notif_ver(sc, IWX_LEGACY_GROUP,
+ IWX_ALIVE) == 6) {
+ SYNC_RESP_STRUCT(resp6, pkt);
+ if (iwx_rx_packet_payload_len(pkt) !=
+ sizeof(*resp6)) {
+ sc->sc_uc.uc_intr = 1;
+ wakeup(&sc->sc_uc);
+ break;
+ }
+ sc->sc_uc.uc_lmac_error_event_table[0] = le32toh(
+ resp6->lmac_data[0].dbg_ptrs.error_event_table_ptr);
+ sc->sc_uc.uc_lmac_error_event_table[1] = le32toh(
+ resp6->lmac_data[1].dbg_ptrs.error_event_table_ptr);
+ sc->sc_uc.uc_log_event_table = le32toh(
+ resp6->lmac_data[0].dbg_ptrs.log_event_table_ptr);
+ sc->sc_uc.uc_umac_error_event_table = le32toh(
+ resp6->umac_data.dbg_ptrs.error_info_addr);
+ sc->sc_sku_id[0] =
+ le32toh(resp6->sku_id.data[0]);
+ sc->sc_sku_id[1] =
+ le32toh(resp6->sku_id.data[1]);
+ sc->sc_sku_id[2] =
+ le32toh(resp6->sku_id.data[2]);
+ if (resp6->status == IWX_ALIVE_STATUS_OK) {
+ sc->sc_uc.uc_ok = 1;
+ }
+ } else if (iwx_lookup_notif_ver(sc, IWX_LEGACY_GROUP,
+ IWX_ALIVE) == 5) {
+ SYNC_RESP_STRUCT(resp5, pkt);
+ if (iwx_rx_packet_payload_len(pkt) !=
+ sizeof(*resp5)) {
+ sc->sc_uc.uc_intr = 1;
+ wakeup(&sc->sc_uc);
+ break;
+ }
+ sc->sc_uc.uc_lmac_error_event_table[0] = le32toh(
+ resp5->lmac_data[0].dbg_ptrs.error_event_table_ptr);
+ sc->sc_uc.uc_lmac_error_event_table[1] = le32toh(
+ resp5->lmac_data[1].dbg_ptrs.error_event_table_ptr);
+ sc->sc_uc.uc_log_event_table = le32toh(
+ resp5->lmac_data[0].dbg_ptrs.log_event_table_ptr);
+ sc->sc_uc.uc_umac_error_event_table = le32toh(
+ resp5->umac_data.dbg_ptrs.error_info_addr);
+ sc->sc_sku_id[0] =
+ le32toh(resp5->sku_id.data[0]);
+ sc->sc_sku_id[1] =
+ le32toh(resp5->sku_id.data[1]);
+ sc->sc_sku_id[2] =
+ le32toh(resp5->sku_id.data[2]);
+ if (resp5->status == IWX_ALIVE_STATUS_OK)
+ sc->sc_uc.uc_ok = 1;
+ } else if (iwx_rx_packet_payload_len(pkt) == sizeof(*resp4)) {
+ SYNC_RESP_STRUCT(resp4, pkt);
+ sc->sc_uc.uc_lmac_error_event_table[0] = le32toh(
+ resp4->lmac_data[0].dbg_ptrs.error_event_table_ptr);
+ sc->sc_uc.uc_lmac_error_event_table[1] = le32toh(
+ resp4->lmac_data[1].dbg_ptrs.error_event_table_ptr);
+ sc->sc_uc.uc_log_event_table = le32toh(
+ resp4->lmac_data[0].dbg_ptrs.log_event_table_ptr);
+ sc->sc_uc.uc_umac_error_event_table = le32toh(
+ resp4->umac_data.dbg_ptrs.error_info_addr);
+ if (resp4->status == IWX_ALIVE_STATUS_OK)
+ sc->sc_uc.uc_ok = 1;
+ } else
+ printf("unknown payload version");
+
+ sc->sc_uc.uc_intr = 1;
+ wakeup(&sc->sc_uc);
+ break;
+ }
+
+ case IWX_STATISTICS_NOTIFICATION: {
+ struct iwx_notif_statistics *stats;
+ SYNC_RESP_STRUCT(stats, pkt);
+ memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
+ sc->sc_noise = iwx_get_noise(&stats->rx.general);
+ break;
+ }
+
+ case IWX_DTS_MEASUREMENT_NOTIFICATION:
+ case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
+ IWX_DTS_MEASUREMENT_NOTIF_WIDE):
+ case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
+ IWX_TEMP_REPORTING_THRESHOLDS_CMD):
+ break;
+
+ case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
+ IWX_CT_KILL_NOTIFICATION): {
+ struct iwx_ct_kill_notif *notif;
+ SYNC_RESP_STRUCT(notif, pkt);
+ printf("%s: device at critical temperature (%u degC), "
+ "stopping device\n",
+ DEVNAME(sc), le16toh(notif->temperature));
+ sc->sc_flags |= IWX_FLAG_HW_ERR;
+ ieee80211_restart_all(ic);
+ break;
+ }
+
+ case IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
+ IWX_SCD_QUEUE_CONFIG_CMD):
+ case IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
+ IWX_RX_BAID_ALLOCATION_CONFIG_CMD):
+ case IWX_WIDE_ID(IWX_MAC_CONF_GROUP,
+ IWX_SESSION_PROTECTION_CMD):
+ case IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
+ IWX_NVM_GET_INFO):
+ case IWX_ADD_STA_KEY:
+ case IWX_PHY_CONFIGURATION_CMD:
+ case IWX_TX_ANT_CONFIGURATION_CMD:
+ case IWX_ADD_STA:
+ case IWX_MAC_CONTEXT_CMD:
+ case IWX_REPLY_SF_CFG_CMD:
+ case IWX_POWER_TABLE_CMD:
+ case IWX_LTR_CONFIG:
+ case IWX_PHY_CONTEXT_CMD:
+ case IWX_BINDING_CONTEXT_CMD:
+ case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_CFG_CMD):
+ case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_REQ_UMAC):
+ case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_ABORT_UMAC):
+ case IWX_REPLY_BEACON_FILTERING_CMD:
+ case IWX_MAC_PM_POWER_TABLE:
+ case IWX_TIME_QUOTA_CMD:
+ case IWX_REMOVE_STA:
+ case IWX_TXPATH_FLUSH:
+ case IWX_BT_CONFIG:
+ case IWX_MCC_UPDATE_CMD:
+ case IWX_TIME_EVENT_CMD:
+ case IWX_STATISTICS_CMD:
+ case IWX_SCD_QUEUE_CFG: {
+ size_t pkt_len;
+
+ if (sc->sc_cmd_resp_pkt[idx] == NULL)
+ break;
+
+ bus_dmamap_sync(sc->rxq.data_dmat, data->map,
+ BUS_DMASYNC_POSTREAD);
+
+ pkt_len = sizeof(pkt->len_n_flags) +
+ iwx_rx_packet_len(pkt);
+
+ if ((pkt->hdr.flags & IWX_CMD_FAILED_MSK) ||
+ pkt_len < sizeof(*pkt) ||
+ pkt_len > sc->sc_cmd_resp_len[idx]) {
+ free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF);
+ sc->sc_cmd_resp_pkt[idx] = NULL;
+ break;
+ }
+
+ bus_dmamap_sync(sc->rxq.data_dmat, data->map,
+ BUS_DMASYNC_POSTREAD);
+ memcpy(sc->sc_cmd_resp_pkt[idx], pkt, pkt_len);
+ break;
+ }
+
+ case IWX_INIT_COMPLETE_NOTIF:
+ sc->sc_init_complete |= IWX_INIT_COMPLETE;
+ wakeup(&sc->sc_init_complete);
+ break;
+
+ case IWX_SCAN_COMPLETE_UMAC: {
+ DPRINTF(("%s: >>> IWX_SCAN_COMPLETE_UMAC\n", __func__));
+ struct iwx_umac_scan_complete *notif;
+ SYNC_RESP_STRUCT(notif, pkt);
+ DPRINTF(("%s: scan complete notif->status=%d\n", __func__,
+ notif->status));
+ ieee80211_runtask(&sc->sc_ic, &sc->sc_es_task);
+ iwx_endscan(sc);
+ break;
+ }
+
+ case IWX_SCAN_ITERATION_COMPLETE_UMAC: {
+ DPRINTF(("%s: >>> IWX_SCAN_ITERATION_COMPLETE_UMAC\n",
+ __func__));
+ struct iwx_umac_scan_iter_complete_notif *notif;
+ SYNC_RESP_STRUCT(notif, pkt);
+ DPRINTF(("%s: iter scan complete notif->status=%d\n", __func__,
+ notif->status));
+ iwx_endscan(sc);
+ break;
+ }
+
+ case IWX_MCC_CHUB_UPDATE_CMD: {
+ struct iwx_mcc_chub_notif *notif;
+ SYNC_RESP_STRUCT(notif, pkt);
+ iwx_mcc_update(sc, notif);
+ break;
+ }
+
+ case IWX_REPLY_ERROR: {
+ struct iwx_error_resp *resp;
+ SYNC_RESP_STRUCT(resp, pkt);
+ printf("%s: firmware error 0x%x, cmd 0x%x\n",
+ DEVNAME(sc), le32toh(resp->error_type),
+ resp->cmd_id);
+ break;
+ }
+
+ case IWX_TIME_EVENT_NOTIFICATION: {
+ struct iwx_time_event_notif *notif;
+ uint32_t action;
+ SYNC_RESP_STRUCT(notif, pkt);
+
+ if (sc->sc_time_event_uid != le32toh(notif->unique_id))
+ break;
+ action = le32toh(notif->action);
+ if (action & IWX_TE_V2_NOTIF_HOST_EVENT_END)
+ sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
+ break;
+ }
+
+ case IWX_WIDE_ID(IWX_MAC_CONF_GROUP,
+ IWX_SESSION_PROTECTION_NOTIF): {
+ struct iwx_session_prot_notif *notif;
+ uint32_t status, start, conf_id;
+
+ SYNC_RESP_STRUCT(notif, pkt);
+
+ status = le32toh(notif->status);
+ start = le32toh(notif->start);
+ conf_id = le32toh(notif->conf_id);
+ /* Check for end of successful PROTECT_CONF_ASSOC. */
+ if (status == 1 && start == 0 &&
+ conf_id == IWX_SESSION_PROTECT_CONF_ASSOC)
+ sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
+ break;
+ }
+
+ case IWX_WIDE_ID(IWX_SYSTEM_GROUP,
+ IWX_FSEQ_VER_MISMATCH_NOTIFICATION):
+ break;
+
+ /*
+ * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
+ * messages. Just ignore them for now.
+ */
+ case IWX_DEBUG_LOG_MSG:
+ break;
+
+ case IWX_MCAST_FILTER_CMD:
+ break;
+
+ case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_DQA_ENABLE_CMD):
+ break;
+
+ case IWX_WIDE_ID(IWX_SYSTEM_GROUP, IWX_SOC_CONFIGURATION_CMD):
+ break;
+
+ case IWX_WIDE_ID(IWX_SYSTEM_GROUP, IWX_INIT_EXTENDED_CFG_CMD):
+ break;
+
+ case IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
+ IWX_NVM_ACCESS_COMPLETE):
+ break;
+
+ case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_RX_NO_DATA_NOTIF):
+ break; /* happens in monitor mode; ignore for now */
+
+ case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_TLC_MNG_CONFIG_CMD):
+ break;
+
+ case IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
+ IWX_TLC_MNG_UPDATE_NOTIF): {
+ struct iwx_tlc_update_notif *notif;
+ SYNC_RESP_STRUCT(notif, pkt);
+ (void)notif;
+ if (iwx_rx_packet_payload_len(pkt) == sizeof(*notif))
+ iwx_rs_update(sc, notif);
+ break;
+ }
+
+ case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_RLC_CONFIG_CMD):
+ break;
+
+ /* undocumented notification from iwx-ty-a0-gf-a0-77 image */
+ case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, 0xf8):
+ break;
+
+ case IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
+ IWX_PNVM_INIT_COMPLETE):
+ DPRINTF(("%s: IWX_PNVM_INIT_COMPLETE\n", __func__));
+ sc->sc_init_complete |= IWX_PNVM_COMPLETE;
+ wakeup(&sc->sc_init_complete);
+ break;
+
+ default:
+ handled = 0;
+ /* XXX wulf: Get rid of bluetooth-related spam */
+ if ((code == 0xc2 && pkt->len_n_flags == 0x0000000c) ||
+ (code == 0xce && pkt->len_n_flags == 0x2000002c))
+ break;
+ printf("%s: unhandled firmware response 0x%x/0x%x "
+ "rx ring %d[%d]\n",
+ DEVNAME(sc), code, pkt->len_n_flags,
+ (qid & ~0x80), idx);
+ break;
+ }
+
+ /*
+ * uCode sets bit 0x80 when it originates the notification,
+ * i.e. when the notification is not a direct response to a
+ * command sent by the driver.
+ * For example, uCode issues IWX_REPLY_RX when it sends a
+ * received frame to the driver.
+ */
+ if (handled && !(qid & (1 << 7))) {
+ iwx_cmd_done(sc, qid, idx, code);
+ }
+
+ offset += roundup(len, IWX_FH_RSCSR_FRAME_ALIGN);
+
+ /* AX210 devices ship only one packet per Rx buffer. */
+ if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
+ break;
+ }
+
+ if (m0 && m0 != data->m)
+ m_freem(m0);
+}
+
+/*
+ * Drain the Rx ring: sync the DMA status area, dispatch every completed
+ * Rx descriptor to iwx_rx_pkt(), and tell the firmware how far we got.
+ */
+static void
+iwx_notif_intr(struct iwx_softc *sc)
+{
+	struct mbuf m;
+	uint16_t hw;
+
+	bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
+	    BUS_DMASYNC_POSTREAD);
+
+	/*
+	 * Fetch the device's write pointer (closed receive buffer index).
+	 * AX210 and later keep it in a bare 16-bit status word; older
+	 * devices expose it via the rb_status structure.
+	 */
+	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
+		uint16_t *status = sc->rxq.stat_dma.vaddr;
+		hw = le16toh(*status) & 0xfff;
+	} else
+		hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
+	hw &= (IWX_RX_MQ_RING_COUNT - 1);
+	/* Process every descriptor the device has handed back to us. */
+	while (sc->rxq.cur != hw) {
+		struct iwx_rx_data *data = &sc->rxq.data[sc->rxq.cur];
+
+		bus_dmamap_sync(sc->rxq.data_dmat, data->map,
+		    BUS_DMASYNC_POSTREAD);
+
+		/*
+		 * NOTE(review): 'm' is an uninitialized stack mbuf passed by
+		 * address; it appears to be scratch space only -- confirm
+		 * against iwx_rx_pkt()'s contract.
+		 */
+		iwx_rx_pkt(sc, data, &m);
+		sc->rxq.cur = (sc->rxq.cur + 1) % IWX_RX_MQ_RING_COUNT;
+	}
+
+	/*
+	 * Tell the firmware what we have processed.
+	 * Seems like the hardware gets upset unless we align the write by 8??
+	 */
+	hw = (hw == 0) ? IWX_RX_MQ_RING_COUNT - 1 : hw - 1;
+	IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, hw & ~7);
+}
+
+#if 0
+/*
+ * Legacy pin-based (INTx/ICT) interrupt handler inherited from OpenBSD.
+ * This port attaches the MSI-X handler (iwx_intr_msix) instead; the code
+ * below is compiled out and kept for reference only.
+ */
+int
+iwx_intr(void *arg)
+{
+	struct iwx_softc *sc = arg;
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct ifnet *ifp = IC2IFP(ic);
+	int r1, r2, rv = 0;
+
+	/* Mask all interrupts while we figure out what fired. */
+	IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);
+
+	if (sc->sc_flags & IWX_FLAG_USE_ICT) {
+		uint32_t *ict = sc->ict_dma.vaddr;
+		int tmp;
+
+		tmp = htole32(ict[sc->ict_cur]);
+		if (!tmp)
+			goto out_ena;
+
+		/*
+		 * ok, there was something. keep plowing until we have all.
+		 */
+		r1 = r2 = 0;
+		while (tmp) {
+			r1 |= tmp;
+			ict[sc->ict_cur] = 0;
+			sc->ict_cur = (sc->ict_cur+1) % IWX_ICT_COUNT;
+			tmp = htole32(ict[sc->ict_cur]);
+		}
+
+		/* this is where the fun begins. don't ask */
+		if (r1 == 0xffffffff)
+			r1 = 0;
+
+		/* i am not expected to understand this */
+		if (r1 & 0xc0000)
+			r1 |= 0x8000;
+		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
+	} else {
+		r1 = IWX_READ(sc, IWX_CSR_INT);
+		/* All-ones / 0xa5a5a5a* reads mean the device is gone. */
+		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
+			goto out;
+		r2 = IWX_READ(sc, IWX_CSR_FH_INT_STATUS);
+	}
+	if (r1 == 0 && r2 == 0) {
+		goto out_ena;	/* not ours; re-enable and bail */
+	}
+
+	/* Acknowledge the causes we are about to service. */
+	IWX_WRITE(sc, IWX_CSR_INT, r1 | ~sc->sc_intmask);
+
+	if (r1 & IWX_CSR_INT_BIT_ALIVE) {
+#if 0
+		int i;
+		/* Firmware has now configured the RFH. */
+		for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++)
+			iwx_update_rx_desc(sc, &sc->rxq, i);
+#endif
+		IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, 8);
+	}
+
+
+	if (r1 & IWX_CSR_INT_BIT_RF_KILL) {
+		iwx_check_rfkill(sc);
+		rv = 1;
+		goto out_ena;
+	}
+
+	/* Fatal firmware error: dump state when debugging, then restart. */
+	if (r1 & IWX_CSR_INT_BIT_SW_ERR) {
+		if (ifp->if_flags & IFF_DEBUG) {
+			iwx_nic_error(sc);
+			iwx_dump_driver_status(sc);
+		}
+		printf("%s: fatal firmware error\n", DEVNAME(sc));
+		ieee80211_restart_all(ic);
+		rv = 1;
+		goto out;
+
+	}
+
+	if (r1 & IWX_CSR_INT_BIT_HW_ERR) {
+		printf("%s: hardware error, stopping device \n", DEVNAME(sc));
+		iwx_stop(sc);
+		rv = 1;
+		goto out;
+	}
+
+	/* firmware chunk loaded */
+	if (r1 & IWX_CSR_INT_BIT_FH_TX) {
+		IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, IWX_CSR_FH_INT_TX_MASK);
+
+		sc->sc_fw_chunk_done = 1;
+		wakeup(&sc->sc_fw);
+	}
+
+	if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX |
+	    IWX_CSR_INT_BIT_RX_PERIODIC)) {
+		if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX)) {
+			IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, IWX_CSR_FH_INT_RX_MASK);
+		}
+		if (r1 & IWX_CSR_INT_BIT_RX_PERIODIC) {
+			IWX_WRITE(sc, IWX_CSR_INT, IWX_CSR_INT_BIT_RX_PERIODIC);
+		}
+
+		/* Disable periodic interrupt; we use it as just a one-shot. */
+		IWX_WRITE_1(sc, IWX_CSR_INT_PERIODIC_REG, IWX_CSR_INT_PERIODIC_DIS);
+
+		/*
+		 * Enable periodic interrupt in 8 msec only if we received
+		 * real RX interrupt (instead of just periodic int), to catch
+		 * any dangling Rx interrupt. If it was just the periodic
+		 * interrupt, there was no dangling Rx activity, and no need
+		 * to extend the periodic interrupt; one-shot is enough.
+		 */
+		if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX))
+			IWX_WRITE_1(sc, IWX_CSR_INT_PERIODIC_REG,
+			    IWX_CSR_INT_PERIODIC_ENA);
+
+		iwx_notif_intr(sc);
+	}
+
+	rv = 1;
+
+ out_ena:
+	iwx_restore_interrupts(sc);
+ out:
+	return rv;
+}
+#endif
+
+/*
+ * MSI-X interrupt handler (vector 0).  Reads and acknowledges the FH and
+ * HW interrupt-cause registers, then dispatches: Rx notifications,
+ * firmware-chunk completion, fatal firmware/hardware errors, rfkill
+ * changes, and the firmware "alive" event.
+ */
+static void
+iwx_intr_msix(void *arg)
+{
+	struct iwx_softc *sc = arg;
+	struct ieee80211com *ic = &sc->sc_ic;
+	uint32_t inta_fh, inta_hw;
+	int vector = 0;
+
+	IWX_LOCK(sc);
+
+	/* Read the causes and ack them by writing the same bits back. */
+	inta_fh = IWX_READ(sc, IWX_CSR_MSIX_FH_INT_CAUSES_AD);
+	inta_hw = IWX_READ(sc, IWX_CSR_MSIX_HW_INT_CAUSES_AD);
+	IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
+	IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
+	/* Only look at causes we have enabled. */
+	inta_fh &= sc->sc_fh_mask;
+	inta_hw &= sc->sc_hw_mask;
+
+	/* Rx queue 0/1 activity: drain notifications from the Rx ring. */
+	if (inta_fh & IWX_MSIX_FH_INT_CAUSES_Q0 ||
+	    inta_fh & IWX_MSIX_FH_INT_CAUSES_Q1) {
+		iwx_notif_intr(sc);
+	}
+
+	/* firmware chunk loaded */
+	if (inta_fh & IWX_MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
+		sc->sc_fw_chunk_done = 1;
+		wakeup(&sc->sc_fw);
+	}
+
+	/* Fatal firmware error: dump state (when debugging) and restart. */
+	if ((inta_fh & IWX_MSIX_FH_INT_CAUSES_FH_ERR) ||
+	    (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR) ||
+	    (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR_V2)) {
+		if (sc->sc_debug) {
+			iwx_bbl_print_log();
+			iwx_nic_error(sc);
+			iwx_dump_driver_status(sc);
+		}
+		printf("%s: fatal firmware error\n", DEVNAME(sc));
+		ieee80211_restart_all(ic);
+		goto out;
+	}
+
+	if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL) {
+		iwx_check_rfkill(sc);
+	}
+
+	/* Unrecoverable hardware error: mark it and shut the device down. */
+	if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_HW_ERR) {
+		printf("%s: hardware error, stopping device \n", DEVNAME(sc));
+		sc->sc_flags |= IWX_FLAG_HW_ERR;
+		iwx_stop(sc);
+		goto out;
+	}
+
+	if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_ALIVE) {
+		IWX_DPRINTF(sc, IWX_DEBUG_TRACE,
+		    "%s:%d WARNING: Skipping rx desc update\n",
+		    __func__, __LINE__);
+#if 0
+		/*
+		 * XXX-THJ: we don't have the dma segment handy. This is hacked
+		 * out in the fc release, return to it if we ever get this
+		 * warning.
+		 */
+		/* Firmware has now configured the RFH. */
+		for (int i = 0; i < IWX_RX_MQ_RING_COUNT; i++)
+			iwx_update_rx_desc(sc, &sc->rxq, i);
+#endif
+		IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, 8);
+	}
+
+	/*
+	 * Before sending the interrupt the HW disables it to prevent
+	 * a nested interrupt. This is done by writing 1 to the corresponding
+	 * bit in the mask register. After handling the interrupt, it should be
+	 * re-enabled by clearing this bit. This register is defined as
+	 * write 1 clear (W1C) register, meaning that it's being clear
+	 * by writing 1 to the bit.
+	 */
+	IWX_WRITE(sc, IWX_CSR_MSIX_AUTOMASK_ST_AD, 1 << vector);
+out:
+	IWX_UNLOCK(sc);
+	return;
+}
+
+/*
+ * The device info table below contains device-specific config overrides.
+ * The most important parameter derived from this table is the name of the
+ * firmware image to load.
+ *
+ * The Linux iwlwifi driver uses an "old" and a "new" device info table.
+ * The "old" table matches devices based on PCI vendor/product IDs only.
+ * The "new" table extends this with various device parameters derived
+ * from MAC type, and RF type.
+ *
+ * In iwlwifi "old" and "new" tables share the same array, where "old"
+ * entries contain dummy values for data defined only for "new" entries.
+ * As of 2022, Linux developers are still in the process of moving entries
+ * from "old" to "new" style and it looks like this effort has stalled
+ * in some work-in-progress state for quite a while. Linux commits moving
+ * entries from "old" to "new" have at times been reverted due to regressions.
+ * Part of this complexity comes from iwlwifi supporting both iwm(4) and iwx(4)
+ * devices in the same driver.
+ *
+ * Our table below contains mostly "new" entries declared in iwlwifi
+ * with the _IWL_DEV_INFO() macro (with a leading underscore).
+ * Other devices are matched based on PCI vendor/product ID as usual,
+ * unless matching specific PCI subsystem vendor/product IDs is required.
+ *
+ * Some "old"-style entries are required to identify the firmware image to use.
+ * Others might be used to print a specific marketing name into Linux dmesg,
+ * but we can't be sure whether the corresponding devices would be matched
+ * correctly in the absence of their entries. So we include them just in case.
+ */
+
+/*
+ * One device-matching table entry.  Fields set to IWX_CFG_ANY act as
+ * wildcards; iwx_find_device_cfg() walks iwx_dev_info_table backwards
+ * and returns the cfg of the first matching entry.
+ */
+struct iwx_dev_info {
+	uint16_t device;	/* PCI device ID, or IWX_CFG_ANY */
+	uint16_t subdevice;	/* PCI subsystem device ID, or IWX_CFG_ANY */
+	uint16_t mac_type;	/* from IWX_CSR_HW_REV_TYPE(sc_hw_rev) */
+	uint16_t rf_type;	/* from IWX_CSR_HW_RFID_TYPE(sc_hw_rf_id) */
+	uint8_t mac_step;	/* silicon step from IWX_CSR_HW_REV_STEP() */
+	uint8_t rf_id;		/* from IWX_SUBDEVICE_RF_ID(subdevice) */
+	uint8_t no_160;		/* from IWX_SUBDEVICE_NO_160(subdevice) */
+	uint8_t cores;		/* from IWX_SUBDEVICE_CORES(subdevice) */
+	uint8_t cdb;		/* from IWX_CSR_HW_RFID_IS_CDB(sc_hw_rf_id) */
+	uint8_t jacket;		/* from IWX_CSR_HW_RFID_IS_JACKET(sc_hw_rf_id) */
+	const struct iwx_device_cfg *cfg; /* config selected on match */
+};
+
+/*
+ * Build a fully-specified ("new"-style) table entry; any field may be
+ * wildcarded with IWX_CFG_ANY.
+ */
+#define _IWX_DEV_INFO(_device, _subdevice, _mac_type, _mac_step, _rf_type, \
+		      _rf_id, _no_160, _cores, _cdb, _jacket, _cfg) \
+	{ .device = (_device), .subdevice = (_subdevice), .cfg = &(_cfg), \
+	  .mac_type = _mac_type, .rf_type = _rf_type, \
+	  .no_160 = _no_160, .cores = _cores, .rf_id = _rf_id, \
+	  .mac_step = _mac_step, .cdb = _cdb, .jacket = _jacket }
+
+/* Build an "old"-style entry matched on PCI device/subdevice IDs only. */
+#define IWX_DEV_INFO(_device, _subdevice, _cfg) \
+	_IWX_DEV_INFO(_device, _subdevice, IWX_CFG_ANY, IWX_CFG_ANY, \
+		      IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_ANY, \
+		      IWX_CFG_ANY, IWX_CFG_ANY, _cfg)
+
+/*
+ * When adding entries to this table keep in mind that entries must
+ * be listed in the same order as in the Linux driver. Code walks this
+ * table backwards and uses the first matching entry it finds.
+ * Device firmware must be available in fw_update(8).
+ */
+static const struct iwx_dev_info iwx_dev_info_table[] = {
+	/* NB: matched from the bottom up; keep iwlwifi's entry order. */
+	/* So with HR */
+	IWX_DEV_INFO(0x2725, 0x0090, iwx_2ax_cfg_so_gf_a0),
+	IWX_DEV_INFO(0x2725, 0x0020, iwx_2ax_cfg_ty_gf_a0),
+	IWX_DEV_INFO(0x2725, 0x2020, iwx_2ax_cfg_ty_gf_a0),
+	IWX_DEV_INFO(0x2725, 0x0024, iwx_2ax_cfg_ty_gf_a0),
+	IWX_DEV_INFO(0x2725, 0x0310, iwx_2ax_cfg_ty_gf_a0),
+	IWX_DEV_INFO(0x2725, 0x0510, iwx_2ax_cfg_ty_gf_a0),
+	IWX_DEV_INFO(0x2725, 0x0A10, iwx_2ax_cfg_ty_gf_a0),
+	IWX_DEV_INFO(0x2725, 0xE020, iwx_2ax_cfg_ty_gf_a0),
+	IWX_DEV_INFO(0x2725, 0xE024, iwx_2ax_cfg_ty_gf_a0),
+	IWX_DEV_INFO(0x2725, 0x4020, iwx_2ax_cfg_ty_gf_a0),
+	IWX_DEV_INFO(0x2725, 0x6020, iwx_2ax_cfg_ty_gf_a0),
+	IWX_DEV_INFO(0x2725, 0x6024, iwx_2ax_cfg_ty_gf_a0),
+	IWX_DEV_INFO(0x2725, 0x1673, iwx_2ax_cfg_ty_gf_a0), /* killer_1675w */
+	IWX_DEV_INFO(0x2725, 0x1674, iwx_2ax_cfg_ty_gf_a0), /* killer_1675x */
+	IWX_DEV_INFO(0x51f0, 0x1691, iwx_2ax_cfg_so_gf4_a0), /* killer_1690s */
+	IWX_DEV_INFO(0x51f0, 0x1692, iwx_2ax_cfg_so_gf4_a0), /* killer_1690i */
+	IWX_DEV_INFO(0x51f1, 0x1691, iwx_2ax_cfg_so_gf4_a0),
+	IWX_DEV_INFO(0x51f1, 0x1692, iwx_2ax_cfg_so_gf4_a0),
+	IWX_DEV_INFO(0x54f0, 0x1691, iwx_2ax_cfg_so_gf4_a0), /* killer_1690s */
+	IWX_DEV_INFO(0x54f0, 0x1692, iwx_2ax_cfg_so_gf4_a0), /* killer_1690i */
+	IWX_DEV_INFO(0x7a70, 0x0090, iwx_2ax_cfg_so_gf_a0_long),
+	IWX_DEV_INFO(0x7a70, 0x0098, iwx_2ax_cfg_so_gf_a0_long),
+	IWX_DEV_INFO(0x7a70, 0x00b0, iwx_2ax_cfg_so_gf4_a0_long),
+	IWX_DEV_INFO(0x7a70, 0x0310, iwx_2ax_cfg_so_gf_a0_long),
+	IWX_DEV_INFO(0x7a70, 0x0510, iwx_2ax_cfg_so_gf_a0_long),
+	IWX_DEV_INFO(0x7a70, 0x0a10, iwx_2ax_cfg_so_gf_a0_long),
+	IWX_DEV_INFO(0x7af0, 0x0090, iwx_2ax_cfg_so_gf_a0),
+	IWX_DEV_INFO(0x7af0, 0x0098, iwx_2ax_cfg_so_gf_a0),
+	IWX_DEV_INFO(0x7af0, 0x00b0, iwx_2ax_cfg_so_gf4_a0),
+	IWX_DEV_INFO(0x7a70, 0x1691, iwx_2ax_cfg_so_gf4_a0), /* killer_1690s */
+	IWX_DEV_INFO(0x7a70, 0x1692, iwx_2ax_cfg_so_gf4_a0), /* killer_1690i */
+	IWX_DEV_INFO(0x7af0, 0x0310, iwx_2ax_cfg_so_gf_a0),
+	IWX_DEV_INFO(0x7af0, 0x0510, iwx_2ax_cfg_so_gf_a0),
+	IWX_DEV_INFO(0x7af0, 0x0a10, iwx_2ax_cfg_so_gf_a0),
+	IWX_DEV_INFO(0x7f70, 0x1691, iwx_2ax_cfg_so_gf4_a0), /* killer_1690s */
+	IWX_DEV_INFO(0x7f70, 0x1692, iwx_2ax_cfg_so_gf4_a0), /* killer_1690i */
+
+	/* So with GF2 */
+	IWX_DEV_INFO(0x2726, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
+	IWX_DEV_INFO(0x2726, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
+	IWX_DEV_INFO(0x51f0, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
+	IWX_DEV_INFO(0x51f0, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
+	IWX_DEV_INFO(0x54f0, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
+	IWX_DEV_INFO(0x54f0, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
+	IWX_DEV_INFO(0x7a70, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
+	IWX_DEV_INFO(0x7a70, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
+	IWX_DEV_INFO(0x7af0, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
+	IWX_DEV_INFO(0x7af0, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
+	IWX_DEV_INFO(0x7f70, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
+	IWX_DEV_INFO(0x7f70, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
+
+	/* Qu with Jf, C step */
+	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
+		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
+		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
+		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9461_160 */
+	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
+		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
+		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
+		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* iwl9461 */
+	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
+		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
+		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
+		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9462_160 */
+	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
+		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
+		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
+		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9462 */
+	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
+		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
+		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
+		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9560_160 */
+	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
+		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
+		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
+		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9560 */
+	_IWX_DEV_INFO(IWX_CFG_ANY, 0x1551,
+		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
+		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
+		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
+		      IWX_CFG_ANY,
+		      iwx_9560_qu_c0_jf_b0_cfg), /* 9560_killer_1550s */
+	_IWX_DEV_INFO(IWX_CFG_ANY, 0x1552,
+		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
+		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
+		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
+		      IWX_CFG_ANY,
+		      iwx_9560_qu_c0_jf_b0_cfg), /* 9560_killer_1550i */
+
+	/* QuZ with Jf */
+	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
+		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
+		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
+		      IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9461_160 */
+	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
+		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
+		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
+		      IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9461 */
+	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
+		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
+		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
+		      IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9462_160 */
+	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
+		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
+		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
+		      IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9462 */
+	_IWX_DEV_INFO(IWX_CFG_ANY, 0x1551,
+		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
+		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
+		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
+		      IWX_CFG_ANY,
+		      iwx_9560_quz_a0_jf_b0_cfg), /* killer_1550s */
+	_IWX_DEV_INFO(IWX_CFG_ANY, 0x1552,
+		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
+		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
+		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
+		      IWX_CFG_ANY,
+		      iwx_9560_quz_a0_jf_b0_cfg), /* 9560_killer_1550i */
+
+	/* Qu with Hr, B step */
+	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_B_STEP,
+		      IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
+		      IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
+		      iwx_qu_b0_hr1_b0), /* AX101 */
+	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_B_STEP,
+		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
+		      IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
+		      iwx_qu_b0_hr_b0), /* AX203 */
+
+	/* Qu with Hr, C step */
+	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
+		      IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
+		      IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
+		      iwx_qu_c0_hr1_b0), /* AX101 */
+	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
+		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
+		      IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
+		      iwx_qu_c0_hr_b0), /* AX203 */
+	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
+		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
+		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
+		      iwx_qu_c0_hr_b0), /* AX201 */
+
+	/* QuZ with Hr */
+	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
+		      IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
+		      IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
+		      iwx_quz_a0_hr1_b0), /* AX101 */
+	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+		      IWX_CFG_MAC_TYPE_QUZ, IWX_SILICON_B_STEP,
+		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
+		      IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
+		      iwx_cfg_quz_a0_hr_b0), /* AX203 */
+
+	/* SoF with JF2 */
+	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
+		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
+		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
+		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9560_160 */
+	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
+		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
+		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
+		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9560 */
+
+	/* SoF with JF */
+	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
+		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
+		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
+		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9461_160 */
+	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
+		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
+		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
+		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9462_160 */
+	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
+		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
+		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
+		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9461_name */
+	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
+		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
+		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
+		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9462 */
+
+	/* So with Hr */
+	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
+		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
+		      IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
+		      iwx_cfg_so_a0_hr_b0), /* AX203 */
+	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
+		      IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
+		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
+		      iwx_cfg_so_a0_hr_b0), /* ax101 */
+	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
+		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
+		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
+		      iwx_cfg_so_a0_hr_b0), /* ax201 */
+
+	/* So-F with Hr */
+	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
+		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
+		      IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
+		      iwx_cfg_so_a0_hr_b0), /* AX203 */
+	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
+		      IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
+		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
+		      iwx_cfg_so_a0_hr_b0), /* AX101 */
+	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
+		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
+		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
+		      iwx_cfg_so_a0_hr_b0), /* AX201 */
+
+	/* So-F with GF */
+	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
+		      IWX_CFG_RF_TYPE_GF, IWX_CFG_ANY,
+		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
+		      iwx_2ax_cfg_so_gf_a0), /* AX211 */
+	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
+		      IWX_CFG_RF_TYPE_GF, IWX_CFG_ANY,
+		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_CDB, IWX_CFG_ANY,
+		      iwx_2ax_cfg_so_gf4_a0), /* AX411 */
+
+	/* So with GF */
+	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
+		      IWX_CFG_RF_TYPE_GF, IWX_CFG_ANY,
+		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
+		      iwx_2ax_cfg_so_gf_a0), /* AX211 */
+	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
+		      IWX_CFG_RF_TYPE_GF, IWX_CFG_ANY,
+		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_CDB, IWX_CFG_ANY,
+		      iwx_2ax_cfg_so_gf4_a0), /* AX411 */
+
+	/* So with JF2 */
+	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
+		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
+		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
+		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9560_160 */
+	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
+		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
+		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
+		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9560 */
+
+	/* So with JF */
+	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
+		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
+		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
+		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9461_160 */
+	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
+		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
+		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
+		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9462_160 */
+	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
+		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
+		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
+		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* iwl9461 */
+	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
+		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
+		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
+		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9462 */
+};
+
+/*
+ * One-time pre-attach initialization: bring the card out of reset, run the
+ * init firmware image once (to obtain NVM data), stop the device again,
+ * and print hardware/firmware identification on the first success.
+ *
+ * Returns 0 on success or the error from the failing step.
+ */
+static int
+iwx_preinit(struct iwx_softc *sc)
+{
+	struct ieee80211com *ic = &sc->sc_ic;
+	int err;
+
+	err = iwx_prepare_card_hw(sc);
+	if (err) {
+		printf("%s: could not initialize hardware\n", DEVNAME(sc));
+		return err;
+	}
+
+	/* The work below only needs to happen once. */
+	if (sc->attached) {
+		return 0;
+	}
+
+	err = iwx_start_hw(sc);
+	if (err) {
+		/* Distinct message so the failing step is identifiable. */
+		printf("%s: could not start hardware\n", DEVNAME(sc));
+		return err;
+	}
+
+	/*
+	 * Run the init firmware, then stop the device again; the runtime
+	 * firmware is loaded later.  'err' reflects the ucode run, not the
+	 * stop (the old message blamed "failed to stop device" in error).
+	 */
+	err = iwx_run_init_mvm_ucode(sc, 1);
+	iwx_stop_device(sc);
+	if (err) {
+		printf("%s: failed to run init firmware\n", DEVNAME(sc));
+		return err;
+	}
+
+	/* Print version info and MAC address on first successful fw load. */
+	sc->attached = 1;
+	if (sc->sc_pnvm_ver) {
+		printf("%s: hw rev 0x%x, fw %s, pnvm %08x, "
+		    "address %s\n",
+		    DEVNAME(sc), sc->sc_hw_rev & IWX_CSR_HW_REV_TYPE_MSK,
+		    sc->sc_fwver, sc->sc_pnvm_ver,
+		    ether_sprintf(sc->sc_nvm.hw_addr));
+	} else {
+		printf("%s: hw rev 0x%x, fw %s, address %s\n",
+		    DEVNAME(sc), sc->sc_hw_rev & IWX_CSR_HW_REV_TYPE_MSK,
+		    sc->sc_fwver, ether_sprintf(sc->sc_nvm.hw_addr));
+	}
+
+	/* not all hardware can do 5GHz band */
+	if (!sc->sc_nvm.sku_cap_band_52GHz_enable)
+		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
+		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
+
+	return 0;
+}
+
+/*
+ * Deferred attach hook (config_intrhook).  Runs iwx_preinit() once under
+ * the driver lock and, on success, builds the channel map, attaches to
+ * net80211, installs the driver callbacks, and announces the device.
+ * The hook is disestablished in all cases so it runs exactly once.
+ */
+static void
+iwx_attach_hook(void *self)
+{
+	struct iwx_softc *sc = (void *)self;
+	struct ieee80211com *ic = &sc->sc_ic;
+	int err;
+
+	IWX_LOCK(sc);
+	err = iwx_preinit(sc);
+	IWX_UNLOCK(sc);
+	if (err != 0)
+		goto out;
+
+	iwx_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
+	    ic->ic_channels);
+
+	/* Attach to net80211 and install driver callbacks. */
+	ieee80211_ifattach(ic);
+	ic->ic_vap_create = iwx_vap_create;
+	ic->ic_vap_delete = iwx_vap_delete;
+	ic->ic_raw_xmit = iwx_raw_xmit;
+	ic->ic_node_alloc = iwx_node_alloc;
+	ic->ic_scan_start = iwx_scan_start;
+	ic->ic_scan_end = iwx_scan_end;
+	ic->ic_update_mcast = iwx_update_mcast;
+	ic->ic_getradiocaps = iwx_init_channel_map;
+
+	ic->ic_set_channel = iwx_set_channel;
+	ic->ic_scan_curchan = iwx_scan_curchan;
+	ic->ic_scan_mindwell = iwx_scan_mindwell;
+	ic->ic_wme.wme_update = iwx_wme_update;
+	ic->ic_parent = iwx_parent;
+	ic->ic_transmit = iwx_transmit;
+
+	/* Save net80211's defaults so our wrappers can chain to them. */
+	sc->sc_ampdu_rx_start = ic->ic_ampdu_rx_start;
+	ic->ic_ampdu_rx_start = iwx_ampdu_rx_start;
+	sc->sc_ampdu_rx_stop = ic->ic_ampdu_rx_stop;
+	ic->ic_ampdu_rx_stop = iwx_ampdu_rx_stop;
+
+	sc->sc_addba_request = ic->ic_addba_request;
+	ic->ic_addba_request = iwx_addba_request;
+	sc->sc_addba_response = ic->ic_addba_response;
+	ic->ic_addba_response = iwx_addba_response;
+
+	iwx_radiotap_attach(sc);
+	ieee80211_announce(ic);
+out:
+	config_intrhook_disestablish(&sc->sc_preinit_hook);
+}
+
+/*
+ * Look up the device-specific configuration (firmware name, PNVM name,
+ * capabilities) matching this adapter's PCI IDs and CSR-derived MAC/RF
+ * identification.  The table is scanned from the last entry backwards so
+ * that more specific entries, placed later, win over wildcard ones.
+ * Returns NULL if no entry matches.
+ */
+const struct iwx_device_cfg *
+iwx_find_device_cfg(struct iwx_softc *sc)
+{
+	uint16_t mac_type, rf_type;
+	uint8_t mac_step, cdb, jacket, rf_id, no_160, cores;
+	int i;
+	uint16_t sdev_id;
+
+	/* Gather everything the table entries can constrain on. */
+	sdev_id = pci_get_device(sc->sc_dev);
+	mac_type = IWX_CSR_HW_REV_TYPE(sc->sc_hw_rev);
+	mac_step = IWX_CSR_HW_REV_STEP(sc->sc_hw_rev << 2);
+	rf_type = IWX_CSR_HW_RFID_TYPE(sc->sc_hw_rf_id);
+	cdb = IWX_CSR_HW_RFID_IS_CDB(sc->sc_hw_rf_id);
+	jacket = IWX_CSR_HW_RFID_IS_JACKET(sc->sc_hw_rf_id);
+
+	rf_id = IWX_SUBDEVICE_RF_ID(sdev_id);
+	no_160 = IWX_SUBDEVICE_NO_160(sdev_id);
+	cores = IWX_SUBDEVICE_CORES(sdev_id);
+
+	for (i = nitems(iwx_dev_info_table) - 1; i >= 0; i--) {
+		const struct iwx_dev_info *di = &iwx_dev_info_table[i];
+
+		/* Each field matches if it is a wildcard or equal. */
+		if ((di->device == (uint16_t)IWX_CFG_ANY ||
+		    di->device == sc->sc_pid) &&
+		    (di->subdevice == (uint16_t)IWX_CFG_ANY ||
+		    di->subdevice == sdev_id) &&
+		    (di->mac_type == (uint16_t)IWX_CFG_ANY ||
+		    di->mac_type == mac_type) &&
+		    (di->mac_step == (uint8_t)IWX_CFG_ANY ||
+		    di->mac_step == mac_step) &&
+		    (di->rf_type == (uint16_t)IWX_CFG_ANY ||
+		    di->rf_type == rf_type) &&
+		    (di->cdb == (uint8_t)IWX_CFG_ANY ||
+		    di->cdb == cdb) &&
+		    (di->jacket == (uint8_t)IWX_CFG_ANY ||
+		    di->jacket == jacket) &&
+		    (di->rf_id == (uint8_t)IWX_CFG_ANY ||
+		    di->rf_id == rf_id) &&
+		    (di->no_160 == (uint8_t)IWX_CFG_ANY ||
+		    di->no_160 == no_160) &&
+		    (di->cores == (uint8_t)IWX_CFG_ANY ||
+		    di->cores == cores))
+			return di->cfg;
+	}
+
+	return NULL;
+}
+
+/*
+ * PCI probe: accept any Intel device whose ID appears in iwx_devices[].
+ */
+static int
+iwx_probe(device_t dev)
+{
+	int i;
+
+	if (pci_get_vendor(dev) != PCI_VENDOR_INTEL)
+		return (ENXIO);
+
+	for (i = 0; i < nitems(iwx_devices); i++) {
+		if (pci_get_device(dev) == iwx_devices[i].device) {
+			device_set_desc(dev, iwx_devices[i].name);
+			return (BUS_PROBE_DEFAULT);
+		}
+	}
+
+	return (ENXIO);
+}
+
+/*
+ * PCI attach: allocate locks, task queues, bus resources, MSI-X interrupt,
+ * DMA memory and Tx/Rx rings, identify the exact adapter variant, and
+ * schedule iwx_attach_hook() to finish attachment once interrupts work.
+ *
+ * NOTE(review): several early error paths return ENXIO without releasing
+ * resources acquired earlier in this function (taskqueue, sc_mem, sc_irq,
+ * MSI-X) — only fail1/fail4 unwind DMA/ring state.  Verify against
+ * detach/unload expectations.
+ */
+static int
+iwx_attach(device_t dev)
+{
+	struct iwx_softc *sc = device_get_softc(dev);
+	struct ieee80211com *ic = &sc->sc_ic;
+	const struct iwx_device_cfg *cfg;
+	int err;
+	int txq_i, i, j;
+	size_t ctxt_info_size;
+	int rid;
+	int count;
+	int error;
+	sc->sc_dev = dev;
+	sc->sc_pid = pci_get_device(dev);
+	sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
+
+	TASK_INIT(&sc->sc_es_task, 0, iwx_endscan_cb, sc);
+	IWX_LOCK_INIT(sc);
+	mbufq_init(&sc->sc_snd, ifqmaxlen);
+	TASK_INIT(&sc->ba_rx_task, 0, iwx_ba_rx_task, sc);
+	TASK_INIT(&sc->ba_tx_task, 0, iwx_ba_tx_task, sc);
+	/* NOTE(review): taskqueue name says "iwm", not "iwx" — confirm. */
+	sc->sc_tq = taskqueue_create("iwm_taskq", M_WAITOK,
+	    taskqueue_thread_enqueue, &sc->sc_tq);
+	error = taskqueue_start_threads(&sc->sc_tq, 1, 0, "iwm_taskq");
+	if (error != 0) {
+		device_printf(dev, "can't start taskq thread, error %d\n",
+		    error);
+		return (ENXIO);
+	}
+
+	pci_find_cap(dev, PCIY_EXPRESS, &sc->sc_cap_off);
+	if (sc->sc_cap_off == 0) {
+		device_printf(dev, "PCIe capability structure not found!\n");
+		return (ENXIO);
+	}
+
+	/*
+	 * We disable the RETRY_TIMEOUT register (0x41) to keep
+	 * PCI Tx retries from interfering with C3 CPU state.
+	 */
+#define PCI_CFG_RETRY_TIMEOUT 0x41
+	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
+
+	/* This driver requires MSI-X; legacy interrupts are not supported. */
+	if (pci_msix_count(dev)) {
+		sc->sc_msix = 1;
+	} else {
+		device_printf(dev, "no MSI-X found\n");
+		return (ENXIO);
+	}
+
+	pci_enable_busmaster(dev);
+	rid = PCIR_BAR(0);
+	sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
+	    RF_ACTIVE);
+	if (sc->sc_mem == NULL) {
+		device_printf(sc->sc_dev, "can't map mem space\n");
+		return (ENXIO);
+	}
+	sc->sc_st = rman_get_bustag(sc->sc_mem);
+	sc->sc_sh = rman_get_bushandle(sc->sc_mem);
+
+	/* One MSI-X vector; fall back to rid 0 (shared) if allocation fails. */
+	count = 1;
+	rid = 0;
+	if (pci_alloc_msix(dev, &count) == 0)
+		rid = 1;
+	DPRINTF(("%s: count=%d\n", __func__, count));
+	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
+	    (rid != 0 ? 0 : RF_SHAREABLE));
+	if (sc->sc_irq == NULL) {
+		device_printf(dev, "can't map interrupt\n");
+		return (ENXIO);
+	}
+	error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
+	    NULL, iwx_intr_msix, sc, &sc->sc_ih);
+	if (error != 0) {
+		device_printf(dev, "can't establish interrupt\n");
+		return (ENXIO);
+	}
+
+	/* Clear pending interrupts. */
+	IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);
+	IWX_WRITE(sc, IWX_CSR_INT, ~0);
+	IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, ~0);
+
+	sc->sc_hw_rev = IWX_READ(sc, IWX_CSR_HW_REV);
+	DPRINTF(("%s: sc->sc_hw_rev=%d\n", __func__, sc->sc_hw_rev));
+	sc->sc_hw_rf_id = IWX_READ(sc, IWX_CSR_HW_RF_ID);
+	DPRINTF(("%s: sc->sc_hw_rf_id =%d\n", __func__, sc->sc_hw_rf_id));
+
+	/*
+	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
+	 * changed, and now the revision step also includes bit 0-1 (no more
+	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
+	 * in the old format.
+	 */
+	sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
+	    (IWX_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
+
+	/* Select firmware image and per-family parameters by PCI device ID. */
+	switch (sc->sc_pid) {
+	case PCI_PRODUCT_INTEL_WL_22500_1:
+		sc->sc_fwname = IWX_CC_A_FW;
+		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
+		sc->sc_integrated = 0;
+		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_NONE;
+		sc->sc_low_latency_xtal = 0;
+		sc->sc_xtal_latency = 0;
+		sc->sc_tx_with_siso_diversity = 0;
+		sc->sc_uhb_supported = 0;
+		break;
+	case PCI_PRODUCT_INTEL_WL_22500_2:
+	case PCI_PRODUCT_INTEL_WL_22500_5:
+		/* These devices should be QuZ only. */
+		if (sc->sc_hw_rev != IWX_CSR_HW_REV_TYPE_QUZ) {
+			device_printf(dev, "unsupported AX201 adapter\n");
+			return (ENXIO);
+		}
+		sc->sc_fwname = IWX_QUZ_A_HR_B_FW;
+		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
+		sc->sc_integrated = 1;
+		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_200;
+		sc->sc_low_latency_xtal = 0;
+		sc->sc_xtal_latency = 500;
+		sc->sc_tx_with_siso_diversity = 0;
+		sc->sc_uhb_supported = 0;
+		break;
+	case PCI_PRODUCT_INTEL_WL_22500_3:
+		if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QU_C0)
+			sc->sc_fwname = IWX_QU_C_HR_B_FW;
+		else if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QUZ)
+			sc->sc_fwname = IWX_QUZ_A_HR_B_FW;
+		else
+			sc->sc_fwname = IWX_QU_B_HR_B_FW;
+		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
+		sc->sc_integrated = 1;
+		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_200;
+		sc->sc_low_latency_xtal = 0;
+		sc->sc_xtal_latency = 500;
+		sc->sc_tx_with_siso_diversity = 0;
+		sc->sc_uhb_supported = 0;
+		break;
+	case PCI_PRODUCT_INTEL_WL_22500_4:
+	case PCI_PRODUCT_INTEL_WL_22500_7:
+	case PCI_PRODUCT_INTEL_WL_22500_8:
+		if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QU_C0)
+			sc->sc_fwname = IWX_QU_C_HR_B_FW;
+		else if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QUZ)
+			sc->sc_fwname = IWX_QUZ_A_HR_B_FW;
+		else
+			sc->sc_fwname = IWX_QU_B_HR_B_FW;
+		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
+		sc->sc_integrated = 1;
+		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_1820;
+		sc->sc_low_latency_xtal = 0;
+		sc->sc_xtal_latency = 1820;
+		sc->sc_tx_with_siso_diversity = 0;
+		sc->sc_uhb_supported = 0;
+		break;
+	case PCI_PRODUCT_INTEL_WL_22500_6:
+		if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QU_C0)
+			sc->sc_fwname = IWX_QU_C_HR_B_FW;
+		else if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QUZ)
+			sc->sc_fwname = IWX_QUZ_A_HR_B_FW;
+		else
+			sc->sc_fwname = IWX_QU_B_HR_B_FW;
+		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
+		sc->sc_integrated = 1;
+		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_2500;
+		sc->sc_low_latency_xtal = 1;
+		sc->sc_xtal_latency = 12000;
+		sc->sc_tx_with_siso_diversity = 0;
+		sc->sc_uhb_supported = 0;
+		break;
+	case PCI_PRODUCT_INTEL_WL_22500_9:
+	case PCI_PRODUCT_INTEL_WL_22500_10:
+	case PCI_PRODUCT_INTEL_WL_22500_11:
+	case PCI_PRODUCT_INTEL_WL_22500_13:
+	/* _14 is an MA device, not yet supported */
+	case PCI_PRODUCT_INTEL_WL_22500_15:
+	case PCI_PRODUCT_INTEL_WL_22500_16:
+		sc->sc_fwname = IWX_SO_A_GF_A_FW;
+		sc->sc_pnvm_name = IWX_SO_A_GF_A_PNVM;
+		sc->sc_device_family = IWX_DEVICE_FAMILY_AX210;
+		sc->sc_integrated = 0;
+		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_NONE;
+		sc->sc_low_latency_xtal = 0;
+		sc->sc_xtal_latency = 0;
+		sc->sc_tx_with_siso_diversity = 0;
+		sc->sc_uhb_supported = 1;
+		break;
+	case PCI_PRODUCT_INTEL_WL_22500_12:
+	case PCI_PRODUCT_INTEL_WL_22500_17:
+		sc->sc_fwname = IWX_SO_A_GF_A_FW;
+		sc->sc_pnvm_name = IWX_SO_A_GF_A_PNVM;
+		sc->sc_device_family = IWX_DEVICE_FAMILY_AX210;
+		sc->sc_integrated = 1;
+		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_2500;
+		sc->sc_low_latency_xtal = 1;
+		sc->sc_xtal_latency = 12000;
+		sc->sc_tx_with_siso_diversity = 0;
+		sc->sc_uhb_supported = 0;
+		sc->sc_imr_enabled = 1;
+		break;
+	default:
+		device_printf(dev, "unknown adapter type\n");
+		return (ENXIO);
+	}
+
+	/* A table entry, if found, refines the switch-based defaults above. */
+	cfg = iwx_find_device_cfg(sc);
+	DPRINTF(("%s: cfg=%p\n", __func__, cfg));
+	if (cfg) {
+		sc->sc_fwname = cfg->fw_name;
+		sc->sc_pnvm_name = cfg->pnvm_name;
+		sc->sc_tx_with_siso_diversity = cfg->tx_with_siso_diversity;
+		sc->sc_uhb_supported = cfg->uhb_supported;
+		if (cfg->xtal_latency) {
+			sc->sc_xtal_latency = cfg->xtal_latency;
+			sc->sc_low_latency_xtal = cfg->low_latency_xtal;
+		}
+	}
+
+	sc->mac_addr_from_csr = 0x380; /* differs on BZ hw generation */
+
+	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
+		sc->sc_umac_prph_offset = 0x300000;
+		sc->max_tfd_queue_size = IWX_TFD_QUEUE_SIZE_MAX_GEN3;
+	} else
+		sc->max_tfd_queue_size = IWX_TFD_QUEUE_SIZE_MAX;
+
+	/* Allocate DMA memory for loading firmware. */
+	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
+		ctxt_info_size = sizeof(struct iwx_context_info_gen3);
+	else
+		ctxt_info_size = sizeof(struct iwx_context_info);
+	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->ctxt_info_dma,
+	    ctxt_info_size, 1);
+	if (err) {
+		device_printf(dev,
+		    "could not allocate memory for loading firmware\n");
+		return (ENXIO);
+	}
+
+	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
+		err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->prph_scratch_dma,
+		    sizeof(struct iwx_prph_scratch), 1);
+		if (err) {
+			device_printf(dev,
+			    "could not allocate prph scratch memory\n");
+			goto fail1;
+		}
+
+		/*
+		 * Allocate prph information. The driver doesn't use this.
+		 * We use the second half of this page to give the device
+		 * some dummy TR/CR tail pointers - which shouldn't be
+		 * necessary as we don't use this, but the hardware still
+		 * reads/writes there and we can't let it go do that with
+		 * a NULL pointer.
+		 */
+		KASSERT((sizeof(struct iwx_prph_info) < PAGE_SIZE / 2),
+		    ("iwx_prph_info has wrong size"));
+		err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->prph_info_dma,
+		    PAGE_SIZE, 1);
+		if (err) {
+			device_printf(dev,
+			    "could not allocate prph info memory\n");
+			goto fail1;
+		}
+	}
+
+	/* Allocate interrupt cause table (ICT).*/
+	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
+	    IWX_ICT_SIZE, 1<<IWX_ICT_PADDR_SHIFT);
+	if (err) {
+		device_printf(dev, "could not allocate ICT table\n");
+		goto fail1;
+	}
+
+	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
+		err = iwx_alloc_tx_ring(sc, &sc->txq[txq_i], txq_i);
+		if (err) {
+			device_printf(dev, "could not allocate TX ring %d\n",
+			    txq_i);
+			goto fail4;
+		}
+	}
+
+	err = iwx_alloc_rx_ring(sc, &sc->rxq);
+	if (err) {
+		device_printf(sc->sc_dev, "could not allocate RX ring\n");
+		goto fail4;
+	}
+
+#ifdef IWX_DEBUG
+	/* Debug-only sysctls: debug bitmask, watermarks, queue depths. */
+	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
+	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
+	    CTLFLAG_RWTUN, &sc->sc_debug, 0, "bitmask to control debugging");
+
+	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
+	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "himark",
+	    CTLFLAG_RW, &iwx_himark, 0, "queues high watermark");
+	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
+	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "lomark",
+	    CTLFLAG_RW, &iwx_lomark, 0, "queues low watermark");
+
+	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
+	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "qfullmsk",
+	    CTLFLAG_RD, &sc->qfullmsk, 0, "queue fullmask");
+
+	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
+	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue0",
+	    CTLFLAG_RD, &sc->txq[0].queued, 0, "queue 0");
+	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
+	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue1",
+	    CTLFLAG_RD, &sc->txq[1].queued, 0, "queue 1");
+	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
+	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue2",
+	    CTLFLAG_RD, &sc->txq[2].queued, 0, "queue 2");
+	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
+	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue3",
+	    CTLFLAG_RD, &sc->txq[3].queued, 0, "queue 3");
+	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
+	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue4",
+	    CTLFLAG_RD, &sc->txq[4].queued, 0, "queue 4");
+	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
+	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue5",
+	    CTLFLAG_RD, &sc->txq[5].queued, 0, "queue 5");
+	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
+	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue6",
+	    CTLFLAG_RD, &sc->txq[6].queued, 0, "queue 6");
+	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
+	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue7",
+	    CTLFLAG_RD, &sc->txq[7].queued, 0, "queue 7");
+#endif
+	ic->ic_softc = sc;
+	ic->ic_name = device_get_nameunit(sc->sc_dev);
+	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
+	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
+
+	/* Set device capabilities. */
+	ic->ic_caps =
+	    IEEE80211_C_STA |
+	    IEEE80211_C_MONITOR |
+	    IEEE80211_C_WPA |		/* WPA/RSN */
+	    IEEE80211_C_WME |
+	    IEEE80211_C_PMGT |
+	    IEEE80211_C_SHSLOT |	/* short slot time supported */
+	    IEEE80211_C_SHPREAMBLE |	/* short preamble supported */
+	    IEEE80211_C_BGSCAN		/* capable of bg scanning */
+	    ;
+	ic->ic_flags_ext = IEEE80211_FEXT_SCAN_OFFLOAD;
+
+	ic->ic_txstream = 2;
+	ic->ic_rxstream = 2;
+	ic->ic_htcaps |= IEEE80211_HTC_HT
+			| IEEE80211_HTCAP_SMPS_OFF
+			| IEEE80211_HTCAP_SHORTGI20	/* short GI in 20MHz */
+			| IEEE80211_HTCAP_SHORTGI40	/* short GI in 40MHz */
+			| IEEE80211_HTCAP_CHWIDTH40	/* 40MHz channel width*/
+			| IEEE80211_HTC_AMPDU		/* tx A-MPDU */
+//			| IEEE80211_HTC_RX_AMSDU_AMPDU	/* TODO: hw reorder */
+			| IEEE80211_HTCAP_MAXAMSDU_3839; /* max A-MSDU length */
+
+	ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_CCM;
+
+	/*
+	 * XXX: setupcurchan() expects vhtcaps to be non-zero
+	 * https://bugs.freebsd.org/274156
+	 */
+	ic->ic_vht_cap.vht_cap_info |= IEEE80211_VHTCAP_MAX_MPDU_LENGTH_3895
+	    | IEEE80211_VHTCAP_SHORT_GI_80
+	    | 3 << IEEE80211_VHTCAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK_S
+	    | IEEE80211_VHTCAP_RX_ANTENNA_PATTERN
+	    | IEEE80211_VHTCAP_TX_ANTENNA_PATTERN;
+
+	ic->ic_flags_ext |= IEEE80211_FEXT_VHT;
+	/* Advertise VHT MCS 0-9 on the two supported spatial streams. */
+	int mcsmap = IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 |
+	    IEEE80211_VHT_MCS_SUPPORT_0_9 << 2 |
+	    IEEE80211_VHT_MCS_NOT_SUPPORTED << 4 |
+	    IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 |
+	    IEEE80211_VHT_MCS_NOT_SUPPORTED << 8 |
+	    IEEE80211_VHT_MCS_NOT_SUPPORTED << 10 |
+	    IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 |
+	    IEEE80211_VHT_MCS_NOT_SUPPORTED << 14;
+	ic->ic_vht_cap.supp_mcs.tx_mcs_map = htole16(mcsmap);
+	ic->ic_vht_cap.supp_mcs.rx_mcs_map = htole16(mcsmap);
+
+	callout_init_mtx(&sc->watchdog_to, &sc->sc_mtx, 0);
+	/* Mark all Rx block-ack reorder slots invalid until sessions start. */
+	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
+		struct iwx_rxba_data *rxba = &sc->sc_rxba_data[i];
+		rxba->baid = IWX_RX_REORDER_DATA_INVALID_BAID;
+		rxba->sc = sc;
+		for (j = 0; j < nitems(rxba->entries); j++)
+			mbufq_init(&rxba->entries[j].frames, ifqmaxlen);
+	}
+
+	/* Finish attach after interrupts are enabled (firmware load). */
+	sc->sc_preinit_hook.ich_func = iwx_attach_hook;
+	sc->sc_preinit_hook.ich_arg = sc;
+	if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
+		device_printf(dev,
+		    "config_intrhook_establish failed\n");
+		goto fail4;
+	}
+
+	return (0);
+
+fail4:
+	while (--txq_i >= 0)
+		iwx_free_tx_ring(sc, &sc->txq[txq_i]);
+	iwx_free_rx_ring(sc, &sc->rxq);
+	if (sc->ict_dma.vaddr != NULL)
+		iwx_dma_contig_free(&sc->ict_dma);
+
+fail1:
+	iwx_dma_contig_free(&sc->ctxt_info_dma);
+	iwx_dma_contig_free(&sc->prph_scratch_dma);
+	iwx_dma_contig_free(&sc->prph_info_dma);
+	return (ENXIO);
+}
+
+/*
+ * PCI detach: stop the device, tear down tasks, net80211 state, rings,
+ * firmware reference, interrupt and bus resources, then the lock.
+ */
+static int
+iwx_detach(device_t dev)
+{
+	struct iwx_softc *sc = device_get_softc(dev);
+	int txq_i;
+
+	iwx_stop_device(sc);
+
+	taskqueue_drain_all(sc->sc_tq);
+	taskqueue_free(sc->sc_tq);
+
+	/*
+	 * NOTE(review): if iwx_preinit() failed, ieee80211_ifattach() was
+	 * never called; confirm ieee80211_ifdetach() is safe in that case.
+	 */
+	ieee80211_ifdetach(&sc->sc_ic);
+
+	callout_drain(&sc->watchdog_to);
+
+	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++)
+		iwx_free_tx_ring(sc, &sc->txq[txq_i]);
+	iwx_free_rx_ring(sc, &sc->rxq);
+
+	/*
+	 * Firmware may never have been loaded if attach failed early;
+	 * only release the reference when one is actually held.
+	 */
+	if (sc->fwp != NULL) {
+		firmware_put(sc->fwp, FIRMWARE_UNLOAD);
+		sc->fwp = NULL;
+	}
+
+	if (sc->sc_irq != NULL) {
+		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
+		bus_release_resource(dev, SYS_RES_IRQ,
+		    rman_get_rid(sc->sc_irq), sc->sc_irq);
+		pci_release_msi(dev);
+	}
+	if (sc->sc_mem != NULL)
+		bus_release_resource(dev, SYS_RES_MEMORY,
+		    rman_get_rid(sc->sc_mem), sc->sc_mem);
+
+	IWX_LOCK_DESTROY(sc);
+
+	return (0);
+}
+
+/*
+ * Hook up radiotap headers for bpf(4) capture of Tx and Rx frames.
+ */
+static void
+iwx_radiotap_attach(struct iwx_softc *sc)
+{
+	struct ieee80211com *ic = &sc->sc_ic;
+
+	IWX_DPRINTF(sc, IWX_DEBUG_RESET | IWX_DEBUG_TRACE,
+	    "->%s begin\n", __func__);
+	ieee80211_radiotap_attach(ic, &sc->sc_txtap.wt_ihdr,
+	    sizeof(sc->sc_txtap), IWX_TX_RADIOTAP_PRESENT,
+	    &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
+	    IWX_RX_RADIOTAP_PRESENT);
+	IWX_DPRINTF(sc, IWX_DEBUG_RESET | IWX_DEBUG_TRACE,
+	    "->%s end\n", __func__);
+}
+
+/*
+ * Create the single supported vap.  Sets up net80211 state, overrides
+ * the state-machine and crypto callbacks, and attaches rate control.
+ * Returns NULL if a vap already exists.
+ */
+struct ieee80211vap *
+iwx_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
+    enum ieee80211_opmode opmode, int flags,
+    const uint8_t bssid[IEEE80211_ADDR_LEN],
+    const uint8_t mac[IEEE80211_ADDR_LEN])
+{
+	struct iwx_vap *ivp;
+	struct ieee80211vap *vap;
+
+	/* Only one vap at a time. */
+	if (!TAILQ_EMPTY(&ic->ic_vaps))
+		return NULL;
+
+	ivp = malloc(sizeof(struct iwx_vap), M_80211_VAP, M_WAITOK | M_ZERO);
+	vap = &ivp->iv_vap;
+	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
+	vap->iv_bmissthreshold = 10;		/* override default */
+
+	/* Chain our state handler in front of net80211's. */
+	ivp->iv_newstate = vap->iv_newstate;
+	vap->iv_newstate = iwx_newstate;
+
+	ivp->id = IWX_DEFAULT_MACID;
+	ivp->color = IWX_DEFAULT_COLOR;
+	ivp->have_wme = TRUE;
+	ivp->ps_disabled = FALSE;
+
+	vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;
+	vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_4;
+
+	/* h/w crypto support */
+	vap->iv_key_alloc = iwx_key_alloc;
+	vap->iv_key_delete = iwx_key_delete;
+	vap->iv_key_set = iwx_key_set;
+	vap->iv_key_update_begin = iwx_key_update_begin;
+	vap->iv_key_update_end = iwx_key_update_end;
+
+	ieee80211_ratectl_init(vap);
+
+	/* Complete setup. */
+	ieee80211_vap_attach(vap, ieee80211_media_change,
+	    ieee80211_media_status, mac);
+	ic->ic_opmode = opmode;
+
+	return vap;
+}
+
+/*
+ * Destroy a vap: undo iwx_vap_create() in reverse order.
+ */
+static void
+iwx_vap_delete(struct ieee80211vap *vap)
+{
+	struct iwx_vap *ivp = IWX_VAP(vap);
+
+	ieee80211_ratectl_deinit(vap);
+	ieee80211_vap_detach(vap);
+	free(ivp, M_80211_VAP);
+}
+
+/*
+ * net80211 "parent" callback: toggles the hardware between running and
+ * stopped depending on the current IWX_FLAG_HW_INITED state.
+ */
+static void
+iwx_parent(struct ieee80211com *ic)
+{
+	struct iwx_softc *sc = ic->ic_softc;
+
+	IWX_LOCK(sc);
+	if ((sc->sc_flags & IWX_FLAG_HW_INITED) != 0) {
+		iwx_stop(sc);
+		sc->sc_flags &= ~IWX_FLAG_HW_INITED;
+	} else {
+		iwx_init(sc);
+		ieee80211_start_all(ic);
+	}
+	IWX_UNLOCK(sc);
+}
+
+/*
+ * Suspend: stop the hardware if it was brought up; always succeeds.
+ */
+static int
+iwx_suspend(device_t dev)
+{
+	struct iwx_softc *sc = device_get_softc(dev);
+
+	if ((sc->sc_flags & IWX_FLAG_HW_INITED) == 0)
+		return (0);
+
+	iwx_stop(sc);
+	sc->sc_flags &= ~IWX_FLAG_HW_INITED;
+	return (0);
+}
+
+/*
+ * Resume: restart the hardware and firmware; on failure power the
+ * device back down and propagate the error.
+ */
+static int
+iwx_resume(device_t dev)
+{
+	struct iwx_softc *sc = device_get_softc(dev);
+	int err;
+
+	err = iwx_start_hw(sc);
+	if (err != 0)
+		return (err);
+
+	err = iwx_init_hw(sc);
+	if (err != 0) {
+		iwx_stop_device(sc);
+		return (err);
+	}
+
+	ieee80211_start_all(&sc->sc_ic);
+	return (0);
+}
+
+/*
+ * Kick off a firmware scan; background scan is used when net80211
+ * requests it, otherwise a regular scan.  On error the scan is
+ * cancelled so net80211 does not wait forever.
+ */
+static void
+iwx_scan_start(struct ieee80211com *ic)
+{
+	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+	struct iwx_softc *sc = ic->ic_softc;
+	int err;
+
+	IWX_LOCK(sc);
+	if (ic->ic_flags_ext & IEEE80211_FEXT_BGSCAN)
+		err = iwx_bgscan(ic);
+	else
+		err = iwx_scan(sc);
+	IWX_UNLOCK(sc);
+
+	if (err != 0)
+		ieee80211_cancel_scan(vap);
+}
+
+/*
+ * Multicast filter updates are not implemented; intentionally a no-op.
+ */
+static void
+iwx_update_mcast(struct ieee80211com *ic)
+{
+}
+
+/*
+ * Per-channel dwell is handled by the firmware scan (scan offload is
+ * advertised via IEEE80211_FEXT_SCAN_OFFLOAD); nothing to do here.
+ */
+static void
+iwx_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
+{
+}
+
+/*
+ * Minimum-dwell notification; firmware-driven scanning needs no action.
+ */
+static void
+iwx_scan_mindwell(struct ieee80211_scan_state *ss)
+{
+}
+
+/*
+ * net80211 scan-end callback: tell the driver to wrap up the scan.
+ */
+static void
+iwx_scan_end(struct ieee80211com *ic)
+{
+	struct iwx_softc *sc = ic->ic_softc;
+
+	iwx_endscan(sc);
+}
+
+/*
+ * Channel-change callback.  Currently a no-op: the disabled code below
+ * sketches the intended phy-context update path but is not yet wired up.
+ */
+static void
+iwx_set_channel(struct ieee80211com *ic)
+{
+#if 0
+	struct iwx_softc *sc = ic->ic_softc;
+	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+
+	IWX_DPRINTF(sc, IWX_DEBUG_NI , "%s:%d NOT IMPLEMENTED\n", __func__, __LINE__);
+	iwx_phy_ctxt_task((void *)sc);
+#endif
+}
+
+/*
+ * Task callback run when the firmware reports scan completion;
+ * notifies net80211 that the scan is done.
+ */
+static void
+iwx_endscan_cb(void *arg, int pending)
+{
+	struct iwx_softc *sc = arg;
+	struct ieee80211vap *vap = TAILQ_FIRST(&sc->sc_ic.ic_vaps);
+
+	DPRINTF(("scan ended\n"));
+	ieee80211_scan_done(vap);
+}
+
+/*
+ * WME parameter update callback; not pushed to firmware here.
+ * Always reports success.
+ */
+static int
+iwx_wme_update(struct ieee80211com *ic)
+{
+	return 0;
+}
+
+/*
+ * Raw frame transmit (bpf injection / management frames).  Frames are
+ * only accepted while the station is active; otherwise EIO is returned.
+ */
+static int
+iwx_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
+    const struct ieee80211_bpf_params *params)
+{
+	struct ieee80211com *ic = ni->ni_ic;
+	struct iwx_softc *sc = ic->ic_softc;
+	int err = EIO;
+
+	IWX_LOCK(sc);
+	if (sc->sc_flags & IWX_FLAG_STA_ACTIVE)
+		err = iwx_tx(sc, m, ni);
+	IWX_UNLOCK(sc);
+
+	return err;
+}
+
+/*
+ * net80211 transmit entry point: queue the mbuf on the driver send
+ * queue and start transmission.  Returns the enqueue error, if any.
+ */
+static int
+iwx_transmit(struct ieee80211com *ic, struct mbuf *m)
+{
+	struct iwx_softc *sc = ic->ic_softc;
+	int err;
+
+	IWX_LOCK(sc);
+	err = mbufq_enqueue(&sc->sc_snd, m);
+	if (err == 0)
+		iwx_start(sc);
+	IWX_UNLOCK(sc);
+
+	return (err);
+}
+
+/*
+ * Start an Rx block-ack session.  Validates the TID and session limits,
+ * records the BA parameters, and defers firmware programming to the
+ * BA Rx task.  Returns 0 on success, ENOSPC or EBUSY on failure.
+ */
+static int
+iwx_ampdu_rx_start(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap,
+    int baparamset, int batimeout, int baseqctl)
+{
+	struct ieee80211com *ic = ni->ni_ic;
+	struct iwx_softc *sc = ic->ic_softc;
+	int tid;
+
+	tid = _IEEE80211_MASKSHIFT(le16toh(baparamset), IEEE80211_BAPS_TID);
+
+	/*
+	 * Validate before indexing sc->ni_rx_ba[]: the previous code wrote
+	 * sc->ni_rx_ba[tid] before checking tid < IWX_MAX_TID_COUNT, which
+	 * could write past the array for out-of-range TIDs.
+	 */
+	if (sc->sc_rx_ba_sessions >= IWX_MAX_RX_BA_SESSIONS ||
+	    tid >= IWX_MAX_TID_COUNT)
+		return ENOSPC;
+
+	if (sc->ba_rx.start_tidmask & (1 << tid)) {
+		DPRINTF(("%s: tid %d already added\n", __func__, tid));
+		return EBUSY;
+	}
+	DPRINTF(("%s: sc->ba_rx.start_tidmask=%x\n", __func__, sc->ba_rx.start_tidmask));
+
+	/* Record the BA parameters for the deferred firmware setup. */
+	sc->ni_rx_ba[tid].ba_winstart =
+	    _IEEE80211_MASKSHIFT(le16toh(baseqctl), IEEE80211_BASEQ_START);
+	sc->ni_rx_ba[tid].ba_winsize =
+	    _IEEE80211_MASKSHIFT(le16toh(baparamset), IEEE80211_BAPS_BUFSIZ);
+	sc->ni_rx_ba[tid].ba_timeout_val = batimeout;
+
+	sc->ba_rx.start_tidmask |= (1 << tid);
+	DPRINTF(("%s: tid=%i\n", __func__, tid));
+	DPRINTF(("%s: ba_winstart=%i\n", __func__, sc->ni_rx_ba[tid].ba_winstart));
+	DPRINTF(("%s: ba_winsize=%i\n", __func__, sc->ni_rx_ba[tid].ba_winsize));
+	DPRINTF(("%s: ba_timeout_val=%i\n", __func__, sc->ni_rx_ba[tid].ba_timeout_val));
+
+	taskqueue_enqueue(sc->sc_tq, &sc->ba_rx_task);
+
+	/* TODO: move into the BA task to serialize with other BA updates. */
+	sc->sc_ampdu_rx_start(ni, rap, baparamset, batimeout, baseqctl);
+
+	return (0);
+}
+
+/*
+ * Rx block-ack teardown callback; intentionally a no-op here.
+ */
+static void
+iwx_ampdu_rx_stop(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap)
+{
+}
+
+/*
+ * ADDBA request callback: mark the TID pending and let the BA Tx task
+ * perform the firmware-side Tx aggregation setup.
+ */
+static int
+iwx_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
+    int dialogtoken, int baparamset, int batimeout)
+{
+	struct iwx_softc *sc = ni->ni_ic->ic_softc;
+	int tid;
+
+	tid = _IEEE80211_MASKSHIFT(le16toh(baparamset), IEEE80211_BAPS_TID);
+	DPRINTF(("%s: tid=%i\n", __func__, tid));
+
+	sc->ba_tx.start_tidmask |= (1 << tid);
+	taskqueue_enqueue(sc->sc_tq, &sc->ba_tx_task);
+
+	return 0;
+}
+
+
+/*
+ * ADDBA response callback; no driver-side action required.
+ */
+static int
+iwx_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
+    int code, int baparamset, int batimeout)
+{
+	return 0;
+}
+
+/*
+ * Key-update transaction begin; no locking needed here, so a no-op.
+ */
+static void
+iwx_key_update_begin(struct ieee80211vap *vap)
+{
+}
+
+/*
+ * Key-update transaction end; nothing to flush, so a no-op.
+ */
+static void
+iwx_key_update_end(struct ieee80211vap *vap)
+{
+}
+
+/*
+ * net80211 key-slot allocation callback.  Non-CCM keys fall through to
+ * the legacy global-table policy below; CCM keys are handled in hardware
+ * via iwx_key_set().
+ *
+ * NOTE(review): the CCM path returns success without assigning *keyix or
+ * *rxkeyix — confirm callers initialize these before use.
+ */
+static int
+iwx_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k,
+    ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix)
+{
+
+	if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_AES_CCM) {
+		return 1;
+	}
+	if (!(&vap->iv_nw_keys[0] <= k &&
+	    k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])) {
+		/*
+		 * Not in the global key table, the driver should handle this
+		 * by allocating a slot in the h/w key table/cache.  In
+		 * lieu of that return key slot 0 for any unicast key
+		 * request.  We disallow the request if this is a group key.
+		 * This default policy does the right thing for legacy hardware
+		 * with a 4 key table.  It also handles devices that pass
+		 * packets through untouched when marked with the WEP bit
+		 * and key index 0.
+		 */
+		if (k->wk_flags & IEEE80211_KEY_GROUP)
+			return 0;
+		*keyix = 0;	/* NB: use key index 0 for ucast key */
+	} else {
+		*keyix = ieee80211_crypto_get_key_wepidx(vap, k);
+	}
+	*rxkeyix = IEEE80211_KEYIX_NONE;	/* XXX maybe *keyix? */
+	return 1;
+}
+
+/*
+ * Program a pairwise or group CCMP key into the firmware via the
+ * ADD_STA_KEY command.  Returns 1 on success (net80211 convention),
+ * or an errno value if the firmware rejects the key.
+ *
+ * NOTE(review): the DPRINTF loop below logs raw key material when
+ * debugging is enabled — consider removing before production use.
+ */
+static int
+iwx_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	struct iwx_softc *sc = ic->ic_softc;
+	struct iwx_add_sta_key_cmd cmd;
+	uint32_t status;
+	int err;
+	int id;
+
+	/* Only CCMP keys are programmed into hardware. */
+	if (k->wk_cipher->ic_cipher != IEEE80211_CIPHER_AES_CCM) {
+		return 1;
+	}
+
+	IWX_LOCK(sc);
+	/*
+	 * Keys are stored in 'ni' so 'k' is valid if 'ni' is valid.
+	 * Currently we only implement station mode where 'ni' is always
+	 * ic->ic_bss so there is no need to validate arguments beyond this:
+	 */
+
+	memset(&cmd, 0, sizeof(cmd));
+
+	if (k->wk_flags & IEEE80211_KEY_GROUP) {
+		DPRINTF(("%s: adding group key\n", __func__));
+	} else {
+		DPRINTF(("%s: adding key\n", __func__));
+	}
+	/* Derive the key index from the key's slot in the global table. */
+	if (k >= &vap->iv_nw_keys[0] &&
+	    k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])
+		id = (k - vap->iv_nw_keys);
+	else
+		id = (0);
+	DPRINTF(("%s: setting keyid=%i\n", __func__, id));
+	cmd.common.key_flags = htole16(IWX_STA_KEY_FLG_CCM |
+	    IWX_STA_KEY_FLG_WEP_KEY_MAP |
+	    ((id << IWX_STA_KEY_FLG_KEYID_POS) &
+	    IWX_STA_KEY_FLG_KEYID_MSK));
+	/* Group keys use firmware key offset 1, pairwise keys offset 0. */
+	if (k->wk_flags & IEEE80211_KEY_GROUP) {
+		cmd.common.key_offset = 1;
+		cmd.common.key_flags |= htole16(IWX_STA_KEY_MULTICAST);
+	} else {
+		cmd.common.key_offset = 0;
+	}
+	memcpy(cmd.common.key, k->wk_key, MIN(sizeof(cmd.common.key),
+	    k->wk_keylen));
+	DPRINTF(("%s: wk_keylen=%i\n", __func__, k->wk_keylen));
+	for (int i=0; i<k->wk_keylen; i++) {
+		DPRINTF(("%s: key[%d]=%x\n", __func__, i, k->wk_key[i]));
+	}
+	cmd.common.sta_id = IWX_STATION_ID;
+
+	/* NOTE(review): %lu assumes LP64 for the 64-bit wk_keytsc. */
+	cmd.transmit_seq_cnt = htole64(k->wk_keytsc);
+	DPRINTF(("%s: k->wk_keytsc=%lu\n", __func__, k->wk_keytsc));
+
+	status = IWX_ADD_STA_SUCCESS;
+	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA_KEY, sizeof(cmd), &cmd,
+	    &status);
+	if (!err && (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS)
+		err = EIO;
+	if (err) {
+		printf("%s: can't set wpa2 keys (error %d)\n", __func__, err);
+		IWX_UNLOCK(sc);
+		return err;
+	} else
+		DPRINTF(("%s: key added successfully\n", __func__));
+	IWX_UNLOCK(sc);
+	return 1;
+}
+
+/*
+ * Key deletion callback; no hardware action taken, report success.
+ */
+static int
+iwx_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k)
+{
+	return 1;
+}
+
+/* newbus device method table for the iwx(4) PCI driver. */
+static device_method_t iwx_pci_methods[] = {
+	/* Device interface */
+	DEVMETHOD(device_probe,		iwx_probe),
+	DEVMETHOD(device_attach,	iwx_attach),
+	DEVMETHOD(device_detach,	iwx_detach),
+	DEVMETHOD(device_suspend,	iwx_suspend),
+	DEVMETHOD(device_resume,	iwx_resume),
+
+	DEVMETHOD_END
+};
+
+/* Driver description: name, methods and per-device softc size. */
+static driver_t iwx_pci_driver = {
+	"iwx",
+	iwx_pci_methods,
+	sizeof (struct iwx_softc)
+};
+
+/*
+ * Register on the PCI bus; PNP info enables automatic module loading
+ * for matching Intel device IDs.  Depends on firmware(9), pci and wlan.
+ */
+DRIVER_MODULE(iwx, pci, iwx_pci_driver, NULL, NULL);
+MODULE_PNP_INFO("U16:device;D:#;T:vendor=0x8086", pci, iwx_pci_driver,
+    iwx_devices, nitems(iwx_devices));
+MODULE_DEPEND(iwx, firmware, 1, 1, 1);
+MODULE_DEPEND(iwx, pci, 1, 1, 1);
+MODULE_DEPEND(iwx, wlan, 1, 1, 1);
diff --git a/sys/dev/iwx/if_iwx_debug.h b/sys/dev/iwx/if_iwx_debug.h
new file mode 100644
--- /dev/null
+++ b/sys/dev/iwx/if_iwx_debug.h
@@ -0,0 +1,453 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2015 Adrian Chadd <adrian@FreeBSD.org>
+ *
+ * Copyright (c) 2024 The FreeBSD Foundation
+ *
+ * This software was developed by Tom Jones <thj@FreeBSD.org> under sponsorship
+ * from the FreeBSD Foundation.
+ */
+
+//#include <dev/iwx/if_iwxreg.h>
+
+#ifndef __IF_IWX_DEBUG_H__
+#define __IF_IWX_DEBUG_H__
+
+#ifdef IWX_DEBUG
+/*
+ * Debug message categories.  OR these bits into sc->sc_debug (tested by
+ * IWX_DPRINTF below) to enable the corresponding diagnostics at runtime.
+ */
+enum {
+	IWX_DEBUG_XMIT = 0x00000001, /* basic xmit operation */
+	IWX_DEBUG_RECV = 0x00000002, /* basic recv operation */
+	IWX_DEBUG_STATE = 0x00000004, /* 802.11 state transitions */
+	IWX_DEBUG_TXPOW = 0x00000008, /* tx power processing */
+	IWX_DEBUG_RESET = 0x00000010, /* reset processing */
+	IWX_DEBUG_OPS = 0x00000020, /* iwx_ops processing */
+	IWX_DEBUG_BEACON = 0x00000040, /* beacon handling */
+	IWX_DEBUG_WATCHDOG = 0x00000080, /* watchdog timeout */
+	IWX_DEBUG_INTR = 0x00000100, /* ISR */
+	IWX_DEBUG_CALIBRATE = 0x00000200, /* periodic calibration */
+	IWX_DEBUG_NODE = 0x00000400, /* node management */
+	IWX_DEBUG_LED = 0x00000800, /* led management */
+	IWX_DEBUG_CMD = 0x00001000, /* cmd submission */
+	IWX_DEBUG_TXRATE = 0x00002000, /* TX rate debugging */
+	IWX_DEBUG_PWRSAVE = 0x00004000, /* Power save operations */
+	IWX_DEBUG_SCAN = 0x00008000, /* Scan related operations */
+	IWX_DEBUG_STATS = 0x00010000, /* Statistics updates */
+	IWX_DEBUG_FIRMWARE_TLV = 0x00020000, /* Firmware TLV parsing */
+	IWX_DEBUG_TRANS = 0x00040000, /* Transport layer (eg PCIe) */
+	IWX_DEBUG_EEPROM = 0x00080000, /* EEPROM/channel information */
+	IWX_DEBUG_TEMP = 0x00100000, /* Thermal Sensor handling */
+	IWX_DEBUG_FW = 0x00200000, /* Firmware management */
+	IWX_DEBUG_LAR = 0x00400000, /* Location Aware Regulatory */
+	IWX_DEBUG_TE = 0x00800000, /* Time Event handling */
+	/* 0x0n000000 are available */
+	IWX_DEBUG_NI = 0x10000000, /* Not Implemented */
+	IWX_DEBUG_REGISTER = 0x20000000, /* print chipset register */
+	IWX_DEBUG_TRACE = 0x40000000, /* Print begin and start driver function */
+	IWX_DEBUG_FATAL = 0x80000000, /* fatal errors */
+	IWX_DEBUG_ANY = 0xffffffff
+};
+
+/*
+ * IWX_DPRINTF(sc, mask, fmt, ...): print via device_printf() when any bit
+ * of 'mask' is set in sc->sc_debug.  Without IWX_DEBUG it compiles to a
+ * no-op that still mentions 'sc' to avoid unused-variable warnings.
+ */
+#define IWX_DPRINTF(sc, m, fmt, ...) do { \
+ if (sc->sc_debug & (m)) \
+ device_printf(sc->sc_dev, fmt, ##__VA_ARGS__); \
+} while (0)
+#else
+#define IWX_DPRINTF(sc, m, fmt, ...) do { (void) sc; } while (0)
+#endif
+
+/* Debug helpers exported to the rest of the driver. */
+void iwx_dump_cmd(struct iwx_host_cmd *, const char *, int);
+void iwx_bbl_add_entry(uint64_t, int);
+void print_opcode(const char *, int, int, uint32_t);
+
+/*
+ * Black-box log entry type bits, used to classify and filter entries of
+ * the iwx_bb_log ring below.
+ *
+ * Fix: these macros previously sat after the include guard's #endif and
+ * would be redefined on repeated inclusion; keep them inside the guard.
+ */
+#define IWX_BBL_NONE 0x00
+#define IWX_BBL_PKT_TX 0x01
+#define IWX_BBL_PKT_RX 0x02
+#define IWX_BBL_PKT_DUP 0x04
+#define IWX_BBL_CMD_TX 0x10
+#define IWX_BBL_CMD_RX 0x20
+#define IWX_BBL_ANY 0xFF
+
+#endif /* __IF_IWX_DEBUG_H__ */
+
+/* Return the symbolic name of a black-box log entry type. */
+static const char *
+iwx_bbl_to_str(int type)
+{
+	if (type == IWX_BBL_PKT_TX)
+		return ("IWX_BBL_PKT_TX");
+	if (type == IWX_BBL_PKT_RX)
+		return ("IWX_BBL_PKT_RX");
+	if (type == IWX_BBL_PKT_DUP)
+		return ("IWX_BBL_PKT_DUP");
+	if (type == IWX_BBL_CMD_TX)
+		return ("IWX_BBL_CMD_TX");
+	if (type == IWX_BBL_CMD_RX)
+		return ("IWX_BBL_CMD_RX");
+	if (type == IWX_BBL_ANY)
+		return ("IWX_BBL_ANY");
+	return ("ERROR");
+}
+
+/*
+ * Runtime filters for print_opcode(): print_mask selects entry types to
+ * print; (group, opcode) pairs in print_codes[] are always printed.
+ * NOTE(review): non-static globals defined in a header — duplicate
+ * symbols if this header is ever included from more than one file.
+ */
+int print_mask = IWX_BBL_NONE; //IWX_BBL_NONE | IWX_BBL_CMD_TX;
+int print_codes[][2] = {
+#if 0
+	for example:
+	IWX_LEGACY_GROUP, IWX_ADD_STA_KEY,
+	IWX_LEGACY_GROUP, IWX_SCD_QUEUE_CONFIG_CMD,
+	IWX_LEGACY_GROUP, IWX_ADD_STA,
+	IWX_LEGACY_GROUP, IWX_REMOVE_STA,
+#endif
+};
+
+/*
+ * Runtime filters for iwx_dump_cmd(): dump_mask selects entry types to
+ * hexdump; (group, opcode) pairs in dump_codes[] are always dumped.
+ * NOTE(review): non-static globals defined in a header — see print_mask.
+ */
+int dump_mask = IWX_BBL_NONE;
+int dump_codes[][2] = {
+#if 0
+	for example:
+	IWX_LEGACY_GROUP, IWX_ADD_STA_KEY,
+	IWX_LEGACY_GROUP, IWX_SCD_QUEUE_CONFIG_CMD,
+	IWX_LEGACY_GROUP, IWX_ADD_STA,
+	IWX_LEGACY_GROUP, IWX_REMOVE_STA,
+#endif
+};
+
+/* One (opcode -> printable name) mapping; tables end with a NULL label. */
+struct opcode_label {
+	uint8_t opcode;
+	const char *label;
+};
+
+/* Command group ids -> names; NULL label terminates (see get_label()). */
+struct opcode_label command_group[] = {
+	{ 0x0, "IWX_LEGACY_GROUP"},
+	{ 0x1, "IWX_LONG_GROUP"},
+	{ 0x2, "IWX_SYSTEM_GROUP"},
+	{ 0x3, "IWX_MAC_CONF_GROUP"},
+	{ 0x4, "IWX_PHY_OPS_GROUP"},
+	{ 0x5, "IWX_DATA_PATH_GROUP"},
+	{ 0xb, "IWX_PROT_OFFLOAD_GROUP"},
+	{ 0xc, "IWX_REGULATORY_AND_NVM_GROUP"},
+	{ 0, NULL }
+};
+
+/*
+ * IWX_LEGACY_GROUP / IWX_LONG_GROUP opcode names; NULL label terminates.
+ * NOTE(review): several opcodes appear more than once (e.g. 0x01, 0x08,
+ * 0xfe/0xFE, 0xff/0xFF); get_label() returns the first match, so the
+ * later duplicates are never reported — verify against if_iwxreg.h.
+ */
+struct opcode_label legacy_opcodes[] = {
+	{ 0xc0, "IWX_REPLY_RX_PHY_CMD" },
+	{ 0xc1, "IWX_REPLY_RX_MPDU_CMD" },
+	{ 0xc2, "IWX_BAR_FRAME_RELEASE" },
+	{ 0xc3, "IWX_FRAME_RELEASE" },
+	{ 0xc5, "IWX_BA_NOTIF" },
+	{ 0x62, "IWX_TEMPERATURE_NOTIFICATION" },
+	{ 0xc8, "IWX_MCC_UPDATE_CMD" },
+	{ 0xc9, "IWX_MCC_CHUB_UPDATE_CMD" },
+	{ 0x65, "IWX_CALIBRATION_CFG_CMD" },
+	{ 0x66, "IWX_CALIBRATION_RES_NOTIFICATION" },
+	{ 0x67, "IWX_CALIBRATION_COMPLETE_NOTIFICATION" },
+	{ 0x68, "IWX_RADIO_VERSION_NOTIFICATION" },
+	{ 0x00, "IWX_CMD_DTS_MEASUREMENT_TRIGGER_WIDE" },
+	{ 0x01, "IWX_SOC_CONFIGURATION_CMD" },
+	{ 0x02, "IWX_REPLY_ERROR" },
+	{ 0x03, "IWX_CTDP_CONFIG_CMD" },
+	{ 0x04, "IWX_INIT_COMPLETE_NOTIF" },
+	{ 0x05, "IWX_SESSION_PROTECTION_CMD" },
+	{ 0x5d, "IWX_BT_COEX_CI" },
+	{ 0x07, "IWX_FW_ERROR_RECOVERY_CMD" },
+	{ 0x08, "IWX_RLC_CONFIG_CMD" },
+	{ 0xd0, "IWX_MCAST_FILTER_CMD" },
+	{ 0xd1, "IWX_REPLY_SF_CFG_CMD" },
+	{ 0xd2, "IWX_REPLY_BEACON_FILTERING_CMD" },
+	{ 0xd3, "IWX_D3_CONFIG_CMD" },
+	{ 0xd4, "IWX_PROT_OFFLOAD_CONFIG_CMD" },
+	{ 0xd5, "IWX_OFFLOADS_QUERY_CMD" },
+	{ 0xd6, "IWX_REMOTE_WAKE_CONFIG_CMD" },
+	{ 0x77, "IWX_POWER_TABLE_CMD" },
+	{ 0x78, "IWX_PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION" },
+	{ 0xcc, "IWX_BT_COEX_PRIO_TABLE" },
+	{ 0xcd, "IWX_BT_COEX_PROT_ENV" },
+	{ 0xce, "IWX_BT_PROFILE_NOTIFICATION" },
+	{ 0x6a, "IWX_PHY_CONFIGURATION_CMD" },
+	{ 0x16, "IWX_RX_BAID_ALLOCATION_CONFIG_CMD" },
+	{ 0x17, "IWX_ADD_STA_KEY" },
+	{ 0x18, "IWX_ADD_STA" },
+	{ 0x19, "IWX_REMOVE_STA" },
+	{ 0xe0, "IWX_WOWLAN_PATTERNS" },
+	{ 0xe1, "IWX_WOWLAN_CONFIGURATION" },
+	{ 0xe2, "IWX_WOWLAN_TSC_RSC_PARAM" },
+	{ 0xe3, "IWX_WOWLAN_TKIP_PARAM" },
+	{ 0xe4, "IWX_WOWLAN_KEK_KCK_MATERIAL" },
+	{ 0xe5, "IWX_WOWLAN_GET_STATUSES" },
+	{ 0xe6, "IWX_WOWLAN_TX_POWER_PER_DB" },
+	{ 0x0f, "IWX_SCAN_COMPLETE_UMAC" },
+	{ 0x88, "IWX_NVM_ACCESS_CMD" },
+	{ 0x20, "IWX_WEP_KEY" },
+	{ 0xdc, "IWX_CMD_DTS_MEASUREMENT_TRIGGER" },
+	{ 0xdd, "IWX_DTS_MEASUREMENT_NOTIFICATION" },
+	{ 0x28, "IWX_MAC_CONTEXT_CMD" },
+	{ 0x29, "IWX_TIME_EVENT_CMD" },
+	{ 0x01, "IWX_ALIVE" },
+	{ 0xf0, "IWX_REPLY_DEBUG_CMD" },
+	{ 0x90, "IWX_BEACON_NOTIFICATION" },
+	{ 0xf5, "IWX_RX_NO_DATA_NOTIF" },
+	{ 0x08, "IWX_PHY_CONTEXT_CMD" },
+	{ 0x91, "IWX_BEACON_TEMPLATE_CMD" },
+	{ 0xf6, "IWX_THERMAL_DUAL_CHAIN_REQUEST" },
+	{ 0x09, "IWX_DBG_CFG" },
+	{ 0xf7, "IWX_DEBUG_LOG_MSG" },
+	{ 0x1c, "IWX_TX_CMD" },
+	{ 0x1d, "IWX_SCD_QUEUE_CFG" },
+	{ 0x1e, "IWX_TXPATH_FLUSH" },
+	{ 0x1f, "IWX_MGMT_MCAST_KEY" },
+	{ 0x98, "IWX_TX_ANT_CONFIGURATION_CMD" },
+	{ 0xee, "IWX_LTR_CONFIG" },
+	{ 0x8e, "IWX_SET_CALIB_DEFAULT_CMD" },
+	{ 0xFE, "IWX_CT_KILL_NOTIFICATION" },
+	{ 0xFF, "IWX_DTS_MEASUREMENT_NOTIF_WIDE" },
+	{ 0x2a, "IWX_TIME_EVENT_NOTIFICATION" },
+	{ 0x2b, "IWX_BINDING_CONTEXT_CMD" },
+	{ 0x2c, "IWX_TIME_QUOTA_CMD" },
+	{ 0x2d, "IWX_NON_QOS_TX_COUNTER_CMD" },
+	{ 0xa0, "IWX_CARD_STATE_CMD" },
+	{ 0xa1, "IWX_CARD_STATE_NOTIFICATION" },
+	{ 0xa2, "IWX_MISSED_BEACONS_NOTIFICATION" },
+	{ 0x0c, "IWX_SCAN_CFG_CMD" },
+	{ 0x0d, "IWX_SCAN_REQ_UMAC" },
+	{ 0xfb, "IWX_SESSION_PROTECTION_NOTIF" },
+	{ 0x0e, "IWX_SCAN_ABORT_UMAC" },
+	{ 0xfe, "IWX_PNVM_INIT_COMPLETE" },
+	{ 0xa9, "IWX_MAC_PM_POWER_TABLE" },
+	{ 0xff, "IWX_FSEQ_VER_MISMATCH_NOTIFICATION | IWX_REPLY_MAX" },
+	{ 0x9b, "IWX_BT_CONFIG" },
+	{ 0x9c, "IWX_STATISTICS_CMD" },
+	{ 0x9d, "IWX_STATISTICS_NOTIFICATION" },
+	{ 0x9f, "IWX_REDUCE_TX_POWER_CMD" },
+	{ 0xb1, "IWX_MFUART_LOAD_NOTIFICATION" },
+	{ 0xb5, "IWX_SCAN_ITERATION_COMPLETE_UMAC" },
+	{ 0x54, "IWX_NET_DETECT_CONFIG_CMD" },
+	{ 0x56, "IWX_NET_DETECT_PROFILES_QUERY_CMD" },
+	{ 0x57, "IWX_NET_DETECT_PROFILES_CMD" },
+	{ 0x58, "IWX_NET_DETECT_HOTSPOTS_CMD" },
+	{ 0x59, "IWX_NET_DETECT_HOTSPOTS_QUERY_CMD" },
+	{ 0, NULL }
+};
+
+/* SYSTEM_GROUP group subcommand IDs; NULL label terminates. */
+struct opcode_label system_opcodes[] = {
+	{ 0x00, "IWX_SHARED_MEM_CFG_CMD" },
+	{ 0x01, "IWX_SOC_CONFIGURATION_CMD" },
+	{ 0x03, "IWX_INIT_EXTENDED_CFG_CMD" },
+	{ 0x07, "IWX_FW_ERROR_RECOVERY_CMD" },
+	{ 0xff, "IWX_FSEQ_VER_MISMATCH_NOTIFICATION | IWX_REPLY_MAX" },
+	{ 0, NULL }
+};
+/* MAC_CONF group subcommand IDs; NULL label terminates. */
+struct opcode_label macconf_opcodes[] = {
+	{ 0x05, "IWX_SESSION_PROTECTION_CMD" },
+	{ 0xfb, "IWX_SESSION_PROTECTION_NOTIF" },
+	{ 0, NULL }
+};
+/* DATA_PATH group subcommand IDs; NULL label terminates. */
+struct opcode_label data_opcodes[] = {
+	{ 0x00, "IWX_DQA_ENABLE_CMD" },
+	{ 0x08, "IWX_RLC_CONFIG_CMD" },
+	{ 0x0f, "IWX_TLC_MNG_CONFIG_CMD" },
+	{ 0x16, "IWX_RX_BAID_ALLOCATION_CONFIG_CMD" },
+	{ 0x17, "IWX_SCD_QUEUE_CONFIG_CMD" },
+	{ 0xf5, "IWX_RX_NO_DATA_NOTIF" },
+	{ 0xf6, "IWX_THERMAL_DUAL_CHAIN_REQUEST" },
+	{ 0xf7, "IWX_TLC_MNG_UPDATE_NOTIF" },
+	{ 0, NULL }
+};
+
+/* REGULATORY_AND_NVM group subcommand IDs; NULL label terminates. */
+struct opcode_label reg_opcodes[] = {
+	{ 0x00, "IWX_NVM_ACCESS_COMPLETE" },
+	{ 0x02, "IWX_NVM_GET_INFO " },
+	{ 0xfe, "IWX_PNVM_INIT_COMPLETE" },
+	{ 0, NULL }
+};
+
+/* PHY_OPS subcommand IDs; NULL label terminates. */
+struct opcode_label phyops_opcodes[] = {
+	{0x00, "IWX_CMD_DTS_MEASUREMENT_TRIGGER_WIDE"},
+	{0x03, "IWX_CTDP_CONFIG_CMD"},
+	{0x04, "IWX_TEMP_REPORTING_THRESHOLDS_CMD"},
+	{0xFE, "IWX_CT_KILL_NOTIFICATION"},
+	{0xFF, "IWX_DTS_MEASUREMENT_NOTIF_WIDE"},
+	/*
+	 * Fix: get_label() walks until it sees a NULL label; without this
+	 * sentinel it would read past the end of the array on a miss.
+	 */
+	{ 0, NULL }
+};
+
+/*
+ * Look up 'opcode' in a NULL-terminated opcode_label table and return its
+ * printable name, or a placeholder when the opcode is not listed.
+ */
+static const char *
+get_label(struct opcode_label *table, uint8_t opcode)
+{
+	struct opcode_label *entry;
+
+	for (entry = table; entry->label != NULL; entry++) {
+		if (entry->opcode == opcode)
+			return (entry->label);
+	}
+	return ("NOT FOUND IN TABLE");
+}
+
+/*
+ * Map a command group id to its opcode name table.  Returns NULL for
+ * groups without a table (e.g. IWX_PROT_OFFLOAD_GROUP) or unknown ids.
+ */
+static struct opcode_label *
+get_table(uint8_t group)
+{
+	switch (group) {
+	case IWX_LEGACY_GROUP:
+	case IWX_LONG_GROUP:
+		return (legacy_opcodes);
+	case IWX_SYSTEM_GROUP:
+		return (system_opcodes);
+	case IWX_MAC_CONF_GROUP:
+		return (macconf_opcodes);
+	case IWX_DATA_PATH_GROUP:
+		return (data_opcodes);
+	case IWX_REGULATORY_AND_NVM_GROUP:
+		return (reg_opcodes);
+	case IWX_PHY_OPS_GROUP:
+		return (phyops_opcodes);
+	case IWX_PROT_OFFLOAD_GROUP:
+	default:
+		return (NULL);
+	}
+}
+
+/*
+ * Log one command opcode for debugging.  An entry is printed when its
+ * 'type' bit is enabled in print_mask or its (group, opcode) pair is
+ * listed in print_codes[].
+ */
+void
+print_opcode(const char *func, int line, int type, uint32_t code)
+{
+	int print = print_mask & type;
+	uint8_t opcode = iwx_cmd_opcode(code);
+	uint8_t group = iwx_cmd_groupid(code);
+
+	struct opcode_label *table = get_table(group);
+	if (table == NULL) {
+		/* Fix: terminate the diagnostic with a newline. */
+		printf("Couldn't find opcode table for 0x%08x\n", code);
+		return;
+	}
+
+	for (int i = 0; i < nitems(print_codes); i++)
+		if (print_codes[i][0] == group && print_codes[i][1] == opcode)
+			print = 1;
+
+	if (print) {
+		printf("%s:%d %s\t%s\t%s\t(0x%08x)\n", func, line,
+		    iwx_bbl_to_str(type), get_label(command_group, group),
+		    get_label(table, opcode), code);
+	}
+}
+
+/*
+ * Hexdump a host command payload when its 'type' bit is enabled in
+ * dump_mask or its (group, opcode) pair is listed in dump_codes[].
+ */
+void
+iwx_dump_cmd(struct iwx_host_cmd *cmd, const char *str, int type)
+{
+	uint8_t opcode = iwx_cmd_opcode(cmd->id);
+	uint8_t group = iwx_cmd_groupid(cmd->id);
+	int dump = (dump_mask & type);
+
+	for (int i = 0; i < nitems(dump_codes); i++) {
+		if (dump_codes[i][0] == group && dump_codes[i][1] == opcode) {
+			dump = 1;
+			break;
+		}
+	}
+
+	if (dump)
+		hexdump((const void *)cmd->data[0], cmd->len[0], str, 0);
+}
+
+#define IWX_BBL_ENTRIES 2000
+/*
+ * Black-box flight-recorder ring of the last IWX_BBL_ENTRIES commands and
+ * packets.  Repeated identical entries are coalesced into one record when
+ * 'compress' is set (see iwx_bbl_add_entry()).
+ */
+struct iwx_bbl_entry {
+	uint8_t type;	/* IWX_BBL_* classification */
+	uint64_t code;	/* command id, packet length or dup count */
+	uint32_t seq;	/* global sequence number (from bbl_seq) */
+	uint32_t ticks;	/* system ticks when recorded */
+	uint32_t count;	/* coalesced repeat count */
+} iwx_bb_log[IWX_BBL_ENTRIES];
+
+uint16_t bbl_idx = 0;	/* current write index into iwx_bb_log */
+uint32_t bbl_seq = 0;	/* monotonically increasing sequence number */
+uint8_t compress = 1;	/* coalesce repeated entries when non-zero */
+
+
+/*
+ * Append an entry to the black-box log.  Identical back-to-back entries
+ * are coalesced in place (count/seq/ticks updated) when 'compress' is set.
+ */
+void
+iwx_bbl_add_entry(uint64_t code, int type)
+{
+	/*
+	 * Compress together repeated notifications, but increment the sequence
+	 * number so we can track things processing.
+	 */
+	if (compress && (iwx_bb_log[bbl_idx].code == code &&
+	    iwx_bb_log[bbl_idx].type == type)) {
+		iwx_bb_log[bbl_idx].count++;
+		iwx_bb_log[bbl_idx].seq = bbl_seq++;
+		iwx_bb_log[bbl_idx].ticks = ticks;
+		return;
+	}
+
+	/*
+	 * Fix: advance the write index and wrap before it can reach
+	 * IWX_BBL_ENTRIES.  The previous test (bbl_idx++ > IWX_BBL_ENTRIES)
+	 * allowed writes at indices IWX_BBL_ENTRIES and IWX_BBL_ENTRIES + 1,
+	 * past the end of iwx_bb_log[].
+	 */
+	if (++bbl_idx >= IWX_BBL_ENTRIES) {
+#if 0
+		printf("iwx bbl roll over: type %d (%lu)\n", type, code);
+#endif
+		bbl_idx = 0;
+	}
+	iwx_bb_log[bbl_idx].code = code;
+	iwx_bb_log[bbl_idx].type = type;
+	iwx_bb_log[bbl_idx].seq = bbl_seq++;
+	iwx_bb_log[bbl_idx].ticks = ticks;
+	iwx_bb_log[bbl_idx].count = 1;
+}
+
+/*
+ * Pretty-print one black-box log entry.
+ * NOTE(review): IWX_BBL_PKT_RX entries are recorded but have no case here
+ * and print only the trailing newline — confirm whether that is intended.
+ */
+static void
+iwx_bbl_print_entry(struct iwx_bbl_entry *e)
+{
+	uint8_t opcode = iwx_cmd_opcode(e->code);
+	uint8_t group = iwx_cmd_groupid(e->code);
+
+	switch(e->type) {
+	case IWX_BBL_PKT_TX:
+		printf("pkt ");
+		printf("seq %08d\t pkt len %ld",
+		    e->seq, e->code);
+		break;
+	/*
+	 * Fix: this case label was missing, leaving the "pkt dup"
+	 * statements below unreachable after the preceding break.
+	 */
+	case IWX_BBL_PKT_DUP:
+		printf("pkt dup ");
+		printf("seq %08d\t dup count %ld",
+		    e->seq, e->code);
+		break;
+	case IWX_BBL_CMD_TX:
+		printf("tx -> ");
+		printf("seq %08d\tcode 0x%08lx (%s:%s)",
+		    e->seq, e->code, get_label(command_group, group),
+		    get_label(get_table(group), opcode));
+		break;
+	case IWX_BBL_CMD_RX:
+		printf("rx ");
+		printf("seq %08d\tcode 0x%08lx (%s:%s)",
+		    e->seq, e->code, get_label(command_group, group),
+		    get_label(get_table(group), opcode));
+		break;
+	}
+	if (e->count > 1)
+		printf(" (count %d)", e->count);
+	printf("\n");
+}
+
+/*
+ * Dump the whole black-box log in chronological order, starting at the
+ * oldest entry (one slot past the current write index).
+ */
+static void
+iwx_bbl_print_log(void)
+{
+	int first, i, n;
+
+	first = bbl_idx + 1;
+	if (first > IWX_BBL_ENTRIES - 1)
+		first = 0;
+
+	for (n = 0; n < IWX_BBL_ENTRIES; n++) {
+		struct iwx_bbl_entry *e;
+
+		i = first + n;
+		if (i >= IWX_BBL_ENTRIES)
+			i -= IWX_BBL_ENTRIES;
+		e = &iwx_bb_log[i];
+		printf("bbl entry %05d %05d: ", i, e->ticks);
+		iwx_bbl_print_entry(e);
+	}
+	printf("iwx bblog index %d seq %d\n", bbl_idx, bbl_seq);
+}
diff --git a/sys/dev/iwx/if_iwxreg.h b/sys/dev/iwx/if_iwxreg.h
new file mode 100644
--- /dev/null
+++ b/sys/dev/iwx/if_iwxreg.h
@@ -0,0 +1,7924 @@
+/* $OpenBSD: if_iwxreg.h,v 1.51 2023/03/06 11:18:37 stsp Exp $ */
+
+/*-
+ * Based on BSD-licensed source modules in the Linux iwlwifi driver,
+ * which were used as the reference documentation for this implementation.
+ *
+ ******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 - 2019 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 - 2019 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************
+ */
+
+
+/* maximum number of DRAM map entries supported by FW */
+#define IWX_MAX_DRAM_ENTRY 64
+#define IWX_CSR_CTXT_INFO_BA 0x40
+
+/**
+ * enum iwx_context_info_flags - Context information control flags
+ * @IWX_CTXT_INFO_AUTO_FUNC_INIT: If set, FW will not wait before interrupting
+ * the init done for driver command that configures several system modes
+ * @IWX_CTXT_INFO_EARLY_DEBUG: enable early debug
+ * @IWX_CTXT_INFO_ENABLE_CDMP: enable core dump
+ * @IWX_CTXT_INFO_RB_CB_SIZE_POS: position of the RBD Cyclic Buffer Size
+ * exponent, the actual size is 2**value, valid sizes are 8-2048.
+ * The value is four bits long. Maximum valid exponent is 12
+ * @IWX_CTXT_INFO_TFD_FORMAT_LONG: use long TFD Format (the
+ * default is short format - not supported by the driver)
+ * @IWX_CTXT_INFO_RB_SIZE_POS: RB size position
+ * (values are IWX_CTXT_INFO_RB_SIZE_*K)
+ * @IWX_CTXT_INFO_RB_SIZE_1K: Value for 1K RB size
+ * @IWX_CTXT_INFO_RB_SIZE_2K: Value for 2K RB size
+ * @IWX_CTXT_INFO_RB_SIZE_4K: Value for 4K RB size
+ * @IWX_CTXT_INFO_RB_SIZE_8K: Value for 8K RB size
+ * @IWX_CTXT_INFO_RB_SIZE_12K: Value for 12K RB size
+ * @IWX_CTXT_INFO_RB_SIZE_16K: Value for 16K RB size
+ * @IWX_CTXT_INFO_RB_SIZE_20K: Value for 20K RB size
+ * @IWX_CTXT_INFO_RB_SIZE_24K: Value for 24K RB size
+ * @IWX_CTXT_INFO_RB_SIZE_28K: Value for 28K RB size
+ * @IWX_CTXT_INFO_RB_SIZE_32K: Value for 32K RB size
+ */
+enum iwx_context_info_flags {
+ IWX_CTXT_INFO_AUTO_FUNC_INIT = (1 << 0),
+ IWX_CTXT_INFO_EARLY_DEBUG = (1 << 1),
+ IWX_CTXT_INFO_ENABLE_CDMP = (1 << 2),
+ IWX_CTXT_INFO_RB_CB_SIZE_POS = 4,
+ IWX_CTXT_INFO_TFD_FORMAT_LONG = (1 << 8),
+ IWX_CTXT_INFO_RB_SIZE_POS = 9,
+ IWX_CTXT_INFO_RB_SIZE_1K = 0x1,
+ IWX_CTXT_INFO_RB_SIZE_2K = 0x2,
+ IWX_CTXT_INFO_RB_SIZE_4K = 0x4,
+ IWX_CTXT_INFO_RB_SIZE_8K = 0x8,
+ IWX_CTXT_INFO_RB_SIZE_12K = 0x9,
+ IWX_CTXT_INFO_RB_SIZE_16K = 0xa,
+ IWX_CTXT_INFO_RB_SIZE_20K = 0xb,
+ IWX_CTXT_INFO_RB_SIZE_24K = 0xc,
+ IWX_CTXT_INFO_RB_SIZE_28K = 0xd,
+ IWX_CTXT_INFO_RB_SIZE_32K = 0xe,
+};
+
+/*
+ * struct iwx_context_info_version - version structure
+ * @mac_id: SKU and revision id
+ * @version: context information version id
+ * @size: the size of the context information in DWs
+ */
+struct iwx_context_info_version {
+ uint16_t mac_id;
+ uint16_t version;
+ uint16_t size;
+ uint16_t reserved;
+} __packed;
+
+/*
+ * struct iwx_context_info_control - version structure
+ * @control_flags: context information flags see &enum iwx_context_info_flags
+ */
+struct iwx_context_info_control {
+ uint32_t control_flags;
+ uint32_t reserved;
+} __packed;
+
+/*
+ * struct iwx_context_info_dram - images DRAM map
+ * each entry in the map represents a DRAM chunk of up to 32 KB
+ * @umac_img: UMAC image DRAM map
+ * @lmac_img: LMAC image DRAM map
+ * @virtual_img: paged image DRAM map
+ */
+struct iwx_context_info_dram {
+ uint64_t umac_img[IWX_MAX_DRAM_ENTRY];
+ uint64_t lmac_img[IWX_MAX_DRAM_ENTRY];
+ uint64_t virtual_img[IWX_MAX_DRAM_ENTRY];
+} __packed;
+
+/*
+ * struct iwx_context_info_rbd_cfg - RBDs configuration
+ * @free_rbd_addr: default queue free RB CB base address
+ * @used_rbd_addr: default queue used RB CB base address
+ * @status_wr_ptr: default queue used RB status write pointer
+ */
+struct iwx_context_info_rbd_cfg {
+ uint64_t free_rbd_addr;
+ uint64_t used_rbd_addr;
+ uint64_t status_wr_ptr;
+} __packed;
+
+/*
+ * struct iwx_context_info_hcmd_cfg - command queue configuration
+ * @cmd_queue_addr: address of command queue
+ * @cmd_queue_size: number of entries
+ */
+struct iwx_context_info_hcmd_cfg {
+ uint64_t cmd_queue_addr;
+ uint8_t cmd_queue_size;
+ uint8_t reserved[7];
+} __packed;
+
+/*
+ * struct iwx_context_info_dump_cfg - Core Dump configuration
+ * @core_dump_addr: core dump (debug DRAM address) start address
+ * @core_dump_size: size, in DWs
+ */
+struct iwx_context_info_dump_cfg {
+ uint64_t core_dump_addr;
+ uint32_t core_dump_size;
+ uint32_t reserved;
+} __packed;
+
+/*
+ * struct iwx_context_info_pnvm_cfg - platform NVM data configuration
+ * @platform_nvm_addr: Platform NVM data start address
+ * @platform_nvm_size: size in DWs
+ */
+struct iwx_context_info_pnvm_cfg {
+ uint64_t platform_nvm_addr;
+ uint32_t platform_nvm_size;
+ uint32_t reserved;
+} __packed;
+
+/*
+ * struct iwx_context_info_early_dbg_cfg - early debug configuration for
+ * dumping DRAM addresses
+ * @early_debug_addr: early debug start address
+ * @early_debug_size: size in DWs
+ */
+struct iwx_context_info_early_dbg_cfg {
+ uint64_t early_debug_addr;
+ uint32_t early_debug_size;
+ uint32_t reserved;
+} __packed;
+
+/*
+ * struct iwx_context_info - device INIT configuration
+ * @version: version information of context info and HW
+ * @control: control flags of FH configurations
+ * @rbd_cfg: default RX queue configuration
+ * @hcmd_cfg: command queue configuration
+ * @dump_cfg: core dump data
+ * @edbg_cfg: early debug configuration
+ * @pnvm_cfg: platform nvm configuration
+ * @dram: firmware image addresses in DRAM
+ */
+struct iwx_context_info {
+ struct iwx_context_info_version version;
+ struct iwx_context_info_control control;
+ uint64_t reserved0;
+ struct iwx_context_info_rbd_cfg rbd_cfg;
+ struct iwx_context_info_hcmd_cfg hcmd_cfg;
+ uint32_t reserved1[4];
+ struct iwx_context_info_dump_cfg dump_cfg;
+ struct iwx_context_info_early_dbg_cfg edbg_cfg;
+ struct iwx_context_info_pnvm_cfg pnvm_cfg;
+ uint32_t reserved2[16];
+ struct iwx_context_info_dram dram;
+ uint32_t reserved3[16];
+} __packed;
+
+
+/*
+ * Context info definitions for AX210 devices.
+ */
+
+#define IWX_CSR_CTXT_INFO_BOOT_CTRL 0x0
+#define IWX_CSR_CTXT_INFO_ADDR 0x118
+#define IWX_CSR_IML_DATA_ADDR 0x120
+#define IWX_CSR_IML_SIZE_ADDR 0x128
+#define IWX_CSR_IML_RESP_ADDR 0x12c
+
+/* Set bit for enabling automatic function boot */
+#define IWX_CSR_AUTO_FUNC_BOOT_ENA (1 << 1)
+/* Set bit for initiating function boot */
+#define IWX_CSR_AUTO_FUNC_INIT (1 << 7)
+
+/**
+ * iwx_prph_scratch_mtr_format - tfd size configuration
+ * @IWX_PRPH_MTR_FORMAT_16B: 16 bit tfd
+ * @IWX_PRPH_MTR_FORMAT_32B: 32 bit tfd
+ * @IWX_PRPH_MTR_FORMAT_64B: 64 bit tfd
+ * @IWX_PRPH_MTR_FORMAT_256B: 256 bit tfd
+ */
+#define IWX_PRPH_MTR_FORMAT_16B 0x0
+#define IWX_PRPH_MTR_FORMAT_32B 0x40000
+#define IWX_PRPH_MTR_FORMAT_64B 0x80000
+#define IWX_PRPH_MTR_FORMAT_256B 0xC0000
+
+/**
+ * iwx_prph_scratch_flags - PRPH scratch control flags
+ * @IWX_PRPH_SCRATCH_IMR_DEBUG_EN: IMR support for debug
+ * @IWX_PRPH_SCRATCH_EARLY_DEBUG_EN: enable early debug conf
+ * @IWX_PRPH_SCRATCH_EDBG_DEST_DRAM: use DRAM, with size allocated
+ * in hwm config.
+ * @IWX_PRPH_SCRATCH_EDBG_DEST_INTERNAL: use buffer on SRAM
+ * @IWX_PRPH_SCRATCH_EDBG_DEST_ST_ARBITER: use st arbiter, mainly for
+ * multicomm.
+ * @IWX_PRPH_SCRATCH_EDBG_DEST_TB22DTF: route debug data to SoC HW
+ * @IWX_PRPH_SCRATCH_RB_SIZE_4K: Use 4K RB size (the default is 2K)
+ * @IWX_PRPH_SCRATCH_MTR_MODE: format used for completion - 0: for
+ * completion descriptor, 1 for responses (legacy)
+ * @IWX_PRPH_SCRATCH_MTR_FORMAT: a mask for the size of the tfd.
+ * There are 4 optional values: 0: 16 bit, 1: 32 bit, 2: 64 bit,
+ * 3: 256 bit.
+ * @IWX_PRPH_SCRATCH_RB_SIZE_EXT_MASK: RB size full information, ignored
+ * by older firmware versions, so set IWX_PRPH_SCRATCH_RB_SIZE_4K
+ * appropriately; use the below values for this.
+ * @IWX_PRPH_SCRATCH_RB_SIZE_EXT_8K: 8kB RB size
+ * @IWX_PRPH_SCRATCH_RB_SIZE_EXT_12K: 12kB RB size
+ * @IWX_PRPH_SCRATCH_RB_SIZE_EXT_16K: 16kB RB size
+ */
+#define IWX_PRPH_SCRATCH_IMR_DEBUG_EN (1 << 1)
+#define IWX_PRPH_SCRATCH_EARLY_DEBUG_EN (1 << 4)
+#define IWX_PRPH_SCRATCH_EDBG_DEST_DRAM (1 << 8)
+#define IWX_PRPH_SCRATCH_EDBG_DEST_INTERNAL (1 << 9)
+#define IWX_PRPH_SCRATCH_EDBG_DEST_ST_ARBITER (1 << 10)
+#define IWX_PRPH_SCRATCH_EDBG_DEST_TB22DTF (1 << 11)
+#define IWX_PRPH_SCRATCH_RB_SIZE_4K (1 << 16)
+#define IWX_PRPH_SCRATCH_MTR_MODE (1 << 17)
+#define IWX_PRPH_SCRATCH_MTR_FORMAT ((1 << 18) | (1 << 19))
+#define IWX_PRPH_SCRATCH_RB_SIZE_EXT_MASK (0xf << 20)
+#define IWX_PRPH_SCRATCH_RB_SIZE_EXT_8K (8 << 20)
+#define IWX_PRPH_SCRATCH_RB_SIZE_EXT_12K (9 << 20)
+#define IWX_PRPH_SCRATCH_RB_SIZE_EXT_16K (10 << 20)
+
+/*
+ * struct iwx_prph_scratch_version - version structure
+ * @mac_id: SKU and revision id
+ * @version: prph scratch information version id
+ * @size: the size of the context information in DWs
+ * @reserved: reserved
+ */
+struct iwx_prph_scratch_version {
+ uint16_t mac_id;
+ uint16_t version;
+ uint16_t size;
+ uint16_t reserved;
+} __packed; /* PERIPH_SCRATCH_VERSION_S */
+
+/*
+ * struct iwx_prph_scratch_control - control structure
+ * @control_flags: context information flags see &iwx_prph_scratch_flags
+ * @reserved: reserved
+ */
+struct iwx_prph_scratch_control {
+ uint32_t control_flags;
+ uint32_t reserved;
+} __packed; /* PERIPH_SCRATCH_CONTROL_S */
+
+/*
+ * struct iwx_prph_scratch_pnvm_cfg - ror config
+ * @pnvm_base_addr: PNVM start address
+ * @pnvm_size: PNVM size in DWs
+ * @reserved: reserved
+ */
+struct iwx_prph_scratch_pnvm_cfg {
+ uint64_t pnvm_base_addr;
+ uint32_t pnvm_size;
+ uint32_t reserved;
+} __packed; /* PERIPH_SCRATCH_PNVM_CFG_S */
+
+/*
+ * struct iwx_pnvm_section - one section of platform NVM (PNVM) data
+ * @offset: presumably the byte offset of this section within the PNVM
+ *	image — TODO(review): confirm against the PNVM loader
+ * @data: section payload (flexible array member)
+ */
+struct iwx_pnvm_section {
+	uint32_t offset;
+	const uint8_t data[];
+} __packed;
+
+/*
+ * struct iwx_prph_scratch_hwm_cfg - hwm config
+ * @hwm_base_addr: hwm start address
+ * @hwm_size: hwm size in DWs
+ * @debug_token_config: debug preset
+ */
+struct iwx_prph_scratch_hwm_cfg {
+ uint64_t hwm_base_addr;
+ uint32_t hwm_size;
+ uint32_t debug_token_config;
+} __packed; /* PERIPH_SCRATCH_HWM_CFG_S */
+
+/*
+ * struct iwx_prph_scratch_rbd_cfg - RBDs configuration
+ * @free_rbd_addr: default queue free RB CB base address
+ * @reserved: reserved
+ */
+struct iwx_prph_scratch_rbd_cfg {
+ uint64_t free_rbd_addr;
+ uint32_t reserved;
+} __packed; /* PERIPH_SCRATCH_RBD_CFG_S */
+
+/*
+ * struct iwx_prph_scratch_uefi_cfg - prph scratch reduce power table
+ * @base_addr: reduce power table address
+ * @size: table size in dwords
+ */
+struct iwx_prph_scratch_uefi_cfg {
+ uint64_t base_addr;
+ uint32_t size;
+ uint32_t reserved;
+} __packed; /* PERIPH_SCRATCH_UEFI_CFG_S */
+
+/*
+ * struct iwx_prph_scratch_ctrl_cfg - prph scratch ctrl and config
+ * @version: version information of context info and HW
+ * @control: control flags of FH configurations
+ * @pnvm_cfg: ror configuration
+ * @hwm_cfg: hwm configuration
+ * @rbd_cfg: default RX queue configuration
+ */
+struct iwx_prph_scratch_ctrl_cfg {
+ struct iwx_prph_scratch_version version;
+ struct iwx_prph_scratch_control control;
+ struct iwx_prph_scratch_pnvm_cfg pnvm_cfg;
+ struct iwx_prph_scratch_hwm_cfg hwm_cfg;
+ struct iwx_prph_scratch_rbd_cfg rbd_cfg;
+ struct iwx_prph_scratch_uefi_cfg reduce_power_cfg;
+} __packed; /* PERIPH_SCRATCH_CTRL_CFG_S */
+
+/*
+ * struct iwx_prph_scratch - peripheral scratch mapping
+ * @ctrl_cfg: control and configuration of prph scratch
+ * @dram: firmware images addresses in DRAM
+ * @reserved: reserved
+ */
+struct iwx_prph_scratch {
+ struct iwx_prph_scratch_ctrl_cfg ctrl_cfg;
+ uint32_t reserved[12];
+ struct iwx_context_info_dram dram;
+} __packed; /* PERIPH_SCRATCH_S */
+
+/*
+ * struct iwx_prph_info - peripheral information
+ * @boot_stage_mirror: reflects the value in the Boot Stage CSR register
+ * @ipc_status_mirror: reflects the value in the IPC Status CSR register
+ * @sleep_notif: indicates the peripheral sleep status
+ * @reserved: reserved
+ */
+struct iwx_prph_info {
+ uint32_t boot_stage_mirror;
+ uint32_t ipc_status_mirror;
+ uint32_t sleep_notif;
+ uint32_t reserved;
+} __packed; /* PERIPH_INFO_S */
+
+/*
+ * struct iwx_context_info_gen3 - device INIT configuration
+ * @version: version of the context information
+ * @size: size of context information in DWs
+ * @config: context in which the peripheral would execute - a subset of
+ * capability csr register published by the peripheral
+ * @prph_info_base_addr: the peripheral information structure start address
+ * @cr_head_idx_arr_base_addr: the completion ring head index array
+ * start address
+ * @tr_tail_idx_arr_base_addr: the transfer ring tail index array
+ * start address
+ * @cr_tail_idx_arr_base_addr: the completion ring tail index array
+ * start address
+ * @tr_head_idx_arr_base_addr: the transfer ring head index array
+ * start address
+ * @cr_idx_arr_size: number of entries in the completion ring index array
+ * @tr_idx_arr_size: number of entries in the transfer ring index array
+ * @mtr_base_addr: the message transfer ring start address
+ * @mcr_base_addr: the message completion ring start address
+ * @mtr_size: number of entries which the message transfer ring can hold
+ * @mcr_size: number of entries which the message completion ring can hold
+ * @mtr_doorbell_vec: the doorbell vector associated with the message
+ * transfer ring
+ * @mcr_doorbell_vec: the doorbell vector associated with the message
+ * completion ring
+ * @mtr_msi_vec: the MSI which shall be generated by the peripheral after
+ * completing a transfer descriptor in the message transfer ring
+ * @mcr_msi_vec: the MSI which shall be generated by the peripheral after
+ * completing a completion descriptor in the message completion ring
+ * @mtr_opt_header_size: the size of the optional header in the transfer
+ * descriptor associated with the message transfer ring in DWs
+ * @mtr_opt_footer_size: the size of the optional footer in the transfer
+ * descriptor associated with the message transfer ring in DWs
+ * @mcr_opt_header_size: the size of the optional header in the completion
+ * descriptor associated with the message completion ring in DWs
+ * @mcr_opt_footer_size: the size of the optional footer in the completion
+ * descriptor associated with the message completion ring in DWs
+ * @msg_rings_ctrl_flags: message rings control flags
+ * @prph_info_msi_vec: the MSI which shall be generated by the peripheral
+ * after updating the Peripheral Information structure
+ * @prph_scratch_base_addr: the peripheral scratch structure start address
+ * @prph_scratch_size: the size of the peripheral scratch structure in DWs
+ * @reserved: reserved
+ */
+struct iwx_context_info_gen3 {
+ uint16_t version;
+ uint16_t size;
+ uint32_t config;
+ uint64_t prph_info_base_addr;
+ uint64_t cr_head_idx_arr_base_addr;
+ uint64_t tr_tail_idx_arr_base_addr;
+ uint64_t cr_tail_idx_arr_base_addr;
+ uint64_t tr_head_idx_arr_base_addr;
+ uint16_t cr_idx_arr_size;
+ uint16_t tr_idx_arr_size;
+ uint64_t mtr_base_addr;
+ uint64_t mcr_base_addr;
+ uint16_t mtr_size;
+ uint16_t mcr_size;
+ uint16_t mtr_doorbell_vec;
+ uint16_t mcr_doorbell_vec;
+ uint16_t mtr_msi_vec;
+ uint16_t mcr_msi_vec;
+ uint8_t mtr_opt_header_size;
+ uint8_t mtr_opt_footer_size;
+ uint8_t mcr_opt_header_size;
+ uint8_t mcr_opt_footer_size;
+ uint16_t msg_rings_ctrl_flags;
+ uint16_t prph_info_msi_vec;
+ uint64_t prph_scratch_base_addr;
+ uint32_t prph_scratch_size;
+ uint32_t reserved;
+} __packed; /* IPC_CONTEXT_INFO_S */
+
+#define IWX_MGMT_TID 15
+
+#define IWX_MQ_RX_TABLE_SIZE 512
+
+/* cb size is the exponent */
+#define IWX_RX_QUEUE_CB_SIZE(x) ((sizeof(x) <= 4) ? (fls(x) - 1) : (flsl(x) - 1))
+
+/*
+ * CSR (control and status registers)
+ *
+ * CSR registers are mapped directly into PCI bus space, and are accessible
+ * whenever platform supplies power to device, even when device is in
+ * low power states due to driver-invoked device resets
+ * (e.g. IWX_CSR_RESET_REG_FLAG_SW_RESET) or uCode-driven power-saving modes.
+ *
+ * Use iwl_write32() and iwl_read32() family to access these registers;
+ * these provide simple PCI bus access, without waking up the MAC.
+ * Do not use iwl_write_direct32() family for these registers;
+ * no need to "grab nic access" via IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ.
+ * The MAC (uCode processor, etc.) does not need to be powered up for accessing
+ * the CSR registers.
+ *
+ * NOTE: Device does need to be awake in order to read this memory
+ * via IWX_CSR_EEPROM and IWX_CSR_OTP registers
+ */
+#define IWX_CSR_HW_IF_CONFIG_REG (0x000) /* hardware interface config */
+#define IWX_CSR_INT_COALESCING (0x004) /* accum ints, 32-usec units */
+#define IWX_CSR_INT (0x008) /* host interrupt status/ack */
+#define IWX_CSR_INT_MASK (0x00c) /* host interrupt enable */
+#define IWX_CSR_FH_INT_STATUS (0x010) /* busmaster int status/ack*/
+#define IWX_CSR_GPIO_IN (0x018) /* read external chip pins */
+#define IWX_CSR_RESET (0x020) /* busmaster enable, NMI, etc*/
+#define IWX_CSR_GP_CNTRL (0x024)
+
+/* 2nd byte of IWX_CSR_INT_COALESCING, not accessible via iwl_write32()! */
+#define IWX_CSR_INT_PERIODIC_REG (0x005)
+
+/*
+ * Hardware revision info
+ * Bit fields:
+ * 31-16: Reserved
+ * 15-4: Type of device: see IWX_CSR_HW_REV_TYPE_xxx definitions
+ * 3-2: Revision step: 0 = A, 1 = B, 2 = C, 3 = D
+ * 1-0: "Dash" (-) value, as in A-1, etc.
+ */
+#define IWX_CSR_HW_REV (0x028)
+
+/*
+ * RF ID revision info
+ * Bit fields:
+ * 31:24: Reserved (set to 0x0)
+ * 23:12: Type
+ * 11:8: Step (A - 0x0, B - 0x1, etc)
+ * 7:4: Dash
+ * 3:0: Flavor
+ */
+#define IWX_CSR_HW_RF_ID (0x09c)
+
+
+#define IWX_CSR_GIO_REG (0x03C)
+
+/*
+ * UCODE-DRIVER GP (general purpose) mailbox registers.
+ * SET/CLR registers set/clear bit(s) if "1" is written.
+ */
+#define IWX_CSR_UCODE_DRV_GP1 (0x054)
+#define IWX_CSR_UCODE_DRV_GP1_SET (0x058)
+#define IWX_CSR_UCODE_DRV_GP1_CLR (0x05c)
+#define IWX_CSR_UCODE_DRV_GP2 (0x060)
+
+#define IWX_CSR_MBOX_SET_REG (0x088)
+#define IWX_CSR_MBOX_SET_REG_OS_ALIVE 0x20
+
+#define IWX_CSR_DRAM_INT_TBL_REG (0x0A0)
+#define IWX_CSR_MAC_SHADOW_REG_CTRL (0x0A8) /* 6000 and up */
+
+/* LTR control */
+#define IWX_CSR_LTR_LONG_VAL_AD (0x0d4)
+#define IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ 0x80000000
+#define IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE_MASK 0x1c000000
+#define IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE_SHIFT 24
+#define IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL_MASK 0x03ff0000
+#define IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL_SHIFT 16
+#define IWX_CSR_LTR_LONG_VAL_AD_SNOOP_REQ 0x00008000
+#define IWX_CSR_LTR_LONG_VAL_AD_SNOOP_SCALE_MASK 0x00001c00
+#define IWX_CSR_LTR_LONG_VAL_AD_SNOOP_SCALE_SHIFT 8
+#define IWX_CSR_LTR_LONG_VAL_AD_SNOOP_VAL 0x000003ff
+#define IWX_CSR_LTR_LONG_VAL_AD_SCALE_USEC 2
+
+/* GIO Chicken Bits (PCI Express bus link power management) */
+#define IWX_CSR_GIO_CHICKEN_BITS (0x100)
+
+#define IWX_CSR_DBG_HPET_MEM_REG (0x240)
+#define IWX_CSR_DBG_LINK_PWR_MGMT_REG (0x250)
+
+/* Bits for IWX_CSR_HW_IF_CONFIG_REG */
+#define IWX_CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH (0x00000003)
+#define IWX_CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP (0x0000000C)
+#define IWX_CSR_HW_IF_CONFIG_REG_MSK_BOARD_VER (0x000000C0)
+#define IWX_CSR_HW_IF_CONFIG_REG_BIT_MAC_SI (0x00000100)
+#define IWX_CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI (0x00000200)
+#define IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE (0x00000C00)
+#define IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH (0x00003000)
+#define IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP (0x0000C000)
+
+#define IWX_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH (0)
+#define IWX_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP (2)
+#define IWX_CSR_HW_IF_CONFIG_REG_POS_BOARD_VER (6)
+#define IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE (10)
+#define IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH (12)
+#define IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP (14)
+
+#define IWX_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A (0x00080000)
+#define IWX_CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM (0x00200000)
+#define IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY (0x00400000) /* PCI_OWN_SEM */
+#define IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE (0x02000000) /* ME_OWN */
+#define IWX_CSR_HW_IF_CONFIG_REG_PREPARE (0x08000000) /* WAKE_ME */
+#define IWX_CSR_HW_IF_CONFIG_REG_ENABLE_PME (0x10000000)
+#define IWX_CSR_HW_IF_CONFIG_REG_PERSIST_MODE (0x40000000) /* PERSISTENCE */
+
+#define IWX_CSR_INT_PERIODIC_DIS (0x00) /* disable periodic int*/
+#define IWX_CSR_INT_PERIODIC_ENA (0xFF) /* 255*32 usec ~ 8 msec*/
+
+/* interrupt flags in INTA, set by uCode or hardware (e.g. dma),
+ * acknowledged (reset) by host writing "1" to flagged bits. */
+#define IWX_CSR_INT_BIT_FH_RX (1U << 31) /* Rx DMA, cmd responses, FH_INT[17:16] */
+#define IWX_CSR_INT_BIT_HW_ERR (1 << 29) /* DMA hardware error FH_INT[31] */
+#define IWX_CSR_INT_BIT_RX_PERIODIC (1 << 28) /* Rx periodic */
+#define IWX_CSR_INT_BIT_FH_TX (1 << 27) /* Tx DMA FH_INT[1:0] */
+#define IWX_CSR_INT_BIT_SCD (1 << 26) /* TXQ pointer advanced */
+#define IWX_CSR_INT_BIT_SW_ERR (1 << 25) /* uCode error */
+#define IWX_CSR_INT_BIT_RF_KILL (1 << 7) /* HW RFKILL switch GP_CNTRL[27] toggled */
+#define IWX_CSR_INT_BIT_CT_KILL (1 << 6) /* Critical temp (chip too hot) rfkill */
+#define IWX_CSR_INT_BIT_SW_RX (1 << 3) /* Rx, command responses */
+#define IWX_CSR_INT_BIT_WAKEUP (1 << 1) /* NIC controller waking up (pwr mgmt) */
+#define IWX_CSR_INT_BIT_ALIVE (1 << 0) /* uCode interrupts once it initializes */
+
+#define IWX_CSR_INI_SET_MASK (IWX_CSR_INT_BIT_FH_RX | \
+ IWX_CSR_INT_BIT_HW_ERR | \
+ IWX_CSR_INT_BIT_FH_TX | \
+ IWX_CSR_INT_BIT_SW_ERR | \
+ IWX_CSR_INT_BIT_RF_KILL | \
+ IWX_CSR_INT_BIT_SW_RX | \
+ IWX_CSR_INT_BIT_WAKEUP | \
+ IWX_CSR_INT_BIT_ALIVE | \
+ IWX_CSR_INT_BIT_RX_PERIODIC)
+
+/* interrupt flags in FH (flow handler) (PCI busmaster DMA) */
+#define IWX_CSR_FH_INT_BIT_ERR (1U << 31) /* Error */
+#define IWX_CSR_FH_INT_BIT_HI_PRIOR (1 << 30) /* High priority Rx, bypass coalescing */
+#define IWX_CSR_FH_INT_BIT_RX_CHNL1 (1 << 17) /* Rx channel 1 */
+#define IWX_CSR_FH_INT_BIT_RX_CHNL0 (1 << 16) /* Rx channel 0 */
+#define IWX_CSR_FH_INT_BIT_TX_CHNL1 (1 << 1) /* Tx channel 1 */
+#define IWX_CSR_FH_INT_BIT_TX_CHNL0 (1 << 0) /* Tx channel 0 */
+
+#define IWX_CSR_FH_INT_RX_MASK (IWX_CSR_FH_INT_BIT_HI_PRIOR | \
+ IWX_CSR_FH_INT_BIT_RX_CHNL1 | \
+ IWX_CSR_FH_INT_BIT_RX_CHNL0)
+
+#define IWX_CSR_FH_INT_TX_MASK (IWX_CSR_FH_INT_BIT_TX_CHNL1 | \
+ IWX_CSR_FH_INT_BIT_TX_CHNL0)
+
+/**
+ * struct iwx_rx_transfer_desc - transfer descriptor AX210
+ * @rbid: unique tag of the buffer
+ * @reserved: reserved
+ * @addr: ptr to free buffer start address
+ */
+struct iwx_rx_transfer_desc {
+ uint16_t rbid;
+ uint16_t reserved[3];
+ uint64_t addr;
+} __packed;
+
+#define IWX_RX_CD_FLAGS_FRAGMENTED (1 << 0)
+
+/**
+ * struct iwx_rx_completion_desc - completion descriptor AX210
+ * @reserved1: reserved
+ * @rbid: unique tag of the received buffer
+ * @flags: flags (0: fragmented, all others: reserved)
+ * @reserved2: reserved
+ */
+struct iwx_rx_completion_desc {
+ uint32_t reserved1;
+ uint16_t rbid;
+ uint8_t flags;
+ uint8_t reserved2[25];
+} __packed;
+
+/* RESET */
+#define IWX_CSR_RESET_REG_FLAG_NEVO_RESET (0x00000001)
+#define IWX_CSR_RESET_REG_FLAG_FORCE_NMI (0x00000002)
+#define IWX_CSR_RESET_REG_FLAG_SW_RESET (0x00000080)
+#define IWX_CSR_RESET_REG_FLAG_MASTER_DISABLED (0x00000100)
+#define IWX_CSR_RESET_REG_FLAG_STOP_MASTER (0x00000200)
+#define IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED (0x80000000)
+
+/*
+ * GP (general purpose) CONTROL REGISTER
+ * Bit fields:
+ * 27: HW_RF_KILL_SW
+ * Indicates state of (platform's) hardware RF-Kill switch
+ * 26-24: POWER_SAVE_TYPE
+ * Indicates current power-saving mode:
+ * 000 -- No power saving
+ * 001 -- MAC power-down
+ * 010 -- PHY (radio) power-down
+ * 011 -- Error
+ * 9-6: SYS_CONFIG
+ * Indicates current system configuration, reflecting pins on chip
+ * as forced high/low by device circuit board.
+ * 4: GOING_TO_SLEEP
+ * Indicates MAC is entering a power-saving sleep power-down.
+ * Not a good time to access device-internal resources.
+ * 3: MAC_ACCESS_REQ
+ * Host sets this to request and maintain MAC wakeup, to allow host
+ * access to device-internal resources. Host must wait for
+ * MAC_CLOCK_READY (and !GOING_TO_SLEEP) before accessing non-CSR
+ * device registers.
+ * 2: INIT_DONE
+ * Host sets this to put device into fully operational D0 power mode.
+ * Host resets this after SW_RESET to put device into low power mode.
+ * 0: MAC_CLOCK_READY
+ * Indicates MAC (ucode processor, etc.) is powered up and can run.
+ * Internal resources are accessible.
+ * NOTE: This does not indicate that the processor is actually running.
+ * NOTE: This does not indicate that device has completed
+ * init or post-power-down restore of internal SRAM memory.
+ * Use IWX_CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP as indication that
+ * SRAM is restored and uCode is in normal operation mode.
+ * Later devices (5xxx/6xxx/1xxx) use non-volatile SRAM, and
+ * do not need to save/restore it.
+ * NOTE: After device reset, this bit remains "0" until host sets
+ * INIT_DONE
+ */
+#define IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY (0x00000001)
+#define IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE (0x00000004)
+#define IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ (0x00000008)
+#define IWX_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP (0x00000010)
+
+#define IWX_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN (0x00000001)
+
+#define IWX_CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE (0x07000000)
+#define IWX_CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN (0x04000000)
+#define IWX_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW (0x08000000)
+
+
+/* HW REV */
+#define IWX_CSR_HW_REV_DASH(_val) (((_val) & 0x0000003) >> 0)
+#define IWX_CSR_HW_REV_STEP(_val) (((_val) & 0x000000C) >> 2)
+#define IWX_CSR_HW_REV_TYPE(_val) (((_val) & 0x000FFF0) >> 4)
+
+#define IWX_CSR_HW_REV_TYPE_MSK (0x000FFF0)
+#define IWX_CSR_HW_REV_TYPE_QU_B0 (0x0000334)
+#define IWX_CSR_HW_REV_TYPE_QU_C0 (0x0000338)
+#define IWX_CSR_HW_REV_TYPE_QUZ (0x0000354)
+#define IWX_CSR_HW_REV_TYPE_SO (0x0000370)
+#define IWX_CSR_HW_REV_TYPE_TY (0x0000420)
+
+/* HW RFID */
+#define IWX_CSR_HW_RFID_FLAVOR(_val) (((_val) & 0x000000F) >> 0)
+#define IWX_CSR_HW_RFID_DASH(_val) (((_val) & 0x00000F0) >> 4)
+#define IWX_CSR_HW_RFID_STEP(_val) (((_val) & 0x0000F00) >> 8)
+#define IWX_CSR_HW_RFID_TYPE(_val) (((_val) & 0x0FFF000) >> 12)
+#define IWX_CSR_HW_RFID_IS_CDB(_val) (((_val) & 0x10000000) >> 28)
+#define IWX_CSR_HW_RFID_IS_JACKET(_val) (((_val) & 0x20000000) >> 29)
+
+/* CSR GIO */
+#define IWX_CSR_GIO_REG_VAL_L0S_DISABLED (0x00000002)
+
+/*
+ * UCODE-DRIVER GP (general purpose) mailbox register 1
+ * Host driver and uCode write and/or read this register to communicate with
+ * each other.
+ * Bit fields:
+ * 4: UCODE_DISABLE
+ * Host sets this to request permanent halt of uCode, same as
+ * sending CARD_STATE command with "halt" bit set.
+ * 3: CT_KILL_EXIT
+ * Host sets this to request exit from CT_KILL state, i.e. host thinks
+ * device temperature is low enough to continue normal operation.
+ * 2: CMD_BLOCKED
+ * Host sets this during RF KILL power-down sequence (HW, SW, CT KILL)
+ * to release uCode to clear all Tx and command queues, enter
+ * unassociated mode, and power down.
+ * NOTE: Some devices also use HBUS_TARG_MBX_C register for this bit.
+ * 1: SW_BIT_RFKILL
+ * Host sets this when issuing CARD_STATE command to request
+ * device sleep.
+ * 0: MAC_SLEEP
+ * uCode sets this when preparing a power-saving power-down.
+ * uCode resets this when power-up is complete and SRAM is sane.
+ * NOTE: device saves internal SRAM data to host when powering down,
+ * and must restore this data after powering back up.
+ * MAC_SLEEP is the best indication that restore is complete.
+ * Later devices (5xxx/6xxx/1xxx) use non-volatile SRAM, and
+ * do not need to save/restore it.
+ */
+#define IWX_CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP (0x00000001)
+#define IWX_CSR_UCODE_SW_BIT_RFKILL (0x00000002)
+#define IWX_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED (0x00000004)
+#define IWX_CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT (0x00000008)
+#define IWX_CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE (0x00000020)
+
+/* GIO Chicken Bits (PCI Express bus link power management) */
+#define IWX_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX (0x00800000)
+#define IWX_CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER (0x20000000)
+
+/* HPET MEM debug */
+#define IWX_CSR_DBG_HPET_MEM_REG_VAL (0xFFFF0000)
+
+/* DRAM INT TABLE */
+#define IWX_CSR_DRAM_INT_TBL_ENABLE (1U << 31)
+#define IWX_CSR_DRAM_INIT_TBL_WRITE_POINTER (1 << 28)
+#define IWX_CSR_DRAM_INIT_TBL_WRAP_CHECK (1 << 27)
+
+/* 22000 configuration registers */
+
+/*
+ * TFH Configuration register.
+ *
+ * BIT fields:
+ *
+ * Bits 3:0:
+ * Define the maximum number of pending read requests.
+ * Maximum configuration value allowed is 0xC
+ * Bits 9:8:
+ * Define the maximum transfer size. (64 / 128 / 256)
+ * Bit 10:
+ * When bit is set and transfer size is set to 128B, the TFH will enable
+ * reading chunks of more than 64B only if the read address is aligned to 128B.
+ * In case of DRAM read address which is not aligned to 128B, the TFH will
+ * enable transfer size which doesn't cross 64B DRAM address boundary.
+*/
+#define IWX_TFH_TRANSFER_MODE (0x1F40)
+#define IWX_TFH_TRANSFER_MAX_PENDING_REQ 0xc
+#define IWX_TFH_CHUNK_SIZE_128 (1 << 8)
+#define IWX_TFH_CHUNK_SPLIT_MODE (1 << 10)
+
+/*
+ * Defines the offset address in dwords referring from the beginning of the
+ * Tx CMD which will be updated in DRAM.
+ * Note that the TFH offset address for Tx CMD update is always referring to
+ * the start of the TFD first TB.
+ * In case of a DRAM Tx CMD update the TFH will update PN and Key ID
+ */
+#define IWX_TFH_TXCMD_UPDATE_CFG (0x1F48)
+
+/*
+ * Controls TX DMA operation
+ *
+ * BIT fields:
+ *
+ * Bits 31:30: Enable the SRAM DMA channel.
+ * Turning on bit 31 will kick the SRAM2DRAM DMA.
+ * Note that the sram2dram may be enabled only after configuring the DRAM and
+ * SRAM addresses registers and the byte count register.
+ * Bits 25:24: Defines the interrupt target upon dram2sram transfer done. When
+ * set to 1 - interrupt is sent to the driver
+ * Bit 0: Indicates the snoop configuration
+*/
+#define IWX_TFH_SRV_DMA_CHNL0_CTRL (0x1F60)
+#define IWX_TFH_SRV_DMA_SNOOP (1 << 0)
+#define IWX_TFH_SRV_DMA_TO_DRIVER (1 << 24)
+#define IWX_TFH_SRV_DMA_START (1U << 31)
+
+/* Defines the DMA SRAM write start address to transfer a data block */
+#define IWX_TFH_SRV_DMA_CHNL0_SRAM_ADDR (0x1F64)
+
+/* Defines the 64bits DRAM start address to read the DMA data block from */
+#define IWX_TFH_SRV_DMA_CHNL0_DRAM_ADDR (0x1F68)
+
+/*
+ * Defines the number of bytes to transfer from DRAM to SRAM.
+ * Note that this register may be configured with non-dword aligned size.
+ */
+#define IWX_TFH_SRV_DMA_CHNL0_BC (0x1F70)
+
+/* 9000 rx series registers */
+
+#define IWX_RFH_Q0_FRBDCB_BA_LSB 0xA08000 /* 64 bit address */
+#define IWX_RFH_Q_FRBDCB_BA_LSB(q) (IWX_RFH_Q0_FRBDCB_BA_LSB + (q) * 8)
+/* Write index table */
+#define IWX_RFH_Q0_FRBDCB_WIDX 0xA08080
+#define IWX_RFH_Q_FRBDCB_WIDX(q) (IWX_RFH_Q0_FRBDCB_WIDX + (q) * 4)
+/* Write index table - shadow registers */
+#define IWX_RFH_Q0_FRBDCB_WIDX_TRG 0x1C80
+#define IWX_RFH_Q_FRBDCB_WIDX_TRG(q) (IWX_RFH_Q0_FRBDCB_WIDX_TRG + (q) * 4)
+/* Read index table */
+#define IWX_RFH_Q0_FRBDCB_RIDX 0xA080C0
+#define IWX_RFH_Q_FRBDCB_RIDX(q) (IWX_RFH_Q0_FRBDCB_RIDX + (q) * 4)
+/* Used list table */
+#define IWX_RFH_Q0_URBDCB_BA_LSB 0xA08100 /* 64 bit address */
+#define IWX_RFH_Q_URBDCB_BA_LSB(q) (IWX_RFH_Q0_URBDCB_BA_LSB + (q) * 8)
+/* Write index table */
+#define IWX_RFH_Q0_URBDCB_WIDX 0xA08180
+#define IWX_RFH_Q_URBDCB_WIDX(q) (IWX_RFH_Q0_URBDCB_WIDX + (q) * 4)
+#define IWX_RFH_Q0_URBDCB_VAID 0xA081C0
+#define IWX_RFH_Q_URBDCB_VAID(q) (IWX_RFH_Q0_URBDCB_VAID + (q) * 4)
+/* stts */
+#define IWX_RFH_Q0_URBD_STTS_WPTR_LSB 0xA08200 /*64 bits address */
+#define IWX_RFH_Q_URBD_STTS_WPTR_LSB(q) (IWX_RFH_Q0_URBD_STTS_WPTR_LSB + (q) * 8)
+
+#define IWX_RFH_Q0_ORB_WPTR_LSB 0xA08280
+#define IWX_RFH_Q_ORB_WPTR_LSB(q) (IWX_RFH_Q0_ORB_WPTR_LSB + (q) * 8)
+#define IWX_RFH_RBDBUF_RBD0_LSB 0xA08300
+#define IWX_RFH_RBDBUF_RBD_LSB(q) (IWX_RFH_RBDBUF_RBD0_LSB + (q) * 8)
+
+/**
+ * RFH Status Register
+ *
+ * Bit fields:
+ *
+ * Bit 29: RBD_FETCH_IDLE
+ * This status flag is set by the RFH when there is no active RBD fetch from
+ * DRAM.
+ * Once the RFH RBD controller starts fetching (or when there is a pending
+ * RBD read response from DRAM), this flag is immediately turned off.
+ *
+ * Bit 30: SRAM_DMA_IDLE
+ * This status flag is set by the RFH when there is no active transaction from
+ * SRAM to DRAM.
+ * Once the SRAM to DRAM DMA is active, this flag is immediately turned off.
+ *
+ * Bit 31: RXF_DMA_IDLE
+ * This status flag is set by the RFH when there is no active transaction from
+ * RXF to DRAM.
+ * Once the RXF-to-DRAM DMA is active, this flag is immediately turned off.
+ */
+#define IWX_RFH_GEN_STATUS 0xA09808
+#define IWX_RFH_GEN_STATUS_GEN3 0xA07824
+#define IWX_RBD_FETCH_IDLE (1 << 29)
+#define IWX_SRAM_DMA_IDLE (1 << 30)
+#define IWX_RXF_DMA_IDLE (1U << 31)
+
+/* DMA configuration */
+#define IWX_RFH_RXF_DMA_CFG 0xA09820
+#define IWX_RFH_RXF_DMA_CFG_GEN3 0xA07880
+/* RB size */
+#define IWX_RFH_RXF_DMA_RB_SIZE_MASK (0x000F0000) /* bits 16-19 */
+#define IWX_RFH_RXF_DMA_RB_SIZE_POS 16
+#define IWX_RFH_RXF_DMA_RB_SIZE_1K (0x1 << IWX_RFH_RXF_DMA_RB_SIZE_POS)
+#define IWX_RFH_RXF_DMA_RB_SIZE_2K (0x2 << IWX_RFH_RXF_DMA_RB_SIZE_POS)
+#define IWX_RFH_RXF_DMA_RB_SIZE_4K (0x4 << IWX_RFH_RXF_DMA_RB_SIZE_POS)
+#define IWX_RFH_RXF_DMA_RB_SIZE_8K (0x8 << IWX_RFH_RXF_DMA_RB_SIZE_POS)
+#define IWX_RFH_RXF_DMA_RB_SIZE_12K (0x9 << IWX_RFH_RXF_DMA_RB_SIZE_POS)
+#define IWX_RFH_RXF_DMA_RB_SIZE_16K (0xA << IWX_RFH_RXF_DMA_RB_SIZE_POS)
+#define IWX_RFH_RXF_DMA_RB_SIZE_20K (0xB << IWX_RFH_RXF_DMA_RB_SIZE_POS)
+#define IWX_RFH_RXF_DMA_RB_SIZE_24K (0xC << IWX_RFH_RXF_DMA_RB_SIZE_POS)
+#define IWX_RFH_RXF_DMA_RB_SIZE_28K (0xD << IWX_RFH_RXF_DMA_RB_SIZE_POS)
+#define IWX_RFH_RXF_DMA_RB_SIZE_32K (0xE << IWX_RFH_RXF_DMA_RB_SIZE_POS)
+/* RB Circular Buffer size:defines the table sizes in RBD units */
+#define IWX_RFH_RXF_DMA_RBDCB_SIZE_MASK (0x00F00000) /* bits 20-23 */
+#define IWX_RFH_RXF_DMA_RBDCB_SIZE_POS 20
+#define IWX_RFH_RXF_DMA_RBDCB_SIZE_8 (0x3 << IWX_RFH_RXF_DMA_RBDCB_SIZE_POS)
+#define IWX_RFH_RXF_DMA_RBDCB_SIZE_16 (0x4 << IWX_RFH_RXF_DMA_RBDCB_SIZE_POS)
+#define IWX_RFH_RXF_DMA_RBDCB_SIZE_32 (0x5 << IWX_RFH_RXF_DMA_RBDCB_SIZE_POS)
+#define IWX_RFH_RXF_DMA_RBDCB_SIZE_64 (0x7 << IWX_RFH_RXF_DMA_RBDCB_SIZE_POS)
+#define IWX_RFH_RXF_DMA_RBDCB_SIZE_128 (0x7 << IWX_RFH_RXF_DMA_RBDCB_SIZE_POS)
+#define IWX_RFH_RXF_DMA_RBDCB_SIZE_256 (0x8 << IWX_RFH_RXF_DMA_RBDCB_SIZE_POS)
+#define IWX_RFH_RXF_DMA_RBDCB_SIZE_512 (0x9 << IWX_RFH_RXF_DMA_RBDCB_SIZE_POS)
+#define IWX_RFH_RXF_DMA_RBDCB_SIZE_1024 (0xA << IWX_RFH_RXF_DMA_RBDCB_SIZE_POS)
+#define IWX_RFH_RXF_DMA_RBDCB_SIZE_2048 (0xB << IWX_RFH_RXF_DMA_RBDCB_SIZE_POS)
+#define IWX_RFH_RXF_DMA_MIN_RB_SIZE_MASK (0x03000000) /* bit 24-25 */
+#define IWX_RFH_RXF_DMA_MIN_RB_SIZE_POS 24
+#define IWX_RFH_RXF_DMA_MIN_RB_4_8 (3 << IWX_RFH_RXF_DMA_MIN_RB_SIZE_POS)
+#define IWX_RFH_RXF_DMA_DROP_TOO_LARGE_MASK (0x04000000) /* bit 26 */
+#define IWX_RFH_RXF_DMA_SINGLE_FRAME_MASK (0x20000000) /* bit 29 */
+#define IWX_RFH_DMA_EN_MASK (0xC0000000) /* bits 30-31*/
+#define IWX_RFH_DMA_EN_ENABLE_VAL (1U << 31)
+
+#define IWX_RFH_RXF_RXQ_ACTIVE 0xA0980C
+
+#define IWX_RFH_GEN_CFG 0xA09800
+#define IWX_RFH_GEN_CFG_SERVICE_DMA_SNOOP (1 << 0)
+#define IWX_RFH_GEN_CFG_RFH_DMA_SNOOP (1 << 1)
+#define IWX_RFH_GEN_CFG_RB_CHUNK_SIZE_128 0x00000010
+#define IWX_RFH_GEN_CFG_RB_CHUNK_SIZE_64 0x00000000
+/* the driver assumes everywhere that the default RXQ is 0 */
+#define IWX_RFH_GEN_CFG_DEFAULT_RXQ_NUM 0xF00
+
+/* end of 9000 rx series registers */
+
+/*
+ * This register is written by driver and is read by uCode during boot flow.
+ * Note this address is cleared after MAC reset.
+ */
+#define IWX_UREG_UCODE_LOAD_STATUS (0xa05c40)
+#define IWX_UREG_CPU_INIT_RUN (0xa05c44)
+
+/*
+ * HBUS (Host-side Bus)
+ *
+ * HBUS registers are mapped directly into PCI bus space, but are used
+ * to indirectly access device's internal memory or registers that
+ * may be powered-down.
+ *
+ * Use iwl_write_direct32()/iwl_read_direct32() family for these registers;
+ * host must "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ
+ * to make sure the MAC (uCode processor, etc.) is powered up for accessing
+ * internal resources.
+ *
+ * Do not use iwl_write32()/iwl_read32() family to access these registers;
+ * these provide only simple PCI bus access, without waking up the MAC.
+ */
+#define IWX_HBUS_BASE (0x400)
+
+/*
+ * Registers for accessing device's internal SRAM memory (e.g. SCD SRAM
+ * structures, error log, event log, verifying uCode load).
+ * First write to address register, then read from or write to data register
+ * to complete the job. Once the address register is set up, accesses to
+ * data registers auto-increment the address by one dword.
+ * Bit usage for address registers (read or write):
+ * 0-31: memory address within device
+ */
+#define IWX_HBUS_TARG_MEM_RADDR (IWX_HBUS_BASE+0x00c)
+#define IWX_HBUS_TARG_MEM_WADDR (IWX_HBUS_BASE+0x010)
+#define IWX_HBUS_TARG_MEM_WDAT (IWX_HBUS_BASE+0x018)
+#define IWX_HBUS_TARG_MEM_RDAT (IWX_HBUS_BASE+0x01c)
+
+/*
+ * Registers for accessing device's internal peripheral registers
+ * (e.g. SCD, BSM, etc.). First write to address register,
+ * then read from or write to data register to complete the job.
+ * Bit usage for address registers (read or write):
+ * 0-15: register address (offset) within device
+ * 24-25: (# bytes - 1) to read or write (e.g. 3 for dword)
+ */
+#define IWX_HBUS_TARG_PRPH_WADDR (IWX_HBUS_BASE+0x044)
+#define IWX_HBUS_TARG_PRPH_RADDR (IWX_HBUS_BASE+0x048)
+#define IWX_HBUS_TARG_PRPH_WDAT (IWX_HBUS_BASE+0x04c)
+#define IWX_HBUS_TARG_PRPH_RDAT (IWX_HBUS_BASE+0x050)
+
+/* enable the ID buf for read */
+#define IWX_WFPM_PS_CTL_CLR 0xa0300c
+#define IWX_WFMP_MAC_ADDR_0 0xa03080
+#define IWX_WFMP_MAC_ADDR_1 0xa03084
+#define IWX_LMPM_PMG_EN 0xa01cec
+#define IWX_RADIO_REG_SYS_MANUAL_DFT_0 0xad4078
+#define IWX_RFIC_REG_RD 0xad0470
+#define IWX_WFPM_CTRL_REG 0xa03030
+#define IWX_WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK 0x08000000
+#define IWX_ENABLE_WFPM 0x80000000
+
+#define IWX_AUX_MISC_MASTER1_EN 0xa20818
+#define IWX_AUX_MISC_MASTER1_EN_SBE_MSK 0x1
+#define IWX_AUX_MISC_MASTER1_SMPHR_STATUS 0xa20800
+#define IWX_RSA_ENABLE 0xa24b08
+#define IWX_PREG_AUX_BUS_WPROT_0 0xa04cc0
+#define IWX_PREG_PRPH_WPROT_9000 0xa04ce0
+#define IWX_PREG_PRPH_WPROT_22000 0xa04d00
+#define IWX_SB_CFG_OVERRIDE_ADDR 0xa26c78
+#define IWX_SB_CFG_OVERRIDE_ENABLE 0x8000
+#define IWX_SB_CFG_BASE_OVERRIDE 0xa20000
+#define IWX_SB_MODIFY_CFG_FLAG 0xa03088
+#define IWX_UMAG_SB_CPU_1_STATUS 0xa038c0
+#define IWX_UMAG_SB_CPU_2_STATUS 0xa038c4
+
+#define IWX_UREG_CHICK 0xa05c00
+#define IWX_UREG_CHICK_MSI_ENABLE (1 << 24)
+#define IWX_UREG_CHICK_MSIX_ENABLE (1 << 25)
+
+#define IWX_HPM_DEBUG 0xa03440
+#define IWX_PERSISTENCE_BIT (1 << 12)
+#define IWX_PREG_WFPM_ACCESS (1 << 12)
+
+#define IWX_HPM_HIPM_GEN_CFG 0xa03458
+#define IWX_HPM_HIPM_GEN_CFG_CR_PG_EN (1 << 0)
+#define IWX_HPM_HIPM_GEN_CFG_CR_SLP_EN (1 << 1)
+#define IWX_HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE (1 << 10)
+
+#define IWX_UREG_DOORBELL_TO_ISR6 0xa05c04
+#define IWX_UREG_DOORBELL_TO_ISR6_NMI_BIT (1 << 0)
+#define IWX_UREG_DOORBELL_TO_ISR6_RESET_HANDSHAKE ((1 << 0) | (1 << 1))
+#define IWX_UREG_DOORBELL_TO_ISR6_SUSPEND (1 << 18)
+#define IWX_UREG_DOORBELL_TO_ISR6_RESUME (1 << 19)
+#define IWX_UREG_DOORBELL_TO_ISR6_PNVM (1 << 20)
+
+/* LTR control (Qu only) */
+#define IWX_HPM_MAC_LTR_CSR 0xa0348c
+#define IWX_HPM_MAC_LRT_ENABLE_ALL 0xf
+/* also uses CSR_LTR_* for values */
+#define IWX_HPM_UMAC_LTR 0xa03480
+
+/*
+ * Per-Tx-queue write pointer (index, really!)
+ * Indicates index to next TFD that driver will fill (1 past latest filled).
+ * Bit usage:
+ * 0-7: queue write index
+ * 11-8: queue selector
+ */
+#define IWX_HBUS_TARG_WRPTR (IWX_HBUS_BASE+0x060)
+
+/**********************************************************
+ * CSR values
+ **********************************************************/
+ /*
+ * host interrupt timeout value
+ * used with setting interrupt coalescing timer
+ * the CSR_INT_COALESCING is an 8 bit register in 32-usec unit
+ *
+ * default interrupt coalescing timer is 64 x 32 = 2048 usecs
+ */
+#define IWX_HOST_INT_TIMEOUT_MAX (0xFF)
+#define IWX_HOST_INT_TIMEOUT_DEF (0x40)
+#define IWX_HOST_INT_TIMEOUT_MIN (0x0)
+#define IWX_HOST_INT_OPER_MODE (1U << 31)
+
+/*****************************************************************************
+ * MSIX related registers *
+ *****************************************************************************/
+
+#define IWX_CSR_MSIX_BASE (0x2000)
+#define IWX_CSR_MSIX_FH_INT_CAUSES_AD (IWX_CSR_MSIX_BASE + 0x800)
+#define IWX_CSR_MSIX_FH_INT_MASK_AD (IWX_CSR_MSIX_BASE + 0x804)
+#define IWX_CSR_MSIX_HW_INT_CAUSES_AD (IWX_CSR_MSIX_BASE + 0x808)
+#define IWX_CSR_MSIX_HW_INT_MASK_AD (IWX_CSR_MSIX_BASE + 0x80C)
+#define IWX_CSR_MSIX_AUTOMASK_ST_AD (IWX_CSR_MSIX_BASE + 0x810)
+#define IWX_CSR_MSIX_RX_IVAR_AD_REG (IWX_CSR_MSIX_BASE + 0x880)
+#define IWX_CSR_MSIX_IVAR_AD_REG (IWX_CSR_MSIX_BASE + 0x890)
+#define IWX_CSR_MSIX_PENDING_PBA_AD (IWX_CSR_MSIX_BASE + 0x1000)
+#define IWX_CSR_MSIX_RX_IVAR(cause) (IWX_CSR_MSIX_RX_IVAR_AD_REG + (cause))
+#define IWX_CSR_MSIX_IVAR(cause) (IWX_CSR_MSIX_IVAR_AD_REG + (cause))
+
+/*
+ * Causes for the FH register interrupts
+ */
+enum msix_fh_int_causes {
+ IWX_MSIX_FH_INT_CAUSES_Q0 = (1 << 0), /* RX queue 0 */
+ IWX_MSIX_FH_INT_CAUSES_Q1 = (1 << 1), /* RX queue 1 */
+ IWX_MSIX_FH_INT_CAUSES_D2S_CH0_NUM = (1 << 16), /* DRAM-to-SRAM DMA ch 0 */
+ IWX_MSIX_FH_INT_CAUSES_D2S_CH1_NUM = (1 << 17), /* DRAM-to-SRAM DMA ch 1 */
+ IWX_MSIX_FH_INT_CAUSES_S2D = (1 << 19), /* SRAM-to-DRAM DMA */
+ IWX_MSIX_FH_INT_CAUSES_FH_ERR = (1 << 21), /* flow handler error */
+};
+
+/*
+ * Causes for the HW register interrupts
+ *
+ * NOTE(review): tag is spelled "mix_hw_int_causes" — presumably a typo for
+ * "msix_hw_int_causes" (cf. msix_fh_int_causes above); left unchanged in
+ * case the tag is referenced elsewhere.
+ */
+enum mix_hw_int_causes {
+ IWX_MSIX_HW_INT_CAUSES_REG_ALIVE = (1 << 0), /* uCode initialized */
+ IWX_MSIX_HW_INT_CAUSES_REG_WAKEUP = (1 << 1), /* NIC waking up */
+ IWX_MSIX_HW_INT_CAUSES_REG_RESET_DONE = (1 << 2),
+ IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR_V2 = (1 << 5), /* uCode error (v2) */
+ IWX_MSIX_HW_INT_CAUSES_REG_CT_KILL = (1 << 6), /* critical temperature */
+ IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL = (1 << 7), /* HW RF-kill switch */
+ IWX_MSIX_HW_INT_CAUSES_REG_PERIODIC = (1 << 8),
+ IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR = (1 << 25), /* uCode error */
+ IWX_MSIX_HW_INT_CAUSES_REG_SCD = (1 << 26), /* TXQ pointer advanced */
+ IWX_MSIX_HW_INT_CAUSES_REG_FH_TX = (1 << 27), /* TX DMA */
+ IWX_MSIX_HW_INT_CAUSES_REG_HW_ERR = (1 << 29), /* DMA hardware error */
+ IWX_MSIX_HW_INT_CAUSES_REG_HAP = (1 << 30),
+};
+
+/*
+ * Registers to map causes to vectors
+ *
+ * Each value is the byte offset of a cause's entry in the IVAR tables;
+ * used with IWX_CSR_MSIX_IVAR()/IWX_CSR_MSIX_RX_IVAR() to assign an
+ * MSI-X vector to that interrupt cause.
+ */
+enum msix_ivar_for_cause {
+ IWX_MSIX_IVAR_CAUSE_D2S_CH0_NUM = 0x0,
+ IWX_MSIX_IVAR_CAUSE_D2S_CH1_NUM = 0x1,
+ IWX_MSIX_IVAR_CAUSE_S2D = 0x3,
+ IWX_MSIX_IVAR_CAUSE_FH_ERR = 0x5,
+ IWX_MSIX_IVAR_CAUSE_REG_ALIVE = 0x10,
+ IWX_MSIX_IVAR_CAUSE_REG_WAKEUP = 0x11,
+ IWX_MSIX_IVAR_CAUSE_REG_RESET_DONE = 0x12,
+ IWX_MSIX_IVAR_CAUSE_REG_CT_KILL = 0x16,
+ IWX_MSIX_IVAR_CAUSE_REG_RF_KILL = 0x17,
+ IWX_MSIX_IVAR_CAUSE_REG_PERIODIC = 0x18,
+ IWX_MSIX_IVAR_CAUSE_REG_SW_ERR = 0x29,
+ IWX_MSIX_IVAR_CAUSE_REG_SCD = 0x2a,
+ IWX_MSIX_IVAR_CAUSE_REG_FH_TX = 0x2b,
+ IWX_MSIX_IVAR_CAUSE_REG_HW_ERR = 0x2d,
+ IWX_MSIX_IVAR_CAUSE_REG_HAP = 0x2e,
+};
+
+#define IWX_MSIX_AUTO_CLEAR_CAUSE (0 << 7)
+#define IWX_MSIX_NON_AUTO_CLEAR_CAUSE (1 << 7)
+
+#define IWX_CSR_ADDR_BASE(sc) ((sc)->mac_addr_from_csr)
+#define IWX_CSR_MAC_ADDR0_OTP(sc) (IWX_CSR_ADDR_BASE(sc) + 0x00)
+#define IWX_CSR_MAC_ADDR1_OTP(sc) (IWX_CSR_ADDR_BASE(sc) + 0x04)
+#define IWX_CSR_MAC_ADDR0_STRAP(sc) (IWX_CSR_ADDR_BASE(sc) + 0x08)
+#define IWX_CSR_MAC_ADDR1_STRAP(sc) (IWX_CSR_ADDR_BASE(sc) + 0x0c)
+
+/**
+ * uCode API flags
+ * @IWX_UCODE_TLV_FLAGS_PAN: This is PAN capable microcode; this previously
+ * was a separate TLV but moved here to save space.
+ * @IWX_UCODE_TLV_FLAGS_NEWSCAN: new uCode scan behaviour on hidden SSID,
+ * treats good CRC threshold as a boolean
+ * @IWX_UCODE_TLV_FLAGS_MFP: This uCode image supports MFP (802.11w).
+ * @IWX_UCODE_TLV_FLAGS_P2P: This uCode image supports P2P.
+ * @IWX_UCODE_TLV_FLAGS_DW_BC_TABLE: The SCD byte count table is in DWORDS
+ * @IWX_UCODE_TLV_FLAGS_UAPSD: This uCode image supports uAPSD
+ * @IWX_UCODE_TLV_FLAGS_SHORT_BL: 16 entries of black list instead of 64 in scan
+ * offload profile config command.
+ * @IWX_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS: D3 image supports up to six
+ * (rather than two) IPv6 addresses
+ * @IWX_UCODE_TLV_FLAGS_NO_BASIC_SSID: not sending a probe with the SSID element
+ * from the probe request template.
+ * @IWX_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL: new NS offload (small version)
+ * @IWX_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE: new NS offload (large version)
+ * @IWX_UCODE_TLV_FLAGS_P2P_PS: P2P client power save is supported (only on a
+ * single bound interface).
+ * @IWX_UCODE_TLV_FLAGS_UAPSD_SUPPORT: General support for uAPSD
+ * @IWX_UCODE_TLV_FLAGS_EBS_SUPPORT: this uCode image supports EBS.
+ * @IWX_UCODE_TLV_FLAGS_P2P_PS_UAPSD: P2P client supports uAPSD power save
+ * @IWX_UCODE_TLV_FLAGS_BCAST_FILTERING: uCode supports broadcast filtering.
+ * @IWX_UCODE_TLV_FLAGS_GO_UAPSD: AP/GO interfaces support uAPSD clients
+ *
+ */
+#define IWX_UCODE_TLV_FLAGS_PAN (1 << 0)
+#define IWX_UCODE_TLV_FLAGS_NEWSCAN (1 << 1)
+#define IWX_UCODE_TLV_FLAGS_MFP (1 << 2)
+#define IWX_UCODE_TLV_FLAGS_P2P (1 << 3)
+#define IWX_UCODE_TLV_FLAGS_DW_BC_TABLE (1 << 4)
+#define IWX_UCODE_TLV_FLAGS_SHORT_BL (1 << 7)
+#define IWX_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS (1 << 10)
+#define IWX_UCODE_TLV_FLAGS_NO_BASIC_SSID (1 << 12)
+#define IWX_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL (1 << 15)
+#define IWX_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE (1 << 16)
+#define IWX_UCODE_TLV_FLAGS_P2P_PS (1 << 21)
+#define IWX_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM (1 << 22)
+#define IWX_UCODE_TLV_FLAGS_BSS_P2P_PS_SCM (1 << 23)
+#define IWX_UCODE_TLV_FLAGS_UAPSD_SUPPORT (1 << 24)
+#define IWX_UCODE_TLV_FLAGS_EBS_SUPPORT (1 << 25)
+#define IWX_UCODE_TLV_FLAGS_P2P_PS_UAPSD (1 << 26)
+#define IWX_UCODE_TLV_FLAGS_BCAST_FILTERING (1 << 29)
+#define IWX_UCODE_TLV_FLAGS_GO_UAPSD (1 << 30)
+#define IWX_UCODE_TLV_FLAGS_LTE_COEX (1U << 31)
+
+#define IWX_UCODE_TLV_FLAG_BITS \
+ "\020\1PAN\2NEWSCAN\3MFP\4P2P\5DW_BC_TABLE\6NEWBT_COEX\7PM_CMD\10SHORT_BL\11RX_ENERGY\12TIME_EVENT_V2\13D3_6_IPV6\14BF_UPDATED\15NO_BASIC_SSID\17D3_CONTINUITY\20NEW_NSOFFL_S\21NEW_NSOFFL_L\22SCHED_SCAN\24STA_KEY_CMD\25DEVICE_PS_CMD\26P2P_PS\27P2P_PS_DCM\30P2P_PS_SCM\31UAPSD_SUPPORT\32EBS\33P2P_PS_UAPSD\36BCAST_FILTERING\37GO_UAPSD\40LTE_COEX"
+
+/**
+ * uCode TLV api
+ * @IWX_UCODE_TLV_API_FRAGMENTED_SCAN: This ucode supports active dwell time
+ * longer than the passive one, which is essential for fragmented scan.
+ * @IWX_UCODE_TLV_API_WIFI_MCC_UPDATE: ucode supports MCC updates with source.
+ * @IWX_UCODE_TLV_API_WIDE_CMD_HDR: ucode supports wide command header
+ * @IWX_UCODE_TLV_API_LQ_SS_PARAMS: Configure STBC/BFER via LQ CMD ss_params
+ * @IWX_UCODE_TLV_API_NEW_VERSION: new versioning format
+ * @IWX_UCODE_TLV_API_TX_POWER_CHAIN: TX power API has larger command size
+ * (command version 3) that supports per-chain limits
+ * @IWX_UCODE_TLV_API_SCAN_TSF_REPORT: Scan start time reported in scan
+ * iteration complete notification, and the timestamp reported for RX
+ * received during scan, are reported in TSF of the mac specified in the
+ * scan request.
+ * @IWX_UCODE_TLV_API_TKIP_MIC_KEYS: This ucode supports version 2 of
+ * ADD_MODIFY_STA_KEY_API_S_VER_2.
+ * @IWX_UCODE_TLV_API_STA_TYPE: This ucode supports station type assignment.
+ * @IWX_UCODE_TLV_API_EXT_SCAN_PRIORITY: scan APIs use 8-level priority
+ * instead of 3.
+ * @IWX_UCODE_TLV_API_NEW_RX_STATS: should new RX STATISTICS API be used
+ * @IWX_UCODE_TLV_API_REDUCED_SCAN_CONFIG: This ucode supports v3 of
+ * SCAN_CONFIG_DB_CMD_API_S.
+ *
+ * @IWX_NUM_UCODE_TLV_API: number of bits used
+ */
+/* TLV API bit numbers; keep sorted numerically (each defined exactly once) */
+#define IWX_UCODE_TLV_API_FRAGMENTED_SCAN 8
+#define IWX_UCODE_TLV_API_WIFI_MCC_UPDATE 9
+#define IWX_UCODE_TLV_API_WIDE_CMD_HDR 14
+#define IWX_UCODE_TLV_API_LQ_SS_PARAMS 18
+#define IWX_UCODE_TLV_API_NEW_VERSION 20
+#define IWX_UCODE_TLV_API_EXT_SCAN_PRIORITY 24
+#define IWX_UCODE_TLV_API_TX_POWER_CHAIN 27
+#define IWX_UCODE_TLV_API_SCAN_TSF_REPORT 28
+#define IWX_UCODE_TLV_API_TKIP_MIC_KEYS 29
+#define IWX_UCODE_TLV_API_STA_TYPE 30
+#define IWX_UCODE_TLV_API_NAN2_VER2 31
+#define IWX_UCODE_TLV_API_ADAPTIVE_DWELL 32
+#define IWX_UCODE_TLV_API_NEW_RX_STATS 35
+#define IWX_UCODE_TLV_API_WOWLAN_KEY_MATERIAL 36
+#define IWX_UCODE_TLV_API_QUOTA_LOW_LATENCY 38
+#define IWX_UCODE_TLV_API_DEPRECATE_TTAK 41
+#define IWX_UCODE_TLV_API_ADAPTIVE_DWELL_V2 42
+#define IWX_UCODE_TLV_API_NAN_NOTIF_V2 43
+#define IWX_UCODE_TLV_API_FRAG_EBS 44
+#define IWX_UCODE_TLV_API_REDUCE_TX_POWER 45
+#define IWX_UCODE_TLV_API_SHORT_BEACON_NOTIF 46
+#define IWX_UCODE_TLV_API_BEACON_FILTER_V4 47
+#define IWX_UCODE_TLV_API_REGULATORY_NVM_INFO 48
+#define IWX_UCODE_TLV_API_FTM_NEW_RANGE_REQ 49
+#define IWX_UCODE_TLV_API_SCAN_OFFLOAD_CHANS 50
+#define IWX_UCODE_TLV_API_MBSSID_HE 52
+#define IWX_UCODE_TLV_API_WOWLAN_TCP_SYN_WAKE 53
+#define IWX_UCODE_TLV_API_FTM_RTT_ACCURACY 54
+#define IWX_UCODE_TLV_API_SAR_TABLE_VER 55
+#define IWX_UCODE_TLV_API_REDUCED_SCAN_CONFIG 56
+#define IWX_UCODE_TLV_API_ADWELL_HB_DEF_N_AP 57
+#define IWX_UCODE_TLV_API_SCAN_EXT_CHAN_VER 58
+#define IWX_UCODE_TLV_API_BAND_IN_RX_DATA 59
+#define IWX_NUM_UCODE_TLV_API 128
+
+#define IWX_UCODE_TLV_API_BITS \
+ "\020\10FRAGMENTED_SCAN\11WIFI_MCC_UPDATE\16WIDE_CMD_HDR\22LQ_SS_PARAMS\30EXT_SCAN_PRIO\33TX_POWER_CHAIN\35TKIP_MIC_KEYS"
+
+/**
+ * uCode capabilities
+ * @IWX_UCODE_TLV_CAPA_D0I3_SUPPORT: supports D0i3
+ * @IWX_UCODE_TLV_CAPA_LAR_SUPPORT: supports Location Aware Regulatory
+ * @IWX_UCODE_TLV_CAPA_UMAC_SCAN: supports UMAC scan.
+ * @IWX_UCODE_TLV_CAPA_BEAMFORMER: supports Beamformer
+ * @IWX_UCODE_TLV_CAPA_TOF_SUPPORT: supports Time of Flight (802.11mc FTM)
+ * @IWX_UCODE_TLV_CAPA_TDLS_SUPPORT: support basic TDLS functionality
+ * @IWX_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT: supports insertion of current
+ * tx power value into TPC Report action frame and Link Measurement Report
+ * action frame
+ * @IWX_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT: supports updating current
+ * channel in DS parameter set element in probe requests.
+ * @IWX_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT: supports adding TPC Report IE in
+ * probe requests.
+ * @IWX_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT: supports Quiet Period requests
+ * @IWX_UCODE_TLV_CAPA_DQA_SUPPORT: supports dynamic queue allocation (DQA),
+ * which also implies support for the scheduler configuration command
+ * @IWX_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH: supports TDLS channel switching
+ * @IWX_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG: Consolidated D3-D0 image
+ * @IWX_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command
+ * @IWX_UCODE_TLV_CAPA_DC2DC_SUPPORT: supports DC2DC Command
+ * @IWX_UCODE_TLV_CAPA_2G_COEX_SUPPORT: supports 2G coex Command
+ * @IWX_UCODE_TLV_CAPA_CSUM_SUPPORT: supports TCP Checksum Offload
+ * @IWX_UCODE_TLV_CAPA_RADIO_BEACON_STATS: support radio and beacon statistics
+ * @IWX_UCODE_TLV_CAPA_P2P_STANDALONE_UAPSD: support p2p standalone U-APSD
+ * @IWX_UCODE_TLV_CAPA_BT_COEX_PLCR: enabled BT Coex packet level co-running
+ * @IWX_UCODE_TLV_CAPA_LAR_MULTI_MCC: ucode supports LAR updates with different
+ * sources for the MCC. This TLV bit is a future replacement to
+ * IWX_UCODE_TLV_API_WIFI_MCC_UPDATE. When either is set, multi-source LAR
+ * is supported.
+ * @IWX_UCODE_TLV_CAPA_BT_COEX_RRC: supports BT Coex RRC
+ * @IWX_UCODE_TLV_CAPA_GSCAN_SUPPORT: supports gscan
+ * @IWX_UCODE_TLV_CAPA_NAN_SUPPORT: supports NAN
+ * @IWX_UCODE_TLV_CAPA_UMAC_UPLOAD: supports upload mode in umac (1=supported,
+ * 0=no support)
+ * @IWX_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS: firmware supports ultra high band
+ * (6 GHz).
+ * @IWX_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE: extended DTS measurement
+ * @IWX_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS: supports short PM timeouts
+ * @IWX_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT: supports bt-coex Multi-priority LUT
+ * @IWX_UCODE_TLV_CAPA_BEACON_ANT_SELECTION: firmware will decide on what
+ * antenna the beacon should be transmitted
+ * @IWX_UCODE_TLV_CAPA_BEACON_STORING: firmware will store the latest beacon
+ * from AP and will send it upon d0i3 exit.
+ * @IWX_UCODE_TLV_CAPA_LAR_SUPPORT_V2: support LAR API V2
+ * @IWX_UCODE_TLV_CAPA_CT_KILL_BY_FW: firmware responsible for CT-kill
+ * @IWX_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT: supports temperature
+ * thresholds reporting
+ * @IWX_UCODE_TLV_CAPA_CTDP_SUPPORT: supports cTDP command
+ * @IWX_UCODE_TLV_CAPA_USNIFFER_UNIFIED: supports usniffer enabled in
+ * regular image.
+ * @IWX_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG: support getting more shared
+ * memory addresses from the firmware.
+ * @IWX_UCODE_TLV_CAPA_LQM_SUPPORT: supports Link Quality Measurement
+ * @IWX_UCODE_TLV_CAPA_LMAC_UPLOAD: supports upload mode in lmac (1=supported,
+ * 0=no support)
+ *
+ * @IWX_NUM_UCODE_TLV_CAPA: number of bits used
+ */
+/* Bit indices into the ucode TLV capability bitmap (see doc block above). */
+#define IWX_UCODE_TLV_CAPA_D0I3_SUPPORT	0
+#define IWX_UCODE_TLV_CAPA_LAR_SUPPORT	1
+#define IWX_UCODE_TLV_CAPA_UMAC_SCAN	2
+#define IWX_UCODE_TLV_CAPA_BEAMFORMER	3
+#define IWX_UCODE_TLV_CAPA_TOF_SUPPORT	5
+#define IWX_UCODE_TLV_CAPA_TDLS_SUPPORT	6
+#define IWX_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT	8
+#define IWX_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT	9
+#define IWX_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT	10
+#define IWX_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT	11
+#define IWX_UCODE_TLV_CAPA_DQA_SUPPORT	12
+#define IWX_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH	13
+#define IWX_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG	17
+#define IWX_UCODE_TLV_CAPA_HOTSPOT_SUPPORT	18
+#define IWX_UCODE_TLV_CAPA_DC2DC_CONFIG_SUPPORT	19
+#define IWX_UCODE_TLV_CAPA_2G_COEX_SUPPORT	20
+#define IWX_UCODE_TLV_CAPA_CSUM_SUPPORT	21
+#define IWX_UCODE_TLV_CAPA_RADIO_BEACON_STATS	22
+#define IWX_UCODE_TLV_CAPA_P2P_STANDALONE_UAPSD	26
+#define IWX_UCODE_TLV_CAPA_BT_COEX_PLCR	28
+#define IWX_UCODE_TLV_CAPA_LAR_MULTI_MCC	29
+#define IWX_UCODE_TLV_CAPA_BT_COEX_RRC	30
+#define IWX_UCODE_TLV_CAPA_GSCAN_SUPPORT	31
+#define IWX_UCODE_TLV_CAPA_NAN_SUPPORT	34
+#define IWX_UCODE_TLV_CAPA_UMAC_UPLOAD	35
+/*
+ * NOTE(review): the "IWM_" prefix below looks like a typo for "IWX_"
+ * (this driver otherwise uses IWX_ throughout); confirm there are no
+ * users of the IWM_-spelled name before renaming.
+ */
+#define IWM_UCODE_TLV_CAPA_SOC_LATENCY_SUPPORT	37
+#define IWX_UCODE_TLV_CAPA_STA_PM_NOTIF	38
+#define IWX_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT	39
+#define IWX_UCODE_TLV_CAPA_CDB_SUPPORT	40
+#define IWX_UCODE_TLV_CAPA_D0I3_END_FIRST	41
+#define IWX_UCODE_TLV_CAPA_TLC_OFFLOAD	43
+#define IWX_UCODE_TLV_CAPA_DYNAMIC_QUOTA	44
+#define IWX_UCODE_TLV_CAPA_COEX_SCHEMA_2	45
+#define IWX_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD	46
+#define IWX_UCODE_TLV_CAPA_FTM_CALIBRATED	47
+#define IWX_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS	48
+#define IWX_UCODE_TLV_CAPA_CS_MODIFY	49
+#define IWX_UCODE_TLV_CAPA_SET_LTR_GEN2	50
+#define IWX_UCODE_TLV_CAPA_SET_PPAG	52
+#define IWX_UCODE_TLV_CAPA_TAS_CFG	53
+#define IWX_UCODE_TLV_CAPA_SESSION_PROT_CMD	54
+#define IWX_UCODE_TLV_CAPA_PROTECTED_TWT	56
+#define IWX_UCODE_TLV_CAPA_FW_RESET_HANDSHAKE	57
+#define IWX_UCODE_TLV_CAPA_PASSIVE_6GHZ_SCAN	58
+#define IWX_UCODE_TLV_CAPA_BAID_ML_SUPPORT	63
+#define IWX_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE	64
+#define IWX_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS	65
+#define IWX_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT	67
+#define IWX_UCODE_TLV_CAPA_MULTI_QUEUE_RX_SUPPORT	68
+#define IWX_UCODE_TLV_CAPA_CSA_AND_TBTT_OFFLOAD	70
+#define IWX_UCODE_TLV_CAPA_BEACON_ANT_SELECTION	71
+#define IWX_UCODE_TLV_CAPA_BEACON_STORING	72
+#define IWX_UCODE_TLV_CAPA_LAR_SUPPORT_V3	73
+#define IWX_UCODE_TLV_CAPA_CT_KILL_BY_FW	74
+#define IWX_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT	75
+#define IWX_UCODE_TLV_CAPA_CTDP_SUPPORT	76
+#define IWX_UCODE_TLV_CAPA_USNIFFER_UNIFIED	77
+#define IWX_UCODE_TLV_CAPA_LMAC_UPLOAD	79
+#define IWX_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG	80
+#define IWX_UCODE_TLV_CAPA_LQM_SUPPORT	81
+#define IWX_UCODE_TLV_CAPA_TX_POWER_ACK	84
+#define IWX_UCODE_TLV_CAPA_D3_DEBUG	87
+#define IWX_UCODE_TLV_CAPA_LED_CMD_SUPPORT	88
+#define IWX_UCODE_TLV_CAPA_MCC_UPDATE_11AX_SUPPORT	89
+#define IWX_UCODE_TLV_CAPA_CSI_REPORTING	90
+#define IWX_UCODE_TLV_CAPA_CSI_REPORTING_V2	91
+#define IWX_UCODE_TLV_CAPA_DBG_SUSPEND_RESUME_CMD_SUPP	92
+#define IWX_UCODE_TLV_CAPA_DBG_BUF_ALLOC_CMD_SUPP	93
+#define IWX_UCODE_TLV_CAPA_MLME_OFFLOAD	96
+#define IWX_UCODE_TLV_CAPA_BIGTK_SUPPORT	100
+#define IWX_UCODE_TLV_CAPA_RFIM_SUPPORT	102
+
+#define IWX_NUM_UCODE_TLV_CAPA 128
+
+/*
+ * For 16.0 uCode and above, there is no differentiation between sections,
+ * just an offset to the HW address.
+ */
+#define IWX_CPU1_CPU2_SEPARATOR_SECTION 0xFFFFCCCC
+#define IWX_PAGING_SEPARATOR_SECTION 0xAAAABBBB
+
+/* uCode version contains 4 values: Major/Minor/API/Serial */
+#define IWX_UCODE_MAJOR(ver) (((ver) & 0xFF000000) >> 24)
+#define IWX_UCODE_MINOR(ver) (((ver) & 0x00FF0000) >> 16)
+#define IWX_UCODE_API(ver) (((ver) & 0x0000FF00) >> 8)
+#define IWX_UCODE_SERIAL(ver) ((ver) & 0x000000FF)
+
+/*
+ * Calibration control struct.
+ * Sent as part of the phy configuration command.
+ * @flow_trigger: bitmap for which calibrations to perform according to
+ * flow triggers.
+ * @event_trigger: bitmap for which calibrations to perform according to
+ * event triggers.
+ */
+struct iwx_tlv_calib_ctrl {
+ uint32_t flow_trigger;
+ uint32_t event_trigger;
+} __packed;
+
+#define IWX_FW_PHY_CFG_RADIO_TYPE_POS 0
+#define IWX_FW_PHY_CFG_RADIO_TYPE (0x3 << IWX_FW_PHY_CFG_RADIO_TYPE_POS)
+#define IWX_FW_PHY_CFG_RADIO_STEP_POS 2
+#define IWX_FW_PHY_CFG_RADIO_STEP (0x3 << IWX_FW_PHY_CFG_RADIO_STEP_POS)
+#define IWX_FW_PHY_CFG_RADIO_DASH_POS 4
+#define IWX_FW_PHY_CFG_RADIO_DASH (0x3 << IWX_FW_PHY_CFG_RADIO_DASH_POS)
+#define IWX_FW_PHY_CFG_TX_CHAIN_POS 16
+#define IWX_FW_PHY_CFG_TX_CHAIN (0xf << IWX_FW_PHY_CFG_TX_CHAIN_POS)
+#define IWX_FW_PHY_CFG_RX_CHAIN_POS 20
+#define IWX_FW_PHY_CFG_RX_CHAIN (0xf << IWX_FW_PHY_CFG_RX_CHAIN_POS)
+
+/**
+ * struct iwx_fw_cipher_scheme - a cipher scheme supported by FW.
+ * @cipher: a cipher suite selector
+ * @flags: cipher scheme flags (currently reserved for a future use)
+ * @hdr_len: a size of MPDU security header
+ * @pn_len: a size of PN
+ * @pn_off: an offset of pn from the beginning of the security header
+ * @key_idx_off: an offset of key index byte in the security header
+ * @key_idx_mask: a bit mask of key_idx bits
+ * @key_idx_shift: bit shift needed to get key_idx
+ * @mic_len: mic length in bytes
+ * @hw_cipher: a HW cipher index used in host commands
+ */
+struct iwx_fw_cipher_scheme {
+ uint32_t cipher;
+ uint8_t flags;
+ uint8_t hdr_len;
+ uint8_t pn_len;
+ uint8_t pn_off;
+ uint8_t key_idx_off;
+ uint8_t key_idx_mask;
+ uint8_t key_idx_shift;
+ uint8_t mic_len;
+ uint8_t hw_cipher;
+} __packed;
+
+/**
+ * struct iwx_fw_cscheme_list - a cipher scheme list
+ * @size: a number of entries
+ * @cs: cipher scheme entries
+ */
+struct iwx_fw_cscheme_list {
+ uint8_t size;
+ struct iwx_fw_cipher_scheme cs[];
+} __packed;
+
+/*
+ * v1/v2 uCode file layout (pre-TLV firmware images).
+ * The first word encodes major/minor/API/serial (see IWX_UCODE_MAJOR etc.
+ * above); v2 adds a build number before the section sizes.  The firmware
+ * sections follow in "data" in the same order as the size fields.
+ */
+struct iwx_ucode_header {
+	uint32_t ver;	/* major/minor/API/serial */
+	union {
+		struct {
+			uint32_t inst_size;	/* bytes of runtime code */
+			uint32_t data_size;	/* bytes of runtime data */
+			uint32_t init_size;	/* bytes of init code */
+			uint32_t init_data_size;	/* bytes of init data */
+			uint32_t boot_size;	/* bytes of bootstrap code */
+			uint8_t data[0];	/* in same order as sizes */
+		} v1;
+		struct {
+			uint32_t build;	/* build number */
+			uint32_t inst_size;	/* bytes of runtime code */
+			uint32_t data_size;	/* bytes of runtime data */
+			uint32_t init_size;	/* bytes of init code */
+			uint32_t init_data_size;	/* bytes of init data */
+			uint32_t boot_size;	/* bytes of bootstrap code */
+			uint8_t data[0];	/* in same order as sizes */
+		} v2;
+	} u;
+};
+
+/*
+ * new TLV uCode file layout
+ *
+ * The new TLV file format contains TLVs, that each specify
+ * some piece of data.
+ */
+
+#define IWX_UCODE_TLV_INVALID 0 /* unused */
+#define IWX_UCODE_TLV_INST 1
+#define IWX_UCODE_TLV_DATA 2
+#define IWX_UCODE_TLV_INIT 3
+#define IWX_UCODE_TLV_INIT_DATA 4
+#define IWX_UCODE_TLV_BOOT 5
+#define IWX_UCODE_TLV_PROBE_MAX_LEN 6 /* a uint32_t value */
+#define IWX_UCODE_TLV_PAN 7
+#define IWX_UCODE_TLV_RUNT_EVTLOG_PTR 8
+#define IWX_UCODE_TLV_RUNT_EVTLOG_SIZE 9
+#define IWX_UCODE_TLV_RUNT_ERRLOG_PTR 10
+#define IWX_UCODE_TLV_INIT_EVTLOG_PTR 11
+#define IWX_UCODE_TLV_INIT_EVTLOG_SIZE 12
+#define IWX_UCODE_TLV_INIT_ERRLOG_PTR 13
+#define IWX_UCODE_TLV_ENHANCE_SENS_TBL 14
+#define IWX_UCODE_TLV_PHY_CALIBRATION_SIZE 15
+#define IWX_UCODE_TLV_WOWLAN_INST 16
+#define IWX_UCODE_TLV_WOWLAN_DATA 17
+#define IWX_UCODE_TLV_FLAGS 18
+#define IWX_UCODE_TLV_SEC_RT 19
+#define IWX_UCODE_TLV_SEC_INIT 20
+#define IWX_UCODE_TLV_SEC_WOWLAN 21
+#define IWX_UCODE_TLV_DEF_CALIB 22
+#define IWX_UCODE_TLV_PHY_SKU 23
+#define IWX_UCODE_TLV_SECURE_SEC_RT 24
+#define IWX_UCODE_TLV_SECURE_SEC_INIT 25
+#define IWX_UCODE_TLV_SECURE_SEC_WOWLAN 26
+#define IWX_UCODE_TLV_NUM_OF_CPU 27
+#define IWX_UCODE_TLV_CSCHEME 28
+#define IWX_UCODE_TLV_API_CHANGES_SET 29
+#define IWX_UCODE_TLV_ENABLED_CAPABILITIES 30
+#define IWX_UCODE_TLV_N_SCAN_CHANNELS 31
+#define IWX_UCODE_TLV_PAGING 32
+#define IWX_UCODE_TLV_SEC_RT_USNIFFER 34
+#define IWX_UCODE_TLV_SDIO_ADMA_ADDR 35
+#define IWX_UCODE_TLV_FW_VERSION 36
+#define IWX_UCODE_TLV_FW_DBG_DEST 38
+#define IWX_UCODE_TLV_FW_DBG_CONF 39
+#define IWX_UCODE_TLV_FW_DBG_TRIGGER 40
+#define IWX_UCODE_TLV_CMD_VERSIONS 48
+#define IWX_UCODE_TLV_FW_GSCAN_CAPA 50
+#define IWX_UCODE_TLV_FW_MEM_SEG 51
+#define IWX_UCODE_TLV_IML 52
+#define IWX_UCODE_TLV_FW_FMAC_API_VERSION 53
+#define IWX_UCODE_TLV_UMAC_DEBUG_ADDRS 54
+#define IWX_UCODE_TLV_LMAC_DEBUG_ADDRS 55
+#define IWX_UCODE_TLV_FW_RECOVERY_INFO 57
+#define IWX_UCODE_TLV_HW_TYPE 58
+#define IWX_UCODE_TLV_FW_FMAC_RECOVERY_INFO 59
+#define IWX_UCODE_TLV_FW_FSEQ_VERSION 60
+#define IWX_UCODE_TLV_PHY_INTEGRATION_VERSION 61
+#define IWX_UCODE_TLV_PNVM_VERSION 62
+#define IWX_UCODE_TLV_PNVM_SKU 64
+
+#define IWX_UCODE_TLV_SEC_TABLE_ADDR 66
+#define IWX_UCODE_TLV_D3_KEK_KCK_ADDR 67
+#define IWX_UCODE_TLV_CURRENT_PC 68
+
+#define IWX_UCODE_TLV_CONST_BASE 0x100
+#define IWX_UCODE_TLV_FW_NUM_STATIONS (IWX_UCODE_TLV_CONST_BASE + 0)
+#define IWX_UCODE_TLV_FW_NUM_BEACONS (IWX_UCODE_TLV_CONST_BASE + 2)
+
+#define IWX_UCODE_TLV_DEBUG_BASE 0x1000005
+#define IWX_UCODE_TLV_TYPE_DEBUG_INFO (IWX_UCODE_TLV_DEBUG_BASE + 0)
+#define IWX_UCODE_TLV_TYPE_BUFFER_ALLOCATION (IWX_UCODE_TLV_DEBUG_BASE + 1)
+#define IWX_UCODE_TLV_TYPE_HCMD (IWX_UCODE_TLV_DEBUG_BASE + 2)
+#define IWX_UCODE_TLV_TYPE_REGIONS (IWX_UCODE_TLV_DEBUG_BASE + 3)
+#define IWX_UCODE_TLV_TYPE_TRIGGERS (IWX_UCODE_TLV_DEBUG_BASE + 4)
+#define IWX_UCODE_TLV_TYPE_CONF_SET (IWX_UCODE_TLV_DEBUG_BASE + 5)
+#define IWX_UCODE_TLV_DEBUG_MAX IWX_UCODE_TLV_TYPE_CONF_SET
+
+
+/*
+ * One TLV record in the firmware image: header immediately followed by
+ * "length" bytes of payload; each record is padded to a multiple of 4
+ * (see struct iwx_tlv_ucode_header below).
+ * NOTE(review): unlike the neighbouring structs this one is not tagged
+ * __packed; all members are naturally 4-byte aligned so the layout
+ * appears unaffected — confirm intentional.
+ */
+struct iwx_ucode_tlv {
+	uint32_t type;		/* see above */
+	uint32_t length;	/* not including type/length fields */
+	uint8_t data[0];	/* variable-length payload */
+};
+
+/*
+ * One 32-bit word of the ucode API flags bitmap, selected by api_index.
+ * Presumably the payload of IWX_UCODE_TLV_API_CHANGES_SET (TLV 29) —
+ * confirm against the TLV parser.
+ */
+struct iwx_ucode_api {
+	uint32_t api_index;	/* which 32-bit word of the bitmap */
+	uint32_t api_flags;	/* flag bits for that word */
+} __packed;
+
+/*
+ * One 32-bit word of the ucode capability bitmap, selected by api_index.
+ * Presumably the payload of IWX_UCODE_TLV_ENABLED_CAPABILITIES (TLV 30)
+ * — confirm against the TLV parser.
+ */
+struct iwx_ucode_capa {
+	uint32_t api_index;	/* which 32-bit word of the bitmap */
+	uint32_t api_capa;	/* capability bits for that word */
+} __packed;
+
+#define IWX_TLV_UCODE_MAGIC 0x0a4c5749
+
+struct iwx_tlv_ucode_header {
+ /*
+ * The TLV style ucode header is distinguished from
+ * the v1/v2 style header by first four bytes being
+ * zero, as such is an invalid combination of
+ * major/minor/API/serial versions.
+ */
+ uint32_t zero;
+ uint32_t magic;
+ uint8_t human_readable[64];
+ uint32_t ver; /* major/minor/API/serial */
+ uint32_t build;
+ uint64_t ignore;
+ /*
+ * The data contained herein has a TLV layout,
+ * see above for the TLV header and types.
+ * Note that each TLV is padded to a length
+ * that is a multiple of 4 for alignment.
+ */
+ uint8_t data[0];
+};
+
+/*
+ * Registers in this file are internal, not PCI bus memory mapped.
+ * Driver accesses these via IWX_HBUS_TARG_PRPH_* registers.
+ */
+#define IWX_PRPH_BASE (0x00000)
+#define IWX_PRPH_END (0xFFFFF)
+
+/****************************/
+/* Flow Handler Definitions */
+/****************************/
+
+/**
+ * struct iwx_rb_status - receive buffer status
+ * host memory mapped FH registers
+ * @closed_rb_num [0:11] - Indicates the index of the RB which was closed
+ * @closed_fr_num [0:11] - Indicates the index of the RX Frame which was closed
+ * @finished_rb_num [0:11] - Indicates the index of the current RB
+ * in which the last frame was written to
+ * @finished_fr_num [0:11] - Indicates the index of the RX Frame
+ * which was transferred
+ */
+struct iwx_rb_status {
+	uint16_t closed_rb_num;
+	uint16_t closed_fr_num;
+	uint16_t finished_rb_num;
+	/*
+	 * NOTE(review): "finished_fr_nam" looks like a typo for "_num" but
+	 * matches the field name used by Linux iwlwifi's struct
+	 * iwl_rb_status; do not rename without updating all users.
+	 */
+	uint16_t finished_fr_nam;
+	uint32_t unused;
+} __packed;
+
+
+#define IWX_TFD_QUEUE_SIZE_MAX (256)
+#define IWX_TFD_QUEUE_SIZE_MAX_GEN3 (65536)
+/* cb size is the exponent - 3 */
+#define IWX_TFD_QUEUE_CB_SIZE(x) (IWX_RX_QUEUE_CB_SIZE(x) - 3)
+#define IWX_TFD_QUEUE_SIZE_BC_DUP (64)
+#define IWX_TFD_QUEUE_BC_SIZE (IWX_TFD_QUEUE_SIZE_MAX + \
+ IWX_TFD_QUEUE_SIZE_BC_DUP)
+#define IWX_TFD_QUEUE_BC_SIZE_GEN3_AX210 1024
+#define IWX_TFD_QUEUE_BC_SIZE_GEN3_BZ (1024 * 4)
+#define IWX_TFH_NUM_TBS 25
+
+/**
+ * struct iwx_tfh_tb transmit buffer descriptor within transmit frame descriptor
+ *
+ * This structure contains dma address and length of transmission address
+ *
+ * @tb_len length of the tx buffer
+ * @addr 64 bits dma address
+ */
+struct iwx_tfh_tb {
+ uint16_t tb_len;
+ uint64_t addr;
+} __packed;
+
+/**
+ * Each Tx queue uses a circular buffer of 256 TFDs stored in host DRAM.
+ * Both driver and device share these circular buffers, each of which must be
+ * contiguous 256 TFDs.
+ * For pre 22000 HW it is 256 x 128 bytes-per-TFD = 32 KBytes
+ * For 22000 HW and on it is 256 x 256 bytes-per-TFD = 65 KBytes
+ *
+ * Each TFD contains pointer/size information for up to 25 data buffers
+ * in host DRAM. These buffers collectively contain the (one) frame described
+ * by the TFD. Each buffer must be a single contiguous block of memory within
+ * itself, but buffers may be scattered in host DRAM. Each buffer has max size
+ * of (4K - 4). The hardware concatenates all of a TFD's buffers into a single
+ * Tx frame, up to 8 KBytes in size.
+ *
+ * A maximum of 255 (not 256!) TFDs may be on a queue waiting for Tx.
+ */
+
+/**
+ * struct iwx_tfh_tfd - Transmit Frame Descriptor (TFD)
+ * @ num_tbs 0-4 number of active tbs
+ * 5 -15 reserved
+ * @ tbs[25] transmit frame buffer descriptors
+ * @ __pad padding
+ */
+struct iwx_tfh_tfd {
+ uint16_t num_tbs;
+ struct iwx_tfh_tb tbs[IWX_TFH_NUM_TBS];
+ uint32_t __pad;
+} __packed;
+
+/* Fixed (non-configurable) rx data from phy */
+
+/**
+ * struct iwx_agn_schedq_bc_tbl scheduler byte count table
+ * base physical address provided by IWX_SCD_DRAM_BASE_ADDR
+ * @tfd_offset 0-11 - tx command byte count
+ * 12-13 - number of 64 byte chunks
+ * 14-15 - reserved
+ */
+struct iwx_agn_scd_bc_tbl {
+ uint16_t tfd_offset[IWX_TFD_QUEUE_BC_SIZE];
+} __packed;
+
+/**
+ * struct iwx_gen3_bc_tbl_entry scheduler byte count table entry gen3
+ * For AX210 and up, the table no longer needs to be contiguous in memory.
+ * @tfd_offset: 0-13 - tx command byte count
+ * 14-15 - number of 64 byte chunks
+ */
+struct iwx_gen3_bc_tbl_entry {
+ uint16_t tfd_offset;
+} __packed;
+
+/**
+ * DQA - Dynamic Queue Allocation -introduction
+ *
+ * Dynamic Queue Allocation (AKA "DQA") is a feature implemented in iwlwifi
+ * to allow dynamic allocation of queues on-demand, rather than allocate them
+ * statically ahead of time. Ideally, we would like to allocate one queue
+ * per RA/TID, thus allowing an AP - for example - to send BE traffic to STA2
+ * even if it also needs to send traffic to a sleeping STA1, without being
+ * blocked by the sleeping station.
+ *
+ * Although the queues in DQA mode are dynamically allocated, there are still
+ * some queues that are statically allocated:
+ * TXQ #0 - command queue
+ * TXQ #1 - aux frames
+ */
+
+/* static DQA Tx queue numbers */
+#define IWX_DQA_CMD_QUEUE 0
+#define IWX_DQA_AUX_QUEUE 1
+
+#define IWX_DQA_INJECT_MONITOR_QUEUE 2 /* used in monitor mode only */
+#define IWX_DQA_MGMT_QUEUE 1 /* default queue other modes */
+
+/* Reserve 8 DQA Tx queues for QoS data frames. */
+#define IWX_MAX_TID_COUNT 8
+#define IWX_FIRST_AGG_TX_QUEUE (IWX_DQA_MGMT_QUEUE + 1)
+#define IWX_LAST_AGG_TX_QUEUE (IWX_FIRST_AGG_TX_QUEUE + IWX_MAX_TID_COUNT - 1)
+#define IWX_NUM_TX_QUEUES (IWX_LAST_AGG_TX_QUEUE + 1)
+
+/**
+ * Max Tx window size is the max number of contiguous TFDs that the scheduler
+ * can keep track of at one time when creating block-ack chains of frames.
+ * Note that "64" matches the number of ack bits in a block-ack packet.
+ */
+#define IWX_FRAME_LIMIT 64
+
+#define IWX_TX_FIFO_BK 0
+#define IWX_TX_FIFO_BE 1
+#define IWX_TX_FIFO_VI 2
+#define IWX_TX_FIFO_VO 3
+#define IWX_TX_FIFO_MCAST 5
+#define IWX_TX_FIFO_CMD 7
+
+/*
+ * Gen2 hardware TX FIFO numbering: the command FIFO is 0, followed by
+ * one EDCA FIFO per access category (BK/BE/VI/VO) and then one TRIG_*
+ * FIFO per access category (semantics of the TRIG FIFOs not shown here).
+ */
+enum iwx_gen2_tx_fifo {
+	IWX_GEN2_TX_FIFO_CMD = 0,
+	IWX_GEN2_EDCA_TX_FIFO_BK,
+	IWX_GEN2_EDCA_TX_FIFO_BE,
+	IWX_GEN2_EDCA_TX_FIFO_VI,
+	IWX_GEN2_EDCA_TX_FIFO_VO,
+	IWX_GEN2_TRIG_TX_FIFO_BK,
+	IWX_GEN2_TRIG_TX_FIFO_BE,
+	IWX_GEN2_TRIG_TX_FIFO_VI,
+	IWX_GEN2_TRIG_TX_FIFO_VO,
+};
+
+/**
+ * TXQ config options
+ * @TX_QUEUE_CFG_ENABLE_QUEUE: enable a queue
+ * @TX_QUEUE_CFG_TFD_SHORT_FORMAT: use short TFD format
+ */
+#define IWX_TX_QUEUE_CFG_ENABLE_QUEUE (1 << 0)
+#define IWX_TX_QUEUE_CFG_TFD_SHORT_FORMAT (1 << 1)
+
+#define IWX_DEFAULT_QUEUE_SIZE IWX_TFD_QUEUE_SIZE_MAX
+
+/**
+ * struct iwx_tx_queue_cfg_cmd - txq hw scheduler config command
+ * @sta_id: station id
+ * @tid: tid of the queue
+ * @flags: see &enum iwl_tx_queue_cfg_actions
+ * @cb_size: size of TFD cyclic buffer. Value is exponent - 3.
+ * Minimum value 0 (8 TFDs), maximum value 5 (256 TFDs)
+ * @byte_cnt_addr: address of byte count table
+ * @tfdq_addr: address of TFD circular buffer
+ */
+struct iwx_tx_queue_cfg_cmd {
+ uint8_t sta_id;
+ uint8_t tid;
+ uint16_t flags;
+ uint32_t cb_size;
+ uint64_t byte_cnt_addr;
+ uint64_t tfdq_addr;
+} __packed; /* TX_QUEUE_CFG_CMD_API_S_VER_2 */
+
+/**
+ * struct iwx_tx_queue_cfg_rsp - response to txq hw scheduler config
+ * @queue_number: queue number assigned to this RA -TID
+ * @flags: set on failure
+ * @write_pointer: initial value for write pointer
+ * @reserved: reserved
+ */
+struct iwx_tx_queue_cfg_rsp {
+ uint16_t queue_number;
+ uint16_t flags;
+ uint16_t write_pointer;
+ uint16_t reserved;
+} __packed; /* TX_QUEUE_CFG_RSP_API_S_VER_2 */
+
+
+/*
+ * Commands
+ */
+#define IWX_ALIVE 0x1
+#define IWX_REPLY_ERROR 0x2
+#define IWX_INIT_COMPLETE_NOTIF 0x4
+
+/* PHY context commands */
+#define IWX_PHY_CONTEXT_CMD 0x8
+#define IWX_DBG_CFG 0x9
+
+/* UMAC scan commands */
+#define IWX_SCAN_ITERATION_COMPLETE_UMAC 0xb5
+#define IWX_SCAN_CFG_CMD 0xc
+#define IWX_SCAN_REQ_UMAC 0xd
+#define IWX_SCAN_ABORT_UMAC 0xe
+#define IWX_SCAN_COMPLETE_UMAC 0xf
+
+/* station table */
+#define IWX_ADD_STA_KEY 0x17
+#define IWX_ADD_STA 0x18
+#define IWX_REMOVE_STA 0x19
+
+/* TX */
+#define IWX_TX_CMD 0x1c
+#define IWX_TXPATH_FLUSH 0x1e
+#define IWX_MGMT_MCAST_KEY 0x1f
+
+/* scheduler config */
+#define IWX_SCD_QUEUE_CFG 0x1d
+
+/* global key */
+#define IWX_WEP_KEY 0x20
+
+/* MAC and Binding commands */
+#define IWX_MAC_CONTEXT_CMD 0x28
+#define IWX_TIME_EVENT_CMD 0x29 /* both CMD and response */
+#define IWX_TIME_EVENT_NOTIFICATION 0x2a
+#define IWX_BINDING_CONTEXT_CMD 0x2b
+#define IWX_TIME_QUOTA_CMD 0x2c
+#define IWX_NON_QOS_TX_COUNTER_CMD 0x2d
+
+/* Calibration */
+#define IWX_TEMPERATURE_NOTIFICATION 0x62
+#define IWX_CALIBRATION_CFG_CMD 0x65
+#define IWX_CALIBRATION_RES_NOTIFICATION 0x66
+#define IWX_CALIBRATION_COMPLETE_NOTIFICATION 0x67
+#define IWX_RADIO_VERSION_NOTIFICATION 0x68
+
+/* Phy */
+#define IWX_PHY_CONFIGURATION_CMD 0x6a
+
+/* Power - legacy power table command */
+#define IWX_POWER_TABLE_CMD 0x77
+#define IWX_PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION 0x78
+#define IWX_LTR_CONFIG 0xee
+
+/* NVM */
+#define IWX_NVM_ACCESS_CMD 0x88
+
+#define IWX_SET_CALIB_DEFAULT_CMD 0x8e
+
+#define IWX_BEACON_NOTIFICATION 0x90
+#define IWX_BEACON_TEMPLATE_CMD 0x91
+#define IWX_TX_ANT_CONFIGURATION_CMD 0x98
+#define IWX_BT_CONFIG 0x9b
+#define IWX_STATISTICS_CMD 0x9c
+#define IWX_STATISTICS_NOTIFICATION 0x9d
+#define IWX_REDUCE_TX_POWER_CMD 0x9f
+
+/* RF-KILL commands and notifications */
+#define IWX_CARD_STATE_CMD 0xa0
+#define IWX_CARD_STATE_NOTIFICATION 0xa1
+
+#define IWX_MISSED_BEACONS_NOTIFICATION 0xa2
+
+#define IWX_MFUART_LOAD_NOTIFICATION 0xb1
+
+/* Power - new power table command */
+#define IWX_MAC_PM_POWER_TABLE 0xa9
+
+#define IWX_REPLY_RX_PHY_CMD 0xc0
+#define IWX_REPLY_RX_MPDU_CMD 0xc1
+#define IWX_BAR_FRAME_RELEASE 0xc2
+#define IWX_FRAME_RELEASE 0xc3
+#define IWX_BA_NOTIF 0xc5
+
+/* Location Aware Regulatory */
+#define IWX_MCC_UPDATE_CMD 0xc8
+#define IWX_MCC_CHUB_UPDATE_CMD 0xc9
+
+/* BT Coex */
+#define IWX_BT_COEX_PRIO_TABLE 0xcc
+#define IWX_BT_COEX_PROT_ENV 0xcd
+#define IWX_BT_PROFILE_NOTIFICATION 0xce
+#define IWX_BT_COEX_CI 0x5d
+
+#define IWX_REPLY_SF_CFG_CMD 0xd1
+#define IWX_REPLY_BEACON_FILTERING_CMD 0xd2
+
+/* DTS measurements */
+#define IWX_CMD_DTS_MEASUREMENT_TRIGGER 0xdc
+#define IWX_DTS_MEASUREMENT_NOTIFICATION 0xdd
+
+#define IWX_REPLY_DEBUG_CMD 0xf0
+#define IWX_DEBUG_LOG_MSG 0xf7
+
+#define IWX_MCAST_FILTER_CMD 0xd0
+
+/* D3 commands/notifications */
+#define IWX_D3_CONFIG_CMD 0xd3
+#define IWX_PROT_OFFLOAD_CONFIG_CMD 0xd4
+#define IWX_OFFLOADS_QUERY_CMD 0xd5
+#define IWX_REMOTE_WAKE_CONFIG_CMD 0xd6
+
+/* for WoWLAN in particular */
+#define IWX_WOWLAN_PATTERNS 0xe0
+#define IWX_WOWLAN_CONFIGURATION 0xe1
+#define IWX_WOWLAN_TSC_RSC_PARAM 0xe2
+#define IWX_WOWLAN_TKIP_PARAM 0xe3
+#define IWX_WOWLAN_KEK_KCK_MATERIAL 0xe4
+#define IWX_WOWLAN_GET_STATUSES 0xe5
+#define IWX_WOWLAN_TX_POWER_PER_DB 0xe6
+
+/* and for NetDetect */
+#define IWX_NET_DETECT_CONFIG_CMD 0x54
+#define IWX_NET_DETECT_PROFILES_QUERY_CMD 0x56
+#define IWX_NET_DETECT_PROFILES_CMD 0x57
+#define IWX_NET_DETECT_HOTSPOTS_CMD 0x58
+#define IWX_NET_DETECT_HOTSPOTS_QUERY_CMD 0x59
+
+/* system group command IDs */
+#define IWX_FSEQ_VER_MISMATCH_NOTIFICATION 0xff
+
+#define IWX_REPLY_MAX 0xff
+
+/* PHY_OPS subcommand IDs */
+#define IWX_CMD_DTS_MEASUREMENT_TRIGGER_WIDE 0x0
+#define IWX_CTDP_CONFIG_CMD 0x03
+#define IWX_TEMP_REPORTING_THRESHOLDS_CMD 0x04
+#define IWX_CT_KILL_NOTIFICATION 0xFE
+#define IWX_DTS_MEASUREMENT_NOTIF_WIDE 0xFF
+
+/* command groups */
+#define IWX_LEGACY_GROUP 0x0
+#define IWX_LONG_GROUP 0x1
+#define IWX_SYSTEM_GROUP 0x2
+#define IWX_MAC_CONF_GROUP 0x3
+#define IWX_PHY_OPS_GROUP 0x4
+#define IWX_DATA_PATH_GROUP 0x5
+#define IWX_PROT_OFFLOAD_GROUP 0xb
+#define IWX_REGULATORY_AND_NVM_GROUP 0xc
+
+/* SYSTEM_GROUP group subcommand IDs */
+
+#define IWX_SHARED_MEM_CFG_CMD 0x00
+#define IWX_SOC_CONFIGURATION_CMD 0x01
+#define IWX_INIT_EXTENDED_CFG_CMD 0x03
+#define IWX_FW_ERROR_RECOVERY_CMD 0x07
+
+/* MAC_CONF group subcommand IDs */
+#define IWX_SESSION_PROTECTION_CMD 0x05
+#define IWX_SESSION_PROTECTION_NOTIF 0xfb
+
+/* DATA_PATH group subcommand IDs */
+#define IWX_DQA_ENABLE_CMD 0x00
+#define IWX_RLC_CONFIG_CMD 0x08
+#define IWX_TLC_MNG_CONFIG_CMD 0x0f
+#define IWX_RX_BAID_ALLOCATION_CONFIG_CMD 0x16
+#define IWX_SCD_QUEUE_CONFIG_CMD 0x17
+#define IWX_RX_NO_DATA_NOTIF 0xf5
+#define IWX_TLC_MNG_UPDATE_NOTIF 0xf7
+
+/* REGULATORY_AND_NVM group subcommand IDs */
+#define IWX_NVM_ACCESS_COMPLETE	0x00
+#define IWX_NVM_GET_INFO	0x02
+#define IWX_PNVM_INIT_COMPLETE	0xfe
+
+/*
+ * struct iwx_dqa_enable_cmd
+ * @cmd_queue: the TXQ number of the command queue
+ */
+struct iwx_dqa_enable_cmd {
+ uint32_t cmd_queue;
+} __packed; /* DQA_CONTROL_CMD_API_S_VER_1 */
+
+/**
+ * struct iwx_cmd_response - generic response struct for most commands
+ * @status: status of the command asked, changes for each one
+ */
+struct iwx_cmd_response {
+ uint32_t status;
+};
+
+/*
+ * struct iwx_tx_ant_cfg_cmd
+ * @valid: valid antenna configuration
+ */
+struct iwx_tx_ant_cfg_cmd {
+ uint32_t valid;
+} __packed;
+
+/*
+ * Calibration control struct.
+ * Sent as part of the phy configuration command.
+ * @flow_trigger: bitmap for which calibrations to perform according to
+ * flow triggers.
+ * @event_trigger: bitmap for which calibrations to perform according to
+ * event triggers.
+ */
+struct iwx_calib_ctrl {
+ uint32_t flow_trigger;
+ uint32_t event_trigger;
+} __packed;
+
+/* This defines the bitmap of various calibrations to enable in both
+ * init ucode and runtime ucode through IWX_CALIBRATION_CFG_CMD.
+ */
+#define IWX_CALIB_CFG_XTAL_IDX (1 << 0)
+#define IWX_CALIB_CFG_TEMPERATURE_IDX (1 << 1)
+#define IWX_CALIB_CFG_VOLTAGE_READ_IDX (1 << 2)
+#define IWX_CALIB_CFG_PAPD_IDX (1 << 3)
+#define IWX_CALIB_CFG_TX_PWR_IDX (1 << 4)
+#define IWX_CALIB_CFG_DC_IDX (1 << 5)
+#define IWX_CALIB_CFG_BB_FILTER_IDX (1 << 6)
+#define IWX_CALIB_CFG_LO_LEAKAGE_IDX (1 << 7)
+#define IWX_CALIB_CFG_TX_IQ_IDX (1 << 8)
+#define IWX_CALIB_CFG_TX_IQ_SKEW_IDX (1 << 9)
+#define IWX_CALIB_CFG_RX_IQ_IDX (1 << 10)
+#define IWX_CALIB_CFG_RX_IQ_SKEW_IDX (1 << 11)
+#define IWX_CALIB_CFG_SENSITIVITY_IDX (1 << 12)
+#define IWX_CALIB_CFG_CHAIN_NOISE_IDX (1 << 13)
+#define IWX_CALIB_CFG_DISCONNECTED_ANT_IDX (1 << 14)
+#define IWX_CALIB_CFG_ANT_COUPLING_IDX (1 << 15)
+#define IWX_CALIB_CFG_DAC_IDX (1 << 16)
+#define IWX_CALIB_CFG_ABS_IDX (1 << 17)
+#define IWX_CALIB_CFG_AGC_IDX (1 << 18)
+
+/*
+ * Phy configuration command.
+ */
+struct iwx_phy_cfg_cmd {
+ uint32_t phy_cfg;
+ struct iwx_calib_ctrl calib_control;
+} __packed;
+
+#define IWX_PHY_CFG_RADIO_TYPE ((1 << 0) | (1 << 1))
+#define IWX_PHY_CFG_RADIO_STEP ((1 << 2) | (1 << 3))
+#define IWX_PHY_CFG_RADIO_DASH ((1 << 4) | (1 << 5))
+#define IWX_PHY_CFG_PRODUCT_NUMBER ((1 << 6) | (1 << 7))
+#define IWX_PHY_CFG_TX_CHAIN_A (1 << 8)
+#define IWX_PHY_CFG_TX_CHAIN_B (1 << 9)
+#define IWX_PHY_CFG_TX_CHAIN_C (1 << 10)
+#define IWX_PHY_CFG_RX_CHAIN_A (1 << 12)
+#define IWX_PHY_CFG_RX_CHAIN_B (1 << 13)
+#define IWX_PHY_CFG_RX_CHAIN_C (1 << 14)
+
+#define IWX_MAX_DTS_TRIPS 8
+
+/**
+ * struct iwx_ct_kill_notif - CT-kill entry notification
+ *
+ * @temperature: the current temperature in celsius
+ * @reserved: reserved
+ */
+struct iwx_ct_kill_notif {
+ uint16_t temperature;
+ uint16_t reserved;
+} __packed; /* GRP_PHY_CT_KILL_NTF */
+
+/**
+ * struct iwx_temp_report_ths_cmd - set temperature thresholds
+ * (IWX_TEMP_REPORTING_THRESHOLDS_CMD)
+ *
+ * @num_temps: number of temperature thresholds passed
+ * @thresholds: array with the thresholds to be configured
+ */
+struct iwx_temp_report_ths_cmd {
+ uint32_t num_temps;
+ uint16_t thresholds[IWX_MAX_DTS_TRIPS];
+} __packed; /* GRP_PHY_TEMP_REPORTING_THRESHOLDS_CMD */
+
+/*
+ * channel flags in NVM
+ * @IWX_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
+ * @IWX_NVM_CHANNEL_IBSS: usable as an IBSS channel
+ * @IWX_NVM_CHANNEL_ACTIVE: active scanning allowed
+ * @IWX_NVM_CHANNEL_RADAR: radar detection required
+ * @IWX_NVM_CHANNEL_INDOOR_ONLY: only indoor use is allowed
+ * @IWX_NVM_CHANNEL_GO_CONCURRENT: GO operation is allowed when connected to BSS
+ * on same channel on 2.4 or same UNII band on 5.2
+ * @IWX_NVM_CHANNEL_DFS: dynamic freq selection candidate
+ * @IWX_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
+ * @IWX_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
+ * @IWX_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
+ * @IWX_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
+ * @IWX_NVM_CHANNEL_DC_HIGH: DC HIGH required/allowed (?)
+ */
+#define IWX_NVM_CHANNEL_VALID (1 << 0)
+#define IWX_NVM_CHANNEL_IBSS (1 << 1)
+#define IWX_NVM_CHANNEL_ACTIVE (1 << 3)
+#define IWX_NVM_CHANNEL_RADAR (1 << 4)
+#define IWX_NVM_CHANNEL_INDOOR_ONLY (1 << 5)
+#define IWX_NVM_CHANNEL_GO_CONCURRENT (1 << 6)
+#define IWX_NVM_CHANNEL_DFS (1 << 7)
+#define IWX_NVM_CHANNEL_WIDE (1 << 8)
+#define IWX_NVM_CHANNEL_40MHZ (1 << 9)
+#define IWX_NVM_CHANNEL_80MHZ (1 << 10)
+#define IWX_NVM_CHANNEL_160MHZ (1 << 11)
+#define IWX_NVM_CHANNEL_DC_HIGH (1 << 12)
+
+/**
+ * struct iwx_nvm_access_complete_cmd - NVM_ACCESS commands are completed
+ * @reserved: reserved
+ */
+struct iwx_nvm_access_complete_cmd {
+ uint32_t reserved;
+} __packed; /* NVM_ACCESS_COMPLETE_CMD_API_S_VER_1 */
+
+/*
+ * struct iwx_nvm_get_info - request to get NVM data
+ */
+struct iwx_nvm_get_info {
+ uint32_t reserved;
+} __packed; /* REGULATORY_NVM_GET_INFO_CMD_API_S_VER_1 */
+
+/**
+ * enum iwx_nvm_info_general_flags - flags in NVM_GET_INFO resp
+ * @NVM_GENERAL_FLAGS_EMPTY_OTP: 1 if OTP is empty
+ */
+#define IWX_NVM_GENERAL_FLAGS_EMPTY_OTP (1 << 0)
+
+/**
+ * struct iwx_nvm_get_info_general - general NVM data
+ * @flags: bit 0: 1 - empty, 0 - non-empty
+ * @nvm_version: nvm version
+ * @board_type: board type
+ * @n_hw_addrs: number of reserved MAC addresses
+ */
+struct iwx_nvm_get_info_general {
+ uint32_t flags;
+ uint16_t nvm_version;
+ uint8_t board_type;
+ uint8_t n_hw_addrs;
+} __packed; /* REGULATORY_NVM_GET_INFO_GENERAL_S_VER_2 */
+
+/**
+ * iwx_nvm_mac_sku_flags - flags in &iwx_nvm_get_info_sku
+ * @NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED: true if 2.4 band enabled
+ * @NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED: true if 5.2 band enabled
+ * @NVM_MAC_SKU_FLAGS_802_11N_ENABLED: true if 11n enabled
+ * @NVM_MAC_SKU_FLAGS_802_11AC_ENABLED: true if 11ac enabled
+ * @NVM_MAC_SKU_FLAGS_802_11AX_ENABLED: true if 11ax enabled
+ * @NVM_MAC_SKU_FLAGS_MIMO_DISABLED: true if MIMO disabled
+ * @NVM_MAC_SKU_FLAGS_WAPI_ENABLED: true if WAPI enabled
+ * @NVM_MAC_SKU_FLAGS_REG_CHECK_ENABLED: true if regulatory checker enabled
+ * @NVM_MAC_SKU_FLAGS_API_LOCK_ENABLED: true if API lock enabled
+ */
+#define IWX_NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED (1 << 0)
+#define IWX_NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED (1 << 1)
+#define IWX_NVM_MAC_SKU_FLAGS_802_11N_ENABLED (1 << 2)
+#define IWX_NVM_MAC_SKU_FLAGS_802_11AC_ENABLED (1 << 3)
+#define IWX_NVM_MAC_SKU_FLAGS_802_11AX_ENABLED (1 << 4)
+#define IWX_NVM_MAC_SKU_FLAGS_MIMO_DISABLED (1 << 5)
+#define IWX_NVM_MAC_SKU_FLAGS_WAPI_ENABLED (1 << 8)
+#define IWX_NVM_MAC_SKU_FLAGS_REG_CHECK_ENABLED (1 << 14)
+#define IWX_NVM_MAC_SKU_FLAGS_API_LOCK_ENABLED (1 << 15)
+
+/**
+ * struct iwx_nvm_get_info_sku - mac information
+ * @mac_sku_flags: flags for SKU, see &enum iwx_nvm_mac_sku_flags
+ */
+struct iwx_nvm_get_info_sku {
+ uint32_t mac_sku_flags;
+} __packed; /* REGULATORY_NVM_GET_INFO_MAC_SKU_SECTION_S_VER_2 */
+
+/**
+ * struct iwx_nvm_get_info_phy - phy information
+ * @tx_chains: BIT 0 chain A, BIT 1 chain B
+ * @rx_chains: BIT 0 chain A, BIT 1 chain B
+ */
+struct iwx_nvm_get_info_phy {
+ uint32_t tx_chains;
+ uint32_t rx_chains;
+} __packed; /* REGULATORY_NVM_GET_INFO_PHY_SKU_SECTION_S_VER_1 */
+
+#define IWX_NUM_CHANNELS_V1 51
+#define IWX_NUM_CHANNELS 110
+
+/**
+ * struct iwx_nvm_get_info_regulatory_v1 - regulatory information
+ * @lar_enabled: is LAR enabled
+ * @channel_profile: regulatory data of this channel
+ * @reserved: reserved
+ */
+struct iwx_nvm_get_info_regulatory_v1 {
+ uint32_t lar_enabled;
+ uint16_t channel_profile[IWX_NUM_CHANNELS_V1];
+ uint16_t reserved;
+} __packed; /* REGULATORY_NVM_GET_INFO_REGULATORY_S_VER_1 */
+
+/**
+ * struct iwx_nvm_get_info_regulatory - regulatory information
+ * @lar_enabled: is LAR enabled
+ * @n_channels: number of valid channels in the array
+ * @channel_profile: regulatory data of this channel
+ */
+struct iwx_nvm_get_info_regulatory {
+ uint32_t lar_enabled;
+ uint32_t n_channels;
+ uint32_t channel_profile[IWX_NUM_CHANNELS];
+} __packed; /* REGULATORY_NVM_GET_INFO_REGULATORY_S_VER_2 */
+
+/**
+ * struct iwx_nvm_get_info_rsp_v3 - response to get NVM data
+ * @general: general NVM data
+ * @mac_sku: data relating to MAC sku
+ * @phy_sku: data relating to PHY sku
+ * @regulatory: regulatory data
+ */
+struct iwx_nvm_get_info_rsp_v3 {
+ struct iwx_nvm_get_info_general general;
+ struct iwx_nvm_get_info_sku mac_sku;
+ struct iwx_nvm_get_info_phy phy_sku;
+ struct iwx_nvm_get_info_regulatory_v1 regulatory;
+} __packed; /* REGULATORY_NVM_GET_INFO_RSP_API_S_VER_3 */
+
+/**
+ * struct iwx_nvm_get_info_rsp - response to get NVM data
+ * @general: general NVM data
+ * @mac_sku: data relating to MAC sku
+ * @phy_sku: data relating to PHY sku
+ * @regulatory: regulatory data
+ */
+struct iwx_nvm_get_info_rsp {
+ struct iwx_nvm_get_info_general general;
+ struct iwx_nvm_get_info_sku mac_sku;
+ struct iwx_nvm_get_info_phy phy_sku;
+ struct iwx_nvm_get_info_regulatory regulatory;
+} __packed; /* REGULATORY_NVM_GET_INFO_RSP_API_S_VER_4 */
+
+
+#define IWX_ALIVE_STATUS_ERR 0xDEAD
+#define IWX_ALIVE_STATUS_OK 0xCAFE
+
+struct iwx_lmac_debug_addrs {
+ uint32_t error_event_table_ptr; /* SRAM address for error log */
+ uint32_t log_event_table_ptr; /* SRAM address for LMAC event log */
+ uint32_t cpu_register_ptr;
+ uint32_t dbgm_config_ptr;
+ uint32_t alive_counter_ptr;
+ uint32_t scd_base_ptr; /* SRAM address for SCD */
+ uint32_t st_fwrd_addr; /* pointer to Store and forward */
+ uint32_t st_fwrd_size;
+} __packed; /* UCODE_DEBUG_ADDRS_API_S_VER_2 */
+
+struct iwx_lmac_alive {
+ uint32_t ucode_major;
+ uint32_t ucode_minor;
+ uint8_t ver_subtype;
+ uint8_t ver_type;
+ uint8_t mac;
+ uint8_t opt;
+ uint32_t timestamp;
+ struct iwx_lmac_debug_addrs dbg_ptrs;
+} __packed; /* UCODE_ALIVE_NTFY_API_S_VER_3 */
+
+struct iwx_umac_debug_addrs {
+ uint32_t error_info_addr; /* SRAM address for UMAC error log */
+ uint32_t dbg_print_buff_addr;
+} __packed; /* UMAC_DEBUG_ADDRS_API_S_VER_1 */
+
+struct iwx_umac_alive {
+ uint32_t umac_major; /* UMAC version: major */
+ uint32_t umac_minor; /* UMAC version: minor */
+ struct iwx_umac_debug_addrs dbg_ptrs;
+} __packed; /* UMAC_ALIVE_DATA_API_S_VER_2 */
+
+struct iwx_alive_resp_v4 {
+ uint16_t status;
+ uint16_t flags;
+ struct iwx_lmac_alive lmac_data[2];
+ struct iwx_umac_alive umac_data;
+} __packed; /* ALIVE_RES_API_S_VER_4 */
+
+struct iwx_sku_id {
+ uint32_t data[3];
+} __packed; /* SKU_ID_API_S_VER_1 */
+
+struct iwx_alive_resp_v5 {
+ uint16_t status;
+ uint16_t flags;
+ struct iwx_lmac_alive lmac_data[2];
+ struct iwx_umac_alive umac_data;
+ struct iwx_sku_id sku_id;
+} __packed; /* UCODE_ALIVE_NTFY_API_S_VER_5 */
+
+struct iwx_imr_alive_info {
+ uint64_t base_addr;
+ uint32_t size;
+ uint32_t enabled;
+} __packed; /* IMR_ALIVE_INFO_API_S_VER_1 */
+
+struct iwx_alive_resp_v6 {
+ uint16_t status;
+ uint16_t flags;
+ struct iwx_lmac_alive lmac_data[2];
+ struct iwx_umac_alive umac_data;
+ struct iwx_sku_id sku_id;
+ struct iwx_imr_alive_info imr;
+} __packed; /* UCODE_ALIVE_NTFY_API_S_VER_6 */
+
+
+#define IWX_SOC_CONFIG_CMD_FLAGS_DISCRETE (1 << 0)
+#define IWX_SOC_CONFIG_CMD_FLAGS_LOW_LATENCY (1 << 1)
+
+#define IWX_SOC_FLAGS_LTR_APPLY_DELAY_MASK 0xc
+#define IWX_SOC_FLAGS_LTR_APPLY_DELAY_NONE 0
+#define IWX_SOC_FLAGS_LTR_APPLY_DELAY_200 1
+#define IWX_SOC_FLAGS_LTR_APPLY_DELAY_2500 2
+#define IWX_SOC_FLAGS_LTR_APPLY_DELAY_1820 3
+
+/**
+ * struct iwx_soc_configuration_cmd - Set device stabilization latency
+ *
+ * @flags: soc settings flags. In VER_1, we can only set the DISCRETE
+ * flag, because the FW treats the whole value as an integer. In
+ * VER_2, we can set the bits independently.
+ * @latency: time for SOC to ensure stable power & XTAL
+ */
+struct iwx_soc_configuration_cmd {
+ uint32_t flags;
+ uint32_t latency;
+} __packed; /*
+ * SOC_CONFIGURATION_CMD_S_VER_1 (see description above)
+ * SOC_CONFIGURATION_CMD_S_VER_2
+ */
+
+/**
+ * commands driver may send before finishing init flow
+ * @IWX_INIT_DEBUG_CFG: driver is going to send debug config command
+ * @IWX_INIT_NVM: driver is going to send NVM_ACCESS commands
+ */
+#define IWX_INIT_DEBUG_CFG (1 << 0)
+#define IWX_INIT_NVM (1 << 1)
+
+/**
+ * struct iwx_init_extended_cfg_cmd - mark what commands ucode should wait for
+ * before finishing init flows
+ * @init_flags: IWX_INIT_* flag bits
+ */
+struct iwx_init_extended_cfg_cmd {
+ uint32_t init_flags;
+} __packed; /* INIT_EXTENDED_CFG_CMD_API_S_VER_1 */
+
+/* Error response/notification */
+#define IWX_FW_ERR_UNKNOWN_CMD 0x0
+#define IWX_FW_ERR_INVALID_CMD_PARAM 0x1
+#define IWX_FW_ERR_SERVICE 0x2
+#define IWX_FW_ERR_ARC_MEMORY 0x3
+#define IWX_FW_ERR_ARC_CODE 0x4
+#define IWX_FW_ERR_WATCH_DOG 0x5
+#define IWX_FW_ERR_WEP_GRP_KEY_INDX 0x10
+#define IWX_FW_ERR_WEP_KEY_SIZE 0x11
+#define IWX_FW_ERR_OBSOLETE_FUNC 0x12
+#define IWX_FW_ERR_UNEXPECTED 0xFE
+#define IWX_FW_ERR_FATAL 0xFF
+
+/**
+ * struct iwx_error_resp - FW error indication
+ * ( IWX_REPLY_ERROR = 0x2 )
+ * @error_type: one of IWX_FW_ERR_*
+ * @cmd_id: the command ID for which the error occurred
+ * @bad_cmd_seq_num: sequence number of the erroneous command
+ * @error_service: which service created the error, applicable only if
+ * error_type = 2, otherwise 0
+ * @timestamp: TSF in usecs.
+ */
+struct iwx_error_resp {
+ uint32_t error_type;
+ uint8_t cmd_id;
+ uint8_t reserved1;
+ uint16_t bad_cmd_seq_num;
+ uint32_t error_service;
+ uint64_t timestamp;
+} __packed;
+
+enum iwx_fw_dbg_reg_operator {
+ CSR_ASSIGN,
+ CSR_SETBIT,
+ CSR_CLEARBIT,
+
+ PRPH_ASSIGN,
+ PRPH_SETBIT,
+ PRPH_CLEARBIT,
+
+ INDIRECT_ASSIGN,
+ INDIRECT_SETBIT,
+ INDIRECT_CLEARBIT,
+
+ PRPH_BLOCKBIT,
+};
+
+/**
+ * struct iwx_fw_dbg_reg_op - an operation on a register
+ *
+ * @op: &enum iwx_fw_dbg_reg_operator
+ * @addr: offset of the register
+ * @val: value
+ */
+struct iwx_fw_dbg_reg_op {
+ uint8_t op;
+ uint8_t reserved[3];
+ uint32_t addr;
+ uint32_t val;
+} __packed;
+
+/**
+ * enum iwx_fw_dbg_monitor_mode - available monitor recording modes
+ *
+ * @SMEM_MODE: monitor stores the data in SMEM
+ * @EXTERNAL_MODE: monitor stores the data in allocated DRAM
+ * @MARBH_MODE: monitor stores the data in MARBH buffer
+ * @MIPI_MODE: monitor outputs the data through the MIPI interface
+ */
+enum iwx_fw_dbg_monitor_mode {
+ SMEM_MODE = 0,
+ EXTERNAL_MODE = 1,
+ MARBH_MODE = 2,
+ MIPI_MODE = 3,
+};
+
+/**
+ * struct iwx_fw_dbg_mem_seg_tlv - configures the debug data memory segments
+ *
+ * @data_type: the memory segment type to record
+ * @ofs: the memory segment offset
+ * @len: the memory segment length, in bytes
+ *
+ * This parses IWX_UCODE_TLV_FW_MEM_SEG
+ */
+struct iwx_fw_dbg_mem_seg_tlv {
+ uint32_t data_type;
+ uint32_t ofs;
+ uint32_t len;
+} __packed;
+
+/**
+ * struct iwx_fw_dbg_dest_tlv_v1 - configures the destination of the debug data
+ *
+ * @version: version of the TLV - currently 0
+ * @monitor_mode: &enum iwx_fw_dbg_monitor_mode
+ * @size_power: buffer size will be 2^(size_power + 11)
+ * @base_reg: addr of the base addr register (PRPH)
+ * @end_reg: addr of the end addr register (PRPH)
+ * @write_ptr_reg: the addr of the reg of the write pointer
+ * @wrap_count: the addr of the reg of the wrap_count
+ * @base_shift: shift right of the base addr reg
+ * @end_shift: shift right of the end addr reg
+ * @reg_ops: array of registers operations
+ *
+ * This parses IWX_UCODE_TLV_FW_DBG_DEST
+ */
+struct iwx_fw_dbg_dest_tlv_v1 {
+ uint8_t version;
+ uint8_t monitor_mode;
+ uint8_t size_power;
+ uint8_t reserved;
+ uint32_t base_reg;
+ uint32_t end_reg;
+ uint32_t write_ptr_reg;
+ uint32_t wrap_count;
+ uint8_t base_shift;
+ uint8_t end_shift;
+ struct iwx_fw_dbg_reg_op reg_ops[0];
+} __packed;
+
+/* Mask of the register for defining the LDBG MAC2SMEM buffer SMEM size */
+#define IWX_LDBG_M2S_BUF_SIZE_MSK 0x0fff0000
+/* Mask of the register for defining the LDBG MAC2SMEM SMEM base address */
+#define IWX_LDBG_M2S_BUF_BA_MSK 0x00000fff
+/* The smem buffer chunks are in units of 256 bits */
+#define IWX_M2S_UNIT_SIZE 0x100
+
+struct iwx_fw_dbg_dest_tlv {
+ uint8_t version;
+ uint8_t monitor_mode;
+ uint8_t size_power;
+ uint8_t reserved;
+ uint32_t cfg_reg;
+ uint32_t write_ptr_reg;
+ uint32_t wrap_count;
+ uint8_t base_shift;
+ uint8_t size_shift;
+ struct iwx_fw_dbg_reg_op reg_ops[0];
+} __packed;
+
+struct iwx_fw_dbg_conf_hcmd {
+ uint8_t id;
+ uint8_t reserved;
+ uint16_t len;
+ uint8_t data[0];
+} __packed;
+
+/**
+ * enum iwx_fw_dbg_trigger_mode - triggers functionalities
+ *
+ * @IWX_FW_DBG_TRIGGER_START: when trigger occurs re-conf the dbg mechanism
+ * @IWX_FW_DBG_TRIGGER_STOP: when trigger occurs pull the dbg data
+ * @IWX_FW_DBG_TRIGGER_MONITOR_ONLY: when trigger occurs trigger is set to
+ * collect only monitor data
+ */
+enum iwx_fw_dbg_trigger_mode {
+ IWX_FW_DBG_TRIGGER_START = (1 << 0),
+ IWX_FW_DBG_TRIGGER_STOP = (1 << 1),
+ IWX_FW_DBG_TRIGGER_MONITOR_ONLY = (1 << 2),
+};
+
+/**
+ * enum iwx_fw_dbg_trigger_flags - the flags supported by wrt triggers
+ * @IWX_FW_DBG_FORCE_RESTART: force a firmware restart
+ */
+enum iwx_fw_dbg_trigger_flags {
+ IWX_FW_DBG_FORCE_RESTART = (1 << 0),
+};
+
+/**
+ * enum iwx_fw_dbg_trigger_vif_type - define the VIF type for a trigger
+ * @IWX_FW_DBG_CONF_VIF_ANY: any vif type
+ * @IWX_FW_DBG_CONF_VIF_IBSS: IBSS mode
+ * @IWX_FW_DBG_CONF_VIF_STATION: BSS mode
+ * @IWX_FW_DBG_CONF_VIF_AP: AP mode
+ * @IWX_FW_DBG_CONF_VIF_P2P_CLIENT: P2P Client mode
+ * @IWX_FW_DBG_CONF_VIF_P2P_GO: P2P GO mode
+ * @IWX_FW_DBG_CONF_VIF_P2P_DEVICE: P2P device
+ * @IWX_FW_DBG_CONF_VIF_NAN: NAN device
+ */
+enum iwx_fw_dbg_trigger_vif_type {
+ IWX_FW_DBG_CONF_VIF_ANY = 0,
+ IWX_FW_DBG_CONF_VIF_IBSS = 1,
+ IWX_FW_DBG_CONF_VIF_STATION = 2,
+ IWX_FW_DBG_CONF_VIF_AP = 3,
+ IWX_FW_DBG_CONF_VIF_P2P_CLIENT = 8,
+ IWX_FW_DBG_CONF_VIF_P2P_GO = 9,
+ IWX_FW_DBG_CONF_VIF_P2P_DEVICE = 10,
+ IWX_FW_DBG_CONF_VIF_NAN = 12,
+};
+
+/**
+ * enum iwx_fw_dbg_trigger - triggers available
+ *
+ * @FW_DBG_TRIGGER_USER: trigger log collection by user
+ * This should not be defined as a trigger to the driver, but a value the
+ * driver should set to indicate that the trigger was initiated by the
+ * user.
+ * @FW_DBG_TRIGGER_FW_ASSERT: trigger log collection when the firmware asserts
+ * @FW_DBG_TRIGGER_MISSED_BEACONS: trigger log collection when beacons are
+ * missed.
+ * @FW_DBG_TRIGGER_CHANNEL_SWITCH: trigger log collection upon channel switch.
+ * @FW_DBG_TRIGGER_FW_NOTIF: trigger log collection when the firmware sends a
+ * command response or a notification.
+ * @FW_DBG_TRIGGER_MLME: trigger log collection upon MLME event.
+ * @FW_DBG_TRIGGER_STATS: trigger log collection upon statistics threshold.
+ * @FW_DBG_TRIGGER_RSSI: trigger log collection when the rssi of the beacon
+ * goes below a threshold.
+ * @FW_DBG_TRIGGER_TXQ_TIMERS: configures the timers for the Tx queue hang
+ * detection.
+ * @FW_DBG_TRIGGER_TIME_EVENT: trigger log collection upon time events related
+ * events.
+ * @FW_DBG_TRIGGER_BA: trigger log collection upon BlockAck related events.
+ * @FW_DBG_TX_LATENCY: trigger log collection when the tx latency goes above a
+ * threshold.
+ * @FW_DBG_TDLS: trigger log collection upon TDLS related events.
+ * @FW_DBG_TRIGGER_TX_STATUS: trigger log collection upon tx status when
+ * the firmware sends a tx reply.
+ * @FW_DBG_TRIGGER_USER_EXTENDED: trigger log collection upon user space
+ * request.
+ * @FW_DBG_TRIGGER_ALIVE_TIMEOUT: trigger log collection if alive flow timeouts
+ * @FW_DBG_TRIGGER_DRIVER: trigger log collection upon a flow failure
+ * in the driver.
+ */
+enum iwx_fw_dbg_trigger {
+ IWX_FW_DBG_TRIGGER_INVALID = 0,
+ IWX_FW_DBG_TRIGGER_USER,
+ IWX_FW_DBG_TRIGGER_FW_ASSERT,
+ IWX_FW_DBG_TRIGGER_MISSED_BEACONS,
+ IWX_FW_DBG_TRIGGER_CHANNEL_SWITCH,
+ IWX_FW_DBG_TRIGGER_FW_NOTIF,
+ IWX_FW_DBG_TRIGGER_MLME,
+ IWX_FW_DBG_TRIGGER_STATS,
+ IWX_FW_DBG_TRIGGER_RSSI,
+ IWX_FW_DBG_TRIGGER_TXQ_TIMERS,
+ IWX_FW_DBG_TRIGGER_TIME_EVENT,
+ IWX_FW_DBG_TRIGGER_BA,
+ IWX_FW_DBG_TRIGGER_TX_LATENCY,
+ IWX_FW_DBG_TRIGGER_TDLS,
+ IWX_FW_DBG_TRIGGER_TX_STATUS,
+ IWX_FW_DBG_TRIGGER_USER_EXTENDED,
+ IWX_FW_DBG_TRIGGER_ALIVE_TIMEOUT,
+ IWX_FW_DBG_TRIGGER_DRIVER,
+
+ /* must be last */
+ IWX_FW_DBG_TRIGGER_MAX,
+};
+
+
+/**
+ * struct iwx_fw_dbg_trigger_tlv - a TLV that describes the trigger
+ * @id: &enum iwx_fw_dbg_trigger
+ * @vif_type: &enum iwx_fw_dbg_trigger_vif_type
+ * @stop_conf_ids: bitmap of configurations this trigger relates to.
+ * if the mode is %IWX_FW_DBG_TRIGGER_STOP, then if the bit corresponding
+ * to the currently running configuration is set, the data should be
+ * collected.
+ * @stop_delay: how many milliseconds to wait before collecting the data
+ * after the STOP trigger fires.
+ * @mode: &enum iwx_fw_dbg_trigger_mode - can be stop / start of both
+ * @start_conf_id: if mode is %IWX_FW_DBG_TRIGGER_START, this defines what
+ * configuration should be applied when the triggers kicks in.
+ * @occurrences: number of occurrences. 0 means the trigger will never fire.
+ * @trig_dis_ms: the time, in milliseconds, after an occurrence of this
+ * trigger in which another occurrence should be ignored.
+ * @flags: &enum iwx_fw_dbg_trigger_flags
+ */
+struct iwx_fw_dbg_trigger_tlv {
+ uint32_t id;
+ uint32_t vif_type;
+ uint32_t stop_conf_ids;
+ uint32_t stop_delay;
+ uint8_t mode;
+ uint8_t start_conf_id;
+ uint16_t occurrences;
+ uint16_t trig_dis_ms;
+ uint8_t flags;
+ uint8_t reserved[5];
+
+ uint8_t data[0];
+} __packed;
+
+#define IWX_FW_DBG_START_FROM_ALIVE 0
+#define IWX_FW_DBG_CONF_MAX 32
+#define IWX_FW_DBG_INVALID 0xff
+
+/**
+ * struct iwx_fw_dbg_trigger_missed_bcon - configures trigger for missed beacons
+ * @stop_consec_missed_bcon: stop recording if threshold is crossed.
+ * @stop_consec_missed_bcon_since_rx: stop recording if threshold is crossed.
+ * @start_consec_missed_bcon: start recording if threshold is crossed.
+ * @start_consec_missed_bcon_since_rx: start recording if threshold is crossed.
+ * @reserved1: reserved
+ * @reserved2: reserved
+ */
+struct iwx_fw_dbg_trigger_missed_bcon {
+ uint32_t stop_consec_missed_bcon;
+ uint32_t stop_consec_missed_bcon_since_rx;
+ uint32_t reserved2[2];
+ uint32_t start_consec_missed_bcon;
+ uint32_t start_consec_missed_bcon_since_rx;
+ uint32_t reserved1[2];
+} __packed;
+
+/**
+ * struct iwx_fw_dbg_trigger_cmd - configures trigger for messages from FW.
+ * cmds: the list of commands to trigger the collection on
+ */
+struct iwx_fw_dbg_trigger_cmd {
+ struct cmd {
+ uint8_t cmd_id;
+ uint8_t group_id;
+ } __packed cmds[16];
+} __packed;
+
+/**
+ * iwx_fw_dbg_trigger_stats - configures trigger for statistics
+ * @stop_offset: the offset of the value to be monitored
+ * @stop_threshold: the threshold above which to collect
+ * @start_offset: the offset of the value to be monitored
+ * @start_threshold: the threshold above which to start recording
+ */
+struct iwx_fw_dbg_trigger_stats {
+ uint32_t stop_offset;
+ uint32_t stop_threshold;
+ uint32_t start_offset;
+ uint32_t start_threshold;
+} __packed;
+
+/**
+ * struct iwx_fw_dbg_trigger_low_rssi - trigger for low beacon RSSI
+ * @rssi: RSSI value to trigger at
+ */
+struct iwx_fw_dbg_trigger_low_rssi {
+ uint32_t rssi;
+} __packed;
+
+/**
+ * struct iwx_fw_dbg_trigger_mlme - configures trigger for mlme events
+ * @stop_auth_denied: number of denied authentication to collect
+ * @stop_auth_timeout: number of authentication timeout to collect
+ * @stop_rx_deauth: number of Rx deauth before to collect
+ * @stop_tx_deauth: number of Tx deauth before to collect
+ * @stop_assoc_denied: number of denied association to collect
+ * @stop_assoc_timeout: number of association timeout to collect
+ * @stop_connection_loss: number of connection loss to collect
+ * @start_auth_denied: number of denied authentication to start recording
+ * @start_auth_timeout: number of authentication timeout to start recording
+ * @start_rx_deauth: number of Rx deauth to start recording
+ * @start_tx_deauth: number of Tx deauth to start recording
+ * @start_assoc_denied: number of denied association to start recording
+ * @start_assoc_timeout: number of association timeout to start recording
+ * @start_connection_loss: number of connection loss to start recording
+ */
+struct iwx_fw_dbg_trigger_mlme {
+ uint8_t stop_auth_denied;
+ uint8_t stop_auth_timeout;
+ uint8_t stop_rx_deauth;
+ uint8_t stop_tx_deauth;
+
+ uint8_t stop_assoc_denied;
+ uint8_t stop_assoc_timeout;
+ uint8_t stop_connection_loss;
+ uint8_t reserved;
+
+ uint8_t start_auth_denied;
+ uint8_t start_auth_timeout;
+ uint8_t start_rx_deauth;
+ uint8_t start_tx_deauth;
+
+ uint8_t start_assoc_denied;
+ uint8_t start_assoc_timeout;
+ uint8_t start_connection_loss;
+ uint8_t reserved2;
+} __packed;
+
+/**
+ * struct iwx_fw_dbg_trigger_txq_timer - configures the Tx queue's timer
+ * @command_queue: timeout for the command queue in ms
+ * @bss: timeout for the queues of a BSS (except for TDLS queues) in ms
+ * @softap: timeout for the queues of a softAP in ms
+ * @p2p_go: timeout for the queues of a P2P GO in ms
+ * @p2p_client: timeout for the queues of a P2P client in ms
+ * @p2p_device: timeout for the queues of a P2P device in ms
+ * @ibss: timeout for the queues of an IBSS in ms
+ * @tdls: timeout for the queues of a TDLS station in ms
+ */
+struct iwx_fw_dbg_trigger_txq_timer {
+ uint32_t command_queue;
+ uint32_t bss;
+ uint32_t softap;
+ uint32_t p2p_go;
+ uint32_t p2p_client;
+ uint32_t p2p_device;
+ uint32_t ibss;
+ uint32_t tdls;
+ uint32_t reserved[4];
+} __packed;
+
+/**
+ * struct iwx_fw_dbg_trigger_time_event - configures a time event trigger
+ * time_events: a list of tuples <id, action_bitmap>. The driver will issue a
+ * trigger each time a time event notification that relates to time event
+ * id with one of the actions in the bitmap is received and
+ * BIT(notif->status) is set in status_bitmap.
+ *
+ */
+struct iwx_fw_dbg_trigger_time_event {
+ struct {
+ uint32_t id;
+ uint32_t action_bitmap;
+ uint32_t status_bitmap;
+ } __packed time_events[16];
+} __packed;
+
+/**
+ * struct iwx_fw_dbg_trigger_ba - configures BlockAck related trigger
+ * rx_ba_start: tid bitmap to configure on what tid the trigger should occur
+ * when an Rx BlockAck session is started.
+ * rx_ba_stop: tid bitmap to configure on what tid the trigger should occur
+ * when an Rx BlockAck session is stopped.
+ * tx_ba_start: tid bitmap to configure on what tid the trigger should occur
+ * when a Tx BlockAck session is started.
+ * tx_ba_stop: tid bitmap to configure on what tid the trigger should occur
+ * when a Tx BlockAck session is stopped.
+ * rx_bar: tid bitmap to configure on what tid the trigger should occur
+ * when a BAR is received (for a Tx BlockAck session).
+ * tx_bar: tid bitmap to configure on what tid the trigger should occur
+ * when a BAR is sent (for an Rx BlockAck session).
+ * frame_timeout: tid bitmap to configure on what tid the trigger should occur
+ * when a frame times out in the reordering buffer.
+ */
+struct iwx_fw_dbg_trigger_ba {
+ uint16_t rx_ba_start;
+ uint16_t rx_ba_stop;
+ uint16_t tx_ba_start;
+ uint16_t tx_ba_stop;
+ uint16_t rx_bar;
+ uint16_t tx_bar;
+ uint16_t frame_timeout;
+} __packed;
+
+/**
+ * struct iwx_fw_dbg_trigger_tx_latency - configures tx latency related trigger
+ * @thrshold: the wanted threshold.
+ * @tid_bitmap: the tid to apply the threshold on
+ * @mode: recording mode (internal buffer or continuous recording)
+ * @window: the size of the window before collecting.
+ * @reserved: reserved.
+ */
+struct iwx_fw_dbg_trigger_tx_latency {
+ uint32_t thrshold;
+ uint16_t tid_bitmap;
+ uint16_t mode;
+ uint32_t window;
+ uint32_t reserved[4];
+} __packed;
+
+/**
+ * struct iwx_fw_dbg_trigger_tdls - configures trigger for TDLS events.
+ * @action_bitmap: the TDLS action to trigger the collection upon
+ * @peer_mode: trigger on specific peer or all
+ * @peer: the TDLS peer to trigger the collection on
+ */
+struct iwx_fw_dbg_trigger_tdls {
+ uint8_t action_bitmap;
+ uint8_t peer_mode;
+ uint8_t peer[ETHER_ADDR_LEN];
+ uint8_t reserved[4];
+} __packed;
+
+/**
+ * struct iwx_fw_dbg_trigger_tx_status - configures trigger for tx response
+ * status.
+ * @statuses: the list of statuses to trigger the collection on
+ */
+struct iwx_fw_dbg_trigger_tx_status {
+ struct tx_status {
+ uint8_t status;
+ uint8_t reserved[3];
+ } __packed statuses[16];
+ uint32_t reserved[2];
+} __packed;
+
+/**
+ * struct iwx_fw_dbg_conf_tlv - a TLV that describes a debug configuration.
+ * @id: conf id
+ * @usniffer: should the uSniffer image be used
+ * @num_of_hcmds: how many HCMDs to send are present here
+ * @hcmd: a variable length host command to be sent to apply the configuration.
+ * If there is more than one HCMD to send, they will appear one after the
+ * other and be sent in the order that they appear in.
+ * This parses IWX_UCODE_TLV_FW_DBG_CONF. The user can add up-to
+ * %IWX_FW_DBG_CONF_MAX configuration per run.
+ */
+struct iwx_fw_dbg_conf_tlv {
+ uint8_t id;
+ uint8_t usniffer;
+ uint8_t reserved;
+ uint8_t num_of_hcmds;
+ struct iwx_fw_dbg_conf_hcmd hcmd;
+} __packed;
+
+#define IWX_FW_CMD_VER_UNKNOWN 99
+
+/**
+ * struct iwx_fw_cmd_version - firmware command version entry
+ * @cmd: command ID
+ * @group: group ID
+ * @cmd_ver: command version
+ * @notif_ver: notification version
+ */
+struct iwx_fw_cmd_version {
+ uint8_t cmd;
+ uint8_t group;
+ uint8_t cmd_ver;
+ uint8_t notif_ver;
+} __packed;
+
+/* Common PHY, MAC and Bindings definitions */
+
+#define IWX_MAX_MACS_IN_BINDING (3)
+#define IWX_MAX_BINDINGS (4)
+#define IWX_AUX_BINDING_INDEX (3)
+#define IWX_MAX_PHYS (4)
+
+/* Used to extract ID and color from the context dword */
+#define IWX_FW_CTXT_ID_POS (0)
+#define IWX_FW_CTXT_ID_MSK (0xff << IWX_FW_CTXT_ID_POS)
+#define IWX_FW_CTXT_COLOR_POS (8)
+#define IWX_FW_CTXT_COLOR_MSK (0xff << IWX_FW_CTXT_COLOR_POS)
+#define IWX_FW_CTXT_INVALID (0xffffffff)
+
+#define IWX_FW_CMD_ID_AND_COLOR(_id, _color) ((_id << IWX_FW_CTXT_ID_POS) |\
+ (_color << IWX_FW_CTXT_COLOR_POS))
+
+/* Possible actions on PHYs, MACs and Bindings */
+#define IWX_FW_CTXT_ACTION_STUB 0
+#define IWX_FW_CTXT_ACTION_ADD 1
+#define IWX_FW_CTXT_ACTION_MODIFY 2
+#define IWX_FW_CTXT_ACTION_REMOVE 3
+#define IWX_FW_CTXT_ACTION_NUM 4
+/* COMMON_CONTEXT_ACTION_API_E_VER_1 */
+
+/* Time Events */
+
+/* Time Event types, according to MAC type */
+
+/* BSS Station Events */
+#define IWX_TE_BSS_STA_AGGRESSIVE_ASSOC 0
+#define IWX_TE_BSS_STA_ASSOC 1
+#define IWX_TE_BSS_EAP_DHCP_PROT 2
+#define IWX_TE_BSS_QUIET_PERIOD 3
+
+/* P2P Device Events */
+#define IWX_TE_P2P_DEVICE_DISCOVERABLE 4
+#define IWX_TE_P2P_DEVICE_LISTEN 5
+#define IWX_TE_P2P_DEVICE_ACTION_SCAN 6
+#define IWX_TE_P2P_DEVICE_FULL_SCAN 7
+
+/* P2P Client Events */
+#define IWX_TE_P2P_CLIENT_AGGRESSIVE_ASSOC 8
+#define IWX_TE_P2P_CLIENT_ASSOC 9
+#define IWX_TE_P2P_CLIENT_QUIET_PERIOD 10
+
+/* P2P GO Events */
+#define IWX_TE_P2P_GO_ASSOC_PROT 11
+#define IWX_TE_P2P_GO_REPETITIVE_NOA 12
+#define IWX_TE_P2P_GO_CT_WINDOW 13
+
+/* WiDi Sync Events */
+#define IWX_TE_WIDI_TX_SYNC 14
+
+/* Time event - defines for command API */
+
+/**
+ * DOC: Time Events - what is it?
+ *
+ * Time Events are a fw feature that allows the driver to control the presence
+ * of the device on the channel. Since the fw supports multiple channels
+ * concurrently, the fw may choose to jump to another channel at any time.
+ * In order to make sure that the fw is on a specific channel at a certain time
+ * and for a certain duration, the driver needs to issue a time event.
+ *
+ * The simplest example is for BSS association. The driver issues a time event,
+ * waits for it to start, and only then tells mac80211 that we can start the
+ * association. This way, we make sure that the association will be done
+ * smoothly and won't be interrupted by channel switch decided within the fw.
+ */
+
+ /**
+ * DOC: The flow against the fw
+ *
+ * When the driver needs to make sure we are in a certain channel, at a certain
+ * time and for a certain duration, it sends a Time Event. The flow against the
+ * fw goes like this:
+ * 1) Driver sends a TIME_EVENT_CMD to the fw
+ * 2) Driver gets the response for that command. This response contains the
+ * Unique ID (UID) of the event.
+ * 3) The fw sends notification when the event starts.
+ *
+ * Of course the API provides various options that allow to cover parameters
+ * of the flow.
+ * What is the duration of the event?
+ * What is the start time of the event?
+ * Is there an end-time for the event?
+ * How much can the event be delayed?
+ * Can the event be split?
+ * If yes what is the maximal number of chunks?
+ * etc...
+ */
+
+/*
+ * @IWX_TE_V2_FRAG_NONE: fragmentation of the time event is NOT allowed.
+ * @IWX_TE_V2_FRAG_SINGLE: fragmentation of the time event is allowed, but only
+ * the first fragment is scheduled.
+ * @IWX_TE_V2_FRAG_DUAL: fragmentation of the time event is allowed, but only
+ * the first 2 fragments are scheduled.
+ * @IWX_TE_V2_FRAG_ENDLESS: fragmentation of the time event is allowed, and any
+ * number of fragments are valid.
+ *
+ * Other than the constant defined above, specifying a fragmentation value 'x'
+ * means that the event can be fragmented but only the first 'x' will be
+ * scheduled.
+ */
+#define IWX_TE_V2_FRAG_NONE 0
+#define IWX_TE_V2_FRAG_SINGLE 1
+#define IWX_TE_V2_FRAG_DUAL 2
+#define IWX_TE_V2_FRAG_MAX 0xfe
+#define IWX_TE_V2_FRAG_ENDLESS 0xff
+
+/* Repeat the time event endlessly (until removed) */
+#define IWX_TE_V2_REPEAT_ENDLESS 0xff
+/* If a Time Event has bounded repetitions, this is the maximal value */
+#define IWX_TE_V2_REPEAT_MAX 0xfe
+
+#define IWX_TE_V2_PLACEMENT_POS 12
+#define IWX_TE_V2_ABSENCE_POS 15
+
+/* Time event policy values
+ * A notification (both event and fragment) includes a status indicating whether
+ * the FW was able to schedule the event or not. For fragment start/end
+ * notification the status is always success. There is no start/end fragment
+ * notification for monolithic events.
+ *
+ * @IWX_TE_V2_DEFAULT_POLICY: independent, social, present, unnoticeable
+ * @IWX_TE_V2_NOTIF_HOST_EVENT_START: request/receive notification on event start
+ * @IWX_TE_V2_NOTIF_HOST_EVENT_END:request/receive notification on event end
+ * @IWX_TE_V2_NOTIF_INTERNAL_EVENT_START: internal FW use
+ * @IWX_TE_V2_NOTIF_INTERNAL_EVENT_END: internal FW use.
+ * @IWX_TE_V2_NOTIF_HOST_FRAG_START: request/receive notification on frag start
+ * @IWX_TE_V2_NOTIF_HOST_FRAG_END:request/receive notification on frag end
+ * @IWX_TE_V2_NOTIF_INTERNAL_FRAG_START: internal FW use.
+ * @IWX_TE_V2_NOTIF_INTERNAL_FRAG_END: internal FW use.
+ * @IWX_TE_V2_DEP_OTHER: depends on another time event
+ * @IWX_TE_V2_DEP_TSF: depends on a specific time
+ * @IWX_TE_V2_EVENT_SOCIOPATHIC: can't co-exist with other events of the same MAC
+ * @IWX_TE_V2_ABSENCE: are we present or absent during the Time Event.
+ */
+#define IWX_TE_V2_DEFAULT_POLICY 0x0
+
+/* notifications (event start/stop, fragment start/stop) */
+#define IWX_TE_V2_NOTIF_HOST_EVENT_START (1 << 0)
+#define IWX_TE_V2_NOTIF_HOST_EVENT_END (1 << 1)
+#define IWX_TE_V2_NOTIF_INTERNAL_EVENT_START (1 << 2)
+#define IWX_TE_V2_NOTIF_INTERNAL_EVENT_END (1 << 3)
+
+#define IWX_TE_V2_NOTIF_HOST_FRAG_START (1 << 4)
+#define IWX_TE_V2_NOTIF_HOST_FRAG_END (1 << 5)
+#define IWX_TE_V2_NOTIF_INTERNAL_FRAG_START (1 << 6)
+#define IWX_TE_V2_NOTIF_INTERNAL_FRAG_END (1 << 7)
+#define IWX_T2_V2_START_IMMEDIATELY (1 << 11)
+
+#define IWX_TE_V2_NOTIF_MSK 0xff
+
+/* placement characteristics */
+#define IWX_TE_V2_DEP_OTHER (1 << IWX_TE_V2_PLACEMENT_POS)
+#define IWX_TE_V2_DEP_TSF (1 << (IWX_TE_V2_PLACEMENT_POS + 1))
+#define IWX_TE_V2_EVENT_SOCIOPATHIC (1 << (IWX_TE_V2_PLACEMENT_POS + 2))
+
+/* are we present or absent during the Time Event. */
+#define IWX_TE_V2_ABSENCE (1 << IWX_TE_V2_ABSENCE_POS)
+
+/**
+ * struct iwx_time_event_cmd_api - configuring Time Events
+ * with struct IWX_MAC_TIME_EVENT_DATA_API_S_VER_2 (see also
+ * version 1, determined by IWX_UCODE_TLV_FLAGS)
+ * ( IWX_TIME_EVENT_CMD = 0x29 )
+ * @id_and_color: ID and color of the relevant MAC
+ * @action: action to perform, one of IWX_FW_CTXT_ACTION_*
+ * @id: this field has two meanings, depending on the action:
+ * If the action is ADD, then it means the type of event to add.
+ * For all other actions it is the unique event ID assigned when the
+ * event was added by the FW.
+ * @apply_time: When to start the Time Event (in GP2)
+ * @max_delay: maximum delay to event's start (apply time), in TU
+ * @depends_on: the unique ID of the event we depend on (if any)
+ * @interval: interval between repetitions, in TU
+ * @duration: duration of event in TU
+ * @repeat: how many repetitions to do, can be IWX_TE_V2_REPEAT_ENDLESS
+ * @max_frags: maximal number of fragments the Time Event can be divided to
+ * @policy: defines whether uCode shall notify the host or other uCode modules
+ * on event and/or fragment start and/or end
+ * using one of IWX_TE_INDEPENDENT, IWX_TE_DEP_OTHER, IWX_TE_DEP_TSF
+ * IWX_TE_EVENT_SOCIOPATHIC
+ * using IWX_TE_ABSENCE and using IWX_TE_NOTIF_*
+ */
+struct iwx_time_event_cmd {
+ /* COMMON_INDEX_HDR_API_S_VER_1 */
+ uint32_t id_and_color;
+ uint32_t action;
+ uint32_t id;
+ /* IWX_MAC_TIME_EVENT_DATA_API_S_VER_2 */
+ uint32_t apply_time;
+ uint32_t max_delay;
+ uint32_t depends_on;
+ uint32_t interval;
+ uint32_t duration;
+ uint8_t repeat;
+ uint8_t max_frags;
+ uint16_t policy;
+} __packed; /* IWX_MAC_TIME_EVENT_CMD_API_S_VER_2 */
+
+/**
+ * struct iwx_time_event_resp - response structure to iwx_time_event_cmd
+ * ( response to IWX_TIME_EVENT_CMD = 0x29 )
+ * @status: bit 0 indicates success, all others specify errors
+ * @id: the Time Event type
+ * @unique_id: the unique ID assigned (in ADD) or given (others) to the TE
+ * @id_and_color: ID and color of the relevant MAC
+ */
+struct iwx_time_event_resp {
+ uint32_t status;
+ uint32_t id;
+ uint32_t unique_id;
+ uint32_t id_and_color;
+} __packed; /* IWX_MAC_TIME_EVENT_RSP_API_S_VER_1 */
+
+/**
+ * struct iwx_time_event_notif - notifications of time event start/stop
+ * ( IWX_TIME_EVENT_NOTIFICATION = 0x2a )
+ * @timestamp: action timestamp in GP2
+ * @session_id: session's unique id
+ * @unique_id: unique id of the Time Event itself
+ * @id_and_color: ID and color of the relevant MAC
+ * @action: one of IWX_TE_NOTIF_START or IWX_TE_NOTIF_END
+ * @status: true if scheduled, false otherwise (not executed)
+ */
+struct iwx_time_event_notif {
+ uint32_t timestamp;
+ uint32_t session_id;
+ uint32_t unique_id;
+ uint32_t id_and_color;
+ uint32_t action;
+ uint32_t status;
+} __packed; /* IWX_MAC_TIME_EVENT_NTFY_API_S_VER_1 */
+
+/**
+ * enum iwx_session_prot_conf_id - session protection's configurations
+ * @SESSION_PROTECT_CONF_ASSOC: Start a session protection for association.
+ * The firmware will allocate two events.
+ * Valid for BSS_STA and P2P_STA.
+ * * A rather short event that can't be fragmented and with a very
+ * high priority. If everything goes well (99% of the cases) the
+ * association should complete within this first event. During
+ * that event, no other activity will happen in the firmware,
+ * which is why it can't be too long.
+ * The length of this event is hard-coded in the firmware: 300TUs.
+ * * Another event which can be much longer (its duration is
+ * configurable by the driver) which has a slightly lower
+ * priority and that can be fragmented allowing other activities
+ * to run while this event is running.
+ * The firmware will automatically remove both events once the driver sets
+ * the BSS MAC as associated. Neither of the events will be removed
+ * for the P2P_STA MAC.
+ * Only the duration is configurable for this protection.
+ * @SESSION_PROTECT_CONF_GO_CLIENT_ASSOC: not used
+ * @SESSION_PROTECT_CONF_P2P_DEVICE_DISCOV: Schedule the P2P Device to be in
+ * listen mode. Will be fragmented. Valid only on the P2P Device MAC.
+ * The firmware will take into account
+ * the duration, the interval and the repetition count.
+ * @SESSION_PROTECT_CONF_P2P_GO_NEGOTIATION: Schedule the P2P Device to be
+ * able to run the GO Negotiation. Will not be fragmented and not
+ * repetitive. Valid only on the P2P Device MAC. Only the duration will
+ * be taken into account.
+ * @SESSION_PROTECT_CONF_MAX_ID: not used
+ */
+enum iwx_session_prot_conf_id {
+ IWX_SESSION_PROTECT_CONF_ASSOC,
+ IWX_SESSION_PROTECT_CONF_GO_CLIENT_ASSOC,
+ IWX_SESSION_PROTECT_CONF_P2P_DEVICE_DISCOV,
+ IWX_SESSION_PROTECT_CONF_P2P_GO_NEGOTIATION,
+ IWX_SESSION_PROTECT_CONF_MAX_ID,
+}; /* SESSION_PROTECTION_CONF_ID_E_VER_1 */
+
+/**
+ * struct iwx_session_prot_cmd - configure a session protection
+ * @id_and_color: the id and color of the mac for which this session protection
+ * is sent
+ * @action: can be either FW_CTXT_ACTION_ADD or FW_CTXT_ACTION_REMOVE
+ * @conf_id: see &enum iwx_mvm_session_prot_conf_id
+ * @duration_tu: the duration of the whole protection in TUs.
+ * @repetition_count: not used
+ * @interval: not used
+ *
+ * Note: the session protection will always be scheduled to start as
+ * early as possible, but the maximum delay is configuration dependent.
+ * The firmware supports only one concurrent session protection per vif.
+ * Adding a new session protection will remove any currently running session.
+ */
+struct iwx_session_prot_cmd {
+ /* COMMON_INDEX_HDR_API_S_VER_1 hdr */
+ uint32_t id_and_color;
+ uint32_t action;
+ uint32_t conf_id;
+ uint32_t duration_tu;
+ uint32_t repetition_count;
+ uint32_t interval;
+} __packed; /* SESSION_PROTECTION_CMD_API_S_VER_1 */
+
+/**
+ * struct iwx_session_prot_notif - session protection started / ended
+ * @mac_id: the mac id for which the session protection started / ended
+ * @status: 1 means success, 0 means failure
+ * @start: 1 means the session protection started, 0 means it ended
+ * @conf_id: see &enum iwx_mvm_session_prot_conf_id
+ *
+ * Note that any session protection will always get two notifications: start
+ * and end even if the firmware could not schedule it.
+ */
+struct iwx_session_prot_notif {
+ uint32_t mac_id;
+ uint32_t status;
+ uint32_t start;
+ uint32_t conf_id;
+} __packed; /* SESSION_PROTECTION_NOTIFICATION_API_S_VER_2 */
+
+
+/* Bindings and Time Quota */
+
+/**
+ * struct iwx_binding_cmd - configuring bindings
+ * ( IWX_BINDING_CONTEXT_CMD = 0x2b )
+ * @id_and_color: ID and color of the relevant Binding
+ * @action: action to perform, one of IWX_FW_CTXT_ACTION_*
+ * @macs: array of MAC id and colors which belong to the binding
+ * @phy: PHY id and color which belongs to the binding
+ * @lmac_id: the lmac id the binding belongs to
+ */
+struct iwx_binding_cmd {
+ /* COMMON_INDEX_HDR_API_S_VER_1 */
+ uint32_t id_and_color;
+ uint32_t action;
+ /* IWX_BINDING_DATA_API_S_VER_1 */
+ uint32_t macs[IWX_MAX_MACS_IN_BINDING];
+ uint32_t phy;
+ uint32_t lmac_id;
+} __packed; /* IWX_BINDING_CMD_API_S_VER_2 */
+
+/* lmac_id values: one LMAC per band, per the names below. */
+#define IWX_LMAC_24G_INDEX 0
+#define IWX_LMAC_5G_INDEX 1
+
+/* The maximal number of fragments in the FW's schedule session */
+#define IWX_MAX_QUOTA 128
+
+/**
+ * struct iwx_time_quota_data - configuration of time quota per binding
+ * @id_and_color: ID and color of the relevant Binding
+ * @quota: absolute time quota in TU. The scheduler will try to divide the
+ * remaining quota (after Time Events) according to this quota.
+ * @max_duration: max uninterrupted context duration in TU
+ */
+struct iwx_time_quota_data {
+ uint32_t id_and_color;
+ uint32_t quota;
+ uint32_t max_duration;
+} __packed; /* IWX_TIME_QUOTA_DATA_API_S_VER_1 */
+
+/**
+ * struct iwx_time_quota_cmd - configuration of time quota between bindings
+ * ( IWX_TIME_QUOTA_CMD = 0x2c )
+ * @quotas: allocations per binding
+ */
+struct iwx_time_quota_cmd {
+ struct iwx_time_quota_data quotas[IWX_MAX_BINDINGS];
+} __packed; /* IWX_TIME_QUOTA_ALLOCATION_CMD_API_S_VER_1 */
+
+
+/* PHY context: band, channel width and control channel position encodings */
+
+/* Supported bands */
+#define IWX_PHY_BAND_5 (0)
+#define IWX_PHY_BAND_24 (1)
+
+/* Supported channel width, vary if there is VHT support */
+#define IWX_PHY_VHT_CHANNEL_MODE20 (0x0)
+#define IWX_PHY_VHT_CHANNEL_MODE40 (0x1)
+#define IWX_PHY_VHT_CHANNEL_MODE80 (0x2)
+#define IWX_PHY_VHT_CHANNEL_MODE160 (0x3)
+
+/*
+ * Control channel position:
+ * For legacy set bit means upper channel, otherwise lower.
+ * For VHT - bit-2 marks if the control is lower/upper relative to center-freq
+ * bits-1:0 mark the distance from the center freq. for 20Mhz, offset is 0.
+ * center_freq
+ * |
+ * 40Mhz |_______|_______|
+ * 80Mhz |_______|_______|_______|_______|
+ * 160Mhz |_______|_______|_______|_______|_______|_______|_______|_______|
+ * code 011 010 001 000 | 100 101 110 111
+ */
+#define IWX_PHY_VHT_CTRL_POS_1_BELOW (0x0)
+#define IWX_PHY_VHT_CTRL_POS_2_BELOW (0x1)
+#define IWX_PHY_VHT_CTRL_POS_3_BELOW (0x2)
+#define IWX_PHY_VHT_CTRL_POS_4_BELOW (0x3)
+#define IWX_PHY_VHT_CTRL_POS_1_ABOVE (0x4)
+#define IWX_PHY_VHT_CTRL_POS_2_ABOVE (0x5)
+#define IWX_PHY_VHT_CTRL_POS_3_ABOVE (0x6)
+#define IWX_PHY_VHT_CTRL_POS_4_ABOVE (0x7)
+
+/*
+ * @band: IWX_PHY_BAND_*
+ * @channel: channel number
+ * @width: PHY_[VHT|LEGACY]_CHANNEL_*
+ * @ctrl channel: PHY_[VHT|LEGACY]_CTRL_*
+ */
+struct iwx_fw_channel_info_v1 {
+ uint8_t band;
+ uint8_t channel;
+ uint8_t width;
+ uint8_t ctrl_pos;
+} __packed; /* CHANNEL_CONFIG_API_S_VER_1 */
+
+/*
+ * struct iwx_fw_channel_info - channel information
+ *
+ * @channel: channel number
+ * @band: PHY_BAND_*
+ * @width: PHY_[VHT|LEGACY]_CHANNEL_*
+ * @ctrl channel: PHY_[VHT|LEGACY]_CTRL_*
+ * @reserved: for future use and alignment
+ *
+ * NOTE(review): v2 widens @channel from 8 to 32 bits and moves it first,
+ * presumably to accommodate ultra-high-band channel numbers — confirm
+ * against the firmware API headers.
+ */
+struct iwx_fw_channel_info {
+ uint32_t channel;
+ uint8_t band;
+ uint8_t width;
+ uint8_t ctrl_pos;
+ uint8_t reserved;
+} __packed; /* CHANNEL_CONFIG_API_S_VER_2 */
+
+/*
+ * Bit layout of the rxchain_info word used by the PHY context command
+ * (presumably matching the upstream iwlwifi rx_chain encoding — confirm).
+ */
+#define IWX_PHY_RX_CHAIN_DRIVER_FORCE_POS (0)
+#define IWX_PHY_RX_CHAIN_DRIVER_FORCE_MSK \
+ (0x1 << IWX_PHY_RX_CHAIN_DRIVER_FORCE_POS)
+#define IWX_PHY_RX_CHAIN_VALID_POS (1)
+#define IWX_PHY_RX_CHAIN_VALID_MSK \
+ (0x7 << IWX_PHY_RX_CHAIN_VALID_POS)
+#define IWX_PHY_RX_CHAIN_FORCE_SEL_POS (4)
+#define IWX_PHY_RX_CHAIN_FORCE_SEL_MSK \
+ (0x7 << IWX_PHY_RX_CHAIN_FORCE_SEL_POS)
+#define IWX_PHY_RX_CHAIN_FORCE_MIMO_SEL_POS (7)
+#define IWX_PHY_RX_CHAIN_FORCE_MIMO_SEL_MSK \
+ (0x7 << IWX_PHY_RX_CHAIN_FORCE_MIMO_SEL_POS)
+#define IWX_PHY_RX_CHAIN_CNT_POS (10)
+#define IWX_PHY_RX_CHAIN_CNT_MSK \
+ (0x3 << IWX_PHY_RX_CHAIN_CNT_POS)
+#define IWX_PHY_RX_CHAIN_MIMO_CNT_POS (12)
+#define IWX_PHY_RX_CHAIN_MIMO_CNT_MSK \
+ (0x3 << IWX_PHY_RX_CHAIN_MIMO_CNT_POS)
+#define IWX_PHY_RX_CHAIN_MIMO_FORCE_POS (14)
+#define IWX_PHY_RX_CHAIN_MIMO_FORCE_MSK \
+ (0x1 << IWX_PHY_RX_CHAIN_MIMO_FORCE_POS)
+
+/* TODO: fix the value, make it depend on firmware at runtime? */
+#define IWX_NUM_PHY_CTX 3
+
+/**
+ * struct iwx_phy_context_cmd - config of the PHY context
+ * ( IWX_PHY_CONTEXT_CMD = 0x8 )
+ * @id_and_color: ID and color of the relevant Binding
+ * @action: action to perform, one of IWX_FW_CTXT_ACTION_*
+ * @lmac_id: the lmac id the phy context belongs to
+ * @ci: channel info
+ * @rxchain_info: ???
+ * @dsp_cfg_flags: set to 0
+ * @reserved: reserved to align to 64 bit
+ */
+struct iwx_phy_context_cmd_uhb {
+ /* COMMON_INDEX_HDR_API_S_VER_1 */
+ uint32_t id_and_color;
+ uint32_t action;
+ /* PHY_CONTEXT_DATA_API_S_VER_3 */
+ struct iwx_fw_channel_info ci;
+ uint32_t lmac_id;
+ uint32_t rxchain_info;
+ uint32_t dsp_cfg_flags;
+ uint32_t reserved;
+} __packed; /* PHY_CONTEXT_CMD_API_VER_3 */
+/* Same command, but with the narrower v1 channel info (non-UHB devices). */
+struct iwx_phy_context_cmd {
+ /* COMMON_INDEX_HDR_API_S_VER_1 */
+ uint32_t id_and_color;
+ uint32_t action;
+ /* PHY_CONTEXT_DATA_API_S_VER_3, PHY_CONTEXT_DATA_API_S_VER_4 */
+ struct iwx_fw_channel_info_v1 ci;
+ uint32_t lmac_id;
+ uint32_t rxchain_info; /* reserved in _VER_4 */
+ uint32_t dsp_cfg_flags;
+ uint32_t reserved;
+} __packed; /* PHY_CONTEXT_CMD_API_VER_3, PHY_CONTEXT_CMD_API_VER_4 */
+
+/* TODO: complete missing documentation */
+/**
+ * struct iwx_phy_context_cmd_v1 - config of the PHY context
+ * ( IWX_PHY_CONTEXT_CMD = 0x8 )
+ * @id_and_color: ID and color of the relevant Binding
+ * @action: action to perform, one of IWX_FW_CTXT_ACTION_*
+ * @apply_time: 0 means immediate apply and context switch.
+ * other value means apply new params after X usecs
+ * @tx_param_color: ???
+ * @channel_info:
+ * @txchain_info: ???
+ * @rxchain_info: ???
+ * @acquisition_data: ???
+ * @dsp_cfg_flags: set to 0
+ */
+/*
+ * XXX Intel forgot to bump the PHY_CONTEXT command API when they increased
+ * the size of fw_channel_info from v1 to v2.
+ * To keep things simple we define two versions of this struct, and both
+ * are labeled as CMD_API_VER_1. (The Linux iwlwifi driver performs dark
+ * magic with pointers to struct members instead.)
+ */
+/* This version must be used if IWX_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS is set: */
+struct iwx_phy_context_cmd_uhb_v1 {
+ /* COMMON_INDEX_HDR_API_S_VER_1 */
+ uint32_t id_and_color;
+ uint32_t action;
+ /* IWX_PHY_CONTEXT_DATA_API_S_VER_1 */
+ uint32_t apply_time;
+ uint32_t tx_param_color;
+ struct iwx_fw_channel_info ci;
+ uint32_t txchain_info;
+ uint32_t rxchain_info;
+ uint32_t acquisition_data;
+ uint32_t dsp_cfg_flags;
+} __packed; /* IWX_PHY_CONTEXT_CMD_API_VER_1 */
+/* This version must be used otherwise: */
+struct iwx_phy_context_cmd_v1 {
+ /* COMMON_INDEX_HDR_API_S_VER_1 */
+ uint32_t id_and_color;
+ uint32_t action;
+ /* IWX_PHY_CONTEXT_DATA_API_S_VER_1 */
+ uint32_t apply_time;
+ uint32_t tx_param_color;
+ struct iwx_fw_channel_info_v1 ci;
+ uint32_t txchain_info;
+ uint32_t rxchain_info;
+ uint32_t acquisition_data;
+ uint32_t dsp_cfg_flags;
+} __packed; /* IWX_PHY_CONTEXT_CMD_API_VER_1 */
+
+
+/*
+ * NOTE(review): the *_IDX constants below are presumably indices into
+ * iwx_rx_phy_info's non_cfg_phy[] array — confirm against the firmware API.
+ */
+#define IWX_RX_INFO_PHY_CNT 8
+#define IWX_RX_INFO_ENERGY_ANT_ABC_IDX 1
+#define IWX_RX_INFO_ENERGY_ANT_A_MSK 0x000000ff
+#define IWX_RX_INFO_ENERGY_ANT_B_MSK 0x0000ff00
+#define IWX_RX_INFO_ENERGY_ANT_C_MSK 0x00ff0000
+#define IWX_RX_INFO_ENERGY_ANT_A_POS 0
+#define IWX_RX_INFO_ENERGY_ANT_B_POS 8
+#define IWX_RX_INFO_ENERGY_ANT_C_POS 16
+
+#define IWX_RX_INFO_AGC_IDX 1
+#define IWX_RX_INFO_RSSI_AB_IDX 2
+#define IWX_OFDM_AGC_A_MSK 0x0000007f
+#define IWX_OFDM_AGC_A_POS 0
+#define IWX_OFDM_AGC_B_MSK 0x00003f80
+#define IWX_OFDM_AGC_B_POS 7
+#define IWX_OFDM_AGC_CODE_MSK 0x3fe00000
+#define IWX_OFDM_AGC_CODE_POS 20
+#define IWX_OFDM_RSSI_INBAND_A_MSK 0x00ff
+#define IWX_OFDM_RSSI_A_POS 0
+#define IWX_OFDM_RSSI_ALLBAND_A_MSK 0xff00
+#define IWX_OFDM_RSSI_ALLBAND_A_POS 8
+#define IWX_OFDM_RSSI_INBAND_B_MSK 0xff0000
+#define IWX_OFDM_RSSI_B_POS 16
+#define IWX_OFDM_RSSI_ALLBAND_B_MSK 0xff000000
+#define IWX_OFDM_RSSI_ALLBAND_B_POS 24
+
+/**
+ * struct iwx_rx_phy_info - phy info
+ * (IWX_REPLY_RX_PHY_CMD = 0xc0)
+ * @non_cfg_phy_cnt: non configurable DSP phy data byte count
+ * @cfg_phy_cnt: configurable DSP phy data byte count
+ * @stat_id: configurable DSP phy data set ID
+ * @reserved1:
+ * @system_timestamp: GP2 at on air rise
+ * @timestamp: TSF at on air rise
+ * @beacon_time_stamp: beacon at on-air rise
+ * @phy_flags: general phy flags: band, modulation, ...
+ * @channel: channel number
+ * @non_cfg_phy_buf: for various implementations of non_cfg_phy
+ * @rate_n_flags: IWX_RATE_MCS_*
+ * @byte_count: frame's byte-count
+ * @frame_time: frame's time on the air, based on byte count and frame rate
+ * calculation
+ * @mac_active_msk: what MACs were active when the frame was received
+ *
+ * Before each Rx, the device sends this data. It contains PHY information
+ * about the reception of the packet.
+ */
+struct iwx_rx_phy_info {
+ uint8_t non_cfg_phy_cnt;
+ uint8_t cfg_phy_cnt;
+ uint8_t stat_id;
+ uint8_t reserved1;
+ uint32_t system_timestamp;
+ uint64_t timestamp;
+ uint32_t beacon_time_stamp;
+ uint16_t phy_flags;
+#define IWX_PHY_INFO_FLAG_SHPREAMBLE (1 << 2)
+ uint16_t channel;
+ uint32_t non_cfg_phy[IWX_RX_INFO_PHY_CNT];
+ uint32_t rate_n_flags;
+ uint32_t byte_count;
+ uint16_t mac_active_msk;
+ uint16_t frame_time;
+} __packed;
+
+/* Prefix of a received MPDU: frame byte count (presumably precedes the
+ * frame data in legacy RX responses — confirm against the RX handler). */
+struct iwx_rx_mpdu_res_start {
+ uint16_t byte_count;
+ uint16_t reserved;
+} __packed;
+
+/**
+ * Values to parse %iwx_rx_phy_info phy_flags
+ * @IWX_RX_RES_PHY_FLAGS_BAND_24: true if the packet was received on 2.4 band
+ * @IWX_RX_RES_PHY_FLAGS_MOD_CCK:
+ * @IWX_RX_RES_PHY_FLAGS_SHORT_PREAMBLE: true if packet's preamble was short
+ * @IWX_RX_RES_PHY_FLAGS_NARROW_BAND:
+ * @IWX_RX_RES_PHY_FLAGS_ANTENNA: antenna on which the packet was received
+ * @IWX_RX_RES_PHY_FLAGS_AGG: set if the packet was part of an A-MPDU
+ * @IWX_RX_RES_PHY_FLAGS_OFDM_HT: The frame was an HT frame
+ * @IWX_RX_RES_PHY_FLAGS_OFDM_GF: The frame used GF preamble
+ * @IWX_RX_RES_PHY_FLAGS_OFDM_VHT: The frame was a VHT frame
+ */
+#define IWX_RX_RES_PHY_FLAGS_BAND_24 (1 << 0)
+#define IWX_RX_RES_PHY_FLAGS_MOD_CCK (1 << 1)
+#define IWX_RX_RES_PHY_FLAGS_SHORT_PREAMBLE (1 << 2)
+#define IWX_RX_RES_PHY_FLAGS_NARROW_BAND (1 << 3)
+#define IWX_RX_RES_PHY_FLAGS_ANTENNA (0x7 << 4)
+#define IWX_RX_RES_PHY_FLAGS_ANTENNA_POS 4
+#define IWX_RX_RES_PHY_FLAGS_AGG (1 << 7)
+#define IWX_RX_RES_PHY_FLAGS_OFDM_HT (1 << 8)
+#define IWX_RX_RES_PHY_FLAGS_OFDM_GF (1 << 9)
+#define IWX_RX_RES_PHY_FLAGS_OFDM_VHT (1 << 10)
+
+/**
+ * Values written by fw for each Rx packet
+ * @IWX_RX_MPDU_RES_STATUS_CRC_OK: CRC is fine
+ * @IWX_RX_MPDU_RES_STATUS_OVERRUN_OK: there was no RXE overflow
+ * @IWX_RX_MPDU_RES_STATUS_SRC_STA_FOUND:
+ * @IWX_RX_MPDU_RES_STATUS_KEY_VALID:
+ * @IWX_RX_MPDU_RES_STATUS_KEY_PARAM_OK:
+ * @IWX_RX_MPDU_RES_STATUS_ICV_OK: ICV is fine, if not, the packet is destroyed
+ * @IWX_RX_MPDU_RES_STATUS_MIC_OK: used for CCM alg only. TKIP MIC is checked
+ * in the driver.
+ * @IWX_RX_MPDU_RES_STATUS_TTAK_OK: TTAK is fine
+ * @IWX_RX_MPDU_RES_STATUS_MNG_FRAME_REPLAY_ERR: valid for alg = CCM_CMAC or
+ * alg = CCM only. Checks replay attack for 11w frames. Relevant only if
+ * %IWX_RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME is set.
+ * @IWX_RX_MPDU_RES_STATUS_SEC_NO_ENC: this frame is not encrypted
+ * @IWX_RX_MPDU_RES_STATUS_SEC_WEP_ENC: this frame is encrypted using WEP
+ * @IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC: this frame is encrypted using CCM
+ * @IWX_RX_MPDU_RES_STATUS_SEC_TKIP_ENC: this frame is encrypted using TKIP
+ * @IWX_RX_MPDU_RES_STATUS_SEC_CCM_CMAC_ENC: this frame is encrypted using CCM_CMAC
+ * @IWX_RX_MPDU_RES_STATUS_SEC_ENC_ERR: this frame couldn't be decrypted
+ * @IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK: bitmask of the encryption algorithm
+ * @IWX_RX_MPDU_RES_STATUS_DEC_DONE: this frame has been successfully decrypted
+ * @IWX_RX_MPDU_RES_STATUS_PROTECT_FRAME_BIT_CMP:
+ * @IWX_RX_MPDU_RES_STATUS_EXT_IV_BIT_CMP:
+ * @IWX_RX_MPDU_RES_STATUS_KEY_ID_CMP_BIT:
+ * @IWX_RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME: this frame is an 11w management frame
+ * @IWX_RX_MPDU_RES_STATUS_HASH_INDEX_MSK:
+ * @IWX_RX_MPDU_RES_STATUS_STA_ID_MSK:
+ * @IWX_RX_MPDU_RES_STATUS_RRF_KILL:
+ * @IWX_RX_MPDU_RES_STATUS_FILTERING_MSK:
+ * @IWX_RX_MPDU_RES_STATUS2_FILTERING_MSK:
+ */
+#define IWX_RX_MPDU_RES_STATUS_CRC_OK (1 << 0)
+#define IWX_RX_MPDU_RES_STATUS_OVERRUN_OK (1 << 1)
+#define IWX_RX_MPDU_RES_STATUS_SRC_STA_FOUND (1 << 2)
+#define IWX_RX_MPDU_RES_STATUS_KEY_VALID (1 << 3)
+#define IWX_RX_MPDU_RES_STATUS_KEY_PARAM_OK (1 << 4)
+#define IWX_RX_MPDU_RES_STATUS_ICV_OK (1 << 5)
+#define IWX_RX_MPDU_RES_STATUS_MIC_OK (1 << 6)
+#define IWX_RX_MPDU_RES_STATUS_TTAK_OK (1 << 7)
+/*
+ * NOTE(review): bit 7 is shared by TTAK_OK and MNG_FRAME_REPLAY_ERR;
+ * its meaning presumably depends on the frame's security algorithm
+ * (per the kernel-doc above) — confirm against upstream iwlwifi.
+ */
+#define IWX_RX_MPDU_RES_STATUS_MNG_FRAME_REPLAY_ERR (1 << 7)
+#define IWX_RX_MPDU_RES_STATUS_SEC_NO_ENC (0 << 8)
+#define IWX_RX_MPDU_RES_STATUS_SEC_WEP_ENC (1 << 8)
+#define IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC (2 << 8)
+#define IWX_RX_MPDU_RES_STATUS_SEC_TKIP_ENC (3 << 8)
+#define IWX_RX_MPDU_RES_STATUS_SEC_EXT_ENC (4 << 8)
+#define IWX_RX_MPDU_RES_STATUS_SEC_CCM_CMAC_ENC (6 << 8)
+#define IWX_RX_MPDU_RES_STATUS_SEC_ENC_ERR (7 << 8)
+#define IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK (7 << 8)
+#define IWX_RX_MPDU_RES_STATUS_DEC_DONE (1 << 11)
+#define IWX_RX_MPDU_RES_STATUS_PROTECT_FRAME_BIT_CMP (1 << 12)
+#define IWX_RX_MPDU_RES_STATUS_EXT_IV_BIT_CMP (1 << 13)
+#define IWX_RX_MPDU_RES_STATUS_KEY_ID_CMP_BIT (1 << 14)
+#define IWX_RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME (1 << 15)
+#define IWX_RX_MPDU_RES_STATUS_HASH_INDEX_MSK (0x3F0000)
+#define IWX_RX_MPDU_RES_STATUS_STA_ID_MSK (0x1f000000)
+#define IWX_RX_MPDU_RES_STATUS_RRF_KILL (1 << 29)
+#define IWX_RX_MPDU_RES_STATUS_FILTERING_MSK (0xc00000)
+#define IWX_RX_MPDU_RES_STATUS2_FILTERING_MSK (0xc0000000)
+
+#define IWX_RX_MPDU_MFLG1_ADDRTYPE_MASK 0x03
+#define IWX_RX_MPDU_MFLG1_MIC_CRC_LEN_MASK 0xf0
+#define IWX_RX_MPDU_MFLG1_MIC_CRC_LEN_SHIFT 3
+
+#define IWX_RX_MPDU_MFLG2_HDR_LEN_MASK 0x1f
+#define IWX_RX_MPDU_MFLG2_PAD 0x20
+#define IWX_RX_MPDU_MFLG2_AMSDU 0x40
+
+#define IWX_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK 0x7f
+#define IWX_RX_MPDU_AMSDU_LAST_SUBFRAME 0x80
+
+#define IWX_RX_MPDU_PHY_AMPDU (1 << 5)
+#define IWX_RX_MPDU_PHY_AMPDU_TOGGLE (1 << 6)
+#define IWX_RX_MPDU_PHY_SHORT_PREAMBLE (1 << 7)
+#define IWX_RX_MPDU_PHY_NCCK_ADDTL_NTFY (1 << 7)
+#define IWX_RX_MPDU_PHY_TSF_OVERLOAD (1 << 8)
+
+struct iwx_rx_mpdu_desc_v3 {
+ union {
+ uint32_t filter_match;
+ uint32_t phy_data3;
+ };
+ union {
+ uint32_t rss_hash;
+ uint32_t phy_data2;
+ };
+ uint32_t partial_hash; /* ip/tcp header hash w/o some fields */
+ uint16_t raw_xsum;
+ uint16_t reserved_xsum;
+ uint32_t rate_n_flags;
+ uint8_t energy_a;
+ uint8_t energy_b;
+ uint8_t channel;
+ uint8_t mac_context;
+ uint32_t gp2_on_air_rise;
+ union {
+ /*
+ * TSF value on air rise (INA), only valid if
+ * IWX_RX_MPDU_PHY_TSF_OVERLOAD isn't set
+ */
+ struct {
+ uint32_t tsf_on_air_rise0;
+ uint32_t tsf_on_air_rise1;
+ };
+
+ struct {
+ uint32_t phy_data0;
+
+ /* Only valid if IWX_RX_MPDU_PHY_TSF_OVERLOAD is set. */
+ uint32_t phy_data1;
+ };
+ };
+ uint32_t reserved[2];
+} __packed; /* RX_MPDU_RES_START_API_S_VER_3,
+ RX_MPDU_RES_START_API_S_VER_5 */
+
+/*
+ * Older-API tail layout; selected via the union in struct iwx_rx_mpdu_desc.
+ * Field meanings match iwx_rx_mpdu_desc_v3, but the ordering differs and
+ * the v3-only fields (partial_hash, xsum, reserved) are absent.
+ */
+struct iwx_rx_mpdu_desc_v1 {
+ union {
+ uint32_t rss_hash;
+ uint32_t phy_data2;
+ };
+ union {
+ uint32_t filter_match;
+ uint32_t phy_data3;
+ };
+ uint32_t rate_n_flags;
+ uint8_t energy_a;
+ uint8_t energy_b;
+ uint8_t channel;
+ uint8_t mac_context;
+ uint32_t gp2_on_air_rise;
+ union {
+ struct {
+ uint32_t tsf_on_air_rise0;
+ uint32_t tsf_on_air_rise1;
+ };
+ struct {
+ uint32_t phy_data0;
+ uint32_t phy_data1;
+ };
+ };
+} __packed;
+
+#define IWX_RX_REORDER_DATA_INVALID_BAID 0x7f
+
+/* Bit layout of iwx_rx_mpdu_desc reorder_data */
+#define IWX_RX_MPDU_REORDER_NSSN_MASK 0x00000fff
+#define IWX_RX_MPDU_REORDER_SN_MASK 0x00fff000
+#define IWX_RX_MPDU_REORDER_SN_SHIFT 12
+#define IWX_RX_MPDU_REORDER_BAID_MASK 0x7f000000
+#define IWX_RX_MPDU_REORDER_BAID_SHIFT 24
+#define IWX_RX_MPDU_REORDER_BA_OLD_SN 0x80000000
+
+struct iwx_rx_mpdu_desc {
+ uint16_t mpdu_len;
+ uint8_t mac_flags1;
+ uint8_t mac_flags2;
+ uint8_t amsdu_info;
+ uint16_t phy_info;
+ uint8_t mac_phy_idx;
+ uint16_t raw_csum;
+ union {
+ uint16_t l3l4_flags;
+ uint16_t phy_data4;
+ };
+ uint16_t status;
+ uint8_t hash_filter;
+ uint8_t sta_id_flags;
+ uint32_t reorder_data;
+ union {
+ struct iwx_rx_mpdu_desc_v1 v1;
+ struct iwx_rx_mpdu_desc_v3 v3;
+ };
+} __packed; /* RX_MPDU_RES_START_API_S_VER_3,
+ RX_MPDU_RES_START_API_S_VER_4,
+ RX_MPDU_RES_START_API_S_VER_5 */
+
+/* Size of iwx_rx_mpdu_desc when the (smaller) v1 tail is in use:
+ * the common prefix plus the v1 variant of the trailing union. */
+#define IWX_RX_DESC_SIZE_V1 ((sizeof(struct iwx_rx_mpdu_desc) - \
+ sizeof(struct iwx_rx_mpdu_desc_v3)) + sizeof(struct iwx_rx_mpdu_desc_v1))
+
+/*
+ * NOTE(review): unlike the sibling firmware structs this one lacks
+ * __packed; all fields are naturally aligned so the layout should match
+ * anyway — confirm this mirrors the upstream definition.
+ */
+struct iwx_frame_release {
+ uint8_t baid;
+ uint8_t reserved;
+ uint16_t nssn;
+};
+
+/**
+ * enum iwx_bar_frame_release_sta_tid - STA/TID information for BAR release
+ * @IWX_BAR_FRAME_RELEASE_TID_MASK: TID mask
+ * @IWX_BAR_FRAME_RELEASE_STA_MASK: STA mask
+ */
+#define IWX_BAR_FRAME_RELEASE_TID_MASK 0x0000000f
+#define IWX_BAR_FRAME_RELEASE_STA_MASK 0x000001f0
+#define IWX_BAR_FRAME_RELEASE_STA_SHIFT 4
+
+/**
+ * enum iwx_bar_frame_release_ba_info - BA information for BAR release
+ * @IWX_BAR_FRAME_RELEASE_NSSN_MASK: NSSN mask
+ * @IWX_BAR_FRAME_RELEASE_SN_MASK: SN mask (ignored by driver)
+ * @IWX_BAR_FRAME_RELEASE_BAID_MASK: BAID mask
+ */
+#define IWX_BAR_FRAME_RELEASE_NSSN_MASK 0x00000fff
+#define IWX_BAR_FRAME_RELEASE_SN_MASK 0x00fff000
+#define IWX_BAR_FRAME_RELEASE_SN_SHIFT 12
+#define IWX_BAR_FRAME_RELEASE_BAID_MASK 0x3f000000
+#define IWX_BAR_FRAME_RELEASE_BAID_SHIFT 24
+
+/**
+ * struct iwx_bar_frame_release - frame release from BAR info
+ * @sta_tid: STA & TID information, see &enum iwx_bar_frame_release_sta_tid.
+ * @ba_info: BA information, see &enum iwx_bar_frame_release_ba_info.
+ */
+struct iwx_bar_frame_release {
+ uint32_t sta_tid;
+ uint32_t ba_info;
+} __packed; /* RX_BAR_TO_FRAME_RELEASE_API_S_VER_1 */
+
+/**
+ * struct iwx_radio_version_notif - information on the radio version
+ * ( IWX_RADIO_VERSION_NOTIFICATION = 0x68 )
+ * @radio_flavor:
+ * @radio_step:
+ * @radio_dash:
+ */
+struct iwx_radio_version_notif {
+ uint32_t radio_flavor;
+ uint32_t radio_step;
+ uint32_t radio_dash;
+} __packed; /* IWX_RADIO_VERSION_NOTOFICATION_S_VER_1 */
+
+/* RF-kill / card state flags reported in iwx_card_state_notif */
+#define IWX_CARD_ENABLED 0x00
+#define IWX_HW_CARD_DISABLED 0x01
+#define IWX_SW_CARD_DISABLED 0x02
+#define IWX_CT_KILL_CARD_DISABLED 0x04
+#define IWX_HALT_CARD_DISABLED 0x08
+#define IWX_CARD_DISABLED_MSK 0x0f
+#define IWX_CARD_IS_RX_ON 0x10
+
+/**
+ * struct iwx_card_state_notif - information on the card state
+ * (IWX_CARD_STATE_NOTIFICATION = 0xa1 )
+ * @flags: %iwx_card_state_flags
+ */
+struct iwx_card_state_notif {
+ uint32_t flags;
+} __packed; /* CARD_STATE_NTFY_API_S_VER_1 */
+
+/**
+ * struct iwx_missed_beacons_notif - information on missed beacons
+ * ( IWX_MISSED_BEACONS_NOTIFICATION = 0xa2 )
+ * @mac_id: interface ID
+ * @consec_missed_beacons_since_last_rx: number of consecutive missed
+ * beacons since last RX.
+ * @consec_missed_beacons: number of consecutive missed beacons
+ * @num_expected_beacons:
+ * @num_recvd_beacons:
+ */
+struct iwx_missed_beacons_notif {
+ uint32_t mac_id;
+ uint32_t consec_missed_beacons_since_last_rx;
+ uint32_t consec_missed_beacons;
+ uint32_t num_expected_beacons;
+ uint32_t num_recvd_beacons;
+} __packed; /* IWX_MISSED_BEACON_NTFY_API_S_VER_3 */
+
+/**
+ * struct iwx_mfuart_load_notif - mfuart image version & status
+ * ( IWX_MFUART_LOAD_NOTIFICATION = 0xb1 )
+ * @installed_ver: installed image version
+ * @external_ver: external image version
+ * @status: MFUART loading status
+ * @duration: MFUART loading time
+ */
+struct iwx_mfuart_load_notif {
+ uint32_t installed_ver;
+ uint32_t external_ver;
+ uint32_t status;
+ uint32_t duration;
+} __packed; /*MFU_LOADER_NTFY_API_S_VER_1*/
+
+/**
+ * struct iwx_set_calib_default_cmd - set default value for calibration.
+ * ( IWX_SET_CALIB_DEFAULT_CMD = 0x8e )
+ * @calib_index: the calibration to set value for
+ * @length: of data
+ * @data: the value to set for the calibration result
+ * (data[0] is a GNU zero-length array; kept as-is to match upstream)
+ */
+struct iwx_set_calib_default_cmd {
+ uint16_t calib_index;
+ uint16_t length;
+ uint8_t data[0];
+} __packed; /* IWX_PHY_CALIB_OVERRIDE_VALUES_S */
+
+#define IWX_MAX_PORT_ID_NUM 2
+#define IWX_MAX_MCAST_FILTERING_ADDRESSES 256
+
+/**
+ * struct iwx_mcast_filter_cmd - configure multicast filter.
+ * @filter_own: Set 1 to filter out multicast packets sent by station itself
+ * @port_id: Multicast MAC addresses array specifier. This is a strange way
+ * to identify network interface adopted in host-device IF.
+ * It is used by FW as index in array of addresses. This array has
+ * IWX_MAX_PORT_ID_NUM members.
+ * @count: Number of MAC addresses in the array
+ * @pass_all: Set 1 to pass all multicast packets.
+ * @bssid: current association BSSID.
+ * @addr_list: Place holder for array of MAC addresses.
+ * (zero-length array; trailing storage follows the fixed header)
+ * IMPORTANT: add padding if necessary to ensure DWORD alignment.
+ */
+struct iwx_mcast_filter_cmd {
+ uint8_t filter_own;
+ uint8_t port_id;
+ uint8_t count;
+ uint8_t pass_all;
+ uint8_t bssid[6];
+ uint8_t reserved[2];
+ uint8_t addr_list[0];
+} __packed; /* IWX_MCAST_FILTERING_CMD_API_S_VER_1 */
+
+/* Firmware statistics substructures (see IWX_STATISTICS_NOTIFICATION). */
+struct iwx_statistics_dbg {
+ uint32_t burst_check;
+ uint32_t burst_count;
+ uint32_t wait_for_silence_timeout_cnt;
+ uint32_t reserved[3];
+} __packed; /* IWX_STATISTICS_DEBUG_API_S_VER_2 */
+
+struct iwx_statistics_div {
+ uint32_t tx_on_a;
+ uint32_t tx_on_b;
+ uint32_t exec_time;
+ uint32_t probe_time;
+ uint32_t rssi_ant;
+ uint32_t reserved2;
+} __packed; /* IWX_STATISTICS_SLOW_DIV_API_S_VER_2 */
+
+struct iwx_statistics_bt_activity {
+ uint32_t hi_priority_tx_req_cnt;
+ uint32_t hi_priority_tx_denied_cnt;
+ uint32_t lo_priority_tx_req_cnt;
+ uint32_t lo_priority_tx_denied_cnt;
+ uint32_t hi_priority_rx_req_cnt;
+ uint32_t hi_priority_rx_denied_cnt;
+ uint32_t lo_priority_rx_req_cnt;
+ uint32_t lo_priority_rx_denied_cnt;
+} __packed; /* IWX_STATISTICS_BT_ACTIVITY_API_S_VER_1 */
+
+/* General (radio-wide) counters; layout per the API tag below. */
+struct iwx_statistics_general_common {
+ uint32_t radio_temperature;
+ struct iwx_statistics_dbg dbg;
+ uint32_t sleep_time;
+ uint32_t slots_out;
+ uint32_t slots_idle;
+ uint32_t ttl_timestamp;
+ struct iwx_statistics_div slow_div;
+ uint32_t rx_enable_counter;
+ /*
+ * num_of_sos_states:
+ * count the number of times we have to re-tune
+ * in order to get out of bad PHY status
+ */
+ uint32_t num_of_sos_states;
+ uint32_t beacon_filtered;
+ uint32_t missed_beacons;
+ uint8_t beacon_filter_average_energy;
+ uint8_t beacon_filter_reason;
+ uint8_t beacon_filter_current_energy;
+ uint8_t beacon_filter_reserved;
+ uint32_t beacon_filter_delta_time;
+ struct iwx_statistics_bt_activity bt_activity;
+ uint64_t rx_time;
+ uint64_t on_time_rf;
+ uint64_t on_time_scan;
+ uint64_t tx_time;
+} __packed; /* STATISTICS_GENERAL_API_S_VER_10 */
+
+struct iwx_statistics_rx_non_phy {
+ uint32_t bogus_cts; /* CTS received when not expecting CTS */
+ uint32_t bogus_ack; /* ACK received when not expecting ACK */
+ uint32_t non_bssid_frames; /* number of frames with BSSID that
+ * doesn't belong to the STA BSSID */
+ uint32_t filtered_frames; /* count frames that were dumped in the
+ * filtering process */
+ uint32_t non_channel_beacons; /* beacons with our bss id but not on
+ * our serving channel */
+ uint32_t channel_beacons; /* beacons with our bss id and in our
+ * serving channel */
+ uint32_t num_missed_bcon; /* number of missed beacons */
+ uint32_t adc_rx_saturation_time; /* count in 0.8us units the time the
+ * ADC was in saturation */
+ uint32_t ina_detection_search_time;/* total time (in 0.8us) searched
+ * for INA */
+ uint32_t beacon_silence_rssi[3];/* RSSI silence after beacon frame */
+ uint32_t interference_data_flag; /* flag for interference data
+ * availability. 1 when data is
+ * available. */
+ uint32_t channel_load; /* counts RX Enable time in uSec */
+ uint32_t dsp_false_alarms; /* DSP false alarm (both OFDM
+ * and CCK) counter */
+ uint32_t beacon_rssi_a;
+ uint32_t beacon_rssi_b;
+ uint32_t beacon_rssi_c;
+ uint32_t beacon_energy_a;
+ uint32_t beacon_energy_b;
+ uint32_t beacon_energy_c;
+ uint32_t num_bt_kills;
+ uint32_t mac_id;
+ uint32_t directed_data_mpdu;
+} __packed; /* IWX_STATISTICS_RX_NON_PHY_API_S_VER_3 */
+
+struct iwx_statistics_rx_phy {
+ uint32_t ina_cnt;
+ uint32_t fina_cnt;
+ uint32_t plcp_err;
+ uint32_t crc32_err;
+ uint32_t overrun_err;
+ uint32_t early_overrun_err;
+ uint32_t crc32_good;
+ uint32_t false_alarm_cnt;
+ uint32_t fina_sync_err_cnt;
+ uint32_t sfd_timeout;
+ uint32_t fina_timeout;
+ uint32_t unresponded_rts;
+ uint32_t rxe_frame_limit_overrun;
+ uint32_t sent_ack_cnt;
+ uint32_t sent_cts_cnt;
+ uint32_t sent_ba_rsp_cnt;
+ uint32_t dsp_self_kill;
+ uint32_t mh_format_err;
+ uint32_t re_acq_main_rssi_sum;
+ uint32_t reserved;
+} __packed; /* IWX_STATISTICS_RX_PHY_API_S_VER_2 */
+
+struct iwx_statistics_rx_ht_phy {
+ uint32_t plcp_err;
+ uint32_t overrun_err;
+ uint32_t early_overrun_err;
+ uint32_t crc32_good;
+ uint32_t crc32_err;
+ uint32_t mh_format_err;
+ uint32_t agg_crc32_good;
+ uint32_t agg_mpdu_cnt;
+ uint32_t agg_cnt;
+ uint32_t unsupport_mcs;
+} __packed; /* IWX_STATISTICS_HT_RX_PHY_API_S_VER_1 */
+
+/*
+ * The first MAC indices (starting from 0)
+ * are available to the driver, AUX follows
+ */
+#define IWX_MAC_INDEX_AUX 4
+#define IWX_MAC_INDEX_MIN_DRIVER 0
+/* Number of driver-usable MAC indices (0 .. IWX_MAC_INDEX_AUX - 1). */
+#define IWX_NUM_MAC_INDEX_DRIVER IWX_MAC_INDEX_AUX
+
+#define IWX_STATION_COUNT 16
+
+#define IWX_MAX_CHAINS 3
+
+struct iwx_statistics_tx_non_phy_agg {
+ uint32_t ba_timeout;
+ uint32_t ba_reschedule_frames;
+ uint32_t scd_query_agg_frame_cnt;
+ uint32_t scd_query_no_agg;
+ uint32_t scd_query_agg;
+ uint32_t scd_query_mismatch;
+ uint32_t frame_not_ready;
+ uint32_t underrun;
+ uint32_t bt_prio_kill;
+ uint32_t rx_ba_rsp_cnt;
+ int8_t txpower[IWX_MAX_CHAINS];
+ int8_t reserved;
+ uint32_t reserved2;
+} __packed; /* IWX_STATISTICS_TX_NON_PHY_AGG_API_S_VER_1 */
+
+/*
+ * NOTE(review): this struct is not marked __packed, unlike its siblings;
+ * all members are uint32_t so the layout is unaffected — confirm this
+ * matches the upstream definition.
+ */
+struct iwx_statistics_tx_channel_width {
+ uint32_t ext_cca_narrow_ch20[1];
+ uint32_t ext_cca_narrow_ch40[2];
+ uint32_t ext_cca_narrow_ch80[3];
+ uint32_t ext_cca_narrow_ch160[4];
+ uint32_t last_tx_ch_width_indx;
+ uint32_t rx_detected_per_ch_width[4];
+ uint32_t success_per_ch_width[4];
+ uint32_t fail_per_ch_width[4];
+}; /* IWX_STATISTICS_TX_CHANNEL_WIDTH_API_S_VER_1 */
+
+struct iwx_statistics_tx {
+ uint32_t preamble_cnt;
+ uint32_t rx_detected_cnt;
+ uint32_t bt_prio_defer_cnt;
+ uint32_t bt_prio_kill_cnt;
+ uint32_t few_bytes_cnt;
+ uint32_t cts_timeout;
+ uint32_t ack_timeout;
+ uint32_t expected_ack_cnt;
+ uint32_t actual_ack_cnt;
+ uint32_t dump_msdu_cnt;
+ uint32_t burst_abort_next_frame_mismatch_cnt;
+ uint32_t burst_abort_missing_next_frame_cnt;
+ uint32_t cts_timeout_collision;
+ uint32_t ack_or_ba_timeout_collision;
+ struct iwx_statistics_tx_non_phy_agg agg;
+ struct iwx_statistics_tx_channel_width channel_width;
+} __packed; /* IWX_STATISTICS_TX_API_S_VER_4 */
+
+struct iwx_statistics_general {
+ struct iwx_statistics_general_common common;
+ uint32_t beacon_counter[IWX_MAC_INDEX_AUX];
+ uint8_t beacon_average_energy[IWX_MAC_INDEX_AUX];
+ /* pads beacon_average_energy[] out to 8 bytes total */
+ uint8_t reserved[8 - IWX_MAC_INDEX_AUX];
+} __packed; /* STATISTICS_GENERAL_API_S_VER_10 */
+
+struct iwx_statistics_rx {
+ struct iwx_statistics_rx_phy ofdm;
+ struct iwx_statistics_rx_phy cck;
+ struct iwx_statistics_rx_non_phy general;
+ struct iwx_statistics_rx_ht_phy ofdm_ht;
+} __packed; /* IWX_STATISTICS_RX_API_S_VER_3 */
+
+/*
+ * IWX_STATISTICS_NOTIFICATION = 0x9d (notification only, not a command)
+ *
+ * By default, uCode issues this notification after receiving a beacon
+ * while associated. To disable this behavior, set DISABLE_NOTIF flag in the
+ * IWX_REPLY_STATISTICS_CMD 0x9c, above.
+ *
+ * Statistics counters continue to increment beacon after beacon, but are
+ * cleared when changing channels or when driver issues IWX_REPLY_STATISTICS_CMD
+ * 0x9c with CLEAR_STATS bit set (see above).
+ *
+ * uCode also issues this notification during scans. uCode clears statistics
+ * appropriately so that each notification contains statistics for only the
+ * one channel that has just been scanned.
+ */
+
+/**
+ * struct iwx_statistics_load - RX statistics for multi-queue devices
+ * @air_time: accumulated air time, per mac
+ * @byte_count: accumulated byte count, per mac
+ * @pkt_count: accumulated packet count, per mac
+ * @avg_energy: average RSSI, per station
+ */
+struct iwx_statistics_load {
+ uint32_t air_time[IWX_MAC_INDEX_AUX];
+ uint32_t byte_count[IWX_MAC_INDEX_AUX];
+ uint32_t pkt_count[IWX_MAC_INDEX_AUX];
+ uint8_t avg_energy[IWX_STATION_COUNT];
+} __packed; /* STATISTICS_RX_MAC_STATION_S_VER_3 */
+
+struct iwx_notif_statistics {
+ uint32_t flag;
+ struct iwx_statistics_rx rx;
+ struct iwx_statistics_tx tx;
+ struct iwx_statistics_general general;
+ struct iwx_statistics_load load_stats;
+} __packed; /* STATISTICS_NTFY_API_S_VER_13 */
+
+
+/**
+ * flags used in statistics notification
+ * @IWX_STATISTICS_REPLY_FLG_CLEAR: statistics were cleared after this report
+ */
+#define IWX_STATISTICS_REPLY_FLG_CLEAR 0x01
+
+/**
+ * flags used in statistics command
+ * @IWX_STATISTICS_FLG_CLEAR: request to clear statistics after the report
+ * that's sent after this command
+ * @IWX_STATISTICS_FLG_DISABLE_NOTIF: disable unilateral statistics
+ * notifications
+ */
+#define IWX_STATISTICS_FLG_CLEAR 0x01
+#define IWX_STATISTICS_FLG_DISABLE_NOTIF 0x02
+
+/**
+ * struct iwx_statistics_cmd - statistics config command
+ * @flags: IWX_STATISTICS_* flags
+ */
+struct iwx_statistics_cmd {
+ uint32_t flags;
+} __packed; /* STATISTICS_CMD_API_S_VER_1 */
+
+
+/***********************************
+ * Smart Fifo API
+ ***********************************/
+/* Smart Fifo state */
+#define IWX_SF_LONG_DELAY_ON 0 /* should never be called by driver */
+#define IWX_SF_FULL_ON 1
+#define IWX_SF_UNINIT 2
+#define IWX_SF_INIT_OFF 3
+#define IWX_SF_HW_NUM_STATES 4
+
+/* Smart Fifo possible scenario */
+#define IWX_SF_SCENARIO_SINGLE_UNICAST 0
+#define IWX_SF_SCENARIO_AGG_UNICAST 1
+#define IWX_SF_SCENARIO_MULTICAST 2
+#define IWX_SF_SCENARIO_BA_RESP 3
+#define IWX_SF_SCENARIO_TX_RESP 4
+#define IWX_SF_NUM_SCENARIO 5
+
+#define IWX_SF_TRANSIENT_STATES_NUMBER 2 /* IWX_SF_LONG_DELAY_ON and IWX_SF_FULL_ON */
+#define IWX_SF_NUM_TIMEOUT_TYPES 2 /* Aging timer and Idle timer */
+
+/* smart FIFO default values */
+#define IWX_SF_W_MARK_SISO 4096
+#define IWX_SF_W_MARK_MIMO2 8192
+#define IWX_SF_W_MARK_MIMO3 6144
+#define IWX_SF_W_MARK_LEGACY 4096
+#define IWX_SF_W_MARK_SCAN 4096
+
+/* SF Scenarios timers for default configuration (aligned to 32 uSec) */
+#define IWX_SF_SINGLE_UNICAST_IDLE_TIMER_DEF 160 /* 150 uSec */
+#define IWX_SF_SINGLE_UNICAST_AGING_TIMER_DEF 400 /* 0.4 mSec */
+#define IWX_SF_AGG_UNICAST_IDLE_TIMER_DEF 160 /* 150 uSec */
+#define IWX_SF_AGG_UNICAST_AGING_TIMER_DEF 400 /* 0.4 mSec */
+#define IWX_SF_MCAST_IDLE_TIMER_DEF 160 /* 150 uSec */
+#define IWX_SF_MCAST_AGING_TIMER_DEF 400 /* 0.4 mSec */
+#define IWX_SF_BA_IDLE_TIMER_DEF 160 /* 150 uSec */
+#define IWX_SF_BA_AGING_TIMER_DEF 400 /* 0.4 mSec */
+#define IWX_SF_TX_RE_IDLE_TIMER_DEF 160 /* 150 uSec */
+#define IWX_SF_TX_RE_AGING_TIMER_DEF 400 /* 0.4 mSec */
+
+/* SF Scenarios timers for FULL_ON state (aligned to 32 uSec) */
+#define IWX_SF_SINGLE_UNICAST_IDLE_TIMER 320 /* 300 uSec */
+#define IWX_SF_SINGLE_UNICAST_AGING_TIMER 2016 /* 2 mSec */
+#define IWX_SF_AGG_UNICAST_IDLE_TIMER 320 /* 300 uSec */
+#define IWX_SF_AGG_UNICAST_AGING_TIMER 2016 /* 2 mSec */
+#define IWX_SF_MCAST_IDLE_TIMER 2016 /* 2 mSec */
+#define IWX_SF_MCAST_AGING_TIMER 10016 /* 10 mSec */
+#define IWX_SF_BA_IDLE_TIMER 320 /* 300 uSec */
+#define IWX_SF_BA_AGING_TIMER 2016 /* 2 mSec */
+#define IWX_SF_TX_RE_IDLE_TIMER 320 /* 300 uSec */
+#define IWX_SF_TX_RE_AGING_TIMER 2016 /* 2 mSec */
+
+#define IWX_SF_LONG_DELAY_AGING_TIMER 1000000 /* 1 Sec */
+
+#define IWX_SF_CFG_DUMMY_NOTIF_OFF (1 << 16)
+
+/**
+ * Smart Fifo configuration command.
+ * @state: smart fifo state, types listed in enum %iwx_sf_state.
+ * @watermark: Minimum allowed available free space in RXF for transient state.
+ * @long_delay_timeouts: aging and idle timer values for each scenario
+ * in long delay state.
+ * @full_on_timeouts: timer values for each scenario in full on state.
+ */
+struct iwx_sf_cfg_cmd {
+ uint32_t state;
+ uint32_t watermark[IWX_SF_TRANSIENT_STATES_NUMBER];
+ uint32_t long_delay_timeouts[IWX_SF_NUM_SCENARIO][IWX_SF_NUM_TIMEOUT_TYPES];
+ uint32_t full_on_timeouts[IWX_SF_NUM_SCENARIO][IWX_SF_NUM_TIMEOUT_TYPES];
+} __packed; /* IWX_SF_CFG_API_S_VER_2 */
+
+#define IWX_AC_BK 0
+#define IWX_AC_BE 1
+#define IWX_AC_VI 2
+#define IWX_AC_VO 3
+#define IWX_AC_NUM 4
+
+/**
+ * MAC context flags
+ * @IWX_MAC_PROT_FLG_TGG_PROTECT: 11g protection when transmitting OFDM frames,
+ * this will require CCK RTS/CTS2self.
+ * RTS/CTS will protect full burst time.
+ * @IWX_MAC_PROT_FLG_HT_PROT: enable HT protection
+ * @IWX_MAC_PROT_FLG_FAT_PROT: protect 40 MHz transmissions
+ * @IWX_MAC_PROT_FLG_SELF_CTS_EN: allow CTS2self
+ */
+#define IWX_MAC_PROT_FLG_TGG_PROTECT (1 << 3)
+#define IWX_MAC_PROT_FLG_HT_PROT (1 << 23)
+#define IWX_MAC_PROT_FLG_FAT_PROT (1 << 24)
+#define IWX_MAC_PROT_FLG_SELF_CTS_EN (1 << 30)
+
+#define IWX_MAC_FLG_SHORT_SLOT (1 << 4)
+#define IWX_MAC_FLG_SHORT_PREAMBLE (1 << 5)
+
+/**
+ * Supported MAC types
+ * @IWX_FW_MAC_TYPE_FIRST: lowest supported MAC type
+ * @IWX_FW_MAC_TYPE_AUX: Auxiliary MAC (internal)
+ * @IWX_FW_MAC_TYPE_LISTENER: monitor MAC type (?)
+ * @IWX_FW_MAC_TYPE_PIBSS: Pseudo-IBSS
+ * @IWX_FW_MAC_TYPE_IBSS: IBSS
+ * @IWX_FW_MAC_TYPE_BSS_STA: BSS (managed) station
+ * @IWX_FW_MAC_TYPE_P2P_DEVICE: P2P Device
+ * @IWX_FW_MAC_TYPE_P2P_STA: P2P client
+ * @IWX_FW_MAC_TYPE_GO: P2P GO
+ * @IWX_FW_MAC_TYPE_TEST: ?
+ * @IWX_FW_MAC_TYPE_MAX: highest supported MAC type
+ */
+#define IWX_FW_MAC_TYPE_FIRST 1
+#define IWX_FW_MAC_TYPE_AUX IWX_FW_MAC_TYPE_FIRST
+#define IWX_FW_MAC_TYPE_LISTENER 2
+#define IWX_FW_MAC_TYPE_PIBSS 3
+#define IWX_FW_MAC_TYPE_IBSS 4
+#define IWX_FW_MAC_TYPE_BSS_STA 5
+#define IWX_FW_MAC_TYPE_P2P_DEVICE 6
+#define IWX_FW_MAC_TYPE_P2P_STA 7
+#define IWX_FW_MAC_TYPE_GO 8
+#define IWX_FW_MAC_TYPE_TEST 9
+#define IWX_FW_MAC_TYPE_MAX IWX_FW_MAC_TYPE_TEST
+/* IWX_MAC_CONTEXT_TYPE_API_E_VER_1 */
+
+/**
+ * TSF hw timer ID
+ * @IWX_TSF_ID_A: use TSF A
+ * @IWX_TSF_ID_B: use TSF B
+ * @IWX_TSF_ID_C: use TSF C
+ * @IWX_TSF_ID_D: use TSF D
+ * @IWX_NUM_TSF_IDS: number of TSF timers available
+ */
+#define IWX_TSF_ID_A 0
+#define IWX_TSF_ID_B 1
+#define IWX_TSF_ID_C 2
+#define IWX_TSF_ID_D 3
+#define IWX_NUM_TSF_IDS 4
+/* IWX_TSF_ID_API_E_VER_1 */
+
+/**
+ * struct iwx_mac_data_ap - configuration data for AP MAC context
+ * @beacon_time: beacon transmit time in system time
+ * @beacon_tsf: beacon transmit time in TSF
+ * @bi: beacon interval in TU
+ * @bi_reciprocal: 2^32 / bi
+ * @dtim_interval: dtim transmit time in TU
+ * @dtim_reciprocal: 2^32 / dtim_interval
+ * @mcast_qid: queue ID for multicast traffic
+ * NOTE: obsolete from VER2 and on
+ * @beacon_template: beacon template ID
+ */
+struct iwx_mac_data_ap {
+ uint32_t beacon_time;
+ uint64_t beacon_tsf;
+ uint32_t bi;
+ uint32_t bi_reciprocal;
+ uint32_t dtim_interval;
+ uint32_t dtim_reciprocal;
+ uint32_t mcast_qid;
+ uint32_t beacon_template;
+} __packed; /* AP_MAC_DATA_API_S_VER_2 */
+
+/**
+ * struct iwx_mac_data_ibss - configuration data for IBSS MAC context
+ * @beacon_time: beacon transmit time in system time
+ * @beacon_tsf: beacon transmit time in TSF
+ * @bi: beacon interval in TU
+ * @bi_reciprocal: 2^32 / bi
+ * @beacon_template: beacon template ID
+ */
+struct iwx_mac_data_ibss {
+ uint32_t beacon_time;
+ uint64_t beacon_tsf;
+ uint32_t bi;
+ uint32_t bi_reciprocal;
+ uint32_t beacon_template;
+} __packed; /* IBSS_MAC_DATA_API_S_VER_1 */
+
+/**
+ * enum iwx_mac_data_policy - policy of the data path for this MAC
+ * @TWT_SUPPORTED: twt is supported
+ * @MORE_DATA_ACK_SUPPORTED: AP supports More Data Ack according to
+ * paragraph 9.4.1.17 in P802.11ax_D4 specification. Used for TWT
+ * early termination detection.
+ * @FLEXIBLE_TWT_SUPPORTED: AP supports flexible TWT schedule
+ * @PROTECTED_TWT_SUPPORTED: AP supports protected TWT frames (with 11w)
+ * @BROADCAST_TWT_SUPPORTED: AP and STA support broadcast TWT
+ * @COEX_HIGH_PRIORITY_ENABLE: high priority mode for BT coex, to be used
+ * during 802.1X negotiation (and allowed during 4-way-HS)
+ */
+#define IWX_TWT_SUPPORTED (1 << 0)
+#define IWX_MORE_DATA_ACK_SUPPORTED (1 << 1)
+#define IWX_FLEXIBLE_TWT_SUPPORTED (1 << 2)
+#define IWX_PROTECTED_TWT_SUPPORTED (1 << 3)
+#define IWX_BROADCAST_TWT_SUPPORTED (1 << 4)
+#define IWX_COEX_HIGH_PRIORITY_ENABLE (1 << 5)
+
+/**
+ * struct iwx_mac_data_sta - configuration data for station MAC context
+ * @is_assoc: 1 for associated state, 0 otherwise
+ * @dtim_time: DTIM arrival time in system time
+ * @dtim_tsf: DTIM arrival time in TSF
+ * @bi: beacon interval in TU, applicable only when associated
+ * @data_policy: see &enum iwx_mac_data_policy
+ * @dtim_interval: DTIM interval in TU, applicable only when associated
+ * @listen_interval: in beacon intervals, applicable only when associated
+ * @assoc_id: unique ID assigned by the AP during association
+ */
+struct iwx_mac_data_sta {
+ uint32_t is_assoc;
+ uint32_t dtim_time;
+ uint64_t dtim_tsf;
+ uint32_t bi;
+ uint32_t reserved1;
+ uint32_t dtim_interval;
+ uint32_t data_policy;
+ uint32_t listen_interval;
+ uint32_t assoc_id;
+ uint32_t assoc_beacon_arrive_time;
+} __packed; /* IWX_STA_MAC_DATA_API_S_VER_2 */
+
+/**
+ * struct iwx_mac_data_go - configuration data for P2P GO MAC context
+ * @ap: iwx_mac_data_ap struct with most config data
+ * @ctwin: client traffic window in TU (period after TBTT when GO is present).
+ * 0 indicates that there is no CT window.
+ * @opp_ps_enabled: indicate that opportunistic PS allowed
+ */
+struct iwx_mac_data_go {
+ struct iwx_mac_data_ap ap;
+ uint32_t ctwin;
+ uint32_t opp_ps_enabled;
+} __packed; /* GO_MAC_DATA_API_S_VER_1 */
+
+/**
+ * struct iwx_mac_data_p2p_sta - configuration data for P2P client MAC context
+ * @sta: iwx_mac_data_sta struct with most config data
+ * @ctwin: client traffic window in TU (period after TBTT when GO is present).
+ * 0 indicates that there is no CT window.
+ */
+struct iwx_mac_data_p2p_sta {
+ struct iwx_mac_data_sta sta;
+ uint32_t ctwin;
+} __packed; /* P2P_STA_MAC_DATA_API_S_VER_1 */
+
+/**
+ * struct iwx_mac_data_pibss - Pseudo IBSS config data
+ * @stats_interval: interval in TU between statistics notifications to host.
+ */
+struct iwx_mac_data_pibss {
+ uint32_t stats_interval;
+} __packed; /* PIBSS_MAC_DATA_API_S_VER_1 */
+
+/*
+ * struct iwx_mac_data_p2p_dev - configuration data for the P2P Device MAC
+ * context.
+ * @is_disc_extended: if set to true, P2P Device discoverability is enabled on
+ * other channels as well. This should be to true only in case that the
+ * device is discoverable and there is an active GO. Note that setting this
+ * field when not needed, will increase the number of interrupts and have
+ * effect on the platform power, as this setting opens the Rx filters on
+ * all macs.
+ */
+struct iwx_mac_data_p2p_dev {
+ uint32_t is_disc_extended;
+} __packed; /* _P2P_DEV_MAC_DATA_API_S_VER_1 */
+
+/**
+ * MAC context filter flags
+ * @IWX_MAC_FILTER_IN_PROMISC: accept all data frames
+ * @IWX_MAC_FILTER_IN_CONTROL_AND_MGMT: pass all management and
+ * control frames to the host
+ * @IWX_MAC_FILTER_ACCEPT_GRP: accept multicast frames
+ * @IWX_MAC_FILTER_DIS_DECRYPT: don't decrypt unicast frames
+ * @IWX_MAC_FILTER_DIS_GRP_DECRYPT: don't decrypt multicast frames
+ * @IWX_MAC_FILTER_IN_BEACON: transfer foreign BSS's beacons to host
+ * (in station mode when associated)
+ * @IWX_MAC_FILTER_OUT_BCAST: filter out all broadcast frames
+ * @IWX_MAC_FILTER_IN_CRC32: extract FCS and append it to frames
+ * @IWX_MAC_FILTER_IN_PROBE_REQUEST: pass probe requests to host
+ */
+#define IWX_MAC_FILTER_IN_PROMISC (1 << 0)
+#define IWX_MAC_FILTER_IN_CONTROL_AND_MGMT (1 << 1)
+#define IWX_MAC_FILTER_ACCEPT_GRP (1 << 2)
+#define IWX_MAC_FILTER_DIS_DECRYPT (1 << 3)
+#define IWX_MAC_FILTER_DIS_GRP_DECRYPT (1 << 4)
+#define IWX_MAC_FILTER_IN_BEACON (1 << 6)
+#define IWX_MAC_FILTER_OUT_BCAST (1 << 8)
+#define IWX_MAC_FILTER_IN_CRC32 (1 << 11)
+#define IWX_MAC_FILTER_IN_PROBE_REQUEST (1 << 12)
+
+/**
+ * QoS flags
+ * @IWX_MAC_QOS_FLG_UPDATE_EDCA: ?
+ * @IWX_MAC_QOS_FLG_TGN: HT is enabled
+ * @IWX_MAC_QOS_FLG_TXOP_TYPE: ?
+ *
+ */
+#define IWX_MAC_QOS_FLG_UPDATE_EDCA (1 << 0)
+#define IWX_MAC_QOS_FLG_TGN (1 << 1)
+#define IWX_MAC_QOS_FLG_TXOP_TYPE (1 << 4)
+
+/**
+ * struct iwx_ac_qos - QOS timing params for IWX_MAC_CONTEXT_CMD
+ * @cw_min: Contention window, start value in numbers of slots.
+ * Should be a power-of-2, minus 1. Device's default is 0x0f.
+ * @cw_max: Contention window, max value in numbers of slots.
+ * Should be a power-of-2, minus 1. Device's default is 0x3f.
+ * @aifsn: Number of slots in Arbitration Interframe Space (before
+ * performing random backoff timing prior to Tx). Device default 1.
+ * @fifos_mask: FIFOs used by this MAC for this AC
+ * @edca_txop: Length of Tx opportunity, in uSecs. Device default is 0.
+ *
+ * One instance of this config struct for each of 4 EDCA access categories
+ * in struct iwx_qosparam_cmd.
+ *
+ * Device will automatically increase contention window by (2*CW) + 1 for each
+ * transmission retry. Device uses cw_max as a bit mask, ANDed with new CW
+ * value, to cap the CW value.
+ */
+struct iwx_ac_qos {
+ uint16_t cw_min;
+ uint16_t cw_max;
+ uint8_t aifsn;
+ uint8_t fifos_mask;
+ uint16_t edca_txop;
+} __packed; /* IWX_AC_QOS_API_S_VER_2 */
+
+/**
+ * struct iwx_mac_ctx_cmd - command structure to configure MAC contexts
+ * ( IWX_MAC_CONTEXT_CMD = 0x28 )
+ * @id_and_color: ID and color of the MAC
+ * @action: action to perform, one of IWX_FW_CTXT_ACTION_*
+ * @mac_type: one of IWX_FW_MAC_TYPE_*
+ * @tsf_id: TSF HW timer, one of IWX_TSF_ID_*
+ * @node_addr: MAC address
+ * @bssid_addr: BSSID
+ * @cck_rates: basic rates available for CCK
+ * @ofdm_rates: basic rates available for OFDM
+ * @protection_flags: combination of IWX_MAC_PROT_FLG_FLAG_*
+ * @cck_short_preamble: 0x20 for enabling short preamble, 0 otherwise
+ * @short_slot: 0x10 for enabling short slots, 0 otherwise
+ * @filter_flags: combination of IWX_MAC_FILTER_*
+ * @qos_flags: from IWX_MAC_QOS_FLG_*
+ * @ac: one iwx_mac_qos configuration for each AC
+ * @mac_specific: one of struct iwx_mac_data_*, according to mac_type
+ */
+struct iwx_mac_ctx_cmd {
+ /* COMMON_INDEX_HDR_API_S_VER_1 */
+ uint32_t id_and_color;
+ uint32_t action;
+ /* IWX_MAC_CONTEXT_COMMON_DATA_API_S_VER_1 */
+ uint32_t mac_type;
+ uint32_t tsf_id;
+ uint8_t node_addr[6];
+ uint16_t reserved_for_node_addr;
+ uint8_t bssid_addr[6];
+ uint16_t reserved_for_bssid_addr;
+ uint32_t cck_rates;
+ uint32_t ofdm_rates;
+ uint32_t protection_flags;
+ uint32_t cck_short_preamble;
+ uint32_t short_slot;
+ uint32_t filter_flags;
+ /* IWX_MAC_QOS_PARAM_API_S_VER_1 */
+ uint32_t qos_flags;
+ struct iwx_ac_qos ac[IWX_AC_NUM+1];
+ /* IWX_MAC_CONTEXT_COMMON_DATA_API_S */
+ union {
+ struct iwx_mac_data_ap ap;
+ struct iwx_mac_data_go go;
+ struct iwx_mac_data_sta sta;
+ struct iwx_mac_data_p2p_sta p2p_sta;
+ struct iwx_mac_data_p2p_dev p2p_dev;
+ struct iwx_mac_data_pibss pibss;
+ struct iwx_mac_data_ibss ibss;
+ };
+} __packed; /* IWX_MAC_CONTEXT_CMD_API_S_VER_1 */
+
+static inline uint32_t iwx_reciprocal(uint32_t v)
+{
+ if (!v)
+ return 0;
+ return 0xFFFFFFFF / v;
+}
+
+/* Power Management Commands, Responses, Notifications */
+
+/**
+ * masks for LTR config command flags
+ * @IWX_LTR_CFG_FLAG_FEATURE_ENABLE: Feature operational status
+ * @IWX_LTR_CFG_FLAG_HW_DIS_ON_SHADOW_REG_ACCESS: allow LTR change on shadow
+ * memory access
+ * @IWX_LTR_CFG_FLAG_HW_EN_SHRT_WR_THROUGH: allow LTR msg send on ANY LTR
+ * reg change
+ * @IWX_LTR_CFG_FLAG_HW_DIS_ON_D0_2_D3: allow LTR msg send on transition from
+ * D0 to D3
+ * @IWX_LTR_CFG_FLAG_SW_SET_SHORT: fixed static short LTR register
+ * @IWX_LTR_CFG_FLAG_SW_SET_LONG: fixed static long LTR register
+ * @IWX_LTR_CFG_FLAG_DENIE_C10_ON_PD: allow going into C10 on PD
+ */
+#define IWX_LTR_CFG_FLAG_FEATURE_ENABLE 0x00000001
+#define IWX_LTR_CFG_FLAG_HW_DIS_ON_SHADOW_REG_ACCESS 0x00000002
+#define IWX_LTR_CFG_FLAG_HW_EN_SHRT_WR_THROUGH 0x00000004
+#define IWX_LTR_CFG_FLAG_HW_DIS_ON_D0_2_D3 0x00000008
+#define IWX_LTR_CFG_FLAG_SW_SET_SHORT 0x00000010
+#define IWX_LTR_CFG_FLAG_SW_SET_LONG 0x00000020
+#define IWX_LTR_CFG_FLAG_DENIE_C10_ON_PD 0x00000040
+
+#define IWX_LTR_VALID_STATES_NUM 4
+
+/**
+ * struct iwx_ltr_config_cmd - configures the LTR
+ * @flags: See %enum iwx_ltr_config_flags
+ * @static_long:
+ * @static_short:
+ * @ltr_cfg_values:
+ * @ltr_short_idle_timeout:
+ */
+struct iwx_ltr_config_cmd {
+ uint32_t flags;
+ uint32_t static_long;
+ uint32_t static_short;
+ uint32_t ltr_cfg_values[IWX_LTR_VALID_STATES_NUM];
+ uint32_t ltr_short_idle_timeout;
+} __packed; /* LTR_CAPABLE_API_S_VER_2 */
+
+/* Radio LP RX Energy Threshold measured in dBm */
+#define IWX_POWER_LPRX_RSSI_THRESHOLD 75
+#define IWX_POWER_LPRX_RSSI_THRESHOLD_MAX 94
+#define IWX_POWER_LPRX_RSSI_THRESHOLD_MIN 30
+
+/**
+ * Masks for iwx_mac_power_cmd command flags
+ * @IWX_POWER_FLAGS_POWER_SAVE_ENA_MSK: '1' Allow to save power by turning off
+ * receiver and transmitter. '0' - does not allow.
+ * @IWX_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK: '0' Driver disables power management,
+ * '1' Driver enables PM (use rest of parameters)
+ * @IWX_POWER_FLAGS_SKIP_OVER_DTIM_MSK: '0' PM has to wake up every DTIM,
+ * '1' PM could sleep over DTIM till listen Interval.
+ * @IWX_POWER_FLAGS_SNOOZE_ENA_MSK: Enable snoozing only if uAPSD is enabled and all
+ * access categories are both delivery and trigger enabled.
+ * @IWX_POWER_FLAGS_BT_SCO_ENA: Enable BT SCO coex only if uAPSD and
+ * PBW Snoozing enabled
+ * @IWX_POWER_FLAGS_ADVANCE_PM_ENA_MSK: Advanced PM (uAPSD) enable mask
+ * @IWX_POWER_FLAGS_LPRX_ENA_MSK: Low Power RX enable.
+ * @IWX_POWER_FLAGS_AP_UAPSD_MISBEHAVING_ENA_MSK: AP/GO's uAPSD misbehaving
+ * detection enablement
+*/
+#define IWX_POWER_FLAGS_POWER_SAVE_ENA_MSK (1 << 0)
+#define IWX_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK (1 << 1)
+#define IWX_POWER_FLAGS_SKIP_OVER_DTIM_MSK (1 << 2)
+#define IWX_POWER_FLAGS_SNOOZE_ENA_MSK (1 << 5)
+#define IWX_POWER_FLAGS_BT_SCO_ENA (1 << 8)
+#define IWX_POWER_FLAGS_ADVANCE_PM_ENA_MSK (1 << 9)
+#define IWX_POWER_FLAGS_LPRX_ENA_MSK (1 << 11)
+#define IWX_POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK (1 << 12)
+
+#define IWX_POWER_VEC_SIZE 5
+
+/**
+ * Masks for device power command flags
+ * @IWX_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK:
+ * '1' Allow to save power by turning off receiver and transmitter.
+ * '0' Do not allow. This flag should be always set to '1' unless
+ * one needs to disable actual power down for debug purposes.
+ * @IWX_DEVICE_POWER_FLAGS_CAM_MSK:
+ * '1' CAM (Continuous Active Mode) is set, power management is disabled.
+ * '0' Power management is enabled, one of the power schemes is applied.
+ */
+#define IWX_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK (1 << 0)
+#define IWX_DEVICE_POWER_FLAGS_CAM_MSK (1 << 13)
+
+/**
+ * struct iwx_device_power_cmd - device wide power command.
+ * IWX_POWER_TABLE_CMD = 0x77 (command, has simple generic response)
+ *
+ * @flags: Power table command flags from IWX_DEVICE_POWER_FLAGS_*
+ */
+struct iwx_device_power_cmd {
+ /* PM_POWER_TABLE_CMD_API_S_VER_6 */
+ uint16_t flags;
+ uint16_t reserved;
+} __packed;
+
+/**
+ * struct iwx_mac_power_cmd - New power command containing uAPSD support
+ * IWX_MAC_PM_POWER_TABLE = 0xA9 (command, has simple generic response)
+ * @id_and_color: MAC context identifier
+ * @flags: Power table command flags from POWER_FLAGS_*
+ * @keep_alive_seconds: Keep alive period in seconds. Default - 25 sec.
+ * Minimum allowed:- 3 * DTIM. Keep alive period must be
+ * set regardless of power scheme or current power state.
+ * FW use this value also when PM is disabled.
+ * @rx_data_timeout: Minimum time (usec) from last Rx packet for AM to
+ * PSM transition - legacy PM
+ * @tx_data_timeout: Minimum time (usec) from last Tx packet for AM to
+ * PSM transition - legacy PM
+ * @sleep_interval: not in use
+ * @skip_dtim_periods: Number of DTIM periods to skip if Skip over DTIM flag
+ * is set. For example, if it is required to skip over
+ * one DTIM, this value need to be set to 2 (DTIM periods).
+ * @rx_data_timeout_uapsd: Minimum time (usec) from last Rx packet for AM to
+ * PSM transition - uAPSD
+ * @tx_data_timeout_uapsd: Minimum time (usec) from last Tx packet for AM to
+ * PSM transition - uAPSD
+ * @lprx_rssi_threshold: Signal strength up to which LP RX can be enabled.
+ * Default: 80dbm
+ * @num_skip_dtim: Number of DTIMs to skip if Skip over DTIM flag is set
+ * @snooze_interval: Maximum time between attempts to retrieve buffered data
+ * from the AP [msec]
+ * @snooze_window: A window of time in which PBW snoozing ensures that all
+ * packets are received. It is also the minimum time from last
+ * received unicast RX packet, before client stops snoozing
+ * for data. [msec]
+ * @snooze_step: TBD
+ * @qndp_tid: TID client shall use for uAPSD QNDP triggers
+ * @uapsd_ac_flags: Set trigger-enabled and delivery-enabled indication for
+ * each corresponding AC.
+ * Use IEEE80211_WMM_IE_STA_QOSINFO_AC* for correct values.
+ * @uapsd_max_sp: Use IEEE80211_WMM_IE_STA_QOSINFO_SP_* for correct
+ * values.
+ * @heavy_tx_thld_packets: TX threshold measured in number of packets
+ * @heavy_rx_thld_packets: RX threshold measured in number of packets
+ * @heavy_tx_thld_percentage: TX threshold measured in load's percentage
+ * @heavy_rx_thld_percentage: RX threshold measured in load's percentage
+ * @limited_ps_threshold:
+*/
+struct iwx_mac_power_cmd {
+ /* CONTEXT_DESC_API_T_VER_1 */
+ uint32_t id_and_color;
+
+ /* CLIENT_PM_POWER_TABLE_S_VER_1 */
+ uint16_t flags;
+ uint16_t keep_alive_seconds;
+ uint32_t rx_data_timeout;
+ uint32_t tx_data_timeout;
+ uint32_t rx_data_timeout_uapsd;
+ uint32_t tx_data_timeout_uapsd;
+ uint8_t lprx_rssi_threshold;
+ uint8_t skip_dtim_periods;
+ uint16_t snooze_interval;
+ uint16_t snooze_window;
+ uint8_t snooze_step;
+ uint8_t qndp_tid;
+ uint8_t uapsd_ac_flags;
+ uint8_t uapsd_max_sp;
+ uint8_t heavy_tx_thld_packets;
+ uint8_t heavy_rx_thld_packets;
+ uint8_t heavy_tx_thld_percentage;
+ uint8_t heavy_rx_thld_percentage;
+ uint8_t limited_ps_threshold;
+ uint8_t reserved;
+} __packed;
+
+#define IWX_DEFAULT_PS_TX_DATA_TIMEOUT (100 * 1000)
+#define IWX_DEFAULT_PS_RX_DATA_TIMEOUT (100 * 1000)
+
+/*
+ * struct iwx_uapsd_misbehaving_ap_notif - FW sends this notification when
+ * associated AP is identified as improperly implementing uAPSD protocol.
+ * IWX_PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION = 0x78
+ * @sta_id: index of station in uCode's station table - associated AP ID in
+ * this context.
+ */
+struct iwx_uapsd_misbehaving_ap_notif {
+ uint32_t sta_id;
+ uint8_t mac_id;
+ uint8_t reserved[3];
+} __packed;
+
+/**
+ * struct iwx_beacon_filter_cmd
+ * IWX_REPLY_BEACON_FILTERING_CMD = 0xd2 (command)
+ * @id_and_color: MAC context identifier
+ * @bf_energy_delta: Used for RSSI filtering, if in 'normal' state. Send beacon
+ * to driver if delta in Energy values calculated for this and last
+ * passed beacon is greater than this threshold. Zero value means that
+ * the Energy change is ignored for beacon filtering, and beacon will
+ * not be forced to be sent to driver regardless of this delta. Typical
+ * energy delta 5dB.
+ * @bf_roaming_energy_delta: Used for RSSI filtering, if in 'roaming' state.
+ * Send beacon to driver if delta in Energy values calculated for this
+ * and last passed beacon is greater than this threshold. Zero value
+ * means that the Energy change is ignored for beacon filtering while in
+ * Roaming state, typical energy delta 1dB.
+ * @bf_roaming_state: Used for RSSI filtering. If absolute Energy values
+ * calculated for current beacon is less than the threshold, use
+ * Roaming Energy Delta Threshold, otherwise use normal Energy Delta
+ * Threshold. Typical energy threshold is -72dBm.
+ * @bf_temp_threshold: This threshold determines the type of temperature
+ * filtering (Slow or Fast) that is selected (Units are in Celsius):
+ * If the current temperature is above this threshold - Fast filter
+ * will be used, If the current temperature is below this threshold -
+ * Slow filter will be used.
+ * @bf_temp_fast_filter: Send Beacon to driver if delta in temperature values
+ * calculated for this and the last passed beacon is greater than this
+ * threshold. Zero value means that the temperature change is ignored for
+ * beacon filtering; beacons will not be forced to be sent to driver
+ * regardless of whether its temperature has been changed.
+ * @bf_temp_slow_filter: Send Beacon to driver if delta in temperature values
+ * calculated for this and the last passed beacon is greater than this
+ * threshold. Zero value means that the temperature change is ignored for
+ * beacon filtering; beacons will not be forced to be sent to driver
+ * regardless of whether its temperature has been changed.
+ * @bf_enable_beacon_filter: 1, beacon filtering is enabled; 0, disabled.
+ * @bf_escape_timer: Send beacons to driver if no beacons were passed
+ * for a specific period of time. Units: Beacons.
+ * @ba_escape_timer: Fully receive and parse beacon if no beacons were passed
+ * for a longer period of time then this escape-timeout. Units: Beacons.
+ * @ba_enable_beacon_abort: 1, beacon abort is enabled; 0, disabled.
+ * @bf_threshold_absolute_low: See below.
+ * @bf_threshold_absolute_high: Send Beacon to driver if Energy value calculated
+ * for this beacon crossed this absolute threshold. For the 'Increase'
+ * direction the bf_energy_absolute_low[i] is used. For the 'Decrease'
+ * direction the bf_energy_absolute_high[i] is used. Zero value means
+ * that this specific threshold is ignored for beacon filtering, and
+ * beacon will not be forced to be sent to driver due to this setting.
+ */
+struct iwx_beacon_filter_cmd {
+ uint32_t bf_energy_delta;
+ uint32_t bf_roaming_energy_delta;
+ uint32_t bf_roaming_state;
+ uint32_t bf_temp_threshold;
+ uint32_t bf_temp_fast_filter;
+ uint32_t bf_temp_slow_filter;
+ uint32_t bf_enable_beacon_filter;
+ uint32_t bf_debug_flag;
+ uint32_t bf_escape_timer;
+ uint32_t ba_escape_timer;
+ uint32_t ba_enable_beacon_abort;
+ uint32_t bf_threshold_absolute_low[2];
+ uint32_t bf_threshold_absolute_high[2];
+} __packed; /* BEACON_FILTER_CONFIG_API_S_VER_4 */
+
+/* Beacon filtering and beacon abort */
+#define IWX_BF_ENERGY_DELTA_DEFAULT 5
+#define IWX_BF_ENERGY_DELTA_MAX 255
+#define IWX_BF_ENERGY_DELTA_MIN 0
+
+#define IWX_BF_ROAMING_ENERGY_DELTA_DEFAULT 1
+#define IWX_BF_ROAMING_ENERGY_DELTA_MAX 255
+#define IWX_BF_ROAMING_ENERGY_DELTA_MIN 0
+
+#define IWX_BF_ROAMING_STATE_DEFAULT 72
+#define IWX_BF_ROAMING_STATE_MAX 255
+#define IWX_BF_ROAMING_STATE_MIN 0
+
+#define IWX_BF_TEMP_THRESHOLD_DEFAULT 112
+#define IWX_BF_TEMP_THRESHOLD_MAX 255
+#define IWX_BF_TEMP_THRESHOLD_MIN 0
+
+#define IWX_BF_TEMP_FAST_FILTER_DEFAULT 1
+#define IWX_BF_TEMP_FAST_FILTER_MAX 255
+#define IWX_BF_TEMP_FAST_FILTER_MIN 0
+
+#define IWX_BF_TEMP_SLOW_FILTER_DEFAULT 5
+#define IWX_BF_TEMP_SLOW_FILTER_MAX 255
+#define IWX_BF_TEMP_SLOW_FILTER_MIN 0
+
+#define IWX_BF_ENABLE_BEACON_FILTER_DEFAULT 1
+
+#define IWX_BF_DEBUG_FLAG_DEFAULT 0
+
+#define IWX_BF_ESCAPE_TIMER_DEFAULT 50
+#define IWX_BF_ESCAPE_TIMER_MAX 1024
+#define IWX_BF_ESCAPE_TIMER_MIN 0
+
+#define IWX_BA_ESCAPE_TIMER_DEFAULT 6
+#define IWX_BA_ESCAPE_TIMER_D3 9
+#define IWX_BA_ESCAPE_TIMER_MAX 1024
+#define IWX_BA_ESCAPE_TIMER_MIN 0
+
+#define IWX_BA_ENABLE_BEACON_ABORT_DEFAULT 1
+
+#define IWX_BF_CMD_CONFIG_DEFAULTS \
+ .bf_energy_delta = htole32(IWX_BF_ENERGY_DELTA_DEFAULT), \
+ .bf_roaming_energy_delta = \
+ htole32(IWX_BF_ROAMING_ENERGY_DELTA_DEFAULT), \
+ .bf_roaming_state = htole32(IWX_BF_ROAMING_STATE_DEFAULT), \
+ .bf_temp_threshold = htole32(IWX_BF_TEMP_THRESHOLD_DEFAULT), \
+ .bf_temp_fast_filter = htole32(IWX_BF_TEMP_FAST_FILTER_DEFAULT), \
+ .bf_temp_slow_filter = htole32(IWX_BF_TEMP_SLOW_FILTER_DEFAULT), \
+ .bf_debug_flag = htole32(IWX_BF_DEBUG_FLAG_DEFAULT), \
+ .bf_escape_timer = htole32(IWX_BF_ESCAPE_TIMER_DEFAULT), \
+ .ba_escape_timer = htole32(IWX_BA_ESCAPE_TIMER_DEFAULT)
+
+/* uCode API values for HT/VHT bit rates */
+#define IWX_RATE_HT_SISO_MCS_0_PLCP 0
+#define IWX_RATE_HT_SISO_MCS_1_PLCP 1
+#define IWX_RATE_HT_SISO_MCS_2_PLCP 2
+#define IWX_RATE_HT_SISO_MCS_3_PLCP 3
+#define IWX_RATE_HT_SISO_MCS_4_PLCP 4
+#define IWX_RATE_HT_SISO_MCS_5_PLCP 5
+#define IWX_RATE_HT_SISO_MCS_6_PLCP 6
+#define IWX_RATE_HT_SISO_MCS_7_PLCP 7
+#define IWX_RATE_HT_MIMO2_MCS_8_PLCP 0x8
+#define IWX_RATE_HT_MIMO2_MCS_9_PLCP 0x9
+#define IWX_RATE_HT_MIMO2_MCS_10_PLCP 0xA
+#define IWX_RATE_HT_MIMO2_MCS_11_PLCP 0xB
+#define IWX_RATE_HT_MIMO2_MCS_12_PLCP 0xC
+#define IWX_RATE_HT_MIMO2_MCS_13_PLCP 0xD
+#define IWX_RATE_HT_MIMO2_MCS_14_PLCP 0xE
+#define IWX_RATE_HT_MIMO2_MCS_15_PLCP 0xF
+#define IWX_RATE_VHT_SISO_MCS_0_PLCP 0
+#define IWX_RATE_VHT_SISO_MCS_1_PLCP 1
+#define IWX_RATE_VHT_SISO_MCS_2_PLCP 2
+#define IWX_RATE_VHT_SISO_MCS_3_PLCP 3
+#define IWX_RATE_VHT_SISO_MCS_4_PLCP 4
+#define IWX_RATE_VHT_SISO_MCS_5_PLCP 5
+#define IWX_RATE_VHT_SISO_MCS_6_PLCP 6
+#define IWX_RATE_VHT_SISO_MCS_7_PLCP 7
+#define IWX_RATE_VHT_SISO_MCS_8_PLCP 8
+#define IWX_RATE_VHT_SISO_MCS_9_PLCP 9
+#define IWX_RATE_VHT_MIMO2_MCS_0_PLCP 0x10
+#define IWX_RATE_VHT_MIMO2_MCS_1_PLCP 0x11
+#define IWX_RATE_VHT_MIMO2_MCS_2_PLCP 0x12
+#define IWX_RATE_VHT_MIMO2_MCS_3_PLCP 0x13
+#define IWX_RATE_VHT_MIMO2_MCS_4_PLCP 0x14
+#define IWX_RATE_VHT_MIMO2_MCS_5_PLCP 0x15
+#define IWX_RATE_VHT_MIMO2_MCS_6_PLCP 0x16
+#define IWX_RATE_VHT_MIMO2_MCS_7_PLCP 0x17
+#define IWX_RATE_VHT_MIMO2_MCS_8_PLCP 0x18
+#define IWX_RATE_VHT_MIMO2_MCS_9_PLCP 0x19
+#define IWX_RATE_HT_SISO_MCS_INV_PLCP 0x20
+#define IWX_RATE_HT_MIMO2_MCS_INV_PLCP IWX_RATE_HT_SISO_MCS_INV_PLCP
+#define IWX_RATE_VHT_SISO_MCS_INV_PLCP IWX_RATE_HT_SISO_MCS_INV_PLCP
+#define IWX_RATE_VHT_MIMO2_MCS_INV_PLCP IWX_RATE_HT_SISO_MCS_INV_PLCP
+#define IWX_RATE_HT_SISO_MCS_8_PLCP IWX_RATE_HT_SISO_MCS_INV_PLCP
+#define IWX_RATE_HT_SISO_MCS_9_PLCP IWX_RATE_HT_SISO_MCS_INV_PLCP
+
+/*
+ * These serve as indexes into struct iwx_rate iwx_rates[IWX_RIDX_MAX].
+ */
+enum {
+ IWX_RATE_1M_INDEX = 0,
+ IWX_FIRST_CCK_RATE = IWX_RATE_1M_INDEX,
+ IWX_RATE_2M_INDEX,
+ IWX_RATE_5M_INDEX,
+ IWX_RATE_11M_INDEX,
+ IWX_LAST_CCK_RATE = IWX_RATE_11M_INDEX,
+ IWX_RATE_6M_INDEX,
+ IWX_FIRST_OFDM_RATE = IWX_RATE_6M_INDEX,
+ IWX_RATE_MCS_0_INDEX = IWX_RATE_6M_INDEX,
+ IWX_FIRST_HT_RATE = IWX_RATE_MCS_0_INDEX,
+ IWX_FIRST_VHT_RATE = IWX_RATE_MCS_0_INDEX,
+ IWX_RATE_9M_INDEX,
+ IWX_RATE_12M_INDEX,
+ IWX_RATE_MCS_1_INDEX = IWX_RATE_12M_INDEX,
+ IWX_RATE_MCS_8_INDEX,
+ IWX_FIRST_HT_MIMO2_RATE = IWX_RATE_MCS_8_INDEX,
+ IWX_RATE_18M_INDEX,
+ IWX_RATE_MCS_2_INDEX = IWX_RATE_18M_INDEX,
+ IWX_RATE_24M_INDEX,
+ IWX_RATE_MCS_3_INDEX = IWX_RATE_24M_INDEX,
+ IWX_RATE_MCS_9_INDEX,
+ IWX_RATE_36M_INDEX,
+ IWX_RATE_MCS_4_INDEX = IWX_RATE_36M_INDEX,
+ IWX_RATE_MCS_10_INDEX,
+ IWX_RATE_48M_INDEX,
+ IWX_RATE_MCS_5_INDEX = IWX_RATE_48M_INDEX,
+ IWX_RATE_MCS_11_INDEX,
+ IWX_RATE_54M_INDEX,
+ IWX_RATE_MCS_6_INDEX = IWX_RATE_54M_INDEX,
+ IWX_LAST_NON_HT_RATE = IWX_RATE_54M_INDEX,
+ IWX_RATE_MCS_7_INDEX,
+ IWX_LAST_HT_SISO_RATE = IWX_RATE_MCS_7_INDEX,
+ IWX_RATE_MCS_12_INDEX,
+ IWX_RATE_MCS_13_INDEX,
+ IWX_RATE_MCS_14_INDEX,
+ IWX_RATE_MCS_15_INDEX,
+ IWX_LAST_HT_RATE = IWX_RATE_MCS_15_INDEX,
+ IWX_LAST_VHT_RATE = IWX_RATE_MCS_9_INDEX,
+ IWX_RATE_COUNT_LEGACY = IWX_LAST_NON_HT_RATE + 1,
+ IWX_RATE_COUNT = IWX_LAST_HT_RATE + 1,
+};
+
+#define IWX_RATE_BIT_MSK(r) (1 << (IWX_RATE_##r##M_INDEX))
+
+/* fw API values for legacy bit rates, both OFDM and CCK */
+#define IWX_RATE_6M_PLCP 13
+#define IWX_RATE_9M_PLCP 15
+#define IWX_RATE_12M_PLCP 5
+#define IWX_RATE_18M_PLCP 7
+#define IWX_RATE_24M_PLCP 9
+#define IWX_RATE_36M_PLCP 11
+#define IWX_RATE_48M_PLCP 1
+#define IWX_RATE_54M_PLCP 3
+#define IWX_RATE_1M_PLCP 10
+#define IWX_RATE_2M_PLCP 20
+#define IWX_RATE_5M_PLCP 55
+#define IWX_RATE_11M_PLCP 110
+#define IWX_RATE_INVM_PLCP 0xff
+
+/*
+ * rate_n_flags bit fields version 1
+ *
+ * The 32-bit value has different layouts in the low 8 bits depending on the
+ * format. There are three formats, HT, VHT and legacy (11abg, with subformats
+ * for CCK and OFDM).
+ *
+ * High-throughput (HT) rate format
+ * bit 8 is 1, bit 26 is 0, bit 9 is 0 (OFDM)
+ * Very High-throughput (VHT) rate format
+ * bit 8 is 0, bit 26 is 1, bit 9 is 0 (OFDM)
+ * Legacy OFDM rate format for bits 7:0
+ * bit 8 is 0, bit 26 is 0, bit 9 is 0 (OFDM)
+ * Legacy CCK rate format for bits 7:0:
+ * bit 8 is 0, bit 26 is 0, bit 9 is 1 (CCK)
+ */
+
+/* Bit 8: (1) HT format, (0) legacy or VHT format */
+#define IWX_RATE_MCS_HT_POS 8
+#define IWX_RATE_MCS_HT_MSK_V1 (1 << IWX_RATE_MCS_HT_POS)
+
+/* Bit 9: (1) CCK, (0) OFDM. HT (bit 8) must be "0" for this bit to be valid */
+#define IWX_RATE_MCS_CCK_POS_V1 9
+#define IWX_RATE_MCS_CCK_MSK_V1 (1 << IWX_RATE_MCS_CCK_POS_V1)
+
+/* Bit 26: (1) VHT format, (0) legacy format in bits 8:0 */
+#define IWX_RATE_MCS_VHT_POS_V1 26
+#define IWX_RATE_MCS_VHT_MSK_V1 (1 << IWX_RATE_MCS_VHT_POS_V1)
+
+
+/*
+ * High-throughput (HT) rate format for bits 7:0
+ *
+ * 2-0: MCS rate base
+ * 0) 6 Mbps
+ * 1) 12 Mbps
+ * 2) 18 Mbps
+ * 3) 24 Mbps
+ * 4) 36 Mbps
+ * 5) 48 Mbps
+ * 6) 54 Mbps
+ * 7) 60 Mbps
+ * 4-3: 0) Single stream (SISO)
+ * 1) Dual stream (MIMO)
+ * 2) Triple stream (MIMO)
+ * 5: Value of 0x20 in bits 7:0 indicates 6 Mbps HT40 duplicate data
+ * (bits 7-6 are zero)
+ *
+ * Together the low 5 bits work out to the MCS index because we don't
+ * support MCSes above 15/23, and 0-7 have one stream, 8-15 have two
+ * streams and 16-23 have three streams. We could also support MCS 32
+ * which is the duplicate 20 MHz MCS (bit 5 set, all others zero.)
+ */
+#define IWX_RATE_HT_MCS_RATE_CODE_MSK_V1 0x7
+#define IWX_RATE_HT_MCS_NSS_POS_V1 3
+#define IWX_RATE_HT_MCS_NSS_MSK_V1 (3 << IWX_RATE_HT_MCS_NSS_POS_V1)
+#define IWX_RATE_HT_MCS_MIMO2_MSK_V1 (1 << IWX_RATE_HT_MCS_NSS_POS_V1)
+
+/* Bit 10: (1) Use Green Field preamble */
+#define IWX_RATE_HT_MCS_GF_POS 10
+#define IWX_RATE_HT_MCS_GF_MSK (1 << IWX_RATE_HT_MCS_GF_POS)
+
+#define IWX_RATE_HT_MCS_INDEX_MSK_V1 0x3f
+
+/*
+ * Very High-throughput (VHT) rate format for bits 7:0
+ *
+ * 3-0: VHT MCS (0-9)
+ * 5-4: number of streams - 1:
+ * 0) Single stream (SISO)
+ * 1) Dual stream (MIMO)
+ * 2) Triple stream (MIMO)
+ */
+
+/* Bit 4-5: (0) SISO, (1) MIMO2 (2) MIMO3 */
+#define IWX_RATE_VHT_MCS_RATE_CODE_MSK 0xf
+#define IWX_RATE_VHT_MCS_NSS_POS 4
+#define IWX_RATE_VHT_MCS_NSS_MSK (3 << IWX_RATE_VHT_MCS_NSS_POS)
+#define IWX_RATE_VHT_MCS_MIMO2_MSK (1 << IWX_RATE_VHT_MCS_NSS_POS)
+
+/*
+ * Legacy OFDM rate format for bits 7:0
+ *
+ * 3-0: 0xD) 6 Mbps
+ * 0xF) 9 Mbps
+ * 0x5) 12 Mbps
+ * 0x7) 18 Mbps
+ * 0x9) 24 Mbps
+ * 0xB) 36 Mbps
+ * 0x1) 48 Mbps
+ * 0x3) 54 Mbps
+ * (bits 7-4 are 0)
+ *
+ * Legacy CCK rate format for bits 7:0:
+ * bit 8 is 0, bit 26 is 0, bit 9 is 1 (CCK):
+ *
+ * 6-0: 10) 1 Mbps
+ * 20) 2 Mbps
+ * 55) 5.5 Mbps
+ * 110) 11 Mbps
+ * (bit 7 is 0)
+ */
+#define IWX_RATE_LEGACY_RATE_MSK_V1 0xff
+
+/* Bit 10 - OFDM HE */
+#define IWX_RATE_MCS_HE_POS_V1 10
+/*
+ * Fixed: this mask previously expanded to the undefined identifier
+ * RATE_MCS_HE_POS_V1 (missing IWX_ prefix), which would fail to compile
+ * in any translation unit that actually uses IWX_RATE_MCS_HE_MSK_V1.
+ */
+#define IWX_RATE_MCS_HE_MSK_V1 (1 << IWX_RATE_MCS_HE_POS_V1)
+
+/*
+ * Bit 11-12: (0) 20MHz, (1) 40MHz, (2) 80MHz, (3) 160MHz
+ * 0 and 1 are valid for HT and VHT, 2 and 3 only for VHT
+ */
+#define IWX_RATE_MCS_CHAN_WIDTH_POS 11
+#define IWX_RATE_MCS_CHAN_WIDTH_MSK_V1 (3 << IWX_RATE_MCS_CHAN_WIDTH_POS)
+#define IWX_RATE_MCS_CHAN_WIDTH_20_V1 (0 << IWX_RATE_MCS_CHAN_WIDTH_POS)
+#define IWX_RATE_MCS_CHAN_WIDTH_40_V1 (1 << IWX_RATE_MCS_CHAN_WIDTH_POS)
+#define IWX_RATE_MCS_CHAN_WIDTH_80_V1 (2 << IWX_RATE_MCS_CHAN_WIDTH_POS)
+#define IWX_RATE_MCS_CHAN_WIDTH_160_V1 (3 << IWX_RATE_MCS_CHAN_WIDTH_POS)
+
+/* Bit 13: (1) Short guard interval (0.4 usec), (0) normal GI (0.8 usec) */
+#define IWX_RATE_MCS_SGI_POS_V1 13
+#define IWX_RATE_MCS_SGI_MSK_V1 (1 << IWX_RATE_MCS_SGI_POS_V1)
+
+/* Bit 14-16: Antenna selection (1) Ant A, (2) Ant B, (4) Ant C (unused) */
+#define IWX_RATE_MCS_ANT_POS 14
+#define IWX_RATE_MCS_ANT_A_MSK (1 << IWX_RATE_MCS_ANT_POS)
+#define IWX_RATE_MCS_ANT_B_MSK (2 << IWX_RATE_MCS_ANT_POS)
+#define IWX_RATE_MCS_ANT_AB_MSK (IWX_RATE_MCS_ANT_A_MSK | \
+ IWX_RATE_MCS_ANT_B_MSK)
+/*
+ * NOTE(review): IWX_RATE_MCS_ANT_ABC_MSK is not defined in this section;
+ * presumably it exists elsewhere in this file (3-antenna mask inherited
+ * from older iwm/iwlwifi code) -- confirm, otherwise any use of
+ * IWX_RATE_MCS_ANT_MSK will fail to compile.
+ */
+#define IWX_RATE_MCS_ANT_MSK IWX_RATE_MCS_ANT_ABC_MSK
+#define IWX_RATE_MCS_ANT_NUM 2
+
+/* Bit 17: (0) SS, (1) SS*2 */
+#define IWX_RATE_MCS_STBC_POS 17
+#define IWX_RATE_MCS_STBC_MSK (1 << IWX_RATE_MCS_STBC_POS)
+
+/* Bit 18: OFDM-HE dual carrier mode */
+#define IWX_RATE_HE_DUAL_CARRIER_MODE 18
+#define IWX_RATE_HE_DUAL_CARRIER_MODE_MSK (1 << IWX_RATE_HE_DUAL_CARRIER_MODE)
+
+/* Bit 19: (0) Beamforming is off, (1) Beamforming is on */
+#define IWX_RATE_MCS_BF_POS 19
+#define IWX_RATE_MCS_BF_MSK (1 << IWX_RATE_MCS_BF_POS)
+
+/*
+ * Bit 20-21: HE LTF type and guard interval
+ * HE (ext) SU:
+ * 0 1xLTF+0.8us
+ * 1 2xLTF+0.8us
+ * 2 2xLTF+1.6us
+ * 3 & SGI (bit 13) clear 4xLTF+3.2us
+ * 3 & SGI (bit 13) set 4xLTF+0.8us
+ * HE MU:
+ * 0 4xLTF+0.8us
+ * 1 2xLTF+0.8us
+ * 2 2xLTF+1.6us
+ * 3 4xLTF+3.2us
+ * HE TRIG:
+ * 0 1xLTF+1.6us
+ * 1 2xLTF+1.6us
+ * 2 4xLTF+3.2us
+ * 3 (does not occur)
+ */
+#define IWX_RATE_MCS_HE_GI_LTF_POS 20
+#define IWX_RATE_MCS_HE_GI_LTF_MSK_V1 (3 << IWX_RATE_MCS_HE_GI_LTF_POS)
+
+/* Bit 22-23: HE type. (0) SU, (1) SU_EXT, (2) MU, (3) trigger based */
+#define IWX_RATE_MCS_HE_TYPE_POS_V1 22
+#define IWX_RATE_MCS_HE_TYPE_SU_V1 (0 << IWX_RATE_MCS_HE_TYPE_POS_V1)
+#define IWX_RATE_MCS_HE_TYPE_EXT_SU_V1 (1 << IWX_RATE_MCS_HE_TYPE_POS_V1)
+#define IWX_RATE_MCS_HE_TYPE_MU_V1 (2 << IWX_RATE_MCS_HE_TYPE_POS_V1)
+#define IWX_RATE_MCS_HE_TYPE_TRIG_V1 (3 << IWX_RATE_MCS_HE_TYPE_POS_V1)
+#define IWX_RATE_MCS_HE_TYPE_MSK_V1 (3 << IWX_RATE_MCS_HE_TYPE_POS_V1)
+
+/* Bit 24-25: (0) 20MHz (no dup), (1) 2x20MHz, (2) 4x20MHz, 3 8x20MHz */
+#define IWX_RATE_MCS_DUP_POS_V1 24
+#define IWX_RATE_MCS_DUP_MSK_V1 (3 << IWX_RATE_MCS_DUP_POS_V1)
+
+/* Bit 27: (1) LDPC enabled, (0) LDPC disabled */
+#define IWX_RATE_MCS_LDPC_POS_V1 27
+#define IWX_RATE_MCS_LDPC_MSK_V1 (1 << IWX_RATE_MCS_LDPC_POS_V1)
+
+/* Bit 28: (1) 106-tone RX (8 MHz RU), (0) normal bandwidth */
+#define IWX_RATE_MCS_HE_106T_POS_V1 28
+#define IWX_RATE_MCS_HE_106T_MSK_V1 (1 << IWX_RATE_MCS_HE_106T_POS_V1)
+
+/* Bit 30-31: (1) RTS, (2) CTS */
+#define IWX_RATE_MCS_RTS_REQUIRED_POS (30)
+#define IWX_RATE_MCS_RTS_REQUIRED_MSK (1 << IWX_RATE_MCS_RTS_REQUIRED_POS)
+#define IWX_RATE_MCS_CTS_REQUIRED_POS (31)
+#define IWX_RATE_MCS_CTS_REQUIRED_MSK (1 << IWX_RATE_MCS_CTS_REQUIRED_POS)
+
+
+/* rate_n_flags bit field version 2
+ *
+ * The 32-bit value has different layouts in the low 8 bits depending on the
+ * format. There are three formats, HT, VHT and legacy (11abg, with subformats
+ * for CCK and OFDM).
+ *
+ */
+
+/* Bits 10-8: rate format
+ * (0) Legacy CCK (1) Legacy OFDM (2) High-throughput (HT)
+ * (3) Very High-throughput (VHT) (4) High-efficiency (HE)
+ * (5) Extremely High-throughput (EHT)
+ */
+#define IWX_RATE_MCS_MOD_TYPE_POS 8
+#define IWX_RATE_MCS_MOD_TYPE_MSK (0x7 << IWX_RATE_MCS_MOD_TYPE_POS)
+#define IWX_RATE_MCS_CCK_MSK (0 << IWX_RATE_MCS_MOD_TYPE_POS)
+#define IWX_RATE_MCS_LEGACY_OFDM_MSK (1 << IWX_RATE_MCS_MOD_TYPE_POS)
+#define IWX_RATE_MCS_HT_MSK (2 << IWX_RATE_MCS_MOD_TYPE_POS)
+#define IWX_RATE_MCS_VHT_MSK (3 << IWX_RATE_MCS_MOD_TYPE_POS)
+#define IWX_RATE_MCS_HE_MSK (4 << IWX_RATE_MCS_MOD_TYPE_POS)
+#define IWX_RATE_MCS_EHT_MSK (5 << IWX_RATE_MCS_MOD_TYPE_POS)
+
+/*
+ * Legacy CCK rate format for bits 0:3:
+ *
+ * (0) 0xa - 1 Mbps
+ * (1) 0x14 - 2 Mbps
+ * (2) 0x37 - 5.5 Mbps
+ * (3) 0x6e - 11 Mbps
+ *
+ * Legacy OFDM rate format for bits 0:3:
+ *
+ * (0) 6 Mbps
+ * (1) 9 Mbps
+ * (2) 12 Mbps
+ * (3) 18 Mbps
+ * (4) 24 Mbps
+ * (5) 36 Mbps
+ * (6) 48 Mbps
+ * (7) 54 Mbps
+ *
+ */
+#define IWX_RATE_LEGACY_RATE_MSK 0x7
+
+/*
+ * HT, VHT, HE, EHT rate format for bits 3:0
+ * 3-0: MCS
+ *
+ */
+#define IWX_RATE_HT_MCS_CODE_MSK 0x7
+#define IWX_RATE_MCS_NSS_POS 4
+#define IWX_RATE_MCS_NSS_MSK (1 << IWX_RATE_MCS_NSS_POS)
+#define IWX_RATE_MCS_CODE_MSK 0xf
+#define IWX_RATE_HT_MCS_INDEX(r) ((((r) & IWX_RATE_MCS_NSS_MSK) >> 1) | \
+ ((r) & IWX_RATE_HT_MCS_CODE_MSK))
+
+/* Bits 7-5: reserved */
+
+/*
+ * Bits 13-11: (0) 20MHz, (1) 40MHz, (2) 80MHz, (3) 160MHz, (4) 320MHz
+ */
+#define IWX_RATE_MCS_CHAN_WIDTH_MSK (0x7 << IWX_RATE_MCS_CHAN_WIDTH_POS)
+#define IWX_RATE_MCS_CHAN_WIDTH_20 (0 << IWX_RATE_MCS_CHAN_WIDTH_POS)
+#define IWX_RATE_MCS_CHAN_WIDTH_40 (1 << IWX_RATE_MCS_CHAN_WIDTH_POS)
+#define IWX_RATE_MCS_CHAN_WIDTH_80 (2 << IWX_RATE_MCS_CHAN_WIDTH_POS)
+#define IWX_RATE_MCS_CHAN_WIDTH_160 (3 << IWX_RATE_MCS_CHAN_WIDTH_POS)
+#define IWX_RATE_MCS_CHAN_WIDTH_320 (4 << IWX_RATE_MCS_CHAN_WIDTH_POS)
+
+/* Bit 15-14: Antenna selection:
+ * Bit 14: Ant A active
+ * Bit 15: Ant B active
+ *
+ * All relevant definitions are same as in v1
+ */
+
+/* Bit 16 (1) LDPC enables, (0) LDPC disabled */
+#define IWX_RATE_MCS_LDPC_POS 16
+#define IWX_RATE_MCS_LDPC_MSK (1 << IWX_RATE_MCS_LDPC_POS)
+
+/* Bit 17: (0) SS, (1) SS*2 (same as v1) */
+
+/* Bit 18: OFDM-HE dual carrier mode (same as v1) */
+
+/* Bit 19: (0) Beamforming is off, (1) Beamforming is on (same as v1) */
+
+/*
+ * Bit 22-20: HE LTF type and guard interval
+ * CCK:
+ * 0 long preamble
+ * 1 short preamble
+ * HT/VHT:
+ * 0 0.8us
+ * 1 0.4us
+ * HE (ext) SU:
+ * 0 1xLTF+0.8us
+ * 1 2xLTF+0.8us
+ * 2 2xLTF+1.6us
+ * 3 4xLTF+3.2us
+ * 4 4xLTF+0.8us
+ * HE MU:
+ * 0 4xLTF+0.8us
+ * 1 2xLTF+0.8us
+ * 2 2xLTF+1.6us
+ * 3 4xLTF+3.2us
+ * HE TRIG:
+ * 0 1xLTF+1.6us
+ * 1 2xLTF+1.6us
+ * 2 4xLTF+3.2us
+ * */
+#define IWX_RATE_MCS_HE_GI_LTF_MSK (0x7 << IWX_RATE_MCS_HE_GI_LTF_POS)
+#define IWX_RATE_MCS_SGI_POS IWX_RATE_MCS_HE_GI_LTF_POS
+#define IWX_RATE_MCS_SGI_MSK (1 << IWX_RATE_MCS_SGI_POS)
+#define IWX_RATE_MCS_HE_SU_4_LTF 3
+#define IWX_RATE_MCS_HE_SU_4_LTF_08_GI 4
+
+/* Bit 24-23: HE type. (0) SU, (1) SU_EXT, (2) MU, (3) trigger based */
+#define IWX_RATE_MCS_HE_TYPE_POS 23
+#define IWX_RATE_MCS_HE_TYPE_SU (0 << IWX_RATE_MCS_HE_TYPE_POS)
+#define IWX_RATE_MCS_HE_TYPE_EXT_SU (1 << IWX_RATE_MCS_HE_TYPE_POS)
+#define IWX_RATE_MCS_HE_TYPE_MU (2 << IWX_RATE_MCS_HE_TYPE_POS)
+#define IWX_RATE_MCS_HE_TYPE_TRIG (3 << IWX_RATE_MCS_HE_TYPE_POS)
+#define IWX_RATE_MCS_HE_TYPE_MSK (3 << IWX_RATE_MCS_HE_TYPE_POS)
+
+/* Bit 25: duplicate channel enabled
+ *
+ * if this bit is set, duplicate is according to BW (bits 11-13):
+ *
+ * CCK: 2x 20MHz
+ * OFDM Legacy: N x 20Mhz, (N = BW \ 2 , either 2, 4, 8, 16)
+ * EHT: 2 x BW/2, (80 - 2x40, 160 - 2x80, 320 - 2x160)
+ * */
+#define IWX_RATE_MCS_DUP_POS 25
+#define IWX_RATE_MCS_DUP_MSK (1 << IWX_RATE_MCS_DUP_POS)
+
+/* Bit 26: (1) 106-tone RX (8 MHz RU), (0) normal bandwidth */
+#define IWX_RATE_MCS_HE_106T_POS 26
+#define IWX_RATE_MCS_HE_106T_MSK (1 << IWX_RATE_MCS_HE_106T_POS)
+
+/* Bit 27: EHT extra LTF:
+ * instead of 1 LTF for SISO use 2 LTFs,
+ * instead of 2 LTFs for NSTS=2 use 4 LTFs*/
+#define IWX_RATE_MCS_EHT_EXTRA_LTF_POS 27
+#define IWX_RATE_MCS_EHT_EXTRA_LTF_MSK (1 << IWX_RATE_MCS_EHT_EXTRA_LTF_POS)
+
+/* Bit 31-28: reserved */
+
+
+/* Link Quality definitions */
+
+/* # entries in rate scale table to support Tx retries */
+#define IWX_LQ_MAX_RETRY_NUM 16
+
+/* Link quality command flags bit fields */
+
+/* Bit 0: (0) Don't use RTS (1) Use RTS */
+#define IWX_LQ_FLAG_USE_RTS_POS 0
+#define IWX_LQ_FLAG_USE_RTS_MSK (1 << IWX_LQ_FLAG_USE_RTS_POS)
+
+/* Bit 1-3: LQ command color. Used to match responses to LQ commands */
+#define IWX_LQ_FLAG_COLOR_POS 1
+#define IWX_LQ_FLAG_COLOR_MSK (7 << IWX_LQ_FLAG_COLOR_POS)
+
+/* Bit 4-5: Tx RTS BW Signalling
+ * (0) No RTS BW signalling
+ * (1) Static BW signalling
+ * (2) Dynamic BW signalling
+ */
+#define IWX_LQ_FLAG_RTS_BW_SIG_POS 4
+#define IWX_LQ_FLAG_RTS_BW_SIG_NONE (0 << IWX_LQ_FLAG_RTS_BW_SIG_POS)
+#define IWX_LQ_FLAG_RTS_BW_SIG_STATIC (1 << IWX_LQ_FLAG_RTS_BW_SIG_POS)
+#define IWX_LQ_FLAG_RTS_BW_SIG_DYNAMIC (2 << IWX_LQ_FLAG_RTS_BW_SIG_POS)
+
+/* Bit 6: (0) No dynamic BW selection (1) Allow dynamic BW selection
+ * Dynamic BW selection allows Tx with narrower BW then requested in rates
+ */
+#define IWX_LQ_FLAG_DYNAMIC_BW_POS 6
+#define IWX_LQ_FLAG_DYNAMIC_BW_MSK (1 << IWX_LQ_FLAG_DYNAMIC_BW_POS)
+
+#define IWX_RLC_CHAIN_INFO_DRIVER_FORCE (1 << 0)
+/*
+ * NOTE(review): the following five masks carry the Linux iwlwifi "IWL_"
+ * prefix instead of this driver's "IWX_" prefix (copied over from
+ * upstream). Renaming would be a mechanical cleanup, but must be done
+ * together with all users elsewhere in this file.
+ */
+#define IWL_RLC_CHAIN_INFO_VALID 0x000e
+#define IWL_RLC_CHAIN_INFO_FORCE 0x0070
+#define IWL_RLC_CHAIN_INFO_FORCE_MIMO 0x0380
+#define IWL_RLC_CHAIN_INFO_COUNT 0x0c00
+#define IWL_RLC_CHAIN_INFO_MIMO_COUNT 0x3000
+
+/**
+ * struct iwx_rlc_properties - RLC properties
+ * @rx_chain_info: RX chain info, IWX_RLC_CHAIN_INFO_*
+ * @reserved: reserved
+ */
+struct iwx_rlc_properties {
+ uint32_t rx_chain_info;
+ uint32_t reserved;
+} __packed; /* RLC_PROPERTIES_S_VER_1 */
+
+#define IWX_SAD_MODE_ENABLED (1 << 0)
+#define IWX_SAD_MODE_DEFAULT_ANT_MSK 0x6
+#define IWX_SAD_MODE_DEFAULT_ANT_FW 0x0
+#define IWX_SAD_MODE_DEFAULT_ANT_A 0x2
+#define IWX_SAD_MODE_DEFAULT_ANT_B 0x4
+
+/**
+ * struct iwx_sad_properties - SAD properties
+ * @chain_a_sad_mode: chain A SAD mode, IWX_SAD_MODE_*
+ * @chain_b_sad_mode: chain B SAD mode, IWX_SAD_MODE_*
+ * @mac_id: MAC index
+ * @reserved: reserved
+ */
+struct iwx_sad_properties {
+ uint32_t chain_a_sad_mode;
+ uint32_t chain_b_sad_mode;
+ uint32_t mac_id;
+ uint32_t reserved;
+} __packed;
+
+/**
+ * struct iwx_rlc_config_cmd - RLC configuration
+ * @phy_id: PHY index
+ * @rlc: RLC properties, &struct iwx_rlc_properties
+ * @sad: SAD (single antenna diversity) options, &struct iwx_sad_properties
+ * @flags: flags, IWX_RLC_FLAGS_*
+ * @reserved: reserved
+ */
+struct iwx_rlc_config_cmd {
+ uint32_t phy_id;
+ struct iwx_rlc_properties rlc;
+ struct iwx_sad_properties sad;
+ uint8_t flags;
+ uint8_t reserved[3];
+} __packed; /* RLC_CONFIG_CMD_API_S_VER_2 */
+
+#define IWX_MAX_BAID_OLD 16 /* MAX_IMMEDIATE_BA_API_D_VER_2 */
+#define IWX_MAX_BAID 32 /* MAX_IMMEDIATE_BA_API_D_VER_3 */
+
+/**
+ * BAID allocation/config action
+ * @IWX_RX_BAID_ACTION_ADD: add a new BAID session
+ * @IWX_RX_BAID_ACTION_MODIFY: modify the BAID session
+ * @IWX_RX_BAID_ACTION_REMOVE: remove the BAID session
+ */
+#define IWX_RX_BAID_ACTION_ADD 0
+#define IWX_RX_BAID_ACTION_MODIFY 1
+#define IWX_RX_BAID_ACTION_REMOVE 2
+/* RX_BAID_ALLOCATION_ACTION_E_VER_1 */
+
+/**
+ * struct iwx_rx_baid_cfg_cmd_alloc - BAID allocation data
+ * @sta_id_mask: station ID mask
+ * @tid: the TID for this session
+ * @reserved: reserved
+ * @ssn: the starting sequence number
+ * @win_size: RX BA session window size
+ */
+struct iwx_rx_baid_cfg_cmd_alloc {
+ uint32_t sta_id_mask;
+ uint8_t tid;
+ uint8_t reserved[3];
+ uint16_t ssn;
+ uint16_t win_size;
+} __packed; /* RX_BAID_ALLOCATION_ADD_CMD_API_S_VER_1 */
+
+/**
+ * struct iwx_rx_baid_cfg_cmd_modify - BAID modification data
+ * @old_sta_id_mask: old station ID mask
+ * @new_sta_id_mask: new station ID mask
+ * @tid: TID of the BAID
+ */
+struct iwx_rx_baid_cfg_cmd_modify {
+ uint32_t old_sta_id_mask;
+ uint32_t new_sta_id_mask;
+ uint32_t tid;
+} __packed; /* RX_BAID_ALLOCATION_MODIFY_CMD_API_S_VER_2 */
+
+/**
+ * struct iwx_rx_baid_cfg_cmd_remove_v1 - BAID removal data
+ * @baid: the BAID to remove
+ */
+struct iwx_rx_baid_cfg_cmd_remove_v1 {
+ uint32_t baid;
+} __packed; /* RX_BAID_ALLOCATION_REMOVE_CMD_API_S_VER_1 */
+
+/**
+ * struct iwx_rx_baid_cfg_cmd_remove - BAID removal data
+ * @sta_id_mask: the station mask of the BAID to remove
+ * @tid: the TID of the BAID to remove
+ */
+struct iwx_rx_baid_cfg_cmd_remove {
+ uint32_t sta_id_mask;
+ uint32_t tid;
+} __packed; /* RX_BAID_ALLOCATION_REMOVE_CMD_API_S_VER_2 */
+
+/**
+ * struct iwx_rx_baid_cfg_cmd - BAID allocation/config command
+ * @action: the action, from &enum iwx_rx_baid_action
+ */
+struct iwx_rx_baid_cfg_cmd {
+ uint32_t action;
+ union {
+ struct iwx_rx_baid_cfg_cmd_alloc alloc;
+ struct iwx_rx_baid_cfg_cmd_modify modify;
+ struct iwx_rx_baid_cfg_cmd_remove_v1 remove_v1;
+ struct iwx_rx_baid_cfg_cmd_remove remove;
+ }; /* RX_BAID_ALLOCATION_OPERATION_API_U_VER_2 */
+} __packed; /* RX_BAID_ALLOCATION_CONFIG_CMD_API_S_VER_2 */
+
+/**
+ * struct iwx_rx_baid_cfg_resp - BAID allocation response
+ * @baid: the allocated BAID
+ */
+struct iwx_rx_baid_cfg_resp {
+ uint32_t baid;
+}; /* RX_BAID_ALLOCATION_RESPONSE_API_S_VER_1 */
+
+/**
+ * scheduler queue operation
+ * @IWX_SCD_QUEUE_ADD: allocate a new queue
+ * @IWX_SCD_QUEUE_REMOVE: remove a queue
+ * @IWX_SCD_QUEUE_MODIFY: modify a queue
+ */
+#define IWX_SCD_QUEUE_ADD 0
+#define IWX_SCD_QUEUE_REMOVE 1
+#define IWX_SCD_QUEUE_MODIFY 2
+
+/**
+ * struct iwx_scd_queue_cfg_cmd - scheduler queue allocation command
+ * @operation: the operation, see &enum iwl_scd_queue_cfg_operation
+ * @u.add.sta_mask: station mask
+ * @u.add.tid: TID
+ * @u.add.reserved: reserved
+ * @u.add.flags: flags from &enum iwl_tx_queue_cfg_actions, except
+ * %TX_QUEUE_CFG_ENABLE_QUEUE is not valid
+ * @u.add.cb_size: size code
+ * @u.add.bc_dram_addr: byte-count table IOVA
+ * @u.add.tfdq_dram_addr: TFD queue IOVA
+ * @u.remove.sta_mask: station mask of queue to remove
+ * @u.remove.tid: TID of queue to remove
+ * @u.modify.old_sta_mask: old station mask for modify
+ * @u.modify.tid: TID of queue to modify
+ * @u.modify.new_sta_mask: new station mask for modify
+ */
+struct iwx_scd_queue_cfg_cmd {
+ uint32_t operation;
+ union {
+ struct {
+ uint32_t sta_mask;
+ uint8_t tid;
+ uint8_t reserved[3];
+ uint32_t flags;
+ uint32_t cb_size;
+ uint64_t bc_dram_addr;
+ uint64_t tfdq_dram_addr;
+ } __packed add; /* TX_QUEUE_CFG_CMD_ADD_API_S_VER_1 */
+ struct {
+ uint32_t sta_mask;
+ uint32_t tid;
+ } __packed remove; /* TX_QUEUE_CFG_CMD_REMOVE_API_S_VER_1 */
+ struct {
+ uint32_t old_sta_mask;
+ uint32_t tid;
+ uint32_t new_sta_mask;
+ } __packed modify; /* TX_QUEUE_CFG_CMD_MODIFY_API_S_VER_1 */
+ } __packed u; /* TX_QUEUE_CFG_CMD_OPERATION_API_U_VER_1 */
+} __packed; /* TX_QUEUE_CFG_CMD_API_S_VER_3 */
+
+/**
+ * Options for TLC config flags
+ * @IWX_TLC_MNG_CFG_FLAGS_STBC_MSK: enable STBC. For HE this enables STBC for
+ * bandwidths <= 80MHz
+ * @IWX_TLC_MNG_CFG_FLAGS_LDPC_MSK: enable LDPC
+ * @IWX_TLC_MNG_CFG_FLAGS_HE_STBC_160MHZ_MSK: enable STBC in HE at 160MHz
+ * bandwidth
+ * @IWX_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_1_MSK: enable HE Dual Carrier Modulation
+ * for BPSK (MCS 0) with 1 spatial
+ * stream
+ * @IWX_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_2_MSK: enable HE Dual Carrier Modulation
+ * for BPSK (MCS 0) with 2 spatial
+ * streams
+ */
+#define IWX_TLC_MNG_CFG_FLAGS_STBC_MSK (1 << 0)
+#define IWX_TLC_MNG_CFG_FLAGS_LDPC_MSK (1 << 1)
+#define IWX_TLC_MNG_CFG_FLAGS_HE_STBC_160MHZ_MSK (1 << 2)
+#define IWX_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_1_MSK (1 << 3)
+#define IWX_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_2_MSK (1 << 4)
+
+/**
+ * enum iwx_tlc_mng_cfg_cw - channel width options
+ * @IWX_TLC_MNG_CH_WIDTH_20MHZ: 20MHZ channel
+ * @IWX_TLC_MNG_CH_WIDTH_40MHZ: 40MHZ channel
+ * @IWX_TLC_MNG_CH_WIDTH_80MHZ: 80MHZ channel
+ * @IWX_TLC_MNG_CH_WIDTH_160MHZ: 160MHZ channel
+ * @IWX_TLC_MNG_CH_WIDTH_320MHZ: 320MHZ channel
+ */
+enum iwx_tlc_mng_cfg_cw {
+ IWX_TLC_MNG_CH_WIDTH_20MHZ,
+ IWX_TLC_MNG_CH_WIDTH_40MHZ,
+ IWX_TLC_MNG_CH_WIDTH_80MHZ,
+ IWX_TLC_MNG_CH_WIDTH_160MHZ,
+ IWX_TLC_MNG_CH_WIDTH_320MHZ,
+};
+
+/**
+ * @IWX_TLC_MNG_CHAIN_A_MSK: chain A
+ * @IWX_TLC_MNG_CHAIN_B_MSK: chain B
+ */
+#define IWX_TLC_MNG_CHAIN_A_MSK (1 << 0)
+#define IWX_TLC_MNG_CHAIN_B_MSK (1 << 1)
+
+/**
+ * enum iwx_tlc_mng_cfg_mode - supported modes
+ * @IWX_TLC_MNG_MODE_CCK: enable CCK
+ * @IWX_TLC_MNG_MODE_OFDM_NON_HT: enable OFDM (non HT)
+ * @IWX_TLC_MNG_MODE_NON_HT: enable non HT
+ * @IWX_TLC_MNG_MODE_HT: enable HT
+ * @IWX_TLC_MNG_MODE_VHT: enable VHT
+ * @IWX_TLC_MNG_MODE_HE: enable HE
+ * @IWX_TLC_MNG_MODE_EHT: enable EHT
+ */
+enum iwx_tlc_mng_cfg_mode {
+ IWX_TLC_MNG_MODE_CCK = 0,
+ IWX_TLC_MNG_MODE_OFDM_NON_HT = IWX_TLC_MNG_MODE_CCK,
+ IWX_TLC_MNG_MODE_NON_HT = IWX_TLC_MNG_MODE_CCK,
+ IWX_TLC_MNG_MODE_HT,
+ IWX_TLC_MNG_MODE_VHT,
+ IWX_TLC_MNG_MODE_HE,
+ IWX_TLC_MNG_MODE_EHT,
+};
+
+/**
+ * @IWX_TLC_MNG_HT_RATE_MCS0: index of MCS0
+ * @IWX_TLC_MNG_HT_RATE_MCS1: index of MCS1
+ * @IWX_TLC_MNG_HT_RATE_MCS2: index of MCS2
+ * @IWX_TLC_MNG_HT_RATE_MCS3: index of MCS3
+ * @IWX_TLC_MNG_HT_RATE_MCS4: index of MCS4
+ * @IWX_TLC_MNG_HT_RATE_MCS5: index of MCS5
+ * @IWX_TLC_MNG_HT_RATE_MCS6: index of MCS6
+ * @IWX_TLC_MNG_HT_RATE_MCS7: index of MCS7
+ * @IWX_TLC_MNG_HT_RATE_MCS8: index of MCS8
+ * @IWX_TLC_MNG_HT_RATE_MCS9: index of MCS9
+ * @IWX_TLC_MNG_HT_RATE_MCS10: index of MCS10
+ * @IWX_TLC_MNG_HT_RATE_MCS11: index of MCS11
+ * @IWX_TLC_MNG_HT_RATE_MAX: maximal rate for HT/VHT
+ */
+enum iwx_tlc_mng_ht_rates {
+ IWX_TLC_MNG_HT_RATE_MCS0 = 0,
+ IWX_TLC_MNG_HT_RATE_MCS1,
+ IWX_TLC_MNG_HT_RATE_MCS2,
+ IWX_TLC_MNG_HT_RATE_MCS3,
+ IWX_TLC_MNG_HT_RATE_MCS4,
+ IWX_TLC_MNG_HT_RATE_MCS5,
+ IWX_TLC_MNG_HT_RATE_MCS6,
+ IWX_TLC_MNG_HT_RATE_MCS7,
+ IWX_TLC_MNG_HT_RATE_MCS8,
+ IWX_TLC_MNG_HT_RATE_MCS9,
+ IWX_TLC_MNG_HT_RATE_MCS10,
+ IWX_TLC_MNG_HT_RATE_MCS11,
+ IWX_TLC_MNG_HT_RATE_MAX = IWX_TLC_MNG_HT_RATE_MCS11,
+};
+
+#define IWX_TLC_NSS_1 0
+#define IWX_TLC_NSS_2 1
+#define IWX_TLC_NSS_MAX 2
+
+
+/**
+ * IWX_TLC_MCS_PER_BW - mcs index per BW
+ * @IWX_TLC_MCS_PER_BW_80: MCS set for bandwidths 20MHz, 40MHz and 80MHz
+ * @IWX_TLC_MCS_PER_BW_160: MCS set for 160MHz bandwidth
+ * @IWX_TLC_MCS_PER_BW_320: MCS set for 320MHz bandwidth
+ * @IWX_TLC_MCS_PER_BW_NUM_V3: number of entries up to version 3
+ * @IWX_TLC_MCS_PER_BW_NUM_V4: number of entries from version 4
+ */
+#define IWX_TLC_MCS_PER_BW_80 0
+#define IWX_TLC_MCS_PER_BW_160 1
+#define IWX_TLC_MCS_PER_BW_320 2
+#define IWX_TLC_MCS_PER_BW_NUM_V3 (IWX_TLC_MCS_PER_BW_160 + 1)
+#define IWX_TLC_MCS_PER_BW_NUM_V4 (IWX_TLC_MCS_PER_BW_320 + 1)
+
+/**
+ * struct iwx_tlc_config_cmd_v3 - TLC configuration version 3
+ * @sta_id: station id
+ * @reserved1: reserved
+ * @max_ch_width: max supported channel width from @enum iwx_tlc_mng_cfg_cw
+ * @mode: &enum iwx_tlc_mng_cfg_mode
+ * @chains: bitmask of IWX_TLC_MNG_CHAIN_*_MSK
+ * @amsdu: 1 = TX amsdu is supported, 0 = not supported
+ * @flags: bitmask of IWX_TLC_MNG_CFG_*
+ * @non_ht_rates: bitmap of supported legacy rates
+ * @ht_rates: MCS index 0 - 11, per <nss, channel-width>
+ * pair (0 - 80mhz width and below, 1 - 160mhz).
+ * @max_mpdu_len: max MPDU length, in bytes
+ * @sgi_ch_width_supp: bitmap of SGI support per channel width
+ * use (1 << IWX_TLC_MNG_CFG_CW_*)
+ * @reserved2: reserved
+ * @max_tx_op: max TXOP in uSecs for all AC (BK, BE, VO, VI),
+ * set zero for no limit.
+ */
+struct iwx_tlc_config_cmd_v3 {
+ uint8_t sta_id;
+ uint8_t reserved1[3];
+ uint8_t max_ch_width;
+ uint8_t mode;
+ uint8_t chains;
+ uint8_t amsdu;
+ uint16_t flags;
+ uint16_t non_ht_rates;
+ uint16_t ht_rates[IWX_TLC_NSS_MAX][IWX_TLC_MCS_PER_BW_NUM_V3];
+ uint16_t max_mpdu_len;
+ uint8_t sgi_ch_width_supp;
+ uint8_t reserved2;
+ uint32_t max_tx_op;
+} __packed; /* TLC_MNG_CONFIG_CMD_API_S_VER_3 */
+
+/**
+ * struct iwx_tlc_config_cmd_v4 - TLC configuration
+ * @sta_id: station id
+ * @reserved1: reserved
+ * @max_ch_width: max supported channel width from @enum iwx_tlc_mng_cfg_cw
+ * @mode: &enum iwx_tlc_mng_cfg_mode
+ * @chains: bitmask of IWX_TLC_MNG_CHAIN_*_MSK
+ * @sgi_ch_width_supp: bitmap of SGI support per channel width
+ * use (1 << IWX_TLC_MNG_CFG_CW_*)
+ * @flags: bitmask of IWX_TLC_MNG_CFG_*
+ * @non_ht_rates: bitmap of supported legacy rates
+ * @ht_rates: MCS index 0 - 11, per <nss, channel-width>
+ * pair (0 - 80mhz width and below, 1 - 160mhz, 2 - 320mhz).
+ * @max_mpdu_len: max MPDU length, in bytes
+ * @max_tx_op: max TXOP in uSecs for all AC (BK, BE, VO, VI),
+ * set zero for no limit.
+ */
+struct iwx_tlc_config_cmd_v4 {
+ uint8_t sta_id;
+ uint8_t reserved1[3];
+ uint8_t max_ch_width;
+ uint8_t mode;
+ uint8_t chains;
+ uint8_t sgi_ch_width_supp;
+ uint16_t flags;
+ uint16_t non_ht_rates;
+ uint16_t ht_rates[IWX_TLC_NSS_MAX][IWX_TLC_MCS_PER_BW_NUM_V4];
+ uint16_t max_mpdu_len;
+ uint16_t max_tx_op;
+} __packed; /* TLC_MNG_CONFIG_CMD_API_S_VER_4 */
+
+/**
+ * @IWX_TLC_NOTIF_FLAG_RATE: last initial rate update
+ * @IWX_TLC_NOTIF_FLAG_AMSDU: amsdu parameters update
+ */
+#define IWX_TLC_NOTIF_FLAG_RATE (1 << 0)
+#define IWX_TLC_NOTIF_FLAG_AMSDU (1 << 1)
+
+/**
+ * struct iwx_tlc_update_notif - TLC notification from FW
+ * @sta_id: station id
+ * @reserved: reserved
+ * @flags: bitmap of notifications reported
+ * @rate: current initial rate; using rate_n_flags version 1 if notification
+ * version is < 3 at run-time, else rate_n_flags version 2
+ * @amsdu_size: Max AMSDU size, in bytes
+ * @amsdu_enabled: bitmap for per-TID AMSDU enablement
+ */
+struct iwx_tlc_update_notif {
+ uint8_t sta_id;
+ uint8_t reserved[3];
+ uint32_t flags;
+ uint32_t rate;
+ uint32_t amsdu_size;
+ uint32_t amsdu_enabled;
+} __packed; /* TLC_MNG_UPDATE_NTFY_API_S_VER_2 */
+
+/* Antenna flags. */
+#define IWX_ANT_A (1 << 0)
+#define IWX_ANT_B (1 << 1)
+#define IWX_ANT_C (1 << 2)
+/* Shortcuts. */
+#define IWX_ANT_AB (IWX_ANT_A | IWX_ANT_B)
+#define IWX_ANT_BC (IWX_ANT_B | IWX_ANT_C)
+#define IWX_ANT_ABC (IWX_ANT_A | IWX_ANT_B | IWX_ANT_C)
+
+/*
+ * TX command security control
+ */
+#define IWX_TX_CMD_SEC_WEP 0x01
+#define IWX_TX_CMD_SEC_CCM 0x02
+#define IWX_TX_CMD_SEC_TKIP 0x03
+#define IWX_TX_CMD_SEC_EXT 0x04
+#define IWX_TX_CMD_SEC_MSK 0x07
+#define IWX_TX_CMD_SEC_WEP_KEY_IDX_POS 6
+#define IWX_TX_CMD_SEC_WEP_KEY_IDX_MSK 0xc0
+#define IWX_TX_CMD_SEC_KEY128 0x08
+
+/* TODO: verify how these values fit into a 16-bit field. */
+/*
+ * TX command next frame info
+ *
+ * bits 0:2 - security control (IWX_TX_CMD_SEC_*)
+ * bit 3 - immediate ACK required
+ * bit 4 - rate is taken from STA table
+ * bit 5 - frame belongs to BA stream
+ * bit 6 - immediate BA response expected
+ * bit 7 - unused
+ * bits 8:15 - Station ID
+ * bits 16:31 - rate
+ */
+#define IWX_TX_CMD_NEXT_FRAME_ACK_MSK (0x8)
+#define IWX_TX_CMD_NEXT_FRAME_STA_RATE_MSK (0x10)
+#define IWX_TX_CMD_NEXT_FRAME_BA_MSK (0x20)
+#define IWX_TX_CMD_NEXT_FRAME_IMM_BA_RSP_MSK (0x40)
+#define IWX_TX_CMD_NEXT_FRAME_FLAGS_MSK (0xf8)
+#define IWX_TX_CMD_NEXT_FRAME_STA_ID_MSK (0xff00)
+#define IWX_TX_CMD_NEXT_FRAME_STA_ID_POS (8)
+#define IWX_TX_CMD_NEXT_FRAME_RATE_MSK (0xffff0000)
+#define IWX_TX_CMD_NEXT_FRAME_RATE_POS (16)
+
+/*
+ * TX command Frame life time in us - to be written in pm_frame_timeout
+ */
+#define IWX_TX_CMD_LIFE_TIME_INFINITE 0xFFFFFFFF
+#define IWX_TX_CMD_LIFE_TIME_DEFAULT 2000000 /* 2000 ms*/
+#define IWX_TX_CMD_LIFE_TIME_PROBE_RESP 40000 /* 40 ms */
+#define IWX_TX_CMD_LIFE_TIME_EXPIRED_FRAME 0
+
+/*
+ * TID for non QoS frames - to be written in tid_tspec
+ */
+#define IWX_TID_NON_QOS 0
+
+/*
+ * Limits on the retransmissions - to be written in {data,rts}_retry_limit
+ */
+#define IWX_DEFAULT_TX_RETRY 15
+#define IWX_MGMT_DFAULT_RETRY_LIMIT 3
+#define IWX_RTS_DFAULT_RETRY_LIMIT 3
+#define IWX_BAR_DFAULT_RETRY_LIMIT 60
+#define IWX_LOW_RETRY_LIMIT 7
+
+/*
+ * The FH will write back to the first TB only, so we need to copy some data
+ * into the buffer regardless of whether it should be mapped or not.
+ * This indicates how big the first TB must be to include the scratch buffer
+ * and the assigned PN.
+ * Since PN location is 8 bytes at offset 12, it's 20 now.
+ * If we make it bigger then allocations will be bigger and copy slower, so
+ * that's probably not useful.
+ */
+#define IWX_FIRST_TB_SIZE 20
+#define IWX_FIRST_TB_SIZE_ALIGN ((IWX_FIRST_TB_SIZE + (64 - 1)) & ~(64 - 1))
+
+/**
+ * %iwx_tx_cmd offload_assist values
+ * @TX_CMD_OFFLD_IP_HDR: offset to start of IP header (in words)
+ * from mac header end. For normal case it is 4 words for SNAP.
+ * note: tx_cmd, mac header and pad are not counted in the offset.
+ * This is used to help the offload in case there is tunneling such as
+ * IPv6 in IPv4, in such case the ip header offset should point to the
+ * inner ip header and IPv4 checksum of the external header should be
+ * calculated by driver.
+ * @TX_CMD_OFFLD_L4_EN: enable TCP/UDP checksum
+ * @TX_CMD_OFFLD_L3_EN: enable IP header checksum
+ * @TX_CMD_OFFLD_MH_SIZE: size of the mac header in words. Includes the IV
+ * field. Doesn't include the pad.
+ * @TX_CMD_OFFLD_PAD: mark 2-byte pad was inserted after the mac header for
+ * alignment
+ * @TX_CMD_OFFLD_AMSDU: mark TX command is A-MSDU
+ */
+#define IWX_TX_CMD_OFFLD_IP_HDR(x) ((x) << 0)
+#define IWX_TX_CMD_OFFLD_L4_EN (1 << 6)
+#define IWX_TX_CMD_OFFLD_L3_EN (1 << 7)
+#define IWX_TX_CMD_OFFLD_MH_SIZE(x) ((x) << 8)
+#define IWX_TX_CMD_OFFLD_PAD (1 << 13)
+#define IWX_TX_CMD_OFFLD_AMSDU (1 << 14)
+#define IWX_TX_CMD_OFFLD_MH_MASK 0x1f
+#define IWX_TX_CMD_OFFLD_IP_HDR_MASK 0x3f
+
+struct iwx_dram_sec_info {
+ uint32_t pn_low;
+ uint16_t pn_high;
+ uint16_t aux_info;
+} __packed; /* DRAM_SEC_INFO_API_S_VER_1 */
+
+/**
+ * bitmasks for tx_flags in TX command for 22000
+ * @IWX_TX_FLAGS_CMD_RATE: use rate from the TX command
+ * @IWX_TX_FLAGS_ENCRYPT_DIS: frame should not be encrypted, even if it belongs
+ * to a secured STA
+ * @IWX_TX_FLAGS_HIGH_PRI: high priority frame (like EAPOL) - can affect rate
+ * selection, retry limits and BT kill
+ */
+/* Valid for TX_FLAGS_BITS_API_S_VER_3: */
+#define IWX_TX_FLAGS_CMD_RATE (1 << 0)
+#define IWX_TX_FLAGS_ENCRYPT_DIS (1 << 1)
+#define IWX_TX_FLAGS_HIGH_PRI (1 << 2)
+/* Valid for TX_FLAGS_BITS_API_S_VER_4 and above: */
+#define IWX_TX_FLAGS_RTS (1 << 3)
+#define IWX_TX_FLAGS_CTS (1 << 4)
+/* TX_FLAGS_BITS_API_S_VER_4 */
+
+/**
+ * struct iwx_tx_cmd_gen2 - TX command struct to FW for 22000 devices
+ * ( TX_CMD = 0x1c )
+ * @len: in bytes of the payload, see below for details
+ * @offload_assist: TX offload configuration
+ * @flags: combination of TX_FLAGS_*
+ * @dram_info: FW internal DRAM storage
+ * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is
+ * cleared. Combination of RATE_MCS_*
+ * @hdr: 802.11 header
+ */
+struct iwx_tx_cmd_gen2 {
+ uint16_t len;
+ uint16_t offload_assist;
+ uint32_t flags;
+ struct iwx_dram_sec_info dram_info;
+ uint32_t rate_n_flags;
+ /*
+ * C99 flexible array member (was GNU zero-length "hdr[0]");
+ * matches struct iwx_tx_cmd_gen3 below. Layout and sizeof are
+ * unchanged.
+ */
+ struct ieee80211_frame hdr[];
+} __packed; /* TX_CMD_API_S_VER_7,
+ TX_CMD_API_S_VER_9 */
+
+/**
+ * struct iwx_tx_cmd_gen3 - TX command struct to FW for AX210+ devices
+ * ( TX_CMD = 0x1c )
+ * @len: in bytes of the payload, see below for details
+ * @flags: combination of TX_FLAGS_*
+ * @offload_assist: TX offload configuration
+ * @dram_info: FW internal DRAM storage
+ * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is
+ * cleared. Combination of RATE_MCS_*
+ * @reserved: reserved
+ * @hdr: 802.11 header
+ */
+struct iwx_tx_cmd_gen3 {
+ uint16_t len;
+ uint16_t flags;
+ uint32_t offload_assist;
+ struct iwx_dram_sec_info dram_info;
+ uint32_t rate_n_flags;
+ uint8_t reserved[8];
+ struct ieee80211_frame hdr[];
+} __packed; /* TX_CMD_API_S_VER_8,
+ TX_CMD_API_S_VER_10 */
+
+/*
+ * TX response related data
+ */
+
+/*
+ * status that is returned by the fw after attempts to Tx
+ * @IWX_TX_STATUS_FAIL_STA_COLOR_MISMATCH: mismatch between color of Tx cmd and
+ * STA table
+ * Valid only if frame_count =1
+ */
+#define IWX_TX_STATUS_MSK 0x000000ff
+#define IWX_TX_STATUS_SUCCESS 0x01
+#define IWX_TX_STATUS_DIRECT_DONE 0x02
+/* postpone TX */
+#define IWX_TX_STATUS_POSTPONE_DELAY 0x40
+#define IWX_TX_STATUS_POSTPONE_FEW_BYTES 0x41
+#define IWX_TX_STATUS_POSTPONE_BT_PRIO 0x42
+#define IWX_TX_STATUS_POSTPONE_QUIET_PERIOD 0x43
+#define IWX_TX_STATUS_POSTPONE_CALC_TTAK 0x44
+/* abort TX */
+#define IWX_TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY 0x81
+#define IWX_TX_STATUS_FAIL_SHORT_LIMIT 0x82
+#define IWX_TX_STATUS_FAIL_LONG_LIMIT 0x83
+#define IWX_TX_STATUS_FAIL_UNDERRUN 0x84
+#define IWX_TX_STATUS_FAIL_DRAIN_FLOW 0x85
+#define IWX_TX_STATUS_FAIL_RFKILL_FLUSH 0x86
+#define IWX_TX_STATUS_FAIL_LIFE_EXPIRE 0x87
+#define IWX_TX_STATUS_FAIL_DEST_PS 0x88
+#define IWX_TX_STATUS_FAIL_HOST_ABORTED 0x89
+#define IWX_TX_STATUS_FAIL_BT_RETRY 0x8a
+#define IWX_TX_STATUS_FAIL_STA_INVALID 0x8b
+#define IWX_TX_STATUS_FAIL_FRAG_DROPPED 0x8c
+#define IWX_TX_STATUS_FAIL_TID_DISABLE 0x8d
+#define IWX_TX_STATUS_FAIL_FIFO_FLUSHED 0x8e
+#define IWX_TX_STATUS_FAIL_SMALL_CF_POLL 0x8f
+#define IWX_TX_STATUS_FAIL_FW_DROP 0x90
+#define IWX_TX_STATUS_FAIL_STA_COLOR_MISMATCH 0x91
+#define IWX_TX_STATUS_INTERNAL_ABORT 0x92
+#define IWX_TX_MODE_MSK 0x00000f00
+#define IWX_TX_MODE_NO_BURST 0x00000000
+#define IWX_TX_MODE_IN_BURST_SEQ 0x00000100
+#define IWX_TX_MODE_FIRST_IN_BURST 0x00000200
+#define IWX_TX_QUEUE_NUM_MSK 0x0001f000
+#define IWX_TX_NARROW_BW_MSK 0x00060000
+#define IWX_TX_NARROW_BW_1DIV2 0x00020000
+#define IWX_TX_NARROW_BW_1DIV4 0x00040000
+#define IWX_TX_NARROW_BW_1DIV8 0x00060000
+
+/*
+ * TX aggregation status
+ * @IWX_AGG_TX_STATE_TRY_CNT_MSK: Retry count for 1st frame in aggregation (retries
+ * occur if tx failed for this frame when it was a member of a previous
+ * aggregation block). If rate scaling is used, retry count indicates the
+ * rate table entry used for all frames in the new agg.
+ * @IWX_AGG_TX_STATE_SEQ_NUM_MSK: Command ID and sequence number of Tx command for
+ * this frame
+ */
+#define IWX_AGG_TX_STATE_STATUS_MSK 0x0fff
+#define IWX_AGG_TX_STATE_TRANSMITTED 0x0000
+#define IWX_AGG_TX_STATE_UNDERRUN 0x0001
+#define IWX_AGG_TX_STATE_BT_PRIO 0x0002
+#define IWX_AGG_TX_STATE_FEW_BYTES 0x0004
+#define IWX_AGG_TX_STATE_ABORT 0x0008
+#define IWX_AGG_TX_STATE_LAST_SENT_TTL 0x0010
+#define IWX_AGG_TX_STATE_LAST_SENT_TRY_CNT 0x0020
+#define IWX_AGG_TX_STATE_LAST_SENT_BT_KILL 0x0040
+#define IWX_AGG_TX_STATE_SCD_QUERY 0x0080
+#define IWX_AGG_TX_STATE_TEST_BAD_CRC32 0x0100
+#define IWX_AGG_TX_STATE_RESPONSE 0x01ff
+#define IWX_AGG_TX_STATE_DUMP_TX 0x0200
+#define IWX_AGG_TX_STATE_DELAY_TX 0x0400
+#define IWX_AGG_TX_STATE_TRY_CNT_POS 12
+#define IWX_AGG_TX_STATE_TRY_CNT_MSK (0xf << IWX_AGG_TX_STATE_TRY_CNT_POS)
+
+#define IWX_AGG_TX_STATE_LAST_SENT_MSK (IWX_AGG_TX_STATE_LAST_SENT_TTL| \
+ IWX_AGG_TX_STATE_LAST_SENT_TRY_CNT| \
+ IWX_AGG_TX_STATE_LAST_SENT_BT_KILL)
+
+/*
+ * The mask below describes a status where we are absolutely sure that the MPDU
+ * wasn't sent. For BA/Underrun we cannot be that sure. All we know that we've
+ * written the bytes to the TXE, but we know nothing about what the DSP did.
+ */
+#define IWX_AGG_TX_STAT_FRAME_NOT_SENT (IWX_AGG_TX_STATE_FEW_BYTES | \
+ IWX_AGG_TX_STATE_ABORT | \
+ IWX_AGG_TX_STATE_SCD_QUERY)
+
+/*
+ * IWX_REPLY_TX = 0x1c (response)
+ *
+ * This response may be in one of two slightly different formats, indicated
+ * by the frame_count field:
+ *
+ * 1) No aggregation (frame_count == 1). This reports Tx results for a single
+ * frame. Multiple attempts, at various bit rates, may have been made for
+ * this frame.
+ *
+ * 2) Aggregation (frame_count > 1). This reports Tx results for two or more
+ * frames that used block-acknowledge. All frames were transmitted at
+ * same rate. Rate scaling may have been used if first frame in this new
+ * agg block failed in previous agg block(s).
+ *
+ * Note that, for aggregation, ACK (block-ack) status is not delivered
+ * here; block-ack has not been received by the time the device records
+ * this status.
+ * This status relates to reasons the tx might have been blocked or aborted
+ * within the device, rather than whether it was received successfully by
+ * the destination station.
+ */
+
+/**
+ * struct iwx_agg_tx_status - per packet TX aggregation status
+ * @status: enum iwx_tx_agg_status
+ * @sequence: Sequence # for this frame's Tx cmd (not SSN!)
+ */
+struct iwx_agg_tx_status {
+ uint16_t status;
+ uint16_t sequence;
+} __packed;
+
+/*
+ * definitions for initial rate index field
+ * bits [3:0] initial rate index
+ * bits [6:4] rate table color, used for the initial rate
+ * bit-7 invalid rate indication
+ */
+#define IWX_TX_RES_INIT_RATE_INDEX_MSK 0x0f
+#define IWX_TX_RES_RATE_TABLE_COLOR_MSK 0x70
+#define IWX_TX_RES_INV_RATE_INDEX_MSK 0x80
+
+#define IWX_TX_RES_GET_TID(_ra_tid) ((_ra_tid) & 0x0f)
+#define IWX_TX_RES_GET_RA(_ra_tid) ((_ra_tid) >> 4)
+
+/**
+ * struct iwx_tx_resp_v3 - notifies that fw is TXing a packet
+ * ( IWX_REPLY_TX = 0x1c )
+ * @frame_count: 1 no aggregation, >1 aggregation
+ * @bt_kill_count: num of times blocked by bluetooth (unused for agg)
+ * @failure_rts: num of failures due to unsuccessful RTS
+ * @failure_frame: num failures due to no ACK (unused for agg)
+ * @initial_rate: for non-agg: rate of the successful Tx. For agg: rate of the
+ * Tx of all the batch. IWX_RATE_MCS_*
+ * @wireless_media_time: for non-agg: RTS + CTS + frame tx attempts time + ACK.
+ * for agg: RTS + CTS + aggregation tx time + block-ack time.
+ * in usec.
+ * @pa_status: tx power info
+ * @pa_integ_res_a: tx power info
+ * @pa_integ_res_b: tx power info
+ * @pa_integ_res_c: tx power info
+ * @measurement_req_id: tx power info
+ * @tfd_info: TFD information set by the FH
+ * @seq_ctl: sequence control from the Tx cmd
+ * @byte_cnt: byte count from the Tx cmd
+ * @tlc_info: TLC rate info
+ * @ra_tid: bits [3:0] = ra, bits [7:4] = tid
+ * @frame_ctrl: frame control
+ * @status: for non-agg: frame status IWX_TX_STATUS_*
+ * for agg: status of 1st frame, IWX_AGG_TX_STATE_*; other frame status fields
+ * follow this one, up to frame_count.
+ *
+ * After the array of statuses comes the SSN of the SCD. Look at
+ * %iwx_get_scd_ssn for more details.
+ */
+struct iwx_tx_resp_v3 {
+ uint8_t frame_count;
+ uint8_t bt_kill_count;
+ uint8_t failure_rts;
+ uint8_t failure_frame;
+ uint32_t initial_rate;
+ uint16_t wireless_media_time;
+
+ uint8_t pa_status;
+ uint8_t pa_integ_res_a[3];
+ uint8_t pa_integ_res_b[3];
+ uint8_t pa_integ_res_c[3];
+ uint16_t measurement_req_id;
+ uint16_t reserved;
+
+ uint32_t tfd_info;
+ uint16_t seq_ctl;
+ uint16_t byte_cnt;
+ uint8_t tlc_info;
+ uint8_t ra_tid;
+ uint16_t frame_ctrl;
+
+ struct iwx_agg_tx_status status;
+} __packed; /* IWX_TX_RSP_API_S_VER_3 */
+
+/**
+ * struct iwx_tx_resp - notifies that fw is TXing a packet
+ * ( REPLY_TX = 0x1c )
+ * @frame_count: 1 no aggregation, >1 aggregation
+ * @bt_kill_count: num of times blocked by bluetooth (unused for agg)
+ * @failure_rts: num of failures due to unsuccessful RTS
+ * @failure_frame: num failures due to no ACK (unused for agg)
+ * @initial_rate: for non-agg: rate of the successful Tx. For agg: rate of the
+ * Tx of all the batch. RATE_MCS_*
+ * @wireless_media_time: for non-agg: RTS + CTS + frame tx attempts time + ACK.
+ * for agg: RTS + CTS + aggregation tx time + block-ack time.
+ * in usec.
+ * @pa_status: tx power info
+ * @pa_integ_res_a: tx power info
+ * @pa_integ_res_b: tx power info
+ * @pa_integ_res_c: tx power info
+ * @measurement_req_id: tx power info
+ * @reduced_tpc: transmit power reduction used
+ * @reserved: reserved
+ * @tfd_info: TFD information set by the FH
+ * @seq_ctl: sequence control from the Tx cmd
+ * @byte_cnt: byte count from the Tx cmd
+ * @tlc_info: TLC rate info
+ * @ra_tid: bits [3:0] = ra, bits [7:4] = tid
+ * @frame_ctrl: frame control
+ * @tx_queue: TX queue for this response
+ * @reserved2: reserved for padding/alignment
+ * @status: for non-agg: frame status TX_STATUS_*
+ * For version 6 TX response isn't received for aggregation at all.
+ *
+ * After the array of statuses comes the SSN of the SCD. Look at
+ * %iwx_get_scd_ssn for more details.
+ */
+struct iwx_tx_resp {
+ uint8_t frame_count;
+ uint8_t bt_kill_count;
+ uint8_t failure_rts;
+ uint8_t failure_frame;
+ uint32_t initial_rate;
+ uint16_t wireless_media_time;
+
+ uint8_t pa_status;
+ uint8_t pa_integ_res_a[3];
+ uint8_t pa_integ_res_b[3];
+ uint8_t pa_integ_res_c[3];
+ uint16_t measurement_req_id;
+ uint8_t reduced_tpc;
+ uint8_t reserved;
+
+ uint32_t tfd_info;
+ uint16_t seq_ctl;
+ uint16_t byte_cnt;
+ uint8_t tlc_info;
+ uint8_t ra_tid;
+ uint16_t frame_ctrl;
+ uint16_t tx_queue;
+ uint16_t reserved2;
+ struct iwx_agg_tx_status status;
+} __packed; /* TX_RSP_API_S_VER_6 */
+
+/**
+ * struct iwx_compressed_ba_tfd - progress of a TFD queue
+ * @q_num: TFD queue number
+ * @tfd_index: Index of first un-acked frame in the TFD queue
+ * @scd_queue: For debug only - the physical queue the TFD queue is bound to
+ * @tid: TID of the queue (0-7)
+ * @reserved: reserved for alignment
+ */
+struct iwx_compressed_ba_tfd {
+ uint16_t q_num;
+ uint16_t tfd_index;
+ uint8_t scd_queue;
+ uint8_t tid;
+ uint8_t reserved[2];
+} __packed; /* COMPRESSED_BA_TFD_API_S_VER_1 */
+
+/**
+ * struct iwx_compressed_ba_ratid - progress of a RA TID queue
+ * @q_num: RA TID queue number
+ * @tid: TID of the queue
+ * @ssn: BA window current SSN
+ */
+struct iwx_compressed_ba_ratid {
+ uint8_t q_num;
+ uint8_t tid;
+ uint16_t ssn;
+} __packed; /* COMPRESSED_BA_RATID_API_S_VER_1 */
+
+/*
+ * enum iwx_ba_resp_flags - TX aggregation status
+ * @IWX_MVM_BA_RESP_TX_AGG: generated due to BA
+ * @IWX_MVM_BA_RESP_TX_BAR: generated due to BA after BAR
+ * @IWX_MVM_BA_RESP_TX_AGG_FAIL: aggregation didn't receive BA
+ * @IWX_MVM_BA_RESP_TX_UNDERRUN: aggregation got underrun
+ * @IWX_MVM_BA_RESP_TX_BT_KILL: aggregation got BT-kill
+ * @IWX_MVM_BA_RESP_TX_DSP_TIMEOUT: aggregation didn't finish within the
+ * expected time
+ */
+enum iwx_ba_resp_flags {
+ IWX_MVM_BA_RESP_TX_AGG,
+ IWX_MVM_BA_RESP_TX_BAR,
+ IWX_MVM_BA_RESP_TX_AGG_FAIL,
+ IWX_MVM_BA_RESP_TX_UNDERRUN,
+ IWX_MVM_BA_RESP_TX_BT_KILL,
+ IWX_MVM_BA_RESP_TX_DSP_TIMEOUT
+};
+
+/**
+ * struct iwx_compressed_ba_notif - notifies about reception of BA
+ * ( BA_NOTIF = 0xc5 )
+ * @flags: status flag, see the &iwx_ba_resp_flags
+ * @sta_id: Index of recipient (BA-sending) station in fw's station table
+ * @reduced_txp: power reduced according to TPC. This is the actual value and
+ * not a copy from the LQ command. Thus, if not the first rate was used
+ * for Tx-ing then this value will be set to 0 by FW.
+ * @tlc_rate_info: TLC rate info, initial rate index, TLC table color
+ * @retry_cnt: retry count
+ * @query_byte_cnt: SCD query byte count
+ * @query_frame_cnt: SCD query frame count
+ * @txed: number of frames sent in the aggregation (all-TIDs)
+ * @done: number of frames that were Acked by the BA (all-TIDs)
+ * @reserved: reserved (for alignment)
+ * @wireless_time: Wireless-media time
+ * @tx_rate: the rate the aggregation was sent at
+ * @tfd_cnt: number of TFD-Q elements
+ * @ra_tid_cnt: number of RATID-Q elements
+ * @tfd: array of TFD queue status updates. See &iwx_compressed_ba_tfd
+ * for details. Length in @tfd_cnt.
+ * @ra_tid: array of RA-TID queue status updates. For debug purposes only. See
+ * &iwx_compressed_ba_ratid for more details. Length in @ra_tid_cnt.
+ */
+struct iwx_compressed_ba_notif {
+ uint32_t flags;
+ uint8_t sta_id;
+ uint8_t reduced_txp;
+ uint8_t tlc_rate_info;
+ uint8_t retry_cnt;
+ uint32_t query_byte_cnt;
+ uint16_t query_frame_cnt;
+ uint16_t txed;
+ uint16_t done;
+ uint16_t reserved;
+ uint32_t wireless_time;
+ uint32_t tx_rate;
+ uint16_t tfd_cnt;
+ uint16_t ra_tid_cnt;
+ struct iwx_compressed_ba_ratid ra_tid[0];
+ struct iwx_compressed_ba_tfd tfd[];
+} __packed; /* COMPRESSED_BA_RES_API_S_VER_4 */
+
+
+struct iwx_beacon_notif {
+ struct iwx_tx_resp_v3 beacon_notify_hdr;
+ uint64_t tsf;
+ uint32_t ibss_mgr_status;
+} __packed;
+
+/**
+ * dump (flush) control flags
+ * @IWX_DUMP_TX_FIFO_FLUSH: Dump MSDUs until the FIFO is empty
+ * and the TFD queues are empty.
+ */
+#define IWX_DUMP_TX_FIFO_FLUSH (1 << 1)
+
+/**
+ * struct iwx_tx_path_flush_cmd_v1 -- queue/FIFO flush command
+ * @queues_ctl: bitmap of queues to flush
+ * @flush_ctl: control flags
+ * @reserved: reserved
+ */
+struct iwx_tx_path_flush_cmd_v1 {
+ uint32_t queues_ctl;
+ uint16_t flush_ctl;
+ uint16_t reserved;
+} __packed; /* IWX_TX_PATH_FLUSH_CMD_API_S_VER_1 */
+
+/**
+ * struct iwx_tx_path_flush_cmd -- queue/FIFO flush command
+ * @sta_id: station ID to flush
+ * @tid_mask: TID mask to flush
+ * @reserved: reserved
+ */
+struct iwx_tx_path_flush_cmd {
+ uint32_t sta_id;
+ uint16_t tid_mask;
+ uint16_t reserved;
+} __packed; /* TX_PATH_FLUSH_CMD_API_S_VER_2 */
+
+#define IWX_TX_FLUSH_QUEUE_RSP 16
+
+/**
+ * struct iwx_flush_queue_info - virtual flush queue info
+ * @tid: TID of the flushed queue; @queue_num: virtual queue id
+ * @read_before_flush: read pointer before flush
+ * @read_after_flush: read pointer after flush
+ */
+struct iwx_flush_queue_info {
+ uint16_t tid;
+ uint16_t queue_num;
+ uint16_t read_before_flush;
+ uint16_t read_after_flush;
+} __packed; /* TFDQ_FLUSH_INFO_API_S_VER_1 */
+
+/**
+ * struct iwx_tx_path_flush_cmd_rsp -- queue/FIFO flush command response
+ * @sta_id: station ID echoed from the command; @num_flushed_queues: number
+ * of valid entries in @queues; @queues: all flushed queues
+ */
+struct iwx_tx_path_flush_cmd_rsp {
+ uint16_t sta_id;
+ uint16_t num_flushed_queues;
+ struct iwx_flush_queue_info queues[IWX_TX_FLUSH_QUEUE_RSP];
+} __packed; /* TX_PATH_FLUSH_CMD_RSP_API_S_VER_1 */
+
+
+/**
+ * iwx_get_scd_ssn - returns the SSN of the SCD
+ * @tx_resp: the Tx response from the fw (agg or non-agg)
+ *
+ * When the fw sends an AMPDU, it fetches the MPDUs one after the other. Since
+ * it can't know that everything will go well until the end of the AMPDU, it
+ * can't know in advance the number of MPDUs that will be sent in the current
+ * batch. This is why it writes the agg Tx response while it fetches the MPDUs.
+ * Hence, it can't know in advance what the SSN of the SCD will be at the end
+ * of the batch. This is why the SSN of the SCD is written at the end of the
+ * whole struct at a variable offset. This function knows how to cope with the
+ * variable offset and returns the SSN of the SCD.
+ */
+static inline uint32_t iwx_get_scd_ssn(struct iwx_tx_resp *tx_resp)
+{
+ return le32_to_cpup((uint32_t *)&tx_resp->status +
+ tx_resp->frame_count) & 0xfff;
+}
+
+/**
+ * struct iwx_scd_txq_cfg_cmd - New txq hw scheduler config command
+ * @token:
+ * @sta_id: station id
+ * @tid:
+ * @scd_queue: scheduler queue to config
+ * @enable: 1 queue enable, 0 queue disable
+ * @aggregate: 1 aggregated queue, 0 otherwise
+ * @tx_fifo: %enum iwx_tx_fifo
+ * @window: BA window size
+ * @ssn: SSN for the BA agreement
+ */
+struct iwx_scd_txq_cfg_cmd {
+ uint8_t token;
+ uint8_t sta_id;
+ uint8_t tid;
+ uint8_t scd_queue;
+ uint8_t enable;
+ uint8_t aggregate;
+ uint8_t tx_fifo;
+ uint8_t window;
+ uint16_t ssn;
+ uint16_t reserved;
+} __packed; /* SCD_QUEUE_CFG_CMD_API_S_VER_1 */
+
+/**
+ * struct iwx_scd_txq_cfg_rsp
+ * @token: taken from the command
+ * @sta_id: station id from the command
+ * @tid: tid from the command
+ * @scd_queue: scd_queue from the command
+ */
+struct iwx_scd_txq_cfg_rsp {
+ uint8_t token;
+ uint8_t sta_id;
+ uint8_t tid;
+ uint8_t scd_queue;
+} __packed; /* SCD_QUEUE_CFG_RSP_API_S_VER_1 */
+
+
+/* Scan Commands, Responses, Notifications */
+
+/* Max number of IEs for direct SSID scans in a command */
+#define IWX_PROBE_OPTION_MAX 20
+
+/**
+ * struct iwx_ssid_ie - directed scan network information element
+ *
+ * Up to 20 of these may appear in IWX_REPLY_SCAN_CMD,
+ * selected by "type" bit field in struct iwx_scan_channel;
+ * each channel may select different ssids from among the 20 entries.
+ * SSID IEs get transmitted in reverse order of entry.
+ */
+struct iwx_ssid_ie {
+ uint8_t id;
+ uint8_t len;
+ uint8_t ssid[IEEE80211_NWID_LEN];
+} __packed; /* IWX_SCAN_DIRECT_SSID_IE_API_S_VER_1 */
+
+/* scan offload */
+#define IWX_SCAN_MAX_BLACKLIST_LEN 64
+#define IWX_SCAN_SHORT_BLACKLIST_LEN 16
+#define IWX_SCAN_MAX_PROFILES 11
+#define IWX_SCAN_OFFLOAD_PROBE_REQ_SIZE 512
+
+/* Default watchdog (in MS) for scheduled scan iteration */
+#define IWX_SCHED_SCAN_WATCHDOG cpu_to_le16(15000)
+
+#define IWX_GOOD_CRC_TH_DEFAULT cpu_to_le16(1)
+#define IWX_CAN_ABORT_STATUS 1
+
+#define IWX_FULL_SCAN_MULTIPLIER 5
+#define IWX_FAST_SCHED_SCAN_ITERATIONS 3
+#define IWX_MAX_SCHED_SCAN_PLANS 2
+
+/**
+ * iwx_scan_schedule_lmac - schedule of scan offload
+ * @delay: delay between iterations, in seconds.
+ * @iterations: num of scan iterations
+ * @full_scan_mul: number of partial scans before each full scan
+ */
+struct iwx_scan_schedule_lmac {
+ uint16_t delay;
+ uint8_t iterations;
+ uint8_t full_scan_mul;
+} __packed; /* SCAN_SCHEDULE_API_S */
+
+/**
+ * iwx_scan_req_tx_cmd - SCAN_REQ_TX_CMD_API_S
+ * @tx_flags: combination of TX_CMD_FLG_*
+ * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is
+ * cleared. Combination of RATE_MCS_*
+ * @sta_id: index of destination station in FW station table
+ * @reserved: for alignment and future use
+ */
+struct iwx_scan_req_tx_cmd {
+ uint32_t tx_flags;
+ uint32_t rate_n_flags;
+ uint8_t sta_id;
+ uint8_t reserved[3];
+} __packed;
+
+#define IWX_UNIFIED_SCAN_CHANNEL_FULL (1 << 27)
+#define IWX_UNIFIED_SCAN_CHANNEL_PARTIAL (1 << 28)
+
+/**
+ * iwx_scan_channel_cfg_lmac - SCAN_CHANNEL_CFG_S_VER2
+ * @flags: bits 1-20: directed scan to i'th ssid
+ * other bits &enum iwx_scan_channel_flags_lmac
+ * @channel_num: channel number 1-13 etc
+ * @iter_count: scan iteration on this channel
+ * @iter_interval: interval in seconds between iterations on one channel
+ */
+struct iwx_scan_channel_cfg_lmac {
+ uint32_t flags;
+ uint16_t channel_num;
+ uint16_t iter_count;
+ uint32_t iter_interval;
+} __packed;
+
+/*
+ * iwx_scan_probe_segment - PROBE_SEGMENT_API_S_VER_1
+ * @offset: offset in the data block
+ * @len: length of the segment
+ */
+struct iwx_scan_probe_segment {
+ uint16_t offset;
+ uint16_t len;
+} __packed;
+
+/* iwx_scan_probe_req - PROBE_REQUEST_FRAME_API_S_VER_2
+ * @mac_header: first (and common) part of the probe
+ * @band_data: band specific data
+ * @common_data: last (and common) part of the probe
+ * @buf: raw data block
+ */
+struct iwx_scan_probe_req_v1 {
+ struct iwx_scan_probe_segment mac_header;
+ struct iwx_scan_probe_segment band_data[2];
+ struct iwx_scan_probe_segment common_data;
+ uint8_t buf[IWX_SCAN_OFFLOAD_PROBE_REQ_SIZE];
+} __packed;
+
+/* iwl_scan_probe_req - PROBE_REQUEST_FRAME_API_S_VER_v2
+ * @mac_header: first (and common) part of the probe
+ * @band_data: band specific data
+ * @common_data: last (and common) part of the probe
+ * @buf: raw data block
+ */
+struct iwx_scan_probe_req {
+ struct iwx_scan_probe_segment mac_header;
+ struct iwx_scan_probe_segment band_data[3];
+ struct iwx_scan_probe_segment common_data;
+ uint8_t buf[IWX_SCAN_OFFLOAD_PROBE_REQ_SIZE];
+} __packed;
+
+
+#define IWX_SCAN_CHANNEL_FLAG_EBS (1 << 0)
+#define IWX_SCAN_CHANNEL_FLAG_EBS_ACCURATE (1 << 1)
+#define IWX_SCAN_CHANNEL_FLAG_CACHE_ADD (1 << 2)
+#define IWX_SCAN_CHANNEL_FLAG_EBS_FRAG (1 << 3)
+#define IWX_SCAN_CHANNEL_FLAG_FORCE_EBS (1 << 4)
+#define IWX_SCAN_CHANNEL_FLAG_ENABLE_CHAN_ORDER (1 << 5)
+#define IWX_SCAN_CHANNEL_FLAG_6G_PSC_NO_FILTER (1 << 6)
+
+/* iwx_scan_channel_opt - CHANNEL_OPTIMIZATION_API_S
+ * @flags: enum iwx_scan_channel_flags
+ * @non_ebs_ratio: defines the ratio of number of scan iterations where EBS is
+ * involved.
+ * 1 - EBS is disabled.
+ * 2 - every second scan will be full scan(and so on).
+ */
+struct iwx_scan_channel_opt {
+ uint16_t flags;
+ uint16_t non_ebs_ratio;
+} __packed;
+
+#define IWX_SCAN_PRIORITY_LOW 0
+#define IWX_SCAN_PRIORITY_MEDIUM 1
+#define IWX_SCAN_PRIORITY_HIGH 2
+
+enum iwx_scan_priority_ext {
+ IWX_SCAN_PRIORITY_EXT_0_LOWEST,
+ IWX_SCAN_PRIORITY_EXT_1,
+ IWX_SCAN_PRIORITY_EXT_2,
+ IWX_SCAN_PRIORITY_EXT_3,
+ IWX_SCAN_PRIORITY_EXT_4,
+ IWX_SCAN_PRIORITY_EXT_5,
+ IWX_SCAN_PRIORITY_EXT_6,
+ IWX_SCAN_PRIORITY_EXT_7_HIGHEST,
+};
+
+/**
+ * iwx_periodic_scan_complete - PERIODIC_SCAN_COMPLETE_NTF_API_S_VER_2
+ * @last_schedule_line: last schedule line executed (fast or regular)
+ * @last_schedule_iteration: last scan iteration executed before scan abort
+ * @status: enum iwx_scan_offload_complete_status
+ * @ebs_status: EBS success status &enum iwx_scan_ebs_status
+ * @time_after_last_iter: time in seconds elapsed after last iteration
+ */
+struct iwx_periodic_scan_complete {
+ uint8_t last_schedule_line;
+ uint8_t last_schedule_iteration;
+ uint8_t status;
+ uint8_t ebs_status;
+ uint32_t time_after_last_iter;
+ uint32_t reserved;
+} __packed;
+
+/**
+ * struct iwx_scan_results_notif - scan results for one channel -
+ * SCAN_RESULT_NTF_API_S_VER_3
+ * @channel: which channel the results are from
+ * @band: 0 for 5.2 GHz, 1 for 2.4 GHz
+ * @probe_status: IWX_SCAN_PROBE_STATUS_*, indicates success of probe request
+ * @num_probe_not_sent: # of request that weren't sent due to not enough time
+ * @duration: duration spent in channel, in usecs
+ */
+struct iwx_scan_results_notif {
+ uint8_t channel;
+ uint8_t band;
+ uint8_t probe_status;
+ uint8_t num_probe_not_sent;
+ uint32_t duration;
+} __packed;
+
+#define IWX_SCAN_CLIENT_SCHED_SCAN (1 << 0)
+#define IWX_SCAN_CLIENT_NETDETECT (1 << 1)
+#define IWX_SCAN_CLIENT_ASSET_TRACKING (1 << 2)
+
+/**
+ * iwx_scan_offload_blacklist - IWX_SCAN_OFFLOAD_BLACKLIST_S
+ * @ssid: MAC address to filter out
+ * @reported_rssi: AP rssi reported to the host
+ * @client_bitmap: clients ignore this entry - enum scan_framework_client
+ */
+struct iwx_scan_offload_blacklist {
+ uint8_t ssid[ETHER_ADDR_LEN];
+ uint8_t reported_rssi;
+ uint8_t client_bitmap;
+} __packed;
+
+#define IWX_NETWORK_TYPE_BSS 1
+#define IWX_NETWORK_TYPE_IBSS 2
+#define IWX_NETWORK_TYPE_ANY 3
+
+#define IWX_SCAN_OFFLOAD_SELECT_2_4 0x4
+#define IWX_SCAN_OFFLOAD_SELECT_5_2 0x8
+#define IWX_SCAN_OFFLOAD_SELECT_ANY 0xc
+
+/**
+ * iwx_scan_offload_profile - IWX_SCAN_OFFLOAD_PROFILE_S
+ * @ssid_index: index to ssid list in fixed part
+ * @unicast_cipher: encryption algorithm to match - bitmap
+ * @auth_alg: authentication algorithm to match - bitmap
+ * @network_type: enum iwx_scan_offload_network_type
+ * @band_selection: enum iwx_scan_offload_band_selection
+ * @client_bitmap: clients waiting for match - enum scan_framework_client
+ */
+struct iwx_scan_offload_profile {
+ uint8_t ssid_index;
+ uint8_t unicast_cipher;
+ uint8_t auth_alg;
+ uint8_t network_type;
+ uint8_t band_selection;
+ uint8_t client_bitmap;
+ uint8_t reserved[2];
+} __packed;
+
+/**
+ * iwx_scan_offload_profile_cfg - IWX_SCAN_OFFLOAD_PROFILES_CFG_API_S_VER_1
+ * @blacklist: AP list to filter off from scan results
+ * @profiles: profiles to search for match
+ * @blacklist_len: length of blacklist
+ * @num_profiles: num of profiles in the list
+ * @match_notify: clients waiting for match found notification
+ * @pass_match: clients waiting for the results
+ * @active_clients: active clients bitmap - enum scan_framework_client
+ * @any_beacon_notify: clients waiting for match notification without match
+ */
+struct iwx_scan_offload_profile_cfg {
+ struct iwx_scan_offload_profile profiles[IWX_SCAN_MAX_PROFILES];
+ uint8_t blacklist_len;
+ uint8_t num_profiles;
+ uint8_t match_notify;
+ uint8_t pass_match;
+ uint8_t active_clients;
+ uint8_t any_beacon_notify;
+ uint8_t reserved[2];
+} __packed;
+
+#define IWX_SCAN_OFFLOAD_COMPLETED 1
+#define IWX_SCAN_OFFLOAD_ABORTED 2
+
+/* UMAC Scan API */
+
+#define IWX_SCAN_CONFIG_FLAG_ACTIVATE (1 << 0)
+#define IWX_SCAN_CONFIG_FLAG_DEACTIVATE (1 << 1)
+#define IWX_SCAN_CONFIG_FLAG_FORBID_CHUB_REQS (1 << 2)
+#define IWX_SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS (1 << 3)
+#define IWX_SCAN_CONFIG_FLAG_SET_TX_CHAINS (1 << 8)
+#define IWX_SCAN_CONFIG_FLAG_SET_RX_CHAINS (1 << 9)
+#define IWX_SCAN_CONFIG_FLAG_SET_AUX_STA_ID (1 << 10)
+#define IWX_SCAN_CONFIG_FLAG_SET_ALL_TIMES (1 << 11)
+#define IWX_SCAN_CONFIG_FLAG_SET_EFFECTIVE_TIMES (1 << 12)
+#define IWX_SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS (1 << 13)
+#define IWX_SCAN_CONFIG_FLAG_SET_LEGACY_RATES (1 << 14)
+#define IWX_SCAN_CONFIG_FLAG_SET_MAC_ADDR (1 << 15)
+#define IWX_SCAN_CONFIG_FLAG_SET_FRAGMENTED (1 << 16)
+#define IWX_SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED (1 << 17)
+#define IWX_SCAN_CONFIG_FLAG_SET_CAM_MODE (1 << 18)
+#define IWX_SCAN_CONFIG_FLAG_CLEAR_CAM_MODE (1 << 19)
+#define IWX_SCAN_CONFIG_FLAG_SET_PROMISC_MODE (1 << 20)
+#define IWX_SCAN_CONFIG_FLAG_CLEAR_PROMISC_MODE (1 << 21)
+
+/* Bits 26-31 are for num of channels in channel_array */
+#define IWX_SCAN_CONFIG_N_CHANNELS(n) ((n) << 26)
+
+/* OFDM basic rates */
+#define IWX_SCAN_CONFIG_RATE_6M (1 << 0)
+#define IWX_SCAN_CONFIG_RATE_9M (1 << 1)
+#define IWX_SCAN_CONFIG_RATE_12M (1 << 2)
+#define IWX_SCAN_CONFIG_RATE_18M (1 << 3)
+#define IWX_SCAN_CONFIG_RATE_24M (1 << 4)
+#define IWX_SCAN_CONFIG_RATE_36M (1 << 5)
+#define IWX_SCAN_CONFIG_RATE_48M (1 << 6)
+#define IWX_SCAN_CONFIG_RATE_54M (1 << 7)
+/* CCK basic rates */
+#define IWX_SCAN_CONFIG_RATE_1M (1 << 8)
+#define IWX_SCAN_CONFIG_RATE_2M (1 << 9)
+#define IWX_SCAN_CONFIG_RATE_5M (1 << 10)
+#define IWX_SCAN_CONFIG_RATE_11M (1 << 11)
+
+/* Bits 16-27 are for supported rates */
+#define IWX_SCAN_CONFIG_SUPPORTED_RATE(rate) ((rate) << 16)
+
+#define IWX_CHANNEL_FLAG_EBS (1 << 0)
+#define IWX_CHANNEL_FLAG_ACCURATE_EBS (1 << 1)
+#define IWX_CHANNEL_FLAG_EBS_ADD (1 << 2)
+#define IWX_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE (1 << 3)
+
+/**
+ * struct iwx_scan_dwell
+ * @active: default dwell time for active scan
+ * @passive: default dwell time for passive scan
+ * @fragmented: default dwell time for fragmented scan
+ * @extended: default dwell time for channels 1, 6 and 11
+ */
+struct iwx_scan_dwell {
+ uint8_t active;
+ uint8_t passive;
+ uint8_t fragmented;
+ uint8_t extended;
+} __packed;
+
+
+#define IWX_SCAN_TWO_LMACS 2
+#define IWX_SCAN_LB_LMAC_IDX 0 /* low-band */
+#define IWX_SCAN_HB_LMAC_IDX 1 /* high-band */
+
+/**
+ * struct iwx_scan_config
+ * @enable_cam_mode: whether to enable CAM mode.
+ * @enable_promiscuous_mode: whether to enable promiscuous mode
+ * @bcast_sta_id: the index of the station in the fw. Deprecated starting with
+ * API version 5.
+ * @reserved: reserved
+ * @tx_chains: valid_tx antenna - ANT_* definitions
+ * @rx_chains: valid_rx antenna - ANT_* definitions
+ */
+struct iwx_scan_config {
+ uint8_t enable_cam_mode;
+ uint8_t enable_promiscuous_mode;
+ uint8_t bcast_sta_id;
+ uint8_t reserved;
+ uint32_t tx_chains;
+ uint32_t rx_chains;
+} __packed; /* SCAN_CONFIG_DB_CMD_API_S_5 */
+
+/**
+ * struct iwx_scan_config_v2
+ * @flags: enum scan_config_flags
+ * @tx_chains: valid_tx antenna - ANT_* definitions
+ * @rx_chains: valid_rx antenna - ANT_* definitions
+ * @legacy_rates: default legacy rates - enum scan_config_rates
+ * @out_of_channel_time: default max out of serving channel time
+ * @suspend_time: default max suspend time
+ * @dwell: default dwell times, see struct iwx_scan_dwell: active,
+ * passive and fragmented scan dwell plus the extended dwell
+ * used for channels 1, 6 and 11 (per struct iwx_scan_dwell's
+ * field documentation)
+ * @mac_addr: default mac address to be used in probes
+ * @bcast_sta_id: the index of the station in the fw
+ * @channel_flags: default channel flags - enum iwx_channel_flags
+ * scan_config_channel_flag
+ * @channel_array: default supported channels
+ */
+struct iwx_scan_config_v2 {
+ uint32_t flags;
+ uint32_t tx_chains;
+ uint32_t rx_chains;
+ uint32_t legacy_rates;
+ uint32_t out_of_channel_time[IWX_SCAN_TWO_LMACS];
+ uint32_t suspend_time[IWX_SCAN_TWO_LMACS];
+ struct iwx_scan_dwell dwell;
+ uint8_t mac_addr[ETHER_ADDR_LEN];
+ uint8_t bcast_sta_id;
+ uint8_t channel_flags;
+ uint8_t channel_array[];
+} __packed; /* SCAN_CONFIG_DB_CMD_API_S_2 */
+
+/**
+ * iwx_umac_scan_flags
+ *@IWX_UMAC_SCAN_FLAG_PREEMPTIVE: scan process triggered by this scan request
+ * can be preempted by other scan requests with higher priority.
+ * The low priority scan will be resumed when the higher priority scan is
+ * completed.
+ *@IWX_UMAC_SCAN_FLAG_START_NOTIF: notification will be sent to the driver
+ * when scan starts.
+ */
+#define IWX_UMAC_SCAN_FLAG_PREEMPTIVE (1 << 0)
+#define IWX_UMAC_SCAN_FLAG_START_NOTIF (1 << 1)
+
+#define IWX_UMAC_SCAN_UID_TYPE_OFFSET 0
+#define IWX_UMAC_SCAN_UID_SEQ_OFFSET 8
+
+#define IWX_UMAC_SCAN_GEN_FLAGS_PERIODIC (1 << 0)
+#define IWX_UMAC_SCAN_GEN_FLAGS_OVER_BT (1 << 1)
+#define IWX_UMAC_SCAN_GEN_FLAGS_PASS_ALL (1 << 2)
+#define IWX_UMAC_SCAN_GEN_FLAGS_PASSIVE (1 << 3)
+#define IWX_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT (1 << 4)
+#define IWX_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE (1 << 5)
+#define IWX_UMAC_SCAN_GEN_FLAGS_MULTIPLE_SSID (1 << 6)
+#define IWX_UMAC_SCAN_GEN_FLAGS_FRAGMENTED (1 << 7)
+#define IWX_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED (1 << 8)
+#define IWX_UMAC_SCAN_GEN_FLAGS_MATCH (1 << 9)
+#define IWX_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL (1 << 10)
+/* Extended dwell is obsolete when adaptive dwell is used, making this
+ * bit reusable. Hence, probe request defer is used only when adaptive
+ * dwell is supported. */
+#define IWX_UMAC_SCAN_GEN_FLAGS_PROB_REQ_DEFER_SUPP (1 << 10)
+#define IWX_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED (1 << 11)
+#define IWX_UMAC_SCAN_GEN_FLAGS_ADAPTIVE_DWELL (1 << 13)
+#define IWX_UMAC_SCAN_GEN_FLAGS_MAX_CHNL_TIME (1 << 14)
+#define IWX_UMAC_SCAN_GEN_FLAGS_PROB_REQ_HIGH_TX_RATE (1 << 15)
+
+/**
+ * UMAC scan general flags #2
+ * @IWX_UMAC_SCAN_GEN_FLAGS2_NOTIF_PER_CHNL: Whether to send a complete
+ * notification per channel or not.
+ * @IWX_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER: Whether to allow channel
+ * reorder optimization or not.
+ */
+#define IWX_UMAC_SCAN_GEN_FLAGS2_NOTIF_PER_CHNL (1 << 0)
+#define IWX_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER (1 << 1)
+
+/**
+ * UMAC scan general flags version 2
+ *
+ * The FW flags were reordered and hence the driver introduce version 2
+ *
+ * @IWX_UMAC_SCAN_GEN_FLAGS_V2_PERIODIC: periodic or scheduled
+ * @IWX_UMAC_SCAN_GEN_FLAGS_V2_PASS_ALL: pass all probe responses and beacons
+ * during scan iterations
+ * @IWX_UMAC_SCAN_GEN_FLAGS_V2_NTFY_ITER_COMPLETE: send complete notification
+ * on every iteration instead of only once after the last iteration
+ * @IWX_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1: fragmented scan LMAC1
+ * @IWX_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC2: fragmented scan LMAC2
+ * @IWX_UMAC_SCAN_GEN_FLAGS_V2_MATCH: does this scan check for profile matching
+ * @IWX_UMAC_SCAN_GEN_FLAGS_V2_USE_ALL_RX_CHAINS: use all valid chains for RX
+ * @IWX_UMAC_SCAN_GEN_FLAGS_V2_ADAPTIVE_DWELL: works with adaptive dwell
+ * for active channel
+ * @IWX_UMAC_SCAN_GEN_FLAGS_V2_PREEMPTIVE: can be preempted by other requests
+ * @IWX_UMAC_SCAN_GEN_FLAGS_V2_NTF_START: send notification of scan start
+ * @IWX_UMAC_SCAN_GEN_FLAGS_V2_MULTI_SSID: matching on multiple SSIDs
+ * @IWX_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE: all the channels scanned
+ * as passive
+ * @IWX_UMAC_SCAN_GEN_FLAGS_V2_TRIGGER_UHB_SCAN: at the end of 2.4GHz and
+ * 5.2Ghz bands scan, trigger scan on 6GHz band to discover
+ * the reported collocated APs
+ */
+#define IWX_UMAC_SCAN_GEN_FLAGS_V2_PERIODIC (1 << 0)
+#define IWX_UMAC_SCAN_GEN_FLAGS_V2_PASS_ALL (1 << 1)
+#define IWX_UMAC_SCAN_GEN_FLAGS_V2_NTFY_ITER_COMPLETE (1 << 2)
+#define IWX_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1 (1 << 3)
+#define IWX_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC2 (1 << 4)
+#define IWX_UMAC_SCAN_GEN_FLAGS_V2_MATCH (1 << 5)
+#define IWX_UMAC_SCAN_GEN_FLAGS_V2_USE_ALL_RX_CHAINS (1 << 6)
+#define IWX_UMAC_SCAN_GEN_FLAGS_V2_ADAPTIVE_DWELL (1 << 7)
+#define IWX_UMAC_SCAN_GEN_FLAGS_V2_PREEMPTIVE (1 << 8)
+#define IWX_UMAC_SCAN_GEN_FLAGS_V2_NTF_START (1 << 9)
+#define IWX_UMAC_SCAN_GEN_FLAGS_V2_MULTI_SSID (1 << 10)
+#define IWX_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE (1 << 11)
+#define IWX_UMAC_SCAN_GEN_FLAGS_V2_TRIGGER_UHB_SCAN (1 << 12)
+
+/**
+ * struct iwx_scan_channel_cfg_umac
+ * @flags: bitmap - 0-19: directed scan to i'th ssid.
+ * @channel_num: channel number 1-13 etc.
+ * @iter_count: repetition count for the channel.
+ * @iter_interval: interval between two scan iterations on one channel.
+ */
+struct iwx_scan_channel_cfg_umac {
+ uint32_t flags;
+ union {
+ struct {
+ uint8_t channel_num;
+ uint8_t iter_count;
+ uint16_t iter_interval;
+ } v1; /* SCAN_CHANNEL_CFG_S_VER1 */
+ struct {
+ uint8_t channel_num;
+ uint8_t band;
+ uint8_t iter_count;
+ uint8_t iter_interval;
+ } v2; /* SCAN_CHANNEL_CFG_S_VER{2,3,4} */
+ };
+} __packed;
+
+/**
+ * struct iwx_scan_umac_schedule
+ * @interval: interval in seconds between scan iterations
+ * @iter_count: num of scan iterations for schedule plan, 0xff for infinite loop
+ * @reserved: for alignment and future use
+ */
+struct iwx_scan_umac_schedule {
+ uint16_t interval;
+ uint8_t iter_count;
+ uint8_t reserved;
+} __packed; /* SCAN_SCHED_PARAM_API_S_VER_1 */
+
+/**
+ * struct iwx_scan_req_umac_tail_v1 - the rest of the UMAC scan request command
+ * parameters following channels configuration array.
+ * @schedule: two scheduling plans.
+ * @delay: delay in TUs before starting the first scan iteration
+ * @reserved: for future use and alignment
+ * @preq: probe request with IEs blocks
+ * @direct_scan: list of SSIDs for directed active scan
+ */
+struct iwx_scan_req_umac_tail_v1 {
+ /* SCAN_PERIODIC_PARAMS_API_S_VER_1 */
+ struct iwx_scan_umac_schedule schedule[IWX_MAX_SCHED_SCAN_PLANS];
+ uint16_t delay;
+ uint16_t reserved;
+ /* SCAN_PROBE_PARAMS_API_S_VER_1 */
+ struct iwx_scan_probe_req_v1 preq;
+ struct iwx_ssid_ie direct_scan[IWX_PROBE_OPTION_MAX];
+} __packed;
+
+/**
+ * struct iwx_scan_req_umac_tail_v2 - the rest of the UMAC scan request command
+ * parameters following channels configuration array.
+ * @schedule: two scheduling plans.
+ * @delay: delay in TUs before starting the first scan iteration
+ * @reserved: for future use and alignment
+ * @preq: probe request with IEs blocks
+ * @direct_scan: list of SSIDs for directed active scan
+ */
+struct iwx_scan_req_umac_tail_v2 {
+ /* SCAN_PERIODIC_PARAMS_API_S_VER_1 */
+ struct iwx_scan_umac_schedule schedule[IWX_MAX_SCHED_SCAN_PLANS];
+ uint16_t delay;
+ uint16_t reserved;
+ /* SCAN_PROBE_PARAMS_API_S_VER_2 */
+ struct iwx_scan_probe_req preq;
+ struct iwx_ssid_ie direct_scan[IWX_PROBE_OPTION_MAX];
+} __packed;
+
+/**
+ * struct iwx_scan_umac_chan_param
+ * @flags: channel flags &enum iwl_scan_channel_flags
+ * @count: num of channels in scan request
+ * @reserved: for future use and alignment
+ */
+struct iwx_scan_umac_chan_param {
+ uint8_t flags;
+ uint8_t count;
+ uint16_t reserved;
+} __packed; /* SCAN_CHANNEL_PARAMS_API_S_VER_1 */
+
+#define IWX_SCAN_LB_LMAC_IDX 0
+#define IWX_SCAN_HB_LMAC_IDX 1
+
+/**
+ * struct iwx_scan_req_umac
+ * @flags: &enum iwl_umac_scan_flags
+ * @uid: scan id, &enum iwl_umac_scan_uid_offsets
+ * @ooc_priority: out of channel priority - &enum iwl_scan_priority
+ * @general_flags: &enum iwl_umac_scan_general_flags
+ * @scan_start_mac_id: report the scan start TSF time according to this mac TSF
+ * @extended_dwell: dwell time for channels 1, 6 and 11
+ * @active_dwell: dwell time for active scan per LMAC
+ * @passive_dwell: dwell time for passive scan per LMAC
+ * @fragmented_dwell: dwell time for fragmented passive scan
+ * @adwell_default_n_aps: for adaptive dwell the default number of APs
+ * per channel
+ * @adwell_default_n_aps_social: for adaptive dwell the default
+ * number of APs per social (1,6,11) channel
+ * @general_flags2: &enum iwl_umac_scan_general_flags2
+ * @adwell_max_budget: for adaptive dwell the maximal budget of TU to be added
+ * to total scan time
+ * @max_out_time: max out of serving channel time, per LMAC - for CDB there
+ * are 2 LMACs (high band and low band)
+ * @suspend_time: max suspend time, per LMAC - for CDB there are 2 LMACs
+ * @scan_priority: scan internal prioritization &enum iwl_scan_priority
+ * @num_of_fragments: Number of fragments needed for full coverage per band.
+ * Relevant only for fragmented scan.
+ * @channel: &struct iwx_scan_umac_chan_param
+ * @reserved: for future use and alignment
+ * @reserved3: for future use and alignment
+ * @data: &struct iwx_scan_channel_cfg_umac and
+ * &struct iwx_scan_req_umac_tail
+ */
+struct iwx_scan_req_umac {
+ uint32_t flags;
+ uint32_t uid;
+ uint32_t ooc_priority;
+ /* SCAN_GENERAL_PARAMS_API_S_VER_1 */
+ uint16_t general_flags;
+ uint8_t reserved;
+ uint8_t scan_start_mac_id;
+ union {
+ struct {
+ uint8_t extended_dwell;
+ uint8_t active_dwell;
+ uint8_t passive_dwell;
+ uint8_t fragmented_dwell;
+ uint32_t max_out_time;
+ uint32_t suspend_time;
+ uint32_t scan_priority;
+ struct iwx_scan_umac_chan_param channel;
+ uint8_t data[];
+ } v1; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_1 */
+ struct {
+ uint8_t extended_dwell;
+ uint8_t active_dwell;
+ uint8_t passive_dwell;
+ uint8_t fragmented_dwell;
+ uint32_t max_out_time[2];
+ uint32_t suspend_time[2];
+ uint32_t scan_priority;
+ struct iwx_scan_umac_chan_param channel;
+ uint8_t data[];
+ } v6; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_6 */
+ struct {
+ uint8_t active_dwell;
+ uint8_t passive_dwell;
+ uint8_t fragmented_dwell;
+ uint8_t adwell_default_n_aps;
+ uint8_t adwell_default_n_aps_social;
+ uint8_t reserved3;
+ uint16_t adwell_max_budget;
+ uint32_t max_out_time[2];
+ uint32_t suspend_time[2];
+ uint32_t scan_priority;
+ struct iwx_scan_umac_chan_param channel;
+ uint8_t data[];
+ } v7; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_7 */
+ struct {
+ uint8_t active_dwell[2];
+ uint8_t reserved2;
+ uint8_t adwell_default_n_aps;
+ uint8_t adwell_default_n_aps_social;
+ uint8_t general_flags2;
+ uint16_t adwell_max_budget;
+ uint32_t max_out_time[2];
+ uint32_t suspend_time[2];
+ uint32_t scan_priority;
+ uint8_t passive_dwell[2];
+ uint8_t num_of_fragments[2];
+ struct iwx_scan_umac_chan_param channel;
+ uint8_t data[];
+ } v8; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_8 */
+ struct {
+ uint8_t active_dwell[2];
+ uint8_t adwell_default_hb_n_aps;
+ uint8_t adwell_default_lb_n_aps;
+ uint8_t adwell_default_n_aps_social;
+ uint8_t general_flags2;
+ uint16_t adwell_max_budget;
+ uint32_t max_out_time[2];
+ uint32_t suspend_time[2];
+ uint32_t scan_priority;
+ uint8_t passive_dwell[2];
+ uint8_t num_of_fragments[2];
+ struct iwx_scan_umac_chan_param channel;
+ uint8_t data[];
+ } v9; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_9 */
+ };
+} __packed;
+
+#define IWX_SCAN_REQ_UMAC_SIZE_V8 sizeof(struct iwx_scan_req_umac)
+#define IWX_SCAN_REQ_UMAC_SIZE_V7 48
+#define IWX_SCAN_REQ_UMAC_SIZE_V6 44
+#define IWX_SCAN_REQ_UMAC_SIZE_V1 36
+
+/**
+ * struct iwx_scan_general_params_v10
+ * @flags: &enum iwx_umac_scan_flags
+ * @reserved: reserved for future
+ * @scan_start_mac_id: report the scan start TSF time according to this mac TSF
+ * @active_dwell: dwell time for active scan per LMAC
+ * @adwell_default_2g: adaptive dwell default number of APs
+ * for 2.4GHz channel
+ * @adwell_default_5g: adaptive dwell default number of APs
+ * for 5GHz channels
+ * @adwell_default_social_chn: adaptive dwell default number of
+ * APs per social channel
+ * @reserved1: reserved for future
+ * @adwell_max_budget: the maximal number of TUs that adaptive dwell
+ * can add to the total scan time
+ * @max_out_of_time: max out of serving channel time, per LMAC
+ * @suspend_time: max suspend time, per LMAC
+ * @scan_priority: priority of the request
+ * @passive_dwell: continuous dwell time for passive channel
+ * (without adaptive dwell)
+ * @num_of_fragments: number of fragments needed for full fragmented
+ * scan coverage.
+ */
+struct iwx_scan_general_params_v10 {
+ uint16_t flags;
+ uint8_t reserved;
+ uint8_t scan_start_mac_id;
+ uint8_t active_dwell[IWX_SCAN_TWO_LMACS];
+ uint8_t adwell_default_2g;
+ uint8_t adwell_default_5g;
+ uint8_t adwell_default_social_chn;
+ uint8_t reserved1;
+ uint16_t adwell_max_budget;
+ uint32_t max_out_of_time[IWX_SCAN_TWO_LMACS];
+ uint32_t suspend_time[IWX_SCAN_TWO_LMACS];
+ uint32_t scan_priority;
+ uint8_t passive_dwell[IWX_SCAN_TWO_LMACS];
+ uint8_t num_of_fragments[IWX_SCAN_TWO_LMACS];
+} __packed; /* SCAN_GENERAL_PARAMS_API_S_VER_10 */
+
+/**
+ * struct iwx_scan_channel_params_v6
+ * @flags: channel flags &enum iwl_scan_channel_flags
+ * @count: num of channels in scan request
+ * @n_aps_override: override the number of APs the FW uses to calculate dwell
+ * time when adaptive dwell is used.
+ * Channel k will use n_aps_override[i] when BIT(20 + i) is set in
+ * channel_config[k].flags
+ * @channel_config: array of explicit channel configurations
+ * for 2.4Ghz and 5.2Ghz bands
+ */
+struct iwx_scan_channel_params_v6 {
+ uint8_t flags;
+ uint8_t count;
+ uint8_t n_aps_override[2];
+ struct iwx_scan_channel_cfg_umac channel_config[67];
+} __packed; /* SCAN_CHANNEL_PARAMS_API_S_VER_6 */
+
+/**
+ * struct iwx_scan_periodic_parms_v1
+ * @schedule: scan scheduling parameter
+ * @delay: initial delay of the periodic scan in seconds
+ * @reserved: reserved for future
+ */
+struct iwx_scan_periodic_parms_v1 {
+ struct iwx_scan_umac_schedule schedule[IWX_MAX_SCHED_SCAN_PLANS];
+ uint16_t delay;
+ uint16_t reserved;
+} __packed; /* SCAN_PERIODIC_PARAMS_API_S_VER_1 */
+
+#define IWX_SCAN_SHORT_SSID_MAX_SIZE 8
+#define IWX_SCAN_BSSID_MAX_SIZE 16
+
+/**
+ * struct iwx_scan_probe_params_v4
+ * @preq: scan probe request params
+ * @short_ssid_num: number of valid short SSIDs in short ssid array
+ * @bssid_num: number of valid bssid in bssids array
+ * @reserved: reserved
+ * @direct_scan: list of ssids
+ * @short_ssid: array of short ssids
+ * @bssid_array: array of bssids
+ */
+struct iwx_scan_probe_params_v4 {
+ struct iwx_scan_probe_req preq;
+ uint8_t short_ssid_num;
+ uint8_t bssid_num;
+ uint16_t reserved;
+ struct iwx_ssid_ie direct_scan[IWX_PROBE_OPTION_MAX];
+ uint32_t short_ssid[IWX_SCAN_SHORT_SSID_MAX_SIZE];
+ uint8_t bssid_array[IWX_SCAN_BSSID_MAX_SIZE][ETHER_ADDR_LEN];
+} __packed; /* SCAN_PROBE_PARAMS_API_S_VER_4 */
+
+/**
+ * struct iwx_scan_req_params_v14
+ * @general_params: &struct iwx_scan_general_params_v10
+ * @channel_params: &struct iwx_scan_channel_params_v6
+ * @periodic_params: &struct iwx_scan_periodic_parms_v1
+ * @probe_params: &struct iwx_scan_probe_params_v4
+ */
+struct iwx_scan_req_params_v14 {
+ struct iwx_scan_general_params_v10 general_params;
+ struct iwx_scan_channel_params_v6 channel_params;
+ struct iwx_scan_periodic_parms_v1 periodic_params;
+ struct iwx_scan_probe_params_v4 probe_params;
+} __packed; /* SCAN_REQUEST_PARAMS_API_S_VER_14 */
+
+/**
+ * struct iwx_scan_req_umac_v14
+ * @uid: scan id, &enum iwl_umac_scan_uid_offsets
+ * @ooc_priority: out of channel priority - &enum iwx_scan_priority
+ * @scan_params: scan parameters
+ */
+struct iwx_scan_req_umac_v14 {
+ uint32_t uid;
+ uint32_t ooc_priority;
+ struct iwx_scan_req_params_v14 scan_params;
+} __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_14 */
+
+/**
+ * struct iwx_umac_scan_abort
+ * @uid: scan id, &enum iwx_umac_scan_uid_offsets
+ * @flags: reserved
+ */
+struct iwx_umac_scan_abort {
+ uint32_t uid;
+ uint32_t flags;
+} __packed; /* SCAN_ABORT_CMD_UMAC_API_S_VER_1 */
+
+/**
+ * struct iwx_umac_scan_complete
+ * @uid: scan id, &enum iwx_umac_scan_uid_offsets
+ * @last_schedule: last scheduling line
+ * @last_iter: last scan iteration number
+ * @status: &enum iwx_scan_offload_complete_status
+ * @ebs_status: &enum iwx_scan_ebs_status
+ * @time_from_last_iter: time elapsed from last iteration
+ * @reserved: for future use
+ */
+struct iwx_umac_scan_complete {
+ uint32_t uid;
+ uint8_t last_schedule;
+ uint8_t last_iter;
+ uint8_t status;
+ uint8_t ebs_status;
+ uint32_t time_from_last_iter;
+ uint32_t reserved;
+} __packed; /* SCAN_COMPLETE_NTF_UMAC_API_S_VER_1 */
+
+#define IWX_SCAN_OFFLOAD_MATCHING_CHANNELS_LEN 5
+/**
+ * struct iwx_scan_offload_profile_match - match information
+ * @bssid: matched bssid
+ * @channel: channel where the match occurred
+ * @energy:
+ * @matching_feature:
+ * @matching_channels: bitmap of channels that matched, referencing
+ * the channels passed in the scan offload request
+ */
+struct iwx_scan_offload_profile_match {
+ uint8_t bssid[ETHER_ADDR_LEN];
+ uint16_t reserved;
+ uint8_t channel;
+ uint8_t energy;
+ uint8_t matching_feature;
+ uint8_t matching_channels[IWX_SCAN_OFFLOAD_MATCHING_CHANNELS_LEN];
+} __packed; /* SCAN_OFFLOAD_PROFILE_MATCH_RESULTS_S_VER_1 */
+
+/**
+ * struct iwx_scan_offload_profiles_query - match results query response
+ * @matched_profiles: bitmap of matched profiles, referencing the
+ * matches passed in the scan offload request
+ * @last_scan_age: age of the last offloaded scan
+ * @n_scans_done: number of offloaded scans done
+ * @gp2_d0u: GP2 when D0U occurred
+ * @gp2_invoked: GP2 when scan offload was invoked
+ * @resume_while_scanning: not used
+ * @self_recovery: obsolete
+ * @reserved: reserved
+ * @matches: array of match information, one for each match
+ */
+struct iwx_scan_offload_profiles_query {
+ uint32_t matched_profiles;
+ uint32_t last_scan_age;
+ uint32_t n_scans_done;
+ uint32_t gp2_d0u;
+ uint32_t gp2_invoked;
+ uint8_t resume_while_scanning;
+ uint8_t self_recovery;
+ uint16_t reserved;
+ struct iwx_scan_offload_profile_match matches[IWX_SCAN_MAX_PROFILES];
+} __packed; /* SCAN_OFFLOAD_PROFILES_QUERY_RSP_S_VER_2 */
+
+/**
+ * struct iwx_umac_scan_iter_complete_notif - notifies end of scanning iteration
+ * @uid: scan id, &enum iwx_umac_scan_uid_offsets
+ * @scanned_channels: number of channels scanned and number of valid elements in
+ * results array
+ * @status: one of SCAN_COMP_STATUS_*
+ * @bt_status: BT on/off status
+ * @last_channel: last channel that was scanned
+ * @tsf_low: TSF timer (lower half) in usecs
+ * @tsf_high: TSF timer (higher half) in usecs
+ * @results: array of scan results, only "scanned_channels" of them are valid
+ */
+struct iwx_umac_scan_iter_complete_notif {
+ uint32_t uid;
+ uint8_t scanned_channels;
+ uint8_t status;
+ uint8_t bt_status;
+ uint8_t last_channel;
+ uint32_t tsf_low;
+ uint32_t tsf_high;
+ struct iwx_scan_results_notif results[];
+} __packed; /* SCAN_ITER_COMPLETE_NTF_UMAC_API_S_VER_1 */
+
+#define IWX_GSCAN_START_CMD 0x0
+#define IWX_GSCAN_STOP_CMD 0x1
+#define IWX_GSCAN_SET_HOTLIST_CMD 0x2
+#define IWX_GSCAN_RESET_HOTLIST_CMD 0x3
+#define IWX_GSCAN_SET_SIGNIFICANT_CHANGE_CMD 0x4
+#define IWX_GSCAN_RESET_SIGNIFICANT_CHANGE_CMD 0x5
+#define IWX_GSCAN_SIGNIFICANT_CHANGE_EVENT 0xFD
+#define IWX_GSCAN_HOTLIST_CHANGE_EVENT 0xFE
+#define IWX_GSCAN_RESULTS_AVAILABLE_EVENT 0xFF
+
+/* STA API */
+
+/**
+ * flags for the ADD_STA host command
+ * @IWX_STA_FLG_REDUCED_TX_PWR_CTRL:
+ * @IWX_STA_FLG_REDUCED_TX_PWR_DATA:
+ * @IWX_STA_FLG_DISABLE_TX: set if TX should be disabled
+ * @IWX_STA_FLG_PS: set if STA is in Power Save
+ * @IWX_STA_FLG_INVALID: set if STA is invalid
+ * @IWX_STA_FLG_DLP_EN: Direct Link Protocol is enabled
+ * @IWX_STA_FLG_SET_ALL_KEYS: the current key applies to all key IDs
+ * @IWX_STA_FLG_DRAIN_FLOW: drain flow
+ * @IWX_STA_FLG_PAN: STA is for PAN interface
+ * @IWX_STA_FLG_CLASS_AUTH:
+ * @IWX_STA_FLG_CLASS_ASSOC:
+ * @IWX_STA_FLG_CLASS_MIMO_PROT:
+ * @IWX_STA_FLG_MAX_AGG_SIZE_MSK: maximal size for A-MPDU
+ * @IWX_STA_FLG_AGG_MPDU_DENS_MSK: maximal MPDU density for Tx aggregation
+ * @IWX_STA_FLG_FAT_EN_MSK: support for channel width (for Tx). This flag is
+ * initialised by driver and can be updated by fw upon reception of
+ * action frames that can change the channel width. When cleared the fw
+ * will send all the frames in 20MHz even when FAT channel is requested.
+ * @IWX_STA_FLG_MIMO_EN_MSK: support for MIMO. This flag is initialised by the
+ * driver and can be updated by fw upon reception of action frames.
+ * @IWX_STA_FLG_MFP_EN: Management Frame Protection
+ */
+#define IWX_STA_FLG_REDUCED_TX_PWR_CTRL (1 << 3)
+#define IWX_STA_FLG_REDUCED_TX_PWR_DATA (1 << 6)
+
+#define IWX_STA_FLG_DISABLE_TX (1 << 4)
+
+#define IWX_STA_FLG_PS (1 << 8)
+#define IWX_STA_FLG_DRAIN_FLOW (1 << 12)
+#define IWX_STA_FLG_PAN (1 << 13)
+#define IWX_STA_FLG_CLASS_AUTH (1 << 14)
+#define IWX_STA_FLG_CLASS_ASSOC (1 << 15)
+#define IWX_STA_FLG_RTS_MIMO_PROT (1 << 17)
+
+#define IWX_STA_FLG_MAX_AGG_SIZE_SHIFT 19
+#define IWX_STA_FLG_MAX_AGG_SIZE_8K (0 << IWX_STA_FLG_MAX_AGG_SIZE_SHIFT)
+#define IWX_STA_FLG_MAX_AGG_SIZE_16K (1 << IWX_STA_FLG_MAX_AGG_SIZE_SHIFT)
+#define IWX_STA_FLG_MAX_AGG_SIZE_32K (2 << IWX_STA_FLG_MAX_AGG_SIZE_SHIFT)
+#define IWX_STA_FLG_MAX_AGG_SIZE_64K (3 << IWX_STA_FLG_MAX_AGG_SIZE_SHIFT)
+#define IWX_STA_FLG_MAX_AGG_SIZE_128K (4 << IWX_STA_FLG_MAX_AGG_SIZE_SHIFT)
+#define IWX_STA_FLG_MAX_AGG_SIZE_256K (5 << IWX_STA_FLG_MAX_AGG_SIZE_SHIFT)
+#define IWX_STA_FLG_MAX_AGG_SIZE_512K (6 << IWX_STA_FLG_MAX_AGG_SIZE_SHIFT)
+#define IWX_STA_FLG_MAX_AGG_SIZE_1024K (7 << IWX_STA_FLG_MAX_AGG_SIZE_SHIFT)
+#define IWX_STA_FLG_MAX_AGG_SIZE_2M (8 << IWX_STA_FLG_MAX_AGG_SIZE_SHIFT)
+#define IWX_STA_FLG_MAX_AGG_SIZE_4M (9 << IWX_STA_FLG_MAX_AGG_SIZE_SHIFT)
+#define IWX_STA_FLG_MAX_AGG_SIZE_MSK (0xf << IWX_STA_FLG_MAX_AGG_SIZE_SHIFT)
+
+#define IWX_STA_FLG_AGG_MPDU_DENS_SHIFT 23
+#define IWX_STA_FLG_AGG_MPDU_DENS_2US (4 << IWX_STA_FLG_AGG_MPDU_DENS_SHIFT)
+#define IWX_STA_FLG_AGG_MPDU_DENS_4US (5 << IWX_STA_FLG_AGG_MPDU_DENS_SHIFT)
+#define IWX_STA_FLG_AGG_MPDU_DENS_8US (6 << IWX_STA_FLG_AGG_MPDU_DENS_SHIFT)
+#define IWX_STA_FLG_AGG_MPDU_DENS_16US (7 << IWX_STA_FLG_AGG_MPDU_DENS_SHIFT)
+#define IWX_STA_FLG_AGG_MPDU_DENS_MSK (7 << IWX_STA_FLG_AGG_MPDU_DENS_SHIFT)
+
+#define IWX_STA_FLG_FAT_EN_20MHZ (0 << 26)
+#define IWX_STA_FLG_FAT_EN_40MHZ (1 << 26)
+#define IWX_STA_FLG_FAT_EN_80MHZ (2 << 26)
+#define IWX_STA_FLG_FAT_EN_160MHZ (3 << 26)
+#define IWX_STA_FLG_FAT_EN_MSK (3 << 26)
+
+#define IWX_STA_FLG_MIMO_EN_SISO (0 << 28)
+#define IWX_STA_FLG_MIMO_EN_MIMO2 (1 << 28)
+#define IWX_STA_FLG_MIMO_EN_MIMO3 (2 << 28)
+#define IWX_STA_FLG_MIMO_EN_MSK (3 << 28)
+
+/**
+ * key flags for the ADD_STA host command
+ * @IWX_STA_KEY_FLG_NO_ENC: no encryption
+ * @IWX_STA_KEY_FLG_WEP: WEP encryption algorithm
+ * @IWX_STA_KEY_FLG_CCM: CCMP encryption algorithm
+ * @IWX_STA_KEY_FLG_TKIP: TKIP encryption algorithm
+ * @IWX_STA_KEY_FLG_EXT: extended cipher algorithm (depends on the FW support)
+ * @IWX_STA_KEY_FLG_CMAC: CMAC encryption algorithm
+ * @IWX_STA_KEY_FLG_ENC_UNKNOWN: unknown encryption algorithm
+ * @IWX_STA_KEY_FLG_EN_MSK: mask for encryption algorithm value
+ * @IWX_STA_KEY_FLG_WEP_KEY_MAP: wep is either a group key (0 - legacy WEP) or from
+ * station info array (1 - n 1X mode)
+ * @IWX_STA_KEY_FLG_KEYID_MSK: the index of the key
+ * @IWX_STA_KEY_NOT_VALID: key is invalid
+ * @IWX_STA_KEY_FLG_WEP_13BYTES: set for 13 bytes WEP key
+ * @IWX_STA_KEY_MULTICAST: set for multicast key
+ * @IWX_STA_KEY_MFP: key is used for Management Frame Protection
+ */
+#define IWX_STA_KEY_FLG_NO_ENC (0 << 0)
+#define IWX_STA_KEY_FLG_WEP (1 << 0)
+#define IWX_STA_KEY_FLG_CCM (2 << 0)
+#define IWX_STA_KEY_FLG_TKIP (3 << 0)
+#define IWX_STA_KEY_FLG_EXT (4 << 0)
+#define IWX_STA_KEY_FLG_CMAC (6 << 0)
+#define IWX_STA_KEY_FLG_ENC_UNKNOWN (7 << 0)
+#define IWX_STA_KEY_FLG_EN_MSK (7 << 0)
+#define IWX_STA_KEY_FLG_WEP_KEY_MAP (1 << 3)
+#define IWX_STA_KEY_FLG_KEYID_POS 8
+#define IWX_STA_KEY_FLG_KEYID_MSK (3 << IWX_STA_KEY_FLG_KEYID_POS)
+#define IWX_STA_KEY_NOT_VALID (1 << 11)
+#define IWX_STA_KEY_FLG_WEP_13BYTES (1 << 12)
+#define IWX_STA_KEY_MULTICAST (1 << 14)
+#define IWX_STA_KEY_MFP (1 << 15)
+
+/**
+ * indicate to the fw what flag are being changed
+ * @IWX_STA_MODIFY_QUEUE_REMOVAL: this command removes a queue
+ * @IWX_STA_MODIFY_TID_DISABLE_TX: this command modifies %tid_disable_tx
+ * @IWX_STA_MODIFY_TX_RATE: unused
+ * @IWX_STA_MODIFY_ADD_BA_TID: this command modifies %add_immediate_ba_tid
+ * @IWX_STA_MODIFY_REMOVE_BA_TID: this command modifies %remove_immediate_ba_tid
+ * @IWX_STA_MODIFY_SLEEPING_STA_TX_COUNT: this command modifies %sleep_tx_count
+ * @IWX_STA_MODIFY_PROT_TH:
+ * @IWX_STA_MODIFY_QUEUES: modify the queues used by this station
+ */
+#define IWX_STA_MODIFY_QUEUE_REMOVAL (1 << 0)
+#define IWX_STA_MODIFY_TID_DISABLE_TX (1 << 1)
+#define IWX_STA_MODIFY_TX_RATE (1 << 2)
+#define IWX_STA_MODIFY_ADD_BA_TID (1 << 3)
+#define IWX_STA_MODIFY_REMOVE_BA_TID (1 << 4)
+#define IWX_STA_MODIFY_SLEEPING_STA_TX_COUNT (1 << 5)
+#define IWX_STA_MODIFY_PROT_TH (1 << 6)
+#define IWX_STA_MODIFY_QUEUES (1 << 7)
+
+#define IWX_STA_MODE_MODIFY 1
+
+/**
+ * type of sleep of the station
+ * @IWX_STA_SLEEP_STATE_AWAKE:
+ * @IWX_STA_SLEEP_STATE_PS_POLL:
+ * @IWX_STA_SLEEP_STATE_UAPSD:
+ * @IWX_STA_SLEEP_STATE_MOREDATA: set more-data bit on
+ * (last) released frame
+ */
+#define IWX_STA_SLEEP_STATE_AWAKE 0
+#define IWX_STA_SLEEP_STATE_PS_POLL (1 << 0)
+#define IWX_STA_SLEEP_STATE_UAPSD (1 << 1)
+#define IWX_STA_SLEEP_STATE_MOREDATA (1 << 2)
+
+/* STA ID and color bits definitions */
+#define IWX_STA_ID_SEED (0x0f)
+#define IWX_STA_ID_POS (0)
+#define IWX_STA_ID_MSK (IWX_STA_ID_SEED << IWX_STA_ID_POS)
+
+#define IWX_STA_COLOR_SEED (0x7)
+#define IWX_STA_COLOR_POS (4)
+#define IWX_STA_COLOR_MSK (IWX_STA_COLOR_SEED << IWX_STA_COLOR_POS)
+
+#define IWX_STA_ID_N_COLOR_GET_COLOR(id_n_color) \
+ (((id_n_color) & IWX_STA_COLOR_MSK) >> IWX_STA_COLOR_POS)
+#define IWX_STA_ID_N_COLOR_GET_ID(id_n_color) \
+ (((id_n_color) & IWX_STA_ID_MSK) >> IWX_STA_ID_POS)
+
+#define IWX_STA_KEY_MAX_NUM (16)
+#define IWX_STA_KEY_IDX_INVALID (0xff)
+#define IWX_STA_KEY_MAX_DATA_KEY_NUM (4)
+#define IWX_MAX_GLOBAL_KEYS (4)
+#define IWX_STA_KEY_LEN_WEP40 (5)
+#define IWX_STA_KEY_LEN_WEP104 (13)
+
+/**
+ * struct iwx_keyinfo - key information
+ * @key_flags: type %iwx_sta_key_flag
+ * @tkip_rx_tsc_byte2: TSC[2] for key mix ph1 detection
+ * @tkip_rx_ttak: 10-byte unicast TKIP TTAK for Rx
+ * @key_offset: key offset in the fw's key table
+ * @key: 16-byte unicast decryption key
+ * @tx_secur_seq_cnt: initial RSC / PN needed for replay check
+ * @hw_tkip_mic_rx_key: byte: MIC Rx Key - used for TKIP only
+ * @hw_tkip_mic_tx_key: byte: MIC Tx Key - used for TKIP only
+ */
+struct iwx_keyinfo {
+ uint16_t key_flags;
+ uint8_t tkip_rx_tsc_byte2;
+ uint8_t reserved1;
+ uint16_t tkip_rx_ttak[5];
+ uint8_t key_offset;
+ uint8_t reserved2;
+ uint8_t key[16];
+ uint64_t tx_secur_seq_cnt;
+ uint64_t hw_tkip_mic_rx_key;
+ uint64_t hw_tkip_mic_tx_key;
+} __packed;
+
+#define IWX_ADD_STA_STATUS_MASK 0xFF
+#define IWX_ADD_STA_BAID_VALID_MASK 0x8000
+#define IWX_ADD_STA_BAID_MASK 0x7F00
+#define IWX_ADD_STA_BAID_SHIFT 8
+
+/**
+ * struct iwx_add_sta_cmd - Add/modify a station in the fw's sta table.
+ * ( REPLY_ADD_STA = 0x18 )
+ * @add_modify: see &enum iwl_sta_mode
+ * @awake_acs: ACs to transmit data on while station is sleeping (for U-APSD)
+ * @tid_disable_tx: is tid BIT(tid) enabled for Tx. Clear BIT(x) to enable
+ * AMPDU for tid x. Set %STA_MODIFY_TID_DISABLE_TX to change this field.
+ * @mac_id_n_color: the Mac context this station belongs to,
+ * see &enum iwl_ctxt_id_and_color
+ * @addr: station's MAC address
+ * @reserved2: reserved
+ * @sta_id: index of station in uCode's station table
+ * @modify_mask: STA_MODIFY_*, selects which parameters to modify vs. leave
+ * alone. 1 - modify, 0 - don't change.
+ * @reserved3: reserved
+ * @station_flags: look at &enum iwl_sta_flags
+ * @station_flags_msk: what of %station_flags have changed,
+ * also &enum iwl_sta_flags
+ * @add_immediate_ba_tid: tid for which to add block-ack support (Rx)
+ * Set %STA_MODIFY_ADD_BA_TID to use this field, and also set
+ * add_immediate_ba_ssn.
+ * @remove_immediate_ba_tid: tid for which to remove block-ack support (Rx)
+ * Set %STA_MODIFY_REMOVE_BA_TID to use this field
+ * @add_immediate_ba_ssn: ssn for the Rx block-ack session. Used together with
+ * add_immediate_ba_tid.
+ * @sleep_tx_count: number of packets to transmit to station even though it is
+ * asleep. Used to synchronise PS-poll and u-APSD responses while ucode
+ * keeps track of STA sleep state.
+ * @station_type: type of this station. See &enum iwl_sta_type.
+ * @sleep_state_flags: Look at &enum iwl_sta_sleep_flag.
+ * @assoc_id: assoc_id to be sent in VHT PLCP (9-bit), for grp use 0, for AP
+ * mac-addr.
+ * @beamform_flags: beam forming controls
+ * @tfd_queue_msk: tfd queues used by this station.
+ * Obsolete for new TX API (9 and above).
+ * @rx_ba_window: aggregation window size
+ * @sp_length: the size of the SP in actual number of frames
+ * @uapsd_acs: 4 LS bits are trigger enabled ACs, 4 MS bits are the deliver
+ * enabled ACs.
+ *
+ * The device contains an internal table of per-station information, with info
+ * on security keys, aggregation parameters, and Tx rates for initial Tx
+ * attempt and any retries (set by REPLY_TX_LINK_QUALITY_CMD).
+ *
+ * ADD_STA sets up the table entry for one station, either creating a new
+ * entry, or modifying a pre-existing one.
+ */
+struct iwx_add_sta_cmd {
+ uint8_t add_modify;
+ uint8_t awake_acs;
+ uint16_t tid_disable_tx;
+ uint32_t mac_id_n_color;
+ uint8_t addr[ETHER_ADDR_LEN]; /* _STA_ID_MODIFY_INFO_API_S_VER_1 */
+ uint16_t reserved2;
+ uint8_t sta_id;
+ uint8_t modify_mask;
+ uint16_t reserved3;
+ uint32_t station_flags;
+ uint32_t station_flags_msk;
+ uint8_t add_immediate_ba_tid;
+ uint8_t remove_immediate_ba_tid;
+ uint16_t add_immediate_ba_ssn;
+ uint16_t sleep_tx_count;
+ uint8_t sleep_state_flags;
+ uint8_t station_type;
+ uint16_t assoc_id;
+ uint16_t beamform_flags;
+ uint32_t tfd_queue_msk;
+ uint16_t rx_ba_window;
+ uint8_t sp_length;
+ uint8_t uapsd_acs;
+} __packed; /* ADD_STA_CMD_API_S_VER_10 */
+
+/**
+ * FW station types
+ * ( REPLY_ADD_STA = 0x18 )
+ * @IWX_STA_LINK: Link station - normal RX and TX traffic.
+ * @IWX_STA_GENERAL_PURPOSE: General purpose. In AP mode used for beacons
+ * and probe responses.
+ * @IWX_STA_MULTICAST: multicast traffic,
+ * @IWX_STA_TDLS_LINK: TDLS link station
+ * @IWX_STA_AUX_ACTIVITY: auxiliary station (scan, ROC and so on).
+ */
+#define IWX_STA_LINK 0
+#define IWX_STA_GENERAL_PURPOSE 1
+#define IWX_STA_MULTICAST 2
+#define IWX_STA_TDLS_LINK 3
+#define IWX_STA_AUX_ACTIVITY 4
+
+/**
+ * struct iwx_add_sta_key_common - add/modify sta key common part
+ * ( REPLY_ADD_STA_KEY = 0x17 )
+ * @sta_id: index of station in uCode's station table
+ * @key_offset: key offset in key storage
+ * @key_flags: IWX_STA_KEY_FLG_*
+ * @key: key material data
+ * @rx_secur_seq_cnt: RX security sequence counter for the key
+ */
+struct iwx_add_sta_key_common {
+ uint8_t sta_id;
+ uint8_t key_offset;
+ uint16_t key_flags;
+ uint8_t key[32];
+ uint8_t rx_secur_seq_cnt[16];
+} __packed;
+
+/**
+ * struct iwx_add_sta_key_cmd_v1 - add/modify sta key
+ * @common: see &struct iwx_add_sta_key_common
+ * @tkip_rx_tsc_byte2: TSC[2] for key mix ph1 detection
+ * @reserved: reserved
+ * @tkip_rx_ttak: 10-byte unicast TKIP TTAK for Rx
+ */
+struct iwx_add_sta_key_cmd_v1 {
+ struct iwx_add_sta_key_common common;
+ uint8_t tkip_rx_tsc_byte2;
+ uint8_t reserved;
+ uint16_t tkip_rx_ttak[5];
+} __packed; /* ADD_MODIFY_STA_KEY_API_S_VER_1 */
+
+/**
+ * struct iwx_add_sta_key_cmd - add/modify sta key
+ * @common: see &struct iwx_add_sta_key_common
+ * @rx_mic_key: TKIP RX unicast or multicast key
+ * @tx_mic_key: TKIP TX key
+ * @transmit_seq_cnt: TSC, transmit packet number
+ */
+struct iwx_add_sta_key_cmd {
+ struct iwx_add_sta_key_common common;
+ uint64_t rx_mic_key;
+ uint64_t tx_mic_key;
+ uint64_t transmit_seq_cnt;
+} __packed; /* ADD_MODIFY_STA_KEY_API_S_VER_2 */
+
+/**
+ * status in the response to ADD_STA command
+ * @IWX_ADD_STA_SUCCESS: operation was executed successfully
+ * @IWX_ADD_STA_STATIONS_OVERLOAD: no room left in the fw's station table
+ * @IWX_ADD_STA_IMMEDIATE_BA_FAILURE: can't add Rx block ack session
+ * @IWX_ADD_STA_MODIFY_NON_EXISTING_STA: driver requested to modify a station
+ * that doesn't exist.
+ */
+#define IWX_ADD_STA_SUCCESS 0x1
+#define IWX_ADD_STA_STATIONS_OVERLOAD 0x2
+#define IWX_ADD_STA_IMMEDIATE_BA_FAILURE 0x4
+#define IWX_ADD_STA_MODIFY_NON_EXISTING_STA 0x8
+
+/**
+ * struct iwx_rm_sta_cmd - Remove a station from the fw's station table
+ * ( IWX_REMOVE_STA = 0x19 )
+ * @sta_id: the station id of the station to be removed
+ */
+struct iwx_rm_sta_cmd {
+ uint8_t sta_id;
+ uint8_t reserved[3];
+} __packed; /* IWX_REMOVE_STA_CMD_API_S_VER_2 */
+
+/**
+ * struct iwx_mgmt_mcast_key_cmd
+ * ( IWX_MGMT_MCAST_KEY = 0x1f )
+ * @ctrl_flags: %iwx_sta_key_flag
+ * @IGTK:
+ * @K1: IGTK master key
+ * @K2: IGTK sub key
+ * @sta_id: station ID that support IGTK
+ * @key_id:
+ * @receive_seq_cnt: initial RSC/PN needed for replay check
+ */
+struct iwx_mgmt_mcast_key_cmd {
+ uint32_t ctrl_flags;
+ uint8_t IGTK[16];
+ uint8_t K1[16];
+ uint8_t K2[16];
+ uint32_t key_id;
+ uint32_t sta_id;
+ uint64_t receive_seq_cnt;
+} __packed; /* SEC_MGMT_MULTICAST_KEY_CMD_API_S_VER_1 */
+
+struct iwx_wep_key {
+ uint8_t key_index;
+ uint8_t key_offset;
+ uint16_t reserved1;
+ uint8_t key_size;
+ uint8_t reserved2[3];
+ uint8_t key[16];
+} __packed;
+
+struct iwx_wep_key_cmd {
+ uint32_t mac_id_n_color;
+ uint8_t num_keys;
+ uint8_t decryption_type;
+ uint8_t flags;
+ uint8_t reserved;
+ struct iwx_wep_key wep_key[0];
+} __packed; /* SEC_CURR_WEP_KEY_CMD_API_S_VER_2 */
+
+/*
+ * BT coex
+ */
+
+#define IWX_BT_COEX_DISABLE 0x0
+#define IWX_BT_COEX_NW 0x1
+#define IWX_BT_COEX_BT 0x2
+#define IWX_BT_COEX_WIFI 0x3
+/* BT_COEX_MODES_E */
+
+#define IWX_BT_COEX_MPLUT_ENABLED (1 << 0)
+#define IWX_BT_COEX_MPLUT_BOOST_ENABLED (1 << 1)
+#define IWX_BT_COEX_SYNC2SCO_ENABLED (1 << 2)
+#define IWX_BT_COEX_CORUN_ENABLED (1 << 3)
+#define IWX_BT_COEX_HIGH_BAND_RET (1 << 4)
+/* BT_COEX_MODULES_ENABLE_E_VER_1 */
+
+enum iwx_bt_coex_enabled_modules {
+ BT_COEX_DISABLE = 1 << 0,
+ BT_COEX_MPLUT_BOOST_ENABLED = 1 << 1,
+ BT_COEX_SYNC2SCO_ENABLED = 1 << 2,
+ BT_COEX_CORUN_ENABLED = 1 << 3,
+ BT_COEX_HIGH_BAND_RET = 1 << 4,
+};
+
+/**
+ * struct iwx_bt_coex_cmd - bt coex configuration command
+ * @mode: enum %iwx_bt_coex_mode
+ * @enabled_modules: enum %iwx_bt_coex_enabled_modules
+ *
+ * The structure is used for the BT_COEX command.
+ */
+struct iwx_bt_coex_cmd {
+ uint32_t mode;
+ uint32_t enabled_modules;
+} __packed; /* BT_COEX_CMD_API_S_VER_6 */
+
+
+/*
+ * Location Aware Regulatory (LAR) API - MCC updates
+ */
+
+/**
+ * struct iwx_mcc_update_cmd - Request the device to update geographic
+ * regulatory profile according to the given MCC (Mobile Country Code).
+ * The MCC is a two-letter code, ASCII upper case [A-Z], or '00' for world domain.
+ * 'ZZ' MCC will be used to switch to NVM default profile; in this case, the
+ * MCC in the cmd response will be the relevant MCC in the NVM.
+ * @mcc: given mobile country code
+ * @source_id: the source from where we got the MCC, see IWX_MCC_SOURCE_*
+ * @reserved: reserved for alignment
+ * @key: integrity key for MCC API OEM testing
+ * @reserved2: reserved
+ */
+struct iwx_mcc_update_cmd {
+ uint16_t mcc;
+ uint8_t source_id;
+ uint8_t reserved;
+ uint32_t key;
+ uint32_t reserved2[5];
+} __packed; /* LAR_UPDATE_MCC_CMD_API_S_VER_2 */
+
+/**
+ * iwx_mcc_update_resp_v3 - response to MCC_UPDATE_CMD.
+ * Contains the new channel control profile map, if changed, and the new MCC
+ * (mobile country code).
+ * The new MCC may be different than what was requested in MCC_UPDATE_CMD.
+ * @status: see &enum iwx_mcc_update_status
+ * @mcc: the new applied MCC
+ * @cap: capabilities for all channels which matches the MCC
+ * @source_id: the MCC source, see IWX_MCC_SOURCE_*
+ * @time: time elapsed from the MCC test start (in 30 seconds TU)
+ * @reserved: reserved.
+ * @n_channels: number of channels in @channels_data (may be 14, 39, 50 or 51
+ * channels, depending on platform)
+ * @channels: channel control data map, DWORD for each channel. Only the first
+ * 16bits are used.
+ */
+struct iwx_mcc_update_resp_v3 {
+	uint32_t status;
+	uint16_t mcc;
+	uint8_t cap;
+	uint8_t source_id;
+	uint16_t time;
+	uint16_t geo_info;	/* see IWX_GEO_* flags below */
+	uint32_t n_channels;
+	uint32_t channels[];	/* C99 flexible array member, n_channels entries */
+} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_3 */
+
+/**
+ * geographic information.
+ * @GEO_NO_INFO: no special info for this geo profile.
+ * @GEO_WMM_ETSI_5GHZ_INFO: this geo profile limits the WMM params
+ * for the 5 GHz band.
+ */
+#define IWX_GEO_NO_INFO 0
+#define IWX_GEO_WMM_ETSI_5GHZ_INFO (1 << 0)
+
+/**
+ * struct iwx_mcc_update_resp - response to MCC_UPDATE_CMD.
+ * Contains the new channel control profile map, if changed, and the new MCC
+ * (mobile country code).
+ * The new MCC may be different than what was requested in MCC_UPDATE_CMD.
+ * @status: see &enum iwl_mcc_update_status
+ * @mcc: the new applied MCC
+ * @cap: capabilities for all channels which matches the MCC
+ * @time: time elapsed from the MCC test start (in units of 30 seconds)
+ * @geo_info: geographic specific profile information
+ * see IWX_GEO_*
+ * @source_id: the MCC source, see IWX_MCC_SOURCE_*
+ * @reserved: for four bytes alignment.
+ * @n_channels: number of channels in @channels_data.
+ * @channels: channel control data map, DWORD for each channel. Only the first
+ * 16bits are used.
+ */
+struct iwx_mcc_update_resp {
+	uint32_t status;
+	uint16_t mcc;
+	uint16_t cap;
+	uint16_t time;
+	uint16_t geo_info;
+	uint8_t source_id;
+	uint8_t reserved[3];
+	uint32_t n_channels;
+	uint32_t channels[];	/* C99 flexible array member, n_channels entries */
+} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_4 */
+
+/**
+ * struct iwx_mcc_chub_notif - chub notifies of mcc change
+ * (MCC_CHUB_UPDATE_CMD = 0xc9)
+ * The Chub (Communication Hub, CommsHUB) is a HW component that connects to
+ * the cellular and connectivity cores that gets updates of the mcc, and
+ * notifies the ucode directly of any mcc change.
+ * The ucode requests the driver to request the device to update geographic
+ * regulatory profile according to the given MCC (Mobile Country Code).
+ * The MCC is two letter-code, ascii upper case[A-Z] or '00' for world domain.
+ * 'ZZ' MCC will be used to switch to NVM default profile; in this case, the
+ * MCC in the cmd response will be the relevant MCC in the NVM.
+ * @mcc: given mobile country code
+ * @source_id: identity of the change originator, see IWX_MCC_SOURCE_*
+ * @reserved1: reserved for alignment
+ */
+struct iwx_mcc_chub_notif {
+ uint16_t mcc;
+ uint8_t source_id;
+ uint8_t reserved1;
+} __packed; /* LAR_MCC_NOTIFY_S */
+
+enum iwx_mcc_update_status {
+ IWX_MCC_RESP_NEW_CHAN_PROFILE,
+ IWX_MCC_RESP_SAME_CHAN_PROFILE,
+ IWX_MCC_RESP_INVALID,
+ IWX_MCC_RESP_NVM_DISABLED,
+ IWX_MCC_RESP_ILLEGAL,
+ IWX_MCC_RESP_LOW_PRIORITY,
+ IWX_MCC_RESP_TEST_MODE_ACTIVE,
+ IWX_MCC_RESP_TEST_MODE_NOT_ACTIVE,
+ IWX_MCC_RESP_TEST_MODE_DENIAL_OF_SERVICE,
+};
+
+#define IWX_MCC_SOURCE_OLD_FW 0
+#define IWX_MCC_SOURCE_ME 1
+#define IWX_MCC_SOURCE_BIOS 2
+#define IWX_MCC_SOURCE_3G_LTE_HOST 3
+#define IWX_MCC_SOURCE_3G_LTE_DEVICE 4
+#define IWX_MCC_SOURCE_WIFI 5
+#define IWX_MCC_SOURCE_RESERVED 6
+#define IWX_MCC_SOURCE_DEFAULT 7
+#define IWX_MCC_SOURCE_UNINITIALIZED 8
+#define IWX_MCC_SOURCE_MCC_API 9
+#define IWX_MCC_SOURCE_GET_CURRENT 0x10
+#define IWX_MCC_SOURCE_GETTING_MCC_TEST_MODE 0x11
+
+/*
+ * From Linux commit ab02165ccec4c78162501acedeef1a768acdb811:
+ * As the firmware is slowly running out of command IDs and grouping of
+ * commands is desirable anyway, the firmware is extending the command
+ * header from 4 bytes to 8 bytes to introduce a group (in place of the
+ * former flags field, since that's always 0 on commands and thus can
+ * be easily used to distinguish between the two).
+ *
+ * These functions retrieve specific information from the id field in
+ * the iwx_host_cmd struct which contains the command id, the group id,
+ * and the version of the command.
+*/
+static inline uint8_t
+iwx_cmd_opcode(uint32_t cmdid)
+{
+	return cmdid & 0xff;	/* bits 7:0 of the wide command id */
+}
+
+static inline uint8_t
+iwx_cmd_groupid(uint32_t cmdid)
+{
+	return ((cmdid & 0xff00) >> 8);	/* bits 15:8 of the wide command id */
+}
+
+static inline uint8_t
+iwx_cmd_version(uint32_t cmdid)
+{
+	return ((cmdid & 0xff0000) >> 16);	/* bits 23:16 of the wide command id */
+}
+
+static inline uint32_t
+iwx_cmd_id(uint8_t opcode, uint8_t groupid, uint8_t version)
+{
+	return opcode + (groupid << 8) + (version << 16);	/* inverse of the three accessors above */
+}
+
+/* make uint16_t wide id out of uint8_t group and opcode */
+#define IWX_WIDE_ID(grp, opcode) (((grp) << 8) | (opcode))
+
+struct iwx_cmd_header {
+	uint8_t code;	/* command opcode */
+	uint8_t flags;	/* always 0 on commands; nonzero marks the wide header */
+	uint8_t idx;	/* NOTE(review): presumably slot index within the queue -- confirm in tx path */
+	uint8_t qid;	/* NOTE(review): presumably issuing queue id -- confirm in tx path */
+} __packed;
+
+struct iwx_cmd_header_wide {
+	uint8_t opcode;		/* command opcode within group_id */
+	uint8_t group_id;	/* command group; occupies the old 'flags' byte */
+	uint8_t idx;
+	uint8_t qid;
+	uint16_t length;	/* NOTE(review): presumably payload length -- verify against firmware API */
+	uint8_t reserved;
+	uint8_t version;	/* command version, see iwx_cmd_version() */
+} __packed;
+
+#define IWX_POWER_SCHEME_CAM 1
+#define IWX_POWER_SCHEME_BPS 2
+#define IWX_POWER_SCHEME_LP 3
+
+#define IWX_DEF_CMD_PAYLOAD_SIZE 320
+#define IWX_MAX_CMD_PAYLOAD_SIZE (4096 - sizeof(struct iwx_cmd_header_wide))
+#define IWX_CMD_FAILED_MSK 0x40
+
+/**
+ * struct iwx_device_cmd
+ *
+ * For allocation of the command and tx queues, this establishes the overall
+ * size of the largest command we send to uCode, except for commands that
+ * aren't fully copied and use other TFD space.
+ */
+struct iwx_device_cmd {
+	union {
+		struct {
+			struct iwx_cmd_header hdr;	/* 4-byte legacy header */
+			uint8_t data[IWX_DEF_CMD_PAYLOAD_SIZE];
+		};
+		struct {
+			struct iwx_cmd_header_wide hdr_wide;	/* 8-byte wide header */
+			uint8_t data_wide[IWX_DEF_CMD_PAYLOAD_SIZE -
+					sizeof(struct iwx_cmd_header_wide) +
+					sizeof(struct iwx_cmd_header)];	/* sized so both union arms occupy the same total */
+		};
+	};
+} __packed;
+
+struct iwx_rx_packet {
+ /*
+ * The first 4 bytes of the RX frame header contain both the RX frame
+ * size and some flags.
+ * Bit fields:
+ * 31: flag flush RB request
+ * 30: flag ignore TC (terminal counter) request
+ * 29: flag fast IRQ request
+ * 28-26: Reserved
+ * 25: Offload enabled
+ * 24: RPF enabled
+ * 23: RSS enabled
+ * 22: Checksum enabled
+ * 21-16: RX queue
+ * 15-14: Reserved
+ * 13-00: RX frame size
+ */
+ uint32_t len_n_flags;
+ struct iwx_cmd_header hdr;
+ uint8_t data[];
+} __packed;
+
+#define IWX_FH_RSCSR_FRAME_SIZE_MSK 0x00003fff
+#define IWX_FH_RSCSR_FRAME_INVALID 0x55550000
+#define IWX_FH_RSCSR_FRAME_ALIGN 0x40
+#define IWX_FH_RSCSR_RPA_EN (1 << 25)
+#define IWX_FH_RSCSR_RADA_EN (1 << 26)
+#define IWX_FH_RSCSR_RXQ_POS 16
+#define IWX_FH_RSCSR_RXQ_MASK 0x3F0000
+
+static uint32_t
+iwx_rx_packet_len(const struct iwx_rx_packet *pkt)
+{
+
+	return le32toh(pkt->len_n_flags) & IWX_FH_RSCSR_FRAME_SIZE_MSK;	/* bits 13:0 = RX frame size */
+}
+
+static uint32_t
+iwx_rx_packet_payload_len(const struct iwx_rx_packet *pkt)
+{
+
+	return iwx_rx_packet_len(pkt) - sizeof(pkt->hdr);	/* frame size minus the 4-byte iwx_cmd_header */
+}
+
+
+#define IWX_MIN_DBM -100
+#define IWX_MAX_DBM -33 /* realistic guess */
+
+#define IWX_READ(sc, reg) \
+ bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
+
+//#define IWX_WRITE(sc, reg, val) \
+// printf("=== WRITE %s: %x %zx\n", __func__, reg, (unsigned long)(val)); \
+// bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
+
+#define IWX_WRITE(sc, reg, val) \
+ bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
+
+#define IWX_WRITE_1(sc, reg, val) \
+ bus_space_write_1((sc)->sc_st, (sc)->sc_sh, (reg), (val))
+
+//#define IWX_SETBITS(sc, reg, mask) { \
+// uint32_t val = IWX_READ(sc, reg); \
+// printf("=== READ %s: %x %x\n", __func__, (reg), (val)); \
+// IWX_WRITE(sc, reg, IWX_READ(sc, reg) | (mask)); }
+#define IWX_SETBITS(sc, reg, mask) { \
+ IWX_WRITE(sc, reg, IWX_READ(sc, reg) | (mask)); }
+
+#define IWX_CLRBITS(sc, reg, mask) \
+ IWX_WRITE(sc, reg, IWX_READ(sc, reg) & ~(mask))
+
+#define IWX_BARRIER_WRITE(sc) \
+ bus_space_barrier((sc)->sc_st, (sc)->sc_sh, 0, (sc)->sc_sz, \
+ BUS_SPACE_BARRIER_WRITE)
+
+#define IWX_BARRIER_READ_WRITE(sc) \
+ bus_space_barrier((sc)->sc_st, (sc)->sc_sh, 0, (sc)->sc_sz, \
+ BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE)
diff --git a/sys/dev/iwx/if_iwxvar.h b/sys/dev/iwx/if_iwxvar.h
new file mode 100644
--- /dev/null
+++ b/sys/dev/iwx/if_iwxvar.h
@@ -0,0 +1,936 @@
+/* $OpenBSD: if_iwxvar.h,v 1.41 2023/03/06 11:53:24 stsp Exp $ */
+
+/*
+ * Copyright (c) 2014 genua mbh <info@genua.de>
+ * Copyright (c) 2014 Fixup Software Ltd.
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*-
+ * Based on BSD-licensed source modules in the Linux iwlwifi driver,
+ * which were used as the reference documentation for this implementation.
+ *
+ ******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 - 2019 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 - 2019 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************
+ */
+
+/*-
+ * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+struct iwx_rx_radiotap_header {
+ struct ieee80211_radiotap_header wr_ihdr;
+ uint64_t wr_tsft;
+ uint8_t wr_flags;
+ uint8_t wr_rate;
+ uint16_t wr_chan_freq;
+ uint16_t wr_chan_flags;
+ int8_t wr_dbm_antsignal;
+ int8_t wr_dbm_antnoise;
+} __packed;
+
+#define IWX_RX_RADIOTAP_PRESENT \
+ ((1 << IEEE80211_RADIOTAP_TSFT) | \
+ (1 << IEEE80211_RADIOTAP_FLAGS) | \
+ (1 << IEEE80211_RADIOTAP_RATE) | \
+ (1 << IEEE80211_RADIOTAP_CHANNEL) | \
+ (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) | \
+ (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE))
+
+struct iwx_tx_radiotap_header {
+ struct ieee80211_radiotap_header wt_ihdr;
+ uint8_t wt_flags;
+ uint8_t wt_rate;
+ uint16_t wt_chan_freq;
+ uint16_t wt_chan_flags;
+} __packed;
+
+#define IWX_TX_RADIOTAP_PRESENT \
+ ((1 << IEEE80211_RADIOTAP_FLAGS) | \
+ (1 << IEEE80211_RADIOTAP_RATE) | \
+ (1 << IEEE80211_RADIOTAP_CHANNEL))
+
+#define IWX_UCODE_SECT_MAX 57
+
+/*
+ * fw_status is used to determine if we've already parsed the firmware file
+ *
+ * In addition to the following, status < 0 ==> -error
+ */
+#define IWX_FW_STATUS_NONE 0
+#define IWX_FW_STATUS_INPROGRESS 1
+#define IWX_FW_STATUS_DONE 2
+
+enum iwx_ucode_type {
+ IWX_UCODE_TYPE_REGULAR,
+ IWX_UCODE_TYPE_INIT,
+ IWX_UCODE_TYPE_WOW,
+ IWX_UCODE_TYPE_REGULAR_USNIFFER,
+ IWX_UCODE_TYPE_MAX
+};
+
+struct iwx_fw_info {
+ void *fw_rawdata;
+ size_t fw_rawsize;
+ int fw_status;
+
+ struct iwx_fw_sects {
+ struct iwx_fw_onesect {
+ const void *fws_data;
+ uint32_t fws_len;
+ uint32_t fws_devoff;
+ } fw_sect[IWX_UCODE_SECT_MAX];
+ size_t fw_totlen;
+ int fw_count;
+ } fw_sects[IWX_UCODE_TYPE_MAX];
+
+ /* FW debug data parsed for driver usage */
+ int dbg_dest_tlv_init;
+ const uint8_t *dbg_dest_ver;
+ uint8_t n_dest_reg;
+ const struct iwx_fw_dbg_dest_tlv_v1 *dbg_dest_tlv_v1;
+
+ const struct iwx_fw_dbg_conf_tlv *dbg_conf_tlv[IWX_FW_DBG_CONF_MAX];
+ size_t dbg_conf_tlv_len[IWX_FW_DBG_CONF_MAX];
+ struct iwx_fw_dbg_trigger_tlv *dbg_trigger_tlv[IWX_FW_DBG_TRIGGER_MAX];
+ size_t dbg_trigger_tlv_len[IWX_FW_DBG_TRIGGER_MAX];
+ struct iwx_fw_dbg_mem_seg_tlv *dbg_mem_tlv;
+ size_t n_mem_tlv;
+
+ /* Copy of firmware image loader found in file. */
+ uint8_t *iml;
+ size_t iml_len;
+};
+
+struct iwx_nvm_data {
+ int n_hw_addrs;
+ uint8_t hw_addr[ETHER_ADDR_LEN];
+
+ int sku_cap_band_24GHz_enable;
+ int sku_cap_band_52GHz_enable;
+ int sku_cap_11n_enable;
+ int sku_cap_11ac_enable;
+ int sku_cap_11ax_enable;
+ int sku_cap_amt_enable;
+ int sku_cap_ipan_enable;
+ int sku_cap_mimo_disable;
+ int lar_enabled;
+
+ uint8_t valid_tx_ant, valid_rx_ant;
+
+ uint16_t nvm_version;
+};
+
+/* max bufs per tfd the driver will use */
+#define IWX_MAX_CMD_TBS_PER_TFD 2
+
+struct iwx_host_cmd {
+ const void *data[IWX_MAX_CMD_TBS_PER_TFD];
+ struct iwx_rx_packet *resp_pkt;
+ size_t resp_pkt_len;
+ unsigned long _rx_page_addr;
+ uint32_t _rx_page_order;
+ int handler_status;
+
+ uint32_t flags;
+ uint16_t len[IWX_MAX_CMD_TBS_PER_TFD];
+ uint8_t dataflags[IWX_MAX_CMD_TBS_PER_TFD];
+ uint32_t id;
+};
+
+/*
+ * DMA glue is from iwn
+ */
+
+struct iwx_dma_info {
+ bus_dma_tag_t tag;
+ bus_dmamap_t map;
+ bus_dma_segment_t seg;
+ bus_addr_t paddr;
+ void *vaddr;
+ bus_size_t size;
+};
+
+#define IWX_TX_RING_COUNT IWX_DEFAULT_QUEUE_SIZE
+#define IWX_TX_RING_LOMARK 192
+#define IWX_TX_RING_HIMARK 195
+//#define IWX_TX_RING_HIMARK 224
+
+struct iwx_tx_data {
+ bus_dmamap_t map;
+ bus_addr_t cmd_paddr;
+ struct mbuf *m;
+ struct iwx_node *in;
+ int flags;
+#define IWX_TXDATA_FLAG_CMD_IS_NARROW 0x01
+};
+
+struct iwx_tx_ring {
+ struct iwx_dma_info desc_dma;
+ struct iwx_dma_info cmd_dma;
+ struct iwx_dma_info bc_tbl;
+ struct iwx_tfh_tfd *desc;
+ struct iwx_device_cmd *cmd;
+ struct iwx_tx_data data[IWX_TX_RING_COUNT];
+ int qid;
+ int queued;
+ int cur;
+ int cur_hw;
+ int tail;
+ int tail_hw;
+ int tid;
+ bus_dma_tag_t data_dmat;
+};
+
+#define IWX_RX_MQ_RING_COUNT 512
+/* Linux driver optionally uses 8k buffer */
+#define IWX_RBUF_SIZE 4096
+
+struct iwx_rx_data {
+ struct mbuf *m;
+ bus_dmamap_t map;
+};
+
+struct iwx_rx_ring {
+ struct iwx_dma_info free_desc_dma;
+ struct iwx_dma_info stat_dma;
+ struct iwx_dma_info used_desc_dma;
+ void *desc;
+ struct iwx_rb_status *stat;
+ struct iwx_rx_data data[IWX_RX_MQ_RING_COUNT];
+ int cur;
+ bus_dma_tag_t data_dmat;
+};
+
+#define IWX_FLAG_USE_ICT 0x01 /* using Interrupt Cause Table */
+#define IWX_FLAG_RFKILL 0x02 /* radio kill switch is set */
+#define IWX_FLAG_SCANNING 0x04 /* scan in progress */
+#define IWX_FLAG_MAC_ACTIVE 0x08 /* MAC context added to firmware */
+#define IWX_FLAG_BINDING_ACTIVE 0x10 /* MAC->PHY binding added to firmware */
+#define IWX_FLAG_STA_ACTIVE 0x20 /* AP added to firmware station table */
+#define IWX_FLAG_TE_ACTIVE 0x40 /* time event is scheduled */
+#define IWX_FLAG_HW_ERR 0x80 /* hardware error occurred */
+#define IWX_FLAG_SHUTDOWN 0x100 /* shutting down; new tasks forbidden */
+#define IWX_FLAG_BGSCAN 0x200 /* background scan in progress */
+#define IWX_FLAG_TXFLUSH 0x400 /* Tx queue flushing in progress */
+#define IWX_FLAG_HW_INITED 0x800 /* Hardware initialized */
+#define IWX_FLAG_AMPDUTX 0x1000
+
+struct iwx_ucode_status {
+ uint32_t uc_lmac_error_event_table[2];
+ uint32_t uc_umac_error_event_table;
+ uint32_t uc_log_event_table;
+ unsigned int error_event_table_tlv_status;
+
+ int uc_ok;
+ int uc_intr;
+};
+
+#define IWX_ERROR_EVENT_TABLE_LMAC1 (1 << 0)
+#define IWX_ERROR_EVENT_TABLE_LMAC2 (1 << 1)
+#define IWX_ERROR_EVENT_TABLE_UMAC (1 << 2)
+
+#define IWX_CMD_RESP_MAX PAGE_SIZE
+
+/* lower blocks contain EEPROM image and calibration data */
+#define IWX_OTP_LOW_IMAGE_SIZE_FAMILY_7000 16384
+#define IWX_OTP_LOW_IMAGE_SIZE_FAMILY_8000 32768
+
+#define IWX_TE_SESSION_PROTECTION_MAX_TIME_MS 1000
+#define IWX_TE_SESSION_PROTECTION_MIN_TIME_MS 400
+
+enum IWX_CMD_MODE {
+ IWX_CMD_ASYNC = (1 << 0),
+ IWX_CMD_WANT_RESP = (1 << 1),
+ IWX_CMD_SEND_IN_RFKILL = (1 << 2),
+};
+enum iwx_hcmd_dataflag {
+ IWX_HCMD_DFL_NOCOPY = (1 << 0),
+ IWX_HCMD_DFL_DUP = (1 << 1),
+};
+
+#define IWX_NUM_PAPD_CH_GROUPS 9
+#define IWX_NUM_TXP_CH_GROUPS 9
+
+struct iwx_phy_ctxt {
+ uint16_t id;
+ uint16_t color;
+ uint32_t ref;
+ struct ieee80211_channel *channel;
+ uint8_t sco; /* 40 MHz secondary channel offset */
+ uint8_t vht_chan_width;
+};
+
+struct iwx_bf_data {
+ int bf_enabled; /* filtering */
+ int ba_enabled; /* abort */
+ int ave_beacon_signal;
+ int last_cqm_event;
+};
+
+/**
+ * struct iwx_self_init_dram - dram data used by self init process
+ * @fw: lmac and umac dram data
+ * @lmac_cnt: number of lmac sections in fw image
+ * @umac_cnt: number of umac sections in fw image
+ * @paging: paging dram data
+ * @paging_cnt: number of paging sections needed by fw image
+ */
+struct iwx_self_init_dram {
+ struct iwx_dma_info *fw;
+ int lmac_cnt;
+ int umac_cnt;
+ struct iwx_dma_info *paging;
+ int paging_cnt;
+};
+
+/**
+ * struct iwx_reorder_buffer - per ra/tid/queue reorder buffer
+ * @head_sn: reorder window head sn
+ * @num_stored: number of mpdus stored in the buffer
+ * @buf_size: the reorder buffer size as set by the last addba request
+ * @queue: queue of this reorder buffer
+ * @last_amsdu: track last ASMDU SN for duplication detection
+ * @last_sub_index: track ASMDU sub frame index for duplication detection
+ * @reorder_timer: timer for frames are in the reorder buffer. For AMSDU
+ * it is the time of last received sub-frame
+ * @removed: prevent timer re-arming
+ * @valid: reordering is valid for this queue
+ * @consec_oldsn_drops: consecutive drops due to old SN
+ * @consec_oldsn_ampdu_gp2: A-MPDU GP2 timestamp to track
+ * when to apply old SN consecutive drop workaround
+ * @consec_oldsn_prev_drop: track whether or not an MPDU
+ * that was single/part of the previous A-MPDU was
+ * dropped due to old SN
+ */
+struct iwx_reorder_buffer {
+ uint16_t head_sn;
+ uint16_t num_stored;
+ uint16_t buf_size;
+ uint16_t last_amsdu;
+ uint8_t last_sub_index;
+ struct callout reorder_timer;
+ int removed;
+ int valid;
+ unsigned int consec_oldsn_drops;
+ uint32_t consec_oldsn_ampdu_gp2;
+ unsigned int consec_oldsn_prev_drop;
+#define IWX_AMPDU_CONSEC_DROPS_DELBA 10
+};
+
+/**
+ * struct iwx_reorder_buf_entry - reorder buffer entry per frame sequence number
+ * @frames: list of mbufs stored (A-MSDU subframes share a sequence number)
+ * @reorder_time: time the packet was stored in the reorder buffer
+ */
+struct iwx_reorder_buf_entry {
+ struct mbufq frames;
+ struct timeval reorder_time;
+ uint32_t rx_pkt_status;
+ int chanidx;
+ int is_shortpre;
+ uint32_t rate_n_flags;
+ uint32_t device_timestamp;
+ struct ieee80211_rx_stats rxi;
+};
+
+/**
+ * struct iwx_rxba_data - BA session data
+ * @sta_id: station id
+ * @tid: tid of the session
+ * @baid: baid of the session
+ * @timeout: the timeout set in the addba request
+ * @entries_per_queue: # of buffers per queue
+ * @last_rx: last rx timestamp, updated only if timeout passed from last update
+ * @session_timer: timer to check if BA session expired, runs at 2 * timeout
+ * @sc: softc pointer, needed for timer context
+ * @reorder_buf: reorder buffer
+ * @reorder_buf_data: buffered frames, one entry per sequence number
+ */
+struct iwx_rxba_data {
+ uint8_t sta_id;
+ uint8_t tid;
+ uint8_t baid;
+ uint16_t timeout;
+ uint16_t entries_per_queue;
+ struct timeval last_rx;
+ struct callout session_timer;
+ struct iwx_softc *sc;
+ struct iwx_reorder_buffer reorder_buf;
+ struct iwx_reorder_buf_entry entries[IEEE80211_AGGR_BAWMAX];
+};
+
+static inline struct iwx_rxba_data *
+iwx_rxba_data_from_reorder_buf(struct iwx_reorder_buffer *buf)
+{
+	return (void *)((uint8_t *)buf -
+	    offsetof(struct iwx_rxba_data, reorder_buf));	/* container-of: recover the enclosing iwx_rxba_data */
+}
+
+/**
+ * struct iwx_rxq_dup_data - per station per rx queue data
+ * @last_seq: last sequence per tid for duplicate packet detection
+ * @last_sub_frame: last subframe packet
+ */
+struct iwx_rxq_dup_data {
+ uint16_t last_seq[IWX_MAX_TID_COUNT + 1];
+ uint8_t last_sub_frame[IWX_MAX_TID_COUNT + 1];
+};
+
+struct iwx_setkey_task_arg {
+ int sta_id;
+ struct ieee80211_node *ni;
+ struct ieee80211_key *k;
+};
+
+struct iwx_ba_task_data {
+ uint32_t start_tidmask;
+ uint32_t stop_tidmask;
+};
+
+
+/*
+ * Device configuration parameters which cannot be detected based on
+ * PCI vendor/product ID alone.
+ */
+struct iwx_device_cfg {
+ const char *fw_name;
+ const char *pnvm_name;
+ int tx_with_siso_diversity;
+ int uhb_supported;
+ int xtal_latency;
+ int low_latency_xtal;
+};
+
+/* Firmware listed here must be available in fw_update(8). */
+#define IWX_CC_A_FW "iwx-cc-a0-77"
+#define IWX_TY_A_GF_A_FW "iwx-ty-a0-gf-a0-77"
+#define IWX_TY_A_GF_A_PNVM "iwx-ty-a0-gf-a0.pnvm"
+#define IWX_QU_B_HR_B_FW "iwx-Qu-b0-hr-b0-77"
+#define IWX_QU_B_JF_B_FW "iwx-Qu-b0-jf-b0-77"
+#define IWX_QU_C_HR_B_FW "iwx-Qu-c0-hr-b0-77"
+#define IWX_QU_C_JF_B_FW "iwx-Qu-c0-jf-b0-77"
+#define IWX_QUZ_A_HR_B_FW "iwx-QuZ-a0-hr-b0-77"
+#define IWX_QUZ_A_JF_B_FW "iwx-QuZ-a0-jf-b0-77"
+#define IWX_SO_A_GF_A_FW "iwx-so-a0-gf-a0-77"
+#define IWX_SO_A_GF_A_PNVM "iwx-so-a0-gf-a0.pnvm"
+#define IWX_SO_A_GF4_A_FW "iwx-so-a0-gf4-a0-77"
+#define IWX_SO_A_GF4_A_PNVM "iwx-so-a0-gf4-a0.pnvm"
+#define IWX_SO_A_HR_B_FW "iwx-so-a0-hr-b0-77"
+#define IWX_SO_A_JF_B_FW "iwx-so-a0-jf-b0-77"
+
+const struct iwx_device_cfg iwx_9560_quz_a0_jf_b0_cfg = {
+ .fw_name = IWX_QUZ_A_JF_B_FW,
+};
+
+const struct iwx_device_cfg iwx_9560_qu_c0_jf_b0_cfg = {
+ .fw_name = IWX_QU_C_JF_B_FW,
+};
+
+const struct iwx_device_cfg iwx_qu_b0_hr1_b0 = {
+ .fw_name = IWX_QU_B_HR_B_FW,
+ .tx_with_siso_diversity = true,
+};
+
+const struct iwx_device_cfg iwx_qu_b0_hr_b0 = {
+ .fw_name = IWX_QU_B_HR_B_FW,
+};
+
+const struct iwx_device_cfg iwx_ax201_cfg_qu_hr = {
+ .fw_name = IWX_QU_B_HR_B_FW,
+};
+
+const struct iwx_device_cfg iwx_qu_c0_hr1_b0 = {
+ .fw_name = IWX_QU_C_HR_B_FW,
+ .tx_with_siso_diversity = true,
+};
+
+const struct iwx_device_cfg iwx_qu_c0_hr_b0 = {
+ .fw_name = IWX_QU_C_HR_B_FW,
+};
+
+const struct iwx_device_cfg iwx_ax201_cfg_qu_c0_hr_b0 = {
+ .fw_name = IWX_QU_C_HR_B_FW,
+};
+
+const struct iwx_device_cfg iwx_quz_a0_hr1_b0 = {
+ .fw_name = IWX_QUZ_A_HR_B_FW,
+};
+
+const struct iwx_device_cfg iwx_ax201_cfg_quz_hr = {
+ .fw_name = IWX_QUZ_A_HR_B_FW,
+};
+
+const struct iwx_device_cfg iwx_cfg_so_a0_hr_b0 = {
+ .fw_name = IWX_SO_A_HR_B_FW,
+};
+
+const struct iwx_device_cfg iwx_cfg_quz_a0_hr_b0 = {
+ .fw_name = IWX_QUZ_A_HR_B_FW,
+};
+
+const struct iwx_device_cfg iwx_2ax_cfg_so_gf_a0 = {
+ .fw_name = IWX_SO_A_GF_A_FW,
+ .pnvm_name = IWX_SO_A_GF_A_PNVM,
+ .uhb_supported = 1,
+};
+
+const struct iwx_device_cfg iwx_2ax_cfg_so_gf_a0_long = {
+ .fw_name = IWX_SO_A_GF_A_FW,
+ .pnvm_name = IWX_SO_A_GF_A_PNVM,
+ .uhb_supported = 1,
+ .xtal_latency = 12000,
+ .low_latency_xtal = 1,
+};
+
+const struct iwx_device_cfg iwx_2ax_cfg_so_gf4_a0 = {
+ .fw_name = IWX_SO_A_GF4_A_FW,
+ .pnvm_name = IWX_SO_A_GF4_A_PNVM,
+ .uhb_supported = 1,
+ .xtal_latency = 12000,
+ .low_latency_xtal = 1,
+};
+
+const struct iwx_device_cfg iwx_2ax_cfg_so_gf4_a0_long = {
+ .fw_name = IWX_SO_A_GF4_A_FW,
+ .pnvm_name = IWX_SO_A_GF4_A_PNVM,
+ .uhb_supported = 1,
+};
+
+const struct iwx_device_cfg iwx_2ax_cfg_ty_gf_a0 = {
+ .fw_name = IWX_TY_A_GF_A_FW,
+ .pnvm_name = IWX_TY_A_GF_A_PNVM,
+};
+
+const struct iwx_device_cfg iwx_2ax_cfg_so_jf_b0 = {
+ .fw_name = IWX_SO_A_JF_B_FW,
+};
+
+#define IWX_CFG_ANY (~0)
+
+#define IWX_CFG_MAC_TYPE_QU 0x33
+#define IWX_CFG_MAC_TYPE_QUZ 0x35
+#define IWX_CFG_MAC_TYPE_QNJ 0x36
+#define IWX_CFG_MAC_TYPE_SO 0x37
+#define IWX_CFG_MAC_TYPE_SNJ 0x42
+#define IWX_CFG_MAC_TYPE_SOF 0x43
+#define IWX_CFG_MAC_TYPE_MA 0x44
+#define IWX_CFG_MAC_TYPE_BZ 0x46
+#define IWX_CFG_MAC_TYPE_GL 0x47
+
+#define IWX_CFG_RF_TYPE_JF2 0x105
+#define IWX_CFG_RF_TYPE_JF1 0x108
+#define IWX_CFG_RF_TYPE_HR2 0x10a
+#define IWX_CFG_RF_TYPE_HR1 0x10c
+#define IWX_CFG_RF_TYPE_GF 0x10d
+#define IWX_CFG_RF_TYPE_MR 0x110
+#define IWX_CFG_RF_TYPE_MS 0x111
+#define IWX_CFG_RF_TYPE_FM 0x112
+
+#define IWX_CFG_RF_ID_JF 0x3
+#define IWX_CFG_RF_ID_JF1 0x6
+#define IWX_CFG_RF_ID_JF1_DIV 0xa
+
+#define IWX_CFG_NO_160 0x1
+#define IWX_CFG_160 0x0
+
+#define IWX_CFG_CORES_BT 0x0
+
+#define IWX_CFG_NO_CDB 0x0
+#define IWX_CFG_CDB 0x1
+
+#define IWX_SUBDEVICE_RF_ID(subdevice) ((uint16_t)((subdevice) & 0x00f0) >> 4)
+#define IWX_SUBDEVICE_NO_160(subdevice) ((uint16_t)((subdevice) & 0x0200) >> 9)
+#define IWX_SUBDEVICE_CORES(subdevice) ((uint16_t)((subdevice) & 0x1c00) >> 10)
+
+struct iwx_rx_ba {
+// struct timeout ba_to;
+ int ba_timeout_val;
+ u_int16_t ba_params;
+ u_int16_t ba_winstart;
+ u_int16_t ba_winend;
+ u_int16_t ba_winsize;
+#define IWX_BA_DONE 1
+ int ba_flags;
+};
+
+struct iwx_softc {
+ device_t sc_dev;
+ struct ieee80211com sc_ic;
+ int (*sc_newstate)(struct ieee80211com *, enum ieee80211_state, int);
+ int sc_newstate_pending;
+ int attached;
+
+ struct task init_task; /* NB: not reference-counted */
+ //XXX:misha unknown in FreeBSD
+// struct refcnt task_refs;
+ struct task newstate_task;
+ enum ieee80211_state ns_nstate;
+ int ns_arg;
+
+ /* Task for firmware BlockAck setup/teardown and its arguments. */
+ struct task ba_rx_task;
+ struct task ba_tx_task;
+ struct iwx_ba_task_data ba_rx;
+ struct iwx_ba_task_data ba_tx;
+
+ /* Task for setting encryption keys and its arguments. */
+ struct task setkey_task;
+ /*
+ * At present we need to process at most two keys at once:
+ * Our pairwise key and a group key.
+ * When hostap mode is implemented this array needs to grow or
+ * it might become a bottleneck for associations that occur at
+ * roughly the same time.
+ */
+ struct iwx_setkey_task_arg setkey_arg[2];
+ int setkey_cur;
+ int setkey_tail;
+ int setkey_nkeys;
+
+ /* Task for ERP/HT prot/slot-time/EDCA updates. */
+ struct task mac_ctxt_task;
+
+ /* Task for HT 20/40 MHz channel width updates. */
+ struct task phy_ctxt_task;
+
+ bus_space_tag_t sc_st;
+ bus_space_handle_t sc_sh;
+ bus_size_t sc_sz;
+ bus_dma_tag_t sc_dmat;
+
+ //XXX:misha unknown in FreeBSD
+// pci_product_id_t sc_pid;
+// pci_chipset_tag_t sc_pct;
+// pcitag_t sc_pcitag;
+ u_int16_t sc_pid;
+ void *sc_pct;
+ u_int32_t sc_pcitag;
+
+ void *sc_ih;
+ int sc_msix;
+
+ /* TX/RX rings. */
+ struct iwx_tx_ring txq[IWX_NUM_TX_QUEUES];
+ struct iwx_rx_ring rxq;
+ int qfullmsk;
+ int qenablemsk;
+ int first_data_qid;
+ int aggqid[WME_NUM_TID];
+ int max_tfd_queue_size;
+
+ int sc_sf_state;
+
+ /* ICT table. */
+ struct iwx_dma_info ict_dma;
+ int ict_cur;
+
+ int sc_hw_rev;
+#define IWX_SILICON_A_STEP 0
+#define IWX_SILICON_B_STEP 1
+#define IWX_SILICON_C_STEP 2
+#define IWX_SILICON_Z_STEP 0xf
+ int sc_hw_id;
+ int sc_hw_rf_id;
+ int sc_device_family;
+#define IWX_DEVICE_FAMILY_22000 1
+#define IWX_DEVICE_FAMILY_AX210 2
+ uint32_t sc_sku_id[3];
+ uint32_t mac_addr_from_csr;
+
+ struct iwx_dma_info ctxt_info_dma;
+ struct iwx_self_init_dram init_dram;
+ struct iwx_dma_info prph_scratch_dma;
+ struct iwx_dma_info prph_info_dma;
+ struct iwx_dma_info iml_dma;
+ struct iwx_dma_info pnvm_dma;
+ uint32_t sc_pnvm_ver;
+
+ int sc_fw_chunk_done;
+ int sc_init_complete;
+#define IWX_INIT_COMPLETE 0x01
+#define IWX_CALIB_COMPLETE 0x02
+#define IWX_PNVM_COMPLETE 0x04
+
+ struct iwx_ucode_status sc_uc;
+ char sc_fwver[32];
+
+ int sc_capaflags;
+ int sc_capa_max_probe_len;
+ int sc_capa_n_scan_channels;
+ uint8_t sc_ucode_api[howmany(IWX_NUM_UCODE_TLV_API, NBBY)];
+ uint8_t sc_enabled_capa[howmany(IWX_NUM_UCODE_TLV_CAPA, NBBY)];
+#define IWX_MAX_FW_CMD_VERSIONS 704
+ struct iwx_fw_cmd_version cmd_versions[IWX_MAX_FW_CMD_VERSIONS];
+ int n_cmd_versions;
+ int sc_rate_n_flags_version;
+
+ int sc_intmask;
+ int sc_flags;
+
+ uint32_t sc_fh_init_mask;
+ uint32_t sc_hw_init_mask;
+ uint32_t sc_fh_mask;
+ uint32_t sc_hw_mask;
+
+ int sc_generation;
+
+ struct rwlock ioctl_rwl;
+
+ int sc_cap_off; /* PCIe caps */
+
+ const char *sc_fwname;
+ struct iwx_fw_info sc_fw;
+ struct iwx_dma_info fw_mon;
+ int sc_fw_phy_config;
+ struct iwx_tlv_calib_ctrl sc_default_calib[IWX_UCODE_TYPE_MAX];
+
+ struct iwx_nvm_data sc_nvm;
+ struct iwx_bf_data sc_bf;
+ const char *sc_pnvm_name;
+
+ int sc_tx_timer[IWX_NUM_TX_QUEUES];
+ int sc_rx_ba_sessions;
+
+ struct task bgscan_done_task;
+ struct ieee80211_node_switch_bss_arg *bgscan_unref_arg;
+ size_t bgscan_unref_arg_size;
+
+ int sc_scan_last_antenna;
+
+ int sc_staid;
+ int sc_nodecolor;
+
+ uint8_t *sc_cmd_resp_pkt[IWX_TX_RING_COUNT];
+ size_t sc_cmd_resp_len[IWX_TX_RING_COUNT];
+ int sc_nic_locks;
+
+ struct taskq *sc_nswq;
+
+ struct iwx_rx_phy_info sc_last_phy_info;
+ int sc_ampdu_ref;
+ struct iwx_rxba_data sc_rxba_data[IWX_MAX_BAID];
+
+ uint32_t sc_time_event_uid;
+
+ /* phy contexts. we only use the first one */
+ struct iwx_phy_ctxt sc_phyctxt[IWX_NUM_PHY_CTX];
+
+ struct iwx_notif_statistics sc_stats;
+ int sc_noise;
+
+ int sc_pm_support;
+ int sc_ltr_enabled;
+
+ int sc_integrated;
+ int sc_tx_with_siso_diversity;
+ int sc_max_tfd_queue_size;
+ int sc_ltr_delay;
+ int sc_xtal_latency;
+ int sc_low_latency_xtal;
+ int sc_uhb_supported;
+ int sc_umac_prph_offset;
+ int sc_imr_enabled;
+
+//#if NBPFILTER > 0
+ caddr_t sc_drvbpf;
+
+ union {
+ struct iwx_rx_radiotap_header th;
+ uint8_t pad[IEEE80211_RADIOTAP_HDRLEN];
+ } sc_rxtapu;
+#define sc_rxtap sc_rxtapu.th
+ int sc_rxtap_len;
+
+ union {
+ struct iwx_tx_radiotap_header th;
+ uint8_t pad[IEEE80211_RADIOTAP_HDRLEN];
+ } sc_txtapu;
+#define sc_txtap sc_txtapu.th
+ int sc_txtap_len;
+//#endif
+
+ //XXX:misha FreeBSD specific
+ struct mtx sc_mtx;
+ struct resource *sc_mem;
+ struct resource *sc_irq;
+ struct intr_config_hook sc_preinit_hook;
+ struct task sc_es_task;
+ struct mbufq sc_snd;
+ struct iwx_rx_ba ni_rx_ba[WME_NUM_TID];
+ struct taskqueue *sc_tq;
+ int (*sc_ampdu_rx_start)(struct ieee80211_node *,
+ struct ieee80211_rx_ampdu *, int, int, int);
+ void (*sc_ampdu_rx_stop)(struct ieee80211_node *,
+ struct ieee80211_rx_ampdu *);
+ int (*sc_addba_request)(struct ieee80211_node *,
+ struct ieee80211_tx_ampdu *, int, int, int);
+ int (*sc_addba_response)(struct ieee80211_node *,
+ struct ieee80211_tx_ampdu *, int, int, int);
+ struct callout watchdog_to;
+ const struct firmware *fwp;
+
+ struct iwx_scan_req_umac_v14 sc_umac_v14_cmd;
+
+ // XXX-THJ backwards compat stuff
+ int sc_rsp_vers;
+ union {
+ struct iwx_nvm_get_info_rsp rsp_v4;
+ struct iwx_nvm_get_info_rsp_v3 rsp_v3;
+
+ } sc_rsp_info;
+ uint32_t sc_debug;
+
+ // TODO:misha addba_stop?
+
+ // TEMP
+// struct ieee80211_node *ba_ni;
+// struct ieee80211_rx_ampdu *ba_rap;
+// int ba_baparamset;
+// int ba_timeout;
+// int ba_baseqctl;
+};
+
+#define IWX_LOCK_INIT(_sc) \
+ mtx_init(&(_sc)->sc_mtx, device_get_nameunit((_sc)->sc_dev), \
+ MTX_NETWORK_LOCK, MTX_DEF);
+#define IWX_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx)
+#define IWX_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx)
+#define IWX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->sc_mtx)
+#define IWX_ASSERT_LOCKED(_sc) mtx_assert(&(_sc)->sc_mtx, MA_OWNED);
+
+struct iwx_vap {
+ struct ieee80211vap iv_vap;
+ int is_uploaded;
+ int iv_auth;
+
+ int (*iv_newstate)(struct ieee80211vap *,
+ enum ieee80211_state, int);
+
+ struct iwx_phy_ctxt *phy_ctxt;
+
+ uint16_t id;
+ uint16_t color;
+
+ boolean_t have_wme;
+ /*
+ * QoS data from net80211, need to store this here
+ * as net80211 has a separate callback but we need
+ * to have the data for the MAC context
+ */
+ struct {
+ uint16_t cw_min;
+ uint16_t cw_max;
+ uint16_t edca_txop;
+ uint8_t aifsn;
+ } queue_params[WME_NUM_AC];
+
+ /* indicates that this interface requires PS to be disabled */
+ boolean_t ps_disabled;
+};
+#define IWX_VAP(_vap) ((struct iwx_vap *)(_vap))
+
+struct iwx_node {
+ struct ieee80211_node in_ni;
+ struct iwx_phy_ctxt *in_phyctxt;
+ uint8_t in_macaddr[ETHER_ADDR_LEN];
+
+ uint16_t in_id;
+ uint16_t in_color;
+
+ struct iwx_rxq_dup_data dup_data;
+
+ int in_flags;
+#define IWX_NODE_FLAG_HAVE_PAIRWISE_KEY 0x01
+#define IWX_NODE_FLAG_HAVE_GROUP_KEY 0x02
+};
+
+#define IWX_NODE(_ni) ((struct iwx_node *)(_ni))
+
+/* IWX_STATION_ID and IWX_AUX_STA_ID are defined below together with */
+/* IWX_MONITOR_STA_ID; the duplicate definitions here were removed. */
+
+#define IWX_DEFAULT_MACID 0
+#define IWX_DEFAULT_COLOR 0
+#define IWX_DEFAULT_TSFID 0
+
+#define IWX_STATION_ID 0
+#define IWX_AUX_STA_ID 1
+#define IWX_MONITOR_STA_ID 2
+
+#define IWX_ICT_SIZE 4096
+#define IWX_ICT_COUNT (IWX_ICT_SIZE / sizeof (uint32_t))
+#define IWX_ICT_PADDR_SHIFT 12
diff --git a/sys/modules/Makefile b/sys/modules/Makefile
--- a/sys/modules/Makefile
+++ b/sys/modules/Makefile
@@ -203,6 +203,7 @@
${_ix} \
${_ixv} \
${_ixl} \
+ iwx \
jme \
kbdmux \
kgssapi \
diff --git a/sys/modules/iwx/Makefile b/sys/modules/iwx/Makefile
new file mode 100644
--- /dev/null
+++ b/sys/modules/iwx/Makefile
@@ -0,0 +1,12 @@
+# $FreeBSD$
+
+.PATH: ${SRCTOP}/sys/dev/iwx
+
+KMOD= if_iwx
+SRCS= if_iwx.c
+SRCS+= device_if.h bus_if.h pci_if.h opt_wlan.h
+
+CFLAGS+= -DIWX_DEBUG
+DEBUG_FLAGS= -O0 -g
+
+.include <bsd.kmod.mk>

File Metadata

Mime Type
text/plain
Expires
Fri, Feb 6, 12:27 PM (21 h, 29 m)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
28448399
Default Alt Text
D49259.id151965.diff (641 KB)

Event Timeline