diff --git a/sys/contrib/dev/iwlwifi/cfg/22000.c b/sys/contrib/dev/iwlwifi/cfg/22000.c
index 918dd0f6f8b5..8ff967edc8f0 100644
--- a/sys/contrib/dev/iwlwifi/cfg/22000.c
+++ b/sys/contrib/dev/iwlwifi/cfg/22000.c
@@ -1,994 +1,1009 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2021 Intel Corporation
+ * Copyright (C) 2018-2022 Intel Corporation
 */
#include <linux/module.h>
#include <linux/stringify.h>
#include "iwl-config.h"
#include "iwl-prph.h"
#include "fw/api/txq.h"

/* Highest firmware API version supported */
-#define IWL_22000_UCODE_API_MAX 70
+#define IWL_22000_UCODE_API_MAX 72

/* Lowest firmware API version supported */
#define IWL_22000_UCODE_API_MIN 39

/* NVM versions */
#define IWL_22000_NVM_VERSION 0x0a1d

/* Memory offsets and lengths */
#define IWL_22000_DCCM_OFFSET 0x800000 /* LMAC1 */
#define IWL_22000_DCCM_LEN 0x10000 /* LMAC1 */
#define IWL_22000_DCCM2_OFFSET 0x880000
#define IWL_22000_DCCM2_LEN 0x8000
#define IWL_22000_SMEM_OFFSET 0x400000
#define IWL_22000_SMEM_LEN 0xD0000

#define IWL_QU_B_HR_B_FW_PRE "iwlwifi-Qu-b0-hr-b0-"
#define IWL_QNJ_B_HR_B_FW_PRE "iwlwifi-QuQnj-b0-hr-b0-"
#define IWL_QU_C_HR_B_FW_PRE "iwlwifi-Qu-c0-hr-b0-"
#define IWL_QU_B_JF_B_FW_PRE "iwlwifi-Qu-b0-jf-b0-"
#define IWL_QU_C_JF_B_FW_PRE "iwlwifi-Qu-c0-jf-b0-"
#define IWL_QUZ_A_HR_B_FW_PRE "iwlwifi-QuZ-a0-hr-b0-"
#define IWL_QUZ_A_JF_B_FW_PRE "iwlwifi-QuZ-a0-jf-b0-"
#define IWL_QNJ_B_JF_B_FW_PRE "iwlwifi-QuQnj-b0-jf-b0-"
#define IWL_CC_A_FW_PRE "iwlwifi-cc-a0-"
#define IWL_SO_A_JF_B_FW_PRE "iwlwifi-so-a0-jf-b0-"
#define IWL_SO_A_HR_B_FW_PRE "iwlwifi-so-a0-hr-b0-"
#define IWL_SO_A_GF_A_FW_PRE "iwlwifi-so-a0-gf-a0-"
#define IWL_TY_A_GF_A_FW_PRE "iwlwifi-ty-a0-gf-a0-"
#define IWL_SO_A_GF4_A_FW_PRE "iwlwifi-so-a0-gf4-a0-"
#define IWL_SO_A_MR_A_FW_PRE "iwlwifi-so-a0-mr-a0-"
#define IWL_SNJ_A_GF4_A_FW_PRE "iwlwifi-SoSnj-a0-gf4-a0-"
#define IWL_SNJ_A_GF_A_FW_PRE "iwlwifi-SoSnj-a0-gf-a0-"
#define IWL_SNJ_A_HR_B_FW_PRE "iwlwifi-SoSnj-a0-hr-b0-"
#define IWL_SNJ_A_JF_B_FW_PRE "iwlwifi-SoSnj-a0-jf-b0-"
#define IWL_MA_A_HR_B_FW_PRE "iwlwifi-ma-a0-hr-b0-"
#define IWL_MA_A_GF_A_FW_PRE "iwlwifi-ma-a0-gf-a0-"
#define IWL_MA_A_GF4_A_FW_PRE "iwlwifi-ma-a0-gf4-a0-"
#define IWL_MA_A_MR_A_FW_PRE "iwlwifi-ma-a0-mr-a0-"
#define IWL_MA_A_FM_A_FW_PRE "iwlwifi-ma-a0-fm-a0-"
#define IWL_SNJ_A_MR_A_FW_PRE "iwlwifi-SoSnj-a0-mr-a0-"
#define IWL_BZ_A_HR_B_FW_PRE "iwlwifi-bz-a0-hr-b0-"
#define IWL_BZ_A_GF_A_FW_PRE "iwlwifi-bz-a0-gf-a0-"
#define IWL_BZ_A_GF4_A_FW_PRE "iwlwifi-bz-a0-gf4-a0-"
#define IWL_BZ_A_MR_A_FW_PRE "iwlwifi-bz-a0-mr-a0-"
#define IWL_BZ_A_FM_A_FW_PRE "iwlwifi-bz-a0-fm-a0-"
#define IWL_GL_A_FM_A_FW_PRE "iwlwifi-gl-a0-fm-a0-"
#define IWL_BZ_Z_GF_A_FW_PRE "iwlwifi-bz-z0-gf-a0-"
#define IWL_BNJ_A_FM_A_FW_PRE "iwlwifi-BzBnj-a0-fm-a0-"
#define IWL_BNJ_A_FM4_A_FW_PRE "iwlwifi-BzBnj-a0-fm4-a0-"
#define IWL_BNJ_A_GF_A_FW_PRE "iwlwifi-BzBnj-a0-gf-a0-"
#define IWL_BNJ_A_GF4_A_FW_PRE "iwlwifi-BzBnj-a0-gf4-a0-"
#define IWL_BNJ_A_HR_B_FW_PRE "iwlwifi-BzBnj-a0-hr-b0-"

#define IWL_QU_B_HR_B_MODULE_FIRMWARE(api) \
	IWL_QU_B_HR_B_FW_PRE __stringify(api) ".ucode"
#define IWL_QNJ_B_HR_B_MODULE_FIRMWARE(api) \
	IWL_QNJ_B_HR_B_FW_PRE __stringify(api) ".ucode"
#define IWL_QUZ_A_HR_B_MODULE_FIRMWARE(api) \
	IWL_QUZ_A_HR_B_FW_PRE __stringify(api) ".ucode"
#define IWL_QUZ_A_JF_B_MODULE_FIRMWARE(api) \
	IWL_QUZ_A_JF_B_FW_PRE __stringify(api) ".ucode"
#define IWL_QU_C_HR_B_MODULE_FIRMWARE(api) \
	IWL_QU_C_HR_B_FW_PRE __stringify(api) ".ucode"
#define IWL_QU_B_JF_B_MODULE_FIRMWARE(api) \
IWL_QU_B_JF_B_FW_PRE __stringify(api) ".ucode" #define IWL_QNJ_B_JF_B_MODULE_FIRMWARE(api) \ IWL_QNJ_B_JF_B_FW_PRE __stringify(api) ".ucode" #define IWL_CC_A_MODULE_FIRMWARE(api) \ IWL_CC_A_FW_PRE __stringify(api) ".ucode" #define IWL_SO_A_JF_B_MODULE_FIRMWARE(api) \ IWL_SO_A_JF_B_FW_PRE __stringify(api) ".ucode" #define IWL_SO_A_HR_B_MODULE_FIRMWARE(api) \ IWL_SO_A_HR_B_FW_PRE __stringify(api) ".ucode" #define IWL_SO_A_GF_A_MODULE_FIRMWARE(api) \ IWL_SO_A_GF_A_FW_PRE __stringify(api) ".ucode" #define IWL_TY_A_GF_A_MODULE_FIRMWARE(api) \ IWL_TY_A_GF_A_FW_PRE __stringify(api) ".ucode" #define IWL_SNJ_A_GF4_A_MODULE_FIRMWARE(api) \ IWL_SNJ_A_GF4_A_FW_PRE __stringify(api) ".ucode" #define IWL_SNJ_A_GF_A_MODULE_FIRMWARE(api) \ IWL_SNJ_A_GF_A_FW_PRE __stringify(api) ".ucode" #define IWL_SNJ_A_HR_B_MODULE_FIRMWARE(api) \ IWL_SNJ_A_HR_B_FW_PRE __stringify(api) ".ucode" #define IWL_SNJ_A_JF_B_MODULE_FIRMWARE(api) \ IWL_SNJ_A_JF_B_FW_PRE __stringify(api) ".ucode" #define IWL_MA_A_HR_B_FW_MODULE_FIRMWARE(api) \ IWL_MA_A_HR_B_FW_PRE __stringify(api) ".ucode" #define IWL_MA_A_GF_A_FW_MODULE_FIRMWARE(api) \ IWL_MA_A_GF_A_FW_PRE __stringify(api) ".ucode" #define IWL_MA_A_GF4_A_FW_MODULE_FIRMWARE(api) \ IWL_MA_A_GF4_A_FW_PRE __stringify(api) ".ucode" #define IWL_MA_A_MR_A_FW_MODULE_FIRMWARE(api) \ IWL_MA_A_MR_A_FW_PRE __stringify(api) ".ucode" #define IWL_MA_A_FM_A_FW_MODULE_FIRMWARE(api) \ IWL_MA_A_FM_A_FW_PRE __stringify(api) ".ucode" #define IWL_SNJ_A_MR_A_MODULE_FIRMWARE(api) \ IWL_SNJ_A_MR_A_FW_PRE __stringify(api) ".ucode" #define IWL_BZ_A_HR_B_MODULE_FIRMWARE(api) \ IWL_BZ_A_HR_B_FW_PRE __stringify(api) ".ucode" #define IWL_BZ_A_GF_A_MODULE_FIRMWARE(api) \ IWL_BZ_A_GF_A_FW_PRE __stringify(api) ".ucode" #define IWL_BZ_A_GF4_A_MODULE_FIRMWARE(api) \ IWL_BZ_A_GF4_A_FW_PRE __stringify(api) ".ucode" #define IWL_BZ_A_MR_A_MODULE_FIRMWARE(api) \ IWL_BZ_A_MR_A_FW_PRE __stringify(api) ".ucode" #define IWL_BZ_A_FM_A_MODULE_FIRMWARE(api) \ IWL_BZ_A_FM_A_FW_PRE __stringify(api) ".ucode" #define IWL_GL_A_FM_A_MODULE_FIRMWARE(api) \ IWL_GL_A_FM_A_FW_PRE __stringify(api) ".ucode" #define IWL_BNJ_A_FM_A_MODULE_FIRMWARE(api) \ IWL_BNJ_A_FM_A_FW_PRE __stringify(api) ".ucode" #define IWL_BNJ_A_FM4_A_MODULE_FIRMWARE(api) \ IWL_BNJ_A_FM4_A_FW_PRE __stringify(api) ".ucode" #define IWL_BNJ_A_GF_A_MODULE_FIRMWARE(api) \ IWL_BNJ_A_GF_A_FW_PRE __stringify(api) ".ucode" #define IWL_BNJ_A_GF4_A_MODULE_FIRMWARE(api) \ IWL_BNJ_A_GF4_A_FW_PRE __stringify(api) ".ucode" #define IWL_BNJ_A_HR_B_MODULE_FIRMWARE(api) \ IWL_BNJ_A_HR_B_FW_PRE __stringify(api) ".ucode" static const struct iwl_base_params iwl_22000_base_params = { .eeprom_size = OTP_LOW_IMAGE_SIZE_32K, .num_of_queues = 512, .max_tfd_queue_size = 256, .shadow_ram_support = true, .led_compensation = 57, .wd_timeout = IWL_LONG_WD_TIMEOUT, .max_event_log_size = 512, .shadow_reg_enable = true, .pcie_l1_allowed = true, }; static const struct iwl_base_params iwl_ax210_base_params = { .eeprom_size = OTP_LOW_IMAGE_SIZE_32K, .num_of_queues = 512, .max_tfd_queue_size = 65536, .shadow_ram_support = true, .led_compensation = 57, .wd_timeout = IWL_LONG_WD_TIMEOUT, .max_event_log_size = 512, .shadow_reg_enable = true, .pcie_l1_allowed = true, }; static const struct iwl_ht_params iwl_22000_ht_params = { .stbc = true, .ldpc = true, .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ) | BIT(NL80211_BAND_6GHZ), }; #define IWL_DEVICE_22000_COMMON \ .ucode_api_max = IWL_22000_UCODE_API_MAX, \ .ucode_api_min = IWL_22000_UCODE_API_MIN, \ .led_mode = IWL_LED_RF_STATE, \ 
.nvm_hw_section_num = 10, \ .non_shared_ant = ANT_B, \ .dccm_offset = IWL_22000_DCCM_OFFSET, \ .dccm_len = IWL_22000_DCCM_LEN, \ .dccm2_offset = IWL_22000_DCCM2_OFFSET, \ .dccm2_len = IWL_22000_DCCM2_LEN, \ .smem_offset = IWL_22000_SMEM_OFFSET, \ .smem_len = IWL_22000_SMEM_LEN, \ .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, \ .apmg_not_supported = true, \ .trans.mq_rx_supported = true, \ .vht_mu_mimo_supported = true, \ .mac_addr_from_csr = 0x380, \ .ht_params = &iwl_22000_ht_params, \ .nvm_ver = IWL_22000_NVM_VERSION, \ .trans.use_tfh = true, \ .trans.rf_id = true, \ .trans.gen2 = true, \ .nvm_type = IWL_NVM_EXT, \ .dbgc_supported = true, \ .min_umac_error_event_table = 0x400000, \ .d3_debug_data_base_addr = 0x401000, \ .d3_debug_data_length = 60 * 1024, \ .mon_smem_regs = { \ .write_ptr = { \ .addr = LDBG_M2S_BUF_WPTR, \ .mask = LDBG_M2S_BUF_WPTR_VAL_MSK, \ }, \ .cycle_cnt = { \ .addr = LDBG_M2S_BUF_WRAP_CNT, \ .mask = LDBG_M2S_BUF_WRAP_CNT_VAL_MSK, \ }, \ } #define IWL_DEVICE_22500 \ IWL_DEVICE_22000_COMMON, \ .trans.device_family = IWL_DEVICE_FAMILY_22000, \ .trans.base_params = &iwl_22000_base_params, \ .gp2_reg_addr = 0xa02c68, \ .mon_dram_regs = { \ .write_ptr = { \ .addr = MON_BUFF_WRPTR_VER2, \ .mask = 0xffffffff, \ }, \ .cycle_cnt = { \ .addr = MON_BUFF_CYCLE_CNT_VER2, \ .mask = 0xffffffff, \ }, \ } #define IWL_DEVICE_AX210 \ IWL_DEVICE_22000_COMMON, \ .trans.umac_prph_offset = 0x300000, \ .trans.device_family = IWL_DEVICE_FAMILY_AX210, \ .trans.base_params = &iwl_ax210_base_params, \ .min_txq_size = 128, \ .gp2_reg_addr = 0xd02c68, \ .min_ba_txq_size = IWL_DEFAULT_QUEUE_SIZE_HE, \ .mon_dram_regs = { \ .write_ptr = { \ .addr = DBGC_CUR_DBGBUF_STATUS, \ .mask = DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK, \ }, \ .cycle_cnt = { \ .addr = DBGC_DBGBUF_WRAP_AROUND, \ .mask = 0xffffffff, \ }, \ .cur_frag = { \ .addr = DBGC_CUR_DBGBUF_STATUS, \ .mask = DBGC_CUR_DBGBUF_STATUS_IDX_MSK, \ }, \ } #define IWL_DEVICE_BZ_COMMON \ .ucode_api_max = IWL_22000_UCODE_API_MAX, \ .ucode_api_min = IWL_22000_UCODE_API_MIN, \ .led_mode = IWL_LED_RF_STATE, \ .nvm_hw_section_num = 10, \ .non_shared_ant = ANT_B, \ .dccm_offset = IWL_22000_DCCM_OFFSET, \ .dccm_len = IWL_22000_DCCM_LEN, \ .dccm2_offset = IWL_22000_DCCM2_OFFSET, \ .dccm2_len = IWL_22000_DCCM2_LEN, \ .smem_offset = IWL_22000_SMEM_OFFSET, \ .smem_len = IWL_22000_SMEM_LEN, \ .features = IWL_TX_CSUM_NETIF_FLAGS_BZ | NETIF_F_RXCSUM, \ .apmg_not_supported = true, \ .trans.mq_rx_supported = true, \ .vht_mu_mimo_supported = true, \ .mac_addr_from_csr = 0x30, \ .ht_params = &iwl_22000_ht_params, \ .nvm_ver = IWL_22000_NVM_VERSION, \ .trans.use_tfh = true, \ .trans.rf_id = true, \ .trans.gen2 = true, \ .nvm_type = IWL_NVM_EXT, \ .dbgc_supported = true, \ .min_umac_error_event_table = 0x400000, \ .d3_debug_data_base_addr = 0x401000, \ .d3_debug_data_length = 60 * 1024, \ .mon_smem_regs = { \ .write_ptr = { \ .addr = LDBG_M2S_BUF_WPTR, \ .mask = LDBG_M2S_BUF_WPTR_VAL_MSK, \ }, \ .cycle_cnt = { \ .addr = LDBG_M2S_BUF_WRAP_CNT, \ .mask = LDBG_M2S_BUF_WRAP_CNT_VAL_MSK, \ }, \ } #define IWL_DEVICE_BZ \ IWL_DEVICE_BZ_COMMON, \ .trans.umac_prph_offset = 0x300000, \ .trans.device_family = IWL_DEVICE_FAMILY_BZ, \ .trans.base_params = &iwl_ax210_base_params, \ .min_txq_size = 128, \ .gp2_reg_addr = 0xd02c68, \ .min_ba_txq_size = IWL_DEFAULT_QUEUE_SIZE_EHT, \ .mon_dram_regs = { \ .write_ptr = { \ .addr = DBGC_CUR_DBGBUF_STATUS, \ .mask = DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK, \ }, \ .cycle_cnt = { \ .addr = DBGC_DBGBUF_WRAP_AROUND, \ .mask = 0xffffffff, \ }, \ 
.cur_frag = { \ .addr = DBGC_CUR_DBGBUF_STATUS, \ .mask = DBGC_CUR_DBGBUF_STATUS_IDX_MSK, \ }, \ }, \ .mon_dbgi_regs = { \ .write_ptr = { \ .addr = DBGI_SRAM_FIFO_POINTERS, \ .mask = DBGI_SRAM_FIFO_POINTERS_WR_PTR_MSK, \ }, \ } const struct iwl_cfg_trans_params iwl_qnj_trans_cfg = { .mq_rx_supported = true, .use_tfh = true, .rf_id = true, .gen2 = true, .device_family = IWL_DEVICE_FAMILY_22000, .base_params = &iwl_22000_base_params, }; const struct iwl_cfg_trans_params iwl_qu_trans_cfg = { .mq_rx_supported = true, .use_tfh = true, .rf_id = true, .gen2 = true, .device_family = IWL_DEVICE_FAMILY_22000, .base_params = &iwl_22000_base_params, .integrated = true, .xtal_latency = 500, .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_200US, }; const struct iwl_cfg_trans_params iwl_qu_medium_latency_trans_cfg = { .mq_rx_supported = true, .use_tfh = true, .rf_id = true, .gen2 = true, .device_family = IWL_DEVICE_FAMILY_22000, .base_params = &iwl_22000_base_params, .integrated = true, .xtal_latency = 1820, .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_1820US, }; const struct iwl_cfg_trans_params iwl_qu_long_latency_trans_cfg = { .mq_rx_supported = true, .use_tfh = true, .rf_id = true, .gen2 = true, .device_family = IWL_DEVICE_FAMILY_22000, .base_params = &iwl_22000_base_params, .integrated = true, .xtal_latency = 12000, .low_latency_xtal = true, .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US, }; const struct iwl_cfg_trans_params iwl_snj_trans_cfg = { .mq_rx_supported = true, .use_tfh = true, .rf_id = true, .gen2 = true, .device_family = IWL_DEVICE_FAMILY_AX210, .base_params = &iwl_ax210_base_params, .umac_prph_offset = 0x300000, }; const struct iwl_cfg_trans_params iwl_so_trans_cfg = { .mq_rx_supported = true, .use_tfh = true, .rf_id = true, .gen2 = true, .device_family = IWL_DEVICE_FAMILY_AX210, .base_params = &iwl_ax210_base_params, .umac_prph_offset = 0x300000, .integrated = true, /* TODO: the following values need to be checked */ .xtal_latency = 500, .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_200US, }; const struct iwl_cfg_trans_params iwl_so_long_latency_trans_cfg = { .mq_rx_supported = true, .use_tfh = true, .rf_id = true, .gen2 = true, .device_family = IWL_DEVICE_FAMILY_AX210, .base_params = &iwl_ax210_base_params, .umac_prph_offset = 0x300000, .integrated = true, .low_latency_xtal = true, .xtal_latency = 12000, .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US, }; +const struct iwl_cfg_trans_params iwl_so_long_latency_imr_trans_cfg = { + .mq_rx_supported = true, + .use_tfh = true, + .rf_id = true, + .gen2 = true, + .device_family = IWL_DEVICE_FAMILY_AX210, + .base_params = &iwl_ax210_base_params, + .umac_prph_offset = 0x300000, + .integrated = true, + .low_latency_xtal = true, + .xtal_latency = 12000, + .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US, + .imr_enabled = true, +}; + /* * If the device doesn't support HE, no need to have that many buffers. * 22000 devices can split multiple frames into a single RB, so fewer are * needed; AX210 cannot (but use smaller RBs by default) - these sizes * were picked according to 8 MSDUs inside 256 A-MSDUs in an A-MPDU, with * additional overhead to account for processing time. */ #define IWL_NUM_RBDS_NON_HE 512 #define IWL_NUM_RBDS_22000_HE 2048 #define IWL_NUM_RBDS_AX210_HE 4096 /* * All JF radio modules are part of the 9000 series, but the MAC part * looks more like 22000. That's why this device is here, but called * 9560 nevertheless. 
*/ const struct iwl_cfg iwl9560_qu_b0_jf_b0_cfg = { .fw_name_pre = IWL_QU_B_JF_B_FW_PRE, IWL_DEVICE_22500, .num_rbds = IWL_NUM_RBDS_NON_HE, }; const struct iwl_cfg iwl9560_qu_c0_jf_b0_cfg = { .fw_name_pre = IWL_QU_C_JF_B_FW_PRE, IWL_DEVICE_22500, .num_rbds = IWL_NUM_RBDS_NON_HE, }; const struct iwl_cfg iwl9560_quz_a0_jf_b0_cfg = { .fw_name_pre = IWL_QUZ_A_JF_B_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap * so we need to restrict the size of transmitted aggregation to the * HT size; mac80211 would otherwise pick the HE max (256) by default. */ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, .num_rbds = IWL_NUM_RBDS_NON_HE, }; const struct iwl_cfg iwl9560_qnj_b0_jf_b0_cfg = { .fw_name_pre = IWL_QNJ_B_JF_B_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap * so we need to restrict the size of transmitted aggregation to the * HT size; mac80211 would otherwise pick the HE max (256) by default. */ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, .num_rbds = IWL_NUM_RBDS_NON_HE, }; const struct iwl_cfg_trans_params iwl_ax200_trans_cfg = { .device_family = IWL_DEVICE_FAMILY_22000, .base_params = &iwl_22000_base_params, .mq_rx_supported = true, .use_tfh = true, .rf_id = true, .gen2 = true, .bisr_workaround = 1, }; const struct iwl_cfg_trans_params iwl_ma_trans_cfg = { .device_family = IWL_DEVICE_FAMILY_AX210, .base_params = &iwl_ax210_base_params, .mq_rx_supported = true, .use_tfh = true, .rf_id = true, .gen2 = true, .integrated = true, .umac_prph_offset = 0x300000 }; const struct iwl_cfg_trans_params iwl_bz_trans_cfg = { .device_family = IWL_DEVICE_FAMILY_BZ, .base_params = &iwl_ax210_base_params, .mq_rx_supported = true, .use_tfh = true, .rf_id = true, .gen2 = true, .integrated = true, .umac_prph_offset = 0x300000, .xtal_latency = 12000, .low_latency_xtal = true, .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US, }; const char iwl_ax101_name[] = "Intel(R) Wi-Fi 6 AX101"; const char iwl_ax200_name[] = "Intel(R) Wi-Fi 6 AX200 160MHz"; const char iwl_ax201_name[] = "Intel(R) Wi-Fi 6 AX201 160MHz"; const char iwl_ax203_name[] = "Intel(R) Wi-Fi 6 AX203"; const char iwl_ax204_name[] = "Intel(R) Wi-Fi 6 AX204 160MHz"; const char iwl_ax211_name[] = "Intel(R) Wi-Fi 6E AX211 160MHz"; const char iwl_ax221_name[] = "Intel(R) Wi-Fi 6E AX221 160MHz"; const char iwl_ax231_name[] = "Intel(R) Wi-Fi 6E AX231 160MHz"; const char iwl_ax411_name[] = "Intel(R) Wi-Fi 6E AX411 160MHz"; const char iwl_bz_name[] = "Intel(R) TBD Bz device"; const char iwl_ax200_killer_1650w_name[] = "Killer(R) Wi-Fi 6 AX1650w 160MHz Wireless Network Adapter (200D2W)"; const char iwl_ax200_killer_1650x_name[] = "Killer(R) Wi-Fi 6 AX1650x 160MHz Wireless Network Adapter (200NGW)"; const char iwl_ax201_killer_1650s_name[] = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201D2W)"; const char iwl_ax201_killer_1650i_name[] = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201NGW)"; const char iwl_ax210_killer_1675w_name[] = "Killer(R) Wi-Fi 6E AX1675w 160MHz Wireless Network Adapter (210D2W)"; const char iwl_ax210_killer_1675x_name[] = "Killer(R) Wi-Fi 6E AX1675x 160MHz Wireless Network Adapter (210NGW)"; const char iwl_ax211_killer_1675s_name[] = "Killer(R) Wi-Fi 6E AX1675s 160MHz Wireless Network Adapter (211NGW)"; const char iwl_ax211_killer_1675i_name[] = "Killer(R) Wi-Fi 6E AX1675i 160MHz Wireless Network Adapter (211NGW)"; const char iwl_ax411_killer_1690s_name[] = "Killer(R) Wi-Fi 6E AX1690s 160MHz Wireless Network Adapter 
(411D2W)"; const char iwl_ax411_killer_1690i_name[] = "Killer(R) Wi-Fi 6E AX1690i 160MHz Wireless Network Adapter (411NGW)"; const struct iwl_cfg iwl_qu_b0_hr1_b0 = { .fw_name_pre = IWL_QU_B_HR_B_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap * so we need to restrict the size of transmitted aggregation to the * HT size; mac80211 would otherwise pick the HE max (256) by default. */ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, .tx_with_siso_diversity = true, .num_rbds = IWL_NUM_RBDS_22000_HE, }; const struct iwl_cfg iwl_qu_b0_hr_b0 = { .fw_name_pre = IWL_QU_B_HR_B_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap * so we need to restrict the size of transmitted aggregation to the * HT size; mac80211 would otherwise pick the HE max (256) by default. */ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, .num_rbds = IWL_NUM_RBDS_22000_HE, }; const struct iwl_cfg iwl_ax201_cfg_qu_hr = { .name = "Intel(R) Wi-Fi 6 AX201 160MHz", .fw_name_pre = IWL_QU_B_HR_B_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap * so we need to restrict the size of transmitted aggregation to the * HT size; mac80211 would otherwise pick the HE max (256) by default. */ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, .num_rbds = IWL_NUM_RBDS_22000_HE, }; const struct iwl_cfg iwl_qu_c0_hr1_b0 = { .fw_name_pre = IWL_QU_C_HR_B_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap * so we need to restrict the size of transmitted aggregation to the * HT size; mac80211 would otherwise pick the HE max (256) by default. */ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, .tx_with_siso_diversity = true, .num_rbds = IWL_NUM_RBDS_22000_HE, }; const struct iwl_cfg iwl_qu_c0_hr_b0 = { .fw_name_pre = IWL_QU_C_HR_B_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap * so we need to restrict the size of transmitted aggregation to the * HT size; mac80211 would otherwise pick the HE max (256) by default. */ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, .num_rbds = IWL_NUM_RBDS_22000_HE, }; const struct iwl_cfg iwl_ax201_cfg_qu_c0_hr_b0 = { .name = "Intel(R) Wi-Fi 6 AX201 160MHz", .fw_name_pre = IWL_QU_C_HR_B_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap * so we need to restrict the size of transmitted aggregation to the * HT size; mac80211 would otherwise pick the HE max (256) by default. */ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, .num_rbds = IWL_NUM_RBDS_22000_HE, }; const struct iwl_cfg iwl_quz_a0_hr1_b0 = { .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap * so we need to restrict the size of transmitted aggregation to the * HT size; mac80211 would otherwise pick the HE max (256) by default. */ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, .tx_with_siso_diversity = true, .num_rbds = IWL_NUM_RBDS_22000_HE, }; const struct iwl_cfg iwl_ax201_cfg_quz_hr = { .name = "Intel(R) Wi-Fi 6 AX201 160MHz", .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap * so we need to restrict the size of transmitted aggregation to the * HT size; mac80211 would otherwise pick the HE max (256) by default. 
*/ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, .num_rbds = IWL_NUM_RBDS_22000_HE, }; const struct iwl_cfg iwl_ax1650s_cfg_quz_hr = { .name = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201D2W)", .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap * so we need to restrict the size of transmitted aggregation to the * HT size; mac80211 would otherwise pick the HE max (256) by default. */ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, .num_rbds = IWL_NUM_RBDS_22000_HE, }; const struct iwl_cfg iwl_ax1650i_cfg_quz_hr = { .name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201NGW)", .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap * so we need to restrict the size of transmitted aggregation to the * HT size; mac80211 would otherwise pick the HE max (256) by default. */ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, .num_rbds = IWL_NUM_RBDS_22000_HE, }; const struct iwl_cfg iwl_ax200_cfg_cc = { .fw_name_pre = IWL_CC_A_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap * so we need to restrict the size of transmitted aggregation to the * HT size; mac80211 would otherwise pick the HE max (256) by default. */ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, .num_rbds = IWL_NUM_RBDS_22000_HE, }; const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0 = { .name = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201NGW)", .fw_name_pre = IWL_QU_B_HR_B_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap * so we need to restrict the size of transmitted aggregation to the * HT size; mac80211 would otherwise pick the HE max (256) by default. */ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, .num_rbds = IWL_NUM_RBDS_22000_HE, }; const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0 = { .name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201D2W)", .fw_name_pre = IWL_QU_B_HR_B_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap * so we need to restrict the size of transmitted aggregation to the * HT size; mac80211 would otherwise pick the HE max (256) by default. */ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, .num_rbds = IWL_NUM_RBDS_22000_HE, }; const struct iwl_cfg killer1650s_2ax_cfg_qu_c0_hr_b0 = { .name = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201NGW)", .fw_name_pre = IWL_QU_C_HR_B_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap * so we need to restrict the size of transmitted aggregation to the * HT size; mac80211 would otherwise pick the HE max (256) by default. */ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, .num_rbds = IWL_NUM_RBDS_22000_HE, }; const struct iwl_cfg killer1650i_2ax_cfg_qu_c0_hr_b0 = { .name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201D2W)", .fw_name_pre = IWL_QU_C_HR_B_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap * so we need to restrict the size of transmitted aggregation to the * HT size; mac80211 would otherwise pick the HE max (256) by default. 
*/ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, .num_rbds = IWL_NUM_RBDS_22000_HE, }; const struct iwl_cfg iwl_qnj_b0_hr_b0_cfg = { .fw_name_pre = IWL_QNJ_B_HR_B_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap * so we need to restrict the size of transmitted aggregation to the * HT size; mac80211 would otherwise pick the HE max (256) by default. */ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, .num_rbds = IWL_NUM_RBDS_22000_HE, }; const struct iwl_cfg iwlax210_2ax_cfg_so_jf_b0 = { .name = "Intel(R) Wireless-AC 9560 160MHz", .fw_name_pre = IWL_SO_A_JF_B_FW_PRE, IWL_DEVICE_AX210, .num_rbds = IWL_NUM_RBDS_NON_HE, }; const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0 = { .name = iwl_ax211_name, .fw_name_pre = IWL_SO_A_GF_A_FW_PRE, .uhb_supported = true, IWL_DEVICE_AX210, .num_rbds = IWL_NUM_RBDS_AX210_HE, }; const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0_long = { .name = iwl_ax211_name, .fw_name_pre = IWL_SO_A_GF_A_FW_PRE, .uhb_supported = true, IWL_DEVICE_AX210, .num_rbds = IWL_NUM_RBDS_AX210_HE, .trans.xtal_latency = 12000, .trans.low_latency_xtal = true, }; const struct iwl_cfg iwlax210_2ax_cfg_ty_gf_a0 = { .name = "Intel(R) Wi-Fi 6 AX210 160MHz", .fw_name_pre = IWL_TY_A_GF_A_FW_PRE, .uhb_supported = true, IWL_DEVICE_AX210, .num_rbds = IWL_NUM_RBDS_AX210_HE, }; const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0 = { .name = iwl_ax411_name, .fw_name_pre = IWL_SO_A_GF4_A_FW_PRE, .uhb_supported = true, IWL_DEVICE_AX210, .num_rbds = IWL_NUM_RBDS_AX210_HE, }; const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0_long = { .name = iwl_ax411_name, .fw_name_pre = IWL_SO_A_GF4_A_FW_PRE, .uhb_supported = true, IWL_DEVICE_AX210, .num_rbds = IWL_NUM_RBDS_AX210_HE, .trans.xtal_latency = 12000, .trans.low_latency_xtal = true, }; const struct iwl_cfg iwlax411_2ax_cfg_sosnj_gf4_a0 = { .name = iwl_ax411_name, .fw_name_pre = IWL_SNJ_A_GF4_A_FW_PRE, .uhb_supported = true, IWL_DEVICE_AX210, .num_rbds = IWL_NUM_RBDS_AX210_HE, }; const struct iwl_cfg iwlax211_cfg_snj_gf_a0 = { .name = iwl_ax211_name, .fw_name_pre = IWL_SNJ_A_GF_A_FW_PRE, .uhb_supported = true, IWL_DEVICE_AX210, .num_rbds = IWL_NUM_RBDS_AX210_HE, }; const struct iwl_cfg iwl_cfg_snj_hr_b0 = { .fw_name_pre = IWL_SNJ_A_HR_B_FW_PRE, .uhb_supported = true, IWL_DEVICE_AX210, .num_rbds = IWL_NUM_RBDS_AX210_HE, }; const struct iwl_cfg iwl_cfg_snj_a0_jf_b0 = { .fw_name_pre = IWL_SNJ_A_JF_B_FW_PRE, .uhb_supported = true, IWL_DEVICE_AX210, .num_rbds = IWL_NUM_RBDS_AX210_HE, }; const struct iwl_cfg iwl_cfg_ma_a0_hr_b0 = { .fw_name_pre = IWL_MA_A_HR_B_FW_PRE, .uhb_supported = true, IWL_DEVICE_AX210, .num_rbds = IWL_NUM_RBDS_AX210_HE, }; const struct iwl_cfg iwl_cfg_ma_a0_gf_a0 = { .fw_name_pre = IWL_MA_A_GF_A_FW_PRE, .uhb_supported = true, IWL_DEVICE_AX210, .num_rbds = IWL_NUM_RBDS_AX210_HE, }; const struct iwl_cfg iwl_cfg_ma_a0_gf4_a0 = { .fw_name_pre = IWL_MA_A_GF4_A_FW_PRE, .uhb_supported = true, IWL_DEVICE_AX210, .num_rbds = IWL_NUM_RBDS_AX210_HE, }; const struct iwl_cfg iwl_cfg_ma_a0_mr_a0 = { .fw_name_pre = IWL_MA_A_MR_A_FW_PRE, .uhb_supported = true, IWL_DEVICE_AX210, .num_rbds = IWL_NUM_RBDS_AX210_HE, }; const struct iwl_cfg iwl_cfg_ma_a0_ms_a0 = { .fw_name_pre = IWL_MA_A_MR_A_FW_PRE, .uhb_supported = false, IWL_DEVICE_AX210, .num_rbds = IWL_NUM_RBDS_AX210_HE, }; const struct iwl_cfg iwl_cfg_so_a0_ms_a0 = { .fw_name_pre = IWL_SO_A_MR_A_FW_PRE, .uhb_supported = false, IWL_DEVICE_AX210, .num_rbds = IWL_NUM_RBDS_AX210_HE, }; const struct iwl_cfg iwl_cfg_ma_a0_fm_a0 = { .fw_name_pre = IWL_MA_A_FM_A_FW_PRE, 
.uhb_supported = true, IWL_DEVICE_AX210, .num_rbds = IWL_NUM_RBDS_AX210_HE, }; const struct iwl_cfg iwl_cfg_snj_a0_mr_a0 = { .fw_name_pre = IWL_SNJ_A_MR_A_FW_PRE, .uhb_supported = true, IWL_DEVICE_AX210, .num_rbds = IWL_NUM_RBDS_AX210_HE, }; const struct iwl_cfg iwl_cfg_snj_a0_ms_a0 = { .fw_name_pre = IWL_SNJ_A_MR_A_FW_PRE, .uhb_supported = false, IWL_DEVICE_AX210, .num_rbds = IWL_NUM_RBDS_AX210_HE, }; const struct iwl_cfg iwl_cfg_so_a0_hr_a0 = { .fw_name_pre = IWL_SO_A_HR_B_FW_PRE, IWL_DEVICE_AX210, .num_rbds = IWL_NUM_RBDS_AX210_HE, }; const struct iwl_cfg iwl_cfg_quz_a0_hr_b0 = { .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap * so we need to restrict the size of transmitted aggregation to the * HT size; mac80211 would otherwise pick the HE max (256) by default. */ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, .num_rbds = IWL_NUM_RBDS_22000_HE, }; const struct iwl_cfg iwl_cfg_bz_a0_hr_b0 = { .fw_name_pre = IWL_BZ_A_HR_B_FW_PRE, .uhb_supported = true, IWL_DEVICE_BZ, .num_rbds = IWL_NUM_RBDS_AX210_HE, }; const struct iwl_cfg iwl_cfg_bz_a0_gf_a0 = { .fw_name_pre = IWL_BZ_A_GF_A_FW_PRE, .uhb_supported = true, IWL_DEVICE_BZ, .num_rbds = IWL_NUM_RBDS_AX210_HE, }; const struct iwl_cfg iwl_cfg_bz_a0_gf4_a0 = { .fw_name_pre = IWL_BZ_A_GF4_A_FW_PRE, .uhb_supported = true, IWL_DEVICE_BZ, .num_rbds = IWL_NUM_RBDS_AX210_HE, }; const struct iwl_cfg iwl_cfg_bz_a0_mr_a0 = { .fw_name_pre = IWL_BZ_A_MR_A_FW_PRE, .uhb_supported = true, IWL_DEVICE_BZ, .num_rbds = IWL_NUM_RBDS_AX210_HE, }; const struct iwl_cfg iwl_cfg_bz_a0_fm_a0 = { .fw_name_pre = IWL_BZ_A_FM_A_FW_PRE, .uhb_supported = true, IWL_DEVICE_BZ, .num_rbds = IWL_NUM_RBDS_AX210_HE, }; const struct iwl_cfg iwl_cfg_gl_a0_fm_a0 = { .fw_name_pre = IWL_GL_A_FM_A_FW_PRE, .uhb_supported = true, IWL_DEVICE_BZ, .num_rbds = IWL_NUM_RBDS_AX210_HE, }; const struct iwl_cfg iwl_cfg_bz_z0_gf_a0 = { .fw_name_pre = IWL_BZ_Z_GF_A_FW_PRE, .uhb_supported = true, IWL_DEVICE_BZ, .num_rbds = IWL_NUM_RBDS_AX210_HE, }; const struct iwl_cfg iwl_cfg_bnj_a0_fm_a0 = { .fw_name_pre = IWL_BNJ_A_FM_A_FW_PRE, .uhb_supported = true, IWL_DEVICE_BZ, .num_rbds = IWL_NUM_RBDS_AX210_HE, }; const struct iwl_cfg iwl_cfg_bnj_a0_fm4_a0 = { .fw_name_pre = IWL_BNJ_A_FM4_A_FW_PRE, .uhb_supported = true, IWL_DEVICE_BZ, .num_rbds = IWL_NUM_RBDS_AX210_HE, }; const struct iwl_cfg iwl_cfg_bnj_a0_gf_a0 = { .fw_name_pre = IWL_BNJ_A_GF_A_FW_PRE, .uhb_supported = true, IWL_DEVICE_BZ, .num_rbds = IWL_NUM_RBDS_AX210_HE, }; const struct iwl_cfg iwl_cfg_bnj_a0_gf4_a0 = { .fw_name_pre = IWL_BNJ_A_GF4_A_FW_PRE, .uhb_supported = true, IWL_DEVICE_BZ, .num_rbds = IWL_NUM_RBDS_AX210_HE, }; const struct iwl_cfg iwl_cfg_bnj_a0_hr_b0 = { .fw_name_pre = IWL_BNJ_A_HR_B_FW_PRE, .uhb_supported = true, IWL_DEVICE_BZ, .num_rbds = IWL_NUM_RBDS_AX210_HE, }; MODULE_FIRMWARE(IWL_QU_B_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_QNJ_B_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_QU_C_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_QU_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_QUZ_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_QUZ_A_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_QNJ_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_CC_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_SO_A_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); 
MODULE_FIRMWARE(IWL_SO_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_SO_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_TY_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_SNJ_A_GF4_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_SNJ_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_SNJ_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_SNJ_A_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_MA_A_HR_B_FW_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_MA_A_GF_A_FW_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_MA_A_GF4_A_FW_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_MA_A_MR_A_FW_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_MA_A_FM_A_FW_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_SNJ_A_MR_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_BZ_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_BZ_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_BZ_A_GF4_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_BZ_A_MR_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_BZ_A_FM_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_GL_A_FM_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_BNJ_A_FM_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_BNJ_A_FM4_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_BNJ_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_BNJ_A_GF4_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_BNJ_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
diff --git a/sys/contrib/dev/iwlwifi/fw/acpi.c b/sys/contrib/dev/iwlwifi/fw/acpi.c
index 0e9e61508ae5..33aae639ad37 100644
--- a/sys/contrib/dev/iwlwifi/fw/acpi.c
+++ b/sys/contrib/dev/iwlwifi/fw/acpi.c
@@ -1,971 +1,1177 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2019-2021 Intel Corporation
+ * Copyright (C) 2019-2022 Intel Corporation
 */
#include <linux/uuid.h>
+#include <linux/dmi.h>
#include "iwl-drv.h"
#include "iwl-debug.h"
#include "acpi.h"
#include "fw/runtime.h"

const guid_t iwl_guid = GUID_INIT(0xF21202BF, 0x8F78, 0x4DC6,
				  0xA5, 0xB3, 0x1F, 0x73,
				  0x8E, 0x28, 0x5A, 0xDE);
IWL_EXPORT_SYMBOL(iwl_guid);

const guid_t iwl_rfi_guid = GUID_INIT(0x7266172C, 0x220B, 0x4B29,
				      0x81, 0x4F, 0x75, 0xE4,
				      0xDD, 0x26, 0xB5, 0xFD);
IWL_EXPORT_SYMBOL(iwl_rfi_guid);

+static const struct dmi_system_id dmi_ppag_approved_list[] = {
+	{ .ident = "HP",
+	  .matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+		},
+	},
+	{ .ident = "SAMSUNG",
+	  .matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"),
+		},
+	},
+	{ .ident = "MSFT",
+	  .matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+		},
+	},
+	{ .ident = "ASUS",
+	  .matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek COMPUTER INC."),
+		},
+	},
+	{}
+};
+
static int iwl_acpi_get_handle(struct device *dev, acpi_string method,
			       acpi_handle *ret_handle)
{
	acpi_handle root_handle;
	acpi_status status;

	root_handle = ACPI_HANDLE(dev);
	if (!root_handle) {
		IWL_DEBUG_DEV_RADIO(dev,
				    "ACPI: Could not retrieve root port handle\n");
		return -ENOENT;
	}

	status = acpi_get_handle(root_handle, method, ret_handle);
	if (ACPI_FAILURE(status)) {
		IWL_DEBUG_DEV_RADIO(dev,
				    "ACPI: %s method not found\n", method);
		return -ENOENT;
	}
	return 0;
}

void
*iwl_acpi_get_object(struct device *dev, acpi_string method) { struct acpi_buffer buf = {ACPI_ALLOCATE_BUFFER, NULL}; acpi_handle handle; acpi_status status; int ret; ret = iwl_acpi_get_handle(dev, method, &handle); if (ret) return ERR_PTR(-ENOENT); /* Call the method with no arguments */ status = acpi_evaluate_object(handle, NULL, NULL, &buf); if (ACPI_FAILURE(status)) { IWL_DEBUG_DEV_RADIO(dev, "ACPI: %s method invocation failed (status: 0x%x)\n", method, status); return ERR_PTR(-ENOENT); } return buf.pointer; } IWL_EXPORT_SYMBOL(iwl_acpi_get_object); /* * Generic function for evaluating a method defined in the device specific * method (DSM) interface. The returned acpi object must be freed by calling * function. */ static void *iwl_acpi_get_dsm_object(struct device *dev, int rev, int func, union acpi_object *args, const guid_t *guid) { union acpi_object *obj; obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), guid, rev, func, args); if (!obj) { IWL_DEBUG_DEV_RADIO(dev, "ACPI: DSM method invocation failed (rev: %d, func:%d)\n", rev, func); return ERR_PTR(-ENOENT); } return obj; } /* * Generic function to evaluate a DSM with no arguments * and an integer return value, * (as an integer object or inside a buffer object), * verify and assign the value in the "value" parameter. * return 0 in success and the appropriate errno otherwise. */ static int iwl_acpi_get_dsm_integer(struct device *dev, int rev, int func, const guid_t *guid, u64 *value, size_t expected_size) { union acpi_object *obj; int ret = 0; obj = iwl_acpi_get_dsm_object(dev, rev, func, NULL, guid); if (IS_ERR(obj)) { IWL_DEBUG_DEV_RADIO(dev, "Failed to get DSM object. func= %d\n", func); return -ENOENT; } if (obj->type == ACPI_TYPE_INTEGER) { *value = obj->integer.value; } else if (obj->type == ACPI_TYPE_BUFFER) { __le64 le_value = 0; if (WARN_ON_ONCE(expected_size > sizeof(le_value))) return -EINVAL; /* if the buffer size doesn't match the expected size */ if (obj->buffer.length != expected_size) IWL_DEBUG_DEV_RADIO(dev, "ACPI: DSM invalid buffer size, padding or truncating (%d)\n", obj->buffer.length); /* assuming LE from Intel BIOS spec */ memcpy(&le_value, obj->buffer.pointer, min_t(size_t, expected_size, (size_t)obj->buffer.length)); *value = le64_to_cpu(le_value); } else { IWL_DEBUG_DEV_RADIO(dev, "ACPI: DSM method did not return a valid object, type=%d\n", obj->type); ret = -EINVAL; goto out; } IWL_DEBUG_DEV_RADIO(dev, "ACPI: DSM method evaluated: func=%d, ret=%d\n", func, ret); out: ACPI_FREE(obj); return ret; } /* * Evaluate a DSM with no arguments and a u8 return value, */ int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func, const guid_t *guid, u8 *value) { int ret; u64 val; ret = iwl_acpi_get_dsm_integer(dev, rev, func, guid, &val, sizeof(u8)); if (ret < 0) return ret; /* cast val (u64) to be u8 */ *value = (u8)val; return 0; } IWL_EXPORT_SYMBOL(iwl_acpi_get_dsm_u8); /* * Evaluate a DSM with no arguments and a u32 return value, */ int iwl_acpi_get_dsm_u32(struct device *dev, int rev, int func, const guid_t *guid, u32 *value) { int ret; u64 val; ret = iwl_acpi_get_dsm_integer(dev, rev, func, guid, &val, sizeof(u32)); if (ret < 0) return ret; /* cast val (u64) to be u32 */ *value = (u32)val; return 0; } IWL_EXPORT_SYMBOL(iwl_acpi_get_dsm_u32); union acpi_object *iwl_acpi_get_wifi_pkg_range(struct device *dev, union acpi_object *data, int min_data_size, int max_data_size, int *tbl_rev) { int i; union acpi_object *wifi_pkg; /* * We need at least one entry in the wifi package that * describes the domain, and one more 
entry, otherwise there's * no point in reading it. */ if (WARN_ON_ONCE(min_data_size < 2 || min_data_size > max_data_size)) return ERR_PTR(-EINVAL); /* * We need at least two packages, one for the revision and one * for the data itself. Also check that the revision is valid * (i.e. it is an integer (each caller has to check by itself * if the returned revision is supported)). */ if (data->type != ACPI_TYPE_PACKAGE || data->package.count < 2 || data->package.elements[0].type != ACPI_TYPE_INTEGER) { IWL_DEBUG_DEV_RADIO(dev, "Invalid packages structure\n"); return ERR_PTR(-EINVAL); } *tbl_rev = data->package.elements[0].integer.value; /* loop through all the packages to find the one for WiFi */ for (i = 1; i < data->package.count; i++) { union acpi_object *domain; wifi_pkg = &data->package.elements[i]; /* skip entries that are not a package with the right size */ if (wifi_pkg->type != ACPI_TYPE_PACKAGE || wifi_pkg->package.count < min_data_size || wifi_pkg->package.count > max_data_size) continue; domain = &wifi_pkg->package.elements[0]; if (domain->type == ACPI_TYPE_INTEGER && domain->integer.value == ACPI_WIFI_DOMAIN) goto found; } return ERR_PTR(-ENOENT); found: return wifi_pkg; } IWL_EXPORT_SYMBOL(iwl_acpi_get_wifi_pkg_range); int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt, union iwl_tas_config_cmd *cmd, int fw_ver) { union acpi_object *wifi_pkg, *data; int ret, tbl_rev, i, block_list_size, enabled; data = iwl_acpi_get_object(fwrt->dev, ACPI_WTAS_METHOD); if (IS_ERR(data)) return PTR_ERR(data); /* try to read wtas table revision 1 or revision 0*/ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data, ACPI_WTAS_WIFI_DATA_SIZE, &tbl_rev); if (IS_ERR(wifi_pkg)) { ret = PTR_ERR(wifi_pkg); goto out_free; } if (tbl_rev == 1 && wifi_pkg->package.elements[1].type == ACPI_TYPE_INTEGER) { u32 tas_selection = (u32)wifi_pkg->package.elements[1].integer.value; u16 override_iec = (tas_selection & ACPI_WTAS_OVERRIDE_IEC_MSK) >> ACPI_WTAS_OVERRIDE_IEC_POS; u16 enabled_iec = (tas_selection & ACPI_WTAS_ENABLE_IEC_MSK) >> ACPI_WTAS_ENABLE_IEC_POS; u8 usa_tas_uhb = (tas_selection & ACPI_WTAS_USA_UHB_MSK) >> ACPI_WTAS_USA_UHB_POS; enabled = tas_selection & ACPI_WTAS_ENABLED_MSK; if (fw_ver <= 3) { cmd->v3.override_tas_iec = cpu_to_le16(override_iec); cmd->v3.enable_tas_iec = cpu_to_le16(enabled_iec); } else { cmd->v4.usa_tas_uhb_allowed = usa_tas_uhb; cmd->v4.override_tas_iec = (u8)override_iec; cmd->v4.enable_tas_iec = (u8)enabled_iec; } } else if (tbl_rev == 0 && wifi_pkg->package.elements[1].type == ACPI_TYPE_INTEGER) { enabled = !!wifi_pkg->package.elements[1].integer.value; } else { ret = -EINVAL; goto out_free; } if (!enabled) { IWL_DEBUG_RADIO(fwrt, "TAS not enabled\n"); ret = 0; goto out_free; } IWL_DEBUG_RADIO(fwrt, "Reading TAS table revision %d\n", tbl_rev); if (wifi_pkg->package.elements[2].type != ACPI_TYPE_INTEGER || wifi_pkg->package.elements[2].integer.value > APCI_WTAS_BLACK_LIST_MAX) { IWL_DEBUG_RADIO(fwrt, "TAS invalid array size %llu\n", wifi_pkg->package.elements[2].integer.value); ret = -EINVAL; goto out_free; } block_list_size = wifi_pkg->package.elements[2].integer.value; cmd->v4.block_list_size = cpu_to_le32(block_list_size); IWL_DEBUG_RADIO(fwrt, "TAS array size %u\n", block_list_size); if (block_list_size > APCI_WTAS_BLACK_LIST_MAX) { IWL_DEBUG_RADIO(fwrt, "TAS invalid array size value %u\n", block_list_size); ret = -EINVAL; goto out_free; } for (i = 0; i < block_list_size; i++) { u32 country; if (wifi_pkg->package.elements[3 + i].type != ACPI_TYPE_INTEGER) { IWL_DEBUG_RADIO(fwrt, "TAS 
invalid array elem %d\n", 3 + i); ret = -EINVAL; goto out_free; } country = wifi_pkg->package.elements[3 + i].integer.value; cmd->v4.block_list_array[i] = cpu_to_le32(country); IWL_DEBUG_RADIO(fwrt, "TAS block list country %d\n", country); } ret = 1; out_free: kfree(data); return ret; } IWL_EXPORT_SYMBOL(iwl_acpi_get_tas); int iwl_acpi_get_mcc(struct device *dev, char *mcc) { union acpi_object *wifi_pkg, *data; u32 mcc_val; int ret, tbl_rev; data = iwl_acpi_get_object(dev, ACPI_WRDD_METHOD); if (IS_ERR(data)) return PTR_ERR(data); wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data, ACPI_WRDD_WIFI_DATA_SIZE, &tbl_rev); if (IS_ERR(wifi_pkg)) { ret = PTR_ERR(wifi_pkg); goto out_free; } if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER || tbl_rev != 0) { ret = -EINVAL; goto out_free; } mcc_val = wifi_pkg->package.elements[1].integer.value; mcc[0] = (mcc_val >> 8) & 0xff; mcc[1] = mcc_val & 0xff; mcc[2] = '\0'; ret = 0; out_free: kfree(data); return ret; } IWL_EXPORT_SYMBOL(iwl_acpi_get_mcc); u64 iwl_acpi_get_pwr_limit(struct device *dev) { union acpi_object *data, *wifi_pkg; u64 dflt_pwr_limit; int tbl_rev; data = iwl_acpi_get_object(dev, ACPI_SPLC_METHOD); if (IS_ERR(data)) { dflt_pwr_limit = 0; goto out; } wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data, ACPI_SPLC_WIFI_DATA_SIZE, &tbl_rev); if (IS_ERR(wifi_pkg) || tbl_rev != 0 || wifi_pkg->package.elements[1].integer.value != ACPI_TYPE_INTEGER) { dflt_pwr_limit = 0; goto out_free; } dflt_pwr_limit = wifi_pkg->package.elements[1].integer.value; out_free: kfree(data); out: return dflt_pwr_limit; } IWL_EXPORT_SYMBOL(iwl_acpi_get_pwr_limit); int iwl_acpi_get_eckv(struct device *dev, u32 *extl_clk) { union acpi_object *wifi_pkg, *data; int ret, tbl_rev; data = iwl_acpi_get_object(dev, ACPI_ECKV_METHOD); if (IS_ERR(data)) return PTR_ERR(data); wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data, ACPI_ECKV_WIFI_DATA_SIZE, &tbl_rev); if (IS_ERR(wifi_pkg)) { ret = PTR_ERR(wifi_pkg); goto out_free; } if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER || tbl_rev != 0) { ret = -EINVAL; goto out_free; } *extl_clk = wifi_pkg->package.elements[1].integer.value; ret = 0; out_free: kfree(data); return ret; } IWL_EXPORT_SYMBOL(iwl_acpi_get_eckv); static int iwl_sar_set_profile(union acpi_object *table, struct iwl_sar_profile *profile, bool enabled, u8 num_chains, u8 num_sub_bands) { int i, j, idx = 0; /* * The table from ACPI is flat, but we store it in a * structured array. 
*/ for (i = 0; i < ACPI_SAR_NUM_CHAINS_REV2; i++) { for (j = 0; j < ACPI_SAR_NUM_SUB_BANDS_REV2; j++) { /* if we don't have the values, use the default */ if (i >= num_chains || j >= num_sub_bands) { profile->chains[i].subbands[j] = 0; } else { if (table[idx].type != ACPI_TYPE_INTEGER || table[idx].integer.value > U8_MAX) return -EINVAL; profile->chains[i].subbands[j] = table[idx].integer.value; idx++; } } } /* Only if all values were valid can the profile be enabled */ profile->enabled = enabled; return 0; } static int iwl_sar_fill_table(struct iwl_fw_runtime *fwrt, __le16 *per_chain, u32 n_subbands, int prof_a, int prof_b) { int profs[ACPI_SAR_NUM_CHAINS_REV0] = { prof_a, prof_b }; int i, j; for (i = 0; i < ACPI_SAR_NUM_CHAINS_REV0; i++) { struct iwl_sar_profile *prof; /* don't allow SAR to be disabled (profile 0 means disable) */ if (profs[i] == 0) return -EPERM; /* we are off by one, so allow up to ACPI_SAR_PROFILE_NUM */ if (profs[i] > ACPI_SAR_PROFILE_NUM) return -EINVAL; /* profiles go from 1 to 4, so decrement to access the array */ prof = &fwrt->sar_profiles[profs[i] - 1]; /* if the profile is disabled, do nothing */ if (!prof->enabled) { IWL_DEBUG_RADIO(fwrt, "SAR profile %d is disabled.\n", profs[i]); /* * if one of the profiles is disabled, we * ignore all of them and return 1 to * differentiate disabled from other failures. */ return 1; } IWL_DEBUG_INFO(fwrt, "SAR EWRD: chain %d profile index %d\n", i, profs[i]); IWL_DEBUG_RADIO(fwrt, " Chain[%d]:\n", i); for (j = 0; j < n_subbands; j++) { per_chain[i * n_subbands + j] = cpu_to_le16(prof->chains[i].subbands[j]); IWL_DEBUG_RADIO(fwrt, " Band[%d] = %d * .125dBm\n", j, prof->chains[i].subbands[j]); } } return 0; } int iwl_sar_select_profile(struct iwl_fw_runtime *fwrt, __le16 *per_chain, u32 n_tables, u32 n_subbands, int prof_a, int prof_b) { int i, ret = 0; for (i = 0; i < n_tables; i++) { ret = iwl_sar_fill_table(fwrt, &per_chain[i * n_subbands * ACPI_SAR_NUM_CHAINS_REV0], n_subbands, prof_a, prof_b); if (ret) break; } return ret; } IWL_EXPORT_SYMBOL(iwl_sar_select_profile); int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt) { union acpi_object *wifi_pkg, *table, *data; - bool enabled; int ret, tbl_rev; + u32 flags; u8 num_chains, num_sub_bands; data = iwl_acpi_get_object(fwrt->dev, ACPI_WRDS_METHOD); if (IS_ERR(data)) return PTR_ERR(data); /* start by trying to read revision 2 */ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data, ACPI_WRDS_WIFI_DATA_SIZE_REV2, &tbl_rev); if (!IS_ERR(wifi_pkg)) { if (tbl_rev != 2) { ret = PTR_ERR(wifi_pkg); goto out_free; } num_chains = ACPI_SAR_NUM_CHAINS_REV2; num_sub_bands = ACPI_SAR_NUM_SUB_BANDS_REV2; goto read_table; } /* then try revision 1 */ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data, ACPI_WRDS_WIFI_DATA_SIZE_REV1, &tbl_rev); if (!IS_ERR(wifi_pkg)) { if (tbl_rev != 1) { ret = PTR_ERR(wifi_pkg); goto out_free; } num_chains = ACPI_SAR_NUM_CHAINS_REV1; num_sub_bands = ACPI_SAR_NUM_SUB_BANDS_REV1; goto read_table; } /* then finally revision 0 */ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data, ACPI_WRDS_WIFI_DATA_SIZE_REV0, &tbl_rev); if (!IS_ERR(wifi_pkg)) { if (tbl_rev != 0) { ret = PTR_ERR(wifi_pkg); goto out_free; } num_chains = ACPI_SAR_NUM_CHAINS_REV0; num_sub_bands = ACPI_SAR_NUM_SUB_BANDS_REV0; goto read_table; } ret = PTR_ERR(wifi_pkg); goto out_free; read_table: if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) { ret = -EINVAL; goto out_free; } IWL_DEBUG_RADIO(fwrt, "Reading WRDS tbl_rev=%d\n", tbl_rev); - enabled = 
!!(wifi_pkg->package.elements[1].integer.value); + flags = wifi_pkg->package.elements[1].integer.value; + fwrt->reduced_power_flags = flags >> IWL_REDUCE_POWER_FLAGS_POS; /* position of the actual table */ table = &wifi_pkg->package.elements[2]; /* The profile from WRDS is officially profile 1, but goes * into sar_profiles[0] (because we don't have a profile 0). */ - ret = iwl_sar_set_profile(table, &fwrt->sar_profiles[0], enabled, + ret = iwl_sar_set_profile(table, &fwrt->sar_profiles[0], + flags & IWL_SAR_ENABLE_MSK, num_chains, num_sub_bands); out_free: kfree(data); return ret; } IWL_EXPORT_SYMBOL(iwl_sar_get_wrds_table); int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt) { union acpi_object *wifi_pkg, *data; bool enabled; int i, n_profiles, tbl_rev, pos; int ret = 0; u8 num_chains, num_sub_bands; data = iwl_acpi_get_object(fwrt->dev, ACPI_EWRD_METHOD); if (IS_ERR(data)) return PTR_ERR(data); /* start by trying to read revision 2 */ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data, ACPI_EWRD_WIFI_DATA_SIZE_REV2, &tbl_rev); if (!IS_ERR(wifi_pkg)) { if (tbl_rev != 2) { ret = PTR_ERR(wifi_pkg); goto out_free; } num_chains = ACPI_SAR_NUM_CHAINS_REV2; num_sub_bands = ACPI_SAR_NUM_SUB_BANDS_REV2; goto read_table; } /* then try revision 1 */ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data, ACPI_EWRD_WIFI_DATA_SIZE_REV1, &tbl_rev); if (!IS_ERR(wifi_pkg)) { if (tbl_rev != 1) { ret = PTR_ERR(wifi_pkg); goto out_free; } num_chains = ACPI_SAR_NUM_CHAINS_REV1; num_sub_bands = ACPI_SAR_NUM_SUB_BANDS_REV1; goto read_table; } /* then finally revision 0 */ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data, ACPI_EWRD_WIFI_DATA_SIZE_REV0, &tbl_rev); if (!IS_ERR(wifi_pkg)) { if (tbl_rev != 0) { ret = PTR_ERR(wifi_pkg); goto out_free; } num_chains = ACPI_SAR_NUM_CHAINS_REV0; num_sub_bands = ACPI_SAR_NUM_SUB_BANDS_REV0; goto read_table; } ret = PTR_ERR(wifi_pkg); goto out_free; read_table: if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER || wifi_pkg->package.elements[2].type != ACPI_TYPE_INTEGER) { ret = -EINVAL; goto out_free; } enabled = !!(wifi_pkg->package.elements[1].integer.value); n_profiles = wifi_pkg->package.elements[2].integer.value; /* * Check the validity of n_profiles. The EWRD profiles start * from index 1, so the maximum value allowed here is * ACPI_SAR_PROFILES_NUM - 1. */ if (n_profiles <= 0 || n_profiles >= ACPI_SAR_PROFILE_NUM) { ret = -EINVAL; goto out_free; } /* the tables start at element 3 */ pos = 3; for (i = 0; i < n_profiles; i++) { /* The EWRD profiles officially go from 2 to 4, but we * save them in sar_profiles[1-3] (because we don't * have profile 0). So in the array we start from 1. 
*/ ret = iwl_sar_set_profile(&wifi_pkg->package.elements[pos], &fwrt->sar_profiles[i + 1], enabled, num_chains, num_sub_bands); if (ret < 0) break; /* go to the next table */ pos += num_chains * num_sub_bands; } out_free: kfree(data); return ret; } IWL_EXPORT_SYMBOL(iwl_sar_get_ewrd_table); int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt) { union acpi_object *wifi_pkg, *data; int i, j, k, ret, tbl_rev; u8 num_bands, num_profiles; static const struct { u8 revisions; u8 bands; u8 profiles; u8 min_profiles; } rev_data[] = { { .revisions = BIT(3), .bands = ACPI_GEO_NUM_BANDS_REV2, .profiles = ACPI_NUM_GEO_PROFILES_REV3, .min_profiles = 3, }, { .revisions = BIT(2), .bands = ACPI_GEO_NUM_BANDS_REV2, .profiles = ACPI_NUM_GEO_PROFILES, }, { .revisions = BIT(0) | BIT(1), .bands = ACPI_GEO_NUM_BANDS_REV0, .profiles = ACPI_NUM_GEO_PROFILES, }, }; int idx; /* start from one to skip the domain */ int entry_idx = 1; BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES_REV3 != IWL_NUM_GEO_PROFILES_V3); BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES != IWL_NUM_GEO_PROFILES); data = iwl_acpi_get_object(fwrt->dev, ACPI_WGDS_METHOD); if (IS_ERR(data)) return PTR_ERR(data); /* read the highest revision we understand first */ for (idx = 0; idx < ARRAY_SIZE(rev_data); idx++) { /* min_profiles != 0 requires num_profiles header */ u32 hdr_size = 1 + !!rev_data[idx].min_profiles; u32 profile_size = ACPI_GEO_PER_CHAIN_SIZE * rev_data[idx].bands; u32 max_size = hdr_size + profile_size * rev_data[idx].profiles; u32 min_size; if (!rev_data[idx].min_profiles) min_size = max_size; else min_size = hdr_size + profile_size * rev_data[idx].min_profiles; wifi_pkg = iwl_acpi_get_wifi_pkg_range(fwrt->dev, data, min_size, max_size, &tbl_rev); if (!IS_ERR(wifi_pkg)) { if (!(BIT(tbl_rev) & rev_data[idx].revisions)) continue; num_bands = rev_data[idx].bands; num_profiles = rev_data[idx].profiles; if (rev_data[idx].min_profiles) { /* read header that says # of profiles */ union acpi_object *entry; entry = &wifi_pkg->package.elements[entry_idx]; entry_idx++; if (entry->type != ACPI_TYPE_INTEGER || entry->integer.value > num_profiles) { ret = -EINVAL; goto out_free; } num_profiles = entry->integer.value; /* * this also validates >= min_profiles since we * otherwise wouldn't have gotten the data when * looking up in ACPI */ if (wifi_pkg->package.count != hdr_size + profile_size * num_profiles) { ret = -EINVAL; goto out_free; } } goto read_table; } } if (idx < ARRAY_SIZE(rev_data)) ret = PTR_ERR(wifi_pkg); else ret = -ENOENT; goto out_free; read_table: fwrt->geo_rev = tbl_rev; for (i = 0; i < num_profiles; i++) { for (j = 0; j < ACPI_GEO_NUM_BANDS_REV2; j++) { union acpi_object *entry; /* * num_bands is either 2 or 3, if it's only 2 then * fill the third band (6 GHz) with the values from * 5 GHz (second band) */ if (j >= num_bands) { fwrt->geo_profiles[i].bands[j].max = fwrt->geo_profiles[i].bands[1].max; } else { entry = &wifi_pkg->package.elements[entry_idx]; entry_idx++; if (entry->type != ACPI_TYPE_INTEGER || entry->integer.value > U8_MAX) { ret = -EINVAL; goto out_free; } fwrt->geo_profiles[i].bands[j].max = entry->integer.value; } for (k = 0; k < ACPI_GEO_NUM_CHAINS; k++) { /* same here as above */ if (j >= num_bands) { fwrt->geo_profiles[i].bands[j].chains[k] = fwrt->geo_profiles[i].bands[1].chains[k]; } else { entry = &wifi_pkg->package.elements[entry_idx]; entry_idx++; if (entry->type != ACPI_TYPE_INTEGER || entry->integer.value > U8_MAX) { ret = -EINVAL; goto out_free; } fwrt->geo_profiles[i].bands[j].chains[k] = entry->integer.value; } } } } 
fwrt->geo_num_profiles = num_profiles; fwrt->geo_enabled = true; ret = 0; out_free: kfree(data); return ret; } IWL_EXPORT_SYMBOL(iwl_sar_get_wgds_table); bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt) { /* * The PER_CHAIN_LIMIT_OFFSET_CMD command is not supported on * earlier firmware versions. Unfortunately, we don't have a * TLV API flag to rely on, so rely on the major version which * is in the first byte of ucode_ver. This was implemented * initially on version 38 and then backported to 17. It was * also backported to 29, but only for 7265D devices. The * intention was to have it in 36 as well, but not all 8000 * family got this feature enabled. The 8000 family is the * only one using version 36, so skip this version entirely. */ return IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) >= 38 || - IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 17 || - (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 29 && - ((fwrt->trans->hw_rev & CSR_HW_REV_TYPE_MSK) == - CSR_HW_REV_TYPE_7265D)); + (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 17 && + fwrt->trans->hw_rev != CSR_HW_REV_TYPE_3160) || + (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 29 && + ((fwrt->trans->hw_rev & CSR_HW_REV_TYPE_MSK) == + CSR_HW_REV_TYPE_7265D)); } IWL_EXPORT_SYMBOL(iwl_sar_geo_support); int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt, struct iwl_per_chain_offset *table, u32 n_bands, u32 n_profiles) { int i, j; if (!iwl_sar_geo_support(fwrt)) return -EOPNOTSUPP; for (i = 0; i < n_profiles; i++) { for (j = 0; j < n_bands; j++) { struct iwl_per_chain_offset *chain = &table[i * n_bands + j]; chain->max_tx_power = cpu_to_le16(fwrt->geo_profiles[i].bands[j].max); chain->chain_a = fwrt->geo_profiles[i].bands[j].chains[0]; chain->chain_b = fwrt->geo_profiles[i].bands[j].chains[1]; IWL_DEBUG_RADIO(fwrt, "SAR geographic profile[%d] Band[%d]: chain A = %d chain B = %d max_tx_power = %d\n", i, j, fwrt->geo_profiles[i].bands[j].chains[0], fwrt->geo_profiles[i].bands[j].chains[1], fwrt->geo_profiles[i].bands[j].max); } } return 0; } IWL_EXPORT_SYMBOL(iwl_sar_geo_init); __le32 iwl_acpi_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt) { int ret; u8 value; __le32 config_bitmap = 0; /* ** Evaluate func 'DSM_FUNC_ENABLE_INDONESIA_5G2' */ ret = iwl_acpi_get_dsm_u8(fwrt->dev, 0, DSM_FUNC_ENABLE_INDONESIA_5G2, &iwl_guid, &value); if (!ret && value == DSM_VALUE_INDONESIA_ENABLE) config_bitmap |= cpu_to_le32(LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK); /* ** Evaluate func 'DSM_FUNC_DISABLE_SRD' */ ret = iwl_acpi_get_dsm_u8(fwrt->dev, 0, DSM_FUNC_DISABLE_SRD, &iwl_guid, &value); if (!ret) { if (value == DSM_VALUE_SRD_PASSIVE) config_bitmap |= cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK); else if (value == DSM_VALUE_SRD_DISABLE) config_bitmap |= cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_DISABLED_MSK); } return config_bitmap; } IWL_EXPORT_SYMBOL(iwl_acpi_get_lari_config_bitmap); + +int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt) +{ + union acpi_object *wifi_pkg, *data, *flags; + int i, j, ret, tbl_rev, num_sub_bands = 0; + int idx = 2; + + fwrt->ppag_flags = 0; + + data = iwl_acpi_get_object(fwrt->dev, ACPI_PPAG_METHOD); + if (IS_ERR(data)) + return PTR_ERR(data); + + /* try to read ppag table rev 2 or 1 (both have the same data size) */ + wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data, + ACPI_PPAG_WIFI_DATA_SIZE_V2, &tbl_rev); + + if (!IS_ERR(wifi_pkg)) { + if (tbl_rev == 1 || tbl_rev == 2) { + num_sub_bands = IWL_NUM_SUB_BANDS_V2; + IWL_DEBUG_RADIO(fwrt, + "Reading PPAG table v2 (tbl_rev=%d)\n", + tbl_rev); + goto read_table; + } else { + ret = -EINVAL; + 
goto out_free; + } + } + + /* try to read ppag table revision 0 */ + wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data, + ACPI_PPAG_WIFI_DATA_SIZE_V1, &tbl_rev); + + if (!IS_ERR(wifi_pkg)) { + if (tbl_rev != 0) { + ret = -EINVAL; + goto out_free; + } + num_sub_bands = IWL_NUM_SUB_BANDS_V1; + IWL_DEBUG_RADIO(fwrt, "Reading PPAG table v1 (tbl_rev=0)\n"); + goto read_table; + } + +read_table: + fwrt->ppag_ver = tbl_rev; + flags = &wifi_pkg->package.elements[1]; + + if (flags->type != ACPI_TYPE_INTEGER) { + ret = -EINVAL; + goto out_free; + } + + fwrt->ppag_flags = flags->integer.value & ACPI_PPAG_MASK; + + if (!fwrt->ppag_flags) { + ret = 0; + goto out_free; + } + + /* + * read, verify gain values and save them into the PPAG table. + * first sub-band (j=0) corresponds to Low-Band (2.4GHz), and the + * following sub-bands to High-Band (5GHz). + */ + for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) { + for (j = 0; j < num_sub_bands; j++) { + union acpi_object *ent; + + ent = &wifi_pkg->package.elements[idx++]; + if (ent->type != ACPI_TYPE_INTEGER) { + ret = -EINVAL; + goto out_free; + } + + fwrt->ppag_chains[i].subbands[j] = ent->integer.value; + + if ((j == 0 && + (fwrt->ppag_chains[i].subbands[j] > ACPI_PPAG_MAX_LB || + fwrt->ppag_chains[i].subbands[j] < ACPI_PPAG_MIN_LB)) || + (j != 0 && + (fwrt->ppag_chains[i].subbands[j] > ACPI_PPAG_MAX_HB || + fwrt->ppag_chains[i].subbands[j] < ACPI_PPAG_MIN_HB))) { + fwrt->ppag_flags = 0; + ret = -EINVAL; + goto out_free; + } + } + } + + + ret = 0; + +out_free: + kfree(data); + return ret; +} +IWL_EXPORT_SYMBOL(iwl_acpi_get_ppag_table); + +int iwl_read_ppag_table(struct iwl_fw_runtime *fwrt, union iwl_ppag_table_cmd *cmd, + int *cmd_size) +{ + u8 cmd_ver; + int i, j, num_sub_bands; + s8 *gain; + + if (!fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_PPAG)) { + IWL_DEBUG_RADIO(fwrt, + "PPAG capability not supported by FW, command not sent.\n"); + return -EINVAL; + } + if (!fwrt->ppag_flags) { + IWL_DEBUG_RADIO(fwrt, "PPAG not enabled, command not sent.\n"); + return -EINVAL; + } + + /* The 'flags' field is the same in v1 and in v2 so we can just + * use v1 to access it. 
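
The bounds check in the PPAG parsing loop above is easy to misread because sub-band 0 (the 2.4 GHz low band) and the remaining sub-bands use different limits. A minimal standalone sketch of that check, using the ACPI_PPAG_* bounds defined in fw/acpi.h; iwl_ppag_gain_in_range() is a hypothetical helper, not part of the driver:

/*
 * Hedged sketch: mirrors the range validation done while parsing the
 * PPAG table.  Gains are in 1/8 dBm; sub-band 0 is the 2.4 GHz low
 * band, everything else is treated as a 5/6 GHz high band.
 */
static bool iwl_ppag_gain_in_range(int subband, s8 gain)
{
	if (subband == 0)
		return gain >= ACPI_PPAG_MIN_LB && gain <= ACPI_PPAG_MAX_LB;
	return gain >= ACPI_PPAG_MIN_HB && gain <= ACPI_PPAG_MAX_HB;
}
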
+ */ + cmd->v1.flags = cpu_to_le32(fwrt->ppag_flags); + cmd_ver = iwl_fw_lookup_cmd_ver(fwrt->fw, + WIDE_ID(PHY_OPS_GROUP, PER_PLATFORM_ANT_GAIN_CMD), + IWL_FW_CMD_VER_UNKNOWN); + if (cmd_ver == 1) { + num_sub_bands = IWL_NUM_SUB_BANDS_V1; + gain = cmd->v1.gain[0]; + *cmd_size = sizeof(cmd->v1); + if (fwrt->ppag_ver == 1 || fwrt->ppag_ver == 2) { + IWL_DEBUG_RADIO(fwrt, + "PPAG table rev is %d but FW supports v1, sending truncated table\n", + fwrt->ppag_ver); + cmd->v1.flags &= cpu_to_le32(IWL_PPAG_ETSI_MASK); + } + } else if (cmd_ver == 2 || cmd_ver == 3) { + num_sub_bands = IWL_NUM_SUB_BANDS_V2; + gain = cmd->v2.gain[0]; + *cmd_size = sizeof(cmd->v2); + if (fwrt->ppag_ver == 0) { + IWL_DEBUG_RADIO(fwrt, + "PPAG table is v1 but FW supports v2, sending padded table\n"); + } else if (cmd_ver == 2 && fwrt->ppag_ver == 2) { + IWL_DEBUG_RADIO(fwrt, + "PPAG table is v3 but FW supports v2, sending partial bitmap.\n"); + cmd->v1.flags &= cpu_to_le32(IWL_PPAG_ETSI_MASK); + } + } else { + IWL_DEBUG_RADIO(fwrt, "Unsupported PPAG command version\n"); + return -EINVAL; + } + + for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) { + for (j = 0; j < num_sub_bands; j++) { + gain[i * num_sub_bands + j] = + fwrt->ppag_chains[i].subbands[j]; + IWL_DEBUG_RADIO(fwrt, + "PPAG table: chain[%d] band[%d]: gain = %d\n", + i, j, gain[i * num_sub_bands + j]); + } + } + + return 0; +} +IWL_EXPORT_SYMBOL(iwl_read_ppag_table); + +bool iwl_acpi_is_ppag_approved(struct iwl_fw_runtime *fwrt) +{ + + if (!dmi_check_system(dmi_ppag_approved_list)) { + IWL_DEBUG_RADIO(fwrt, + "System vendor '%s' is not in the approved list, disabling PPAG.\n", + dmi_get_system_info(DMI_SYS_VENDOR)); + fwrt->ppag_flags = 0; + return false; + } + + return true; +} +IWL_EXPORT_SYMBOL(iwl_acpi_is_ppag_approved); diff --git a/sys/contrib/dev/iwlwifi/fw/acpi.h b/sys/contrib/dev/iwlwifi/fw/acpi.h index 466c95c21aa9..6f361c59106f 100644 --- a/sys/contrib/dev/iwlwifi/fw/acpi.h +++ b/sys/contrib/dev/iwlwifi/fw/acpi.h @@ -1,321 +1,349 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright (C) 2017 Intel Deutschland GmbH * Copyright (C) 2018-2022 Intel Corporation */ #ifndef __iwl_fw_acpi__ #define __iwl_fw_acpi__ #include #include "fw/api/commands.h" #include "fw/api/power.h" #include "fw/api/phy.h" #include "fw/api/nvm-reg.h" #include "fw/img.h" #include "iwl-trans.h" #define ACPI_WRDS_METHOD "WRDS" #define ACPI_EWRD_METHOD "EWRD" #define ACPI_WGDS_METHOD "WGDS" #define ACPI_WRDD_METHOD "WRDD" #define ACPI_SPLC_METHOD "SPLC" #define ACPI_ECKV_METHOD "ECKV" #define ACPI_PPAG_METHOD "PPAG" #define ACPI_WTAS_METHOD "WTAS" #define ACPI_WIFI_DOMAIN (0x07) #define ACPI_SAR_PROFILE_NUM 4 #define ACPI_NUM_GEO_PROFILES 3 #define ACPI_NUM_GEO_PROFILES_REV3 8 #define ACPI_GEO_PER_CHAIN_SIZE 3 #define ACPI_SAR_NUM_CHAINS_REV0 2 #define ACPI_SAR_NUM_CHAINS_REV1 2 #define ACPI_SAR_NUM_CHAINS_REV2 4 #define ACPI_SAR_NUM_SUB_BANDS_REV0 5 #define ACPI_SAR_NUM_SUB_BANDS_REV1 11 #define ACPI_SAR_NUM_SUB_BANDS_REV2 11 #define ACPI_WRDS_WIFI_DATA_SIZE_REV0 (ACPI_SAR_NUM_CHAINS_REV0 * \ ACPI_SAR_NUM_SUB_BANDS_REV0 + 2) #define ACPI_WRDS_WIFI_DATA_SIZE_REV1 (ACPI_SAR_NUM_CHAINS_REV1 * \ ACPI_SAR_NUM_SUB_BANDS_REV1 + 2) #define ACPI_WRDS_WIFI_DATA_SIZE_REV2 (ACPI_SAR_NUM_CHAINS_REV2 * \ ACPI_SAR_NUM_SUB_BANDS_REV2 + 2) #define ACPI_EWRD_WIFI_DATA_SIZE_REV0 ((ACPI_SAR_PROFILE_NUM - 1) * \ ACPI_SAR_NUM_CHAINS_REV0 * \ ACPI_SAR_NUM_SUB_BANDS_REV0 + 3) #define ACPI_EWRD_WIFI_DATA_SIZE_REV1 ((ACPI_SAR_PROFILE_NUM - 1) * \ ACPI_SAR_NUM_CHAINS_REV1 * \ 
ACPI_SAR_NUM_SUB_BANDS_REV1 + 3) #define ACPI_EWRD_WIFI_DATA_SIZE_REV2 ((ACPI_SAR_PROFILE_NUM - 1) * \ ACPI_SAR_NUM_CHAINS_REV2 * \ ACPI_SAR_NUM_SUB_BANDS_REV2 + 3) /* revision 0 and 1 are identical, except for the semantics in the FW */ #define ACPI_GEO_NUM_BANDS_REV0 2 #define ACPI_GEO_NUM_BANDS_REV2 3 #define ACPI_GEO_NUM_CHAINS 2 #define ACPI_WRDD_WIFI_DATA_SIZE 2 #define ACPI_SPLC_WIFI_DATA_SIZE 2 #define ACPI_ECKV_WIFI_DATA_SIZE 2 /* * TAS size: 1 elelment for type, * 1 element for enabled field, * 1 element for block list size, * 16 elements for block list array */ #define APCI_WTAS_BLACK_LIST_MAX 16 #define ACPI_WTAS_WIFI_DATA_SIZE (3 + APCI_WTAS_BLACK_LIST_MAX) #define ACPI_WTAS_ENABLED_MSK 0x1 #define ACPI_WTAS_OVERRIDE_IEC_MSK 0x2 #define ACPI_WTAS_ENABLE_IEC_MSK 0x4 #define ACPI_WTAS_OVERRIDE_IEC_POS 0x1 #define ACPI_WTAS_ENABLE_IEC_POS 0x2 #define ACPI_WTAS_USA_UHB_MSK BIT(16) #define ACPI_WTAS_USA_UHB_POS 16 #define ACPI_PPAG_WIFI_DATA_SIZE_V1 ((IWL_NUM_CHAIN_LIMITS * \ IWL_NUM_SUB_BANDS_V1) + 2) #define ACPI_PPAG_WIFI_DATA_SIZE_V2 ((IWL_NUM_CHAIN_LIMITS * \ IWL_NUM_SUB_BANDS_V2) + 2) /* PPAG gain value bounds in 1/8 dBm */ #define ACPI_PPAG_MIN_LB -16 #define ACPI_PPAG_MAX_LB 24 #define ACPI_PPAG_MIN_HB -16 #define ACPI_PPAG_MAX_HB 40 +#define ACPI_PPAG_MASK 3 +#define IWL_PPAG_ETSI_MASK BIT(0) + +#define IWL_SAR_ENABLE_MSK BIT(0) +#define IWL_REDUCE_POWER_FLAGS_POS 1 /* * The profile for revision 2 is a superset of revision 1, which is in * turn a superset of revision 0. So we can store all revisions * inside revision 2, which is what we represent here. */ struct iwl_sar_profile_chain { u8 subbands[ACPI_SAR_NUM_SUB_BANDS_REV2]; }; struct iwl_sar_profile { bool enabled; struct iwl_sar_profile_chain chains[ACPI_SAR_NUM_CHAINS_REV2]; }; /* Same thing as with SAR, all revisions fit in revision 2 */ struct iwl_geo_profile_band { u8 max; u8 chains[ACPI_GEO_NUM_CHAINS]; }; struct iwl_geo_profile { struct iwl_geo_profile_band bands[ACPI_GEO_NUM_BANDS_REV2]; }; /* Same thing as with SAR, all revisions fit in revision 2 */ struct iwl_ppag_chain { s8 subbands[ACPI_SAR_NUM_SUB_BANDS_REV2]; }; enum iwl_dsm_funcs_rev_0 { DSM_FUNC_QUERY = 0, DSM_FUNC_DISABLE_SRD = 1, DSM_FUNC_ENABLE_INDONESIA_5G2 = 2, DSM_FUNC_ENABLE_6E = 3, DSM_FUNC_11AX_ENABLEMENT = 6, DSM_FUNC_ENABLE_UNII4_CHAN = 7, DSM_FUNC_ACTIVATE_CHANNEL = 8, DSM_FUNC_FORCE_DISABLE_CHANNELS = 9 }; enum iwl_dsm_values_srd { DSM_VALUE_SRD_ACTIVE, DSM_VALUE_SRD_PASSIVE, DSM_VALUE_SRD_DISABLE, DSM_VALUE_SRD_MAX }; enum iwl_dsm_values_indonesia { DSM_VALUE_INDONESIA_DISABLE, DSM_VALUE_INDONESIA_ENABLE, DSM_VALUE_INDONESIA_RESERVED, DSM_VALUE_INDONESIA_MAX }; /* DSM RFI uses a different GUID, so need separate definitions */ #define DSM_RFI_FUNC_ENABLE 3 enum iwl_dsm_values_rfi { DSM_VALUE_RFI_ENABLE, DSM_VALUE_RFI_DISABLE, DSM_VALUE_RFI_MAX }; #ifdef CONFIG_ACPI struct iwl_fw_runtime; extern const guid_t iwl_guid; extern const guid_t iwl_rfi_guid; void *iwl_acpi_get_object(struct device *dev, acpi_string method); int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func, const guid_t *guid, u8 *value); int iwl_acpi_get_dsm_u32(struct device *dev, int rev, int func, const guid_t *guid, u32 *value); union acpi_object *iwl_acpi_get_wifi_pkg_range(struct device *dev, union acpi_object *data, int min_data_size, int max_data_size, int *tbl_rev); /** * iwl_acpi_get_mcc - read MCC from ACPI, if available * * @dev: the struct device * @mcc: output buffer (3 bytes) that will get the MCC * * This function tries to read the current MCC from ACPI 
if available. */ int iwl_acpi_get_mcc(struct device *dev, char *mcc); u64 iwl_acpi_get_pwr_limit(struct device *dev); /* * iwl_acpi_get_eckv - read external clock validation from ACPI, if available * * @dev: the struct device * @extl_clk: output var (2 bytes) that will get the clk indication. * * This function tries to read the external clock indication * from ACPI if available. */ int iwl_acpi_get_eckv(struct device *dev, u32 *extl_clk); int iwl_sar_select_profile(struct iwl_fw_runtime *fwrt, __le16 *per_chain, u32 n_tables, u32 n_subbands, int prof_a, int prof_b); int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt); int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt); int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt); bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt); int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt, struct iwl_per_chain_offset *table, u32 n_bands, u32 n_profiles); int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt, union iwl_tas_config_cmd *cmd, int fw_ver); __le32 iwl_acpi_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt); +int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt); + +int iwl_read_ppag_table(struct iwl_fw_runtime *fwrt, union iwl_ppag_table_cmd *cmd, + int *cmd_size); + +bool iwl_acpi_is_ppag_approved(struct iwl_fw_runtime *fwrt); + #else /* CONFIG_ACPI */ static inline void *iwl_acpi_get_object(struct device *dev, acpi_string method) { return ERR_PTR(-ENOENT); } static inline void *iwl_acpi_get_dsm_object(struct device *dev, int rev, int func, union acpi_object *args) { return ERR_PTR(-ENOENT); } static inline int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func, const guid_t *guid, u8 *value) { return -ENOENT; } static inline int iwl_acpi_get_dsm_u32(struct device *dev, int rev, int func, const guid_t *guid, u32 *value) { return -ENOENT; } static inline union acpi_object * iwl_acpi_get_wifi_pkg_range(struct device *dev, union acpi_object *data, int min_data_size, int max_data_size, int *tbl_rev) { return ERR_PTR(-ENOENT); } static inline int iwl_acpi_get_mcc(struct device *dev, char *mcc) { return -ENOENT; } static inline u64 iwl_acpi_get_pwr_limit(struct device *dev) { return 0; } static inline int iwl_acpi_get_eckv(struct device *dev, u32 *extl_clk) { return -ENOENT; } static inline int iwl_sar_select_profile(struct iwl_fw_runtime *fwrt, __le16 *per_chain, u32 n_tables, u32 n_subbands, int prof_a, int prof_b) { return -ENOENT; } static inline int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt) { return -ENOENT; } static inline int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt) { return -ENOENT; } static inline int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt) { return 1; } static inline bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt) { return false; } static inline int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt, union iwl_tas_config_cmd *cmd, int fw_ver) { return -ENOENT; } static inline __le32 iwl_acpi_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt) { return 0; } +static inline int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt) +{ + return -ENOENT; +} + +static inline int iwl_read_ppag_table(struct iwl_fw_runtime *fwrt, + union iwl_ppag_table_cmd *cmd, int *cmd_size) +{ + return -ENOENT; +} + +static inline bool iwl_acpi_is_ppag_approved(struct iwl_fw_runtime *fwrt) +{ + return false; +} + #endif /* CONFIG_ACPI */ static inline union acpi_object * iwl_acpi_get_wifi_pkg(struct device *dev, union acpi_object *data, int data_size, int *tbl_rev) { return iwl_acpi_get_wifi_pkg_range(dev, data, 
data_size, data_size, tbl_rev); } #endif /* __iwl_fw_acpi__ */ diff --git a/sys/contrib/dev/iwlwifi/fw/api/commands.h b/sys/contrib/dev/iwlwifi/fw/api/commands.h index a91bd66ecb30..c78d2f1c722c 100644 --- a/sys/contrib/dev/iwlwifi/fw/api/commands.h +++ b/sys/contrib/dev/iwlwifi/fw/api/commands.h @@ -1,615 +1,610 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH * Copyright (C) 2018-2020 Intel Corporation */ #ifndef __iwl_fw_api_commands_h__ #define __iwl_fw_api_commands_h__ /** * enum iwl_mvm_command_groups - command groups for the firmware * @LEGACY_GROUP: legacy group, uses command IDs from &enum iwl_legacy_cmds * @LONG_GROUP: legacy group with long header, also uses command IDs * from &enum iwl_legacy_cmds * @SYSTEM_GROUP: system group, uses command IDs from * &enum iwl_system_subcmd_ids * @MAC_CONF_GROUP: MAC configuration group, uses command IDs from * &enum iwl_mac_conf_subcmd_ids * @PHY_OPS_GROUP: PHY operations group, uses command IDs from * &enum iwl_phy_ops_subcmd_ids * @DATA_PATH_GROUP: data path group, uses command IDs from * &enum iwl_data_path_subcmd_ids * @NAN_GROUP: NAN group, uses command IDs from &enum iwl_nan_subcmd_ids * @LOCATION_GROUP: location group, uses command IDs from * &enum iwl_location_subcmd_ids * @PROT_OFFLOAD_GROUP: protocol offload group, uses command IDs from * &enum iwl_prot_offload_subcmd_ids * @REGULATORY_AND_NVM_GROUP: regulatory/NVM group, uses command IDs from * &enum iwl_regulatory_and_nvm_subcmd_ids * @DEBUG_GROUP: Debug group, uses command IDs from &enum iwl_debug_cmds */ enum iwl_mvm_command_groups { LEGACY_GROUP = 0x0, LONG_GROUP = 0x1, SYSTEM_GROUP = 0x2, MAC_CONF_GROUP = 0x3, PHY_OPS_GROUP = 0x4, DATA_PATH_GROUP = 0x5, NAN_GROUP = 0x7, LOCATION_GROUP = 0x8, PROT_OFFLOAD_GROUP = 0xb, REGULATORY_AND_NVM_GROUP = 0xc, DEBUG_GROUP = 0xf, }; /** * enum iwl_legacy_cmds - legacy group command IDs */ enum iwl_legacy_cmds { /** * @UCODE_ALIVE_NTFY: * Alive data from the firmware, as described in * &struct iwl_alive_ntf_v3 or &struct iwl_alive_ntf_v4 or * &struct iwl_alive_ntf_v5 or &struct iwl_alive_ntf_v6. */ UCODE_ALIVE_NTFY = 0x1, /** * @REPLY_ERROR: Cause an error in the firmware, for testing purposes. */ REPLY_ERROR = 0x2, /** * @ECHO_CMD: Send data to the device to have it returned immediately. */ ECHO_CMD = 0x3, /** * @INIT_COMPLETE_NOTIF: Notification that initialization is complete. */ INIT_COMPLETE_NOTIF = 0x4, /** * @PHY_CONTEXT_CMD: * Add/modify/remove a PHY context, using &struct iwl_phy_context_cmd * or &struct iwl_phy_context_cmd_v1. */ PHY_CONTEXT_CMD = 0x8, /** * @DBG_CFG: Debug configuration command. */ DBG_CFG = 0x9, /** * @SCAN_ITERATION_COMPLETE_UMAC: * Firmware indicates a scan iteration completed, using * &struct iwl_umac_scan_iter_complete_notif. */ SCAN_ITERATION_COMPLETE_UMAC = 0xb5, /** * @SCAN_CFG_CMD: * uses &struct iwl_scan_config_v1, &struct iwl_scan_config_v2 * or &struct iwl_scan_config */ SCAN_CFG_CMD = 0xc, /** * @SCAN_REQ_UMAC: uses &struct iwl_scan_req_umac */ SCAN_REQ_UMAC = 0xd, /** * @SCAN_ABORT_UMAC: uses &struct iwl_umac_scan_abort */ SCAN_ABORT_UMAC = 0xe, /** * @SCAN_COMPLETE_UMAC: uses &struct iwl_umac_scan_complete */ SCAN_COMPLETE_UMAC = 0xf, /** * @BA_WINDOW_STATUS_NOTIFICATION_ID: * uses &struct iwl_ba_window_status_notif */ BA_WINDOW_STATUS_NOTIFICATION_ID = 0x13, /** * @ADD_STA_KEY: * &struct iwl_mvm_add_sta_key_cmd_v1 or * &struct iwl_mvm_add_sta_key_cmd. 
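
The inline iwl_acpi_get_wifi_pkg() wrapper above simply pins the minimum and maximum package sizes to the same value. A hedged usage sketch of that call pattern, following the probe/kfree flow used by the table readers in acpi.c (the function itself is illustrative only, not driver code):

/*
 * Illustrative only: probe a single fixed-size revision of the WRDS
 * wifi package and report the table revision ACPI advertised.
 */
static int iwl_acpi_probe_wrds_rev0(struct device *dev)
{
	union acpi_object *data, *wifi_pkg;
	int tbl_rev;

	data = iwl_acpi_get_object(dev, ACPI_WRDS_METHOD);
	if (IS_ERR(data))
		return PTR_ERR(data);

	/* min == max == ACPI_WRDS_WIFI_DATA_SIZE_REV0 via the wrapper */
	wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data,
					 ACPI_WRDS_WIFI_DATA_SIZE_REV0,
					 &tbl_rev);
	if (IS_ERR(wifi_pkg)) {
		kfree(data);
		return PTR_ERR(wifi_pkg);
	}

	kfree(data);
	return tbl_rev;
}
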
*/ ADD_STA_KEY = 0x17, /** * @ADD_STA: * &struct iwl_mvm_add_sta_cmd or &struct iwl_mvm_add_sta_cmd_v7. */ ADD_STA = 0x18, /** * @REMOVE_STA: &struct iwl_mvm_rm_sta_cmd */ REMOVE_STA = 0x19, /** * @FW_GET_ITEM_CMD: uses &struct iwl_fw_get_item_cmd */ FW_GET_ITEM_CMD = 0x1a, /** * @TX_CMD: uses &struct iwl_tx_cmd or &struct iwl_tx_cmd_gen2 or * &struct iwl_tx_cmd_gen3, * response in &struct iwl_mvm_tx_resp or * &struct iwl_mvm_tx_resp_v3 */ TX_CMD = 0x1c, /** * @TXPATH_FLUSH: &struct iwl_tx_path_flush_cmd */ TXPATH_FLUSH = 0x1e, /** * @MGMT_MCAST_KEY: * &struct iwl_mvm_mgmt_mcast_key_cmd or * &struct iwl_mvm_mgmt_mcast_key_cmd_v1 */ MGMT_MCAST_KEY = 0x1f, /* scheduler config */ /** * @SCD_QUEUE_CFG: &struct iwl_scd_txq_cfg_cmd for older hardware, * &struct iwl_tx_queue_cfg_cmd with &struct iwl_tx_queue_cfg_rsp * for newer (22000) hardware. */ SCD_QUEUE_CFG = 0x1d, /** * @WEP_KEY: uses &struct iwl_mvm_wep_key_cmd */ WEP_KEY = 0x20, /** * @SHARED_MEM_CFG: * retrieve shared memory configuration - response in * &struct iwl_shared_mem_cfg */ SHARED_MEM_CFG = 0x25, /** * @TDLS_CHANNEL_SWITCH_CMD: uses &struct iwl_tdls_channel_switch_cmd */ TDLS_CHANNEL_SWITCH_CMD = 0x27, /** * @TDLS_CHANNEL_SWITCH_NOTIFICATION: * uses &struct iwl_tdls_channel_switch_notif */ TDLS_CHANNEL_SWITCH_NOTIFICATION = 0xaa, /** * @TDLS_CONFIG_CMD: * &struct iwl_tdls_config_cmd, response in &struct iwl_tdls_config_res */ TDLS_CONFIG_CMD = 0xa7, /** * @MAC_CONTEXT_CMD: &struct iwl_mac_ctx_cmd */ MAC_CONTEXT_CMD = 0x28, /** * @TIME_EVENT_CMD: * &struct iwl_time_event_cmd, response in &struct iwl_time_event_resp */ TIME_EVENT_CMD = 0x29, /* both CMD and response */ /** * @TIME_EVENT_NOTIFICATION: &struct iwl_time_event_notif */ TIME_EVENT_NOTIFICATION = 0x2a, /** * @BINDING_CONTEXT_CMD: * &struct iwl_binding_cmd or &struct iwl_binding_cmd_v1 */ BINDING_CONTEXT_CMD = 0x2b, /** * @TIME_QUOTA_CMD: &struct iwl_time_quota_cmd */ TIME_QUOTA_CMD = 0x2c, /** * @NON_QOS_TX_COUNTER_CMD: * command is &struct iwl_nonqos_seq_query_cmd */ NON_QOS_TX_COUNTER_CMD = 0x2d, /** * @LEDS_CMD: command is &struct iwl_led_cmd */ LEDS_CMD = 0x48, /** * @LQ_CMD: using &struct iwl_lq_cmd */ LQ_CMD = 0x4e, /** * @FW_PAGING_BLOCK_CMD: * &struct iwl_fw_paging_cmd */ FW_PAGING_BLOCK_CMD = 0x4f, /** * @SCAN_OFFLOAD_REQUEST_CMD: uses &struct iwl_scan_req_lmac */ SCAN_OFFLOAD_REQUEST_CMD = 0x51, /** * @SCAN_OFFLOAD_ABORT_CMD: abort the scan - no further contents */ SCAN_OFFLOAD_ABORT_CMD = 0x52, /** * @HOT_SPOT_CMD: uses &struct iwl_hs20_roc_req */ HOT_SPOT_CMD = 0x53, /** * @SCAN_OFFLOAD_COMPLETE: * notification, &struct iwl_periodic_scan_complete */ SCAN_OFFLOAD_COMPLETE = 0x6D, /** * @SCAN_OFFLOAD_UPDATE_PROFILES_CMD: * update scan offload (scheduled scan) profiles/blocklist/etc. 
*/ SCAN_OFFLOAD_UPDATE_PROFILES_CMD = 0x6E, /** * @MATCH_FOUND_NOTIFICATION: scan match found */ MATCH_FOUND_NOTIFICATION = 0xd9, /** * @SCAN_ITERATION_COMPLETE: * uses &struct iwl_lmac_scan_complete_notif */ SCAN_ITERATION_COMPLETE = 0xe7, /* Phy */ /** * @PHY_CONFIGURATION_CMD: &struct iwl_phy_cfg_cmd_v1 or &struct iwl_phy_cfg_cmd_v3 */ PHY_CONFIGURATION_CMD = 0x6a, /** * @CALIB_RES_NOTIF_PHY_DB: &struct iwl_calib_res_notif_phy_db */ CALIB_RES_NOTIF_PHY_DB = 0x6b, /** * @PHY_DB_CMD: &struct iwl_phy_db_cmd */ PHY_DB_CMD = 0x6c, /** * @POWER_TABLE_CMD: &struct iwl_device_power_cmd */ POWER_TABLE_CMD = 0x77, /** * @PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION: * &struct iwl_uapsd_misbehaving_ap_notif */ PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION = 0x78, /** * @LTR_CONFIG: &struct iwl_ltr_config_cmd */ LTR_CONFIG = 0xee, /** * @REPLY_THERMAL_MNG_BACKOFF: * Thermal throttling command */ REPLY_THERMAL_MNG_BACKOFF = 0x7e, /** * @NVM_ACCESS_CMD: using &struct iwl_nvm_access_cmd */ NVM_ACCESS_CMD = 0x88, /** * @BEACON_NOTIFICATION: &struct iwl_extended_beacon_notif */ BEACON_NOTIFICATION = 0x90, /** * @BEACON_TEMPLATE_CMD: * Uses one of &struct iwl_mac_beacon_cmd_v6, * &struct iwl_mac_beacon_cmd_v7 or &struct iwl_mac_beacon_cmd * depending on the device version. */ BEACON_TEMPLATE_CMD = 0x91, /** * @TX_ANT_CONFIGURATION_CMD: &struct iwl_tx_ant_cfg_cmd */ TX_ANT_CONFIGURATION_CMD = 0x98, /** * @STATISTICS_CMD: * one of &struct iwl_statistics_cmd, * &struct iwl_notif_statistics_v11, * &struct iwl_notif_statistics_v10, * &struct iwl_notif_statistics, * &struct iwl_statistics_operational_ntfy_ver_14 */ STATISTICS_CMD = 0x9c, /** * @STATISTICS_NOTIFICATION: * one of &struct iwl_notif_statistics_v10, * &struct iwl_notif_statistics_v11, * &struct iwl_notif_statistic, * &struct iwl_statistics_operational_ntfy_ver_14 * &struct iwl_statistics_operational_ntfy */ STATISTICS_NOTIFICATION = 0x9d, /** * @EOSP_NOTIFICATION: * Notify that a service period ended, * &struct iwl_mvm_eosp_notification */ EOSP_NOTIFICATION = 0x9e, /** * @REDUCE_TX_POWER_CMD: * &struct iwl_dev_tx_power_cmd */ REDUCE_TX_POWER_CMD = 0x9f, /** * @MISSED_BEACONS_NOTIFICATION: &struct iwl_missed_beacons_notif */ MISSED_BEACONS_NOTIFICATION = 0xa2, /** * @MAC_PM_POWER_TABLE: using &struct iwl_mac_power_cmd */ MAC_PM_POWER_TABLE = 0xa9, /** * @MFUART_LOAD_NOTIFICATION: &struct iwl_mfuart_load_notif */ MFUART_LOAD_NOTIFICATION = 0xb1, /** * @RSS_CONFIG_CMD: &struct iwl_rss_config_cmd */ RSS_CONFIG_CMD = 0xb3, /** * @REPLY_RX_PHY_CMD: &struct iwl_rx_phy_info */ REPLY_RX_PHY_CMD = 0xc0, /** * @REPLY_RX_MPDU_CMD: * &struct iwl_rx_mpdu_res_start or &struct iwl_rx_mpdu_desc */ REPLY_RX_MPDU_CMD = 0xc1, /** * @BAR_FRAME_RELEASE: Frame release from BAR notification, used for * multi-TID BAR (previously, the BAR frame itself was reported * instead). Uses &struct iwl_bar_frame_release. 
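
Commands in the non-legacy groups listed in iwl_mvm_command_groups are addressed by a (group, opcode) pair, as in the WIDE_ID(PHY_OPS_GROUP, PER_PLATFORM_ANT_GAIN_CMD) lookup used by iwl_read_ppag_table() earlier. A hedged sketch of that composition, written as an example macro rather than the driver's own definition:

/*
 * Illustrative only: compose a wide command ID from a group and an
 * opcode (group in the high byte).  The driver's WIDE_ID() macro plays
 * this role; this re-definition is for explanation, not for use.
 */
#define EXAMPLE_WIDE_ID(grp, opcode)	(((grp) << 8) | (opcode))

/* e.g. EXAMPLE_WIDE_ID(PHY_OPS_GROUP, PER_PLATFORM_ANT_GAIN_CMD) */
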
*/ BAR_FRAME_RELEASE = 0xc2, /** * @FRAME_RELEASE: * Frame release (reorder helper) notification, uses * &struct iwl_frame_release */ FRAME_RELEASE = 0xc3, /** * @BA_NOTIF: * BlockAck notification, uses &struct iwl_mvm_compressed_ba_notif * or &struct iwl_mvm_ba_notif depending on the HW */ BA_NOTIF = 0xc5, /* Location Aware Regulatory */ /** * @MCC_UPDATE_CMD: using &struct iwl_mcc_update_cmd */ MCC_UPDATE_CMD = 0xc8, /** * @MCC_CHUB_UPDATE_CMD: using &struct iwl_mcc_chub_notif */ MCC_CHUB_UPDATE_CMD = 0xc9, /** * @MARKER_CMD: trace marker command, uses &struct iwl_mvm_marker * with &struct iwl_mvm_marker_rsp */ MARKER_CMD = 0xcb, /** * @BT_PROFILE_NOTIFICATION: &struct iwl_bt_coex_profile_notif */ BT_PROFILE_NOTIFICATION = 0xce, /** * @BT_CONFIG: &struct iwl_bt_coex_cmd */ BT_CONFIG = 0x9b, /** * @BT_COEX_UPDATE_REDUCED_TXP: * &struct iwl_bt_coex_reduced_txp_update_cmd */ BT_COEX_UPDATE_REDUCED_TXP = 0x5c, /** * @BT_COEX_CI: &struct iwl_bt_coex_ci_cmd */ BT_COEX_CI = 0x5d, /** * @REPLY_SF_CFG_CMD: &struct iwl_sf_cfg_cmd */ REPLY_SF_CFG_CMD = 0xd1, /** * @REPLY_BEACON_FILTERING_CMD: &struct iwl_beacon_filter_cmd */ REPLY_BEACON_FILTERING_CMD = 0xd2, /** * @DTS_MEASUREMENT_NOTIFICATION: * &struct iwl_dts_measurement_notif_v1 or * &struct iwl_dts_measurement_notif_v2 */ DTS_MEASUREMENT_NOTIFICATION = 0xdd, /** * @LDBG_CONFIG_CMD: configure continuous trace recording */ LDBG_CONFIG_CMD = 0xf6, /** * @DEBUG_LOG_MSG: Debugging log data from firmware */ DEBUG_LOG_MSG = 0xf7, - /** - * @BCAST_FILTER_CMD: &struct iwl_bcast_filter_cmd - */ - BCAST_FILTER_CMD = 0xcf, - /** * @MCAST_FILTER_CMD: &struct iwl_mcast_filter_cmd */ MCAST_FILTER_CMD = 0xd0, /** * @D3_CONFIG_CMD: &struct iwl_d3_manager_config */ D3_CONFIG_CMD = 0xd3, /** * @PROT_OFFLOAD_CONFIG_CMD: Depending on firmware, uses one of * &struct iwl_proto_offload_cmd_v1, &struct iwl_proto_offload_cmd_v2, * &struct iwl_proto_offload_cmd_v3_small, * &struct iwl_proto_offload_cmd_v3_large */ PROT_OFFLOAD_CONFIG_CMD = 0xd4, /** * @OFFLOADS_QUERY_CMD: * No data in command, response in &struct iwl_wowlan_status */ OFFLOADS_QUERY_CMD = 0xd5, /** * @D0I3_END_CMD: End D0i3/D3 state, no command data */ D0I3_END_CMD = 0xed, /** * @WOWLAN_PATTERNS: &struct iwl_wowlan_patterns_cmd */ WOWLAN_PATTERNS = 0xe0, /** * @WOWLAN_CONFIGURATION: &struct iwl_wowlan_config_cmd */ WOWLAN_CONFIGURATION = 0xe1, /** * @WOWLAN_TSC_RSC_PARAM: &struct iwl_wowlan_rsc_tsc_params_cmd_v4, * &struct iwl_wowlan_rsc_tsc_params_cmd */ WOWLAN_TSC_RSC_PARAM = 0xe2, /** * @WOWLAN_TKIP_PARAM: &struct iwl_wowlan_tkip_params_cmd */ WOWLAN_TKIP_PARAM = 0xe3, /** * @WOWLAN_KEK_KCK_MATERIAL: &struct iwl_wowlan_kek_kck_material_cmd */ WOWLAN_KEK_KCK_MATERIAL = 0xe4, /** * @WOWLAN_GET_STATUSES: response in &struct iwl_wowlan_status */ WOWLAN_GET_STATUSES = 0xe5, /** * @SCAN_OFFLOAD_PROFILES_QUERY_CMD: * No command data, response is &struct iwl_scan_offload_profiles_query */ SCAN_OFFLOAD_PROFILES_QUERY_CMD = 0x56, }; /** * enum iwl_system_subcmd_ids - system group command IDs */ enum iwl_system_subcmd_ids { /** * @SHARED_MEM_CFG_CMD: * response in &struct iwl_shared_mem_cfg or * &struct iwl_shared_mem_cfg_v2 */ SHARED_MEM_CFG_CMD = 0x0, /** * @SOC_CONFIGURATION_CMD: &struct iwl_soc_configuration_cmd */ SOC_CONFIGURATION_CMD = 0x01, /** * @INIT_EXTENDED_CFG_CMD: &struct iwl_init_extended_cfg_cmd */ INIT_EXTENDED_CFG_CMD = 0x03, /** * @FW_ERROR_RECOVERY_CMD: &struct iwl_fw_error_recovery_cmd */ FW_ERROR_RECOVERY_CMD = 0x7, /** * @RFI_CONFIG_CMD: &struct iwl_rfi_config_cmd */ RFI_CONFIG_CMD = 0xb, 
/** * @RFI_GET_FREQ_TABLE_CMD: &struct iwl_rfi_config_cmd */ RFI_GET_FREQ_TABLE_CMD = 0xc, /** * @SYSTEM_FEATURES_CONTROL_CMD: &struct iwl_system_features_control_cmd */ SYSTEM_FEATURES_CONTROL_CMD = 0xd, /** * @RFI_DEACTIVATE_NOTIF: &struct iwl_rfi_deactivate_notif */ RFI_DEACTIVATE_NOTIF = 0xff, }; #endif /* __iwl_fw_api_commands_h__ */ diff --git a/sys/contrib/dev/iwlwifi/fw/api/filter.h b/sys/contrib/dev/iwlwifi/fw/api/filter.h index dd62a63956b3..e44c70b7c790 100644 --- a/sys/contrib/dev/iwlwifi/fw/api/filter.h +++ b/sys/contrib/dev/iwlwifi/fw/api/filter.h @@ -1,127 +1,39 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright (C) 2012-2014 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #ifndef __iwl_fw_api_filter_h__ #define __iwl_fw_api_filter_h__ #include "fw/api/mac.h" #define MAX_PORT_ID_NUM 2 #define MAX_MCAST_FILTERING_ADDRESSES 256 /** * struct iwl_mcast_filter_cmd - configure multicast filter. * @filter_own: Set 1 to filter out multicast packets sent by station itself * @port_id: Multicast MAC addresses array specifier. This is a strange way * to identify network interface adopted in host-device IF. * It is used by FW as index in array of addresses. This array has * MAX_PORT_ID_NUM members. * @count: Number of MAC addresses in the array * @pass_all: Set 1 to pass all multicast packets. * @bssid: current association BSSID. * @reserved: reserved * @addr_list: Place holder for array of MAC addresses. * IMPORTANT: add padding if necessary to ensure DWORD alignment. */ struct iwl_mcast_filter_cmd { u8 filter_own; u8 port_id; u8 count; u8 pass_all; u8 bssid[6]; u8 reserved[2]; u8 addr_list[0]; } __packed; /* MCAST_FILTERING_CMD_API_S_VER_1 */ -#define MAX_BCAST_FILTERS 8 -#define MAX_BCAST_FILTER_ATTRS 2 - -/** - * enum iwl_mvm_bcast_filter_attr_offset - written by fw for each Rx packet - * @BCAST_FILTER_OFFSET_PAYLOAD_START: offset is from payload start. - * @BCAST_FILTER_OFFSET_IP_END: offset is from ip header end (i.e. - * start of ip payload). - */ -enum iwl_mvm_bcast_filter_attr_offset { - BCAST_FILTER_OFFSET_PAYLOAD_START = 0, - BCAST_FILTER_OFFSET_IP_END = 1, -}; - -/** - * struct iwl_fw_bcast_filter_attr - broadcast filter attribute - * @offset_type: &enum iwl_mvm_bcast_filter_attr_offset. - * @offset: starting offset of this pattern. - * @reserved1: reserved - * @val: value to match - big endian (MSB is the first - * byte to match from offset pos). - * @mask: mask to match (big endian). - */ -struct iwl_fw_bcast_filter_attr { - u8 offset_type; - u8 offset; - __le16 reserved1; - __be32 val; - __be32 mask; -} __packed; /* BCAST_FILTER_ATT_S_VER_1 */ - -/** - * enum iwl_mvm_bcast_filter_frame_type - filter frame type - * @BCAST_FILTER_FRAME_TYPE_ALL: consider all frames. - * @BCAST_FILTER_FRAME_TYPE_IPV4: consider only ipv4 frames - */ -enum iwl_mvm_bcast_filter_frame_type { - BCAST_FILTER_FRAME_TYPE_ALL = 0, - BCAST_FILTER_FRAME_TYPE_IPV4 = 1, -}; - -/** - * struct iwl_fw_bcast_filter - broadcast filter - * @discard: discard frame (1) or let it pass (0). - * @frame_type: &enum iwl_mvm_bcast_filter_frame_type. - * @reserved1: reserved - * @num_attrs: number of valid attributes in this filter. - * @attrs: attributes of this filter. a filter is considered matched - * only when all its attributes are matched (i.e. 
AND relationship) - */ -struct iwl_fw_bcast_filter { - u8 discard; - u8 frame_type; - u8 num_attrs; - u8 reserved1; - struct iwl_fw_bcast_filter_attr attrs[MAX_BCAST_FILTER_ATTRS]; -} __packed; /* BCAST_FILTER_S_VER_1 */ - -/** - * struct iwl_fw_bcast_mac - per-mac broadcast filtering configuration. - * @default_discard: default action for this mac (discard (1) / pass (0)). - * @reserved1: reserved - * @attached_filters: bitmap of relevant filters for this mac. - */ -struct iwl_fw_bcast_mac { - u8 default_discard; - u8 reserved1; - __le16 attached_filters; -} __packed; /* BCAST_MAC_CONTEXT_S_VER_1 */ - -/** - * struct iwl_bcast_filter_cmd - broadcast filtering configuration - * @disable: enable (0) / disable (1) - * @max_bcast_filters: max number of filters (MAX_BCAST_FILTERS) - * @max_macs: max number of macs (NUM_MAC_INDEX_DRIVER) - * @reserved1: reserved - * @filters: broadcast filters - * @macs: broadcast filtering configuration per-mac - */ -struct iwl_bcast_filter_cmd { - u8 disable; - u8 max_bcast_filters; - u8 max_macs; - u8 reserved1; - struct iwl_fw_bcast_filter filters[MAX_BCAST_FILTERS]; - struct iwl_fw_bcast_mac macs[NUM_MAC_INDEX_DRIVER]; -} __packed; /* BCAST_FILTERING_HCMD_API_S_VER_1 */ - #endif /* __iwl_fw_api_filter_h__ */ diff --git a/sys/contrib/dev/iwlwifi/fw/api/power.h b/sys/contrib/dev/iwlwifi/fw/api/power.h index 81318208f2f6..f92cac1da764 100644 --- a/sys/contrib/dev/iwlwifi/fw/api/power.h +++ b/sys/contrib/dev/iwlwifi/fw/api/power.h @@ -1,647 +1,670 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2021 Intel Corporation + * Copyright (C) 2012-2014, 2018-2022 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015-2017 Intel Deutschland GmbH */ #ifndef __iwl_fw_api_power_h__ #define __iwl_fw_api_power_h__ /* Power Management Commands, Responses, Notifications */ /** * enum iwl_ltr_config_flags - masks for LTR config command flags * @LTR_CFG_FLAG_FEATURE_ENABLE: Feature operational status * @LTR_CFG_FLAG_HW_DIS_ON_SHADOW_REG_ACCESS: allow LTR change on shadow * memory access * @LTR_CFG_FLAG_HW_EN_SHRT_WR_THROUGH: allow LTR msg send on ANY LTR * reg change * @LTR_CFG_FLAG_HW_DIS_ON_D0_2_D3: allow LTR msg send on transition from * D0 to D3 * @LTR_CFG_FLAG_SW_SET_SHORT: fixed static short LTR register * @LTR_CFG_FLAG_SW_SET_LONG: fixed static short LONG register * @LTR_CFG_FLAG_DENIE_C10_ON_PD: allow going into C10 on PD * @LTR_CFG_FLAG_UPDATE_VALUES: update config values and short * idle timeout */ enum iwl_ltr_config_flags { LTR_CFG_FLAG_FEATURE_ENABLE = BIT(0), LTR_CFG_FLAG_HW_DIS_ON_SHADOW_REG_ACCESS = BIT(1), LTR_CFG_FLAG_HW_EN_SHRT_WR_THROUGH = BIT(2), LTR_CFG_FLAG_HW_DIS_ON_D0_2_D3 = BIT(3), LTR_CFG_FLAG_SW_SET_SHORT = BIT(4), LTR_CFG_FLAG_SW_SET_LONG = BIT(5), LTR_CFG_FLAG_DENIE_C10_ON_PD = BIT(6), LTR_CFG_FLAG_UPDATE_VALUES = BIT(7), }; /** * struct iwl_ltr_config_cmd_v1 - configures the LTR * @flags: See &enum iwl_ltr_config_flags * @static_long: static LTR Long register value. * @static_short: static LTR Short register value. */ struct iwl_ltr_config_cmd_v1 { __le32 flags; __le32 static_long; __le32 static_short; } __packed; /* LTR_CAPABLE_API_S_VER_1 */ #define LTR_VALID_STATES_NUM 4 /** * struct iwl_ltr_config_cmd - configures the LTR * @flags: See &enum iwl_ltr_config_flags * @static_long: static LTR Long register value. * @static_short: static LTR Short register value. 
* @ltr_cfg_values: LTR parameters table values (in usec) in folowing order: * TX, RX, Short Idle, Long Idle. Used only if %LTR_CFG_FLAG_UPDATE_VALUES * is set. * @ltr_short_idle_timeout: LTR Short Idle timeout (in usec). Used only if * %LTR_CFG_FLAG_UPDATE_VALUES is set. */ struct iwl_ltr_config_cmd { __le32 flags; __le32 static_long; __le32 static_short; __le32 ltr_cfg_values[LTR_VALID_STATES_NUM]; __le32 ltr_short_idle_timeout; } __packed; /* LTR_CAPABLE_API_S_VER_2 */ /* Radio LP RX Energy Threshold measured in dBm */ #define POWER_LPRX_RSSI_THRESHOLD 75 #define POWER_LPRX_RSSI_THRESHOLD_MAX 94 #define POWER_LPRX_RSSI_THRESHOLD_MIN 30 /** * enum iwl_power_flags - masks for power table command flags * @POWER_FLAGS_POWER_SAVE_ENA_MSK: '1' Allow to save power by turning off * receiver and transmitter. '0' - does not allow. * @POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK: '0' Driver disables power management, * '1' Driver enables PM (use rest of parameters) * @POWER_FLAGS_SKIP_OVER_DTIM_MSK: '0' PM have to walk up every DTIM, * '1' PM could sleep over DTIM till listen Interval. * @POWER_FLAGS_SNOOZE_ENA_MSK: Enable snoozing only if uAPSD is enabled and all * access categories are both delivery and trigger enabled. * @POWER_FLAGS_BT_SCO_ENA: Enable BT SCO coex only if uAPSD and * PBW Snoozing enabled * @POWER_FLAGS_ADVANCE_PM_ENA_MSK: Advanced PM (uAPSD) enable mask * @POWER_FLAGS_LPRX_ENA_MSK: Low Power RX enable. * @POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK: AP/GO's uAPSD misbehaving * detection enablement */ enum iwl_power_flags { POWER_FLAGS_POWER_SAVE_ENA_MSK = BIT(0), POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK = BIT(1), POWER_FLAGS_SKIP_OVER_DTIM_MSK = BIT(2), POWER_FLAGS_SNOOZE_ENA_MSK = BIT(5), POWER_FLAGS_BT_SCO_ENA = BIT(8), POWER_FLAGS_ADVANCE_PM_ENA_MSK = BIT(9), POWER_FLAGS_LPRX_ENA_MSK = BIT(11), POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK = BIT(12), }; #define IWL_POWER_VEC_SIZE 5 /** * struct iwl_powertable_cmd - legacy power command. Beside old API support this * is used also with a new power API for device wide power settings. * POWER_TABLE_CMD = 0x77 (command, has simple generic response) * * @flags: Power table command flags from POWER_FLAGS_* * @keep_alive_seconds: Keep alive period in seconds. Default - 25 sec. * Minimum allowed:- 3 * DTIM. Keep alive period must be * set regardless of power scheme or current power state. * FW use this value also when PM is disabled. * @debug_flags: debug flags * @rx_data_timeout: Minimum time (usec) from last Rx packet for AM to * PSM transition - legacy PM * @tx_data_timeout: Minimum time (usec) from last Tx packet for AM to * PSM transition - legacy PM * @sleep_interval: not in use * @skip_dtim_periods: Number of DTIM periods to skip if Skip over DTIM flag * is set. For example, if it is required to skip over * one DTIM, this value need to be set to 2 (DTIM periods). * @lprx_rssi_threshold: Signal strength up to which LP RX can be enabled. * Default: 80dbm */ struct iwl_powertable_cmd { /* PM_POWER_TABLE_CMD_API_S_VER_6 */ __le16 flags; u8 keep_alive_seconds; u8 debug_flags; __le32 rx_data_timeout; __le32 tx_data_timeout; __le32 sleep_interval[IWL_POWER_VEC_SIZE]; __le32 skip_dtim_periods; __le32 lprx_rssi_threshold; } __packed; /** * enum iwl_device_power_flags - masks for device power command flags * @DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK: * '1' Allow to save power by turning off * receiver and transmitter. '0' - does not allow. * @DEVICE_POWER_FLAGS_ALLOW_MEM_RETENTION_MSK: * Device Retention indication, '1' indicate retention is enabled. 
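
As a hedged illustration of the v2 LTR command described above: the example below only demonstrates how the flags and value table fit together; the numeric values are placeholders, not recommended settings.

/*
 * Illustrative only: build an LTR_CONFIG (v2) command that enables the
 * feature and updates the per-state values.
 */
static void example_fill_ltr_cmd(struct iwl_ltr_config_cmd *cmd)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE |
				 LTR_CFG_FLAG_UPDATE_VALUES);
	/* order per the kdoc: TX, RX, Short Idle, Long Idle (usec) */
	cmd->ltr_cfg_values[0] = cpu_to_le32(100);
	cmd->ltr_cfg_values[1] = cpu_to_le32(100);
	cmd->ltr_cfg_values[2] = cpu_to_le32(500);
	cmd->ltr_cfg_values[3] = cpu_to_le32(3000);
	cmd->ltr_short_idle_timeout = cpu_to_le32(500);
}
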
* @DEVICE_POWER_FLAGS_32K_CLK_VALID_MSK: * 32Khz external slow clock valid indication, '1' indicate cloack is * valid. */ enum iwl_device_power_flags { DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK = BIT(0), DEVICE_POWER_FLAGS_ALLOW_MEM_RETENTION_MSK = BIT(1), DEVICE_POWER_FLAGS_32K_CLK_VALID_MSK = BIT(12), }; /** * struct iwl_device_power_cmd - device wide power command. * DEVICE_POWER_CMD = 0x77 (command, has simple generic response) * * @flags: Power table command flags from &enum iwl_device_power_flags * @reserved: reserved (padding) */ struct iwl_device_power_cmd { /* PM_POWER_TABLE_CMD_API_S_VER_6 */ __le16 flags; __le16 reserved; } __packed; /** * struct iwl_mac_power_cmd - New power command containing uAPSD support * MAC_PM_POWER_TABLE = 0xA9 (command, has simple generic response) * @id_and_color: MAC contex identifier, &enum iwl_ctxt_id_and_color * @flags: Power table command flags from POWER_FLAGS_* * @keep_alive_seconds: Keep alive period in seconds. Default - 25 sec. * Minimum allowed:- 3 * DTIM. Keep alive period must be * set regardless of power scheme or current power state. * FW use this value also when PM is disabled. * @rx_data_timeout: Minimum time (usec) from last Rx packet for AM to * PSM transition - legacy PM * @tx_data_timeout: Minimum time (usec) from last Tx packet for AM to * PSM transition - legacy PM * @skip_dtim_periods: Number of DTIM periods to skip if Skip over DTIM flag * is set. For example, if it is required to skip over * one DTIM, this value need to be set to 2 (DTIM periods). * @rx_data_timeout_uapsd: Minimum time (usec) from last Rx packet for AM to * PSM transition - uAPSD * @tx_data_timeout_uapsd: Minimum time (usec) from last Tx packet for AM to * PSM transition - uAPSD * @lprx_rssi_threshold: Signal strength up to which LP RX can be enabled. * Default: 80dbm * @snooze_interval: Maximum time between attempts to retrieve buffered data * from the AP [msec] * @snooze_window: A window of time in which PBW snoozing insures that all * packets received. It is also the minimum time from last * received unicast RX packet, before client stops snoozing * for data. [msec] * @snooze_step: TBD * @qndp_tid: TID client shall use for uAPSD QNDP triggers * @uapsd_ac_flags: Set trigger-enabled and delivery-enabled indication for * each corresponding AC. * Use IEEE80211_WMM_IE_STA_QOSINFO_AC* for correct values. * @uapsd_max_sp: Use IEEE80211_WMM_IE_STA_QOSINFO_SP_* for correct * values. * @heavy_tx_thld_packets: TX threshold measured in number of packets * @heavy_rx_thld_packets: RX threshold measured in number of packets * @heavy_tx_thld_percentage: TX threshold measured in load's percentage * @heavy_rx_thld_percentage: RX threshold measured in load's percentage * @limited_ps_threshold: (unused) * @reserved: reserved (padding) */ struct iwl_mac_power_cmd { /* CONTEXT_DESC_API_T_VER_1 */ __le32 id_and_color; /* CLIENT_PM_POWER_TABLE_S_VER_1 */ __le16 flags; __le16 keep_alive_seconds; __le32 rx_data_timeout; __le32 tx_data_timeout; __le32 rx_data_timeout_uapsd; __le32 tx_data_timeout_uapsd; u8 lprx_rssi_threshold; u8 skip_dtim_periods; __le16 snooze_interval; __le16 snooze_window; u8 snooze_step; u8 qndp_tid; u8 uapsd_ac_flags; u8 uapsd_max_sp; u8 heavy_tx_thld_packets; u8 heavy_rx_thld_packets; u8 heavy_tx_thld_percentage; u8 heavy_rx_thld_percentage; u8 limited_ps_threshold; u8 reserved; } __packed; /* * struct iwl_uapsd_misbehaving_ap_notif - FW sends this notification when * associated AP is identified as improperly implementing uAPSD protocol. 
* PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION = 0x78 * @sta_id: index of station in uCode's station table - associated AP ID in * this context. */ struct iwl_uapsd_misbehaving_ap_notif { __le32 sta_id; u8 mac_id; u8 reserved[3]; } __packed; /** * struct iwl_reduce_tx_power_cmd - TX power reduction command * REDUCE_TX_POWER_CMD = 0x9f * @flags: (reserved for future implementation) * @mac_context_id: id of the mac ctx for which we are reducing TX power. * @pwr_restriction: TX power restriction in dBms. */ struct iwl_reduce_tx_power_cmd { u8 flags; u8 mac_context_id; __le16 pwr_restriction; } __packed; /* TX_REDUCED_POWER_API_S_VER_1 */ enum iwl_dev_tx_power_cmd_mode { IWL_TX_POWER_MODE_SET_MAC = 0, IWL_TX_POWER_MODE_SET_DEVICE = 1, IWL_TX_POWER_MODE_SET_CHAINS = 2, IWL_TX_POWER_MODE_SET_ACK = 3, IWL_TX_POWER_MODE_SET_SAR_TIMER = 4, IWL_TX_POWER_MODE_SET_SAR_TIMER_DEFAULT_TABLE = 5, }; /* TX_POWER_REDUCED_FLAGS_TYPE_API_E_VER_5 */; #define IWL_NUM_CHAIN_TABLES 1 #define IWL_NUM_CHAIN_TABLES_V2 2 #define IWL_NUM_CHAIN_LIMITS 2 #define IWL_NUM_SUB_BANDS_V1 5 #define IWL_NUM_SUB_BANDS_V2 11 /** * struct iwl_dev_tx_power_common - Common part of the TX power reduction cmd * @set_mode: see &enum iwl_dev_tx_power_cmd_mode * @mac_context_id: id of the mac ctx for which we are reducing TX power. * @pwr_restriction: TX power restriction in 1/8 dBms. * @dev_24: device TX power restriction in 1/8 dBms * @dev_52_low: device TX power restriction upper band - low * @dev_52_high: device TX power restriction upper band - high */ struct iwl_dev_tx_power_common { __le32 set_mode; __le32 mac_context_id; __le16 pwr_restriction; __le16 dev_24; __le16 dev_52_low; __le16 dev_52_high; }; /** * struct iwl_dev_tx_power_cmd_v3 - TX power reduction command version 3 * @per_chain: per chain restrictions */ struct iwl_dev_tx_power_cmd_v3 { __le16 per_chain[IWL_NUM_CHAIN_TABLES][IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V1]; } __packed; /* TX_REDUCED_POWER_API_S_VER_3 */ #define IWL_DEV_MAX_TX_POWER 0x7FFF /** * struct iwl_dev_tx_power_cmd_v4 - TX power reduction command version 4 * @per_chain: per chain restrictions * @enable_ack_reduction: enable or disable close range ack TX power * reduction. * @reserved: reserved (padding) */ struct iwl_dev_tx_power_cmd_v4 { __le16 per_chain[IWL_NUM_CHAIN_TABLES][IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V1]; u8 enable_ack_reduction; u8 reserved[3]; } __packed; /* TX_REDUCED_POWER_API_S_VER_4 */ /** * struct iwl_dev_tx_power_cmd_v5 - TX power reduction command version 5 * @per_chain: per chain restrictions * @enable_ack_reduction: enable or disable close range ack TX power * reduction. * @per_chain_restriction_changed: is per_chain_restriction has changed * from last command. used if set_mode is * IWL_TX_POWER_MODE_SET_SAR_TIMER. * note: if not changed, the command is used for keep alive only. * @reserved: reserved (padding) * @timer_period: timer in milliseconds. if expires FW will change to default * BIOS values. relevant if setMode is IWL_TX_POWER_MODE_SET_SAR_TIMER */ struct iwl_dev_tx_power_cmd_v5 { __le16 per_chain[IWL_NUM_CHAIN_TABLES][IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V1]; u8 enable_ack_reduction; u8 per_chain_restriction_changed; u8 reserved[2]; __le32 timer_period; } __packed; /* TX_REDUCED_POWER_API_S_VER_5 */ /** - * struct iwl_dev_tx_power_cmd_v5 - TX power reduction command version 5 + * struct iwl_dev_tx_power_cmd_v6 - TX power reduction command version 6 * @per_chain: per chain restrictions * @enable_ack_reduction: enable or disable close range ack TX power * reduction. 
* @per_chain_restriction_changed: is per_chain_restriction has changed * from last command. used if set_mode is * IWL_TX_POWER_MODE_SET_SAR_TIMER. * note: if not changed, the command is used for keep alive only. * @reserved: reserved (padding) * @timer_period: timer in milliseconds. if expires FW will change to default * BIOS values. relevant if setMode is IWL_TX_POWER_MODE_SET_SAR_TIMER */ struct iwl_dev_tx_power_cmd_v6 { __le16 per_chain[IWL_NUM_CHAIN_TABLES_V2][IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V2]; u8 enable_ack_reduction; u8 per_chain_restriction_changed; u8 reserved[2]; __le32 timer_period; } __packed; /* TX_REDUCED_POWER_API_S_VER_6 */ +/** + * struct iwl_dev_tx_power_cmd_v7 - TX power reduction command version 7 + * @per_chain: per chain restrictions + * @enable_ack_reduction: enable or disable close range ack TX power + * reduction. + * @per_chain_restriction_changed: is per_chain_restriction has changed + * from last command. used if set_mode is + * IWL_TX_POWER_MODE_SET_SAR_TIMER. + * note: if not changed, the command is used for keep alive only. + * @reserved: reserved (padding) + * @timer_period: timer in milliseconds. if expires FW will change to default + * BIOS values. relevant if setMode is IWL_TX_POWER_MODE_SET_SAR_TIMER + * @flags: reduce power flags. + */ +struct iwl_dev_tx_power_cmd_v7 { + __le16 per_chain[IWL_NUM_CHAIN_TABLES_V2][IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V2]; + u8 enable_ack_reduction; + u8 per_chain_restriction_changed; + u8 reserved[2]; + __le32 timer_period; + __le32 flags; +} __packed; /* TX_REDUCED_POWER_API_S_VER_7 */ /** * struct iwl_dev_tx_power_cmd - TX power reduction command (multiversion) * @common: common part of the command * @v3: version 3 part of the command * @v4: version 4 part of the command * @v5: version 5 part of the command * @v6: version 6 part of the command */ struct iwl_dev_tx_power_cmd { struct iwl_dev_tx_power_common common; union { struct iwl_dev_tx_power_cmd_v3 v3; struct iwl_dev_tx_power_cmd_v4 v4; struct iwl_dev_tx_power_cmd_v5 v5; struct iwl_dev_tx_power_cmd_v6 v6; + struct iwl_dev_tx_power_cmd_v7 v7; }; }; #define IWL_NUM_GEO_PROFILES 3 #define IWL_NUM_GEO_PROFILES_V3 8 #define IWL_NUM_BANDS_PER_CHAIN_V1 2 #define IWL_NUM_BANDS_PER_CHAIN_V2 3 /** * enum iwl_geo_per_chain_offset_operation - type of operation * @IWL_PER_CHAIN_OFFSET_SET_TABLES: send the tables from the host to the FW. * @IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE: retrieve the last configured table. */ enum iwl_geo_per_chain_offset_operation { IWL_PER_CHAIN_OFFSET_SET_TABLES, IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE, }; /* PER_CHAIN_OFFSET_OPERATION_E */ /** * struct iwl_per_chain_offset - embedded struct for PER_CHAIN_LIMIT_OFFSET_CMD. * @max_tx_power: maximum allowed tx power. * @chain_a: tx power offset for chain a. * @chain_b: tx power offset for chain b. */ struct iwl_per_chain_offset { __le16 max_tx_power; u8 chain_a; u8 chain_b; } __packed; /* PER_CHAIN_LIMIT_OFFSET_PER_CHAIN_S_VER_1 */ /** * struct iwl_geo_tx_power_profile_cmd_v1 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd. * @ops: operations, value from &enum iwl_geo_per_chain_offset_operation * @table: offset profile per band. */ struct iwl_geo_tx_power_profiles_cmd_v1 { __le32 ops; struct iwl_per_chain_offset table[IWL_NUM_GEO_PROFILES][IWL_NUM_BANDS_PER_CHAIN_V1]; } __packed; /* PER_CHAIN_LIMIT_OFFSET_CMD_VER_1 */ /** * struct iwl_geo_tx_power_profile_cmd_v2 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd. 
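
Only one member of the iwl_dev_tx_power_cmd union is actually sent, so the payload length depends on which command version the firmware negotiated. A hedged sketch of that sizing logic; the helper name is hypothetical, but the common-header-plus-versioned-tail layout matches the structures above:

/*
 * Hypothetical helper: pick the REDUCE_TX_POWER_CMD payload size for a
 * given command version.  The command always starts with the common
 * part followed by exactly one of the versioned tails.
 */
static size_t example_tx_power_cmd_size(u8 cmd_ver,
					const struct iwl_dev_tx_power_cmd *cmd)
{
	size_t len = sizeof(cmd->common);

	switch (cmd_ver) {
	case 7:
		len += sizeof(cmd->v7);
		break;
	case 6:
		len += sizeof(cmd->v6);
		break;
	case 5:
		len += sizeof(cmd->v5);
		break;
	case 4:
		len += sizeof(cmd->v4);
		break;
	default:
		len += sizeof(cmd->v3);
		break;
	}

	return len;
}
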
* @ops: operations, value from &enum iwl_geo_per_chain_offset_operation * @table: offset profile per band. * @table_revision: 0 for not-South Korea, 1 for South Korea (the name is misleading) */ struct iwl_geo_tx_power_profiles_cmd_v2 { __le32 ops; struct iwl_per_chain_offset table[IWL_NUM_GEO_PROFILES][IWL_NUM_BANDS_PER_CHAIN_V1]; __le32 table_revision; } __packed; /* PER_CHAIN_LIMIT_OFFSET_CMD_VER_2 */ /** * struct iwl_geo_tx_power_profile_cmd_v3 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd. * @ops: operations, value from &enum iwl_geo_per_chain_offset_operation * @table: offset profile per band. * @table_revision: 0 for not-South Korea, 1 for South Korea (the name is misleading) */ struct iwl_geo_tx_power_profiles_cmd_v3 { __le32 ops; struct iwl_per_chain_offset table[IWL_NUM_GEO_PROFILES][IWL_NUM_BANDS_PER_CHAIN_V2]; __le32 table_revision; } __packed; /* PER_CHAIN_LIMIT_OFFSET_CMD_VER_3 */ /** * struct iwl_geo_tx_power_profile_cmd_v4 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd. * @ops: operations, value from &enum iwl_geo_per_chain_offset_operation * @table: offset profile per band. * @table_revision: 0 for not-South Korea, 1 for South Korea (the name is misleading) */ struct iwl_geo_tx_power_profiles_cmd_v4 { __le32 ops; struct iwl_per_chain_offset table[IWL_NUM_GEO_PROFILES_V3][IWL_NUM_BANDS_PER_CHAIN_V1]; __le32 table_revision; } __packed; /* PER_CHAIN_LIMIT_OFFSET_CMD_VER_4 */ /** * struct iwl_geo_tx_power_profile_cmd_v5 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd. * @ops: operations, value from &enum iwl_geo_per_chain_offset_operation * @table: offset profile per band. * @table_revision: 0 for not-South Korea, 1 for South Korea (the name is misleading) */ struct iwl_geo_tx_power_profiles_cmd_v5 { __le32 ops; struct iwl_per_chain_offset table[IWL_NUM_GEO_PROFILES_V3][IWL_NUM_BANDS_PER_CHAIN_V2]; __le32 table_revision; } __packed; /* PER_CHAIN_LIMIT_OFFSET_CMD_VER_5 */ union iwl_geo_tx_power_profiles_cmd { struct iwl_geo_tx_power_profiles_cmd_v1 v1; struct iwl_geo_tx_power_profiles_cmd_v2 v2; struct iwl_geo_tx_power_profiles_cmd_v3 v3; struct iwl_geo_tx_power_profiles_cmd_v4 v4; struct iwl_geo_tx_power_profiles_cmd_v5 v5; }; /** * struct iwl_geo_tx_power_profiles_resp - response to PER_CHAIN_LIMIT_OFFSET_CMD cmd * @profile_idx: current geo profile in use */ struct iwl_geo_tx_power_profiles_resp { __le32 profile_idx; } __packed; /* PER_CHAIN_LIMIT_OFFSET_RSP */ /** * union iwl_ppag_table_cmd - union for all versions of PPAG command * @v1: version 1 * @v2: version 2 * * @flags: bit 0 - indicates enablement of PPAG for ETSI * bit 1 - indicates enablement of PPAG for CHINA BIOS * bit 1 can be used only in v3 (identical to v2) * @gain: table of antenna gain values per chain and sub-band * @reserved: reserved */ union iwl_ppag_table_cmd { struct { __le32 flags; s8 gain[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V1]; s8 reserved[2]; } v1; struct { __le32 flags; s8 gain[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V2]; s8 reserved[2]; } v2; } __packed; #define MCC_TO_SAR_OFFSET_TABLE_ROW_SIZE 26 #define MCC_TO_SAR_OFFSET_TABLE_COL_SIZE 13 /** * struct iwl_sar_offset_mapping_cmd - struct for SAR_OFFSET_MAPPING_TABLE_CMD * @offset_map: mapping a mcc to a geo sar group * @reserved: reserved */ struct iwl_sar_offset_mapping_cmd { u8 offset_map[MCC_TO_SAR_OFFSET_TABLE_ROW_SIZE] [MCC_TO_SAR_OFFSET_TABLE_COL_SIZE]; u16 reserved; } __packed; /*SAR_OFFSET_MAPPING_TABLE_CMD_API_S*/ /** * struct iwl_beacon_filter_cmd * REPLY_BEACON_FILTERING_CMD = 0xd2 (command) * @bf_energy_delta: Used for RSSI filtering, if 
in 'normal' state. Send beacon * to driver if delta in Energy values calculated for this and last * passed beacon is greater than this threshold. Zero value means that * the Energy change is ignored for beacon filtering, and beacon will * not be forced to be sent to driver regardless of this delta. Typical * energy delta 5dB. * @bf_roaming_energy_delta: Used for RSSI filtering, if in 'roaming' state. * Send beacon to driver if delta in Energy values calculated for this * and last passed beacon is greater than this threshold. Zero value * means that the Energy change is ignored for beacon filtering while in * Roaming state, typical energy delta 1dB. * @bf_roaming_state: Used for RSSI filtering. If absolute Energy values * calculated for current beacon is less than the threshold, use * Roaming Energy Delta Threshold, otherwise use normal Energy Delta * Threshold. Typical energy threshold is -72dBm. * @bf_temp_threshold: This threshold determines the type of temperature * filtering (Slow or Fast) that is selected (Units are in Celsuis): * If the current temperature is above this threshold - Fast filter * will be used, If the current temperature is below this threshold - * Slow filter will be used. * @bf_temp_fast_filter: Send Beacon to driver if delta in temperature values * calculated for this and the last passed beacon is greater than this * threshold. Zero value means that the temperature change is ignored for * beacon filtering; beacons will not be forced to be sent to driver * regardless of whether its temerature has been changed. * @bf_temp_slow_filter: Send Beacon to driver if delta in temperature values * calculated for this and the last passed beacon is greater than this * threshold. Zero value means that the temperature change is ignored for * beacon filtering; beacons will not be forced to be sent to driver * regardless of whether its temerature has been changed. * @bf_enable_beacon_filter: 1, beacon filtering is enabled; 0, disabled. * @bf_debug_flag: beacon filtering debug configuration * @bf_escape_timer: Send beacons to to driver if no beacons were passed * for a specific period of time. Units: Beacons. * @ba_escape_timer: Fully receive and parse beacon if no beacons were passed * for a longer period of time then this escape-timeout. Units: Beacons. * @ba_enable_beacon_abort: 1, beacon abort is enabled; 0, disabled. * @bf_threshold_absolute_low: See below. * @bf_threshold_absolute_high: Send Beacon to driver if Energy value calculated * for this beacon crossed this absolute threshold. For the 'Increase' * direction the bf_energy_absolute_low[i] is used. For the 'Decrease' * direction the bf_energy_absolute_high[i] is used. Zero value means * that this specific threshold is ignored for beacon filtering, and * beacon will not be forced to be sent to driver due to this setting. 
*/ struct iwl_beacon_filter_cmd { __le32 bf_energy_delta; __le32 bf_roaming_energy_delta; __le32 bf_roaming_state; __le32 bf_temp_threshold; __le32 bf_temp_fast_filter; __le32 bf_temp_slow_filter; __le32 bf_enable_beacon_filter; __le32 bf_debug_flag; __le32 bf_escape_timer; __le32 ba_escape_timer; __le32 ba_enable_beacon_abort; __le32 bf_threshold_absolute_low[2]; __le32 bf_threshold_absolute_high[2]; } __packed; /* BEACON_FILTER_CONFIG_API_S_VER_4 */ /* Beacon filtering and beacon abort */ #define IWL_BF_ENERGY_DELTA_DEFAULT 5 #define IWL_BF_ENERGY_DELTA_D0I3 20 #define IWL_BF_ENERGY_DELTA_MAX 255 #define IWL_BF_ENERGY_DELTA_MIN 0 #define IWL_BF_ROAMING_ENERGY_DELTA_DEFAULT 1 #define IWL_BF_ROAMING_ENERGY_DELTA_D0I3 20 #define IWL_BF_ROAMING_ENERGY_DELTA_MAX 255 #define IWL_BF_ROAMING_ENERGY_DELTA_MIN 0 #define IWL_BF_ROAMING_STATE_DEFAULT 72 #define IWL_BF_ROAMING_STATE_D0I3 72 #define IWL_BF_ROAMING_STATE_MAX 255 #define IWL_BF_ROAMING_STATE_MIN 0 #define IWL_BF_TEMP_THRESHOLD_DEFAULT 112 #define IWL_BF_TEMP_THRESHOLD_D0I3 112 #define IWL_BF_TEMP_THRESHOLD_MAX 255 #define IWL_BF_TEMP_THRESHOLD_MIN 0 #define IWL_BF_TEMP_FAST_FILTER_DEFAULT 1 #define IWL_BF_TEMP_FAST_FILTER_D0I3 1 #define IWL_BF_TEMP_FAST_FILTER_MAX 255 #define IWL_BF_TEMP_FAST_FILTER_MIN 0 #define IWL_BF_TEMP_SLOW_FILTER_DEFAULT 5 #define IWL_BF_TEMP_SLOW_FILTER_D0I3 20 #define IWL_BF_TEMP_SLOW_FILTER_MAX 255 #define IWL_BF_TEMP_SLOW_FILTER_MIN 0 #define IWL_BF_ENABLE_BEACON_FILTER_DEFAULT 1 #define IWL_BF_DEBUG_FLAG_DEFAULT 0 #define IWL_BF_DEBUG_FLAG_D0I3 0 #define IWL_BF_ESCAPE_TIMER_DEFAULT 0 #define IWL_BF_ESCAPE_TIMER_D0I3 0 #define IWL_BF_ESCAPE_TIMER_MAX 1024 #define IWL_BF_ESCAPE_TIMER_MIN 0 #define IWL_BA_ESCAPE_TIMER_DEFAULT 6 #define IWL_BA_ESCAPE_TIMER_D0I3 6 #define IWL_BA_ESCAPE_TIMER_D3 9 #define IWL_BA_ESCAPE_TIMER_MAX 1024 #define IWL_BA_ESCAPE_TIMER_MIN 0 #define IWL_BA_ENABLE_BEACON_ABORT_DEFAULT 1 #define IWL_BF_CMD_CONFIG(mode) \ .bf_energy_delta = cpu_to_le32(IWL_BF_ENERGY_DELTA ## mode), \ .bf_roaming_energy_delta = \ cpu_to_le32(IWL_BF_ROAMING_ENERGY_DELTA ## mode), \ .bf_roaming_state = cpu_to_le32(IWL_BF_ROAMING_STATE ## mode), \ .bf_temp_threshold = cpu_to_le32(IWL_BF_TEMP_THRESHOLD ## mode), \ .bf_temp_fast_filter = cpu_to_le32(IWL_BF_TEMP_FAST_FILTER ## mode), \ .bf_temp_slow_filter = cpu_to_le32(IWL_BF_TEMP_SLOW_FILTER ## mode), \ .bf_debug_flag = cpu_to_le32(IWL_BF_DEBUG_FLAG ## mode), \ .bf_escape_timer = cpu_to_le32(IWL_BF_ESCAPE_TIMER ## mode), \ .ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER ## mode) #define IWL_BF_CMD_CONFIG_DEFAULTS IWL_BF_CMD_CONFIG(_DEFAULT) #define IWL_BF_CMD_CONFIG_D0I3 IWL_BF_CMD_CONFIG(_D0I3) #endif /* __iwl_fw_api_power_h__ */ diff --git a/sys/contrib/dev/iwlwifi/fw/api/rs.h b/sys/contrib/dev/iwlwifi/fw/api/rs.h index 2198ca5269e1..687f804c46b7 100644 --- a/sys/contrib/dev/iwlwifi/fw/api/rs.h +++ b/sys/contrib/dev/iwlwifi/fw/api/rs.h @@ -1,762 +1,761 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright (C) 2012-2014, 2018-2022 Intel Corporation * Copyright (C) 2017 Intel Deutschland GmbH */ #ifndef __iwl_fw_api_rs_h__ #define __iwl_fw_api_rs_h__ #include "mac.h" /** * enum iwl_tlc_mng_cfg_flags_enum - options for TLC config flags * @IWL_TLC_MNG_CFG_FLAGS_STBC_MSK: enable STBC. 
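
The IWL_BF_CMD_CONFIG(mode) macro in power.h above pastes the _DEFAULT or _D0I3 suffix onto each threshold constant, so a caller can spell out a whole set of designated initializers in one token. A hedged usage sketch of the default variant (mirrors the way such a command is typically initialized, but the object name is illustrative):

/*
 * Illustrative only: a beacon filter command populated with the
 * default thresholds via the token-pasting helper shown above.
 */
static const struct iwl_beacon_filter_cmd example_bf_cmd_defaults = {
	IWL_BF_CMD_CONFIG_DEFAULTS,
	.bf_enable_beacon_filter =
		cpu_to_le32(IWL_BF_ENABLE_BEACON_FILTER_DEFAULT),
	.ba_enable_beacon_abort =
		cpu_to_le32(IWL_BA_ENABLE_BEACON_ABORT_DEFAULT),
};
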
For HE this enables STBC for * bandwidths <= 80MHz * @IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK: enable LDPC * @IWL_TLC_MNG_CFG_FLAGS_HE_STBC_160MHZ_MSK: enable STBC in HE at 160MHz * bandwidth * @IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_1_MSK: enable HE Dual Carrier Modulation * for BPSK (MCS 0) with 1 spatial * stream * @IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_2_MSK: enable HE Dual Carrier Modulation * for BPSK (MCS 0) with 2 spatial * streams */ enum iwl_tlc_mng_cfg_flags { IWL_TLC_MNG_CFG_FLAGS_STBC_MSK = BIT(0), IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK = BIT(1), IWL_TLC_MNG_CFG_FLAGS_HE_STBC_160MHZ_MSK = BIT(2), IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_1_MSK = BIT(3), IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_2_MSK = BIT(4), }; /** * enum iwl_tlc_mng_cfg_cw - channel width options * @IWL_TLC_MNG_CH_WIDTH_20MHZ: 20MHZ channel * @IWL_TLC_MNG_CH_WIDTH_40MHZ: 40MHZ channel * @IWL_TLC_MNG_CH_WIDTH_80MHZ: 80MHZ channel * @IWL_TLC_MNG_CH_WIDTH_160MHZ: 160MHZ channel * @IWL_TLC_MNG_CH_WIDTH_LAST: maximum value */ enum iwl_tlc_mng_cfg_cw { IWL_TLC_MNG_CH_WIDTH_20MHZ, IWL_TLC_MNG_CH_WIDTH_40MHZ, IWL_TLC_MNG_CH_WIDTH_80MHZ, IWL_TLC_MNG_CH_WIDTH_160MHZ, IWL_TLC_MNG_CH_WIDTH_LAST = IWL_TLC_MNG_CH_WIDTH_160MHZ, }; /** * enum iwl_tlc_mng_cfg_chains - possible chains * @IWL_TLC_MNG_CHAIN_A_MSK: chain A * @IWL_TLC_MNG_CHAIN_B_MSK: chain B */ enum iwl_tlc_mng_cfg_chains { IWL_TLC_MNG_CHAIN_A_MSK = BIT(0), IWL_TLC_MNG_CHAIN_B_MSK = BIT(1), }; /** * enum iwl_tlc_mng_cfg_mode - supported modes * @IWL_TLC_MNG_MODE_CCK: enable CCK * @IWL_TLC_MNG_MODE_OFDM_NON_HT: enable OFDM (non HT) * @IWL_TLC_MNG_MODE_NON_HT: enable non HT * @IWL_TLC_MNG_MODE_HT: enable HT * @IWL_TLC_MNG_MODE_VHT: enable VHT * @IWL_TLC_MNG_MODE_HE: enable HE * @IWL_TLC_MNG_MODE_INVALID: invalid value * @IWL_TLC_MNG_MODE_NUM: a count of possible modes */ enum iwl_tlc_mng_cfg_mode { IWL_TLC_MNG_MODE_CCK = 0, IWL_TLC_MNG_MODE_OFDM_NON_HT = IWL_TLC_MNG_MODE_CCK, IWL_TLC_MNG_MODE_NON_HT = IWL_TLC_MNG_MODE_CCK, IWL_TLC_MNG_MODE_HT, IWL_TLC_MNG_MODE_VHT, IWL_TLC_MNG_MODE_HE, IWL_TLC_MNG_MODE_INVALID, IWL_TLC_MNG_MODE_NUM = IWL_TLC_MNG_MODE_INVALID, }; /** * enum iwl_tlc_mng_ht_rates - HT/VHT/HE rates * @IWL_TLC_MNG_HT_RATE_MCS0: index of MCS0 * @IWL_TLC_MNG_HT_RATE_MCS1: index of MCS1 * @IWL_TLC_MNG_HT_RATE_MCS2: index of MCS2 * @IWL_TLC_MNG_HT_RATE_MCS3: index of MCS3 * @IWL_TLC_MNG_HT_RATE_MCS4: index of MCS4 * @IWL_TLC_MNG_HT_RATE_MCS5: index of MCS5 * @IWL_TLC_MNG_HT_RATE_MCS6: index of MCS6 * @IWL_TLC_MNG_HT_RATE_MCS7: index of MCS7 * @IWL_TLC_MNG_HT_RATE_MCS8: index of MCS8 * @IWL_TLC_MNG_HT_RATE_MCS9: index of MCS9 * @IWL_TLC_MNG_HT_RATE_MCS10: index of MCS10 * @IWL_TLC_MNG_HT_RATE_MCS11: index of MCS11 * @IWL_TLC_MNG_HT_RATE_MAX: maximal rate for HT/VHT */ enum iwl_tlc_mng_ht_rates { IWL_TLC_MNG_HT_RATE_MCS0 = 0, IWL_TLC_MNG_HT_RATE_MCS1, IWL_TLC_MNG_HT_RATE_MCS2, IWL_TLC_MNG_HT_RATE_MCS3, IWL_TLC_MNG_HT_RATE_MCS4, IWL_TLC_MNG_HT_RATE_MCS5, IWL_TLC_MNG_HT_RATE_MCS6, IWL_TLC_MNG_HT_RATE_MCS7, IWL_TLC_MNG_HT_RATE_MCS8, IWL_TLC_MNG_HT_RATE_MCS9, IWL_TLC_MNG_HT_RATE_MCS10, IWL_TLC_MNG_HT_RATE_MCS11, IWL_TLC_MNG_HT_RATE_MAX = IWL_TLC_MNG_HT_RATE_MCS11, }; enum IWL_TLC_MNG_NSS { IWL_TLC_NSS_1, IWL_TLC_NSS_2, IWL_TLC_NSS_MAX }; /** * enum IWL_TLC_MCS_PER_BW - mcs index per BW * @IWL_TLC_MCS_PER_BW_80: mcs for bw - 20Hhz, 40Hhz, 80Hhz * @IWL_TLC_MCS_PER_BW_160: mcs for bw - 160Mhz * @IWL_TLC_MCS_PER_BW_320: mcs for bw - 320Mhz * @IWL_TLC_MCS_PER_BW_NUM_V3: number of entries up to version 3 * @IWL_TLC_MCS_PER_BW_NUM_V4: number of entries from version 4 */ enum IWL_TLC_MCS_PER_BW { 
IWL_TLC_MCS_PER_BW_80, IWL_TLC_MCS_PER_BW_160, IWL_TLC_MCS_PER_BW_320, IWL_TLC_MCS_PER_BW_NUM_V3 = IWL_TLC_MCS_PER_BW_160 + 1, IWL_TLC_MCS_PER_BW_NUM_V4 = IWL_TLC_MCS_PER_BW_320 + 1, }; /** * struct iwl_tlc_config_cmd_v3 - TLC configuration * @sta_id: station id * @reserved1: reserved * @max_ch_width: max supported channel width from @enum iwl_tlc_mng_cfg_cw * @mode: &enum iwl_tlc_mng_cfg_mode * @chains: bitmask of &enum iwl_tlc_mng_cfg_chains * @amsdu: TX amsdu is supported * @flags: bitmask of &enum iwl_tlc_mng_cfg_flags * @non_ht_rates: bitmap of supported legacy rates * @ht_rates: bitmap of &enum iwl_tlc_mng_ht_rates, per &enum IWL_TLC_MCS_PER_BW * pair (0 - 80mhz width and below, 1 - 160mhz). * @max_mpdu_len: max MPDU length, in bytes * @sgi_ch_width_supp: bitmap of SGI support per channel width * use BIT(@enum iwl_tlc_mng_cfg_cw) * @reserved2: reserved * @max_tx_op: max TXOP in uSecs for all AC (BK, BE, VO, VI), * set zero for no limit. */ struct iwl_tlc_config_cmd_v3 { u8 sta_id; u8 reserved1[3]; u8 max_ch_width; u8 mode; u8 chains; u8 amsdu; __le16 flags; __le16 non_ht_rates; __le16 ht_rates[IWL_TLC_NSS_MAX][IWL_TLC_MCS_PER_BW_NUM_V3]; __le16 max_mpdu_len; u8 sgi_ch_width_supp; u8 reserved2; __le32 max_tx_op; } __packed; /* TLC_MNG_CONFIG_CMD_API_S_VER_3 */ /** * struct iwl_tlc_config_cmd_v4 - TLC configuration * @sta_id: station id * @reserved1: reserved * @max_ch_width: max supported channel width from &enum iwl_tlc_mng_cfg_cw * @mode: &enum iwl_tlc_mng_cfg_mode * @chains: bitmask of &enum iwl_tlc_mng_cfg_chains * @sgi_ch_width_supp: bitmap of SGI support per channel width * use BIT(&enum iwl_tlc_mng_cfg_cw) * @flags: bitmask of &enum iwl_tlc_mng_cfg_flags * @non_ht_rates: bitmap of supported legacy rates * @ht_rates: bitmap of &enum iwl_tlc_mng_ht_rates, per * pair (0 - 80mhz width and below, 1 - 160mhz, 2 - 320mhz). * @max_mpdu_len: max MPDU length, in bytes * @max_tx_op: max TXOP in uSecs for all AC (BK, BE, VO, VI), * set zero for no limit. 
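 *
 * Compared to &struct iwl_tlc_config_cmd_v3, version 4 adds the 320 MHz
 * MCS set to @ht_rates, drops the amsdu field and shrinks @max_tx_op to
 * 16 bits.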
*/ struct iwl_tlc_config_cmd_v4 { u8 sta_id; u8 reserved1[3]; u8 max_ch_width; u8 mode; u8 chains; u8 sgi_ch_width_supp; __le16 flags; __le16 non_ht_rates; __le16 ht_rates[IWL_TLC_NSS_MAX][IWL_TLC_MCS_PER_BW_NUM_V4]; __le16 max_mpdu_len; __le16 max_tx_op; } __packed; /* TLC_MNG_CONFIG_CMD_API_S_VER_4 */ /** * enum iwl_tlc_update_flags - updated fields * @IWL_TLC_NOTIF_FLAG_RATE: last initial rate update * @IWL_TLC_NOTIF_FLAG_AMSDU: umsdu parameters update */ enum iwl_tlc_update_flags { IWL_TLC_NOTIF_FLAG_RATE = BIT(0), IWL_TLC_NOTIF_FLAG_AMSDU = BIT(1), }; /** * struct iwl_tlc_update_notif - TLC notification from FW * @sta_id: station id * @reserved: reserved * @flags: bitmap of notifications reported * @rate: current initial rate * @amsdu_size: Max AMSDU size, in bytes * @amsdu_enabled: bitmap for per-TID AMSDU enablement */ struct iwl_tlc_update_notif { u8 sta_id; u8 reserved[3]; __le32 flags; __le32 rate; __le32 amsdu_size; __le32 amsdu_enabled; } __packed; /* TLC_MNG_UPDATE_NTFY_API_S_VER_2 */ #define IWL_MAX_MCS_DISPLAY_SIZE 12 struct iwl_rate_mcs_info { char mbps[IWL_MAX_MCS_DISPLAY_SIZE]; char mcs[IWL_MAX_MCS_DISPLAY_SIZE]; }; /* * These serve as indexes into * struct iwl_rate_info fw_rate_idx_to_plcp[IWL_RATE_COUNT]; * TODO: avoid overlap between legacy and HT rates */ enum { IWL_RATE_1M_INDEX = 0, IWL_FIRST_CCK_RATE = IWL_RATE_1M_INDEX, IWL_RATE_2M_INDEX, IWL_RATE_5M_INDEX, IWL_RATE_11M_INDEX, IWL_LAST_CCK_RATE = IWL_RATE_11M_INDEX, IWL_RATE_6M_INDEX, IWL_FIRST_OFDM_RATE = IWL_RATE_6M_INDEX, IWL_RATE_MCS_0_INDEX = IWL_RATE_6M_INDEX, IWL_FIRST_HT_RATE = IWL_RATE_MCS_0_INDEX, IWL_FIRST_VHT_RATE = IWL_RATE_MCS_0_INDEX, IWL_RATE_9M_INDEX, IWL_RATE_12M_INDEX, IWL_RATE_MCS_1_INDEX = IWL_RATE_12M_INDEX, IWL_RATE_18M_INDEX, IWL_RATE_MCS_2_INDEX = IWL_RATE_18M_INDEX, IWL_RATE_24M_INDEX, IWL_RATE_MCS_3_INDEX = IWL_RATE_24M_INDEX, IWL_RATE_36M_INDEX, IWL_RATE_MCS_4_INDEX = IWL_RATE_36M_INDEX, IWL_RATE_48M_INDEX, IWL_RATE_MCS_5_INDEX = IWL_RATE_48M_INDEX, IWL_RATE_54M_INDEX, IWL_RATE_MCS_6_INDEX = IWL_RATE_54M_INDEX, IWL_LAST_NON_HT_RATE = IWL_RATE_54M_INDEX, IWL_RATE_60M_INDEX, IWL_RATE_MCS_7_INDEX = IWL_RATE_60M_INDEX, IWL_LAST_HT_RATE = IWL_RATE_MCS_7_INDEX, IWL_RATE_MCS_8_INDEX, IWL_RATE_MCS_9_INDEX, IWL_LAST_VHT_RATE = IWL_RATE_MCS_9_INDEX, IWL_RATE_MCS_10_INDEX, IWL_RATE_MCS_11_INDEX, IWL_LAST_HE_RATE = IWL_RATE_MCS_11_INDEX, IWL_RATE_COUNT_LEGACY = IWL_LAST_NON_HT_RATE + 1, IWL_RATE_COUNT = IWL_LAST_HE_RATE + 1, IWL_RATE_INVM_INDEX = IWL_RATE_COUNT, IWL_RATE_INVALID = IWL_RATE_COUNT, }; #define IWL_RATE_BIT_MSK(r) BIT(IWL_RATE_##r##M_INDEX) /* fw API values for legacy bit rates, both OFDM and CCK */ enum { IWL_RATE_6M_PLCP = 13, IWL_RATE_9M_PLCP = 15, IWL_RATE_12M_PLCP = 5, IWL_RATE_18M_PLCP = 7, IWL_RATE_24M_PLCP = 9, IWL_RATE_36M_PLCP = 11, IWL_RATE_48M_PLCP = 1, IWL_RATE_54M_PLCP = 3, IWL_RATE_1M_PLCP = 10, IWL_RATE_2M_PLCP = 20, IWL_RATE_5M_PLCP = 55, IWL_RATE_11M_PLCP = 110, IWL_RATE_INVM_PLCP = -1, }; /* * rate_n_flags bit fields version 1 * * The 32-bit value has different layouts in the low 8 bites depending on the * format. There are three formats, HT, VHT and legacy (11abg, with subformats * for CCK and OFDM). 
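 * The format in use is flagged by bit 8 (HT), bit 26 (VHT) and bit 9 (CCK),
 * e.g. a value with RATE_MCS_HT_MSK_V1 set decodes as an HT rate: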
* * High-throughput (HT) rate format * bit 8 is 1, bit 26 is 0, bit 9 is 0 (OFDM) * Very High-throughput (VHT) rate format * bit 8 is 0, bit 26 is 1, bit 9 is 0 (OFDM) * Legacy OFDM rate format for bits 7:0 * bit 8 is 0, bit 26 is 0, bit 9 is 0 (OFDM) * Legacy CCK rate format for bits 7:0: * bit 8 is 0, bit 26 is 0, bit 9 is 1 (CCK) */ /* Bit 8: (1) HT format, (0) legacy or VHT format */ #define RATE_MCS_HT_POS 8 #define RATE_MCS_HT_MSK_V1 BIT(RATE_MCS_HT_POS) /* Bit 9: (1) CCK, (0) OFDM. HT (bit 8) must be "0" for this bit to be valid */ #define RATE_MCS_CCK_POS_V1 9 #define RATE_MCS_CCK_MSK_V1 BIT(RATE_MCS_CCK_POS_V1) /* Bit 26: (1) VHT format, (0) legacy format in bits 8:0 */ #define RATE_MCS_VHT_POS_V1 26 #define RATE_MCS_VHT_MSK_V1 BIT(RATE_MCS_VHT_POS_V1) /* * High-throughput (HT) rate format for bits 7:0 * * 2-0: MCS rate base * 0) 6 Mbps * 1) 12 Mbps * 2) 18 Mbps * 3) 24 Mbps * 4) 36 Mbps * 5) 48 Mbps * 6) 54 Mbps * 7) 60 Mbps * 4-3: 0) Single stream (SISO) * 1) Dual stream (MIMO) * 2) Triple stream (MIMO) * 5: Value of 0x20 in bits 7:0 indicates 6 Mbps HT40 duplicate data * (bits 7-6 are zero) * * Together the low 5 bits work out to the MCS index because we don't * support MCSes above 15/23, and 0-7 have one stream, 8-15 have two * streams and 16-23 have three streams. We could also support MCS 32 * which is the duplicate 20 MHz MCS (bit 5 set, all others zero.) */ #define RATE_HT_MCS_RATE_CODE_MSK_V1 0x7 #define RATE_HT_MCS_NSS_POS_V1 3 #define RATE_HT_MCS_NSS_MSK_V1 (3 << RATE_HT_MCS_NSS_POS_V1) #define RATE_HT_MCS_MIMO2_MSK BIT(RATE_HT_MCS_NSS_POS_V1) /* Bit 10: (1) Use Green Field preamble */ #define RATE_HT_MCS_GF_POS 10 #define RATE_HT_MCS_GF_MSK (1 << RATE_HT_MCS_GF_POS) #define RATE_HT_MCS_INDEX_MSK_V1 0x3f /* * Very High-throughput (VHT) rate format for bits 7:0 * * 3-0: VHT MCS (0-9) * 5-4: number of streams - 1: * 0) Single stream (SISO) * 1) Dual stream (MIMO) * 2) Triple stream (MIMO) */ /* Bit 4-5: (0) SISO, (1) MIMO2 (2) MIMO3 */ #define RATE_VHT_MCS_RATE_CODE_MSK 0xf #define RATE_VHT_MCS_NSS_POS 4 #define RATE_VHT_MCS_NSS_MSK (3 << RATE_VHT_MCS_NSS_POS) #define RATE_VHT_MCS_MIMO2_MSK BIT(RATE_VHT_MCS_NSS_POS) /* * Legacy OFDM rate format for bits 7:0 * * 3-0: 0xD) 6 Mbps * 0xF) 9 Mbps * 0x5) 12 Mbps * 0x7) 18 Mbps * 0x9) 24 Mbps * 0xB) 36 Mbps * 0x1) 48 Mbps * 0x3) 54 Mbps * (bits 7-4 are 0) * * Legacy CCK rate format for bits 7:0: * bit 8 is 0, bit 26 is 0, bit 9 is 1 (CCK): * * 6-0: 10) 1 Mbps * 20) 2 Mbps * 55) 5.5 Mbps * 110) 11 Mbps * (bit 7 is 0) */ #define RATE_LEGACY_RATE_MSK_V1 0xff /* Bit 10 - OFDM HE */ #define RATE_MCS_HE_POS_V1 10 #define RATE_MCS_HE_MSK_V1 BIT(RATE_MCS_HE_POS_V1) /* * Bit 11-12: (0) 20MHz, (1) 40MHz, (2) 80MHz, (3) 160MHz * 0 and 1 are valid for HT and VHT, 2 and 3 only for VHT */ #define RATE_MCS_CHAN_WIDTH_POS 11 #define RATE_MCS_CHAN_WIDTH_MSK_V1 (3 << RATE_MCS_CHAN_WIDTH_POS) /* Bit 13: (1) Short guard interval (0.4 usec), (0) normal GI (0.8 usec) */ #define RATE_MCS_SGI_POS_V1 13 #define RATE_MCS_SGI_MSK_V1 BIT(RATE_MCS_SGI_POS_V1) /* Bit 14-16: Antenna selection (1) Ant A, (2) Ant B, (4) Ant C */ #define RATE_MCS_ANT_POS 14 #define RATE_MCS_ANT_A_MSK (1 << RATE_MCS_ANT_POS) #define RATE_MCS_ANT_B_MSK (2 << RATE_MCS_ANT_POS) #define RATE_MCS_ANT_AB_MSK (RATE_MCS_ANT_A_MSK | \ RATE_MCS_ANT_B_MSK) #define RATE_MCS_ANT_MSK RATE_MCS_ANT_AB_MSK /* Bit 17: (0) SS, (1) SS*2 */ #define RATE_MCS_STBC_POS 17 #define RATE_MCS_STBC_MSK BIT(RATE_MCS_STBC_POS) /* Bit 18: OFDM-HE dual carrier mode */ #define RATE_HE_DUAL_CARRIER_MODE 18 #define 
RATE_HE_DUAL_CARRIER_MODE_MSK BIT(RATE_HE_DUAL_CARRIER_MODE) /* Bit 19: (0) Beamforming is off, (1) Beamforming is on */ #define RATE_MCS_BF_POS 19 #define RATE_MCS_BF_MSK (1 << RATE_MCS_BF_POS) /* * Bit 20-21: HE LTF type and guard interval * HE (ext) SU: * 0 1xLTF+0.8us * 1 2xLTF+0.8us * 2 2xLTF+1.6us * 3 & SGI (bit 13) clear 4xLTF+3.2us * 3 & SGI (bit 13) set 4xLTF+0.8us * HE MU: * 0 4xLTF+0.8us * 1 2xLTF+0.8us * 2 2xLTF+1.6us * 3 4xLTF+3.2us * HE TRIG: * 0 1xLTF+1.6us * 1 2xLTF+1.6us * 2 4xLTF+3.2us * 3 (does not occur) */ #define RATE_MCS_HE_GI_LTF_POS 20 #define RATE_MCS_HE_GI_LTF_MSK_V1 (3 << RATE_MCS_HE_GI_LTF_POS) /* Bit 22-23: HE type. (0) SU, (1) SU_EXT, (2) MU, (3) trigger based */ #define RATE_MCS_HE_TYPE_POS_V1 22 #define RATE_MCS_HE_TYPE_SU_V1 (0 << RATE_MCS_HE_TYPE_POS_V1) #define RATE_MCS_HE_TYPE_EXT_SU_V1 BIT(RATE_MCS_HE_TYPE_POS_V1) #define RATE_MCS_HE_TYPE_MU_V1 (2 << RATE_MCS_HE_TYPE_POS_V1) #define RATE_MCS_HE_TYPE_TRIG_V1 (3 << RATE_MCS_HE_TYPE_POS_V1) #define RATE_MCS_HE_TYPE_MSK_V1 (3 << RATE_MCS_HE_TYPE_POS_V1) /* Bit 24-25: (0) 20MHz (no dup), (1) 2x20MHz, (2) 4x20MHz, 3 8x20MHz */ #define RATE_MCS_DUP_POS_V1 24 #define RATE_MCS_DUP_MSK_V1 (3 << RATE_MCS_DUP_POS_V1) /* Bit 27: (1) LDPC enabled, (0) LDPC disabled */ #define RATE_MCS_LDPC_POS_V1 27 #define RATE_MCS_LDPC_MSK_V1 BIT(RATE_MCS_LDPC_POS_V1) /* Bit 28: (1) 106-tone RX (8 MHz RU), (0) normal bandwidth */ #define RATE_MCS_HE_106T_POS_V1 28 #define RATE_MCS_HE_106T_MSK_V1 BIT(RATE_MCS_HE_106T_POS_V1) /* Bit 30-31: (1) RTS, (2) CTS */ #define RATE_MCS_RTS_REQUIRED_POS (30) #define RATE_MCS_RTS_REQUIRED_MSK (0x1 << RATE_MCS_RTS_REQUIRED_POS) #define RATE_MCS_CTS_REQUIRED_POS (31) #define RATE_MCS_CTS_REQUIRED_MSK (0x1 << RATE_MCS_CTS_REQUIRED_POS) /* rate_n_flags bit field version 2 * * The 32-bit value has different layouts in the low 8 bits depending on the * format. There are three formats, HT, VHT and legacy (11abg, with subformats * for CCK and OFDM). 
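 * Unlike version 1 there are no separate format flag bits; the format is
 * encoded explicitly in bits 10-8, e.g. (rate_n_flags & RATE_MCS_MOD_TYPE_MSK)
 * == RATE_MCS_HE_MSK identifies an HE rate.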
 */

/* Bits 10-8: rate format
 * (0) Legacy CCK (1) Legacy OFDM (2) High-throughput (HT)
 * (3) Very High-throughput (VHT) (4) High-efficiency (HE)
 * (5) Extremely High-throughput (EHT)
 */
#define RATE_MCS_MOD_TYPE_POS 8
#define RATE_MCS_MOD_TYPE_MSK (0x7 << RATE_MCS_MOD_TYPE_POS)
#define RATE_MCS_CCK_MSK (0 << RATE_MCS_MOD_TYPE_POS)
#define RATE_MCS_LEGACY_OFDM_MSK (1 << RATE_MCS_MOD_TYPE_POS)
#define RATE_MCS_HT_MSK (2 << RATE_MCS_MOD_TYPE_POS)
#define RATE_MCS_VHT_MSK (3 << RATE_MCS_MOD_TYPE_POS)
#define RATE_MCS_HE_MSK (4 << RATE_MCS_MOD_TYPE_POS)
#define RATE_MCS_EHT_MSK (5 << RATE_MCS_MOD_TYPE_POS)

/*
 * Legacy CCK rate format for bits 0:3:
 *
 * (0) 0xa - 1 Mbps
 * (1) 0x14 - 2 Mbps
 * (2) 0x37 - 5.5 Mbps
 * (3) 0x6e - 11 Mbps
 *
 * Legacy OFDM rate format for bits 3:0:
 *
 * (0) 6 Mbps
 * (1) 9 Mbps
 * (2) 12 Mbps
 * (3) 18 Mbps
 * (4) 24 Mbps
 * (5) 36 Mbps
 * (6) 48 Mbps
 * (7) 54 Mbps
 */
#define RATE_LEGACY_RATE_MSK 0x7

/*
 * HT, VHT, HE, EHT rate format for bits 3:0
 * 3-0: MCS
 */
#define RATE_HT_MCS_CODE_MSK 0x7
#define RATE_MCS_NSS_POS 4
#define RATE_MCS_NSS_MSK (1 << RATE_MCS_NSS_POS)
#define RATE_MCS_CODE_MSK 0xf
#define RATE_HT_MCS_INDEX(r) ((((r) & RATE_MCS_NSS_MSK) >> 1) | \
			      ((r) & RATE_HT_MCS_CODE_MSK))

/* Bits 7-5: reserved */

/*
 * Bits 13-11: (0) 20MHz, (1) 40MHz, (2) 80MHz, (3) 160MHz, (4) 320MHz
 */
#define RATE_MCS_CHAN_WIDTH_MSK (0x7 << RATE_MCS_CHAN_WIDTH_POS)
#define RATE_MCS_CHAN_WIDTH_20 (0 << RATE_MCS_CHAN_WIDTH_POS)
#define RATE_MCS_CHAN_WIDTH_40 (1 << RATE_MCS_CHAN_WIDTH_POS)
#define RATE_MCS_CHAN_WIDTH_80 (2 << RATE_MCS_CHAN_WIDTH_POS)
#define RATE_MCS_CHAN_WIDTH_160 (3 << RATE_MCS_CHAN_WIDTH_POS)
#define RATE_MCS_CHAN_WIDTH_320 (4 << RATE_MCS_CHAN_WIDTH_POS)

/* Bit 15-14: Antenna selection:
 * Bit 14: Ant A active
 * Bit 15: Ant B active
 *
 * All relevant definitions are same as in v1
 */

/* Bit 16: (1) LDPC enabled, (0) LDPC disabled */
#define RATE_MCS_LDPC_POS 16
#define RATE_MCS_LDPC_MSK (1 << RATE_MCS_LDPC_POS)

/* Bit 17: (0) SS, (1) SS*2 (same as v1) */
/* Bit 18: OFDM-HE dual carrier mode (same as v1) */
/* Bit 19: (0) Beamforming is off, (1) Beamforming is on (same as v1) */

/*
 * Bit 22-20: HE LTF type and guard interval
 * CCK:
 *	0	long preamble
 *	1	short preamble
 * HT/VHT:
 *	0	0.8us
 *	1	0.4us
 * HE (ext) SU:
 *	0	1xLTF+0.8us
 *	1	2xLTF+0.8us
 *	2	2xLTF+1.6us
 *	3	4xLTF+3.2us
 *	4	4xLTF+0.8us
 * HE MU:
 *	0	4xLTF+0.8us
 *	1	2xLTF+0.8us
 *	2	2xLTF+1.6us
 *	3	4xLTF+3.2us
 * HE TRIG:
 *	0	1xLTF+1.6us
 *	1	2xLTF+1.6us
 *	2	4xLTF+3.2us
 */
#define RATE_MCS_HE_GI_LTF_MSK (0x7 << RATE_MCS_HE_GI_LTF_POS)
#define RATE_MCS_SGI_POS RATE_MCS_HE_GI_LTF_POS
#define RATE_MCS_SGI_MSK (1 << RATE_MCS_SGI_POS)
#define RATE_MCS_HE_SU_4_LTF 3
#define RATE_MCS_HE_SU_4_LTF_08_GI 4

/* Bit 24-23: HE type. (0) SU, (1) SU_EXT, (2) MU, (3) trigger based */
#define RATE_MCS_HE_TYPE_POS 23
#define RATE_MCS_HE_TYPE_SU (0 << RATE_MCS_HE_TYPE_POS)
#define RATE_MCS_HE_TYPE_EXT_SU (1 << RATE_MCS_HE_TYPE_POS)
#define RATE_MCS_HE_TYPE_MU (2 << RATE_MCS_HE_TYPE_POS)
#define RATE_MCS_HE_TYPE_TRIG (3 << RATE_MCS_HE_TYPE_POS)
#define RATE_MCS_HE_TYPE_MSK (3 << RATE_MCS_HE_TYPE_POS)

/* Bit 25: duplicate channel enabled
 *
 * if this bit is set, duplicate is according to BW (bits 11-13):
 *
 * CCK: 2x 20MHz
 * OFDM Legacy: N x 20MHz, (N = BW / 20 MHz, i.e. 2, 4, 8 or 16)
 * EHT: 2 x BW/2, (80 - 2x40, 160 - 2x80, 320 - 2x160)
 */
#define RATE_MCS_DUP_POS 25
#define RATE_MCS_DUP_MSK (1 << RATE_MCS_DUP_POS)

/* Bit 26: (1) 106-tone RX (8 MHz RU), (0) normal bandwidth */
#define RATE_MCS_HE_106T_POS 26
#define RATE_MCS_HE_106T_MSK (1 << RATE_MCS_HE_106T_POS)

/* Bit 27: EHT extra LTF:
 * instead of 1 LTF for SISO use 2 LTFs,
 * instead of 2 LTFs for NSTS=2 use 4 LTFs
 */
#define RATE_MCS_EHT_EXTRA_LTF_POS 27
#define RATE_MCS_EHT_EXTRA_LTF_MSK (1 << RATE_MCS_EHT_EXTRA_LTF_POS)

/* Bit 31-28: reserved */

/* Link Quality definitions */

/* # entries in rate scale table to support Tx retries */
#define LQ_MAX_RETRY_NUM 16

/* Link quality command flags bit fields */

/* Bit 0: (0) Don't use RTS (1) Use RTS */
#define LQ_FLAG_USE_RTS_POS 0
#define LQ_FLAG_USE_RTS_MSK (1 << LQ_FLAG_USE_RTS_POS)

/* Bit 1-3: LQ command color. Used to match responses to LQ commands */
#define LQ_FLAG_COLOR_POS 1
#define LQ_FLAG_COLOR_MSK (7 << LQ_FLAG_COLOR_POS)
#define LQ_FLAG_COLOR_GET(_f) (((_f) & LQ_FLAG_COLOR_MSK) >>\
			       LQ_FLAG_COLOR_POS)
#define LQ_FLAGS_COLOR_INC(_c) ((((_c) + 1) << LQ_FLAG_COLOR_POS) &\
				LQ_FLAG_COLOR_MSK)
#define LQ_FLAG_COLOR_SET(_f, _c) ((_c) | ((_f) & ~LQ_FLAG_COLOR_MSK))

/* Bit 4-5: Tx RTS BW Signalling
 * (0) No RTS BW signalling
 * (1) Static BW signalling
 * (2) Dynamic BW signalling
 */
#define LQ_FLAG_RTS_BW_SIG_POS 4
#define LQ_FLAG_RTS_BW_SIG_NONE (0 << LQ_FLAG_RTS_BW_SIG_POS)
#define LQ_FLAG_RTS_BW_SIG_STATIC (1 << LQ_FLAG_RTS_BW_SIG_POS)
#define LQ_FLAG_RTS_BW_SIG_DYNAMIC (2 << LQ_FLAG_RTS_BW_SIG_POS)

/* Bit 6: (0) No dynamic BW selection (1) Allow dynamic BW selection
 * Dynamic BW selection allows Tx with narrower BW than requested in rates
 */
#define LQ_FLAG_DYNAMIC_BW_POS 6
#define LQ_FLAG_DYNAMIC_BW_MSK (1 << LQ_FLAG_DYNAMIC_BW_POS)

/* Single Stream Tx Parameters (lq_cmd->ss_params)
 * Flags to control a smart FW decision about whether BFER/STBC/SISO will be
 * used for single stream Tx.
 */

/* Bit 0-1: Max STBC streams allowed. Can be 0-3.
 * (0) - No STBC allowed
 * (1) - 2x1 STBC allowed (HT/VHT)
 * (2) - 4x2 STBC allowed (HT/VHT)
 * (3) - 3x2 STBC allowed (HT only)
 * All our chips are at most 2 antennas so only (1) is valid for now.
 */
#define LQ_SS_STBC_ALLOWED_POS 0
#define LQ_SS_STBC_ALLOWED_MSK (3 << LQ_SS_STBC_ALLOWED_POS)

/* 2x1 STBC is allowed */
#define LQ_SS_STBC_1SS_ALLOWED (1 << LQ_SS_STBC_ALLOWED_POS)

/* Bit 2: Beamformer (VHT only) is allowed */
#define LQ_SS_BFER_ALLOWED_POS 2
#define LQ_SS_BFER_ALLOWED (1 << LQ_SS_BFER_ALLOWED_POS)

/* Bit 3: Force BFER or STBC for testing
 * If this is set:
 * If BFER is allowed then force the ucode to choose BFER else
 * If STBC is allowed then force the ucode to choose STBC over SISO
 */
#define LQ_SS_FORCE_POS 3
#define LQ_SS_FORCE (1 << LQ_SS_FORCE_POS)

/* Bit 31: ss_params field is valid.
Used for FW backward compatibility * with other drivers which don't support the ss_params API yet */ #define LQ_SS_PARAMS_VALID_POS 31 #define LQ_SS_PARAMS_VALID (1 << LQ_SS_PARAMS_VALID_POS) /** * struct iwl_lq_cmd - link quality command * @sta_id: station to update * @reduced_tpc: reduced transmit power control value * @control: not used * @flags: combination of LQ_FLAG_* * @mimo_delim: the first SISO index in rs_table, which separates MIMO * and SISO rates * @single_stream_ant_msk: best antenna for SISO (can be dual in CDD). * Should be ANT_[ABC] * @dual_stream_ant_msk: best antennas for MIMO, combination of ANT_[ABC] * @initial_rate_index: first index from rs_table per AC category * @agg_time_limit: aggregation max time threshold in usec/100, meaning * value of 100 is one usec. Range is 100 to 8000 * @agg_disable_start_th: try-count threshold for starting aggregation. * If a frame has higher try-count, it should not be selected for * starting an aggregation sequence. * @agg_frame_cnt_limit: max frame count in an aggregation. * 0: no limit * 1: no aggregation (one frame per aggregation) * 2 - 0x3f: maximal number of frames (up to 3f == 63) * @reserved2: reserved * @rs_table: array of rates for each TX try, each is rate_n_flags, * meaning it is a combination of RATE_MCS_* and IWL_RATE_*_PLCP * @ss_params: single stream features. declare whether STBC or BFER are allowed. */ struct iwl_lq_cmd { u8 sta_id; u8 reduced_tpc; __le16 control; /* LINK_QUAL_GENERAL_PARAMS_API_S_VER_1 */ u8 flags; u8 mimo_delim; u8 single_stream_ant_msk; u8 dual_stream_ant_msk; u8 initial_rate_index[AC_NUM]; /* LINK_QUAL_AGG_PARAMS_API_S_VER_1 */ __le16 agg_time_limit; u8 agg_disable_start_th; u8 agg_frame_cnt_limit; __le32 reserved2; __le32 rs_table[LQ_MAX_RETRY_NUM]; __le32 ss_params; }; /* LINK_QUALITY_CMD_API_S_VER_1 */ u8 iwl_fw_rate_idx_to_plcp(int idx); u32 iwl_new_rate_from_v1(u32 rate_v1); -u32 iwl_legacy_rate_to_fw_idx(u32 rate_n_flags); const struct iwl_rate_mcs_info *iwl_rate_mcs(int idx); const char *iwl_rs_pretty_ant(u8 ant); const char *iwl_rs_pretty_bw(int bw); int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate); bool iwl_he_is_sgi(u32 rate_n_flags); #endif /* __iwl_fw_api_rs_h__ */ diff --git a/sys/contrib/dev/iwlwifi/fw/dbg.c b/sys/contrib/dev/iwlwifi/fw/dbg.c index 2b21b69b23da..c2928d1f2b65 100644 --- a/sys/contrib/dev/iwlwifi/fw/dbg.c +++ b/sys/contrib/dev/iwlwifi/fw/dbg.c @@ -1,3136 +1,3166 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2005-2014, 2018-2021 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2015-2017 Intel Deutschland GmbH */ #include #if defined(__FreeBSD__) #include #endif #include "iwl-drv.h" #include "runtime.h" #include "dbg.h" #include "debugfs.h" #include "iwl-io.h" #include "iwl-prph.h" #include "iwl-csr.h" #include "iwl-fh.h" /** * struct iwl_fw_dump_ptrs - set of pointers needed for the fw-error-dump * * @fwrt_ptr: pointer to the buffer coming from fwrt * @trans_ptr: pointer to struct %iwl_trans_dump_data which contains the * transport's data. 
* @trans_len: length of the valid data in trans_ptr * @fwrt_len: length of the valid data in fwrt_ptr */ struct iwl_fw_dump_ptrs { struct iwl_trans_dump_data *trans_ptr; void *fwrt_ptr; u32 fwrt_len; }; #define RADIO_REG_MAX_READ 0x2ad static void iwl_read_radio_regs(struct iwl_fw_runtime *fwrt, struct iwl_fw_error_dump_data **dump_data) { u8 *pos = (void *)(*dump_data)->data; int i; IWL_DEBUG_INFO(fwrt, "WRT radio registers dump\n"); if (!iwl_trans_grab_nic_access(fwrt->trans)) return; (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RADIO_REG); (*dump_data)->len = cpu_to_le32(RADIO_REG_MAX_READ); for (i = 0; i < RADIO_REG_MAX_READ; i++) { u32 rd_cmd = RADIO_RSP_RD_CMD; rd_cmd |= i << RADIO_RSP_ADDR_POS; iwl_write_prph_no_grab(fwrt->trans, RSP_RADIO_CMD, rd_cmd); *pos = (u8)iwl_read_prph_no_grab(fwrt->trans, RSP_RADIO_RDDAT); pos++; } *dump_data = iwl_fw_error_next_data(*dump_data); iwl_trans_release_nic_access(fwrt->trans); } static void iwl_fwrt_dump_rxf(struct iwl_fw_runtime *fwrt, struct iwl_fw_error_dump_data **dump_data, int size, u32 offset, int fifo_num) { struct iwl_fw_error_dump_fifo *fifo_hdr; u32 *fifo_data; u32 fifo_len; int i; fifo_hdr = (void *)(*dump_data)->data; fifo_data = (void *)fifo_hdr->data; fifo_len = size; /* No need to try to read the data if the length is 0 */ if (fifo_len == 0) return; /* Add a TLV for the RXF */ (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RXF); (*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr)); fifo_hdr->fifo_num = cpu_to_le32(fifo_num); fifo_hdr->available_bytes = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, RXF_RD_D_SPACE + offset)); fifo_hdr->wr_ptr = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, RXF_RD_WR_PTR + offset)); fifo_hdr->rd_ptr = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, RXF_RD_RD_PTR + offset)); fifo_hdr->fence_ptr = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, RXF_RD_FENCE_PTR + offset)); fifo_hdr->fence_mode = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, RXF_SET_FENCE_MODE + offset)); /* Lock fence */ iwl_trans_write_prph(fwrt->trans, RXF_SET_FENCE_MODE + offset, 0x1); /* Set fence pointer to the same place like WR pointer */ iwl_trans_write_prph(fwrt->trans, RXF_LD_WR2FENCE + offset, 0x1); /* Set fence offset */ iwl_trans_write_prph(fwrt->trans, RXF_LD_FENCE_OFFSET_ADDR + offset, 0x0); /* Read FIFO */ fifo_len /= sizeof(u32); /* Size in DWORDS */ for (i = 0; i < fifo_len; i++) fifo_data[i] = iwl_trans_read_prph(fwrt->trans, RXF_FIFO_RD_FENCE_INC + offset); *dump_data = iwl_fw_error_next_data(*dump_data); } static void iwl_fwrt_dump_txf(struct iwl_fw_runtime *fwrt, struct iwl_fw_error_dump_data **dump_data, int size, u32 offset, int fifo_num) { struct iwl_fw_error_dump_fifo *fifo_hdr; u32 *fifo_data; u32 fifo_len; int i; fifo_hdr = (void *)(*dump_data)->data; fifo_data = (void *)fifo_hdr->data; fifo_len = size; /* No need to try to read the data if the length is 0 */ if (fifo_len == 0) return; /* Add a TLV for the FIFO */ (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXF); (*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr)); fifo_hdr->fifo_num = cpu_to_le32(fifo_num); fifo_hdr->available_bytes = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_FIFO_ITEM_CNT + offset)); fifo_hdr->wr_ptr = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_WR_PTR + offset)); fifo_hdr->rd_ptr = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_RD_PTR + offset)); fifo_hdr->fence_ptr = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_FENCE_PTR + offset)); fifo_hdr->fence_mode = 
cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_LOCK_FENCE + offset)); /* Set the TXF_READ_MODIFY_ADDR to TXF_WR_PTR */ iwl_trans_write_prph(fwrt->trans, TXF_READ_MODIFY_ADDR + offset, TXF_WR_PTR + offset); /* Dummy-read to advance the read pointer to the head */ iwl_trans_read_prph(fwrt->trans, TXF_READ_MODIFY_DATA + offset); /* Read FIFO */ for (i = 0; i < fifo_len / sizeof(u32); i++) fifo_data[i] = iwl_trans_read_prph(fwrt->trans, TXF_READ_MODIFY_DATA + offset); if (fwrt->sanitize_ops && fwrt->sanitize_ops->frob_txf) fwrt->sanitize_ops->frob_txf(fwrt->sanitize_ctx, fifo_data, fifo_len); *dump_data = iwl_fw_error_next_data(*dump_data); } static void iwl_fw_dump_rxf(struct iwl_fw_runtime *fwrt, struct iwl_fw_error_dump_data **dump_data) { struct iwl_fwrt_shared_mem_cfg *cfg = &fwrt->smem_cfg; IWL_DEBUG_INFO(fwrt, "WRT RX FIFO dump\n"); if (!iwl_trans_grab_nic_access(fwrt->trans)) return; if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_RXF)) { /* Pull RXF1 */ iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->lmac[0].rxfifo1_size, 0, 0); /* Pull RXF2 */ iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->rxfifo2_size, RXF_DIFF_FROM_PREV + fwrt->trans->trans_cfg->umac_prph_offset, 1); /* Pull LMAC2 RXF1 */ if (fwrt->smem_cfg.num_lmacs > 1) iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->lmac[1].rxfifo1_size, LMAC2_PRPH_OFFSET, 2); } iwl_trans_release_nic_access(fwrt->trans); } static void iwl_fw_dump_txf(struct iwl_fw_runtime *fwrt, struct iwl_fw_error_dump_data **dump_data) { struct iwl_fw_error_dump_fifo *fifo_hdr; struct iwl_fwrt_shared_mem_cfg *cfg = &fwrt->smem_cfg; u32 *fifo_data; u32 fifo_len; int i, j; IWL_DEBUG_INFO(fwrt, "WRT TX FIFO dump\n"); if (!iwl_trans_grab_nic_access(fwrt->trans)) return; if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_TXF)) { /* Pull TXF data from LMAC1 */ for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries; i++) { /* Mark the number of TXF we're pulling now */ iwl_trans_write_prph(fwrt->trans, TXF_LARC_NUM, i); iwl_fwrt_dump_txf(fwrt, dump_data, cfg->lmac[0].txfifo_size[i], 0, i); } /* Pull TXF data from LMAC2 */ if (fwrt->smem_cfg.num_lmacs > 1) { for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries; i++) { /* Mark the number of TXF we're pulling now */ iwl_trans_write_prph(fwrt->trans, TXF_LARC_NUM + LMAC2_PRPH_OFFSET, i); iwl_fwrt_dump_txf(fwrt, dump_data, cfg->lmac[1].txfifo_size[i], LMAC2_PRPH_OFFSET, i + cfg->num_txfifo_entries); } } } if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_INTERNAL_TXF) && fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) { /* Pull UMAC internal TXF data from all TXFs */ for (i = 0; i < ARRAY_SIZE(fwrt->smem_cfg.internal_txfifo_size); i++) { fifo_hdr = (void *)(*dump_data)->data; fifo_data = (void *)fifo_hdr->data; fifo_len = fwrt->smem_cfg.internal_txfifo_size[i]; /* No need to try to read the data if the length is 0 */ if (fifo_len == 0) continue; /* Add a TLV for the internal FIFOs */ (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_INTERNAL_TXF); (*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr)); fifo_hdr->fifo_num = cpu_to_le32(i); /* Mark the number of TXF we're pulling now */ iwl_trans_write_prph(fwrt->trans, TXF_CPU2_NUM, i + fwrt->smem_cfg.num_txfifo_entries); fifo_hdr->available_bytes = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_CPU2_FIFO_ITEM_CNT)); fifo_hdr->wr_ptr = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_CPU2_WR_PTR)); fifo_hdr->rd_ptr = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_CPU2_RD_PTR)); fifo_hdr->fence_ptr = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_CPU2_FENCE_PTR)); 
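			/* fence_mode is the last of the FIFO state
			 * registers snapshotted before the FIFO contents
			 * are drained below
			 */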
fifo_hdr->fence_mode = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_CPU2_LOCK_FENCE)); /* Set TXF_CPU2_READ_MODIFY_ADDR to TXF_CPU2_WR_PTR */ iwl_trans_write_prph(fwrt->trans, TXF_CPU2_READ_MODIFY_ADDR, TXF_CPU2_WR_PTR); /* Dummy-read to advance the read pointer to head */ iwl_trans_read_prph(fwrt->trans, TXF_CPU2_READ_MODIFY_DATA); /* Read FIFO */ fifo_len /= sizeof(u32); /* Size in DWORDS */ for (j = 0; j < fifo_len; j++) fifo_data[j] = iwl_trans_read_prph(fwrt->trans, TXF_CPU2_READ_MODIFY_DATA); *dump_data = iwl_fw_error_next_data(*dump_data); } } iwl_trans_release_nic_access(fwrt->trans); } struct iwl_prph_range { u32 start, end; }; static const struct iwl_prph_range iwl_prph_dump_addr_comm[] = { { .start = 0x00a00000, .end = 0x00a00000 }, { .start = 0x00a0000c, .end = 0x00a00024 }, { .start = 0x00a0002c, .end = 0x00a0003c }, { .start = 0x00a00410, .end = 0x00a00418 }, { .start = 0x00a00420, .end = 0x00a00420 }, { .start = 0x00a00428, .end = 0x00a00428 }, { .start = 0x00a00430, .end = 0x00a0043c }, { .start = 0x00a00444, .end = 0x00a00444 }, { .start = 0x00a004c0, .end = 0x00a004cc }, { .start = 0x00a004d8, .end = 0x00a004d8 }, { .start = 0x00a004e0, .end = 0x00a004f0 }, { .start = 0x00a00840, .end = 0x00a00840 }, { .start = 0x00a00850, .end = 0x00a00858 }, { .start = 0x00a01004, .end = 0x00a01008 }, { .start = 0x00a01010, .end = 0x00a01010 }, { .start = 0x00a01018, .end = 0x00a01018 }, { .start = 0x00a01024, .end = 0x00a01024 }, { .start = 0x00a0102c, .end = 0x00a01034 }, { .start = 0x00a0103c, .end = 0x00a01040 }, { .start = 0x00a01048, .end = 0x00a01094 }, { .start = 0x00a01c00, .end = 0x00a01c20 }, { .start = 0x00a01c58, .end = 0x00a01c58 }, { .start = 0x00a01c7c, .end = 0x00a01c7c }, { .start = 0x00a01c28, .end = 0x00a01c54 }, { .start = 0x00a01c5c, .end = 0x00a01c5c }, { .start = 0x00a01c60, .end = 0x00a01cdc }, { .start = 0x00a01ce0, .end = 0x00a01d0c }, { .start = 0x00a01d18, .end = 0x00a01d20 }, { .start = 0x00a01d2c, .end = 0x00a01d30 }, { .start = 0x00a01d40, .end = 0x00a01d5c }, { .start = 0x00a01d80, .end = 0x00a01d80 }, { .start = 0x00a01d98, .end = 0x00a01d9c }, { .start = 0x00a01da8, .end = 0x00a01da8 }, { .start = 0x00a01db8, .end = 0x00a01df4 }, { .start = 0x00a01dc0, .end = 0x00a01dfc }, { .start = 0x00a01e00, .end = 0x00a01e2c }, { .start = 0x00a01e40, .end = 0x00a01e60 }, { .start = 0x00a01e68, .end = 0x00a01e6c }, { .start = 0x00a01e74, .end = 0x00a01e74 }, { .start = 0x00a01e84, .end = 0x00a01e90 }, { .start = 0x00a01e9c, .end = 0x00a01ec4 }, { .start = 0x00a01ed0, .end = 0x00a01ee0 }, { .start = 0x00a01f00, .end = 0x00a01f1c }, { .start = 0x00a01f44, .end = 0x00a01ffc }, { .start = 0x00a02000, .end = 0x00a02048 }, { .start = 0x00a02068, .end = 0x00a020f0 }, { .start = 0x00a02100, .end = 0x00a02118 }, { .start = 0x00a02140, .end = 0x00a0214c }, { .start = 0x00a02168, .end = 0x00a0218c }, { .start = 0x00a021c0, .end = 0x00a021c0 }, { .start = 0x00a02400, .end = 0x00a02410 }, { .start = 0x00a02418, .end = 0x00a02420 }, { .start = 0x00a02428, .end = 0x00a0242c }, { .start = 0x00a02434, .end = 0x00a02434 }, { .start = 0x00a02440, .end = 0x00a02460 }, { .start = 0x00a02468, .end = 0x00a024b0 }, { .start = 0x00a024c8, .end = 0x00a024cc }, { .start = 0x00a02500, .end = 0x00a02504 }, { .start = 0x00a0250c, .end = 0x00a02510 }, { .start = 0x00a02540, .end = 0x00a02554 }, { .start = 0x00a02580, .end = 0x00a025f4 }, { .start = 0x00a02600, .end = 0x00a0260c }, { .start = 0x00a02648, .end = 0x00a02650 }, { .start = 0x00a02680, .end = 0x00a02680 }, { .start = 0x00a026c0, 
.end = 0x00a026d0 }, { .start = 0x00a02700, .end = 0x00a0270c }, { .start = 0x00a02804, .end = 0x00a02804 }, { .start = 0x00a02818, .end = 0x00a0281c }, { .start = 0x00a02c00, .end = 0x00a02db4 }, { .start = 0x00a02df4, .end = 0x00a02fb0 }, { .start = 0x00a03000, .end = 0x00a03014 }, { .start = 0x00a0301c, .end = 0x00a0302c }, { .start = 0x00a03034, .end = 0x00a03038 }, { .start = 0x00a03040, .end = 0x00a03048 }, { .start = 0x00a03060, .end = 0x00a03068 }, { .start = 0x00a03070, .end = 0x00a03074 }, { .start = 0x00a0307c, .end = 0x00a0307c }, { .start = 0x00a03080, .end = 0x00a03084 }, { .start = 0x00a0308c, .end = 0x00a03090 }, { .start = 0x00a03098, .end = 0x00a03098 }, { .start = 0x00a030a0, .end = 0x00a030a0 }, { .start = 0x00a030a8, .end = 0x00a030b4 }, { .start = 0x00a030bc, .end = 0x00a030bc }, { .start = 0x00a030c0, .end = 0x00a0312c }, { .start = 0x00a03c00, .end = 0x00a03c5c }, { .start = 0x00a04400, .end = 0x00a04454 }, { .start = 0x00a04460, .end = 0x00a04474 }, { .start = 0x00a044c0, .end = 0x00a044ec }, { .start = 0x00a04500, .end = 0x00a04504 }, { .start = 0x00a04510, .end = 0x00a04538 }, { .start = 0x00a04540, .end = 0x00a04548 }, { .start = 0x00a04560, .end = 0x00a0457c }, { .start = 0x00a04590, .end = 0x00a04598 }, { .start = 0x00a045c0, .end = 0x00a045f4 }, }; static const struct iwl_prph_range iwl_prph_dump_addr_9000[] = { { .start = 0x00a05c00, .end = 0x00a05c18 }, { .start = 0x00a05400, .end = 0x00a056e8 }, { .start = 0x00a08000, .end = 0x00a098bc }, { .start = 0x00a02400, .end = 0x00a02758 }, { .start = 0x00a04764, .end = 0x00a0476c }, { .start = 0x00a04770, .end = 0x00a04774 }, { .start = 0x00a04620, .end = 0x00a04624 }, }; static const struct iwl_prph_range iwl_prph_dump_addr_22000[] = { { .start = 0x00a00000, .end = 0x00a00000 }, { .start = 0x00a0000c, .end = 0x00a00024 }, { .start = 0x00a0002c, .end = 0x00a00034 }, { .start = 0x00a0003c, .end = 0x00a0003c }, { .start = 0x00a00410, .end = 0x00a00418 }, { .start = 0x00a00420, .end = 0x00a00420 }, { .start = 0x00a00428, .end = 0x00a00428 }, { .start = 0x00a00430, .end = 0x00a0043c }, { .start = 0x00a00444, .end = 0x00a00444 }, { .start = 0x00a00840, .end = 0x00a00840 }, { .start = 0x00a00850, .end = 0x00a00858 }, { .start = 0x00a01004, .end = 0x00a01008 }, { .start = 0x00a01010, .end = 0x00a01010 }, { .start = 0x00a01018, .end = 0x00a01018 }, { .start = 0x00a01024, .end = 0x00a01024 }, { .start = 0x00a0102c, .end = 0x00a01034 }, { .start = 0x00a0103c, .end = 0x00a01040 }, { .start = 0x00a01048, .end = 0x00a01050 }, { .start = 0x00a01058, .end = 0x00a01058 }, { .start = 0x00a01060, .end = 0x00a01070 }, { .start = 0x00a0108c, .end = 0x00a0108c }, { .start = 0x00a01c20, .end = 0x00a01c28 }, { .start = 0x00a01d10, .end = 0x00a01d10 }, { .start = 0x00a01e28, .end = 0x00a01e2c }, { .start = 0x00a01e60, .end = 0x00a01e60 }, { .start = 0x00a01e80, .end = 0x00a01e80 }, { .start = 0x00a01ea0, .end = 0x00a01ea0 }, { .start = 0x00a02000, .end = 0x00a0201c }, { .start = 0x00a02024, .end = 0x00a02024 }, { .start = 0x00a02040, .end = 0x00a02048 }, { .start = 0x00a020c0, .end = 0x00a020e0 }, { .start = 0x00a02400, .end = 0x00a02404 }, { .start = 0x00a0240c, .end = 0x00a02414 }, { .start = 0x00a0241c, .end = 0x00a0243c }, { .start = 0x00a02448, .end = 0x00a024bc }, { .start = 0x00a024c4, .end = 0x00a024cc }, { .start = 0x00a02508, .end = 0x00a02508 }, { .start = 0x00a02510, .end = 0x00a02514 }, { .start = 0x00a0251c, .end = 0x00a0251c }, { .start = 0x00a0252c, .end = 0x00a0255c }, { .start = 0x00a02564, .end = 0x00a025a0 }, { .start 
= 0x00a025a8, .end = 0x00a025b4 }, { .start = 0x00a025c0, .end = 0x00a025c0 }, { .start = 0x00a025e8, .end = 0x00a025f4 }, { .start = 0x00a02c08, .end = 0x00a02c18 }, { .start = 0x00a02c2c, .end = 0x00a02c38 }, { .start = 0x00a02c68, .end = 0x00a02c78 }, { .start = 0x00a03000, .end = 0x00a03000 }, { .start = 0x00a03010, .end = 0x00a03014 }, { .start = 0x00a0301c, .end = 0x00a0302c }, { .start = 0x00a03034, .end = 0x00a03038 }, { .start = 0x00a03040, .end = 0x00a03044 }, { .start = 0x00a03060, .end = 0x00a03068 }, { .start = 0x00a03070, .end = 0x00a03070 }, { .start = 0x00a0307c, .end = 0x00a03084 }, { .start = 0x00a0308c, .end = 0x00a03090 }, { .start = 0x00a03098, .end = 0x00a03098 }, { .start = 0x00a030a0, .end = 0x00a030a0 }, { .start = 0x00a030a8, .end = 0x00a030b4 }, { .start = 0x00a030bc, .end = 0x00a030c0 }, { .start = 0x00a030c8, .end = 0x00a030f4 }, { .start = 0x00a03100, .end = 0x00a0312c }, { .start = 0x00a03c00, .end = 0x00a03c5c }, { .start = 0x00a04400, .end = 0x00a04454 }, { .start = 0x00a04460, .end = 0x00a04474 }, { .start = 0x00a044c0, .end = 0x00a044ec }, { .start = 0x00a04500, .end = 0x00a04504 }, { .start = 0x00a04510, .end = 0x00a04538 }, { .start = 0x00a04540, .end = 0x00a04548 }, { .start = 0x00a04560, .end = 0x00a04560 }, { .start = 0x00a04570, .end = 0x00a0457c }, { .start = 0x00a04590, .end = 0x00a04590 }, { .start = 0x00a04598, .end = 0x00a04598 }, { .start = 0x00a045c0, .end = 0x00a045f4 }, { .start = 0x00a05c18, .end = 0x00a05c1c }, { .start = 0x00a0c000, .end = 0x00a0c018 }, { .start = 0x00a0c020, .end = 0x00a0c028 }, { .start = 0x00a0c038, .end = 0x00a0c094 }, { .start = 0x00a0c0c0, .end = 0x00a0c104 }, { .start = 0x00a0c10c, .end = 0x00a0c118 }, { .start = 0x00a0c150, .end = 0x00a0c174 }, { .start = 0x00a0c17c, .end = 0x00a0c188 }, { .start = 0x00a0c190, .end = 0x00a0c198 }, { .start = 0x00a0c1a0, .end = 0x00a0c1a8 }, { .start = 0x00a0c1b0, .end = 0x00a0c1b8 }, }; static const struct iwl_prph_range iwl_prph_dump_addr_ax210[] = { { .start = 0x00d03c00, .end = 0x00d03c64 }, { .start = 0x00d05c18, .end = 0x00d05c1c }, { .start = 0x00d0c000, .end = 0x00d0c174 }, }; static void iwl_read_prph_block(struct iwl_trans *trans, u32 start, u32 len_bytes, __le32 *data) { u32 i; for (i = 0; i < len_bytes; i += 4) *data++ = cpu_to_le32(iwl_read_prph_no_grab(trans, start + i)); } static void iwl_dump_prph(struct iwl_fw_runtime *fwrt, const struct iwl_prph_range *iwl_prph_dump_addr, u32 range_len, void *ptr) { struct iwl_fw_error_dump_prph *prph; struct iwl_trans *trans = fwrt->trans; struct iwl_fw_error_dump_data **data = (struct iwl_fw_error_dump_data **)ptr; u32 i; if (!data) return; IWL_DEBUG_INFO(trans, "WRT PRPH dump\n"); if (!iwl_trans_grab_nic_access(trans)) return; for (i = 0; i < range_len; i++) { /* The range includes both boundaries */ int num_bytes_in_chunk = iwl_prph_dump_addr[i].end - iwl_prph_dump_addr[i].start + 4; (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PRPH); (*data)->len = cpu_to_le32(sizeof(*prph) + num_bytes_in_chunk); prph = (void *)(*data)->data; prph->prph_start = cpu_to_le32(iwl_prph_dump_addr[i].start); iwl_read_prph_block(trans, iwl_prph_dump_addr[i].start, /* our range is inclusive, hence + 4 */ iwl_prph_dump_addr[i].end - iwl_prph_dump_addr[i].start + 4, (void *)prph->data); *data = iwl_fw_error_next_data(*data); } iwl_trans_release_nic_access(trans); } /* * alloc_sgtable - allocates scallerlist table in the given size, * fills it with pages and returns it * @size: the size (in bytes) of the table */ static struct scatterlist 
*alloc_sgtable(int size) { int alloc_size, nents, i; struct page *new_page; struct scatterlist *iter; struct scatterlist *table; nents = DIV_ROUND_UP(size, PAGE_SIZE); table = kcalloc(nents, sizeof(*table), GFP_KERNEL); if (!table) return NULL; sg_init_table(table, nents); iter = table; for_each_sg(table, iter, sg_nents(table), i) { new_page = alloc_page(GFP_KERNEL); if (!new_page) { /* release all previous allocated pages in the table */ iter = table; for_each_sg(table, iter, sg_nents(table), i) { new_page = sg_page(iter); if (new_page) __free_page(new_page); } kfree(table); return NULL; } alloc_size = min_t(int, size, PAGE_SIZE); size -= PAGE_SIZE; sg_set_page(iter, new_page, alloc_size, 0); } return table; } static void iwl_fw_get_prph_len(struct iwl_fw_runtime *fwrt, const struct iwl_prph_range *iwl_prph_dump_addr, u32 range_len, void *ptr) { u32 *prph_len = (u32 *)ptr; int i, num_bytes_in_chunk; if (!prph_len) return; for (i = 0; i < range_len; i++) { /* The range includes both boundaries */ num_bytes_in_chunk = iwl_prph_dump_addr[i].end - iwl_prph_dump_addr[i].start + 4; *prph_len += sizeof(struct iwl_fw_error_dump_data) + sizeof(struct iwl_fw_error_dump_prph) + num_bytes_in_chunk; } } static void iwl_fw_prph_handler(struct iwl_fw_runtime *fwrt, void *ptr, void (*handler)(struct iwl_fw_runtime *, const struct iwl_prph_range *, u32, void *)) { u32 range_len; if (fwrt->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { range_len = ARRAY_SIZE(iwl_prph_dump_addr_ax210); handler(fwrt, iwl_prph_dump_addr_ax210, range_len, ptr); } else if (fwrt->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000) { range_len = ARRAY_SIZE(iwl_prph_dump_addr_22000); handler(fwrt, iwl_prph_dump_addr_22000, range_len, ptr); } else { range_len = ARRAY_SIZE(iwl_prph_dump_addr_comm); handler(fwrt, iwl_prph_dump_addr_comm, range_len, ptr); if (fwrt->trans->trans_cfg->mq_rx_supported) { range_len = ARRAY_SIZE(iwl_prph_dump_addr_9000); handler(fwrt, iwl_prph_dump_addr_9000, range_len, ptr); } } } static void iwl_fw_dump_mem(struct iwl_fw_runtime *fwrt, struct iwl_fw_error_dump_data **dump_data, u32 len, u32 ofs, u32 type) { struct iwl_fw_error_dump_mem *dump_mem; if (!len) return; (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); (*dump_data)->len = cpu_to_le32(len + sizeof(*dump_mem)); dump_mem = (void *)(*dump_data)->data; dump_mem->type = cpu_to_le32(type); dump_mem->offset = cpu_to_le32(ofs); iwl_trans_read_mem_bytes(fwrt->trans, ofs, dump_mem->data, len); *dump_data = iwl_fw_error_next_data(*dump_data); if (fwrt->sanitize_ops && fwrt->sanitize_ops->frob_mem) fwrt->sanitize_ops->frob_mem(fwrt->sanitize_ctx, ofs, dump_mem->data, len); IWL_DEBUG_INFO(fwrt, "WRT memory dump. 
Type=%u\n", dump_mem->type); } #define ADD_LEN(len, item_len, const_len) \ do {size_t item = item_len; len += (!!item) * const_len + item; } \ while (0) static int iwl_fw_rxf_len(struct iwl_fw_runtime *fwrt, struct iwl_fwrt_shared_mem_cfg *mem_cfg) { size_t hdr_len = sizeof(struct iwl_fw_error_dump_data) + sizeof(struct iwl_fw_error_dump_fifo); u32 fifo_len = 0; int i; if (!iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_RXF)) return 0; /* Count RXF2 size */ ADD_LEN(fifo_len, mem_cfg->rxfifo2_size, hdr_len); /* Count RXF1 sizes */ if (WARN_ON(mem_cfg->num_lmacs > MAX_NUM_LMAC)) mem_cfg->num_lmacs = MAX_NUM_LMAC; for (i = 0; i < mem_cfg->num_lmacs; i++) ADD_LEN(fifo_len, mem_cfg->lmac[i].rxfifo1_size, hdr_len); return fifo_len; } static int iwl_fw_txf_len(struct iwl_fw_runtime *fwrt, struct iwl_fwrt_shared_mem_cfg *mem_cfg) { size_t hdr_len = sizeof(struct iwl_fw_error_dump_data) + sizeof(struct iwl_fw_error_dump_fifo); u32 fifo_len = 0; int i; if (!iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_TXF)) goto dump_internal_txf; /* Count TXF sizes */ if (WARN_ON(mem_cfg->num_lmacs > MAX_NUM_LMAC)) mem_cfg->num_lmacs = MAX_NUM_LMAC; for (i = 0; i < mem_cfg->num_lmacs; i++) { int j; for (j = 0; j < mem_cfg->num_txfifo_entries; j++) ADD_LEN(fifo_len, mem_cfg->lmac[i].txfifo_size[j], hdr_len); } dump_internal_txf: if (!(iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_INTERNAL_TXF) && fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG))) goto out; for (i = 0; i < ARRAY_SIZE(mem_cfg->internal_txfifo_size); i++) ADD_LEN(fifo_len, mem_cfg->internal_txfifo_size[i], hdr_len); out: return fifo_len; } static void iwl_dump_paging(struct iwl_fw_runtime *fwrt, struct iwl_fw_error_dump_data **data) { int i; IWL_DEBUG_INFO(fwrt, "WRT paging dump\n"); for (i = 1; i < fwrt->num_of_paging_blk + 1; i++) { struct iwl_fw_error_dump_paging *paging; struct page *pages = fwrt->fw_paging_db[i].fw_paging_block; dma_addr_t addr = fwrt->fw_paging_db[i].fw_paging_phys; (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING); (*data)->len = cpu_to_le32(sizeof(*paging) + PAGING_BLOCK_SIZE); paging = (void *)(*data)->data; paging->index = cpu_to_le32(i); dma_sync_single_for_cpu(fwrt->trans->dev, addr, PAGING_BLOCK_SIZE, DMA_BIDIRECTIONAL); memcpy(paging->data, page_address(pages), PAGING_BLOCK_SIZE); dma_sync_single_for_device(fwrt->trans->dev, addr, PAGING_BLOCK_SIZE, DMA_BIDIRECTIONAL); (*data) = iwl_fw_error_next_data(*data); if (fwrt->sanitize_ops && fwrt->sanitize_ops->frob_mem) fwrt->sanitize_ops->frob_mem(fwrt->sanitize_ctx, fwrt->fw_paging_db[i].fw_offs, paging->data, PAGING_BLOCK_SIZE); } } static struct iwl_fw_error_dump_file * iwl_fw_error_dump_file(struct iwl_fw_runtime *fwrt, struct iwl_fw_dump_ptrs *fw_error_dump, struct iwl_fwrt_dump_data *data) { struct iwl_fw_error_dump_file *dump_file; struct iwl_fw_error_dump_data *dump_data; struct iwl_fw_error_dump_info *dump_info; struct iwl_fw_error_dump_smem_cfg *dump_smem_cfg; struct iwl_fw_error_dump_trigger_desc *dump_trig; u32 sram_len, sram_ofs; const struct iwl_fw_dbg_mem_seg_tlv *fw_mem = fwrt->fw->dbg.mem_tlv; struct iwl_fwrt_shared_mem_cfg *mem_cfg = &fwrt->smem_cfg; u32 file_len, fifo_len = 0, prph_len = 0, radio_len = 0; u32 smem_len = fwrt->fw->dbg.n_mem_tlv ? 0 : fwrt->trans->cfg->smem_len; u32 sram2_len = fwrt->fw->dbg.n_mem_tlv ? 
0 : fwrt->trans->cfg->dccm2_len; int i; /* SRAM - include stack CCM if driver knows the values for it */ if (!fwrt->trans->cfg->dccm_offset || !fwrt->trans->cfg->dccm_len) { const struct fw_img *img; if (fwrt->cur_fw_img >= IWL_UCODE_TYPE_MAX) return NULL; img = &fwrt->fw->img[fwrt->cur_fw_img]; sram_ofs = img->sec[IWL_UCODE_SECTION_DATA].offset; sram_len = img->sec[IWL_UCODE_SECTION_DATA].len; } else { sram_ofs = fwrt->trans->cfg->dccm_offset; sram_len = fwrt->trans->cfg->dccm_len; } /* reading RXF/TXF sizes */ if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) { fifo_len = iwl_fw_rxf_len(fwrt, mem_cfg); fifo_len += iwl_fw_txf_len(fwrt, mem_cfg); /* Make room for PRPH registers */ if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_PRPH)) iwl_fw_prph_handler(fwrt, &prph_len, iwl_fw_get_prph_len); if (fwrt->trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000 && iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_RADIO_REG)) radio_len = sizeof(*dump_data) + RADIO_REG_MAX_READ; } file_len = sizeof(*dump_file) + fifo_len + prph_len + radio_len; if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_DEV_FW_INFO)) file_len += sizeof(*dump_data) + sizeof(*dump_info); if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_MEM_CFG)) file_len += sizeof(*dump_data) + sizeof(*dump_smem_cfg); if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_MEM)) { size_t hdr_len = sizeof(*dump_data) + sizeof(struct iwl_fw_error_dump_mem); /* Dump SRAM only if no mem_tlvs */ if (!fwrt->fw->dbg.n_mem_tlv) ADD_LEN(file_len, sram_len, hdr_len); /* Make room for all mem types that exist */ ADD_LEN(file_len, smem_len, hdr_len); ADD_LEN(file_len, sram2_len, hdr_len); for (i = 0; i < fwrt->fw->dbg.n_mem_tlv; i++) ADD_LEN(file_len, le32_to_cpu(fw_mem[i].len), hdr_len); } /* Make room for fw's virtual image pages, if it exists */ if (iwl_fw_dbg_is_paging_enabled(fwrt)) file_len += fwrt->num_of_paging_blk * (sizeof(*dump_data) + sizeof(struct iwl_fw_error_dump_paging) + PAGING_BLOCK_SIZE); if (iwl_fw_dbg_is_d3_debug_enabled(fwrt) && fwrt->dump.d3_debug_data) { file_len += sizeof(*dump_data) + fwrt->trans->cfg->d3_debug_data_length * 2; } /* If we only want a monitor dump, reset the file length */ if (data->monitor_only) { file_len = sizeof(*dump_file) + sizeof(*dump_data) * 2 + sizeof(*dump_info) + sizeof(*dump_smem_cfg); } if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_ERROR_INFO) && data->desc) file_len += sizeof(*dump_data) + sizeof(*dump_trig) + data->desc->len; dump_file = vzalloc(file_len); if (!dump_file) return NULL; fw_error_dump->fwrt_ptr = dump_file; dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER); dump_data = (void *)dump_file->data; if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_DEV_FW_INFO)) { dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_DEV_FW_INFO); dump_data->len = cpu_to_le32(sizeof(*dump_info)); dump_info = (void *)dump_data->data; dump_info->hw_type = cpu_to_le32(CSR_HW_REV_TYPE(fwrt->trans->hw_rev)); dump_info->hw_step = cpu_to_le32(fwrt->trans->hw_rev_step); memcpy(dump_info->fw_human_readable, fwrt->fw->human_readable, sizeof(dump_info->fw_human_readable)); strncpy(dump_info->dev_human_readable, fwrt->trans->name, sizeof(dump_info->dev_human_readable) - 1); #if defined(__linux__) strncpy(dump_info->bus_human_readable, fwrt->dev->bus->name, sizeof(dump_info->bus_human_readable) - 1); #elif defined(__FreeBSD__) /* XXX TODO */ strncpy(dump_info->bus_human_readable, "", sizeof(dump_info->bus_human_readable) - 1); #endif dump_info->num_of_lmacs = fwrt->smem_cfg.num_lmacs; dump_info->lmac_err_id[0] = 
cpu_to_le32(fwrt->dump.lmac_err_id[0]); if (fwrt->smem_cfg.num_lmacs > 1) dump_info->lmac_err_id[1] = cpu_to_le32(fwrt->dump.lmac_err_id[1]); dump_info->umac_err_id = cpu_to_le32(fwrt->dump.umac_err_id); dump_data = iwl_fw_error_next_data(dump_data); } if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_MEM_CFG)) { /* Dump shared memory configuration */ dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_CFG); dump_data->len = cpu_to_le32(sizeof(*dump_smem_cfg)); dump_smem_cfg = (void *)dump_data->data; dump_smem_cfg->num_lmacs = cpu_to_le32(mem_cfg->num_lmacs); dump_smem_cfg->num_txfifo_entries = cpu_to_le32(mem_cfg->num_txfifo_entries); for (i = 0; i < MAX_NUM_LMAC; i++) { int j; u32 *txf_size = mem_cfg->lmac[i].txfifo_size; for (j = 0; j < TX_FIFO_MAX_NUM; j++) dump_smem_cfg->lmac[i].txfifo_size[j] = cpu_to_le32(txf_size[j]); dump_smem_cfg->lmac[i].rxfifo1_size = cpu_to_le32(mem_cfg->lmac[i].rxfifo1_size); } dump_smem_cfg->rxfifo2_size = cpu_to_le32(mem_cfg->rxfifo2_size); dump_smem_cfg->internal_txfifo_addr = cpu_to_le32(mem_cfg->internal_txfifo_addr); for (i = 0; i < TX_FIFO_INTERNAL_MAX_NUM; i++) { dump_smem_cfg->internal_txfifo_size[i] = cpu_to_le32(mem_cfg->internal_txfifo_size[i]); } dump_data = iwl_fw_error_next_data(dump_data); } /* We only dump the FIFOs if the FW is in error state */ if (fifo_len) { iwl_fw_dump_rxf(fwrt, &dump_data); iwl_fw_dump_txf(fwrt, &dump_data); } if (radio_len) iwl_read_radio_regs(fwrt, &dump_data); if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_ERROR_INFO) && data->desc) { dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_ERROR_INFO); dump_data->len = cpu_to_le32(sizeof(*dump_trig) + data->desc->len); dump_trig = (void *)dump_data->data; memcpy(dump_trig, &data->desc->trig_desc, sizeof(*dump_trig) + data->desc->len); dump_data = iwl_fw_error_next_data(dump_data); } /* In case we only want monitor dump, skip to dump trasport data */ if (data->monitor_only) goto out; if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_MEM)) { const struct iwl_fw_dbg_mem_seg_tlv *fw_dbg_mem = fwrt->fw->dbg.mem_tlv; if (!fwrt->fw->dbg.n_mem_tlv) iwl_fw_dump_mem(fwrt, &dump_data, sram_len, sram_ofs, IWL_FW_ERROR_DUMP_MEM_SRAM); for (i = 0; i < fwrt->fw->dbg.n_mem_tlv; i++) { u32 len = le32_to_cpu(fw_dbg_mem[i].len); u32 ofs = le32_to_cpu(fw_dbg_mem[i].ofs); iwl_fw_dump_mem(fwrt, &dump_data, len, ofs, le32_to_cpu(fw_dbg_mem[i].data_type)); } iwl_fw_dump_mem(fwrt, &dump_data, smem_len, fwrt->trans->cfg->smem_offset, IWL_FW_ERROR_DUMP_MEM_SMEM); iwl_fw_dump_mem(fwrt, &dump_data, sram2_len, fwrt->trans->cfg->dccm2_offset, IWL_FW_ERROR_DUMP_MEM_SRAM); } if (iwl_fw_dbg_is_d3_debug_enabled(fwrt) && fwrt->dump.d3_debug_data) { u32 addr = fwrt->trans->cfg->d3_debug_data_base_addr; size_t data_size = fwrt->trans->cfg->d3_debug_data_length; dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_D3_DEBUG_DATA); dump_data->len = cpu_to_le32(data_size * 2); memcpy(dump_data->data, fwrt->dump.d3_debug_data, data_size); kfree(fwrt->dump.d3_debug_data); fwrt->dump.d3_debug_data = NULL; iwl_trans_read_mem_bytes(fwrt->trans, addr, dump_data->data + data_size, data_size); if (fwrt->sanitize_ops && fwrt->sanitize_ops->frob_mem) fwrt->sanitize_ops->frob_mem(fwrt->sanitize_ctx, addr, dump_data->data + data_size, data_size); dump_data = iwl_fw_error_next_data(dump_data); } /* Dump fw's virtual image */ if (iwl_fw_dbg_is_paging_enabled(fwrt)) iwl_dump_paging(fwrt, &dump_data); if (prph_len) iwl_fw_prph_handler(fwrt, &dump_data, iwl_dump_prph); out: dump_file->file_len = cpu_to_le32(file_len); return dump_file; } /** 
* struct iwl_dump_ini_region_data - region data * @reg_tlv: region TLV * @dump_data: dump data */ struct iwl_dump_ini_region_data { struct iwl_ucode_tlv *reg_tlv; struct iwl_fwrt_dump_data *dump_data; }; static int iwl_dump_ini_prph_mac_iter(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *range_ptr, u32 range_len, int idx) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; struct iwl_fw_ini_error_dump_range *range = range_ptr; __le32 *val = range->data; u32 prph_val; u32 addr = le32_to_cpu(reg->addrs[idx]) + le32_to_cpu(reg->dev_addr.offset); int i; range->internal_base_addr = cpu_to_le32(addr); range->range_data_size = reg->dev_addr.size; for (i = 0; i < le32_to_cpu(reg->dev_addr.size); i += 4) { prph_val = iwl_read_prph(fwrt->trans, addr + i); if (prph_val == 0x5a5a5a5a) return -EBUSY; *val++ = cpu_to_le32(prph_val); } return sizeof(*range) + le32_to_cpu(range->range_data_size); } static int iwl_dump_ini_prph_phy_iter(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *range_ptr, u32 range_len, int idx) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; struct iwl_fw_ini_error_dump_range *range = range_ptr; __le32 *val = range->data; u32 indirect_wr_addr = WMAL_INDRCT_RD_CMD1; u32 indirect_rd_addr = WMAL_MRSPF_1; u32 prph_val; u32 addr = le32_to_cpu(reg->addrs[idx]); u32 dphy_state; u32 dphy_addr; int i; range->internal_base_addr = cpu_to_le32(addr); range->range_data_size = reg->dev_addr.size; if (fwrt->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) indirect_wr_addr = WMAL_INDRCT_CMD1; indirect_wr_addr += le32_to_cpu(reg->dev_addr.offset); indirect_rd_addr += le32_to_cpu(reg->dev_addr.offset); if (!iwl_trans_grab_nic_access(fwrt->trans)) return -EBUSY; dphy_addr = (reg->dev_addr.offset) ? 
WFPM_LMAC2_PS_CTL_RW : WFPM_LMAC1_PS_CTL_RW; dphy_state = iwl_read_umac_prph_no_grab(fwrt->trans, dphy_addr); for (i = 0; i < le32_to_cpu(reg->dev_addr.size); i += 4) { if (dphy_state == HBUS_TIMEOUT || (dphy_state & WFPM_PS_CTL_RW_PHYRF_PD_FSM_CURSTATE_MSK) != WFPM_PHYRF_STATE_ON) { *val++ = cpu_to_le32(WFPM_DPHY_OFF); continue; } iwl_write_prph_no_grab(fwrt->trans, indirect_wr_addr, WMAL_INDRCT_CMD(addr + i)); prph_val = iwl_read_prph_no_grab(fwrt->trans, indirect_rd_addr); *val++ = cpu_to_le32(prph_val); } iwl_trans_release_nic_access(fwrt->trans); return sizeof(*range) + le32_to_cpu(range->range_data_size); } static int iwl_dump_ini_csr_iter(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *range_ptr, u32 range_len, int idx) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; struct iwl_fw_ini_error_dump_range *range = range_ptr; __le32 *val = range->data; u32 addr = le32_to_cpu(reg->addrs[idx]) + le32_to_cpu(reg->dev_addr.offset); int i; range->internal_base_addr = cpu_to_le32(addr); range->range_data_size = reg->dev_addr.size; for (i = 0; i < le32_to_cpu(reg->dev_addr.size); i += 4) *val++ = cpu_to_le32(iwl_trans_read32(fwrt->trans, addr + i)); return sizeof(*range) + le32_to_cpu(range->range_data_size); } static int iwl_dump_ini_config_iter(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *range_ptr, u32 range_len, int idx) { struct iwl_trans *trans = fwrt->trans; struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; struct iwl_fw_ini_error_dump_range *range = range_ptr; __le32 *val = range->data; u32 addr = le32_to_cpu(reg->addrs[idx]) + le32_to_cpu(reg->dev_addr.offset); int i; /* we shouldn't get here if the trans doesn't have read_config32 */ if (WARN_ON_ONCE(!trans->ops->read_config32)) return -EOPNOTSUPP; range->internal_base_addr = cpu_to_le32(addr); range->range_data_size = reg->dev_addr.size; for (i = 0; i < le32_to_cpu(reg->dev_addr.size); i += 4) { int ret; u32 tmp; ret = trans->ops->read_config32(trans, addr + i, &tmp); if (ret < 0) return ret; *val++ = cpu_to_le32(tmp); } return sizeof(*range) + le32_to_cpu(range->range_data_size); } static int iwl_dump_ini_dev_mem_iter(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *range_ptr, u32 range_len, int idx) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; struct iwl_fw_ini_error_dump_range *range = range_ptr; u32 addr = le32_to_cpu(reg->addrs[idx]) + le32_to_cpu(reg->dev_addr.offset); range->internal_base_addr = cpu_to_le32(addr); range->range_data_size = reg->dev_addr.size; iwl_trans_read_mem_bytes(fwrt->trans, addr, range->data, le32_to_cpu(reg->dev_addr.size)); if (reg->sub_type == IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_HW_SMEM && fwrt->sanitize_ops && fwrt->sanitize_ops->frob_txf) fwrt->sanitize_ops->frob_txf(fwrt->sanitize_ctx, range->data, le32_to_cpu(reg->dev_addr.size)); return sizeof(*range) + le32_to_cpu(range->range_data_size); } static int _iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt, void *range_ptr, u32 range_len, int idx) { struct page *page = fwrt->fw_paging_db[idx].fw_paging_block; struct iwl_fw_ini_error_dump_range *range = range_ptr; dma_addr_t addr = fwrt->fw_paging_db[idx].fw_paging_phys; u32 page_size = fwrt->fw_paging_db[idx].fw_paging_size; range->page_num = cpu_to_le32(idx); range->range_data_size = cpu_to_le32(page_size); dma_sync_single_for_cpu(fwrt->trans->dev, addr, page_size, DMA_BIDIRECTIONAL); memcpy(range->data, page_address(page), page_size); 
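	/* hand the page back to the device once its contents are in the dump */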
dma_sync_single_for_device(fwrt->trans->dev, addr, page_size, DMA_BIDIRECTIONAL); return sizeof(*range) + le32_to_cpu(range->range_data_size); } static int iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *range_ptr, u32 range_len, int idx) { struct iwl_fw_ini_error_dump_range *range; u32 page_size; /* all paged index start from 1 to skip CSS section */ idx++; if (!fwrt->trans->trans_cfg->gen2) return _iwl_dump_ini_paging_iter(fwrt, range_ptr, range_len, idx); range = range_ptr; page_size = fwrt->trans->init_dram.paging[idx].size; range->page_num = cpu_to_le32(idx); range->range_data_size = cpu_to_le32(page_size); memcpy(range->data, fwrt->trans->init_dram.paging[idx].block, page_size); return sizeof(*range) + le32_to_cpu(range->range_data_size); } static int iwl_dump_ini_mon_dram_iter(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *range_ptr, u32 range_len, int idx) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; struct iwl_fw_ini_error_dump_range *range = range_ptr; struct iwl_dram_data *frag; u32 alloc_id = le32_to_cpu(reg->dram_alloc_id); frag = &fwrt->trans->dbg.fw_mon_ini[alloc_id].frags[idx]; range->dram_base_addr = cpu_to_le64(frag->physical); range->range_data_size = cpu_to_le32(frag->size); memcpy(range->data, frag->block, frag->size); return sizeof(*range) + le32_to_cpu(range->range_data_size); } static int iwl_dump_ini_mon_smem_iter(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *range_ptr, u32 range_len, int idx) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; struct iwl_fw_ini_error_dump_range *range = range_ptr; u32 addr = le32_to_cpu(reg->internal_buffer.base_addr); range->internal_base_addr = cpu_to_le32(addr); range->range_data_size = reg->internal_buffer.size; iwl_trans_read_mem_bytes(fwrt->trans, addr, range->data, le32_to_cpu(reg->internal_buffer.size)); return sizeof(*range) + le32_to_cpu(range->range_data_size); } static bool iwl_ini_txf_iter(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, int idx) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; struct iwl_txf_iter_data *iter = &fwrt->dump.txf_iter_data; struct iwl_fwrt_shared_mem_cfg *cfg = &fwrt->smem_cfg; int txf_num = cfg->num_txfifo_entries; int int_txf_num = ARRAY_SIZE(cfg->internal_txfifo_size); u32 lmac_bitmap = le32_to_cpu(reg->fifos.fid[0]); if (!idx) { if (le32_to_cpu(reg->fifos.offset) && cfg->num_lmacs == 1) { IWL_ERR(fwrt, "WRT: Invalid lmac offset 0x%x\n", le32_to_cpu(reg->fifos.offset)); return false; } iter->internal_txf = 0; iter->fifo_size = 0; iter->fifo = -1; if (le32_to_cpu(reg->fifos.offset)) iter->lmac = 1; else iter->lmac = 0; } if (!iter->internal_txf) { for (iter->fifo++; iter->fifo < txf_num; iter->fifo++) { iter->fifo_size = cfg->lmac[iter->lmac].txfifo_size[iter->fifo]; if (iter->fifo_size && (lmac_bitmap & BIT(iter->fifo))) return true; } iter->fifo--; } iter->internal_txf = 1; if (!fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) return false; for (iter->fifo++; iter->fifo < int_txf_num + txf_num; iter->fifo++) { iter->fifo_size = cfg->internal_txfifo_size[iter->fifo - txf_num]; if (iter->fifo_size && (lmac_bitmap & BIT(iter->fifo))) return true; } return false; } static int iwl_dump_ini_txf_iter(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *range_ptr, u32 range_len, int idx) { struct iwl_fw_ini_region_tlv *reg = (void 
*)reg_data->reg_tlv->data; struct iwl_fw_ini_error_dump_range *range = range_ptr; struct iwl_txf_iter_data *iter = &fwrt->dump.txf_iter_data; struct iwl_fw_ini_error_dump_register *reg_dump = (void *)range->data; u32 offs = le32_to_cpu(reg->fifos.offset), addr; u32 registers_num = iwl_tlv_array_len(reg_data->reg_tlv, reg, addrs); u32 registers_size = registers_num * sizeof(*reg_dump); __le32 *data; int i; if (!iwl_ini_txf_iter(fwrt, reg_data, idx)) return -EIO; if (!iwl_trans_grab_nic_access(fwrt->trans)) return -EBUSY; range->fifo_hdr.fifo_num = cpu_to_le32(iter->fifo); range->fifo_hdr.num_of_registers = cpu_to_le32(registers_num); range->range_data_size = cpu_to_le32(iter->fifo_size + registers_size); iwl_write_prph_no_grab(fwrt->trans, TXF_LARC_NUM + offs, iter->fifo); /* * read txf registers. for each register, write to the dump the * register address and its value */ for (i = 0; i < registers_num; i++) { addr = le32_to_cpu(reg->addrs[i]) + offs; reg_dump->addr = cpu_to_le32(addr); reg_dump->data = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans, addr)); reg_dump++; } if (reg->fifos.hdr_only) { range->range_data_size = cpu_to_le32(registers_size); goto out; } /* Set the TXF_READ_MODIFY_ADDR to TXF_WR_PTR */ iwl_write_prph_no_grab(fwrt->trans, TXF_READ_MODIFY_ADDR + offs, TXF_WR_PTR + offs); /* Dummy-read to advance the read pointer to the head */ iwl_read_prph_no_grab(fwrt->trans, TXF_READ_MODIFY_DATA + offs); /* Read FIFO */ addr = TXF_READ_MODIFY_DATA + offs; data = (void *)reg_dump; for (i = 0; i < iter->fifo_size; i += sizeof(*data)) *data++ = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans, addr)); if (fwrt->sanitize_ops && fwrt->sanitize_ops->frob_txf) fwrt->sanitize_ops->frob_txf(fwrt->sanitize_ctx, reg_dump, iter->fifo_size); out: iwl_trans_release_nic_access(fwrt->trans); return sizeof(*range) + le32_to_cpu(range->range_data_size); } struct iwl_ini_rxf_data { u32 fifo_num; u32 size; u32 offset; }; static void iwl_ini_get_rxf_data(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, struct iwl_ini_rxf_data *data) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; u32 fid1 = le32_to_cpu(reg->fifos.fid[0]); u32 fid2 = le32_to_cpu(reg->fifos.fid[1]); u8 fifo_idx; if (!data) return; /* make sure only one bit is set in only one fid */ if (WARN_ONCE(hweight_long(fid1) + hweight_long(fid2) != 1, "fid1=%x, fid2=%x\n", fid1, fid2)) return; memset(data, 0, sizeof(*data)); if (fid1) { fifo_idx = ffs(fid1) - 1; if (WARN_ONCE(fifo_idx >= MAX_NUM_LMAC, "fifo_idx=%d\n", fifo_idx)) return; data->size = fwrt->smem_cfg.lmac[fifo_idx].rxfifo1_size; data->fifo_num = fifo_idx; } else { u8 max_idx; fifo_idx = ffs(fid2) - 1; if (iwl_fw_lookup_notif_ver(fwrt->fw, SYSTEM_GROUP, SHARED_MEM_CFG_CMD, 0) <= 3) max_idx = 0; else max_idx = 1; if (WARN_ONCE(fifo_idx > max_idx, "invalid umac fifo idx %d", fifo_idx)) return; /* use bit 31 to distinguish between umac and lmac rxf while * parsing the dump */ data->fifo_num = fifo_idx | IWL_RXF_UMAC_BIT; switch (fifo_idx) { case 0: data->size = fwrt->smem_cfg.rxfifo2_size; data->offset = iwl_umac_prph(fwrt->trans, RXF_DIFF_FROM_PREV); break; case 1: data->size = fwrt->smem_cfg.rxfifo2_control_size; data->offset = iwl_umac_prph(fwrt->trans, RXF2C_DIFF_FROM_PREV); break; } } } static int iwl_dump_ini_rxf_iter(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *range_ptr, u32 range_len, int idx) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; struct iwl_fw_ini_error_dump_range *range 
= range_ptr; struct iwl_ini_rxf_data rxf_data; struct iwl_fw_ini_error_dump_register *reg_dump = (void *)range->data; u32 offs = le32_to_cpu(reg->fifos.offset), addr; u32 registers_num = iwl_tlv_array_len(reg_data->reg_tlv, reg, addrs); u32 registers_size = registers_num * sizeof(*reg_dump); __le32 *data; int i; iwl_ini_get_rxf_data(fwrt, reg_data, &rxf_data); if (!rxf_data.size) return -EIO; if (!iwl_trans_grab_nic_access(fwrt->trans)) return -EBUSY; range->fifo_hdr.fifo_num = cpu_to_le32(rxf_data.fifo_num); range->fifo_hdr.num_of_registers = cpu_to_le32(registers_num); range->range_data_size = cpu_to_le32(rxf_data.size + registers_size); /* * read rxf registers. for each register, write to the dump the * register address and its value */ for (i = 0; i < registers_num; i++) { addr = le32_to_cpu(reg->addrs[i]) + offs; reg_dump->addr = cpu_to_le32(addr); reg_dump->data = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans, addr)); reg_dump++; } if (reg->fifos.hdr_only) { range->range_data_size = cpu_to_le32(registers_size); goto out; } offs = rxf_data.offset; /* Lock fence */ iwl_write_prph_no_grab(fwrt->trans, RXF_SET_FENCE_MODE + offs, 0x1); /* Set fence pointer to the same place as the WR pointer */ iwl_write_prph_no_grab(fwrt->trans, RXF_LD_WR2FENCE + offs, 0x1); /* Set fence offset */ iwl_write_prph_no_grab(fwrt->trans, RXF_LD_FENCE_OFFSET_ADDR + offs, 0x0); /* Read FIFO */ addr = RXF_FIFO_RD_FENCE_INC + offs; data = (void *)reg_dump; for (i = 0; i < rxf_data.size; i += sizeof(*data)) *data++ = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans, addr)); out: iwl_trans_release_nic_access(fwrt->trans); return sizeof(*range) + le32_to_cpu(range->range_data_size); } static int iwl_dump_ini_err_table_iter(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *range_ptr, u32 range_len, int idx) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; struct iwl_fw_ini_region_err_table *err_table = &reg->err_table; struct iwl_fw_ini_error_dump_range *range = range_ptr; u32 addr = le32_to_cpu(err_table->base_addr) + le32_to_cpu(err_table->offset); range->internal_base_addr = cpu_to_le32(addr); range->range_data_size = err_table->size; iwl_trans_read_mem_bytes(fwrt->trans, addr, range->data, le32_to_cpu(err_table->size)); return sizeof(*range) + le32_to_cpu(range->range_data_size); } static int iwl_dump_ini_special_mem_iter(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *range_ptr, u32 range_len, int idx) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; struct iwl_fw_ini_region_special_device_memory *special_mem = &reg->special_mem; struct iwl_fw_ini_error_dump_range *range = range_ptr; u32 addr = le32_to_cpu(special_mem->base_addr) + le32_to_cpu(special_mem->offset); range->internal_base_addr = cpu_to_le32(addr); range->range_data_size = special_mem->size; iwl_trans_read_mem_bytes(fwrt->trans, addr, range->data, le32_to_cpu(special_mem->size)); return sizeof(*range) + le32_to_cpu(range->range_data_size); } static int iwl_dump_ini_dbgi_sram_iter(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *range_ptr, u32 range_len, int idx) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; struct iwl_fw_ini_error_dump_range *range = range_ptr; __le32 *val = range->data; u32 prph_data; int i; if (!iwl_trans_grab_nic_access(fwrt->trans)) return -EBUSY; range->range_data_size = reg->dev_addr.size; for (i = 0; i < (le32_to_cpu(reg->dev_addr.size) / 4); i++) { prph_data =
iwl_read_prph_no_grab(fwrt->trans, (i % 2) ? DBGI_SRAM_TARGET_ACCESS_RDATA_MSB : DBGI_SRAM_TARGET_ACCESS_RDATA_LSB); if (prph_data == 0x5a5a5a5a) { iwl_trans_release_nic_access(fwrt->trans); return -EBUSY; } *val++ = cpu_to_le32(prph_data); } iwl_trans_release_nic_access(fwrt->trans); return sizeof(*range) + le32_to_cpu(range->range_data_size); } static int iwl_dump_ini_fw_pkt_iter(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *range_ptr, u32 range_len, int idx) { struct iwl_fw_ini_error_dump_range *range = range_ptr; struct iwl_rx_packet *pkt = reg_data->dump_data->fw_pkt; u32 pkt_len; if (!pkt) return -EIO; pkt_len = iwl_rx_packet_payload_len(pkt); memcpy(&range->fw_pkt_hdr, &pkt->hdr, sizeof(range->fw_pkt_hdr)); range->range_data_size = cpu_to_le32(pkt_len); memcpy(range->data, pkt->data, pkt_len); return sizeof(*range) + le32_to_cpu(range->range_data_size); } static int iwl_dump_ini_imr_iter(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *range_ptr, u32 range_len, int idx) { /* read the IMR memory and DMA it to SRAM */ struct iwl_fw_ini_error_dump_range *range = range_ptr; u64 imr_curr_addr = fwrt->trans->dbg.imr_data.imr_curr_addr; u32 imr_rem_bytes = fwrt->trans->dbg.imr_data.imr2sram_remainbyte; u32 sram_addr = fwrt->trans->dbg.imr_data.sram_addr; u32 sram_size = fwrt->trans->dbg.imr_data.sram_size; u32 size_to_dump = (imr_rem_bytes > sram_size) ? sram_size : imr_rem_bytes; range->range_data_size = cpu_to_le32(size_to_dump); if (iwl_trans_write_imr_mem(fwrt->trans, sram_addr, imr_curr_addr, size_to_dump)) { IWL_ERR(fwrt, "WRT_DEBUG: IMR Memory transfer failed\n"); return -1; } fwrt->trans->dbg.imr_data.imr_curr_addr = imr_curr_addr + size_to_dump; fwrt->trans->dbg.imr_data.imr2sram_remainbyte -= size_to_dump; iwl_trans_read_mem_bytes(fwrt->trans, sram_addr, range->data, size_to_dump); return sizeof(*range) + le32_to_cpu(range->range_data_size); } static void * iwl_dump_ini_mem_fill_header(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *data, u32 data_len) { struct iwl_fw_ini_error_dump *dump = data; dump->header.version = cpu_to_le32(IWL_INI_DUMP_VER); return dump->data; } /** * mask_apply_and_normalize - applies mask on val and normalize the result * * The normalization is based on the first set bit in the mask * * @val: value * @mask: mask to apply and to normalize with */ static u32 mask_apply_and_normalize(u32 val, u32 mask) { return (val & mask) >> (ffs(mask) - 1); } static __le32 iwl_get_mon_reg(struct iwl_fw_runtime *fwrt, u32 alloc_id, const struct iwl_fw_mon_reg *reg_info) { u32 val, offs; /* The header addresses of DBGCi is calculate as follows: * DBGC1 address + (0x100 * i) */ offs = (alloc_id - IWL_FW_INI_ALLOCATION_ID_DBGC1) * 0x100; if (!reg_info || !reg_info->addr || !reg_info->mask) return 0; val = iwl_read_prph_no_grab(fwrt->trans, reg_info->addr + offs); return cpu_to_le32(mask_apply_and_normalize(val, reg_info->mask)); } static void * iwl_dump_ini_mon_fill_header(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, struct iwl_fw_ini_monitor_dump *data, const struct iwl_fw_mon_regs *addrs) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; u32 alloc_id = le32_to_cpu(reg->dram_alloc_id); if (!iwl_trans_grab_nic_access(fwrt->trans)) { IWL_ERR(fwrt, "Failed to get monitor header\n"); return NULL; } data->write_ptr = iwl_get_mon_reg(fwrt, alloc_id, &addrs->write_ptr); if (fwrt->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { 
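/* on AX210 and later the monitor write pointer appears to be byte-based; the shift below converts it to a dword index */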
u32 wrt_ptr = le32_to_cpu(data->write_ptr); data->write_ptr = cpu_to_le32(wrt_ptr >> 2); } data->cycle_cnt = iwl_get_mon_reg(fwrt, alloc_id, &addrs->cycle_cnt); data->cur_frag = iwl_get_mon_reg(fwrt, alloc_id, &addrs->cur_frag); iwl_trans_release_nic_access(fwrt->trans); data->header.version = cpu_to_le32(IWL_INI_DUMP_VER); return data->data; } static void * iwl_dump_ini_mon_dram_fill_header(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *data, u32 data_len) { struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data; return iwl_dump_ini_mon_fill_header(fwrt, reg_data, mon_dump, &fwrt->trans->cfg->mon_dram_regs); } static void * iwl_dump_ini_mon_smem_fill_header(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *data, u32 data_len) { struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data; return iwl_dump_ini_mon_fill_header(fwrt, reg_data, mon_dump, &fwrt->trans->cfg->mon_smem_regs); } static void * iwl_dump_ini_mon_dbgi_fill_header(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *data, u32 data_len) { struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data; return iwl_dump_ini_mon_fill_header(fwrt, reg_data, mon_dump, &fwrt->trans->cfg->mon_dbgi_regs); } static void * iwl_dump_ini_err_table_fill_header(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *data, u32 data_len) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; struct iwl_fw_ini_err_table_dump *dump = data; dump->header.version = cpu_to_le32(IWL_INI_DUMP_VER); dump->version = reg->err_table.version; return dump->data; } static void * iwl_dump_ini_special_mem_fill_header(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *data, u32 data_len) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; struct iwl_fw_ini_special_device_memory *dump = data; dump->header.version = cpu_to_le32(IWL_INI_DUMP_VER); dump->type = reg->special_mem.type; dump->version = reg->special_mem.version; return dump->data; } static void * iwl_dump_ini_imr_fill_header(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *data, u32 data_len) { struct iwl_fw_ini_error_dump *dump = data; dump->header.version = cpu_to_le32(IWL_INI_DUMP_VER); return dump->data; } static u32 iwl_dump_ini_mem_ranges(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; return iwl_tlv_array_len(reg_data->reg_tlv, reg, addrs); } static u32 iwl_dump_ini_paging_ranges(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data) { if (fwrt->trans->trans_cfg->gen2) { if (fwrt->trans->init_dram.paging_cnt) return fwrt->trans->init_dram.paging_cnt - 1; else return 0; } return fwrt->num_of_paging_blk; } static u32 iwl_dump_ini_mon_dram_ranges(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; struct iwl_fw_mon *fw_mon; u32 ranges = 0, alloc_id = le32_to_cpu(reg->dram_alloc_id); int i; fw_mon = &fwrt->trans->dbg.fw_mon_ini[alloc_id]; for (i = 0; i < fw_mon->num_frags; i++) { if (!fw_mon->frags[i].size) break; ranges++; } return ranges; } static u32 iwl_dump_ini_txf_ranges(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data) { u32 num_of_fifos = 0; while (iwl_ini_txf_iter(fwrt, reg_data, num_of_fifos)) num_of_fifos++; return num_of_fifos; } static u32 iwl_dump_ini_single_range(struct 
iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data) { return 1; } static u32 iwl_dump_ini_imr_ranges(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data) { /* range is total number of pages need to copied from *IMR memory to SRAM and later from SRAM to DRAM */ u32 imr_enable = fwrt->trans->dbg.imr_data.imr_enable; u32 imr_size = fwrt->trans->dbg.imr_data.imr_size; u32 sram_size = fwrt->trans->dbg.imr_data.sram_size; if (imr_enable == 0 || imr_size == 0 || sram_size == 0) { IWL_DEBUG_INFO(fwrt, "WRT: Invalid imr data enable: %d, imr_size: %d, sram_size: %d\n", imr_enable, imr_size, sram_size); return 0; } return((imr_size % sram_size) ? (imr_size / sram_size + 1) : (imr_size / sram_size)); } static u32 iwl_dump_ini_mem_get_size(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; u32 size = le32_to_cpu(reg->dev_addr.size); u32 ranges = iwl_dump_ini_mem_ranges(fwrt, reg_data); if (!size || !ranges) return 0; return sizeof(struct iwl_fw_ini_error_dump) + ranges * (size + sizeof(struct iwl_fw_ini_error_dump_range)); } static u32 iwl_dump_ini_paging_get_size(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data) { int i; u32 range_header_len = sizeof(struct iwl_fw_ini_error_dump_range); u32 size = sizeof(struct iwl_fw_ini_error_dump); /* start from 1 to skip CSS section */ for (i = 1; i <= iwl_dump_ini_paging_ranges(fwrt, reg_data); i++) { size += range_header_len; if (fwrt->trans->trans_cfg->gen2) size += fwrt->trans->init_dram.paging[i].size; else size += fwrt->fw_paging_db[i].fw_paging_size; } return size; } static u32 iwl_dump_ini_mon_dram_get_size(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; struct iwl_fw_mon *fw_mon; u32 size = 0, alloc_id = le32_to_cpu(reg->dram_alloc_id); int i; fw_mon = &fwrt->trans->dbg.fw_mon_ini[alloc_id]; for (i = 0; i < fw_mon->num_frags; i++) { struct iwl_dram_data *frag = &fw_mon->frags[i]; if (!frag->size) break; size += sizeof(struct iwl_fw_ini_error_dump_range) + frag->size; } if (size) size += sizeof(struct iwl_fw_ini_monitor_dump); return size; } static u32 iwl_dump_ini_mon_smem_get_size(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; u32 size; size = le32_to_cpu(reg->internal_buffer.size); if (!size) return 0; size += sizeof(struct iwl_fw_ini_monitor_dump) + sizeof(struct iwl_fw_ini_error_dump_range); return size; } static u32 iwl_dump_ini_mon_dbgi_get_size(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; u32 size = le32_to_cpu(reg->dev_addr.size); u32 ranges = iwl_dump_ini_mem_ranges(fwrt, reg_data); if (!size || !ranges) return 0; return sizeof(struct iwl_fw_ini_monitor_dump) + ranges * (size + sizeof(struct iwl_fw_ini_error_dump_range)); } static u32 iwl_dump_ini_txf_get_size(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; struct iwl_txf_iter_data *iter = &fwrt->dump.txf_iter_data; u32 registers_num = iwl_tlv_array_len(reg_data->reg_tlv, reg, addrs); u32 size = 0; u32 fifo_hdr = sizeof(struct iwl_fw_ini_error_dump_range) + registers_num * sizeof(struct iwl_fw_ini_error_dump_register); while (iwl_ini_txf_iter(fwrt, reg_data, size)) { size += 
fifo_hdr; if (!reg->fifos.hdr_only) size += iter->fifo_size; } if (!size) return 0; return size + sizeof(struct iwl_fw_ini_error_dump); } static u32 iwl_dump_ini_rxf_get_size(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; struct iwl_ini_rxf_data rx_data; u32 registers_num = iwl_tlv_array_len(reg_data->reg_tlv, reg, addrs); u32 size = sizeof(struct iwl_fw_ini_error_dump) + sizeof(struct iwl_fw_ini_error_dump_range) + registers_num * sizeof(struct iwl_fw_ini_error_dump_register); if (reg->fifos.hdr_only) return size; iwl_ini_get_rxf_data(fwrt, reg_data, &rx_data); size += rx_data.size; return size; } static u32 iwl_dump_ini_err_table_get_size(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; u32 size = le32_to_cpu(reg->err_table.size); if (size) size += sizeof(struct iwl_fw_ini_err_table_dump) + sizeof(struct iwl_fw_ini_error_dump_range); return size; } static u32 iwl_dump_ini_special_mem_get_size(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; u32 size = le32_to_cpu(reg->special_mem.size); if (size) size += sizeof(struct iwl_fw_ini_special_device_memory) + sizeof(struct iwl_fw_ini_error_dump_range); return size; } static u32 iwl_dump_ini_fw_pkt_get_size(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data) { u32 size = 0; if (!reg_data->dump_data->fw_pkt) return 0; size += iwl_rx_packet_payload_len(reg_data->dump_data->fw_pkt); if (size) size += sizeof(struct iwl_fw_ini_error_dump) + sizeof(struct iwl_fw_ini_error_dump_range); return size; } static u32 iwl_dump_ini_imr_get_size(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data) { u32 size = 0; u32 ranges = 0; u32 imr_enable = fwrt->trans->dbg.imr_data.imr_enable; u32 imr_size = fwrt->trans->dbg.imr_data.imr_size; u32 sram_size = fwrt->trans->dbg.imr_data.sram_size; if (imr_enable == 0 || imr_size == 0 || sram_size == 0) { IWL_DEBUG_INFO(fwrt, "WRT: Invalid imr data enable: %d, imr_size: %d, sram_size: %d\n", imr_enable, imr_size, sram_size); return size; } size = imr_size; ranges = iwl_dump_ini_imr_ranges(fwrt, reg_data); if (!size && !ranges) { IWL_ERR(fwrt, "WRT: imr_size :=%d, ranges :=%d\n", size, ranges); return 0; } size += sizeof(struct iwl_fw_ini_error_dump) + ranges * sizeof(struct iwl_fw_ini_error_dump_range); return size; } /** * struct iwl_dump_ini_mem_ops - ini memory dump operations * @get_num_of_ranges: returns the number of memory ranges in the region. * @get_size: returns the total size of the region. * @fill_mem_hdr: fills region type specific headers and returns pointer to * the first range or NULL if failed to fill headers. * @fill_range: copies a given memory range into the dump. * Returns the size of the range or negative error value otherwise. */ struct iwl_dump_ini_mem_ops { u32 (*get_num_of_ranges)(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data); u32 (*get_size)(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data); void *(*fill_mem_hdr)(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *data, u32 data_len); int (*fill_range)(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *range, u32 range_len, int idx); }; /** * iwl_dump_ini_mem * * Creates a dump tlv and copy a memory region into it. 
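* The resulting entry is laid out as the dump data TLV, then the region header filled in here (id, number of ranges, name), then each memory range as written by ops->fill_range().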
* Returns the size of the current dump tlv or 0 if failed * * @fwrt: fw runtime struct * @list: list to add the dump tlv to * @reg_data: memory region * @ops: memory dump operations */ static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list, struct iwl_dump_ini_region_data *reg_data, const struct iwl_dump_ini_mem_ops *ops) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; struct iwl_fw_ini_dump_entry *entry; struct iwl_fw_ini_error_dump_data *tlv; struct iwl_fw_ini_error_dump_header *header; u32 type = reg->type; u32 id = le32_get_bits(reg->id, IWL_FW_INI_REGION_ID_MASK); u32 num_of_ranges, i, size; u8 *range; u32 free_size; u64 header_size; u32 dump_policy = IWL_FW_INI_DUMP_VERBOSE; IWL_DEBUG_FW(fwrt, "WRT: Collecting region: dump type=%d, id=%d, type=%d\n", dump_policy, id, type); if (le32_to_cpu(reg->hdr.version) >= 2) { u32 dp = le32_get_bits(reg->id, IWL_FW_INI_REGION_DUMP_POLICY_MASK); if (dump_policy == IWL_FW_INI_DUMP_VERBOSE && !(dp & IWL_FW_INI_DEBUG_DUMP_POLICY_NO_LIMIT)) { IWL_DEBUG_FW(fwrt, "WRT: no dump - type %d and policy mismatch=%d\n", dump_policy, dp); return 0; } else if (dump_policy == IWL_FW_INI_DUMP_MEDIUM && !(dp & IWL_FW_IWL_DEBUG_DUMP_POLICY_MAX_LIMIT_5MB)) { IWL_DEBUG_FW(fwrt, "WRT: no dump - type %d and policy mismatch=%d\n", dump_policy, dp); return 0; } else if (dump_policy == IWL_FW_INI_DUMP_BRIEF && !(dp & IWL_FW_INI_DEBUG_DUMP_POLICY_MAX_LIMIT_600KB)) { IWL_DEBUG_FW(fwrt, "WRT: no dump - type %d and policy mismatch=%d\n", dump_policy, dp); return 0; } } if (!ops->get_num_of_ranges || !ops->get_size || !ops->fill_mem_hdr || !ops->fill_range) { IWL_DEBUG_FW(fwrt, "WRT: no ops for collecting data\n"); return 0; } size = ops->get_size(fwrt, reg_data); if (size < sizeof(*header)) { IWL_DEBUG_FW(fwrt, "WRT: size didn't include space for header\n"); return 0; } entry = vzalloc(sizeof(*entry) + sizeof(*tlv) + size); if (!entry) return 0; entry->size = sizeof(*tlv) + size; tlv = (void *)entry->data; tlv->type = reg->type; tlv->sub_type = reg->sub_type; tlv->sub_type_ver = reg->sub_type_ver; tlv->reserved = reg->reserved; tlv->len = cpu_to_le32(size); num_of_ranges = ops->get_num_of_ranges(fwrt, reg_data); header = (void *)tlv->data; header->region_id = cpu_to_le32(id); header->num_of_ranges = cpu_to_le32(num_of_ranges); header->name_len = cpu_to_le32(IWL_FW_INI_MAX_NAME); memcpy(header->name, reg->name, IWL_FW_INI_MAX_NAME); free_size = size; range = ops->fill_mem_hdr(fwrt, reg_data, header, free_size); if (!range) { IWL_ERR(fwrt, "WRT: Failed to fill region header: id=%d, type=%d\n", id, type); goto out_err; } header_size = range - (u8 *)header; if (WARN(header_size > free_size, #if defined(__linux__) "header size %llu > free_size %d", header_size, free_size)) { #elif defined(__FreeBSD__) "header size %ju > free_size %d", (uintmax_t)header_size, free_size)) { #endif IWL_ERR(fwrt, "WRT: fill_mem_hdr used more than given free_size\n"); goto out_err; } free_size -= header_size; for (i = 0; i < num_of_ranges; i++) { int range_size = ops->fill_range(fwrt, reg_data, range, free_size, i); if (range_size < 0) { IWL_ERR(fwrt, "WRT: Failed to dump region: id=%d, type=%d\n", id, type); goto out_err; } if (WARN(range_size > free_size, "range_size %d > free_size %d", range_size, free_size)) { IWL_ERR(fwrt, "WRT: fill_raged used more than given free_size\n"); goto out_err; } free_size -= range_size; range = range + range_size; } list_add_tail(&entry->list, list); return entry->size; out_err: vfree(entry); return 0; } static u32 
iwl_dump_ini_info(struct iwl_fw_runtime *fwrt, struct iwl_fw_ini_trigger_tlv *trigger, struct list_head *list) { struct iwl_fw_ini_dump_entry *entry; struct iwl_fw_error_dump_data *tlv; struct iwl_fw_ini_dump_info *dump; struct iwl_dbg_tlv_node *node; struct iwl_fw_ini_dump_cfg_name *cfg_name; u32 size = sizeof(*tlv) + sizeof(*dump); u32 num_of_cfg_names = 0; u32 hw_type; list_for_each_entry(node, &fwrt->trans->dbg.debug_info_tlv_list, list) { size += sizeof(*cfg_name); num_of_cfg_names++; } entry = vzalloc(sizeof(*entry) + size); if (!entry) return 0; entry->size = size; tlv = (void *)entry->data; tlv->type = cpu_to_le32(IWL_INI_DUMP_INFO_TYPE); tlv->len = cpu_to_le32(size - sizeof(*tlv)); dump = (void *)tlv->data; dump->version = cpu_to_le32(IWL_INI_DUMP_VER); dump->time_point = trigger->time_point; dump->trigger_reason = trigger->trigger_reason; dump->external_cfg_state = cpu_to_le32(fwrt->trans->dbg.external_ini_cfg); dump->ver_type = cpu_to_le32(fwrt->dump.fw_ver.type); dump->ver_subtype = cpu_to_le32(fwrt->dump.fw_ver.subtype); dump->hw_step = cpu_to_le32(fwrt->trans->hw_rev_step); /* * Several HWs all have type == 0x42, so we'll override this value * according to the detected HW */ hw_type = CSR_HW_REV_TYPE(fwrt->trans->hw_rev); if (hw_type == IWL_AX210_HW_TYPE) { u32 prph_val = iwl_read_umac_prph(fwrt->trans, WFPM_OTP_CFG1_ADDR); u32 is_jacket = !!(prph_val & WFPM_OTP_CFG1_IS_JACKET_BIT); u32 is_cdb = !!(prph_val & WFPM_OTP_CFG1_IS_CDB_BIT); u32 masked_bits = is_jacket | (is_cdb << 1); /* * The HW type depends on certain bits in this case, so add * these bits to the HW type. We won't have collisions since we * add these bits after the highest possible bit in the mask. */ hw_type |= masked_bits << IWL_AX210_HW_TYPE_ADDITION_SHIFT; } dump->hw_type = cpu_to_le32(hw_type); dump->rf_id_flavor = cpu_to_le32(CSR_HW_RFID_FLAVOR(fwrt->trans->hw_rf_id)); dump->rf_id_dash = cpu_to_le32(CSR_HW_RFID_DASH(fwrt->trans->hw_rf_id)); dump->rf_id_step = cpu_to_le32(CSR_HW_RFID_STEP(fwrt->trans->hw_rf_id)); dump->rf_id_type = cpu_to_le32(CSR_HW_RFID_TYPE(fwrt->trans->hw_rf_id)); dump->lmac_major = cpu_to_le32(fwrt->dump.fw_ver.lmac_major); dump->lmac_minor = cpu_to_le32(fwrt->dump.fw_ver.lmac_minor); dump->umac_major = cpu_to_le32(fwrt->dump.fw_ver.umac_major); dump->umac_minor = cpu_to_le32(fwrt->dump.fw_ver.umac_minor); dump->fw_mon_mode = cpu_to_le32(fwrt->trans->dbg.ini_dest); dump->regions_mask = trigger->regions_mask & ~cpu_to_le64(fwrt->trans->dbg.unsupported_region_msk); dump->build_tag_len = cpu_to_le32(sizeof(dump->build_tag)); memcpy(dump->build_tag, fwrt->fw->human_readable, sizeof(dump->build_tag)); cfg_name = dump->cfg_names; dump->num_of_cfg_names = cpu_to_le32(num_of_cfg_names); list_for_each_entry(node, &fwrt->trans->dbg.debug_info_tlv_list, list) { struct iwl_fw_ini_debug_info_tlv *debug_info = (void *)node->tlv.data; cfg_name->image_type = debug_info->image_type; cfg_name->cfg_name_len = cpu_to_le32(IWL_FW_INI_MAX_CFG_NAME); memcpy(cfg_name->cfg_name, debug_info->debug_cfg_name, sizeof(cfg_name->cfg_name)); cfg_name++; } /* add dump info TLV to the beginning of the list since it needs to be * the first TLV in the dump */ list_add(&entry->list, list); return entry->size; } static const struct iwl_dump_ini_mem_ops iwl_dump_ini_region_ops[] = { [IWL_FW_INI_REGION_INVALID] = {}, [IWL_FW_INI_REGION_INTERNAL_BUFFER] = { .get_num_of_ranges = iwl_dump_ini_single_range, .get_size = iwl_dump_ini_mon_smem_get_size, .fill_mem_hdr = iwl_dump_ini_mon_smem_fill_header, .fill_range = 
iwl_dump_ini_mon_smem_iter, }, [IWL_FW_INI_REGION_DRAM_BUFFER] = { .get_num_of_ranges = iwl_dump_ini_mon_dram_ranges, .get_size = iwl_dump_ini_mon_dram_get_size, .fill_mem_hdr = iwl_dump_ini_mon_dram_fill_header, .fill_range = iwl_dump_ini_mon_dram_iter, }, [IWL_FW_INI_REGION_TXF] = { .get_num_of_ranges = iwl_dump_ini_txf_ranges, .get_size = iwl_dump_ini_txf_get_size, .fill_mem_hdr = iwl_dump_ini_mem_fill_header, .fill_range = iwl_dump_ini_txf_iter, }, [IWL_FW_INI_REGION_RXF] = { .get_num_of_ranges = iwl_dump_ini_single_range, .get_size = iwl_dump_ini_rxf_get_size, .fill_mem_hdr = iwl_dump_ini_mem_fill_header, .fill_range = iwl_dump_ini_rxf_iter, }, [IWL_FW_INI_REGION_LMAC_ERROR_TABLE] = { .get_num_of_ranges = iwl_dump_ini_single_range, .get_size = iwl_dump_ini_err_table_get_size, .fill_mem_hdr = iwl_dump_ini_err_table_fill_header, .fill_range = iwl_dump_ini_err_table_iter, }, [IWL_FW_INI_REGION_UMAC_ERROR_TABLE] = { .get_num_of_ranges = iwl_dump_ini_single_range, .get_size = iwl_dump_ini_err_table_get_size, .fill_mem_hdr = iwl_dump_ini_err_table_fill_header, .fill_range = iwl_dump_ini_err_table_iter, }, [IWL_FW_INI_REGION_RSP_OR_NOTIF] = { .get_num_of_ranges = iwl_dump_ini_single_range, .get_size = iwl_dump_ini_fw_pkt_get_size, .fill_mem_hdr = iwl_dump_ini_mem_fill_header, .fill_range = iwl_dump_ini_fw_pkt_iter, }, [IWL_FW_INI_REGION_DEVICE_MEMORY] = { .get_num_of_ranges = iwl_dump_ini_mem_ranges, .get_size = iwl_dump_ini_mem_get_size, .fill_mem_hdr = iwl_dump_ini_mem_fill_header, .fill_range = iwl_dump_ini_dev_mem_iter, }, [IWL_FW_INI_REGION_PERIPHERY_MAC] = { .get_num_of_ranges = iwl_dump_ini_mem_ranges, .get_size = iwl_dump_ini_mem_get_size, .fill_mem_hdr = iwl_dump_ini_mem_fill_header, .fill_range = iwl_dump_ini_prph_mac_iter, }, [IWL_FW_INI_REGION_PERIPHERY_PHY] = { .get_num_of_ranges = iwl_dump_ini_mem_ranges, .get_size = iwl_dump_ini_mem_get_size, .fill_mem_hdr = iwl_dump_ini_mem_fill_header, .fill_range = iwl_dump_ini_prph_phy_iter, }, [IWL_FW_INI_REGION_PERIPHERY_AUX] = {}, [IWL_FW_INI_REGION_PAGING] = { .fill_mem_hdr = iwl_dump_ini_mem_fill_header, .get_num_of_ranges = iwl_dump_ini_paging_ranges, .get_size = iwl_dump_ini_paging_get_size, .fill_range = iwl_dump_ini_paging_iter, }, [IWL_FW_INI_REGION_CSR] = { .get_num_of_ranges = iwl_dump_ini_mem_ranges, .get_size = iwl_dump_ini_mem_get_size, .fill_mem_hdr = iwl_dump_ini_mem_fill_header, .fill_range = iwl_dump_ini_csr_iter, }, [IWL_FW_INI_REGION_DRAM_IMR] = { .get_num_of_ranges = iwl_dump_ini_imr_ranges, .get_size = iwl_dump_ini_imr_get_size, .fill_mem_hdr = iwl_dump_ini_imr_fill_header, .fill_range = iwl_dump_ini_imr_iter, }, [IWL_FW_INI_REGION_PCI_IOSF_CONFIG] = { .get_num_of_ranges = iwl_dump_ini_mem_ranges, .get_size = iwl_dump_ini_mem_get_size, .fill_mem_hdr = iwl_dump_ini_mem_fill_header, .fill_range = iwl_dump_ini_config_iter, }, [IWL_FW_INI_REGION_SPECIAL_DEVICE_MEMORY] = { .get_num_of_ranges = iwl_dump_ini_single_range, .get_size = iwl_dump_ini_special_mem_get_size, .fill_mem_hdr = iwl_dump_ini_special_mem_fill_header, .fill_range = iwl_dump_ini_special_mem_iter, }, [IWL_FW_INI_REGION_DBGI_SRAM] = { .get_num_of_ranges = iwl_dump_ini_mem_ranges, .get_size = iwl_dump_ini_mon_dbgi_get_size, .fill_mem_hdr = iwl_dump_ini_mon_dbgi_fill_header, .fill_range = iwl_dump_ini_dbgi_sram_iter, }, }; static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt, struct iwl_fwrt_dump_data *dump_data, struct list_head *list) { struct iwl_fw_ini_trigger_tlv *trigger = dump_data->trig; enum iwl_fw_ini_time_point tp_id = 
le32_to_cpu(trigger->time_point); struct iwl_dump_ini_region_data reg_data = { .dump_data = dump_data, }; + struct iwl_dump_ini_region_data imr_reg_data = { + .dump_data = dump_data, + }; int i; u32 size = 0; u64 regions_mask = le64_to_cpu(trigger->regions_mask) & ~(fwrt->trans->dbg.unsupported_region_msk); BUILD_BUG_ON(sizeof(trigger->regions_mask) != sizeof(regions_mask)); BUILD_BUG_ON((sizeof(trigger->regions_mask) * BITS_PER_BYTE) < ARRAY_SIZE(fwrt->trans->dbg.active_regions)); for (i = 0; i < ARRAY_SIZE(fwrt->trans->dbg.active_regions); i++) { u32 reg_type; struct iwl_fw_ini_region_tlv *reg; if (!(BIT_ULL(i) & regions_mask)) continue; reg_data.reg_tlv = fwrt->trans->dbg.active_regions[i]; if (!reg_data.reg_tlv) { IWL_WARN(fwrt, "WRT: Unassigned region id %d, skipping\n", i); continue; } reg = (void *)reg_data.reg_tlv->data; reg_type = reg->type; if (reg_type >= ARRAY_SIZE(iwl_dump_ini_region_ops)) continue; if (reg_type == IWL_FW_INI_REGION_PERIPHERY_PHY && tp_id != IWL_FW_INI_TIME_POINT_FW_ASSERT) { IWL_WARN(fwrt, "WRT: trying to collect phy prph at time point: %d, skipping\n", tp_id); continue; } + /* + * DRAM_IMR can be collected only for FW/HW error timepoint + * when fw is not alive. In addition, it must be collected + * last as it overwrites SRAM that can possibly contain + * debug data which also need to be collected. + */ + if (reg_type == IWL_FW_INI_REGION_DRAM_IMR) { + if (tp_id == IWL_FW_INI_TIME_POINT_FW_ASSERT || + tp_id == IWL_FW_INI_TIME_POINT_FW_HW_ERROR) + imr_reg_data.reg_tlv = fwrt->trans->dbg.active_regions[i]; + else + IWL_INFO(fwrt, + "WRT: trying to collect DRAM_IMR at time point: %d, skipping\n", + tp_id); + /* continue to next region */ + continue; + } + size += iwl_dump_ini_mem(fwrt, list, &reg_data, &iwl_dump_ini_region_ops[reg_type]); } + /* collect the DRAM_IMR region last */ + if (imr_reg_data.reg_tlv) + size += iwl_dump_ini_mem(fwrt, list, &imr_reg_data, + &iwl_dump_ini_region_ops[IWL_FW_INI_REGION_DRAM_IMR]); if (size) size += iwl_dump_ini_info(fwrt, trigger, list); return size; } static bool iwl_fw_ini_trigger_on(struct iwl_fw_runtime *fwrt, struct iwl_fw_ini_trigger_tlv *trig) { enum iwl_fw_ini_time_point tp_id = le32_to_cpu(trig->time_point); u32 usec = le32_to_cpu(trig->ignore_consec); if (!iwl_trans_dbg_ini_valid(fwrt->trans) || tp_id == IWL_FW_INI_TIME_POINT_INVALID || tp_id >= IWL_FW_INI_TIME_POINT_NUM || iwl_fw_dbg_no_trig_window(fwrt, tp_id, usec)) return false; return true; } static u32 iwl_dump_ini_file_gen(struct iwl_fw_runtime *fwrt, struct iwl_fwrt_dump_data *dump_data, struct list_head *list) { struct iwl_fw_ini_trigger_tlv *trigger = dump_data->trig; struct iwl_fw_ini_dump_entry *entry; struct iwl_fw_ini_dump_file_hdr *hdr; u32 size; if (!trigger || !iwl_fw_ini_trigger_on(fwrt, trigger) || !le64_to_cpu(trigger->regions_mask)) return 0; entry = vzalloc(sizeof(*entry) + sizeof(*hdr)); if (!entry) return 0; entry->size = sizeof(*hdr); size = iwl_dump_ini_trigger(fwrt, dump_data, list); if (!size) { vfree(entry); return 0; } hdr = (void *)entry->data; hdr->barker = cpu_to_le32(IWL_FW_INI_ERROR_DUMP_BARKER); hdr->file_len = cpu_to_le32(size + entry->size); list_add(&entry->list, list); return le32_to_cpu(hdr->file_len); } static inline void iwl_fw_free_dump_desc(struct iwl_fw_runtime *fwrt, const struct iwl_fw_dump_desc *desc) { if (desc && desc != &iwl_dump_desc_assert) kfree(desc); fwrt->dump.lmac_err_id[0] = 0; if (fwrt->smem_cfg.num_lmacs > 1) fwrt->dump.lmac_err_id[1] = 0; fwrt->dump.umac_err_id = 0; } static void iwl_fw_error_dump(struct
iwl_fw_runtime *fwrt, struct iwl_fwrt_dump_data *dump_data) { struct iwl_fw_dump_ptrs fw_error_dump = {}; struct iwl_fw_error_dump_file *dump_file; struct scatterlist *sg_dump_data; u32 file_len; u32 dump_mask = fwrt->fw->dbg.dump_mask; dump_file = iwl_fw_error_dump_file(fwrt, &fw_error_dump, dump_data); if (!dump_file) return; if (dump_data->monitor_only) dump_mask &= BIT(IWL_FW_ERROR_DUMP_FW_MONITOR); fw_error_dump.trans_ptr = iwl_trans_dump_data(fwrt->trans, dump_mask, fwrt->sanitize_ops, fwrt->sanitize_ctx); file_len = le32_to_cpu(dump_file->file_len); fw_error_dump.fwrt_len = file_len; if (fw_error_dump.trans_ptr) { file_len += fw_error_dump.trans_ptr->len; dump_file->file_len = cpu_to_le32(file_len); } sg_dump_data = alloc_sgtable(file_len); if (sg_dump_data) { sg_pcopy_from_buffer(sg_dump_data, sg_nents(sg_dump_data), fw_error_dump.fwrt_ptr, fw_error_dump.fwrt_len, 0); if (fw_error_dump.trans_ptr) sg_pcopy_from_buffer(sg_dump_data, sg_nents(sg_dump_data), fw_error_dump.trans_ptr->data, fw_error_dump.trans_ptr->len, fw_error_dump.fwrt_len); dev_coredumpsg(fwrt->trans->dev, sg_dump_data, file_len, GFP_KERNEL); } vfree(fw_error_dump.fwrt_ptr); vfree(fw_error_dump.trans_ptr); } static void iwl_dump_ini_list_free(struct list_head *list) { while (!list_empty(list)) { struct iwl_fw_ini_dump_entry *entry = list_entry(list->next, typeof(*entry), list); list_del(&entry->list); vfree(entry); } } static void iwl_fw_error_dump_data_free(struct iwl_fwrt_dump_data *dump_data) { dump_data->trig = NULL; kfree(dump_data->fw_pkt); dump_data->fw_pkt = NULL; } static void iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt, struct iwl_fwrt_dump_data *dump_data) { #if defined(__linux__) LIST_HEAD(dump_list); #elif defined(__FreeBSD__) LINUX_LIST_HEAD(dump_list); #endif struct scatterlist *sg_dump_data; u32 file_len = iwl_dump_ini_file_gen(fwrt, dump_data, &dump_list); if (!file_len) return; sg_dump_data = alloc_sgtable(file_len); if (sg_dump_data) { struct iwl_fw_ini_dump_entry *entry; int sg_entries = sg_nents(sg_dump_data); u32 offs = 0; list_for_each_entry(entry, &dump_list, list) { sg_pcopy_from_buffer(sg_dump_data, sg_entries, entry->data, entry->size, offs); offs += entry->size; } dev_coredumpsg(fwrt->trans->dev, sg_dump_data, file_len, GFP_KERNEL); } iwl_dump_ini_list_free(&dump_list); } const struct iwl_fw_dump_desc iwl_dump_desc_assert = { .trig_desc = { .type = cpu_to_le32(FW_DBG_TRIGGER_FW_ASSERT), }, }; IWL_EXPORT_SYMBOL(iwl_dump_desc_assert); int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt, const struct iwl_fw_dump_desc *desc, bool monitor_only, unsigned int delay) { struct iwl_fwrt_wk_data *wk_data; unsigned long idx; if (iwl_trans_dbg_ini_valid(fwrt->trans)) { iwl_fw_free_dump_desc(fwrt, desc); return 0; } /* * Check there is an available worker. * ffz return value is undefined if no zero exists, * so check against ~0UL first. 
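* A fully set mask also means every dump worker is already in use, so returning -EBUSY in that case is the right result anyway.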
*/ if (fwrt->dump.active_wks == ~0UL) return -EBUSY; idx = ffz(fwrt->dump.active_wks); if (idx >= IWL_FW_RUNTIME_DUMP_WK_NUM || test_and_set_bit(fwrt->dump.wks[idx].idx, &fwrt->dump.active_wks)) return -EBUSY; wk_data = &fwrt->dump.wks[idx]; if (WARN_ON(wk_data->dump_data.desc)) iwl_fw_free_dump_desc(fwrt, wk_data->dump_data.desc); wk_data->dump_data.desc = desc; wk_data->dump_data.monitor_only = monitor_only; IWL_WARN(fwrt, "Collecting data: trigger %d fired.\n", le32_to_cpu(desc->trig_desc.type)); schedule_delayed_work(&wk_data->wk, usecs_to_jiffies(delay)); return 0; } IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_desc); int iwl_fw_dbg_error_collect(struct iwl_fw_runtime *fwrt, enum iwl_fw_dbg_trigger trig_type) { if (!test_bit(STATUS_DEVICE_ENABLED, &fwrt->trans->status)) return -EIO; if (iwl_trans_dbg_ini_valid(fwrt->trans)) { if (trig_type != FW_DBG_TRIGGER_ALIVE_TIMEOUT && trig_type != FW_DBG_TRIGGER_DRIVER) return -EIO; iwl_dbg_tlv_time_point(fwrt, IWL_FW_INI_TIME_POINT_HOST_ALIVE_TIMEOUT, NULL); } else { struct iwl_fw_dump_desc *iwl_dump_error_desc; int ret; iwl_dump_error_desc = kmalloc(sizeof(*iwl_dump_error_desc), GFP_KERNEL); if (!iwl_dump_error_desc) return -ENOMEM; iwl_dump_error_desc->trig_desc.type = cpu_to_le32(trig_type); iwl_dump_error_desc->len = 0; ret = iwl_fw_dbg_collect_desc(fwrt, iwl_dump_error_desc, false, 0); if (ret) { kfree(iwl_dump_error_desc); return ret; } } iwl_trans_sync_nmi(fwrt->trans); return 0; } IWL_EXPORT_SYMBOL(iwl_fw_dbg_error_collect); int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt, enum iwl_fw_dbg_trigger trig, const char *str, size_t len, struct iwl_fw_dbg_trigger_tlv *trigger) { struct iwl_fw_dump_desc *desc; unsigned int delay = 0; bool monitor_only = false; if (trigger) { u16 occurrences = le16_to_cpu(trigger->occurrences) - 1; if (!le16_to_cpu(trigger->occurrences)) return 0; if (trigger->flags & IWL_FW_DBG_FORCE_RESTART) { IWL_WARN(fwrt, "Force restart: trigger %d fired.\n", trig); iwl_force_nmi(fwrt->trans); return 0; } trigger->occurrences = cpu_to_le16(occurrences); monitor_only = trigger->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY; /* convert msec to usec */ delay = le32_to_cpu(trigger->stop_delay) * USEC_PER_MSEC; } desc = kzalloc(struct_size(desc, trig_desc.data, len), GFP_ATOMIC); if (!desc) return -ENOMEM; desc->len = len; desc->trig_desc.type = cpu_to_le32(trig); memcpy(desc->trig_desc.data, str, len); return iwl_fw_dbg_collect_desc(fwrt, desc, monitor_only, delay); } IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect); int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt, struct iwl_fw_dbg_trigger_tlv *trigger, const char *fmt, ...) 
{ int ret, len = 0; char buf[64]; if (iwl_trans_dbg_ini_valid(fwrt->trans)) return 0; if (fmt) { va_list ap; buf[sizeof(buf) - 1] = '\0'; va_start(ap, fmt); vsnprintf(buf, sizeof(buf), fmt, ap); va_end(ap); /* check for truncation */ if (WARN_ON_ONCE(buf[sizeof(buf) - 1])) buf[sizeof(buf) - 1] = '\0'; len = strlen(buf) + 1; } ret = iwl_fw_dbg_collect(fwrt, le32_to_cpu(trigger->id), buf, len, trigger); if (ret) return ret; return 0; } IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_trig); int iwl_fw_start_dbg_conf(struct iwl_fw_runtime *fwrt, u8 conf_id) { u8 *ptr; int ret; int i; if (WARN_ONCE(conf_id >= ARRAY_SIZE(fwrt->fw->dbg.conf_tlv), "Invalid configuration %d\n", conf_id)) return -EINVAL; /* EARLY START - firmware's configuration is hard coded */ if ((!fwrt->fw->dbg.conf_tlv[conf_id] || !fwrt->fw->dbg.conf_tlv[conf_id]->num_of_hcmds) && conf_id == FW_DBG_START_FROM_ALIVE) return 0; if (!fwrt->fw->dbg.conf_tlv[conf_id]) return -EINVAL; if (fwrt->dump.conf != FW_DBG_INVALID) IWL_INFO(fwrt, "FW already configured (%d) - re-configuring\n", fwrt->dump.conf); /* Send all HCMDs for configuring the FW debug */ ptr = (void *)&fwrt->fw->dbg.conf_tlv[conf_id]->hcmd; for (i = 0; i < fwrt->fw->dbg.conf_tlv[conf_id]->num_of_hcmds; i++) { struct iwl_fw_dbg_conf_hcmd *cmd = (void *)ptr; struct iwl_host_cmd hcmd = { .id = cmd->id, .len = { le16_to_cpu(cmd->len), }, .data = { cmd->data, }, }; ret = iwl_trans_send_cmd(fwrt->trans, &hcmd); if (ret) return ret; ptr += sizeof(*cmd); ptr += le16_to_cpu(cmd->len); } fwrt->dump.conf = conf_id; return 0; } IWL_EXPORT_SYMBOL(iwl_fw_start_dbg_conf); void iwl_send_dbg_dump_complete_cmd(struct iwl_fw_runtime *fwrt, u32 timepoint, u32 timepoint_data) { struct iwl_dbg_dump_complete_cmd hcmd_data; struct iwl_host_cmd hcmd = { .id = WIDE_ID(DEBUG_GROUP, FW_DUMP_COMPLETE_CMD), .data[0] = &hcmd_data, .len[0] = sizeof(hcmd_data), }; if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) return; if (fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_DUMP_COMPLETE_SUPPORT)) { hcmd_data.tp = cpu_to_le32(timepoint); hcmd_data.tp_data = cpu_to_le32(timepoint_data); iwl_trans_send_cmd(fwrt->trans, &hcmd); } } /* this function assumes dump_start was called beforehand and dump_end will be * called afterwards */ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx) { struct iwl_fw_dbg_params params = {0}; struct iwl_fwrt_dump_data *dump_data = &fwrt->dump.wks[wk_idx].dump_data; u32 policy; u32 time_point; if (!test_bit(wk_idx, &fwrt->dump.active_wks)) return; + if (!dump_data->trig) { + IWL_ERR(fwrt, "dump trigger data is not set\n"); + goto out; + } + if (!test_bit(STATUS_DEVICE_ENABLED, &fwrt->trans->status)) { IWL_ERR(fwrt, "Device is not enabled - cannot dump error\n"); goto out; } /* there's no point in fw dump if the bus is dead */ if (test_bit(STATUS_TRANS_DEAD, &fwrt->trans->status)) { IWL_ERR(fwrt, "Skip fw error dump since bus is dead\n"); goto out; } iwl_fw_dbg_stop_restart_recording(fwrt, &params, true); IWL_DEBUG_FW_INFO(fwrt, "WRT: Data collection start\n"); if (iwl_trans_dbg_ini_valid(fwrt->trans)) iwl_fw_error_ini_dump(fwrt, &fwrt->dump.wks[wk_idx].dump_data); else iwl_fw_error_dump(fwrt, &fwrt->dump.wks[wk_idx].dump_data); IWL_DEBUG_FW_INFO(fwrt, "WRT: Data collection done\n"); iwl_fw_dbg_stop_restart_recording(fwrt, &params, false); policy = le32_to_cpu(dump_data->trig->apply_policy); time_point = le32_to_cpu(dump_data->trig->time_point); if (policy & IWL_FW_INI_APPLY_POLICY_DUMP_COMPLETE_CMD) { IWL_DEBUG_FW_INFO(fwrt, "WRT: sending dump complete\n");
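/* tell the firmware the host has finished collecting this dump; the command is only sent when the firmware advertises DUMP_COMPLETE support */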
iwl_send_dbg_dump_complete_cmd(fwrt, time_point, 0); } if (fwrt->trans->dbg.last_tp_resetfw == IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY) iwl_force_nmi(fwrt->trans); out: if (iwl_trans_dbg_ini_valid(fwrt->trans)) { iwl_fw_error_dump_data_free(dump_data); } else { iwl_fw_free_dump_desc(fwrt, dump_data->desc); dump_data->desc = NULL; } clear_bit(wk_idx, &fwrt->dump.active_wks); } int iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt, struct iwl_fwrt_dump_data *dump_data, bool sync) { struct iwl_fw_ini_trigger_tlv *trig = dump_data->trig; enum iwl_fw_ini_time_point tp_id = le32_to_cpu(trig->time_point); u32 occur, delay; unsigned long idx; if (!iwl_fw_ini_trigger_on(fwrt, trig)) { IWL_WARN(fwrt, "WRT: Trigger %d is not active, aborting dump\n", tp_id); return -EINVAL; } delay = le32_to_cpu(trig->dump_delay); occur = le32_to_cpu(trig->occurrences); if (!occur) return 0; trig->occurrences = cpu_to_le32(--occur); /* Check there is an available worker. * ffz return value is undefined if no zero exists, * so check against ~0UL first. */ if (fwrt->dump.active_wks == ~0UL) return -EBUSY; idx = ffz(fwrt->dump.active_wks); if (idx >= IWL_FW_RUNTIME_DUMP_WK_NUM || test_and_set_bit(fwrt->dump.wks[idx].idx, &fwrt->dump.active_wks)) return -EBUSY; fwrt->dump.wks[idx].dump_data = *dump_data; if (sync) delay = 0; IWL_WARN(fwrt, "WRT: Collecting data: ini trigger %d fired (delay=%dms).\n", tp_id, (u32)(delay / USEC_PER_MSEC)); - schedule_delayed_work(&fwrt->dump.wks[idx].wk, usecs_to_jiffies(delay)); - if (sync) iwl_fw_dbg_collect_sync(fwrt, idx); + else + schedule_delayed_work(&fwrt->dump.wks[idx].wk, usecs_to_jiffies(delay)); return 0; } void iwl_fw_error_dump_wk(struct work_struct *work) { struct iwl_fwrt_wk_data *wks = container_of(work, typeof(*wks), wk.work); struct iwl_fw_runtime *fwrt = container_of(wks, typeof(*fwrt), dump.wks[wks->idx]); /* assumes the op mode mutex is locked in dump_start since * iwl_fw_dbg_collect_sync can't run in parallel */ if (fwrt->ops && fwrt->ops->dump_start) fwrt->ops->dump_start(fwrt->ops_ctx); iwl_fw_dbg_collect_sync(fwrt, wks->idx); if (fwrt->ops && fwrt->ops->dump_end) fwrt->ops->dump_end(fwrt->ops_ctx); } void iwl_fw_dbg_read_d3_debug_data(struct iwl_fw_runtime *fwrt) { const struct iwl_cfg *cfg = fwrt->trans->cfg; if (!iwl_fw_dbg_is_d3_debug_enabled(fwrt)) return; if (!fwrt->dump.d3_debug_data) { fwrt->dump.d3_debug_data = kmalloc(cfg->d3_debug_data_length, GFP_KERNEL); if (!fwrt->dump.d3_debug_data) { IWL_ERR(fwrt, "failed to allocate memory for D3 debug data\n"); return; } } /* if the buffer holds previous debug data it is overwritten */ iwl_trans_read_mem_bytes(fwrt->trans, cfg->d3_debug_data_base_addr, fwrt->dump.d3_debug_data, cfg->d3_debug_data_length); if (fwrt->sanitize_ops && fwrt->sanitize_ops->frob_mem) fwrt->sanitize_ops->frob_mem(fwrt->sanitize_ctx, cfg->d3_debug_data_base_addr, fwrt->dump.d3_debug_data, cfg->d3_debug_data_length); } IWL_EXPORT_SYMBOL(iwl_fw_dbg_read_d3_debug_data); void iwl_fw_dbg_stop_sync(struct iwl_fw_runtime *fwrt) { int i; iwl_dbg_tlv_del_timers(fwrt->trans); for (i = 0; i < IWL_FW_RUNTIME_DUMP_WK_NUM; i++) iwl_fw_dbg_collect_sync(fwrt, i); iwl_fw_dbg_stop_restart_recording(fwrt, NULL, true); } IWL_EXPORT_SYMBOL(iwl_fw_dbg_stop_sync); static int iwl_fw_dbg_suspend_resume_hcmd(struct iwl_trans *trans, bool suspend) { struct iwl_dbg_suspend_resume_cmd cmd = { .operation = suspend ? 
cpu_to_le32(DBGC_SUSPEND_CMD) : cpu_to_le32(DBGC_RESUME_CMD), }; struct iwl_host_cmd hcmd = { .id = WIDE_ID(DEBUG_GROUP, DBGC_SUSPEND_RESUME), .data[0] = &cmd, .len[0] = sizeof(cmd), }; return iwl_trans_send_cmd(trans, &hcmd); } static void iwl_fw_dbg_stop_recording(struct iwl_trans *trans, struct iwl_fw_dbg_params *params) { if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000) { iwl_set_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x100); return; } if (params) { params->in_sample = iwl_read_umac_prph(trans, DBGC_IN_SAMPLE); params->out_ctrl = iwl_read_umac_prph(trans, DBGC_OUT_CTRL); } iwl_write_umac_prph(trans, DBGC_IN_SAMPLE, 0); /* wait for the DBGC to finish writing the internal buffer to DRAM to * avoid halting the HW while writing */ usleep_range(700, 1000); iwl_write_umac_prph(trans, DBGC_OUT_CTRL, 0); } static int iwl_fw_dbg_restart_recording(struct iwl_trans *trans, struct iwl_fw_dbg_params *params) { if (!params) return -EIO; if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000) { iwl_clear_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x100); iwl_clear_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x1); iwl_set_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x1); } else { iwl_write_umac_prph(trans, DBGC_IN_SAMPLE, params->in_sample); iwl_write_umac_prph(trans, DBGC_OUT_CTRL, params->out_ctrl); } return 0; } void iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt, struct iwl_fw_dbg_params *params, bool stop) { int ret __maybe_unused = 0; if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) return; if (fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_DBG_SUSPEND_RESUME_CMD_SUPP)) ret = iwl_fw_dbg_suspend_resume_hcmd(fwrt->trans, stop); else if (stop) iwl_fw_dbg_stop_recording(fwrt->trans, params); else ret = iwl_fw_dbg_restart_recording(fwrt->trans, params); #ifdef CONFIG_IWLWIFI_DEBUGFS if (!ret) { if (stop) fwrt->trans->dbg.rec_on = false; else iwl_fw_set_dbg_rec_on(fwrt); } #endif } IWL_EXPORT_SYMBOL(iwl_fw_dbg_stop_restart_recording); diff --git a/sys/contrib/dev/iwlwifi/fw/file.h b/sys/contrib/dev/iwlwifi/fw/file.h index fa2a73ae4183..5679a78758be 100644 --- a/sys/contrib/dev/iwlwifi/fw/file.h +++ b/sys/contrib/dev/iwlwifi/fw/file.h @@ -1,985 +1,955 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright (C) 2008-2014, 2018-2021 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #ifndef __iwl_fw_file_h__ #define __iwl_fw_file_h__ #include #include /* v1/v2 uCode file layout */ struct iwl_ucode_header { __le32 ver; /* major/minor/API/serial */ union { struct { __le32 inst_size; /* bytes of runtime code */ __le32 data_size; /* bytes of runtime data */ __le32 init_size; /* bytes of init code */ __le32 init_data_size; /* bytes of init data */ __le32 boot_size; /* bytes of bootstrap code */ u8 data[0]; /* in same order as sizes */ } v1; struct { __le32 build; /* build number */ __le32 inst_size; /* bytes of runtime code */ __le32 data_size; /* bytes of runtime data */ __le32 init_size; /* bytes of init code */ __le32 init_data_size; /* bytes of init data */ __le32 boot_size; /* bytes of bootstrap code */ u8 data[0]; /* in same order as sizes */ } v2; } u; }; #define IWL_UCODE_TLV_DEBUG_BASE 0x1000005 #define IWL_UCODE_TLV_CONST_BASE 0x100 /* * new TLV uCode file layout * * The new TLV file format contains TLVs, that each specify * some piece of data. 
*/ enum iwl_ucode_tlv_type { IWL_UCODE_TLV_INVALID = 0, /* unused */ IWL_UCODE_TLV_INST = 1, IWL_UCODE_TLV_DATA = 2, IWL_UCODE_TLV_INIT = 3, IWL_UCODE_TLV_INIT_DATA = 4, IWL_UCODE_TLV_BOOT = 5, IWL_UCODE_TLV_PROBE_MAX_LEN = 6, /* a u32 value */ IWL_UCODE_TLV_PAN = 7, /* deprecated -- only used in DVM */ IWL_UCODE_TLV_MEM_DESC = 7, /* replaces PAN in non-DVM */ IWL_UCODE_TLV_RUNT_EVTLOG_PTR = 8, IWL_UCODE_TLV_RUNT_EVTLOG_SIZE = 9, IWL_UCODE_TLV_RUNT_ERRLOG_PTR = 10, IWL_UCODE_TLV_INIT_EVTLOG_PTR = 11, IWL_UCODE_TLV_INIT_EVTLOG_SIZE = 12, IWL_UCODE_TLV_INIT_ERRLOG_PTR = 13, IWL_UCODE_TLV_ENHANCE_SENS_TBL = 14, IWL_UCODE_TLV_PHY_CALIBRATION_SIZE = 15, IWL_UCODE_TLV_WOWLAN_INST = 16, IWL_UCODE_TLV_WOWLAN_DATA = 17, IWL_UCODE_TLV_FLAGS = 18, IWL_UCODE_TLV_SEC_RT = 19, IWL_UCODE_TLV_SEC_INIT = 20, IWL_UCODE_TLV_SEC_WOWLAN = 21, IWL_UCODE_TLV_DEF_CALIB = 22, IWL_UCODE_TLV_PHY_SKU = 23, IWL_UCODE_TLV_SECURE_SEC_RT = 24, IWL_UCODE_TLV_SECURE_SEC_INIT = 25, IWL_UCODE_TLV_SECURE_SEC_WOWLAN = 26, IWL_UCODE_TLV_NUM_OF_CPU = 27, IWL_UCODE_TLV_CSCHEME = 28, IWL_UCODE_TLV_API_CHANGES_SET = 29, IWL_UCODE_TLV_ENABLED_CAPABILITIES = 30, IWL_UCODE_TLV_N_SCAN_CHANNELS = 31, IWL_UCODE_TLV_PAGING = 32, IWL_UCODE_TLV_SEC_RT_USNIFFER = 34, /* 35 is unused */ IWL_UCODE_TLV_FW_VERSION = 36, IWL_UCODE_TLV_FW_DBG_DEST = 38, IWL_UCODE_TLV_FW_DBG_CONF = 39, IWL_UCODE_TLV_FW_DBG_TRIGGER = 40, IWL_UCODE_TLV_CMD_VERSIONS = 48, IWL_UCODE_TLV_FW_GSCAN_CAPA = 50, IWL_UCODE_TLV_FW_MEM_SEG = 51, IWL_UCODE_TLV_IML = 52, IWL_UCODE_TLV_UMAC_DEBUG_ADDRS = 54, IWL_UCODE_TLV_LMAC_DEBUG_ADDRS = 55, IWL_UCODE_TLV_FW_RECOVERY_INFO = 57, IWL_UCODE_TLV_HW_TYPE = 58, IWL_UCODE_TLV_FW_FSEQ_VERSION = 60, IWL_UCODE_TLV_PHY_INTEGRATION_VERSION = 61, IWL_UCODE_TLV_PNVM_VERSION = 62, IWL_UCODE_TLV_PNVM_SKU = 64, IWL_UCODE_TLV_SEC_TABLE_ADDR = 66, IWL_UCODE_TLV_D3_KEK_KCK_ADDR = 67, IWL_UCODE_TLV_FW_NUM_STATIONS = IWL_UCODE_TLV_CONST_BASE + 0, IWL_UCODE_TLV_TYPE_DEBUG_INFO = IWL_UCODE_TLV_DEBUG_BASE + 0, IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION = IWL_UCODE_TLV_DEBUG_BASE + 1, IWL_UCODE_TLV_TYPE_HCMD = IWL_UCODE_TLV_DEBUG_BASE + 2, IWL_UCODE_TLV_TYPE_REGIONS = IWL_UCODE_TLV_DEBUG_BASE + 3, IWL_UCODE_TLV_TYPE_TRIGGERS = IWL_UCODE_TLV_DEBUG_BASE + 4, IWL_UCODE_TLV_TYPE_CONF_SET = IWL_UCODE_TLV_DEBUG_BASE + 5, IWL_UCODE_TLV_DEBUG_MAX = IWL_UCODE_TLV_TYPE_TRIGGERS, /* TLVs 0x1000-0x2000 are for internal driver usage */ IWL_UCODE_TLV_FW_DBG_DUMP_LST = 0x1000, }; struct iwl_ucode_tlv { __le32 type; /* see above */ __le32 length; /* not including type/length fields */ u8 data[]; }; #define IWL_TLV_UCODE_MAGIC 0x0a4c5749 #define FW_VER_HUMAN_READABLE_SZ 64 struct iwl_tlv_ucode_header { /* * The TLV style ucode header is distinguished from * the v1/v2 style header by first four bytes being * zero, as such is an invalid combination of * major/minor/API/serial versions. */ __le32 zero; __le32 magic; u8 human_readable[FW_VER_HUMAN_READABLE_SZ]; /* major/minor/API/serial or major in new format */ __le32 ver; __le32 build; __le64 ignore; /* * The data contained herein has a TLV layout, * see above for the TLV header and types. * Note that each TLV is padded to a length * that is a multiple of 4 for alignment. 
*/ u8 data[0]; }; /* * ucode TLVs * * ability to get extension for: flags & capabilities from ucode binaries files */ struct iwl_ucode_api { __le32 api_index; __le32 api_flags; } __packed; struct iwl_ucode_capa { __le32 api_index; __le32 api_capa; } __packed; /** * enum iwl_ucode_tlv_flag - ucode API flags * @IWL_UCODE_TLV_FLAGS_PAN: This is PAN capable microcode; this previously * was a separate TLV but moved here to save space. * @IWL_UCODE_TLV_FLAGS_NEWSCAN: new uCode scan behavior on hidden SSID, * treats good CRC threshold as a boolean * @IWL_UCODE_TLV_FLAGS_MFP: This uCode image supports MFP (802.11w). * @IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT: This uCode image supports uAPSD * @IWL_UCODE_TLV_FLAGS_SHORT_BL: 16 entries of block list instead of 64 in scan * offload profile config command. * @IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS: D3 image supports up to six * (rather than two) IPv6 addresses * @IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID: not sending a probe with the SSID element * from the probe request template. * @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL: new NS offload (small version) * @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE: new NS offload (large version) * @IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT: General support for uAPSD * @IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD: P2P client supports uAPSD power save - * @IWL_UCODE_TLV_FLAGS_BCAST_FILTERING: uCode supports broadcast filtering. * @IWL_UCODE_TLV_FLAGS_EBS_SUPPORT: this uCode image supports EBS. */ enum iwl_ucode_tlv_flag { IWL_UCODE_TLV_FLAGS_PAN = BIT(0), IWL_UCODE_TLV_FLAGS_NEWSCAN = BIT(1), IWL_UCODE_TLV_FLAGS_MFP = BIT(2), IWL_UCODE_TLV_FLAGS_SHORT_BL = BIT(7), IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS = BIT(10), IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID = BIT(12), IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL = BIT(15), IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE = BIT(16), IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT = BIT(24), IWL_UCODE_TLV_FLAGS_EBS_SUPPORT = BIT(25), IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD = BIT(26), - IWL_UCODE_TLV_FLAGS_BCAST_FILTERING = BIT(29), }; typedef unsigned int __bitwise iwl_ucode_tlv_api_t; /** * enum iwl_ucode_tlv_api - ucode api * @IWL_UCODE_TLV_API_FRAGMENTED_SCAN: This ucode supports active dwell time * longer than the passive one, which is essential for fragmented scan. * @IWL_UCODE_TLV_API_WIFI_MCC_UPDATE: ucode supports MCC updates with source. * @IWL_UCODE_TLV_API_LQ_SS_PARAMS: Configure STBC/BFER via LQ CMD ss_params * @IWL_UCODE_TLV_API_NEW_VERSION: new versioning format * @IWL_UCODE_TLV_API_SCAN_TSF_REPORT: Scan start time reported in scan * iteration complete notification, and the timestamp reported for RX * received during scan, are reported in TSF of the mac specified in the * scan request. * @IWL_UCODE_TLV_API_TKIP_MIC_KEYS: This ucode supports version 2 of * ADD_MODIFY_STA_KEY_API_S_VER_2. * @IWL_UCODE_TLV_API_STA_TYPE: This ucode supports station type assignement. * @IWL_UCODE_TLV_API_NAN2_VER2: This ucode supports NAN API version 2 * @IWL_UCODE_TLV_API_NEW_RX_STATS: should new RX STATISTICS API be used * @IWL_UCODE_TLV_API_QUOTA_LOW_LATENCY: Quota command includes a field * indicating low latency direction. * @IWL_UCODE_TLV_API_DEPRECATE_TTAK: RX status flag TTAK ok (bit 7) is * deprecated. * @IWL_UCODE_TLV_API_ADAPTIVE_DWELL_V2: This ucode supports version 8 * of scan request: SCAN_REQUEST_CMD_UMAC_API_S_VER_8 * @IWL_UCODE_TLV_API_FRAG_EBS: This ucode supports fragmented EBS * @IWL_UCODE_TLV_API_REDUCE_TX_POWER: This ucode supports v5 of * the REDUCE_TX_POWER_CMD. 
* @IWL_UCODE_TLV_API_SHORT_BEACON_NOTIF: This ucode supports the short * version of the beacon notification. * @IWL_UCODE_TLV_API_BEACON_FILTER_V4: This ucode supports v4 of * BEACON_FILTER_CONFIG_API_S_VER_4. * @IWL_UCODE_TLV_API_REGULATORY_NVM_INFO: This ucode supports v4 of * REGULATORY_NVM_GET_INFO_RSP_API_S. * @IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ: This ucode supports v7 of * LOCATION_RANGE_REQ_CMD_API_S and v6 of LOCATION_RANGE_RESP_NTFY_API_S. * @IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS: This ucode supports v2 of * SCAN_OFFLOAD_PROFILE_MATCH_RESULTS_S and v3 of * SCAN_OFFLOAD_PROFILES_QUERY_RSP_S. * @IWL_UCODE_TLV_API_MBSSID_HE: This ucode supports v2 of * STA_CONTEXT_DOT11AX_API_S * @IWL_UCODE_TLV_API_SAR_TABLE_VER: This ucode supports different sar * version tables. * @IWL_UCODE_TLV_API_REDUCED_SCAN_CONFIG: This ucode supports v3 of * SCAN_CONFIG_DB_CMD_API_S. * * @NUM_IWL_UCODE_TLV_API: number of bits used */ enum iwl_ucode_tlv_api { /* API Set 0 */ IWL_UCODE_TLV_API_FRAGMENTED_SCAN = (__force iwl_ucode_tlv_api_t)8, IWL_UCODE_TLV_API_WIFI_MCC_UPDATE = (__force iwl_ucode_tlv_api_t)9, IWL_UCODE_TLV_API_LQ_SS_PARAMS = (__force iwl_ucode_tlv_api_t)18, IWL_UCODE_TLV_API_NEW_VERSION = (__force iwl_ucode_tlv_api_t)20, IWL_UCODE_TLV_API_SCAN_TSF_REPORT = (__force iwl_ucode_tlv_api_t)28, IWL_UCODE_TLV_API_TKIP_MIC_KEYS = (__force iwl_ucode_tlv_api_t)29, IWL_UCODE_TLV_API_STA_TYPE = (__force iwl_ucode_tlv_api_t)30, IWL_UCODE_TLV_API_NAN2_VER2 = (__force iwl_ucode_tlv_api_t)31, /* API Set 1 */ IWL_UCODE_TLV_API_ADAPTIVE_DWELL = (__force iwl_ucode_tlv_api_t)32, IWL_UCODE_TLV_API_OCE = (__force iwl_ucode_tlv_api_t)33, IWL_UCODE_TLV_API_NEW_BEACON_TEMPLATE = (__force iwl_ucode_tlv_api_t)34, IWL_UCODE_TLV_API_NEW_RX_STATS = (__force iwl_ucode_tlv_api_t)35, IWL_UCODE_TLV_API_WOWLAN_KEY_MATERIAL = (__force iwl_ucode_tlv_api_t)36, IWL_UCODE_TLV_API_QUOTA_LOW_LATENCY = (__force iwl_ucode_tlv_api_t)38, IWL_UCODE_TLV_API_DEPRECATE_TTAK = (__force iwl_ucode_tlv_api_t)41, IWL_UCODE_TLV_API_ADAPTIVE_DWELL_V2 = (__force iwl_ucode_tlv_api_t)42, IWL_UCODE_TLV_API_FRAG_EBS = (__force iwl_ucode_tlv_api_t)44, IWL_UCODE_TLV_API_REDUCE_TX_POWER = (__force iwl_ucode_tlv_api_t)45, IWL_UCODE_TLV_API_SHORT_BEACON_NOTIF = (__force iwl_ucode_tlv_api_t)46, IWL_UCODE_TLV_API_BEACON_FILTER_V4 = (__force iwl_ucode_tlv_api_t)47, IWL_UCODE_TLV_API_REGULATORY_NVM_INFO = (__force iwl_ucode_tlv_api_t)48, IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ = (__force iwl_ucode_tlv_api_t)49, IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS = (__force iwl_ucode_tlv_api_t)50, IWL_UCODE_TLV_API_MBSSID_HE = (__force iwl_ucode_tlv_api_t)52, IWL_UCODE_TLV_API_WOWLAN_TCP_SYN_WAKE = (__force iwl_ucode_tlv_api_t)53, IWL_UCODE_TLV_API_FTM_RTT_ACCURACY = (__force iwl_ucode_tlv_api_t)54, IWL_UCODE_TLV_API_SAR_TABLE_VER = (__force iwl_ucode_tlv_api_t)55, IWL_UCODE_TLV_API_REDUCED_SCAN_CONFIG = (__force iwl_ucode_tlv_api_t)56, IWL_UCODE_TLV_API_ADWELL_HB_DEF_N_AP = (__force iwl_ucode_tlv_api_t)57, IWL_UCODE_TLV_API_SCAN_EXT_CHAN_VER = (__force iwl_ucode_tlv_api_t)58, IWL_UCODE_TLV_API_BAND_IN_RX_DATA = (__force iwl_ucode_tlv_api_t)59, #ifdef __CHECKER__ /* sparse says it cannot increment the previous enum member */ #define NUM_IWL_UCODE_TLV_API 128 #else NUM_IWL_UCODE_TLV_API #endif }; typedef unsigned int __bitwise iwl_ucode_tlv_capa_t; /** * enum iwl_ucode_tlv_capa - ucode capabilities * @IWL_UCODE_TLV_CAPA_D0I3_SUPPORT: supports D0i3 * @IWL_UCODE_TLV_CAPA_LAR_SUPPORT: supports Location Aware Regulatory * @IWL_UCODE_TLV_CAPA_UMAC_SCAN: supports UMAC scan. 
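These numeric values are never tested directly; consumers go through the typed fw_has_api() helper defined in img.h later in this patch, which also keeps sparse happy about the __bitwise type. A hedged usage sketch (the wrapper function and its name are hypothetical):

static bool example_use_adaptive_dwell_v2(const struct iwl_fw *fw)
{
	/*
	 * bit 42 set means the firmware takes version 8 of the UMAC
	 * scan request, per the description above
	 */
	return fw_has_api(&fw->ucode_capa,
			  IWL_UCODE_TLV_API_ADAPTIVE_DWELL_V2);
}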
* @IWL_UCODE_TLV_CAPA_BEAMFORMER: supports Beamformer * @IWL_UCODE_TLV_CAPA_TDLS_SUPPORT: support basic TDLS functionality * @IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT: supports insertion of current * tx power value into TPC Report action frame and Link Measurement Report * action frame * @IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT: supports updating current * channel in DS parameter set element in probe requests. * @IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT: supports adding TPC Report IE in * probe requests. * @IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT: supports Quiet Period requests * @IWL_UCODE_TLV_CAPA_DQA_SUPPORT: supports dynamic queue allocation (DQA), * which also implies support for the scheduler configuration command * @IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH: supports TDLS channel switching * @IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG: Consolidated D3-D0 image * @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command * @IWL_UCODE_TLV_CAPA_CSUM_SUPPORT: supports TCP Checksum Offload * @IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS: support radio and beacon statistics * @IWL_UCODE_TLV_CAPA_P2P_SCM_UAPSD: supports U-APSD on p2p interface when it * is standalone or with a BSS station interface in the same binding. * @IWL_UCODE_TLV_CAPA_BT_COEX_PLCR: enabled BT Coex packet level co-running * @IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC: ucode supports LAR updates with different * sources for the MCC. This TLV bit is a future replacement to * IWL_UCODE_TLV_API_WIFI_MCC_UPDATE. When either is set, multi-source LAR * is supported. * @IWL_UCODE_TLV_CAPA_BT_COEX_RRC: supports BT Coex RRC * @IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT: supports gscan (no longer used) * @IWL_UCODE_TLV_CAPA_SOC_LATENCY_SUPPORT: the firmware supports setting * stabilization latency for SoCs. * @IWL_UCODE_TLV_CAPA_STA_PM_NOTIF: firmware will send STA PM notification * @IWL_UCODE_TLV_CAPA_TLC_OFFLOAD: firmware implements rate scaling algorithm * @IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA: firmware implements quota related * @IWL_UCODE_TLV_CAPA_COEX_SCHEMA_2: firmware implements Coex Schema 2 * IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD: firmware supports CSA command * @IWL_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS: firmware supports ultra high band * (6 GHz). * @IWL_UCODE_TLV_CAPA_CS_MODIFY: firmware supports modify action CSA command * @IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE: extended DTS measurement * @IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS: supports short PM timeouts * @IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT: supports bt-coex Multi-priority LUT * @IWL_UCODE_TLV_CAPA_CSA_AND_TBTT_OFFLOAD: the firmware supports CSA * countdown offloading. Beacon notifications are not sent to the host. * The fw also offloads TBTT alignment. * @IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION: firmware will decide on what * antenna the beacon should be transmitted * @IWL_UCODE_TLV_CAPA_BEACON_STORING: firmware will store the latest beacon * from AP and will send it upon d0i3 exit. * @IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V3: support LAR API V3 * @IWL_UCODE_TLV_CAPA_CT_KILL_BY_FW: firmware responsible for CT-kill * @IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT: supports temperature * thresholds reporting * @IWL_UCODE_TLV_CAPA_CTDP_SUPPORT: supports cTDP command * @IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED: supports usniffer enabled in * regular image. * @IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG: support getting more shared * memory addresses from the firmware. 
* @IWL_UCODE_TLV_CAPA_LQM_SUPPORT: supports Link Quality Measurement * @IWL_UCODE_TLV_CAPA_TX_POWER_ACK: reduced TX power API has larger * command size (command version 4) that supports toggling ACK TX * power reduction. * @IWL_UCODE_TLV_CAPA_D3_DEBUG: supports debug recording during D3 * @IWL_UCODE_TLV_CAPA_MCC_UPDATE_11AX_SUPPORT: MCC response support 11ax * capability. * @IWL_UCODE_TLV_CAPA_CSI_REPORTING: firmware is capable of being configured * to report the CSI information with (certain) RX frames * @IWL_UCODE_TLV_CAPA_FTM_CALIBRATED: has FTM calibrated and thus supports both * initiator and responder * @IWL_UCODE_TLV_CAPA_MLME_OFFLOAD: supports MLME offload * @IWL_UCODE_TLV_CAPA_PROTECTED_TWT: Supports protection of TWT action frames * @IWL_UCODE_TLV_CAPA_FW_RESET_HANDSHAKE: Supports the firmware handshake in * reset flow * @IWL_UCODE_TLV_CAPA_PASSIVE_6GHZ_SCAN: Support for passive scan on 6GHz PSC * channels even when these are not enabled. * @IWL_UCODE_TLV_CAPA_DUMP_COMPLETE_SUPPORT: Support for indicating dump collection * complete to FW. * * @NUM_IWL_UCODE_TLV_CAPA: number of bits used */ enum iwl_ucode_tlv_capa { /* set 0 */ IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = (__force iwl_ucode_tlv_capa_t)0, IWL_UCODE_TLV_CAPA_LAR_SUPPORT = (__force iwl_ucode_tlv_capa_t)1, IWL_UCODE_TLV_CAPA_UMAC_SCAN = (__force iwl_ucode_tlv_capa_t)2, IWL_UCODE_TLV_CAPA_BEAMFORMER = (__force iwl_ucode_tlv_capa_t)3, IWL_UCODE_TLV_CAPA_TDLS_SUPPORT = (__force iwl_ucode_tlv_capa_t)6, IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT = (__force iwl_ucode_tlv_capa_t)8, IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT = (__force iwl_ucode_tlv_capa_t)9, IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT = (__force iwl_ucode_tlv_capa_t)10, IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT = (__force iwl_ucode_tlv_capa_t)11, IWL_UCODE_TLV_CAPA_DQA_SUPPORT = (__force iwl_ucode_tlv_capa_t)12, IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH = (__force iwl_ucode_tlv_capa_t)13, IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG = (__force iwl_ucode_tlv_capa_t)17, IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT = (__force iwl_ucode_tlv_capa_t)18, IWL_UCODE_TLV_CAPA_CSUM_SUPPORT = (__force iwl_ucode_tlv_capa_t)21, IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS = (__force iwl_ucode_tlv_capa_t)22, IWL_UCODE_TLV_CAPA_P2P_SCM_UAPSD = (__force iwl_ucode_tlv_capa_t)26, IWL_UCODE_TLV_CAPA_BT_COEX_PLCR = (__force iwl_ucode_tlv_capa_t)28, IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC = (__force iwl_ucode_tlv_capa_t)29, IWL_UCODE_TLV_CAPA_BT_COEX_RRC = (__force iwl_ucode_tlv_capa_t)30, IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT = (__force iwl_ucode_tlv_capa_t)31, /* set 1 */ IWL_UCODE_TLV_CAPA_SOC_LATENCY_SUPPORT = (__force iwl_ucode_tlv_capa_t)37, IWL_UCODE_TLV_CAPA_STA_PM_NOTIF = (__force iwl_ucode_tlv_capa_t)38, IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT = (__force iwl_ucode_tlv_capa_t)39, IWL_UCODE_TLV_CAPA_CDB_SUPPORT = (__force iwl_ucode_tlv_capa_t)40, IWL_UCODE_TLV_CAPA_D0I3_END_FIRST = (__force iwl_ucode_tlv_capa_t)41, IWL_UCODE_TLV_CAPA_TLC_OFFLOAD = (__force iwl_ucode_tlv_capa_t)43, IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA = (__force iwl_ucode_tlv_capa_t)44, IWL_UCODE_TLV_CAPA_COEX_SCHEMA_2 = (__force iwl_ucode_tlv_capa_t)45, IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD = (__force iwl_ucode_tlv_capa_t)46, IWL_UCODE_TLV_CAPA_FTM_CALIBRATED = (__force iwl_ucode_tlv_capa_t)47, IWL_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS = (__force iwl_ucode_tlv_capa_t)48, IWL_UCODE_TLV_CAPA_CS_MODIFY = (__force iwl_ucode_tlv_capa_t)49, IWL_UCODE_TLV_CAPA_SET_LTR_GEN2 = (__force iwl_ucode_tlv_capa_t)50, IWL_UCODE_TLV_CAPA_SET_PPAG = (__force iwl_ucode_tlv_capa_t)52, 
IWL_UCODE_TLV_CAPA_TAS_CFG = (__force iwl_ucode_tlv_capa_t)53, IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD = (__force iwl_ucode_tlv_capa_t)54, IWL_UCODE_TLV_CAPA_PROTECTED_TWT = (__force iwl_ucode_tlv_capa_t)56, IWL_UCODE_TLV_CAPA_FW_RESET_HANDSHAKE = (__force iwl_ucode_tlv_capa_t)57, IWL_UCODE_TLV_CAPA_PASSIVE_6GHZ_SCAN = (__force iwl_ucode_tlv_capa_t)58, IWL_UCODE_TLV_CAPA_HIDDEN_6GHZ_SCAN = (__force iwl_ucode_tlv_capa_t)59, IWL_UCODE_TLV_CAPA_BROADCAST_TWT = (__force iwl_ucode_tlv_capa_t)60, IWL_UCODE_TLV_CAPA_COEX_HIGH_PRIO = (__force iwl_ucode_tlv_capa_t)61, IWL_UCODE_TLV_CAPA_RFIM_SUPPORT = (__force iwl_ucode_tlv_capa_t)62, IWL_UCODE_TLV_CAPA_BAID_ML_SUPPORT = (__force iwl_ucode_tlv_capa_t)63, /* set 2 */ IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE = (__force iwl_ucode_tlv_capa_t)64, IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS = (__force iwl_ucode_tlv_capa_t)65, IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT = (__force iwl_ucode_tlv_capa_t)67, IWL_UCODE_TLV_CAPA_MULTI_QUEUE_RX_SUPPORT = (__force iwl_ucode_tlv_capa_t)68, IWL_UCODE_TLV_CAPA_CSA_AND_TBTT_OFFLOAD = (__force iwl_ucode_tlv_capa_t)70, IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION = (__force iwl_ucode_tlv_capa_t)71, IWL_UCODE_TLV_CAPA_BEACON_STORING = (__force iwl_ucode_tlv_capa_t)72, IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V3 = (__force iwl_ucode_tlv_capa_t)73, IWL_UCODE_TLV_CAPA_CT_KILL_BY_FW = (__force iwl_ucode_tlv_capa_t)74, IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT = (__force iwl_ucode_tlv_capa_t)75, IWL_UCODE_TLV_CAPA_CTDP_SUPPORT = (__force iwl_ucode_tlv_capa_t)76, IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED = (__force iwl_ucode_tlv_capa_t)77, IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG = (__force iwl_ucode_tlv_capa_t)80, IWL_UCODE_TLV_CAPA_LQM_SUPPORT = (__force iwl_ucode_tlv_capa_t)81, IWL_UCODE_TLV_CAPA_TX_POWER_ACK = (__force iwl_ucode_tlv_capa_t)84, IWL_UCODE_TLV_CAPA_D3_DEBUG = (__force iwl_ucode_tlv_capa_t)87, IWL_UCODE_TLV_CAPA_LED_CMD_SUPPORT = (__force iwl_ucode_tlv_capa_t)88, IWL_UCODE_TLV_CAPA_MCC_UPDATE_11AX_SUPPORT = (__force iwl_ucode_tlv_capa_t)89, IWL_UCODE_TLV_CAPA_CSI_REPORTING = (__force iwl_ucode_tlv_capa_t)90, IWL_UCODE_TLV_CAPA_DBG_SUSPEND_RESUME_CMD_SUPP = (__force iwl_ucode_tlv_capa_t)92, IWL_UCODE_TLV_CAPA_DBG_BUF_ALLOC_CMD_SUPP = (__force iwl_ucode_tlv_capa_t)93, /* set 3 */ IWL_UCODE_TLV_CAPA_MLME_OFFLOAD = (__force iwl_ucode_tlv_capa_t)96, /* * @IWL_UCODE_TLV_CAPA_PSC_CHAN_SUPPORT: supports PSC channels */ IWL_UCODE_TLV_CAPA_PSC_CHAN_SUPPORT = (__force iwl_ucode_tlv_capa_t)98, IWL_UCODE_TLV_CAPA_BIGTK_SUPPORT = (__force iwl_ucode_tlv_capa_t)100, IWL_UCODE_TLV_CAPA_DRAM_FRAG_SUPPORT = (__force iwl_ucode_tlv_capa_t)104, IWL_UCODE_TLV_CAPA_DUMP_COMPLETE_SUPPORT = (__force iwl_ucode_tlv_capa_t)105, #ifdef __CHECKER__ /* sparse says it cannot increment the previous enum member */ #define NUM_IWL_UCODE_TLV_CAPA 128 #else NUM_IWL_UCODE_TLV_CAPA #endif }; /* The default calibrate table size if not specified by firmware file */ #define IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE 18 #define IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE 19 #define IWL_MAX_PHY_CALIBRATE_TBL_SIZE 253 /* The default max probe length if not specified by the firmware file */ #define IWL_DEFAULT_MAX_PROBE_LENGTH 200 /* * For 16.0 uCode and above, there is no differentiation between sections, * just an offset to the HW address. 
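The "set N" groupings above only reflect 32-bit words in the firmware file; on the driver side all capability numbers land in one bitmap sized by NUM_IWL_UCODE_TLV_CAPA (BITS_TO_LONGS(128): two unsigned longs on 64-bit, four on 32-bit). A hedged check sketch using the fw_has_capa() helper from img.h below; the wrapper function is illustrative:

static bool example_supports_dump_complete(const struct iwl_fw *fw)
{
	/* capability 105 lives in "set 3" (bit 9 of the fourth 32-bit word) */
	return fw_has_capa(&fw->ucode_capa,
			   IWL_UCODE_TLV_CAPA_DUMP_COMPLETE_SUPPORT);
}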
*/ #define CPU1_CPU2_SEPARATOR_SECTION 0xFFFFCCCC #define PAGING_SEPARATOR_SECTION 0xAAAABBBB /* uCode version contains 4 values: Major/Minor/API/Serial */ #define IWL_UCODE_MAJOR(ver) (((ver) & 0xFF000000) >> 24) #define IWL_UCODE_MINOR(ver) (((ver) & 0x00FF0000) >> 16) #define IWL_UCODE_API(ver) (((ver) & 0x0000FF00) >> 8) #define IWL_UCODE_SERIAL(ver) ((ver) & 0x000000FF) /** * struct iwl_tlv_calib_ctrl - Calibration control struct. * Sent as part of the phy configuration command. * @flow_trigger: bitmap for which calibrations to perform according to * flow triggers. * @event_trigger: bitmap for which calibrations to perform according to * event triggers. */ struct iwl_tlv_calib_ctrl { __le32 flow_trigger; __le32 event_trigger; } __packed; enum iwl_fw_phy_cfg { FW_PHY_CFG_RADIO_TYPE_POS = 0, FW_PHY_CFG_RADIO_TYPE = 0x3 << FW_PHY_CFG_RADIO_TYPE_POS, FW_PHY_CFG_RADIO_STEP_POS = 2, FW_PHY_CFG_RADIO_STEP = 0x3 << FW_PHY_CFG_RADIO_STEP_POS, FW_PHY_CFG_RADIO_DASH_POS = 4, FW_PHY_CFG_RADIO_DASH = 0x3 << FW_PHY_CFG_RADIO_DASH_POS, FW_PHY_CFG_TX_CHAIN_POS = 16, FW_PHY_CFG_TX_CHAIN = 0xf << FW_PHY_CFG_TX_CHAIN_POS, FW_PHY_CFG_RX_CHAIN_POS = 20, FW_PHY_CFG_RX_CHAIN = 0xf << FW_PHY_CFG_RX_CHAIN_POS, FW_PHY_CFG_CHAIN_SAD_POS = 23, FW_PHY_CFG_CHAIN_SAD_ENABLED = 0x1 << FW_PHY_CFG_CHAIN_SAD_POS, FW_PHY_CFG_CHAIN_SAD_ANT_A = 0x2 << FW_PHY_CFG_CHAIN_SAD_POS, FW_PHY_CFG_CHAIN_SAD_ANT_B = 0x4 << FW_PHY_CFG_CHAIN_SAD_POS, FW_PHY_CFG_SHARED_CLK = BIT(31), }; -#define IWL_UCODE_MAX_CS 1 - -/** - * struct iwl_fw_cipher_scheme - a cipher scheme supported by FW. - * @cipher: a cipher suite selector - * @flags: cipher scheme flags (currently reserved for a future use) - * @hdr_len: a size of MPDU security header - * @pn_len: a size of PN - * @pn_off: an offset of pn from the beginning of the security header - * @key_idx_off: an offset of key index byte in the security header - * @key_idx_mask: a bit mask of key_idx bits - * @key_idx_shift: bit shift needed to get key_idx - * @mic_len: mic length in bytes - * @hw_cipher: a HW cipher index used in host commands - */ -struct iwl_fw_cipher_scheme { - __le32 cipher; - u8 flags; - u8 hdr_len; - u8 pn_len; - u8 pn_off; - u8 key_idx_off; - u8 key_idx_mask; - u8 key_idx_shift; - u8 mic_len; - u8 hw_cipher; -} __packed; - enum iwl_fw_dbg_reg_operator { CSR_ASSIGN, CSR_SETBIT, CSR_CLEARBIT, PRPH_ASSIGN, PRPH_SETBIT, PRPH_CLEARBIT, INDIRECT_ASSIGN, INDIRECT_SETBIT, INDIRECT_CLEARBIT, PRPH_BLOCKBIT, }; /** * struct iwl_fw_dbg_reg_op - an operation on a register * * @op: &enum iwl_fw_dbg_reg_operator * @addr: offset of the register * @val: value */ struct iwl_fw_dbg_reg_op { u8 op; u8 reserved[3]; __le32 addr; __le32 val; } __packed; /** * enum iwl_fw_dbg_monitor_mode - available monitor recording modes * * @SMEM_MODE: monitor stores the data in SMEM * @EXTERNAL_MODE: monitor stores the data in allocated DRAM * @MARBH_MODE: monitor stores the data in MARBH buffer * @MIPI_MODE: monitor outputs the data through the MIPI interface */ enum iwl_fw_dbg_monitor_mode { SMEM_MODE = 0, EXTERNAL_MODE = 1, MARBH_MODE = 2, MIPI_MODE = 3, }; /** * struct iwl_fw_dbg_mem_seg_tlv - configures the debug data memory segments * * @data_type: the memory segment type to record * @ofs: the memory segment offset * @len: the memory segment length, in bytes * * This parses IWL_UCODE_TLV_FW_MEM_SEG */ struct iwl_fw_dbg_mem_seg_tlv { __le32 data_type; __le32 ofs; __le32 len; } __packed; /** * struct iwl_fw_dbg_dest_tlv_v1 - configures the destination of the debug data * * @version: version of the TLV - 
currently 0 * @monitor_mode: &enum iwl_fw_dbg_monitor_mode * @size_power: buffer size will be 2^(size_power + 11) * @base_reg: addr of the base addr register (PRPH) * @end_reg: addr of the end addr register (PRPH) * @write_ptr_reg: the addr of the reg of the write pointer * @wrap_count: the addr of the reg of the wrap_count * @base_shift: shift right of the base addr reg * @end_shift: shift right of the end addr reg * @reg_ops: array of registers operations * * This parses IWL_UCODE_TLV_FW_DBG_DEST */ struct iwl_fw_dbg_dest_tlv_v1 { u8 version; u8 monitor_mode; u8 size_power; u8 reserved; __le32 base_reg; __le32 end_reg; __le32 write_ptr_reg; __le32 wrap_count; u8 base_shift; u8 end_shift; struct iwl_fw_dbg_reg_op reg_ops[0]; } __packed; /* Mask of the register for defining the LDBG MAC2SMEM buffer SMEM size */ #define IWL_LDBG_M2S_BUF_SIZE_MSK 0x0fff0000 /* Mask of the register for defining the LDBG MAC2SMEM SMEM base address */ #define IWL_LDBG_M2S_BUF_BA_MSK 0x00000fff /* The smem buffer chunks are in units of 256 bits */ #define IWL_M2S_UNIT_SIZE 0x100 struct iwl_fw_dbg_dest_tlv { u8 version; u8 monitor_mode; u8 size_power; u8 reserved; __le32 cfg_reg; __le32 write_ptr_reg; __le32 wrap_count; u8 base_shift; u8 size_shift; struct iwl_fw_dbg_reg_op reg_ops[0]; } __packed; struct iwl_fw_dbg_conf_hcmd { u8 id; u8 reserved; __le16 len; u8 data[0]; } __packed; /** * enum iwl_fw_dbg_trigger_mode - triggers functionalities * * @IWL_FW_DBG_TRIGGER_START: when trigger occurs re-conf the dbg mechanism * @IWL_FW_DBG_TRIGGER_STOP: when trigger occurs pull the dbg data * @IWL_FW_DBG_TRIGGER_MONITOR_ONLY: when trigger occurs trigger is set to * collect only monitor data */ enum iwl_fw_dbg_trigger_mode { IWL_FW_DBG_TRIGGER_START = BIT(0), IWL_FW_DBG_TRIGGER_STOP = BIT(1), IWL_FW_DBG_TRIGGER_MONITOR_ONLY = BIT(2), }; /** * enum iwl_fw_dbg_trigger_flags - the flags supported by wrt triggers * @IWL_FW_DBG_FORCE_RESTART: force a firmware restart */ enum iwl_fw_dbg_trigger_flags { IWL_FW_DBG_FORCE_RESTART = BIT(0), }; /** * enum iwl_fw_dbg_trigger_vif_type - define the VIF type for a trigger * @IWL_FW_DBG_CONF_VIF_ANY: any vif type * @IWL_FW_DBG_CONF_VIF_IBSS: IBSS mode * @IWL_FW_DBG_CONF_VIF_STATION: BSS mode * @IWL_FW_DBG_CONF_VIF_AP: AP mode * @IWL_FW_DBG_CONF_VIF_P2P_CLIENT: P2P Client mode * @IWL_FW_DBG_CONF_VIF_P2P_GO: P2P GO mode * @IWL_FW_DBG_CONF_VIF_P2P_DEVICE: P2P device */ enum iwl_fw_dbg_trigger_vif_type { IWL_FW_DBG_CONF_VIF_ANY = NL80211_IFTYPE_UNSPECIFIED, IWL_FW_DBG_CONF_VIF_IBSS = NL80211_IFTYPE_ADHOC, IWL_FW_DBG_CONF_VIF_STATION = NL80211_IFTYPE_STATION, IWL_FW_DBG_CONF_VIF_AP = NL80211_IFTYPE_AP, IWL_FW_DBG_CONF_VIF_P2P_CLIENT = NL80211_IFTYPE_P2P_CLIENT, IWL_FW_DBG_CONF_VIF_P2P_GO = NL80211_IFTYPE_P2P_GO, IWL_FW_DBG_CONF_VIF_P2P_DEVICE = NL80211_IFTYPE_P2P_DEVICE, }; /** * struct iwl_fw_dbg_trigger_tlv - a TLV that describes the trigger * @id: &enum iwl_fw_dbg_trigger * @vif_type: &enum iwl_fw_dbg_trigger_vif_type * @stop_conf_ids: bitmap of configurations this trigger relates to. * if the mode is %IWL_FW_DBG_TRIGGER_STOP, then if the bit corresponding * to the currently running configuration is set, the data should be * collected. * @stop_delay: how many milliseconds to wait before collecting the data * after the STOP trigger fires. * @mode: &enum iwl_fw_dbg_trigger_mode - can be stop / start of both * @start_conf_id: if mode is %IWL_FW_DBG_TRIGGER_START, this defines what * configuration should be applied when the triggers kicks in. * @occurrences: number of occurrences. 
0 means the trigger will never fire. * @trig_dis_ms: the time, in milliseconds, after an occurrence of this * trigger in which another occurrence should be ignored. * @flags: &enum iwl_fw_dbg_trigger_flags */ struct iwl_fw_dbg_trigger_tlv { __le32 id; __le32 vif_type; __le32 stop_conf_ids; __le32 stop_delay; u8 mode; u8 start_conf_id; __le16 occurrences; __le16 trig_dis_ms; u8 flags; u8 reserved[5]; u8 data[0]; } __packed; #define FW_DBG_START_FROM_ALIVE 0 #define FW_DBG_CONF_MAX 32 #define FW_DBG_INVALID 0xff /** * struct iwl_fw_dbg_trigger_missed_bcon - configures trigger for missed beacons * @stop_consec_missed_bcon: stop recording if threshold is crossed. * @stop_consec_missed_bcon_since_rx: stop recording if threshold is crossed. * @start_consec_missed_bcon: start recording if threshold is crossed. * @start_consec_missed_bcon_since_rx: start recording if threshold is crossed. * @reserved1: reserved * @reserved2: reserved */ struct iwl_fw_dbg_trigger_missed_bcon { __le32 stop_consec_missed_bcon; __le32 stop_consec_missed_bcon_since_rx; __le32 reserved2[2]; __le32 start_consec_missed_bcon; __le32 start_consec_missed_bcon_since_rx; __le32 reserved1[2]; } __packed; /** * struct iwl_fw_dbg_trigger_cmd - configures trigger for messages from FW. * cmds: the list of commands to trigger the collection on */ struct iwl_fw_dbg_trigger_cmd { struct cmd { u8 cmd_id; u8 group_id; } __packed cmds[16]; } __packed; /** * iwl_fw_dbg_trigger_stats - configures trigger for statistics * @stop_offset: the offset of the value to be monitored * @stop_threshold: the threshold above which to collect * @start_offset: the offset of the value to be monitored * @start_threshold: the threshold above which to start recording */ struct iwl_fw_dbg_trigger_stats { __le32 stop_offset; __le32 stop_threshold; __le32 start_offset; __le32 start_threshold; } __packed; /** * struct iwl_fw_dbg_trigger_low_rssi - trigger for low beacon RSSI * @rssi: RSSI value to trigger at */ struct iwl_fw_dbg_trigger_low_rssi { __le32 rssi; } __packed; /** * struct iwl_fw_dbg_trigger_mlme - configures trigger for mlme events * @stop_auth_denied: number of denied authentication to collect * @stop_auth_timeout: number of authentication timeout to collect * @stop_rx_deauth: number of Rx deauth before to collect * @stop_tx_deauth: number of Tx deauth before to collect * @stop_assoc_denied: number of denied association to collect * @stop_assoc_timeout: number of association timeout to collect * @stop_connection_loss: number of connection loss to collect * @start_auth_denied: number of denied authentication to start recording * @start_auth_timeout: number of authentication timeout to start recording * @start_rx_deauth: number of Rx deauth to start recording * @start_tx_deauth: number of Tx deauth to start recording * @start_assoc_denied: number of denied association to start recording * @start_assoc_timeout: number of association timeout to start recording * @start_connection_loss: number of connection loss to start recording */ struct iwl_fw_dbg_trigger_mlme { u8 stop_auth_denied; u8 stop_auth_timeout; u8 stop_rx_deauth; u8 stop_tx_deauth; u8 stop_assoc_denied; u8 stop_assoc_timeout; u8 stop_connection_loss; u8 reserved; u8 start_auth_denied; u8 start_auth_timeout; u8 start_rx_deauth; u8 start_tx_deauth; u8 start_assoc_denied; u8 start_assoc_timeout; u8 start_connection_loss; u8 reserved2; } __packed; /** * struct iwl_fw_dbg_trigger_txq_timer - configures the Tx queue's timer * @command_queue: timeout for the command queue in ms * @bss: timeout 
for the queues of a BSS (except for TDLS queues) in ms * @softap: timeout for the queues of a softAP in ms * @p2p_go: timeout for the queues of a P2P GO in ms * @p2p_client: timeout for the queues of a P2P client in ms * @p2p_device: timeout for the queues of a P2P device in ms * @ibss: timeout for the queues of an IBSS in ms * @tdls: timeout for the queues of a TDLS station in ms */ struct iwl_fw_dbg_trigger_txq_timer { __le32 command_queue; __le32 bss; __le32 softap; __le32 p2p_go; __le32 p2p_client; __le32 p2p_device; __le32 ibss; __le32 tdls; __le32 reserved[4]; } __packed; /** * struct iwl_fw_dbg_trigger_time_event - configures a time event trigger * time_Events: a list of tuples . The driver will issue a * trigger each time a time event notification that relates to time event * id with one of the actions in the bitmap is received and * BIT(notif->status) is set in status_bitmap. * */ struct iwl_fw_dbg_trigger_time_event { struct { __le32 id; __le32 action_bitmap; __le32 status_bitmap; } __packed time_events[16]; } __packed; /** * struct iwl_fw_dbg_trigger_ba - configures BlockAck related trigger * rx_ba_start: tid bitmap to configure on what tid the trigger should occur * when an Rx BlockAck session is started. * rx_ba_stop: tid bitmap to configure on what tid the trigger should occur * when an Rx BlockAck session is stopped. * tx_ba_start: tid bitmap to configure on what tid the trigger should occur * when a Tx BlockAck session is started. * tx_ba_stop: tid bitmap to configure on what tid the trigger should occur * when a Tx BlockAck session is stopped. * rx_bar: tid bitmap to configure on what tid the trigger should occur * when a BAR is received (for a Tx BlockAck session). * tx_bar: tid bitmap to configure on what tid the trigger should occur * when a BAR is send (for an Rx BlocAck session). * frame_timeout: tid bitmap to configure on what tid the trigger should occur * when a frame times out in the reordering buffer. */ struct iwl_fw_dbg_trigger_ba { __le16 rx_ba_start; __le16 rx_ba_stop; __le16 tx_ba_start; __le16 tx_ba_stop; __le16 rx_bar; __le16 tx_bar; __le16 frame_timeout; } __packed; /** * struct iwl_fw_dbg_trigger_tdls - configures trigger for TDLS events. * @action_bitmap: the TDLS action to trigger the collection upon * @peer_mode: trigger on specific peer or all * @peer: the TDLS peer to trigger the collection on */ struct iwl_fw_dbg_trigger_tdls { u8 action_bitmap; u8 peer_mode; u8 peer[ETH_ALEN]; u8 reserved[4]; } __packed; /** * struct iwl_fw_dbg_trigger_tx_status - configures trigger for tx response * status. * @statuses: the list of statuses to trigger the collection on */ struct iwl_fw_dbg_trigger_tx_status { struct tx_status { u8 status; u8 reserved[3]; } __packed statuses[16]; __le32 reserved[2]; } __packed; /** * struct iwl_fw_dbg_conf_tlv - a TLV that describes a debug configuration. * @id: conf id * @usniffer: should the uSniffer image be used * @num_of_hcmds: how many HCMDs to send are present here * @hcmd: a variable length host command to be sent to apply the configuration. * If there is more than one HCMD to send, they will appear one after the * other and be sent in the order that they appear in. * This parses IWL_UCODE_TLV_FW_DBG_CONF. The user can add up-to * %FW_DBG_CONF_MAX configuration per run. 
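Because each hcmd entry carries its own length, the hcmds of a configuration are not an indexable array; the next entry starts right after the previous entry's data, as the wording above ("one after the other") implies. A minimal walk over the chain (struct iwl_fw_dbg_conf_tlv is defined just below; the function name is invented for illustration):

static void example_walk_conf_hcmds(const struct iwl_fw_dbg_conf_tlv *conf)
{
	const struct iwl_fw_dbg_conf_hcmd *hcmd = &conf->hcmd;
	int i;

	for (i = 0; i < conf->num_of_hcmds; i++) {
		/* hcmd->id with le16_to_cpu(hcmd->len) bytes of payload */
		hcmd = (const void *)((const u8 *)hcmd + sizeof(*hcmd) +
				      le16_to_cpu(hcmd->len));
	}
}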
*/ struct iwl_fw_dbg_conf_tlv { u8 id; u8 usniffer; u8 reserved; u8 num_of_hcmds; struct iwl_fw_dbg_conf_hcmd hcmd; } __packed; #define IWL_FW_CMD_VER_UNKNOWN 99 /** * struct iwl_fw_cmd_version - firmware command version entry * @cmd: command ID * @group: group ID * @cmd_ver: command version * @notif_ver: notification version */ struct iwl_fw_cmd_version { u8 cmd; u8 group; u8 cmd_ver; u8 notif_ver; } __packed; struct iwl_fw_tcm_error_addr { __le32 addr; }; /* FW_TLV_TCM_ERROR_INFO_ADDRS_S */ struct iwl_fw_dump_exclude { __le32 addr, size; }; static inline size_t _iwl_tlv_array_len(const struct iwl_ucode_tlv *tlv, size_t fixed_size, size_t var_size) { size_t var_len = le32_to_cpu(tlv->length) - fixed_size; if (WARN_ON(var_len % var_size)) return 0; return var_len / var_size; } #define iwl_tlv_array_len(_tlv_ptr, _struct_ptr, _memb) \ _iwl_tlv_array_len((_tlv_ptr), sizeof(*(_struct_ptr)), \ sizeof(_struct_ptr->_memb[0])) #endif /* __iwl_fw_file_h__ */ diff --git a/sys/contrib/dev/iwlwifi/fw/img.h b/sys/contrib/dev/iwlwifi/fw/img.h index 89869edb23e8..f878ac508801 100644 --- a/sys/contrib/dev/iwlwifi/fw/img.h +++ b/sys/contrib/dev/iwlwifi/fw/img.h @@ -1,286 +1,274 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright (C) 2005-2014, 2018-2021 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016 Intel Deutschland GmbH */ #ifndef __iwl_fw_img_h__ #define __iwl_fw_img_h__ #include #include "api/dbg-tlv.h" #include "file.h" #include "error-dump.h" /** * enum iwl_ucode_type * * The type of ucode. * * @IWL_UCODE_REGULAR: Normal runtime ucode * @IWL_UCODE_INIT: Initial ucode * @IWL_UCODE_WOWLAN: Wake on Wireless enabled ucode * @IWL_UCODE_REGULAR_USNIFFER: Normal runtime ucode when using usniffer image */ enum iwl_ucode_type { IWL_UCODE_REGULAR, IWL_UCODE_INIT, IWL_UCODE_WOWLAN, IWL_UCODE_REGULAR_USNIFFER, IWL_UCODE_TYPE_MAX, }; /* * enumeration of ucode section. * This enumeration is used directly for older firmware (before 16.0). * For new firmware, there can be up to 4 sections (see below) but the * first one packaged into the firmware file is the DATA section and * some debugging code accesses that. 
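Looking back at the _iwl_tlv_array_len()/iwl_tlv_array_len() helpers at the end of file.h above: they turn a TLV payload into an entry count after an optional fixed-size prefix, WARNing and returning 0 if the length is not an exact multiple. One plausible use is counting IWL_UCODE_TLV_CMD_VERSIONS records, which have no fixed prefix, so the fixed size is 0 (the wrapper below is illustrative, not the driver's actual code):

static size_t example_count_cmd_versions(const struct iwl_ucode_tlv *tlv)
{
	const struct iwl_fw_cmd_version *vers = (const void *)tlv->data;

	/* payload must be a whole number of 4-byte iwl_fw_cmd_version entries */
	return _iwl_tlv_array_len(tlv, 0, sizeof(*vers));
}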
*/ enum iwl_ucode_sec { IWL_UCODE_SECTION_DATA, IWL_UCODE_SECTION_INST, }; struct iwl_ucode_capabilities { u32 max_probe_length; u32 n_scan_channels; u32 standard_phy_calibration_size; u32 flags; u32 error_log_addr; u32 error_log_size; u32 num_stations; unsigned long _api[BITS_TO_LONGS(NUM_IWL_UCODE_TLV_API)]; unsigned long _capa[BITS_TO_LONGS(NUM_IWL_UCODE_TLV_CAPA)]; const struct iwl_fw_cmd_version *cmd_versions; u32 n_cmd_versions; }; static inline bool fw_has_api(const struct iwl_ucode_capabilities *capabilities, iwl_ucode_tlv_api_t api) { return test_bit((__force long)api, capabilities->_api); } static inline bool fw_has_capa(const struct iwl_ucode_capabilities *capabilities, iwl_ucode_tlv_capa_t capa) { return test_bit((__force long)capa, capabilities->_capa); } /* one for each uCode image (inst/data, init/runtime/wowlan) */ struct fw_desc { const void *data; /* vmalloc'ed data */ u32 len; /* size in bytes */ u32 offset; /* offset in the device */ }; struct fw_img { struct fw_desc *sec; int num_sec; bool is_dual_cpus; u32 paging_mem_size; }; /* * Block paging calculations */ #define PAGE_2_EXP_SIZE 12 /* 4K == 2^12 */ #define FW_PAGING_SIZE BIT(PAGE_2_EXP_SIZE) /* page size is 4KB */ #define PAGE_PER_GROUP_2_EXP_SIZE 3 /* 8 pages per group */ #define NUM_OF_PAGE_PER_GROUP BIT(PAGE_PER_GROUP_2_EXP_SIZE) /* don't change, support only 32KB size */ #define PAGING_BLOCK_SIZE (NUM_OF_PAGE_PER_GROUP * FW_PAGING_SIZE) /* 32K == 2^15 */ #define BLOCK_2_EXP_SIZE (PAGE_2_EXP_SIZE + PAGE_PER_GROUP_2_EXP_SIZE) /* * Image paging calculations */ #define BLOCK_PER_IMAGE_2_EXP_SIZE 5 /* 2^5 == 32 blocks per image */ #define NUM_OF_BLOCK_PER_IMAGE BIT(BLOCK_PER_IMAGE_2_EXP_SIZE) /* maximum image size 1024KB */ #define MAX_PAGING_IMAGE_SIZE (NUM_OF_BLOCK_PER_IMAGE * PAGING_BLOCK_SIZE) /* Virtual address signature */ #define PAGING_ADDR_SIG 0xAA000000 #define PAGING_CMD_IS_SECURED BIT(9) #define PAGING_CMD_IS_ENABLED BIT(8) #define PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS 0 #define PAGING_TLV_SECURE_MASK 1 /* FW MSB Mask for regions/cache_control */ #define FW_ADDR_CACHE_CONTROL 0xC0000000UL /** * struct iwl_fw_paging * @fw_paging_phys: page phy pointer * @fw_paging_block: pointer to the allocated block * @fw_paging_size: page size * @fw_offs: offset in the device */ struct iwl_fw_paging { dma_addr_t fw_paging_phys; struct page *fw_paging_block; u32 fw_paging_size; u32 fw_offs; }; -/** - * struct iwl_fw_cscheme_list - a cipher scheme list - * @size: a number of entries - * @cs: cipher scheme entries - */ -struct iwl_fw_cscheme_list { - u8 size; - struct iwl_fw_cipher_scheme cs[]; -} __packed; - /** * enum iwl_fw_type - iwlwifi firmware type * @IWL_FW_DVM: DVM firmware * @IWL_FW_MVM: MVM firmware */ enum iwl_fw_type { IWL_FW_DVM, IWL_FW_MVM, }; /** * struct iwl_fw_dbg - debug data * * @dest_tlv: points to debug destination TLV (typically SRAM or DRAM) * @n_dest_reg: num of reg_ops in dest_tlv * @conf_tlv: array of pointers to configuration HCMDs * @trigger_tlv: array of pointers to triggers TLVs * @trigger_tlv_len: lengths of the @dbg_trigger_tlv entries * @mem_tlv: Runtime addresses to dump * @n_mem_tlv: number of runtime addresses * @dump_mask: bitmask of dump regions */ struct iwl_fw_dbg { struct iwl_fw_dbg_dest_tlv_v1 *dest_tlv; u8 n_dest_reg; struct iwl_fw_dbg_conf_tlv *conf_tlv[FW_DBG_CONF_MAX]; struct iwl_fw_dbg_trigger_tlv *trigger_tlv[FW_DBG_TRIGGER_MAX]; size_t trigger_tlv_len[FW_DBG_TRIGGER_MAX]; struct iwl_fw_dbg_mem_seg_tlv *mem_tlv; size_t n_mem_tlv; u32 dump_mask; }; struct iwl_dump_exclude 
{ u32 addr, size; }; /** * struct iwl_fw - variables associated with the firmware * * @ucode_ver: ucode version from the ucode file * @fw_version: firmware version string * @img: ucode image like ucode_rt, ucode_init, ucode_wowlan. * @iml_len: length of the image loader image * @iml: image loader fw image * @ucode_capa: capabilities parsed from the ucode file. * @enhance_sensitivity_table: device can do enhanced sensitivity. * @init_evtlog_ptr: event log offset for init ucode. * @init_evtlog_size: event log size for init ucode. * @init_errlog_ptr: error log offfset for init ucode. * @inst_evtlog_ptr: event log offset for runtime ucode. * @inst_evtlog_size: event log size for runtime ucode. * @inst_errlog_ptr: error log offfset for runtime ucode. * @type: firmware type (&enum iwl_fw_type) - * @cipher_scheme: optional external cipher scheme. * @human_readable: human readable version * we get the ALIVE from the uCode * @phy_integration_ver: PHY integration version string * @phy_integration_ver_len: length of @phy_integration_ver * @dump_excl: image dump exclusion areas for RT image * @dump_excl_wowlan: image dump exclusion areas for WoWLAN image */ struct iwl_fw { u32 ucode_ver; char fw_version[64]; /* ucode images */ struct fw_img img[IWL_UCODE_TYPE_MAX]; size_t iml_len; u8 *iml; struct iwl_ucode_capabilities ucode_capa; bool enhance_sensitivity_table; u32 init_evtlog_ptr, init_evtlog_size, init_errlog_ptr; u32 inst_evtlog_ptr, inst_evtlog_size, inst_errlog_ptr; struct iwl_tlv_calib_ctrl default_calib[IWL_UCODE_TYPE_MAX]; u32 phy_config; u8 valid_tx_ant; u8 valid_rx_ant; enum iwl_fw_type type; - struct iwl_fw_cipher_scheme cs[IWL_UCODE_MAX_CS]; u8 human_readable[FW_VER_HUMAN_READABLE_SZ]; struct iwl_fw_dbg dbg; u8 *phy_integration_ver; u32 phy_integration_ver_len; struct iwl_dump_exclude dump_excl[2], dump_excl_wowlan[2]; }; static inline const char *get_fw_dbg_mode_string(int mode) { switch (mode) { case SMEM_MODE: return "SMEM"; case EXTERNAL_MODE: return "EXTERNAL_DRAM"; case MARBH_MODE: return "MARBH"; case MIPI_MODE: return "MIPI"; default: return "UNKNOWN"; } } static inline bool iwl_fw_dbg_conf_usniffer(const struct iwl_fw *fw, u8 id) { const struct iwl_fw_dbg_conf_tlv *conf_tlv = fw->dbg.conf_tlv[id]; if (!conf_tlv) return false; return conf_tlv->usniffer; } static inline const struct fw_img * iwl_get_ucode_image(const struct iwl_fw *fw, enum iwl_ucode_type ucode_type) { if (ucode_type >= IWL_UCODE_TYPE_MAX) return NULL; return &fw->img[ucode_type]; } u8 iwl_fw_lookup_cmd_ver(const struct iwl_fw *fw, u32 cmd_id, u8 def); u8 iwl_fw_lookup_notif_ver(const struct iwl_fw *fw, u8 grp, u8 cmd, u8 def); const char *iwl_fw_lookup_assert_desc(u32 num); #define FW_SYSASSERT_CPU_MASK 0xf0000000 #define FW_SYSASSERT_PNVM_MISSING 0x0010070d #endif /* __iwl_fw_img_h__ */ diff --git a/sys/contrib/dev/iwlwifi/fw/rs.c b/sys/contrib/dev/iwlwifi/fw/rs.c index a21c3befd93b..a835214611ce 100644 --- a/sys/contrib/dev/iwlwifi/fw/rs.c +++ b/sys/contrib/dev/iwlwifi/fw/rs.c @@ -1,252 +1,255 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2021 Intel Corporation */ #include #include "fw/api/rs.h" #include "iwl-drv.h" #include "iwl-config.h" #define IWL_DECLARE_RATE_INFO(r) \ [IWL_RATE_##r##M_INDEX] = IWL_RATE_##r##M_PLCP /* * Translate from fw_rate_index (IWL_RATE_XXM_INDEX) to PLCP * */ static const u8 fw_rate_idx_to_plcp[IWL_RATE_COUNT] = { IWL_DECLARE_RATE_INFO(1), IWL_DECLARE_RATE_INFO(2), IWL_DECLARE_RATE_INFO(5), IWL_DECLARE_RATE_INFO(11), IWL_DECLARE_RATE_INFO(6), 
IWL_DECLARE_RATE_INFO(9), IWL_DECLARE_RATE_INFO(12), IWL_DECLARE_RATE_INFO(18), IWL_DECLARE_RATE_INFO(24), IWL_DECLARE_RATE_INFO(36), IWL_DECLARE_RATE_INFO(48), IWL_DECLARE_RATE_INFO(54), }; /* mbps, mcs */ static const struct iwl_rate_mcs_info rate_mcs[IWL_RATE_COUNT] = { { "1", "BPSK DSSS"}, { "2", "QPSK DSSS"}, {"5.5", "BPSK CCK"}, { "11", "QPSK CCK"}, { "6", "BPSK 1/2"}, { "9", "BPSK 1/2"}, { "12", "QPSK 1/2"}, { "18", "QPSK 3/4"}, { "24", "16QAM 1/2"}, { "36", "16QAM 3/4"}, { "48", "64QAM 2/3"}, { "54", "64QAM 3/4"}, { "60", "64QAM 5/6"}, }; static const char * const ant_name[] = { [ANT_NONE] = "None", [ANT_A] = "A", [ANT_B] = "B", [ANT_AB] = "AB", }; static const char * const pretty_bw[] = { "20Mhz", "40Mhz", "80Mhz", "160 Mhz", "320Mhz", }; u8 iwl_fw_rate_idx_to_plcp(int idx) { return fw_rate_idx_to_plcp[idx]; } IWL_EXPORT_SYMBOL(iwl_fw_rate_idx_to_plcp); const struct iwl_rate_mcs_info *iwl_rate_mcs(int idx) { return &rate_mcs[idx]; } IWL_EXPORT_SYMBOL(iwl_rate_mcs); const char *iwl_rs_pretty_ant(u8 ant) { if (ant >= ARRAY_SIZE(ant_name)) return "UNKNOWN"; return ant_name[ant]; } IWL_EXPORT_SYMBOL(iwl_rs_pretty_ant); const char *iwl_rs_pretty_bw(int bw) { if (bw >= ARRAY_SIZE(pretty_bw)) return "unknown bw"; return pretty_bw[bw]; } IWL_EXPORT_SYMBOL(iwl_rs_pretty_bw); +static u32 iwl_legacy_rate_to_fw_idx(u32 rate_n_flags) +{ + int rate = rate_n_flags & RATE_LEGACY_RATE_MSK_V1; + int idx; + bool ofdm = !(rate_n_flags & RATE_MCS_CCK_MSK_V1); + int offset = ofdm ? IWL_FIRST_OFDM_RATE : 0; + int last = ofdm ? IWL_RATE_COUNT_LEGACY : IWL_FIRST_OFDM_RATE; + + for (idx = offset; idx < last; idx++) + if (iwl_fw_rate_idx_to_plcp(idx) == rate) + return idx - offset; + return IWL_RATE_INVALID; +} + u32 iwl_new_rate_from_v1(u32 rate_v1) { u32 rate_v2 = 0; u32 dup = 0; if (rate_v1 == 0) return rate_v1; /* convert rate */ if (rate_v1 & RATE_MCS_HT_MSK_V1) { u32 nss = 0; rate_v2 |= RATE_MCS_HT_MSK; rate_v2 |= rate_v1 & RATE_HT_MCS_RATE_CODE_MSK_V1; nss = (rate_v1 & RATE_HT_MCS_MIMO2_MSK) >> RATE_HT_MCS_NSS_POS_V1; rate_v2 |= nss << RATE_MCS_NSS_POS; } else if (rate_v1 & RATE_MCS_VHT_MSK_V1 || rate_v1 & RATE_MCS_HE_MSK_V1) { rate_v2 |= rate_v1 & RATE_VHT_MCS_RATE_CODE_MSK; rate_v2 |= rate_v1 & RATE_VHT_MCS_MIMO2_MSK; if (rate_v1 & RATE_MCS_HE_MSK_V1) { u32 he_type_bits = rate_v1 & RATE_MCS_HE_TYPE_MSK_V1; u32 he_type = he_type_bits >> RATE_MCS_HE_TYPE_POS_V1; u32 he_106t = (rate_v1 & RATE_MCS_HE_106T_MSK_V1) >> RATE_MCS_HE_106T_POS_V1; u32 he_gi_ltf = (rate_v1 & RATE_MCS_HE_GI_LTF_MSK_V1) >> RATE_MCS_HE_GI_LTF_POS; if ((he_type_bits == RATE_MCS_HE_TYPE_SU || he_type_bits == RATE_MCS_HE_TYPE_EXT_SU) && he_gi_ltf == RATE_MCS_HE_SU_4_LTF) /* the new rate have an additional bit to * represent the value 4 rather then using SGI * bit for this purpose - as it was done in the old * rate */ he_gi_ltf += (rate_v1 & RATE_MCS_SGI_MSK_V1) >> RATE_MCS_SGI_POS_V1; rate_v2 |= he_gi_ltf << RATE_MCS_HE_GI_LTF_POS; rate_v2 |= he_type << RATE_MCS_HE_TYPE_POS; rate_v2 |= he_106t << RATE_MCS_HE_106T_POS; rate_v2 |= rate_v1 & RATE_HE_DUAL_CARRIER_MODE_MSK; rate_v2 |= RATE_MCS_HE_MSK; } else { rate_v2 |= RATE_MCS_VHT_MSK; } /* if legacy format */ } else { u32 legacy_rate = iwl_legacy_rate_to_fw_idx(rate_v1); - WARN_ON(legacy_rate < 0); + if (WARN_ON_ONCE(legacy_rate == IWL_RATE_INVALID)) + legacy_rate = (rate_v1 & RATE_MCS_CCK_MSK_V1) ? 
+ IWL_FIRST_CCK_RATE : IWL_FIRST_OFDM_RATE; + rate_v2 |= legacy_rate; if (!(rate_v1 & RATE_MCS_CCK_MSK_V1)) rate_v2 |= RATE_MCS_LEGACY_OFDM_MSK; } /* convert flags */ if (rate_v1 & RATE_MCS_LDPC_MSK_V1) rate_v2 |= RATE_MCS_LDPC_MSK; rate_v2 |= (rate_v1 & RATE_MCS_CHAN_WIDTH_MSK_V1) | (rate_v1 & RATE_MCS_ANT_AB_MSK) | (rate_v1 & RATE_MCS_STBC_MSK) | (rate_v1 & RATE_MCS_BF_MSK); dup = (rate_v1 & RATE_MCS_DUP_MSK_V1) >> RATE_MCS_DUP_POS_V1; if (dup) { rate_v2 |= RATE_MCS_DUP_MSK; rate_v2 |= dup << RATE_MCS_CHAN_WIDTH_POS; } if ((!(rate_v1 & RATE_MCS_HE_MSK_V1)) && (rate_v1 & RATE_MCS_SGI_MSK_V1)) rate_v2 |= RATE_MCS_SGI_MSK; return rate_v2; } IWL_EXPORT_SYMBOL(iwl_new_rate_from_v1); -u32 iwl_legacy_rate_to_fw_idx(u32 rate_n_flags) -{ - int rate = rate_n_flags & RATE_LEGACY_RATE_MSK_V1; - int idx; - bool ofdm = !(rate_n_flags & RATE_MCS_CCK_MSK_V1); - int offset = ofdm ? IWL_FIRST_OFDM_RATE : 0; - int last = ofdm ? IWL_RATE_COUNT_LEGACY : IWL_FIRST_OFDM_RATE; - - for (idx = offset; idx < last; idx++) - if (iwl_fw_rate_idx_to_plcp(idx) == rate) - return idx - offset; - return -1; -} - int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate) { char *type; u8 mcs = 0, nss = 0; u8 ant = (rate & RATE_MCS_ANT_AB_MSK) >> RATE_MCS_ANT_POS; u32 bw = (rate & RATE_MCS_CHAN_WIDTH_MSK) >> RATE_MCS_CHAN_WIDTH_POS; u32 format = rate & RATE_MCS_MOD_TYPE_MSK; bool sgi; if (format == RATE_MCS_CCK_MSK || format == RATE_MCS_LEGACY_OFDM_MSK) { int legacy_rate = rate & RATE_LEGACY_RATE_MSK; int index = format == RATE_MCS_CCK_MSK ? legacy_rate : legacy_rate + IWL_FIRST_OFDM_RATE; return scnprintf(buf, bufsz, "Legacy | ANT: %s Rate: %s Mbps", iwl_rs_pretty_ant(ant), index == IWL_RATE_INVALID ? "BAD" : iwl_rate_mcs(index)->mbps); } if (format == RATE_MCS_VHT_MSK) type = "VHT"; else if (format == RATE_MCS_HT_MSK) type = "HT"; else if (format == RATE_MCS_HE_MSK) type = "HE"; else type = "Unknown"; /* shouldn't happen */ mcs = format == RATE_MCS_HT_MSK ? RATE_HT_MCS_INDEX(rate) : rate & RATE_MCS_CODE_MSK; nss = ((rate & RATE_MCS_NSS_MSK) >> RATE_MCS_NSS_POS) + 1; sgi = format == RATE_MCS_HE_MSK ? iwl_he_is_sgi(rate) : rate & RATE_MCS_SGI_MSK; return scnprintf(buf, bufsz, "0x%x: %s | ANT: %s BW: %s MCS: %d NSS: %d %s%s%s%s%s", rate, type, iwl_rs_pretty_ant(ant), iwl_rs_pretty_bw(bw), mcs, nss, (sgi) ? "SGI " : "NGI ", (rate & RATE_MCS_STBC_MSK) ? "STBC " : "", (rate & RATE_MCS_LDPC_MSK) ? "LDPC " : "", (rate & RATE_HE_DUAL_CARRIER_MODE_MSK) ? "DCM " : "", (rate & RATE_MCS_BF_MSK) ? 
"BF " : ""); } IWL_EXPORT_SYMBOL(rs_pretty_print_rate); bool iwl_he_is_sgi(u32 rate_n_flags) { u32 type = rate_n_flags & RATE_MCS_HE_TYPE_MSK; u32 ltf_gi = rate_n_flags & RATE_MCS_HE_GI_LTF_MSK; if (type == RATE_MCS_HE_TYPE_SU || type == RATE_MCS_HE_TYPE_EXT_SU) return ltf_gi == RATE_MCS_HE_SU_4_LTF_08_GI; return false; } IWL_EXPORT_SYMBOL(iwl_he_is_sgi); diff --git a/sys/contrib/dev/iwlwifi/fw/runtime.h b/sys/contrib/dev/iwlwifi/fw/runtime.h index afc822cab674..d3cb1ae68a96 100644 --- a/sys/contrib/dev/iwlwifi/fw/runtime.h +++ b/sys/contrib/dev/iwlwifi/fw/runtime.h @@ -1,205 +1,206 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright (C) 2017 Intel Deutschland GmbH - * Copyright (C) 2018-2020 Intel Corporation + * Copyright (C) 2018-2022 Intel Corporation */ #ifndef __iwl_fw_runtime_h__ #define __iwl_fw_runtime_h__ #include "iwl-config.h" #include "iwl-trans.h" #include "img.h" #include "fw/api/debug.h" #include "fw/api/paging.h" #include "fw/api/power.h" #include "iwl-eeprom-parse.h" #include "fw/acpi.h" struct iwl_fw_runtime_ops { void (*dump_start)(void *ctx); void (*dump_end)(void *ctx); bool (*fw_running)(void *ctx); int (*send_hcmd)(void *ctx, struct iwl_host_cmd *host_cmd); bool (*d3_debug_enable)(void *ctx); }; #define MAX_NUM_LMAC 2 struct iwl_fwrt_shared_mem_cfg { int num_lmacs; int num_txfifo_entries; struct { u32 txfifo_size[TX_FIFO_MAX_NUM]; u32 rxfifo1_size; } lmac[MAX_NUM_LMAC]; u32 rxfifo2_size; u32 rxfifo2_control_size; u32 internal_txfifo_addr; u32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM]; }; #define IWL_FW_RUNTIME_DUMP_WK_NUM 5 /** * struct iwl_fwrt_dump_data - dump data * @trig: trigger the worker was scheduled upon * @fw_pkt: packet received from FW */ struct iwl_fwrt_dump_data { union { struct { struct iwl_fw_ini_trigger_tlv *trig; struct iwl_rx_packet *fw_pkt; }; struct { const struct iwl_fw_dump_desc *desc; bool monitor_only; }; }; }; /** * struct iwl_fwrt_wk_data - dump worker data struct * @idx: index of the worker * @wk: worker */ struct iwl_fwrt_wk_data { u8 idx; struct delayed_work wk; struct iwl_fwrt_dump_data dump_data; }; /** * struct iwl_txf_iter_data - Tx fifo iterator data struct * @fifo: fifo number * @lmac: lmac number * @fifo_size: fifo size * @internal_txf: non zero if fifo is internal Tx fifo */ struct iwl_txf_iter_data { int fifo; int lmac; u32 fifo_size; u8 internal_txf; }; /** * struct iwl_fw_runtime - runtime data for firmware * @fw: firmware image * @cfg: NIC configuration * @dev: device pointer * @ops: user ops * @ops_ctx: user ops context * @fw_paging_db: paging database * @num_of_paging_blk: number of paging blocks * @num_of_pages_in_last_blk: number of pages in the last block * @smem_cfg: saved firmware SMEM configuration * @cur_fw_img: current firmware image, must be maintained by * the driver by calling &iwl_fw_set_current_image() * @dump: debug dump data */ struct iwl_fw_runtime { struct iwl_trans *trans; const struct iwl_fw *fw; struct device *dev; const struct iwl_fw_runtime_ops *ops; void *ops_ctx; const struct iwl_dump_sanitize_ops *sanitize_ops; void *sanitize_ctx; /* Paging */ struct iwl_fw_paging fw_paging_db[NUM_OF_FW_PAGING_BLOCKS]; u16 num_of_paging_blk; u16 num_of_pages_in_last_blk; enum iwl_ucode_type cur_fw_img; /* memory configuration */ struct iwl_fwrt_shared_mem_cfg smem_cfg; /* debug */ struct { struct iwl_fwrt_wk_data wks[IWL_FW_RUNTIME_DUMP_WK_NUM]; unsigned long active_wks; u8 conf; /* ts of the beginning of a non-collect fw dbg data period */ unsigned long 
non_collect_ts_start[IWL_FW_INI_TIME_POINT_NUM]; u32 *d3_debug_data; u32 lmac_err_id[MAX_NUM_LMAC]; u32 umac_err_id; struct iwl_txf_iter_data txf_iter_data; struct { u8 type; u8 subtype; u32 lmac_major; u32 lmac_minor; u32 umac_major; u32 umac_minor; } fw_ver; } dump; #ifdef CONFIG_IWLWIFI_DEBUGFS struct { struct delayed_work wk; u32 delay; u64 seq; } timestamp; bool tpc_enabled; #endif /* CONFIG_IWLWIFI_DEBUGFS */ #ifdef CONFIG_ACPI struct iwl_sar_profile sar_profiles[ACPI_SAR_PROFILE_NUM]; u8 sar_chain_a_profile; u8 sar_chain_b_profile; struct iwl_geo_profile geo_profiles[ACPI_NUM_GEO_PROFILES_REV3]; u32 geo_rev; u32 geo_num_profiles; bool geo_enabled; struct iwl_ppag_chain ppag_chains[IWL_NUM_CHAIN_LIMITS]; u32 ppag_flags; u32 ppag_ver; struct iwl_sar_offset_mapping_cmd sgom_table; bool sgom_enabled; + u8 reduced_power_flags; #endif }; void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans, const struct iwl_fw *fw, const struct iwl_fw_runtime_ops *ops, void *ops_ctx, const struct iwl_dump_sanitize_ops *sanitize_ops, void *sanitize_ctx, struct dentry *dbgfs_dir); static inline void iwl_fw_runtime_free(struct iwl_fw_runtime *fwrt) { int i; kfree(fwrt->dump.d3_debug_data); fwrt->dump.d3_debug_data = NULL; iwl_dbg_tlv_del_timers(fwrt->trans); for (i = 0; i < IWL_FW_RUNTIME_DUMP_WK_NUM; i++) cancel_delayed_work_sync(&fwrt->dump.wks[i].wk); } void iwl_fw_runtime_suspend(struct iwl_fw_runtime *fwrt); void iwl_fw_runtime_resume(struct iwl_fw_runtime *fwrt); static inline void iwl_fw_set_current_image(struct iwl_fw_runtime *fwrt, enum iwl_ucode_type cur_fw_img) { fwrt->cur_fw_img = cur_fw_img; } int iwl_init_paging(struct iwl_fw_runtime *fwrt, enum iwl_ucode_type type); void iwl_free_fw_paging(struct iwl_fw_runtime *fwrt); void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt); int iwl_set_soc_latency(struct iwl_fw_runtime *fwrt); int iwl_configure_rxq(struct iwl_fw_runtime *fwrt); #endif /* __iwl_fw_runtime_h__ */ diff --git a/sys/contrib/dev/iwlwifi/iwl-config.h b/sys/contrib/dev/iwlwifi/iwl-config.h index b7e430ad5e2a..f5b556a103e8 100644 --- a/sys/contrib/dev/iwlwifi/iwl-config.h +++ b/sys/contrib/dev/iwlwifi/iwl-config.h @@ -1,658 +1,661 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright (C) 2005-2014, 2018-2021 Intel Corporation * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #ifndef __IWL_CONFIG_H__ #define __IWL_CONFIG_H__ #include #include #include #include #include "iwl-csr.h" enum iwl_device_family { IWL_DEVICE_FAMILY_UNDEFINED, IWL_DEVICE_FAMILY_1000, IWL_DEVICE_FAMILY_100, IWL_DEVICE_FAMILY_2000, IWL_DEVICE_FAMILY_2030, IWL_DEVICE_FAMILY_105, IWL_DEVICE_FAMILY_135, IWL_DEVICE_FAMILY_5000, IWL_DEVICE_FAMILY_5150, IWL_DEVICE_FAMILY_6000, IWL_DEVICE_FAMILY_6000i, IWL_DEVICE_FAMILY_6005, IWL_DEVICE_FAMILY_6030, IWL_DEVICE_FAMILY_6050, IWL_DEVICE_FAMILY_6150, IWL_DEVICE_FAMILY_7000, IWL_DEVICE_FAMILY_8000, IWL_DEVICE_FAMILY_9000, IWL_DEVICE_FAMILY_22000, IWL_DEVICE_FAMILY_AX210, IWL_DEVICE_FAMILY_BZ, }; /* * LED mode * IWL_LED_DEFAULT: use device default * IWL_LED_RF_STATE: turn LED on/off based on RF state * LED ON = RF ON * LED OFF = RF OFF * IWL_LED_BLINK: adjust led blink rate based on blink table * IWL_LED_DISABLE: led disabled */ enum iwl_led_mode { IWL_LED_DEFAULT, IWL_LED_RF_STATE, IWL_LED_BLINK, IWL_LED_DISABLE, }; /** * enum iwl_nvm_type - nvm formats * @IWL_NVM: the regular format * @IWL_NVM_EXT: extended NVM format * @IWL_NVM_SDP: NVM format used by 3168 series */ enum iwl_nvm_type { IWL_NVM, IWL_NVM_EXT, IWL_NVM_SDP, }; /* 
* This is the threshold value of plcp error rate per 100mSecs. It is * used to set and check for the validity of plcp_delta. */ #define IWL_MAX_PLCP_ERR_THRESHOLD_MIN 1 #define IWL_MAX_PLCP_ERR_THRESHOLD_DEF 50 #define IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF 100 #define IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF 200 #define IWL_MAX_PLCP_ERR_THRESHOLD_MAX 255 #define IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE 0 /* TX queue watchdog timeouts in mSecs */ #define IWL_WATCHDOG_DISABLED 0 #define IWL_DEF_WD_TIMEOUT 2500 #define IWL_LONG_WD_TIMEOUT 10000 #define IWL_MAX_WD_TIMEOUT 120000 #define IWL_DEFAULT_MAX_TX_POWER 22 #define IWL_TX_CSUM_NETIF_FLAGS (NETIF_F_IPV6_CSUM | NETIF_F_IP_CSUM |\ NETIF_F_TSO | NETIF_F_TSO6) #define IWL_TX_CSUM_NETIF_FLAGS_BZ (NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6) #define IWL_CSUM_NETIF_FLAGS_MASK (IWL_TX_CSUM_NETIF_FLAGS | \ IWL_TX_CSUM_NETIF_FLAGS_BZ | \ NETIF_F_RXCSUM) /* Antenna presence definitions */ #define ANT_NONE 0x0 #define ANT_INVALID 0xff #define ANT_A BIT(0) #define ANT_B BIT(1) #define ANT_C BIT(2) #define ANT_AB (ANT_A | ANT_B) #define ANT_AC (ANT_A | ANT_C) #define ANT_BC (ANT_B | ANT_C) #define ANT_ABC (ANT_A | ANT_B | ANT_C) static inline u8 num_of_ant(u8 mask) { return !!((mask) & ANT_A) + !!((mask) & ANT_B) + !!((mask) & ANT_C); } /** * struct iwl_base_params - params not likely to change within a device family * @max_ll_items: max number of OTP blocks * @shadow_ram_support: shadow support for OTP memory * @led_compensation: compensate on the led on/off time per HW according * to the deviation to achieve the desired led frequency. * The detail algorithm is described in iwl-led.c * @wd_timeout: TX queues watchdog timeout * @max_event_log_size: size of event log buffer size for ucode event logging * @shadow_reg_enable: HW shadow register support * @apmg_wake_up_wa: should the MAC access REQ be asserted when a command * is in flight. This is due to a HW bug in 7260, 3160 and 7265. * @scd_chain_ext_wa: should the chain extension feature in SCD be disabled. * @max_tfd_queue_size: max number of entries in tfd queue. */ struct iwl_base_params { unsigned int wd_timeout; u16 eeprom_size; u16 max_event_log_size; u8 pll_cfg:1, /* for iwl_pcie_apm_init() */ shadow_ram_support:1, shadow_reg_enable:1, pcie_l1_allowed:1, apmg_wake_up_wa:1, scd_chain_ext_wa:1; u16 num_of_queues; /* def: HW dependent */ u32 max_tfd_queue_size; /* def: HW dependent */ u8 max_ll_items; u8 led_compensation; }; /* * @stbc: support Tx STBC and 1*SS Rx STBC * @ldpc: support Tx/Rx with LDPC * @use_rts_for_aggregation: use rts/cts protection for HT traffic * @ht40_bands: bitmap of bands (using %NL80211_BAND_*) that support HT40 */ struct iwl_ht_params { u8 ht_greenfield_support:1, stbc:1, ldpc:1, use_rts_for_aggregation:1; u8 ht40_bands; }; /* * Tx-backoff threshold * @temperature: The threshold in Celsius * @backoff: The tx-backoff in uSec */ struct iwl_tt_tx_backoff { s32 temperature; u32 backoff; }; #define TT_TX_BACKOFF_SIZE 6 /** * struct iwl_tt_params - thermal throttling parameters * @ct_kill_entry: CT Kill entry threshold * @ct_kill_exit: CT Kill exit threshold * @ct_kill_duration: The time intervals (in uSec) in which the driver needs * to checks whether to exit CT Kill. * @dynamic_smps_entry: Dynamic SMPS entry threshold * @dynamic_smps_exit: Dynamic SMPS exit threshold * @tx_protection_entry: TX protection entry threshold * @tx_protection_exit: TX protection exit threshold * @tx_backoff: Array of thresholds for tx-backoff , in ascending order. * @support_ct_kill: Support CT Kill? 
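The antenna helpers a little further up are plain bit arithmetic; a short worked example, with the value taken straight from the defines:

static void example_antenna_math(void)
{
	/* ANT_AB == (BIT(0) | BIT(1)) == 0x3: chains A and B, no C */
	u8 valid_tx = ANT_AB;

	/* !!(0x3 & 0x1) + !!(0x3 & 0x2) + !!(0x3 & 0x4) == 1 + 1 + 0 == 2 */
	WARN_ON(num_of_ant(valid_tx) != 2);
}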
* @support_dynamic_smps: Support dynamic SMPS? * @support_tx_protection: Support tx protection? * @support_tx_backoff: Support tx-backoff? */ struct iwl_tt_params { u32 ct_kill_entry; u32 ct_kill_exit; u32 ct_kill_duration; u32 dynamic_smps_entry; u32 dynamic_smps_exit; u32 tx_protection_entry; u32 tx_protection_exit; struct iwl_tt_tx_backoff tx_backoff[TT_TX_BACKOFF_SIZE]; u8 support_ct_kill:1, support_dynamic_smps:1, support_tx_protection:1, support_tx_backoff:1; }; /* * information on how to parse the EEPROM */ #define EEPROM_REG_BAND_1_CHANNELS 0x08 #define EEPROM_REG_BAND_2_CHANNELS 0x26 #define EEPROM_REG_BAND_3_CHANNELS 0x42 #define EEPROM_REG_BAND_4_CHANNELS 0x5C #define EEPROM_REG_BAND_5_CHANNELS 0x74 #define EEPROM_REG_BAND_24_HT40_CHANNELS 0x82 #define EEPROM_REG_BAND_52_HT40_CHANNELS 0x92 #define EEPROM_6000_REG_BAND_24_HT40_CHANNELS 0x80 #define EEPROM_REGULATORY_BAND_NO_HT40 0 /* lower blocks contain EEPROM image and calibration data */ #define OTP_LOW_IMAGE_SIZE_2K (2 * 512 * sizeof(u16)) /* 2 KB */ #define OTP_LOW_IMAGE_SIZE_16K (16 * 512 * sizeof(u16)) /* 16 KB */ #define OTP_LOW_IMAGE_SIZE_32K (32 * 512 * sizeof(u16)) /* 32 KB */ struct iwl_eeprom_params { const u8 regulatory_bands[7]; bool enhanced_txpower; }; /* Tx-backoff power threshold * @pwr: The power limit in mw * @backoff: The tx-backoff in uSec */ struct iwl_pwr_tx_backoff { u32 pwr; u32 backoff; }; enum iwl_cfg_trans_ltr_delay { IWL_CFG_TRANS_LTR_DELAY_NONE = 0, IWL_CFG_TRANS_LTR_DELAY_200US = 1, IWL_CFG_TRANS_LTR_DELAY_2500US = 2, IWL_CFG_TRANS_LTR_DELAY_1820US = 3, }; /** * struct iwl_cfg_trans - information needed to start the trans * * These values are specific to the device ID and do not change when * multiple configs are used for a single device ID. They values are * used, among other things, to boot the NIC so that the HW REV or * RFID can be read before deciding the remaining parameters to use. * * @base_params: pointer to basic parameters * @csr: csr flags and addresses that are different across devices * @device_family: the device family * @umac_prph_offset: offset to add to UMAC periphery address * @xtal_latency: power up latency to get the xtal stabilized * @extra_phy_cfg_flags: extra configuration flags to pass to the PHY * @rf_id: need to read rf_id to determine the firmware image * @use_tfh: use TFH * @gen2: 22000 and on transport operation * @mq_rx_supported: multi-queue rx support * @integrated: discrete or integrated * @low_latency_xtal: use the low latency xtal if supported * @ltr_delay: LTR delay parameter, &enum iwl_cfg_trans_ltr_delay. + * @imr_enabled: use the IMR if supported. */ struct iwl_cfg_trans_params { const struct iwl_base_params *base_params; enum iwl_device_family device_family; u32 umac_prph_offset; u32 xtal_latency; u32 extra_phy_cfg_flags; u32 rf_id:1, use_tfh:1, gen2:1, mq_rx_supported:1, integrated:1, low_latency_xtal:1, bisr_workaround:1, - ltr_delay:2; + ltr_delay:2, + imr_enabled:1; }; /** * struct iwl_fw_mon_reg - FW monitor register info * @addr: register address * @mask: register mask */ struct iwl_fw_mon_reg { u32 addr; u32 mask; }; /** * struct iwl_fw_mon_regs - FW monitor registers * @write_ptr: write pointer register * @cycle_cnt: cycle count register * @cur_frag: current fragment in use */ struct iwl_fw_mon_regs { struct iwl_fw_mon_reg write_ptr; struct iwl_fw_mon_reg cycle_cnt; struct iwl_fw_mon_reg cur_frag; }; /** * struct iwl_cfg * @trans: the trans-specific configuration part * @name: Official name of the device * @fw_name_pre: Firmware filename prefix. 
The API version and extension * (.ucode) will be added to filename before loading from disk. The * filename is constructed as fw_name_pre.ucode. * @ucode_api_max: Highest version of uCode API supported by driver. * @ucode_api_min: Lowest version of uCode API supported by driver. * @max_inst_size: The maximal length of the fw inst section (only DVM) * @max_data_size: The maximal length of the fw data section (only DVM) * @valid_tx_ant: valid transmit antenna * @valid_rx_ant: valid receive antenna * @non_shared_ant: the antenna that is for WiFi only * @nvm_ver: NVM version * @nvm_calib_ver: NVM calibration version * @lib: pointer to the lib ops * @ht_params: pointer to ht parameters * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off) * @rx_with_siso_diversity: 1x1 device with rx antenna diversity * @tx_with_siso_diversity: 1x1 device with tx antenna diversity * @internal_wimax_coex: internal wifi/wimax combo device * @high_temp: Is this NIC designated to operate in high temperature environments. * @host_interrupt_operation_mode: device needs host interrupt operation * mode set * @nvm_hw_section_num: the ID of the HW NVM section * @mac_addr_from_csr: read HW address from CSR registers at this offset * @features: hw features, any combination of feature_passlist * @pwr_tx_backoffs: translation table between power limits and backoffs * @max_tx_agg_size: max TX aggregation size of the ADDBA request/response * @dccm_offset: offset from which DCCM begins * @dccm_len: length of DCCM (including runtime stack CCM) * @dccm2_offset: offset from which the second DCCM begins * @dccm2_len: length of the second DCCM * @smem_offset: offset from which the SMEM begins * @smem_len: the length of SMEM * @vht_mu_mimo_supported: VHT MU-MIMO support * @cdb: CDB support * @nvm_type: see &enum iwl_nvm_type * @d3_debug_data_base_addr: base address where D3 debug data is stored * @d3_debug_data_length: length of the D3 debug data * @bisr_workaround: BISR hardware workaround (for 22260 series devices) * @min_txq_size: minimum number of slots required in a TX queue * @uhb_supported: ultra high band channels supported * @min_ba_txq_size: minimum number of slots required in a TX queue which * is based on hardware support (HE - 256, EHT - 1K). * @num_rbds: number of receive buffer descriptors to use * (only used for multi-queue capable devices) * @mac_addr_csr_base: CSR base register for MAC address access, if not set * assume 0x380 * * We enable the driver to be backward compatible wrt. hardware features. * API differences in uCode shouldn't be handled here but through TLVs * and/or the uCode API version instead.
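 * For example (illustrative values, not taken from any real config): with a
 * fw_name_pre of "iwlwifi-foo-a0-" and ucode_api_max of 72 the driver first
 * requests "iwlwifi-foo-a0-72.ucode" and, if that file is not found, retries
 * one API version lower at a time until ucode_api_min is reached.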
*/ struct iwl_cfg { struct iwl_cfg_trans_params trans; /* params specific to an individual device within a device family */ const char *name; const char *fw_name_pre; /* params likely to change within a device family */ const struct iwl_ht_params *ht_params; const struct iwl_eeprom_params *eeprom_params; const struct iwl_pwr_tx_backoff *pwr_tx_backoffs; const char *default_nvm_file_C_step; const struct iwl_tt_params *thermal_params; enum iwl_led_mode led_mode; enum iwl_nvm_type nvm_type; u32 max_data_size; u32 max_inst_size; netdev_features_t features; u32 dccm_offset; u32 dccm_len; u32 dccm2_offset; u32 dccm2_len; u32 smem_offset; u32 smem_len; u16 nvm_ver; u16 nvm_calib_ver; u32 rx_with_siso_diversity:1, tx_with_siso_diversity:1, bt_shared_single_ant:1, internal_wimax_coex:1, host_interrupt_operation_mode:1, high_temp:1, mac_addr_from_csr:10, lp_xtal_workaround:1, disable_dummy_notification:1, apmg_not_supported:1, vht_mu_mimo_supported:1, cdb:1, dbgc_supported:1, uhb_supported:1; u8 valid_tx_ant; u8 valid_rx_ant; u8 non_shared_ant; u8 nvm_hw_section_num; u8 max_tx_agg_size; u8 ucode_api_max; u8 ucode_api_min; u16 num_rbds; u32 min_umac_error_event_table; u32 d3_debug_data_base_addr; u32 d3_debug_data_length; u32 min_txq_size; u32 gp2_reg_addr; u32 min_ba_txq_size; const struct iwl_fw_mon_regs mon_dram_regs; const struct iwl_fw_mon_regs mon_smem_regs; const struct iwl_fw_mon_regs mon_dbgi_regs; }; #define IWL_CFG_ANY (~0) #define IWL_CFG_MAC_TYPE_PU 0x31 #define IWL_CFG_MAC_TYPE_PNJ 0x32 #define IWL_CFG_MAC_TYPE_TH 0x32 #define IWL_CFG_MAC_TYPE_QU 0x33 #define IWL_CFG_MAC_TYPE_QUZ 0x35 #define IWL_CFG_MAC_TYPE_QNJ 0x36 #define IWL_CFG_MAC_TYPE_SO 0x37 #define IWL_CFG_MAC_TYPE_SNJ 0x42 #define IWL_CFG_MAC_TYPE_SOF 0x43 #define IWL_CFG_MAC_TYPE_MA 0x44 #define IWL_CFG_MAC_TYPE_BZ 0x46 #define IWL_CFG_MAC_TYPE_GL 0x47 #define IWL_CFG_RF_TYPE_TH 0x105 #define IWL_CFG_RF_TYPE_TH1 0x108 #define IWL_CFG_RF_TYPE_JF2 0x105 #define IWL_CFG_RF_TYPE_JF1 0x108 #define IWL_CFG_RF_TYPE_HR2 0x10A #define IWL_CFG_RF_TYPE_HR1 0x10C #define IWL_CFG_RF_TYPE_GF 0x10D #define IWL_CFG_RF_TYPE_MR 0x110 #define IWL_CFG_RF_TYPE_MS 0x111 #define IWL_CFG_RF_TYPE_FM 0x112 #define IWL_CFG_RF_ID_TH 0x1 #define IWL_CFG_RF_ID_TH1 0x1 #define IWL_CFG_RF_ID_JF 0x3 #define IWL_CFG_RF_ID_JF1 0x6 #define IWL_CFG_RF_ID_JF1_DIV 0xA #define IWL_CFG_RF_ID_HR 0x7 #define IWL_CFG_RF_ID_HR1 0x4 #define IWL_CFG_NO_160 0x1 #define IWL_CFG_160 0x0 #define IWL_CFG_CORES_BT 0x0 #define IWL_CFG_CORES_BT_GNSS 0x5 #define IWL_CFG_NO_CDB 0x0 #define IWL_CFG_CDB 0x1 #define IWL_CFG_NO_JACKET 0x0 #define IWL_CFG_IS_JACKET 0x1 #define IWL_SUBDEVICE_RF_ID(subdevice) ((u16)((subdevice) & 0x00F0) >> 4) #define IWL_SUBDEVICE_NO_160(subdevice) ((u16)((subdevice) & 0x0200) >> 9) #define IWL_SUBDEVICE_CORES(subdevice) ((u16)((subdevice) & 0x1C00) >> 10) struct iwl_dev_info { u16 device; u16 subdevice; u16 mac_type; u16 rf_type; u8 mac_step; u8 rf_id; u8 no_160; u8 cores; u8 cdb; u8 jacket; const struct iwl_cfg *cfg; const char *name; }; /* * This list declares the config structures for all devices. 
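 * Each &struct iwl_dev_info entry above is matched against the PCI device and
 * subsystem IDs together with the MAC/RF properties read from the hardware;
 * fields set to IWL_CFG_ANY act as wildcards. As a worked example with a
 * hypothetical subsystem device ID of 0x0070:
 *	IWL_SUBDEVICE_RF_ID(0x0070) == 0x7 (IWL_CFG_RF_ID_HR)
 *	IWL_SUBDEVICE_NO_160(0x0070) == 0x0 (IWL_CFG_160)
 *	IWL_SUBDEVICE_CORES(0x0070) == 0x0 (IWL_CFG_CORES_BT)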
*/ extern const struct iwl_cfg_trans_params iwl9000_trans_cfg; extern const struct iwl_cfg_trans_params iwl9560_trans_cfg; extern const struct iwl_cfg_trans_params iwl9560_long_latency_trans_cfg; extern const struct iwl_cfg_trans_params iwl9560_shared_clk_trans_cfg; extern const struct iwl_cfg_trans_params iwl_qnj_trans_cfg; extern const struct iwl_cfg_trans_params iwl_qu_trans_cfg; extern const struct iwl_cfg_trans_params iwl_qu_medium_latency_trans_cfg; extern const struct iwl_cfg_trans_params iwl_qu_long_latency_trans_cfg; extern const struct iwl_cfg_trans_params iwl_ax200_trans_cfg; extern const struct iwl_cfg_trans_params iwl_snj_trans_cfg; extern const struct iwl_cfg_trans_params iwl_so_trans_cfg; extern const struct iwl_cfg_trans_params iwl_so_long_latency_trans_cfg; +extern const struct iwl_cfg_trans_params iwl_so_long_latency_imr_trans_cfg; extern const struct iwl_cfg_trans_params iwl_ma_trans_cfg; extern const struct iwl_cfg_trans_params iwl_bz_trans_cfg; extern const char iwl9162_name[]; extern const char iwl9260_name[]; extern const char iwl9260_1_name[]; extern const char iwl9270_name[]; extern const char iwl9461_name[]; extern const char iwl9462_name[]; extern const char iwl9560_name[]; extern const char iwl9162_160_name[]; extern const char iwl9260_160_name[]; extern const char iwl9270_160_name[]; extern const char iwl9461_160_name[]; extern const char iwl9462_160_name[]; extern const char iwl9560_160_name[]; extern const char iwl9260_killer_1550_name[]; extern const char iwl9560_killer_1550i_name[]; extern const char iwl9560_killer_1550s_name[]; extern const char iwl_ax200_name[]; extern const char iwl_ax203_name[]; extern const char iwl_ax204_name[]; extern const char iwl_ax201_name[]; extern const char iwl_ax101_name[]; extern const char iwl_ax200_killer_1650w_name[]; extern const char iwl_ax200_killer_1650x_name[]; extern const char iwl_ax201_killer_1650s_name[]; extern const char iwl_ax201_killer_1650i_name[]; extern const char iwl_ax210_killer_1675w_name[]; extern const char iwl_ax210_killer_1675x_name[]; extern const char iwl9560_killer_1550i_160_name[]; extern const char iwl9560_killer_1550s_160_name[]; extern const char iwl_ax211_killer_1675s_name[]; extern const char iwl_ax211_killer_1675i_name[]; extern const char iwl_ax411_killer_1690s_name[]; extern const char iwl_ax411_killer_1690i_name[]; extern const char iwl_ax211_name[]; extern const char iwl_ax221_name[]; extern const char iwl_ax231_name[]; extern const char iwl_ax411_name[]; extern const char iwl_bz_name[]; #if IS_ENABLED(CONFIG_IWLDVM) extern const struct iwl_cfg iwl5300_agn_cfg; extern const struct iwl_cfg iwl5100_agn_cfg; extern const struct iwl_cfg iwl5350_agn_cfg; extern const struct iwl_cfg iwl5100_bgn_cfg; extern const struct iwl_cfg iwl5100_abg_cfg; extern const struct iwl_cfg iwl5150_agn_cfg; extern const struct iwl_cfg iwl5150_abg_cfg; extern const struct iwl_cfg iwl6005_2agn_cfg; extern const struct iwl_cfg iwl6005_2abg_cfg; extern const struct iwl_cfg iwl6005_2bg_cfg; extern const struct iwl_cfg iwl6005_2agn_sff_cfg; extern const struct iwl_cfg iwl6005_2agn_d_cfg; extern const struct iwl_cfg iwl6005_2agn_mow1_cfg; extern const struct iwl_cfg iwl6005_2agn_mow2_cfg; extern const struct iwl_cfg iwl1030_bgn_cfg; extern const struct iwl_cfg iwl1030_bg_cfg; extern const struct iwl_cfg iwl6030_2agn_cfg; extern const struct iwl_cfg iwl6030_2abg_cfg; extern const struct iwl_cfg iwl6030_2bgn_cfg; extern const struct iwl_cfg iwl6030_2bg_cfg; extern const struct iwl_cfg iwl6000i_2agn_cfg; extern const 
struct iwl_cfg iwl6000i_2abg_cfg; extern const struct iwl_cfg iwl6000i_2bg_cfg; extern const struct iwl_cfg iwl6000_3agn_cfg; extern const struct iwl_cfg iwl6050_2agn_cfg; extern const struct iwl_cfg iwl6050_2abg_cfg; extern const struct iwl_cfg iwl6150_bgn_cfg; extern const struct iwl_cfg iwl6150_bg_cfg; extern const struct iwl_cfg iwl1000_bgn_cfg; extern const struct iwl_cfg iwl1000_bg_cfg; extern const struct iwl_cfg iwl100_bgn_cfg; extern const struct iwl_cfg iwl100_bg_cfg; extern const struct iwl_cfg iwl130_bgn_cfg; extern const struct iwl_cfg iwl130_bg_cfg; extern const struct iwl_cfg iwl2000_2bgn_cfg; extern const struct iwl_cfg iwl2000_2bgn_d_cfg; extern const struct iwl_cfg iwl2030_2bgn_cfg; extern const struct iwl_cfg iwl6035_2agn_cfg; extern const struct iwl_cfg iwl6035_2agn_sff_cfg; extern const struct iwl_cfg iwl105_bgn_cfg; extern const struct iwl_cfg iwl105_bgn_d_cfg; extern const struct iwl_cfg iwl135_bgn_cfg; #endif /* CONFIG_IWLDVM */ #if IS_ENABLED(CONFIG_IWLMVM) extern const struct iwl_cfg iwl7260_2ac_cfg; extern const struct iwl_cfg iwl7260_2ac_cfg_high_temp; extern const struct iwl_cfg iwl7260_2n_cfg; extern const struct iwl_cfg iwl7260_n_cfg; extern const struct iwl_cfg iwl3160_2ac_cfg; extern const struct iwl_cfg iwl3160_2n_cfg; extern const struct iwl_cfg iwl3160_n_cfg; extern const struct iwl_cfg iwl3165_2ac_cfg; extern const struct iwl_cfg iwl3168_2ac_cfg; extern const struct iwl_cfg iwl7265_2ac_cfg; extern const struct iwl_cfg iwl7265_2n_cfg; extern const struct iwl_cfg iwl7265_n_cfg; extern const struct iwl_cfg iwl7265d_2ac_cfg; extern const struct iwl_cfg iwl7265d_2n_cfg; extern const struct iwl_cfg iwl7265d_n_cfg; extern const struct iwl_cfg iwl8260_2n_cfg; extern const struct iwl_cfg iwl8260_2ac_cfg; extern const struct iwl_cfg iwl8265_2ac_cfg; extern const struct iwl_cfg iwl8275_2ac_cfg; extern const struct iwl_cfg iwl4165_2ac_cfg; extern const struct iwl_cfg iwl9260_2ac_cfg; extern const struct iwl_cfg iwl9560_qu_b0_jf_b0_cfg; extern const struct iwl_cfg iwl9560_qu_c0_jf_b0_cfg; extern const struct iwl_cfg iwl9560_quz_a0_jf_b0_cfg; extern const struct iwl_cfg iwl9560_qnj_b0_jf_b0_cfg; extern const struct iwl_cfg iwl9560_2ac_cfg_soc; extern const struct iwl_cfg iwl_qu_b0_hr1_b0; extern const struct iwl_cfg iwl_qu_c0_hr1_b0; extern const struct iwl_cfg iwl_quz_a0_hr1_b0; extern const struct iwl_cfg iwl_qu_b0_hr_b0; extern const struct iwl_cfg iwl_qu_c0_hr_b0; extern const struct iwl_cfg iwl_ax200_cfg_cc; extern const struct iwl_cfg iwl_ax201_cfg_qu_hr; extern const struct iwl_cfg iwl_ax201_cfg_qu_c0_hr_b0; extern const struct iwl_cfg iwl_ax201_cfg_quz_hr; extern const struct iwl_cfg iwl_ax1650i_cfg_quz_hr; extern const struct iwl_cfg iwl_ax1650s_cfg_quz_hr; extern const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0; extern const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0; extern const struct iwl_cfg killer1650s_2ax_cfg_qu_c0_hr_b0; extern const struct iwl_cfg killer1650i_2ax_cfg_qu_c0_hr_b0; extern const struct iwl_cfg killer1650x_2ax_cfg; extern const struct iwl_cfg killer1650w_2ax_cfg; extern const struct iwl_cfg iwl_qnj_b0_hr_b0_cfg; extern const struct iwl_cfg iwlax210_2ax_cfg_so_jf_b0; extern const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0; extern const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0_long; extern const struct iwl_cfg iwlax210_2ax_cfg_ty_gf_a0; extern const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0; extern const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0_long; extern const struct iwl_cfg iwlax411_2ax_cfg_sosnj_gf4_a0; extern const struct 
iwl_cfg iwlax211_cfg_snj_gf_a0; extern const struct iwl_cfg iwl_cfg_snj_hr_b0; extern const struct iwl_cfg iwl_cfg_snj_a0_jf_b0; extern const struct iwl_cfg iwl_cfg_ma_a0_hr_b0; extern const struct iwl_cfg iwl_cfg_ma_a0_gf_a0; extern const struct iwl_cfg iwl_cfg_ma_a0_gf4_a0; extern const struct iwl_cfg iwl_cfg_ma_a0_mr_a0; extern const struct iwl_cfg iwl_cfg_ma_a0_ms_a0; extern const struct iwl_cfg iwl_cfg_ma_a0_fm_a0; extern const struct iwl_cfg iwl_cfg_snj_a0_mr_a0; extern const struct iwl_cfg iwl_cfg_snj_a0_ms_a0; extern const struct iwl_cfg iwl_cfg_so_a0_hr_a0; extern const struct iwl_cfg iwl_cfg_so_a0_ms_a0; extern const struct iwl_cfg iwl_cfg_quz_a0_hr_b0; extern const struct iwl_cfg iwl_cfg_bz_a0_hr_b0; extern const struct iwl_cfg iwl_cfg_bz_a0_gf_a0; extern const struct iwl_cfg iwl_cfg_bz_a0_gf4_a0; extern const struct iwl_cfg iwl_cfg_bz_a0_mr_a0; extern const struct iwl_cfg iwl_cfg_bz_a0_fm_a0; extern const struct iwl_cfg iwl_cfg_gl_a0_fm_a0; extern const struct iwl_cfg iwl_cfg_bz_z0_gf_a0; extern const struct iwl_cfg iwl_cfg_bnj_a0_fm_a0; extern const struct iwl_cfg iwl_cfg_bnj_a0_fm4_a0; extern const struct iwl_cfg iwl_cfg_bnj_a0_gf_a0; extern const struct iwl_cfg iwl_cfg_bnj_a0_gf4_a0; extern const struct iwl_cfg iwl_cfg_bnj_a0_hr_b0; #endif /* CONFIG_IWLMVM */ #endif /* __IWL_CONFIG_H__ */ diff --git a/sys/contrib/dev/iwlwifi/iwl-context-info-gen3.h b/sys/contrib/dev/iwlwifi/iwl-context-info-gen3.h index 5adf485db38e..b84884034c74 100644 --- a/sys/contrib/dev/iwlwifi/iwl-context-info-gen3.h +++ b/sys/contrib/dev/iwlwifi/iwl-context-info-gen3.h @@ -1,267 +1,269 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2018, 2020-2021 Intel Corporation + * Copyright (C) 2018, 2020-2022 Intel Corporation */ #ifndef __iwl_context_info_file_gen3_h__ #define __iwl_context_info_file_gen3_h__ #include "iwl-context-info.h" #define CSR_CTXT_INFO_BOOT_CTRL 0x0 #define CSR_CTXT_INFO_ADDR 0x118 #define CSR_IML_DATA_ADDR 0x120 #define CSR_IML_SIZE_ADDR 0x128 #define CSR_IML_RESP_ADDR 0x12c /* Set bit for enabling automatic function boot */ #define CSR_AUTO_FUNC_BOOT_ENA BIT(1) /* Set bit for initiating function boot */ #define CSR_AUTO_FUNC_INIT BIT(7) /** * enum iwl_prph_scratch_mtr_format - tfd size configuration * @IWL_PRPH_MTR_FORMAT_16B: 16 bit tfd * @IWL_PRPH_MTR_FORMAT_32B: 32 bit tfd * @IWL_PRPH_MTR_FORMAT_64B: 64 bit tfd * @IWL_PRPH_MTR_FORMAT_256B: 256 bit tfd */ enum iwl_prph_scratch_mtr_format { IWL_PRPH_MTR_FORMAT_16B = 0x0, IWL_PRPH_MTR_FORMAT_32B = 0x40000, IWL_PRPH_MTR_FORMAT_64B = 0x80000, IWL_PRPH_MTR_FORMAT_256B = 0xC0000, }; /** * enum iwl_prph_scratch_flags - PRPH scratch control flags + * @IWL_PRPH_SCRATCH_IMR_DEBUG_EN: IMR support for debug * @IWL_PRPH_SCRATCH_EARLY_DEBUG_EN: enable early debug conf * @IWL_PRPH_SCRATCH_EDBG_DEST_DRAM: use DRAM, with size allocated * in hwm config. * @IWL_PRPH_SCRATCH_EDBG_DEST_INTERNAL: use buffer on SRAM * @IWL_PRPH_SCRATCH_EDBG_DEST_ST_ARBITER: use st arbiter, mainly for * multicomm. * @IWL_PRPH_SCRATCH_EDBG_DEST_TB22DTF: route debug data to SoC HW * @IWL_PRPH_SCTATCH_RB_SIZE_4K: Use 4K RB size (the default is 2K) * @IWL_PRPH_SCRATCH_MTR_MODE: format used for completion - 0: for * completion descriptor, 1 for responses (legacy) * @IWL_PRPH_SCRATCH_MTR_FORMAT: a mask for the size of the tfd. * There are 4 optional values: 0: 16 bit, 1: 32 bit, 2: 64 bit, * 3: 256 bit. 
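 *	The values of &enum iwl_prph_scratch_mtr_format above are these four
 *	options already shifted into the mask, e.g. IWL_PRPH_MTR_FORMAT_256B is
 *	3 << 18, i.e. both bits of IWL_PRPH_SCRATCH_MTR_FORMAT set.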
* @IWL_PRPH_SCRATCH_RB_SIZE_EXT_MASK: RB size full information, ignored * by older firmware versions, so set IWL_PRPH_SCRATCH_RB_SIZE_4K * appropriately; use the below values for this. * @IWL_PRPH_SCRATCH_RB_SIZE_EXT_8K: 8kB RB size * @IWL_PRPH_SCRATCH_RB_SIZE_EXT_12K: 12kB RB size * @IWL_PRPH_SCRATCH_RB_SIZE_EXT_16K: 16kB RB size */ enum iwl_prph_scratch_flags { + IWL_PRPH_SCRATCH_IMR_DEBUG_EN = BIT(1), IWL_PRPH_SCRATCH_EARLY_DEBUG_EN = BIT(4), IWL_PRPH_SCRATCH_EDBG_DEST_DRAM = BIT(8), IWL_PRPH_SCRATCH_EDBG_DEST_INTERNAL = BIT(9), IWL_PRPH_SCRATCH_EDBG_DEST_ST_ARBITER = BIT(10), IWL_PRPH_SCRATCH_EDBG_DEST_TB22DTF = BIT(11), IWL_PRPH_SCRATCH_RB_SIZE_4K = BIT(16), IWL_PRPH_SCRATCH_MTR_MODE = BIT(17), IWL_PRPH_SCRATCH_MTR_FORMAT = BIT(18) | BIT(19), IWL_PRPH_SCRATCH_RB_SIZE_EXT_MASK = 0xf << 20, IWL_PRPH_SCRATCH_RB_SIZE_EXT_8K = 8 << 20, IWL_PRPH_SCRATCH_RB_SIZE_EXT_12K = 9 << 20, IWL_PRPH_SCRATCH_RB_SIZE_EXT_16K = 10 << 20, }; /* * struct iwl_prph_scratch_version - version structure * @mac_id: SKU and revision id * @version: prph scratch information version id * @size: the size of the context information in DWs * @reserved: reserved */ struct iwl_prph_scratch_version { __le16 mac_id; __le16 version; __le16 size; __le16 reserved; } __packed; /* PERIPH_SCRATCH_VERSION_S */ /* * struct iwl_prph_scratch_control - control structure * @control_flags: context information flags see &enum iwl_prph_scratch_flags * @reserved: reserved */ struct iwl_prph_scratch_control { __le32 control_flags; __le32 reserved; } __packed; /* PERIPH_SCRATCH_CONTROL_S */ /* * struct iwl_prph_scratch_pnvm_cfg - ror config * @pnvm_base_addr: PNVM start address * @pnvm_size: PNVM size in DWs * @reserved: reserved */ struct iwl_prph_scratch_pnvm_cfg { __le64 pnvm_base_addr; __le32 pnvm_size; __le32 reserved; } __packed; /* PERIPH_SCRATCH_PNVM_CFG_S */ /* * struct iwl_prph_scratch_hwm_cfg - hwm config * @hwm_base_addr: hwm start address * @hwm_size: hwm size in DWs * @debug_token_config: debug preset */ struct iwl_prph_scratch_hwm_cfg { __le64 hwm_base_addr; __le32 hwm_size; __le32 debug_token_config; } __packed; /* PERIPH_SCRATCH_HWM_CFG_S */ /* * struct iwl_prph_scratch_rbd_cfg - RBDs configuration * @free_rbd_addr: default queue free RB CB base address * @reserved: reserved */ struct iwl_prph_scratch_rbd_cfg { __le64 free_rbd_addr; __le32 reserved; } __packed; /* PERIPH_SCRATCH_RBD_CFG_S */ /* * struct iwl_prph_scratch_uefi_cfg - prph scratch reduce power table * @base_addr: reduce power table address * @size: table size in dwords */ struct iwl_prph_scratch_uefi_cfg { __le64 base_addr; __le32 size; __le32 reserved; } __packed; /* PERIPH_SCRATCH_UEFI_CFG_S */ /* * struct iwl_prph_scratch_ctrl_cfg - prph scratch ctrl and config * @version: version information of context info and HW * @control: control flags of FH configurations * @pnvm_cfg: ror configuration * @hwm_cfg: hwm configuration * @rbd_cfg: default RX queue configuration */ struct iwl_prph_scratch_ctrl_cfg { struct iwl_prph_scratch_version version; struct iwl_prph_scratch_control control; struct iwl_prph_scratch_pnvm_cfg pnvm_cfg; struct iwl_prph_scratch_hwm_cfg hwm_cfg; struct iwl_prph_scratch_rbd_cfg rbd_cfg; struct iwl_prph_scratch_uefi_cfg reduce_power_cfg; } __packed; /* PERIPH_SCRATCH_CTRL_CFG_S */ /* * struct iwl_prph_scratch - peripheral scratch mapping * @ctrl_cfg: control and configuration of prph scratch * @dram: firmware images addresses in DRAM * @reserved: reserved */ struct iwl_prph_scratch { struct iwl_prph_scratch_ctrl_cfg ctrl_cfg; __le32 
reserved[12]; struct iwl_context_info_dram dram; } __packed; /* PERIPH_SCRATCH_S */ /* * struct iwl_prph_info - peripheral information * @boot_stage_mirror: reflects the value in the Boot Stage CSR register * @ipc_status_mirror: reflects the value in the IPC Status CSR register * @sleep_notif: indicates the peripheral sleep status * @reserved: reserved */ struct iwl_prph_info { __le32 boot_stage_mirror; __le32 ipc_status_mirror; __le32 sleep_notif; __le32 reserved; } __packed; /* PERIPH_INFO_S */ /* * struct iwl_context_info_gen3 - device INIT configuration * @version: version of the context information * @size: size of context information in DWs * @config: context in which the peripheral would execute - a subset of * capability csr register published by the peripheral * @prph_info_base_addr: the peripheral information structure start address * @cr_head_idx_arr_base_addr: the completion ring head index array * start address * @tr_tail_idx_arr_base_addr: the transfer ring tail index array * start address * @cr_tail_idx_arr_base_addr: the completion ring tail index array * start address * @tr_head_idx_arr_base_addr: the transfer ring head index array * start address * @cr_idx_arr_size: number of entries in the completion ring index array * @tr_idx_arr_size: number of entries in the transfer ring index array * @mtr_base_addr: the message transfer ring start address * @mcr_base_addr: the message completion ring start address * @mtr_size: number of entries which the message transfer ring can hold * @mcr_size: number of entries which the message completion ring can hold * @mtr_doorbell_vec: the doorbell vector associated with the message * transfer ring * @mcr_doorbell_vec: the doorbell vector associated with the message * completion ring * @mtr_msi_vec: the MSI which shall be generated by the peripheral after * completing a transfer descriptor in the message transfer ring * @mcr_msi_vec: the MSI which shall be generated by the peripheral after * completing a completion descriptor in the message completion ring * @mtr_opt_header_size: the size of the optional header in the transfer * descriptor associated with the message transfer ring in DWs * @mtr_opt_footer_size: the size of the optional footer in the transfer * descriptor associated with the message transfer ring in DWs * @mcr_opt_header_size: the size of the optional header in the completion * descriptor associated with the message completion ring in DWs * @mcr_opt_footer_size: the size of the optional footer in the completion * descriptor associated with the message completion ring in DWs * @msg_rings_ctrl_flags: message rings control flags * @prph_info_msi_vec: the MSI which shall be generated by the peripheral * after updating the Peripheral Information structure * @prph_scratch_base_addr: the peripheral scratch structure start address * @prph_scratch_size: the size of the peripheral scratch structure in DWs * @reserved: reserved */ struct iwl_context_info_gen3 { __le16 version; __le16 size; __le32 config; __le64 prph_info_base_addr; __le64 cr_head_idx_arr_base_addr; __le64 tr_tail_idx_arr_base_addr; __le64 cr_tail_idx_arr_base_addr; __le64 tr_head_idx_arr_base_addr; __le16 cr_idx_arr_size; __le16 tr_idx_arr_size; __le64 mtr_base_addr; __le64 mcr_base_addr; __le16 mtr_size; __le16 mcr_size; __le16 mtr_doorbell_vec; __le16 mcr_doorbell_vec; __le16 mtr_msi_vec; __le16 mcr_msi_vec; u8 mtr_opt_header_size; u8 mtr_opt_footer_size; u8 mcr_opt_header_size; u8 mcr_opt_footer_size; __le16 msg_rings_ctrl_flags; __le16 prph_info_msi_vec; __le64 
prph_scratch_base_addr; __le32 prph_scratch_size; __le32 reserved; } __packed; /* IPC_CONTEXT_INFO_S */ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, const struct fw_img *fw); void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans, bool alive); int iwl_trans_pcie_ctx_info_gen3_set_pnvm(struct iwl_trans *trans, const void *data, u32 len); int iwl_trans_pcie_ctx_info_gen3_set_reduce_power(struct iwl_trans *trans, const void *data, u32 len); #endif /* __iwl_context_info_file_gen3_h__ */ diff --git a/sys/contrib/dev/iwlwifi/iwl-csr.h b/sys/contrib/dev/iwlwifi/iwl-csr.h index c0a18e820b51..3e1f011e93aa 100644 --- a/sys/contrib/dev/iwlwifi/iwl-csr.h +++ b/sys/contrib/dev/iwlwifi/iwl-csr.h @@ -1,640 +1,641 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014, 2018-2021 Intel Corporation + * Copyright (C) 2005-2014, 2018-2022 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2016 Intel Deutschland GmbH */ #ifndef __iwl_csr_h__ #define __iwl_csr_h__ /* * CSR (control and status registers) * * CSR registers are mapped directly into PCI bus space, and are accessible * whenever platform supplies power to device, even when device is in * low power states due to driver-invoked device resets * (e.g. CSR_RESET_REG_FLAG_SW_RESET) or uCode-driven power-saving modes. * * Use iwl_write32() and iwl_read32() family to access these registers; * these provide simple PCI bus access, without waking up the MAC. * Do not use iwl_write_direct32() family for these registers; * no need to "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ. * The MAC (uCode processor, etc.) does not need to be powered up for accessing * the CSR registers. * * NOTE: Device does need to be awake in order to read this memory * via CSR_EEPROM and CSR_OTP registers */ #define CSR_BASE (0x000) #define CSR_HW_IF_CONFIG_REG (CSR_BASE+0x000) /* hardware interface config */ #define CSR_INT_COALESCING (CSR_BASE+0x004) /* accum ints, 32-usec units */ #define CSR_INT (CSR_BASE+0x008) /* host interrupt status/ack */ #define CSR_INT_MASK (CSR_BASE+0x00c) /* host interrupt enable */ #define CSR_FH_INT_STATUS (CSR_BASE+0x010) /* busmaster int status/ack*/ #define CSR_GPIO_IN (CSR_BASE+0x018) /* read external chip pins */ #define CSR_RESET (CSR_BASE+0x020) /* busmaster enable, NMI, etc*/ #define CSR_GP_CNTRL (CSR_BASE+0x024) #define CSR_FUNC_SCRATCH (CSR_BASE+0x02c) /* Scratch register - used for FW dbg */ /* 2nd byte of CSR_INT_COALESCING, not accessible via iwl_write32()! */ #define CSR_INT_PERIODIC_REG (CSR_BASE+0x005) /* * Hardware revision info * Bit fields: * 31-16: Reserved * 15-4: Type of device: see CSR_HW_REV_TYPE_xxx definitions * 3-2: Revision step: 0 = A, 1 = B, 2 = C, 3 = D * 1-0: "Dash" (-) value, as in A-1, etc. */ #define CSR_HW_REV (CSR_BASE+0x028) /* * RF ID revision info * Bit fields: * 31:24: Reserved (set to 0x0) * 23:12: Type * 11:8: Step (A - 0x0, B - 0x1, etc) * 7:4: Dash * 3:0: Flavor */ #define CSR_HW_RF_ID (CSR_BASE+0x09c) /* * EEPROM and OTP (one-time-programmable) memory reads * * NOTE: Device must be awake, initialized via apm_ops.init(), * in order to read. */ #define CSR_EEPROM_REG (CSR_BASE+0x02c) #define CSR_EEPROM_GP (CSR_BASE+0x030) #define CSR_OTP_GP_REG (CSR_BASE+0x034) #define CSR_GIO_REG (CSR_BASE+0x03C) #define CSR_GP_UCODE_REG (CSR_BASE+0x048) #define CSR_GP_DRIVER_REG (CSR_BASE+0x050) /* * UCODE-DRIVER GP (general purpose) mailbox registers. * SET/CLR registers set/clear bit(s) if "1" is written. 
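 * As an illustrative sketch (not a quote of the driver code), a bit can be
 * set with a single write and no read-modify-write cycle:
 *	iwl_write32(trans, CSR_UCODE_DRV_GP1_SET,
 *		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
 * and cleared again by writing the same mask to CSR_UCODE_DRV_GP1_CLR.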
*/ #define CSR_UCODE_DRV_GP1 (CSR_BASE+0x054) #define CSR_UCODE_DRV_GP1_SET (CSR_BASE+0x058) #define CSR_UCODE_DRV_GP1_CLR (CSR_BASE+0x05c) #define CSR_UCODE_DRV_GP2 (CSR_BASE+0x060) #define CSR_MBOX_SET_REG (CSR_BASE + 0x88) #define CSR_LED_REG (CSR_BASE+0x094) #define CSR_DRAM_INT_TBL_REG (CSR_BASE+0x0A0) #define CSR_MAC_SHADOW_REG_CTRL (CSR_BASE + 0x0A8) /* 6000 and up */ #define CSR_MAC_SHADOW_REG_CTRL_RX_WAKE BIT(20) #define CSR_MAC_SHADOW_REG_CTL2 (CSR_BASE + 0x0AC) #define CSR_MAC_SHADOW_REG_CTL2_RX_WAKE 0xFFFF /* LTR control (since IWL_DEVICE_FAMILY_22000) */ #define CSR_LTR_LONG_VAL_AD (CSR_BASE + 0x0D4) #define CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ 0x80000000 #define CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE 0x1c000000 #define CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL 0x03ff0000 #define CSR_LTR_LONG_VAL_AD_SNOOP_REQ 0x00008000 #define CSR_LTR_LONG_VAL_AD_SNOOP_SCALE 0x00001c00 #define CSR_LTR_LONG_VAL_AD_SNOOP_VAL 0x000003ff #define CSR_LTR_LONG_VAL_AD_SCALE_USEC 2 /* GIO Chicken Bits (PCI Express bus link power management) */ #define CSR_GIO_CHICKEN_BITS (CSR_BASE+0x100) #define CSR_IPC_SLEEP_CONTROL (CSR_BASE + 0x114) #define CSR_IPC_SLEEP_CONTROL_SUSPEND 0x3 #define CSR_IPC_SLEEP_CONTROL_RESUME 0 /* Doorbell - since Bz * connected to UREG_DOORBELL_TO_ISR6 (lower 16 bits only) */ #define CSR_DOORBELL_VECTOR (CSR_BASE + 0x130) /* host chicken bits */ #define CSR_HOST_CHICKEN (CSR_BASE + 0x204) #define CSR_HOST_CHICKEN_PM_IDLE_SRC_DIS_SB_PME BIT(19) /* Analog phase-lock-loop configuration */ #define CSR_ANA_PLL_CFG (CSR_BASE+0x20c) /* * CSR HW resources monitor registers */ #define CSR_MONITOR_CFG_REG (CSR_BASE+0x214) #define CSR_MONITOR_STATUS_REG (CSR_BASE+0x228) #define CSR_MONITOR_XTAL_RESOURCES (0x00000010) /* * CSR Hardware Revision Workaround Register. Indicates hardware rev; * "step" determines CCK backoff for txpower calculation. * See also CSR_HW_REV register. * Bit fields: * 3-2: 0 = A, 1 = B, 2 = C, 3 = D step * 1-0: "Dash" (-) value, as in C-1, etc. */ #define CSR_HW_REV_WA_REG (CSR_BASE+0x22C) #define CSR_DBG_HPET_MEM_REG (CSR_BASE+0x240) #define CSR_DBG_LINK_PWR_MGMT_REG (CSR_BASE+0x250) /* * Scratch register initial configuration - this is set on init, and read * during a error FW error. 
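 * An illustrative sketch, assuming the bring-up path simply programs the
 * value defined below before starting the firmware:
 *	iwl_write32(trans, CSR_FUNC_SCRATCH, CSR_FUNC_SCRATCH_INIT_VALUE);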
*/ #define CSR_FUNC_SCRATCH_INIT_VALUE (0x01010101) /* Bits for CSR_HW_IF_CONFIG_REG */ #define CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP_DASH (0x0000000F) #define CSR_HW_IF_CONFIG_REG_BIT_MONITOR_SRAM (0x00000080) #define CSR_HW_IF_CONFIG_REG_MSK_BOARD_VER (0x000000C0) #define CSR_HW_IF_CONFIG_REG_BIT_MAC_SI (0x00000100) #define CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI (0x00000200) #define CSR_HW_IF_CONFIG_REG_D3_DEBUG (0x00000200) #define CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE (0x00000C00) #define CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH (0x00003000) #define CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP (0x0000C000) #define CSR_HW_IF_CONFIG_REG_POS_MAC_DASH (0) #define CSR_HW_IF_CONFIG_REG_POS_MAC_STEP (2) #define CSR_HW_IF_CONFIG_REG_POS_BOARD_VER (6) #define CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE (10) #define CSR_HW_IF_CONFIG_REG_POS_PHY_DASH (12) #define CSR_HW_IF_CONFIG_REG_POS_PHY_STEP (14) #define CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A (0x00080000) #define CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM (0x00200000) #define CSR_HW_IF_CONFIG_REG_BIT_NIC_READY (0x00400000) /* PCI_OWN_SEM */ #define CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE (0x02000000) /* ME_OWN */ #define CSR_HW_IF_CONFIG_REG_PREPARE (0x08000000) /* WAKE_ME */ #define CSR_HW_IF_CONFIG_REG_ENABLE_PME (0x10000000) #define CSR_HW_IF_CONFIG_REG_PERSIST_MODE (0x40000000) /* PERSISTENCE */ #define CSR_MBOX_SET_REG_OS_ALIVE BIT(5) #define CSR_INT_PERIODIC_DIS (0x00) /* disable periodic int*/ #define CSR_INT_PERIODIC_ENA (0xFF) /* 255*32 usec ~ 8 msec*/ /* interrupt flags in INTA, set by uCode or hardware (e.g. dma), * acknowledged (reset) by host writing "1" to flagged bits. */ #define CSR_INT_BIT_FH_RX (1 << 31) /* Rx DMA, cmd responses, FH_INT[17:16] */ #define CSR_INT_BIT_HW_ERR (1 << 29) /* DMA hardware error FH_INT[31] */ #define CSR_INT_BIT_RX_PERIODIC (1 << 28) /* Rx periodic */ #define CSR_INT_BIT_FH_TX (1 << 27) /* Tx DMA FH_INT[1:0] */ #define CSR_INT_BIT_SCD (1 << 26) /* TXQ pointer advanced */ #define CSR_INT_BIT_SW_ERR (1 << 25) /* uCode error */ #define CSR_INT_BIT_RF_KILL (1 << 7) /* HW RFKILL switch GP_CNTRL[27] toggled */ #define CSR_INT_BIT_CT_KILL (1 << 6) /* Critical temp (chip too hot) rfkill */ #define CSR_INT_BIT_SW_RX (1 << 3) /* Rx, command responses */ #define CSR_INT_BIT_WAKEUP (1 << 1) /* NIC controller waking up (pwr mgmt) */ #define CSR_INT_BIT_ALIVE (1 << 0) /* uCode interrupts once it initializes */ #define CSR_INI_SET_MASK (CSR_INT_BIT_FH_RX | \ CSR_INT_BIT_HW_ERR | \ CSR_INT_BIT_FH_TX | \ CSR_INT_BIT_SW_ERR | \ CSR_INT_BIT_RF_KILL | \ CSR_INT_BIT_SW_RX | \ CSR_INT_BIT_WAKEUP | \ CSR_INT_BIT_ALIVE | \ CSR_INT_BIT_RX_PERIODIC) /* interrupt flags in FH (flow handler) (PCI busmaster DMA) */ #define CSR_FH_INT_BIT_ERR (1 << 31) /* Error */ #define CSR_FH_INT_BIT_HI_PRIOR (1 << 30) /* High priority Rx, bypass coalescing */ #define CSR_FH_INT_BIT_RX_CHNL1 (1 << 17) /* Rx channel 1 */ #define CSR_FH_INT_BIT_RX_CHNL0 (1 << 16) /* Rx channel 0 */ #define CSR_FH_INT_BIT_TX_CHNL1 (1 << 1) /* Tx channel 1 */ #define CSR_FH_INT_BIT_TX_CHNL0 (1 << 0) /* Tx channel 0 */ #define CSR_FH_INT_RX_MASK (CSR_FH_INT_BIT_HI_PRIOR | \ CSR_FH_INT_BIT_RX_CHNL1 | \ CSR_FH_INT_BIT_RX_CHNL0) #define CSR_FH_INT_TX_MASK (CSR_FH_INT_BIT_TX_CHNL1 | \ CSR_FH_INT_BIT_TX_CHNL0) /* GPIO */ #define CSR_GPIO_IN_BIT_AUX_POWER (0x00000200) #define CSR_GPIO_IN_VAL_VAUX_PWR_SRC (0x00000000) #define CSR_GPIO_IN_VAL_VMAIN_PWR_SRC (0x00000200) /* RESET */ #define CSR_RESET_REG_FLAG_NEVO_RESET (0x00000001) #define CSR_RESET_REG_FLAG_FORCE_NMI (0x00000002) #define 
CSR_RESET_REG_FLAG_SW_RESET (0x00000080) #define CSR_RESET_REG_FLAG_MASTER_DISABLED (0x00000100) #define CSR_RESET_REG_FLAG_STOP_MASTER (0x00000200) #define CSR_RESET_LINK_PWR_MGMT_DISABLED (0x80000000) /* * GP (general purpose) CONTROL REGISTER * Bit fields: * 27: HW_RF_KILL_SW * Indicates state of (platform's) hardware RF-Kill switch * 26-24: POWER_SAVE_TYPE * Indicates current power-saving mode: * 000 -- No power saving * 001 -- MAC power-down * 010 -- PHY (radio) power-down * 011 -- Error * 10: XTAL ON request * 9-6: SYS_CONFIG * Indicates current system configuration, reflecting pins on chip * as forced high/low by device circuit board. * 4: GOING_TO_SLEEP * Indicates MAC is entering a power-saving sleep power-down. * Not a good time to access device-internal resources. * 3: MAC_ACCESS_REQ * Host sets this to request and maintain MAC wakeup, to allow host * access to device-internal resources. Host must wait for * MAC_CLOCK_READY (and !GOING_TO_SLEEP) before accessing non-CSR * device registers. * 2: INIT_DONE * Host sets this to put device into fully operational D0 power mode. * Host resets this after SW_RESET to put device into low power mode. * 0: MAC_CLOCK_READY * Indicates MAC (ucode processor, etc.) is powered up and can run. * Internal resources are accessible. * NOTE: This does not indicate that the processor is actually running. * NOTE: This does not indicate that device has completed * init or post-power-down restore of internal SRAM memory. * Use CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP as indication that * SRAM is restored and uCode is in normal operation mode. * Later devices (5xxx/6xxx/1xxx) use non-volatile SRAM, and * do not need to save/restore it. * NOTE: After device reset, this bit remains "0" until host sets * INIT_DONE */ #define CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY (0x00000001) #define CSR_GP_CNTRL_REG_FLAG_INIT_DONE (0x00000004) #define CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ (0x00000008) #define CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP (0x00000010) #define CSR_GP_CNTRL_REG_FLAG_XTAL_ON (0x00000400) #define CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN (0x00000001) #define CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE (0x07000000) #define CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN (0x04000000) #define CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW (0x08000000) /* From Bz we use these instead during init/reset flow */ #define CSR_GP_CNTRL_REG_FLAG_MAC_INIT BIT(6) #define CSR_GP_CNTRL_REG_FLAG_ROM_START BIT(7) #define CSR_GP_CNTRL_REG_FLAG_MAC_STATUS BIT(20) #define CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ BIT(21) #define CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_STATUS BIT(28) #define CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_REQ BIT(29) #define CSR_GP_CNTRL_REG_FLAG_SW_RESET BIT(31) /* HW REV */ #define CSR_HW_REV_STEP_DASH(_val) ((_val) & CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP_DASH) #define CSR_HW_REV_TYPE(_val) (((_val) & 0x000FFF0) >> 4) /* HW RFID */ #define CSR_HW_RFID_FLAVOR(_val) (((_val) & 0x000000F) >> 0) #define CSR_HW_RFID_DASH(_val) (((_val) & 0x00000F0) >> 4) #define CSR_HW_RFID_STEP(_val) (((_val) & 0x0000F00) >> 8) #define CSR_HW_RFID_TYPE(_val) (((_val) & 0x0FFF000) >> 12) #define CSR_HW_RFID_IS_CDB(_val) (((_val) & 0x10000000) >> 28) #define CSR_HW_RFID_IS_JACKET(_val) (((_val) & 0x20000000) >> 29) /** * hw_rev values */ enum { SILICON_A_STEP = 0, SILICON_B_STEP, SILICON_C_STEP, SILICON_Z_STEP = 0xf, }; #define CSR_HW_REV_TYPE_MSK (0x000FFF0) #define CSR_HW_REV_TYPE_5300 (0x0000020) #define CSR_HW_REV_TYPE_5350 (0x0000030) #define CSR_HW_REV_TYPE_5100 (0x0000050) #define CSR_HW_REV_TYPE_5150 
(0x0000040) #define CSR_HW_REV_TYPE_1000 (0x0000060) #define CSR_HW_REV_TYPE_6x00 (0x0000070) #define CSR_HW_REV_TYPE_6x50 (0x0000080) #define CSR_HW_REV_TYPE_6150 (0x0000084) #define CSR_HW_REV_TYPE_6x05 (0x00000B0) #define CSR_HW_REV_TYPE_6x30 CSR_HW_REV_TYPE_6x05 #define CSR_HW_REV_TYPE_6x35 CSR_HW_REV_TYPE_6x05 #define CSR_HW_REV_TYPE_2x30 (0x00000C0) #define CSR_HW_REV_TYPE_2x00 (0x0000100) #define CSR_HW_REV_TYPE_105 (0x0000110) #define CSR_HW_REV_TYPE_135 (0x0000120) +#define CSR_HW_REV_TYPE_3160 (0x0000164) #define CSR_HW_REV_TYPE_7265D (0x0000210) #define CSR_HW_REV_TYPE_NONE (0x00001F0) #define CSR_HW_REV_TYPE_QNJ (0x0000360) #define CSR_HW_REV_TYPE_QNJ_B0 (0x0000361) #define CSR_HW_REV_TYPE_QU_B0 (0x0000331) #define CSR_HW_REV_TYPE_QU_C0 (0x0000332) #define CSR_HW_REV_TYPE_QUZ (0x0000351) #define CSR_HW_REV_TYPE_HR_CDB (0x0000340) #define CSR_HW_REV_TYPE_SO (0x0000370) #define CSR_HW_REV_TYPE_TY (0x0000420) /* RF_ID value */ #define CSR_HW_RF_ID_TYPE_JF (0x00105100) #define CSR_HW_RF_ID_TYPE_HR (0x0010A000) #define CSR_HW_RF_ID_TYPE_HR1 (0x0010c100) #define CSR_HW_RF_ID_TYPE_HRCDB (0x00109F00) #define CSR_HW_RF_ID_TYPE_GF (0x0010D000) #define CSR_HW_RF_ID_TYPE_GF4 (0x0010E000) /* HW_RF CHIP STEP */ #define CSR_HW_RF_STEP(_val) (((_val) >> 8) & 0xF) /* EEPROM REG */ #define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001) #define CSR_EEPROM_REG_BIT_CMD (0x00000002) #define CSR_EEPROM_REG_MSK_ADDR (0x0000FFFC) #define CSR_EEPROM_REG_MSK_DATA (0xFFFF0000) /* EEPROM GP */ #define CSR_EEPROM_GP_VALID_MSK (0x00000007) /* signature */ #define CSR_EEPROM_GP_IF_OWNER_MSK (0x00000180) #define CSR_EEPROM_GP_BAD_SIGNATURE_BOTH_EEP_AND_OTP (0x00000000) #define CSR_EEPROM_GP_BAD_SIG_EEP_GOOD_SIG_OTP (0x00000001) #define CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K (0x00000002) #define CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K (0x00000004) /* One-time-programmable memory general purpose reg */ #define CSR_OTP_GP_REG_DEVICE_SELECT (0x00010000) /* 0 - EEPROM, 1 - OTP */ #define CSR_OTP_GP_REG_OTP_ACCESS_MODE (0x00020000) /* 0 - absolute, 1 - relative */ #define CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK (0x00100000) /* bit 20 */ #define CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK (0x00200000) /* bit 21 */ /* GP REG */ #define CSR_GP_REG_POWER_SAVE_STATUS_MSK (0x03000000) /* bit 24/25 */ #define CSR_GP_REG_NO_POWER_SAVE (0x00000000) #define CSR_GP_REG_MAC_POWER_SAVE (0x01000000) #define CSR_GP_REG_PHY_POWER_SAVE (0x02000000) #define CSR_GP_REG_POWER_SAVE_ERROR (0x03000000) /* CSR GIO */ #define CSR_GIO_REG_VAL_L0S_DISABLED (0x00000002) /* * UCODE-DRIVER GP (general purpose) mailbox register 1 * Host driver and uCode write and/or read this register to communicate with * each other. * Bit fields: * 4: UCODE_DISABLE * Host sets this to request permanent halt of uCode, same as * sending CARD_STATE command with "halt" bit set. * 3: CT_KILL_EXIT * Host sets this to request exit from CT_KILL state, i.e. host thinks * device temperature is low enough to continue normal operation. * 2: CMD_BLOCKED * Host sets this during RF KILL power-down sequence (HW, SW, CT KILL) * to release uCode to clear all Tx and command queues, enter * unassociated mode, and power down. * NOTE: Some devices also use HBUS_TARG_MBX_C register for this bit. * 1: SW_BIT_RFKILL * Host sets this when issuing CARD_STATE command to request * device sleep. * 0: MAC_SLEEP * uCode sets this when preparing a power-saving power-down. * uCode resets this when power-up is complete and SRAM is sane. 
* NOTE: device saves internal SRAM data to host when powering down, * and must restore this data after powering back up. * MAC_SLEEP is the best indication that restore is complete. * Later devices (5xxx/6xxx/1xxx) use non-volatile SRAM, and * do not need to save/restore it. */ #define CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP (0x00000001) #define CSR_UCODE_SW_BIT_RFKILL (0x00000002) #define CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED (0x00000004) #define CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT (0x00000008) #define CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE (0x00000020) /* GP Driver */ #define CSR_GP_DRIVER_REG_BIT_RADIO_SKU_MSK (0x00000003) #define CSR_GP_DRIVER_REG_BIT_RADIO_SKU_3x3_HYB (0x00000000) #define CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_HYB (0x00000001) #define CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA (0x00000002) #define CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6 (0x00000004) #define CSR_GP_DRIVER_REG_BIT_6050_1x2 (0x00000008) #define CSR_GP_DRIVER_REG_BIT_RADIO_IQ_INVER (0x00000080) /* GIO Chicken Bits (PCI Express bus link power management) */ #define CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX (0x00800000) #define CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER (0x20000000) /* LED */ #define CSR_LED_BSM_CTRL_MSK (0xFFFFFFDF) #define CSR_LED_REG_TURN_ON (0x60) #define CSR_LED_REG_TURN_OFF (0x20) /* ANA_PLL */ #define CSR50_ANA_PLL_CFG_VAL (0x00880300) /* HPET MEM debug */ #define CSR_DBG_HPET_MEM_REG_VAL (0xFFFF0000) /* DRAM INT TABLE */ #define CSR_DRAM_INT_TBL_ENABLE (1 << 31) #define CSR_DRAM_INIT_TBL_WRITE_POINTER (1 << 28) #define CSR_DRAM_INIT_TBL_WRAP_CHECK (1 << 27) /* * SHR target access (Shared block memory space) * * Shared internal registers can be accessed directly from PCI bus through SHR * arbiter without need for the MAC HW to be powered up. This is possible due to * indirect read/write via HEEP_CTRL_WRD_PCIEX_CTRL (0xEC) and * HEEP_CTRL_WRD_PCIEX_DATA (0xF4) registers. * * Use iwl_write32()/iwl_read32() family to access these registers. The MAC HW * need not be powered up so no "grab inc access" is required. */ /* * Registers for accessing shared registers (e.g. SHR_APMG_GP1, * SHR_APMG_XTAL_CFG). For example, to read from SHR_APMG_GP1 register (0x1DC), * first, write to the control register: * HEEP_CTRL_WRD_PCIEX_CTRL[15:0] = 0x1DC (offset of the SHR_APMG_GP1 register) * HEEP_CTRL_WRD_PCIEX_CTRL[29:28] = 2 (read access) * second, read from the data register HEEP_CTRL_WRD_PCIEX_DATA[31:0]. * * To write the register, first, write to the data register * HEEP_CTRL_WRD_PCIEX_DATA[31:0] and then: * HEEP_CTRL_WRD_PCIEX_CTRL[15:0] = 0x1DC (offset of the SHR_APMG_GP1 register) * HEEP_CTRL_WRD_PCIEX_CTRL[29:28] = 3 (write access) */ #define HEEP_CTRL_WRD_PCIEX_CTRL_REG (CSR_BASE+0x0ec) #define HEEP_CTRL_WRD_PCIEX_DATA_REG (CSR_BASE+0x0f4) /* * HBUS (Host-side Bus) * * HBUS registers are mapped directly into PCI bus space, but are used * to indirectly access device's internal memory or registers that * may be powered-down. * * Use iwl_write_direct32()/iwl_read_direct32() family for these registers; * host must "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ * to make sure the MAC (uCode processor, etc.) is powered up for accessing * internal resources. * * Do not use iwl_write32()/iwl_read32() family to access these registers; * these provide only simple PCI bus access, without waking up the MAC. */ #define HBUS_BASE (0x400) /* * Registers for accessing device's internal SRAM memory (e.g. SCD SRAM * structures, error log, event log, verifying uCode load). 
* First write to address register, then read from or write to data register * to complete the job. Once the address register is set up, accesses to * data registers auto-increment the address by one dword. * Bit usage for address registers (read or write): * 0-31: memory address within device */ #define HBUS_TARG_MEM_RADDR (HBUS_BASE+0x00c) #define HBUS_TARG_MEM_WADDR (HBUS_BASE+0x010) #define HBUS_TARG_MEM_WDAT (HBUS_BASE+0x018) #define HBUS_TARG_MEM_RDAT (HBUS_BASE+0x01c) /* Mailbox C, used as workaround alternative to CSR_UCODE_DRV_GP1 mailbox */ #define HBUS_TARG_MBX_C (HBUS_BASE+0x030) #define HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED (0x00000004) /* * Registers for accessing device's internal peripheral registers * (e.g. SCD, BSM, etc.). First write to address register, * then read from or write to data register to complete the job. * Bit usage for address registers (read or write): * 0-15: register address (offset) within device * 24-25: (# bytes - 1) to read or write (e.g. 3 for dword) */ #define HBUS_TARG_PRPH_WADDR (HBUS_BASE+0x044) #define HBUS_TARG_PRPH_RADDR (HBUS_BASE+0x048) #define HBUS_TARG_PRPH_WDAT (HBUS_BASE+0x04c) #define HBUS_TARG_PRPH_RDAT (HBUS_BASE+0x050) /* Used to enable DBGM */ #define HBUS_TARG_TEST_REG (HBUS_BASE+0x05c) /* * Per-Tx-queue write pointer (index, really!) * Indicates index to next TFD that driver will fill (1 past latest filled). * Bit usage: * 0-7: queue write index * 11-8: queue selector */ #define HBUS_TARG_WRPTR (HBUS_BASE+0x060) /* This register is common for Tx and Rx, Rx queues start from 512 */ #define HBUS_TARG_WRPTR_Q_SHIFT (16) #define HBUS_TARG_WRPTR_RX_Q(q) (((q) + 512) << HBUS_TARG_WRPTR_Q_SHIFT) /********************************************************** * CSR values **********************************************************/ /* * host interrupt timeout value * used with setting interrupt coalescing timer * the CSR_INT_COALESCING is an 8 bit register in 32-usec unit * * default interrupt coalescing timer is 64 x 32 = 2048 usecs */ #define IWL_HOST_INT_TIMEOUT_MAX (0xFF) #define IWL_HOST_INT_TIMEOUT_DEF (0x40) #define IWL_HOST_INT_TIMEOUT_MIN (0x0) #define IWL_HOST_INT_OPER_MODE BIT(31) /***************************************************************************** * 7000/3000 series SHR DTS addresses * *****************************************************************************/ /* Diode Results Register Structure: */ enum dtd_diode_reg { DTS_DIODE_REG_DIG_VAL = 0x000000FF, /* bits [7:0] */ DTS_DIODE_REG_VREF_LOW = 0x0000FF00, /* bits [15:8] */ DTS_DIODE_REG_VREF_HIGH = 0x00FF0000, /* bits [23:16] */ DTS_DIODE_REG_VREF_ID = 0x03000000, /* bits [25:24] */ DTS_DIODE_REG_PASS_ONCE = 0x80000000, /* bits [31:31] */ DTS_DIODE_REG_FLAGS_MSK = 0xFF000000, /* bits [31:24] */ /* Those are the masks INSIDE the flags bit-field: */ DTS_DIODE_REG_FLAGS_VREFS_ID_POS = 0, DTS_DIODE_REG_FLAGS_VREFS_ID = 0x00000003, /* bits [1:0] */ DTS_DIODE_REG_FLAGS_PASS_ONCE_POS = 7, DTS_DIODE_REG_FLAGS_PASS_ONCE = 0x00000080, /* bits [7:7] */ }; /***************************************************************************** * MSIX related registers * *****************************************************************************/ #define CSR_MSIX_BASE (0x2000) #define CSR_MSIX_FH_INT_CAUSES_AD (CSR_MSIX_BASE + 0x800) #define CSR_MSIX_FH_INT_MASK_AD (CSR_MSIX_BASE + 0x804) #define CSR_MSIX_HW_INT_CAUSES_AD (CSR_MSIX_BASE + 0x808) #define CSR_MSIX_HW_INT_MASK_AD (CSR_MSIX_BASE + 0x80C) #define CSR_MSIX_AUTOMASK_ST_AD (CSR_MSIX_BASE + 0x810) #define CSR_MSIX_RX_IVAR_AD_REG 
(CSR_MSIX_BASE + 0x880) #define CSR_MSIX_IVAR_AD_REG (CSR_MSIX_BASE + 0x890) #define CSR_MSIX_PENDING_PBA_AD (CSR_MSIX_BASE + 0x1000) #define CSR_MSIX_RX_IVAR(cause) (CSR_MSIX_RX_IVAR_AD_REG + (cause)) #define CSR_MSIX_IVAR(cause) (CSR_MSIX_IVAR_AD_REG + (cause)) #define MSIX_FH_INT_CAUSES_Q(q) (q) /* * Causes for the FH register interrupts */ enum msix_fh_int_causes { MSIX_FH_INT_CAUSES_Q0 = BIT(0), MSIX_FH_INT_CAUSES_Q1 = BIT(1), MSIX_FH_INT_CAUSES_D2S_CH0_NUM = BIT(16), MSIX_FH_INT_CAUSES_D2S_CH1_NUM = BIT(17), MSIX_FH_INT_CAUSES_S2D = BIT(19), MSIX_FH_INT_CAUSES_FH_ERR = BIT(21), }; /* The low 16 bits are for rx data queue indication */ #define MSIX_FH_INT_CAUSES_DATA_QUEUE 0xffff /* * Causes for the HW register interrupts */ enum msix_hw_int_causes { MSIX_HW_INT_CAUSES_REG_ALIVE = BIT(0), MSIX_HW_INT_CAUSES_REG_WAKEUP = BIT(1), MSIX_HW_INT_CAUSES_REG_IML = BIT(1), MSIX_HW_INT_CAUSES_REG_RESET_DONE = BIT(2), MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ = BIT(5), MSIX_HW_INT_CAUSES_REG_CT_KILL = BIT(6), MSIX_HW_INT_CAUSES_REG_RF_KILL = BIT(7), MSIX_HW_INT_CAUSES_REG_PERIODIC = BIT(8), MSIX_HW_INT_CAUSES_REG_SW_ERR = BIT(25), MSIX_HW_INT_CAUSES_REG_SCD = BIT(26), MSIX_HW_INT_CAUSES_REG_FH_TX = BIT(27), MSIX_HW_INT_CAUSES_REG_HW_ERR = BIT(29), MSIX_HW_INT_CAUSES_REG_HAP = BIT(30), }; #define MSIX_MIN_INTERRUPT_VECTORS 2 #define MSIX_AUTO_CLEAR_CAUSE 0 #define MSIX_NON_AUTO_CLEAR_CAUSE BIT(7) /***************************************************************************** * HW address related registers * *****************************************************************************/ #define CSR_ADDR_BASE(trans) ((trans)->cfg->mac_addr_from_csr) #define CSR_MAC_ADDR0_OTP(trans) (CSR_ADDR_BASE(trans) + 0x00) #define CSR_MAC_ADDR1_OTP(trans) (CSR_ADDR_BASE(trans) + 0x04) #define CSR_MAC_ADDR0_STRAP(trans) (CSR_ADDR_BASE(trans) + 0x08) #define CSR_MAC_ADDR1_STRAP(trans) (CSR_ADDR_BASE(trans) + 0x0c) #endif /* !__iwl_csr_h__ */ diff --git a/sys/contrib/dev/iwlwifi/iwl-dbg-tlv.h b/sys/contrib/dev/iwlwifi/iwl-dbg-tlv.h index 79287708bd6e..128059ca77e6 100644 --- a/sys/contrib/dev/iwlwifi/iwl-dbg-tlv.h +++ b/sys/contrib/dev/iwlwifi/iwl-dbg-tlv.h @@ -1,74 +1,76 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2018-2021 Intel Corporation + * Copyright (C) 2018-2022 Intel Corporation */ #ifndef __iwl_dbg_tlv_h__ #define __iwl_dbg_tlv_h__ #include #include #include #include +#define IWL_DBG_TLV_MAX_PRESET 15 + /** * struct iwl_dbg_tlv_node - debug TLV node * @list: list of &struct iwl_dbg_tlv_node * @tlv: debug TLV */ struct iwl_dbg_tlv_node { struct list_head list; struct iwl_ucode_tlv tlv; }; /** * union iwl_dbg_tlv_tp_data - data that is given in a time point * @fw_pkt: a packet received from the FW */ union iwl_dbg_tlv_tp_data { struct iwl_rx_packet *fw_pkt; }; /** * struct iwl_dbg_tlv_time_point_data * @trig_list: list of triggers * @active_trig_list: list of active triggers * @hcmd_list: list of host commands * @config_list: list of configuration */ struct iwl_dbg_tlv_time_point_data { struct list_head trig_list; struct list_head active_trig_list; struct list_head hcmd_list; struct list_head config_list; }; struct iwl_trans; struct iwl_fw_runtime; void iwl_dbg_tlv_load_bin(struct device *dev, struct iwl_trans *trans); void iwl_dbg_tlv_free(struct iwl_trans *trans); void iwl_dbg_tlv_alloc(struct iwl_trans *trans, const struct iwl_ucode_tlv *tlv, bool ext); void iwl_dbg_tlv_init(struct iwl_trans *trans); void _iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt, enum 
iwl_fw_ini_time_point tp_id, union iwl_dbg_tlv_tp_data *tp_data, bool sync); static inline void iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt, enum iwl_fw_ini_time_point tp_id, union iwl_dbg_tlv_tp_data *tp_data) { _iwl_dbg_tlv_time_point(fwrt, tp_id, tp_data, false); } static inline void iwl_dbg_tlv_time_point_sync(struct iwl_fw_runtime *fwrt, enum iwl_fw_ini_time_point tp_id, union iwl_dbg_tlv_tp_data *tp_data) { _iwl_dbg_tlv_time_point(fwrt, tp_id, tp_data, true); } void iwl_dbg_tlv_del_timers(struct iwl_trans *trans); #endif /* __iwl_dbg_tlv_h__*/ diff --git a/sys/contrib/dev/iwlwifi/iwl-drv.c b/sys/contrib/dev/iwlwifi/iwl-drv.c index f372d5c5ec87..263126a739b5 100644 --- a/sys/contrib/dev/iwlwifi/iwl-drv.c +++ b/sys/contrib/dev/iwlwifi/iwl-drv.c @@ -1,1994 +1,2003 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2005-2014, 2018-2021 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #if defined(__FreeBSD__) #define LINUXKPI_PARAM_PREFIX iwlwifi_ #endif #include #include #include #include #include #include "iwl-drv.h" #include "iwl-csr.h" #include "iwl-debug.h" #include "iwl-trans.h" #include "iwl-op-mode.h" #include "iwl-agn-hw.h" #include "fw/img.h" #include "iwl-dbg-tlv.h" #include "iwl-config.h" #include "iwl-modparams.h" #include "fw/api/alive.h" #include "fw/api/mac.h" /****************************************************************************** * * module boiler plate * ******************************************************************************/ #if defined(__linux__) #define DRV_DESCRIPTION "Intel(R) Wireless WiFi driver for Linux" MODULE_LICENSE("GPL"); #elif defined(__FreeBSD__) #define DRV_DESCRIPTION "Intel(R) Wireless WiFi based driver for FreeBSD" MODULE_LICENSE("BSD"); MODULE_VERSION(if_iwlwifi, 1); MODULE_DEPEND(if_iwlwifi, linuxkpi, 1, 1, 1); MODULE_DEPEND(if_iwlwifi, linuxkpi_wlan, 1, 1, 1); #endif MODULE_DESCRIPTION(DRV_DESCRIPTION); #ifdef CONFIG_IWLWIFI_DEBUGFS static struct dentry *iwl_dbgfs_root; #endif /** * struct iwl_drv - drv common data * @list: list of drv structures using this opmode * @fw: the iwl_fw structure * @op_mode: the running op_mode * @trans: transport layer * @dev: for debug prints only * @fw_index: firmware revision to try loading * @firmware_name: composite filename of ucode file to load * @request_firmware_complete: the firmware has been obtained from user space * @dbgfs_drv: debugfs root directory entry * @dbgfs_trans: debugfs transport directory entry * @dbgfs_op_mode: debugfs op_mode directory entry */ struct iwl_drv { struct list_head list; struct iwl_fw fw; struct iwl_op_mode *op_mode; struct iwl_trans *trans; struct device *dev; int fw_index; /* firmware we're trying to load */ char firmware_name[64]; /* name of firmware file to load */ struct completion request_firmware_complete; #ifdef CONFIG_IWLWIFI_DEBUGFS struct dentry *dbgfs_drv; struct dentry *dbgfs_trans; struct dentry *dbgfs_op_mode; #endif }; enum { DVM_OP_MODE, MVM_OP_MODE, }; /* Protects the table contents, i.e. 
the ops pointer & drv list */ static DEFINE_MUTEX(iwlwifi_opmode_table_mtx); static struct iwlwifi_opmode_table { const char *name; /* name: iwldvm, iwlmvm, etc */ const struct iwl_op_mode_ops *ops; /* pointer to op_mode ops */ struct list_head drv; /* list of devices using this op_mode */ } iwlwifi_opmode_table[] = { /* ops set when driver is initialized */ [DVM_OP_MODE] = { .name = "iwldvm", .ops = NULL }, [MVM_OP_MODE] = { .name = "iwlmvm", .ops = NULL }, }; #define IWL_DEFAULT_SCAN_CHANNELS 40 /* * struct fw_sec: Just for the image parsing process. * For the fw storage we are using struct fw_desc. */ struct fw_sec { const void *data; /* the sec data */ size_t size; /* section size */ u32 offset; /* offset of writing in the device */ }; static void iwl_free_fw_desc(struct iwl_drv *drv, struct fw_desc *desc) { vfree(desc->data); desc->data = NULL; desc->len = 0; } static void iwl_free_fw_img(struct iwl_drv *drv, struct fw_img *img) { int i; for (i = 0; i < img->num_sec; i++) iwl_free_fw_desc(drv, &img->sec[i]); kfree(img->sec); } static void iwl_dealloc_ucode(struct iwl_drv *drv) { int i; kfree(drv->fw.dbg.dest_tlv); for (i = 0; i < ARRAY_SIZE(drv->fw.dbg.conf_tlv); i++) kfree(drv->fw.dbg.conf_tlv[i]); for (i = 0; i < ARRAY_SIZE(drv->fw.dbg.trigger_tlv); i++) kfree(drv->fw.dbg.trigger_tlv[i]); kfree(drv->fw.dbg.mem_tlv); kfree(drv->fw.iml); kfree(drv->fw.ucode_capa.cmd_versions); kfree(drv->fw.phy_integration_ver); for (i = 0; i < IWL_UCODE_TYPE_MAX; i++) iwl_free_fw_img(drv, drv->fw.img + i); /* clear the data for the aborted load case */ memset(&drv->fw, 0, sizeof(drv->fw)); } static int iwl_alloc_fw_desc(struct iwl_drv *drv, struct fw_desc *desc, struct fw_sec *sec) { void *data; desc->data = NULL; if (!sec || !sec->size) return -EINVAL; data = vmalloc(sec->size); if (!data) return -ENOMEM; desc->len = sec->size; desc->offset = sec->offset; memcpy(data, sec->data, desc->len); desc->data = data; return 0; } static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context); static int iwl_request_firmware(struct iwl_drv *drv, bool first) { const struct iwl_cfg *cfg = drv->trans->cfg; char tag[8]; if (drv->trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_9000 && (drv->trans->hw_rev_step != SILICON_B_STEP && drv->trans->hw_rev_step != SILICON_C_STEP)) { IWL_ERR(drv, "Only HW steps B and C are currently supported (0x%0x)\n", drv->trans->hw_rev); return -EINVAL; } if (first) { drv->fw_index = cfg->ucode_api_max; sprintf(tag, "%d", drv->fw_index); } else { drv->fw_index--; sprintf(tag, "%d", drv->fw_index); } if (drv->fw_index < cfg->ucode_api_min) { IWL_ERR(drv, "no suitable firmware found!\n"); if (cfg->ucode_api_min == cfg->ucode_api_max) { IWL_ERR(drv, "%s%d is required\n", cfg->fw_name_pre, cfg->ucode_api_max); } else { IWL_ERR(drv, "minimum version required: %s%d\n", cfg->fw_name_pre, cfg->ucode_api_min); IWL_ERR(drv, "maximum version supported: %s%d\n", cfg->fw_name_pre, cfg->ucode_api_max); } IWL_ERR(drv, "check git://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git\n"); return -ENOENT; } snprintf(drv->firmware_name, sizeof(drv->firmware_name), "%s%s.ucode", cfg->fw_name_pre, tag); IWL_DEBUG_FW_INFO(drv, "attempting to load firmware '%s'\n", drv->firmware_name); return request_firmware_nowait(THIS_MODULE, 1, drv->firmware_name, drv->trans->dev, GFP_KERNEL, drv, iwl_req_fw_callback); } struct fw_img_parsing { struct fw_sec *sec; int sec_counter; }; /* * struct fw_sec_parsing: to extract fw section and it's offset from tlv */ struct fw_sec_parsing 
{ __le32 offset; const u8 data[]; } __packed; /** * struct iwl_tlv_calib_data - parse the default calib data from TLV * * @ucode_type: the uCode to which the following default calib relates. * @calib: default calibrations. */ struct iwl_tlv_calib_data { __le32 ucode_type; struct iwl_tlv_calib_ctrl calib; } __packed; struct iwl_firmware_pieces { struct fw_img_parsing img[IWL_UCODE_TYPE_MAX]; u32 init_evtlog_ptr, init_evtlog_size, init_errlog_ptr; u32 inst_evtlog_ptr, inst_evtlog_size, inst_errlog_ptr; /* FW debug data parsed for driver usage */ bool dbg_dest_tlv_init; const u8 *dbg_dest_ver; union { const struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv; const struct iwl_fw_dbg_dest_tlv_v1 *dbg_dest_tlv_v1; }; const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX]; size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX]; const struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX]; size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX]; struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv; size_t n_mem_tlv; }; /* * These functions are just to extract uCode section data from the pieces * structure. */ static struct fw_sec *get_sec(struct iwl_firmware_pieces *pieces, enum iwl_ucode_type type, int sec) { return &pieces->img[type].sec[sec]; } static void alloc_sec_data(struct iwl_firmware_pieces *pieces, enum iwl_ucode_type type, int sec) { struct fw_img_parsing *img = &pieces->img[type]; struct fw_sec *sec_memory; int size = sec + 1; size_t alloc_size = sizeof(*img->sec) * size; if (img->sec && img->sec_counter >= size) return; sec_memory = krealloc(img->sec, alloc_size, GFP_KERNEL); if (!sec_memory) return; img->sec = sec_memory; img->sec_counter = size; } static void set_sec_data(struct iwl_firmware_pieces *pieces, enum iwl_ucode_type type, int sec, const void *data) { alloc_sec_data(pieces, type, sec); pieces->img[type].sec[sec].data = data; } static void set_sec_size(struct iwl_firmware_pieces *pieces, enum iwl_ucode_type type, int sec, size_t size) { alloc_sec_data(pieces, type, sec); pieces->img[type].sec[sec].size = size; } static size_t get_sec_size(struct iwl_firmware_pieces *pieces, enum iwl_ucode_type type, int sec) { return pieces->img[type].sec[sec].size; } static void set_sec_offset(struct iwl_firmware_pieces *pieces, enum iwl_ucode_type type, int sec, u32 offset) { alloc_sec_data(pieces, type, sec); pieces->img[type].sec[sec].offset = offset; } -static int iwl_store_cscheme(struct iwl_fw *fw, const u8 *data, const u32 len) -{ - int i, j; - const struct iwl_fw_cscheme_list *l = - (const struct iwl_fw_cscheme_list *)data; - const struct iwl_fw_cipher_scheme *fwcs; - - if (len < sizeof(*l) || - len < sizeof(l->size) + l->size * sizeof(l->cs[0])) - return -EINVAL; - - for (i = 0, j = 0; i < IWL_UCODE_MAX_CS && i < l->size; i++) { - fwcs = &l->cs[j]; - - /* we skip schemes with zero cipher suite selector */ - if (!fwcs->cipher) - continue; - - fw->cs[j++] = *fwcs; - } - - return 0; -} - /* * Gets uCode section from tlv. 
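 * Each such TLV begins with a __le32 destination (device) offset,
 * followed by the raw section payload; the size stored for the
 * section therefore excludes the leading offset field.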
*/ static int iwl_store_ucode_sec(struct iwl_firmware_pieces *pieces, const void *data, enum iwl_ucode_type type, int size) { struct fw_img_parsing *img; struct fw_sec *sec; const struct fw_sec_parsing *sec_parse; size_t alloc_size; if (WARN_ON(!pieces || !data || type >= IWL_UCODE_TYPE_MAX)) return -1; sec_parse = (const struct fw_sec_parsing *)data; img = &pieces->img[type]; alloc_size = sizeof(*img->sec) * (img->sec_counter + 1); sec = krealloc(img->sec, alloc_size, GFP_KERNEL); if (!sec) return -ENOMEM; img->sec = sec; sec = &img->sec[img->sec_counter]; sec->offset = le32_to_cpu(sec_parse->offset); sec->data = sec_parse->data; sec->size = size - sizeof(sec_parse->offset); ++img->sec_counter; return 0; } static int iwl_set_default_calib(struct iwl_drv *drv, const u8 *data) { const struct iwl_tlv_calib_data *def_calib = (const struct iwl_tlv_calib_data *)data; u32 ucode_type = le32_to_cpu(def_calib->ucode_type); if (ucode_type >= IWL_UCODE_TYPE_MAX) { IWL_ERR(drv, "Wrong ucode_type %u for default calibration.\n", ucode_type); return -EINVAL; } drv->fw.default_calib[ucode_type].flow_trigger = def_calib->calib.flow_trigger; drv->fw.default_calib[ucode_type].event_trigger = def_calib->calib.event_trigger; return 0; } static void iwl_set_ucode_api_flags(struct iwl_drv *drv, const u8 *data, struct iwl_ucode_capabilities *capa) { const struct iwl_ucode_api *ucode_api = (const void *)data; u32 api_index = le32_to_cpu(ucode_api->api_index); u32 api_flags = le32_to_cpu(ucode_api->api_flags); int i; if (api_index >= DIV_ROUND_UP(NUM_IWL_UCODE_TLV_API, 32)) { IWL_WARN(drv, "api flags index %d larger than supported by driver\n", api_index); return; } for (i = 0; i < 32; i++) { if (api_flags & BIT(i)) __set_bit(i + 32 * api_index, capa->_api); } } static void iwl_set_ucode_capabilities(struct iwl_drv *drv, const u8 *data, struct iwl_ucode_capabilities *capa) { const struct iwl_ucode_capa *ucode_capa = (const void *)data; u32 api_index = le32_to_cpu(ucode_capa->api_index); u32 api_flags = le32_to_cpu(ucode_capa->api_capa); int i; if (api_index >= DIV_ROUND_UP(NUM_IWL_UCODE_TLV_CAPA, 32)) { IWL_WARN(drv, "capa flags index %d larger than supported by driver\n", api_index); return; } for (i = 0; i < 32; i++) { if (api_flags & BIT(i)) __set_bit(i + 32 * api_index, capa->_capa); } } static const char *iwl_reduced_fw_name(struct iwl_drv *drv) { const char *name = drv->firmware_name; if (strncmp(name, "iwlwifi-", 8) == 0) name += 8; return name; } static int iwl_parse_v1_v2_firmware(struct iwl_drv *drv, const struct firmware *ucode_raw, struct iwl_firmware_pieces *pieces) { const struct iwl_ucode_header *ucode = (const void *)ucode_raw->data; u32 api_ver, hdr_size, build; char buildstr[25]; const u8 *src; drv->fw.ucode_ver = le32_to_cpu(ucode->ver); api_ver = IWL_UCODE_API(drv->fw.ucode_ver); switch (api_ver) { default: hdr_size = 28; if (ucode_raw->size < hdr_size) { IWL_ERR(drv, "File size too small!\n"); return -EINVAL; } build = le32_to_cpu(ucode->u.v2.build); set_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST, le32_to_cpu(ucode->u.v2.inst_size)); set_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA, le32_to_cpu(ucode->u.v2.data_size)); set_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST, le32_to_cpu(ucode->u.v2.init_size)); set_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA, le32_to_cpu(ucode->u.v2.init_data_size)); src = ucode->u.v2.data; break; case 0: case 1: case 2: hdr_size = 24; if (ucode_raw->size < hdr_size) { IWL_ERR(drv, "File size too small!\n"); return 
-EINVAL; } build = 0; set_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST, le32_to_cpu(ucode->u.v1.inst_size)); set_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA, le32_to_cpu(ucode->u.v1.data_size)); set_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST, le32_to_cpu(ucode->u.v1.init_size)); set_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA, le32_to_cpu(ucode->u.v1.init_data_size)); src = ucode->u.v1.data; break; } if (build) sprintf(buildstr, " build %u", build); else buildstr[0] = '\0'; snprintf(drv->fw.fw_version, sizeof(drv->fw.fw_version), "%u.%u.%u.%u%s %s", IWL_UCODE_MAJOR(drv->fw.ucode_ver), IWL_UCODE_MINOR(drv->fw.ucode_ver), IWL_UCODE_API(drv->fw.ucode_ver), IWL_UCODE_SERIAL(drv->fw.ucode_ver), buildstr, iwl_reduced_fw_name(drv)); /* Verify size of file vs. image size info in file's header */ if (ucode_raw->size != hdr_size + get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST) + get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA) + get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST) + get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA)) { IWL_ERR(drv, "uCode file size %d does not match expected size\n", (int)ucode_raw->size); return -EINVAL; } set_sec_data(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST, src); src += get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST); set_sec_offset(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST, IWLAGN_RTC_INST_LOWER_BOUND); set_sec_data(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA, src); src += get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA); set_sec_offset(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA, IWLAGN_RTC_DATA_LOWER_BOUND); set_sec_data(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST, src); src += get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST); set_sec_offset(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST, IWLAGN_RTC_INST_LOWER_BOUND); set_sec_data(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA, src); src += get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA); set_sec_offset(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA, IWLAGN_RTC_DATA_LOWER_BOUND); return 0; } static void iwl_drv_set_dump_exclude(struct iwl_drv *drv, enum iwl_ucode_tlv_type tlv_type, const void *tlv_data, u32 tlv_len) { const struct iwl_fw_dump_exclude *fw = tlv_data; struct iwl_dump_exclude *excl; if (tlv_len < sizeof(*fw)) return; if (tlv_type == IWL_UCODE_TLV_SEC_TABLE_ADDR) { excl = &drv->fw.dump_excl[0]; /* second time we find this, it's for WoWLAN */ if (excl->addr) excl = &drv->fw.dump_excl_wowlan[0]; } else if (fw_has_capa(&drv->fw.ucode_capa, IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG)) { /* IWL_UCODE_TLV_D3_KEK_KCK_ADDR is regular image */ excl = &drv->fw.dump_excl[0]; } else { /* IWL_UCODE_TLV_D3_KEK_KCK_ADDR is WoWLAN image */ excl = &drv->fw.dump_excl_wowlan[0]; } if (excl->addr) excl++; if (excl->addr) { IWL_DEBUG_FW_INFO(drv, "found too many excludes in fw file\n"); return; } excl->addr = le32_to_cpu(fw->addr) & ~FW_ADDR_CACHE_CONTROL; excl->size = le32_to_cpu(fw->size); } static void iwl_parse_dbg_tlv_assert_tables(struct iwl_drv *drv, const struct iwl_ucode_tlv *tlv) { const struct iwl_fw_ini_region_tlv *region; u32 length = le32_to_cpu(tlv->length); u32 addr; if (length < offsetof(typeof(*region), special_mem) + sizeof(region->special_mem)) return; region = (const void *)tlv->data; addr = le32_to_cpu(region->special_mem.base_addr); addr += le32_to_cpu(region->special_mem.offset); addr &= ~FW_ADDR_CACHE_CONTROL; 
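/*
 * With the cache-control bits masked off, the computed address is
 * dispatched by the region sub-type below into the matching error
 * event table pointer (UMAC/LMAC/TCM/RCM) in trans->dbg, and the
 * corresponding IWL_ERROR_EVENT_TABLE_* status bit is recorded.
 */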
if (region->type != IWL_FW_INI_REGION_SPECIAL_DEVICE_MEMORY) return; switch (region->sub_type) { case IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_UMAC_ERROR_TABLE: drv->trans->dbg.umac_error_event_table = addr; drv->trans->dbg.error_event_table_tlv_status |= IWL_ERROR_EVENT_TABLE_UMAC; break; case IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_LMAC_1_ERROR_TABLE: drv->trans->dbg.lmac_error_event_table[0] = addr; drv->trans->dbg.error_event_table_tlv_status |= IWL_ERROR_EVENT_TABLE_LMAC1; break; case IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_LMAC_2_ERROR_TABLE: drv->trans->dbg.lmac_error_event_table[1] = addr; drv->trans->dbg.error_event_table_tlv_status |= IWL_ERROR_EVENT_TABLE_LMAC2; break; case IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_TCM_1_ERROR_TABLE: drv->trans->dbg.tcm_error_event_table[0] = addr; drv->trans->dbg.error_event_table_tlv_status |= IWL_ERROR_EVENT_TABLE_TCM1; break; case IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_TCM_2_ERROR_TABLE: drv->trans->dbg.tcm_error_event_table[1] = addr; drv->trans->dbg.error_event_table_tlv_status |= IWL_ERROR_EVENT_TABLE_TCM2; break; case IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_RCM_1_ERROR_TABLE: drv->trans->dbg.rcm_error_event_table[0] = addr; drv->trans->dbg.error_event_table_tlv_status |= IWL_ERROR_EVENT_TABLE_RCM1; break; case IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_RCM_2_ERROR_TABLE: drv->trans->dbg.rcm_error_event_table[1] = addr; drv->trans->dbg.error_event_table_tlv_status |= IWL_ERROR_EVENT_TABLE_RCM2; break; default: break; } } static int iwl_parse_tlv_firmware(struct iwl_drv *drv, const struct firmware *ucode_raw, struct iwl_firmware_pieces *pieces, struct iwl_ucode_capabilities *capa, bool *usniffer_images) { const struct iwl_tlv_ucode_header *ucode = (const void *)ucode_raw->data; const struct iwl_ucode_tlv *tlv; size_t len = ucode_raw->size; const u8 *data; u32 tlv_len; u32 usniffer_img; enum iwl_ucode_tlv_type tlv_type; const u8 *tlv_data; char buildstr[25]; u32 build, paging_mem_size; int num_of_cpus; bool usniffer_req = false; if (len < sizeof(*ucode)) { IWL_ERR(drv, "uCode has invalid length: %zd\n", len); return -EINVAL; } if (ucode->magic != cpu_to_le32(IWL_TLV_UCODE_MAGIC)) { IWL_ERR(drv, "invalid uCode magic: 0X%x\n", le32_to_cpu(ucode->magic)); return -EINVAL; } drv->fw.ucode_ver = le32_to_cpu(ucode->ver); memcpy(drv->fw.human_readable, ucode->human_readable, sizeof(drv->fw.human_readable)); build = le32_to_cpu(ucode->build); if (build) sprintf(buildstr, " build %u", build); else buildstr[0] = '\0'; snprintf(drv->fw.fw_version, sizeof(drv->fw.fw_version), "%u.%u.%u.%u%s %s", IWL_UCODE_MAJOR(drv->fw.ucode_ver), IWL_UCODE_MINOR(drv->fw.ucode_ver), IWL_UCODE_API(drv->fw.ucode_ver), IWL_UCODE_SERIAL(drv->fw.ucode_ver), buildstr, iwl_reduced_fw_name(drv)); data = ucode->data; len -= sizeof(*ucode); while (len >= sizeof(*tlv)) { len -= sizeof(*tlv); tlv = (const void *)data; tlv_len = le32_to_cpu(tlv->length); tlv_type = le32_to_cpu(tlv->type); tlv_data = tlv->data; if (len < tlv_len) { IWL_ERR(drv, "invalid TLV len: %zd/%u\n", len, tlv_len); return -EINVAL; } len -= ALIGN(tlv_len, 4); data += sizeof(*tlv) + ALIGN(tlv_len, 4); switch (tlv_type) { case IWL_UCODE_TLV_INST: set_sec_data(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST, tlv_data); set_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST, tlv_len); set_sec_offset(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST, IWLAGN_RTC_INST_LOWER_BOUND); break; case IWL_UCODE_TLV_DATA: set_sec_data(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA, tlv_data); set_sec_size(pieces, 
IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA, tlv_len); set_sec_offset(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA, IWLAGN_RTC_DATA_LOWER_BOUND); break; case IWL_UCODE_TLV_INIT: set_sec_data(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST, tlv_data); set_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST, tlv_len); set_sec_offset(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST, IWLAGN_RTC_INST_LOWER_BOUND); break; case IWL_UCODE_TLV_INIT_DATA: set_sec_data(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA, tlv_data); set_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA, tlv_len); set_sec_offset(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA, IWLAGN_RTC_DATA_LOWER_BOUND); break; case IWL_UCODE_TLV_BOOT: IWL_ERR(drv, "Found unexpected BOOT ucode\n"); break; case IWL_UCODE_TLV_PROBE_MAX_LEN: if (tlv_len != sizeof(u32)) goto invalid_tlv_len; capa->max_probe_length = le32_to_cpup((const __le32 *)tlv_data); break; case IWL_UCODE_TLV_PAN: if (tlv_len) goto invalid_tlv_len; capa->flags |= IWL_UCODE_TLV_FLAGS_PAN; break; case IWL_UCODE_TLV_FLAGS: /* must be at least one u32 */ if (tlv_len < sizeof(u32)) goto invalid_tlv_len; /* and a proper number of u32s */ if (tlv_len % sizeof(u32)) goto invalid_tlv_len; /* * This driver only reads the first u32 as * right now no more features are defined, * if that changes then either the driver * will not work with the new firmware, or * it'll not take advantage of new features. */ capa->flags = le32_to_cpup((const __le32 *)tlv_data); break; case IWL_UCODE_TLV_API_CHANGES_SET: if (tlv_len != sizeof(struct iwl_ucode_api)) goto invalid_tlv_len; iwl_set_ucode_api_flags(drv, tlv_data, capa); break; case IWL_UCODE_TLV_ENABLED_CAPABILITIES: if (tlv_len != sizeof(struct iwl_ucode_capa)) goto invalid_tlv_len; iwl_set_ucode_capabilities(drv, tlv_data, capa); break; case IWL_UCODE_TLV_INIT_EVTLOG_PTR: if (tlv_len != sizeof(u32)) goto invalid_tlv_len; pieces->init_evtlog_ptr = le32_to_cpup((const __le32 *)tlv_data); break; case IWL_UCODE_TLV_INIT_EVTLOG_SIZE: if (tlv_len != sizeof(u32)) goto invalid_tlv_len; pieces->init_evtlog_size = le32_to_cpup((const __le32 *)tlv_data); break; case IWL_UCODE_TLV_INIT_ERRLOG_PTR: if (tlv_len != sizeof(u32)) goto invalid_tlv_len; pieces->init_errlog_ptr = le32_to_cpup((const __le32 *)tlv_data); break; case IWL_UCODE_TLV_RUNT_EVTLOG_PTR: if (tlv_len != sizeof(u32)) goto invalid_tlv_len; pieces->inst_evtlog_ptr = le32_to_cpup((const __le32 *)tlv_data); break; case IWL_UCODE_TLV_RUNT_EVTLOG_SIZE: if (tlv_len != sizeof(u32)) goto invalid_tlv_len; pieces->inst_evtlog_size = le32_to_cpup((const __le32 *)tlv_data); break; case IWL_UCODE_TLV_RUNT_ERRLOG_PTR: if (tlv_len != sizeof(u32)) goto invalid_tlv_len; pieces->inst_errlog_ptr = le32_to_cpup((const __le32 *)tlv_data); break; case IWL_UCODE_TLV_ENHANCE_SENS_TBL: if (tlv_len) goto invalid_tlv_len; drv->fw.enhance_sensitivity_table = true; break; case IWL_UCODE_TLV_WOWLAN_INST: set_sec_data(pieces, IWL_UCODE_WOWLAN, IWL_UCODE_SECTION_INST, tlv_data); set_sec_size(pieces, IWL_UCODE_WOWLAN, IWL_UCODE_SECTION_INST, tlv_len); set_sec_offset(pieces, IWL_UCODE_WOWLAN, IWL_UCODE_SECTION_INST, IWLAGN_RTC_INST_LOWER_BOUND); break; case IWL_UCODE_TLV_WOWLAN_DATA: set_sec_data(pieces, IWL_UCODE_WOWLAN, IWL_UCODE_SECTION_DATA, tlv_data); set_sec_size(pieces, IWL_UCODE_WOWLAN, IWL_UCODE_SECTION_DATA, tlv_len); set_sec_offset(pieces, IWL_UCODE_WOWLAN, IWL_UCODE_SECTION_DATA, IWLAGN_RTC_DATA_LOWER_BOUND); break; case IWL_UCODE_TLV_PHY_CALIBRATION_SIZE: if (tlv_len != sizeof(u32)) goto 
invalid_tlv_len; capa->standard_phy_calibration_size = le32_to_cpup((const __le32 *)tlv_data); break; case IWL_UCODE_TLV_SEC_RT: iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_REGULAR, tlv_len); drv->fw.type = IWL_FW_MVM; break; case IWL_UCODE_TLV_SEC_INIT: iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_INIT, tlv_len); drv->fw.type = IWL_FW_MVM; break; case IWL_UCODE_TLV_SEC_WOWLAN: iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_WOWLAN, tlv_len); drv->fw.type = IWL_FW_MVM; break; case IWL_UCODE_TLV_DEF_CALIB: if (tlv_len != sizeof(struct iwl_tlv_calib_data)) goto invalid_tlv_len; if (iwl_set_default_calib(drv, tlv_data)) goto tlv_error; break; case IWL_UCODE_TLV_PHY_SKU: if (tlv_len != sizeof(u32)) goto invalid_tlv_len; drv->fw.phy_config = le32_to_cpup((const __le32 *)tlv_data); drv->fw.valid_tx_ant = (drv->fw.phy_config & FW_PHY_CFG_TX_CHAIN) >> FW_PHY_CFG_TX_CHAIN_POS; drv->fw.valid_rx_ant = (drv->fw.phy_config & FW_PHY_CFG_RX_CHAIN) >> FW_PHY_CFG_RX_CHAIN_POS; break; case IWL_UCODE_TLV_SECURE_SEC_RT: iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_REGULAR, tlv_len); drv->fw.type = IWL_FW_MVM; break; case IWL_UCODE_TLV_SECURE_SEC_INIT: iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_INIT, tlv_len); drv->fw.type = IWL_FW_MVM; break; case IWL_UCODE_TLV_SECURE_SEC_WOWLAN: iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_WOWLAN, tlv_len); drv->fw.type = IWL_FW_MVM; break; case IWL_UCODE_TLV_NUM_OF_CPU: if (tlv_len != sizeof(u32)) goto invalid_tlv_len; num_of_cpus = le32_to_cpup((const __le32 *)tlv_data); if (num_of_cpus == 2) { drv->fw.img[IWL_UCODE_REGULAR].is_dual_cpus = true; drv->fw.img[IWL_UCODE_INIT].is_dual_cpus = true; drv->fw.img[IWL_UCODE_WOWLAN].is_dual_cpus = true; } else if ((num_of_cpus > 2) || (num_of_cpus < 1)) { IWL_ERR(drv, "Driver support upto 2 CPUs\n"); return -EINVAL; } break; - case IWL_UCODE_TLV_CSCHEME: - if (iwl_store_cscheme(&drv->fw, tlv_data, tlv_len)) - goto invalid_tlv_len; - break; case IWL_UCODE_TLV_N_SCAN_CHANNELS: if (tlv_len != sizeof(u32)) goto invalid_tlv_len; capa->n_scan_channels = le32_to_cpup((const __le32 *)tlv_data); break; case IWL_UCODE_TLV_FW_VERSION: { const __le32 *ptr = (const void *)tlv_data; u32 major, minor; u8 local_comp; if (tlv_len != sizeof(u32) * 3) goto invalid_tlv_len; major = le32_to_cpup(ptr++); minor = le32_to_cpup(ptr++); local_comp = le32_to_cpup(ptr); if (major >= 35) snprintf(drv->fw.fw_version, sizeof(drv->fw.fw_version), "%u.%08x.%u %s", major, minor, local_comp, iwl_reduced_fw_name(drv)); else snprintf(drv->fw.fw_version, sizeof(drv->fw.fw_version), "%u.%u.%u %s", major, minor, local_comp, iwl_reduced_fw_name(drv)); break; } case IWL_UCODE_TLV_FW_DBG_DEST: { const struct iwl_fw_dbg_dest_tlv *dest = NULL; const struct iwl_fw_dbg_dest_tlv_v1 *dest_v1 = NULL; u8 mon_mode; pieces->dbg_dest_ver = (const u8 *)tlv_data; if (*pieces->dbg_dest_ver == 1) { dest = (const void *)tlv_data; } else if (*pieces->dbg_dest_ver == 0) { dest_v1 = (const void *)tlv_data; } else { IWL_ERR(drv, "The version is %d, and it is invalid\n", *pieces->dbg_dest_ver); break; } if (pieces->dbg_dest_tlv_init) { IWL_ERR(drv, "dbg destination ignored, already exists\n"); break; } pieces->dbg_dest_tlv_init = true; if (dest_v1) { pieces->dbg_dest_tlv_v1 = dest_v1; mon_mode = dest_v1->monitor_mode; } else { pieces->dbg_dest_tlv = dest; mon_mode = dest->monitor_mode; } IWL_INFO(drv, "Found debug destination: %s\n", get_fw_dbg_mode_string(mon_mode)); drv->fw.dbg.n_dest_reg = (dest_v1) ? 
tlv_len - offsetof(struct iwl_fw_dbg_dest_tlv_v1, reg_ops) : tlv_len - offsetof(struct iwl_fw_dbg_dest_tlv, reg_ops); drv->fw.dbg.n_dest_reg /= sizeof(drv->fw.dbg.dest_tlv->reg_ops[0]); break; } case IWL_UCODE_TLV_FW_DBG_CONF: { const struct iwl_fw_dbg_conf_tlv *conf = (const void *)tlv_data; if (!pieces->dbg_dest_tlv_init) { IWL_ERR(drv, "Ignore dbg config %d - no destination configured\n", conf->id); break; } if (conf->id >= ARRAY_SIZE(drv->fw.dbg.conf_tlv)) { IWL_ERR(drv, "Skip unknown configuration: %d\n", conf->id); break; } if (pieces->dbg_conf_tlv[conf->id]) { IWL_ERR(drv, "Ignore duplicate dbg config %d\n", conf->id); break; } if (conf->usniffer) usniffer_req = true; IWL_INFO(drv, "Found debug configuration: %d\n", conf->id); pieces->dbg_conf_tlv[conf->id] = conf; pieces->dbg_conf_tlv_len[conf->id] = tlv_len; break; } case IWL_UCODE_TLV_FW_DBG_TRIGGER: { const struct iwl_fw_dbg_trigger_tlv *trigger = (const void *)tlv_data; u32 trigger_id = le32_to_cpu(trigger->id); if (trigger_id >= ARRAY_SIZE(drv->fw.dbg.trigger_tlv)) { IWL_ERR(drv, "Skip unknown trigger: %u\n", trigger->id); break; } if (pieces->dbg_trigger_tlv[trigger_id]) { IWL_ERR(drv, "Ignore duplicate dbg trigger %u\n", trigger->id); break; } IWL_INFO(drv, "Found debug trigger: %u\n", trigger->id); pieces->dbg_trigger_tlv[trigger_id] = trigger; pieces->dbg_trigger_tlv_len[trigger_id] = tlv_len; break; } case IWL_UCODE_TLV_FW_DBG_DUMP_LST: { if (tlv_len != sizeof(u32)) { IWL_ERR(drv, "dbg lst mask size incorrect, skip\n"); break; } drv->fw.dbg.dump_mask = le32_to_cpup((const __le32 *)tlv_data); break; } case IWL_UCODE_TLV_SEC_RT_USNIFFER: *usniffer_images = true; iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_REGULAR_USNIFFER, tlv_len); break; case IWL_UCODE_TLV_PAGING: if (tlv_len != sizeof(u32)) goto invalid_tlv_len; paging_mem_size = le32_to_cpup((const __le32 *)tlv_data); IWL_DEBUG_FW(drv, "Paging: paging enabled (size = %u bytes)\n", paging_mem_size); if (paging_mem_size > MAX_PAGING_IMAGE_SIZE) { IWL_ERR(drv, "Paging: driver supports up to %lu bytes for paging image\n", MAX_PAGING_IMAGE_SIZE); return -EINVAL; } if (paging_mem_size & (FW_PAGING_SIZE - 1)) { IWL_ERR(drv, "Paging: image isn't multiple %lu\n", FW_PAGING_SIZE); return -EINVAL; } drv->fw.img[IWL_UCODE_REGULAR].paging_mem_size = paging_mem_size; usniffer_img = IWL_UCODE_REGULAR_USNIFFER; drv->fw.img[usniffer_img].paging_mem_size = paging_mem_size; break; case IWL_UCODE_TLV_FW_GSCAN_CAPA: /* ignored */ break; case IWL_UCODE_TLV_FW_MEM_SEG: { const struct iwl_fw_dbg_mem_seg_tlv *dbg_mem = (const void *)tlv_data; size_t size; struct iwl_fw_dbg_mem_seg_tlv *n; if (tlv_len != (sizeof(*dbg_mem))) goto invalid_tlv_len; IWL_DEBUG_INFO(drv, "Found debug memory segment: %u\n", dbg_mem->data_type); size = sizeof(*pieces->dbg_mem_tlv) * (pieces->n_mem_tlv + 1); n = krealloc(pieces->dbg_mem_tlv, size, GFP_KERNEL); if (!n) return -ENOMEM; pieces->dbg_mem_tlv = n; pieces->dbg_mem_tlv[pieces->n_mem_tlv] = *dbg_mem; pieces->n_mem_tlv++; break; } case IWL_UCODE_TLV_IML: { drv->fw.iml_len = tlv_len; drv->fw.iml = kmemdup(tlv_data, tlv_len, GFP_KERNEL); if (!drv->fw.iml) return -ENOMEM; break; } case IWL_UCODE_TLV_FW_RECOVERY_INFO: { const struct { __le32 buf_addr; __le32 buf_size; } *recov_info = (const void *)tlv_data; if (tlv_len != sizeof(*recov_info)) goto invalid_tlv_len; capa->error_log_addr = le32_to_cpu(recov_info->buf_addr); capa->error_log_size = le32_to_cpu(recov_info->buf_size); } break; case IWL_UCODE_TLV_FW_FSEQ_VERSION: { const struct { u8 version[32]; u8 
sha1[20]; } *fseq_ver = (const void *)tlv_data; if (tlv_len != sizeof(*fseq_ver)) goto invalid_tlv_len; IWL_INFO(drv, "TLV_FW_FSEQ_VERSION: %s\n", fseq_ver->version); } break; case IWL_UCODE_TLV_FW_NUM_STATIONS: if (tlv_len != sizeof(u32)) goto invalid_tlv_len; if (le32_to_cpup((const __le32 *)tlv_data) > IWL_MVM_STATION_COUNT_MAX) { IWL_ERR(drv, "%d is an invalid number of station\n", le32_to_cpup((const __le32 *)tlv_data)); goto tlv_error; } capa->num_stations = le32_to_cpup((const __le32 *)tlv_data); break; case IWL_UCODE_TLV_UMAC_DEBUG_ADDRS: { const struct iwl_umac_debug_addrs *dbg_ptrs = (const void *)tlv_data; if (tlv_len != sizeof(*dbg_ptrs)) goto invalid_tlv_len; if (drv->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_22000) break; drv->trans->dbg.umac_error_event_table = le32_to_cpu(dbg_ptrs->error_info_addr) & ~FW_ADDR_CACHE_CONTROL; drv->trans->dbg.error_event_table_tlv_status |= IWL_ERROR_EVENT_TABLE_UMAC; break; } case IWL_UCODE_TLV_LMAC_DEBUG_ADDRS: { const struct iwl_lmac_debug_addrs *dbg_ptrs = (const void *)tlv_data; if (tlv_len != sizeof(*dbg_ptrs)) goto invalid_tlv_len; if (drv->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_22000) break; drv->trans->dbg.lmac_error_event_table[0] = le32_to_cpu(dbg_ptrs->error_event_table_ptr) & ~FW_ADDR_CACHE_CONTROL; drv->trans->dbg.error_event_table_tlv_status |= IWL_ERROR_EVENT_TABLE_LMAC1; break; } case IWL_UCODE_TLV_TYPE_REGIONS: iwl_parse_dbg_tlv_assert_tables(drv, tlv); fallthrough; case IWL_UCODE_TLV_TYPE_DEBUG_INFO: case IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION: case IWL_UCODE_TLV_TYPE_HCMD: case IWL_UCODE_TLV_TYPE_TRIGGERS: case IWL_UCODE_TLV_TYPE_CONF_SET: if (iwlwifi_mod_params.enable_ini) iwl_dbg_tlv_alloc(drv->trans, tlv, false); break; case IWL_UCODE_TLV_CMD_VERSIONS: if (tlv_len % sizeof(struct iwl_fw_cmd_version)) { IWL_ERR(drv, "Invalid length for command versions: %u\n", tlv_len); tlv_len /= sizeof(struct iwl_fw_cmd_version); tlv_len *= sizeof(struct iwl_fw_cmd_version); } if (WARN_ON(capa->cmd_versions)) return -EINVAL; capa->cmd_versions = kmemdup(tlv_data, tlv_len, GFP_KERNEL); if (!capa->cmd_versions) return -ENOMEM; capa->n_cmd_versions = tlv_len / sizeof(struct iwl_fw_cmd_version); break; case IWL_UCODE_TLV_PHY_INTEGRATION_VERSION: if (drv->fw.phy_integration_ver) { IWL_ERR(drv, "phy integration str ignored, already exists\n"); break; } drv->fw.phy_integration_ver = kmemdup(tlv_data, tlv_len, GFP_KERNEL); if (!drv->fw.phy_integration_ver) return -ENOMEM; drv->fw.phy_integration_ver_len = tlv_len; break; case IWL_UCODE_TLV_SEC_TABLE_ADDR: case IWL_UCODE_TLV_D3_KEK_KCK_ADDR: iwl_drv_set_dump_exclude(drv, tlv_type, tlv_data, tlv_len); break; default: IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type); break; } } if (!fw_has_capa(capa, IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED) && usniffer_req && !*usniffer_images) { IWL_ERR(drv, "user selected to work with usniffer but usniffer image isn't available in ucode package\n"); return -EINVAL; } if (len) { IWL_ERR(drv, "invalid TLV after parsing: %zd\n", len); #if defined(__linux__) iwl_print_hex_dump(drv, IWL_DL_FW, data, len); #elif defined(__FreeBSD__) #ifdef CONFIG_IWLWIFI_DEBUGFS iwl_print_hex_dump(drv, IWL_DL_FW, "TLV ", data, len); #endif #endif return -EINVAL; } return 0; invalid_tlv_len: IWL_ERR(drv, "TLV %d has invalid size: %u\n", tlv_type, tlv_len); tlv_error: #if defined(__linux__) iwl_print_hex_dump(drv, IWL_DL_FW, tlv_data, tlv_len); #elif defined(__FreeBSD__) #ifdef CONFIG_IWLWIFI_DEBUGFS iwl_print_hex_dump(drv, IWL_DL_FW, "TLV ", tlv_data, tlv_len); 
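/*
 * Note: on FreeBSD the hex dump is only compiled in with
 * CONFIG_IWLWIFI_DEBUGFS, and this iwl_print_hex_dump() variant takes
 * an extra prefix-string argument ("TLV ") compared to the Linux one.
 */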
#endif #endif return -EINVAL; } static int iwl_alloc_ucode(struct iwl_drv *drv, struct iwl_firmware_pieces *pieces, enum iwl_ucode_type type) { int i; struct fw_desc *sec; sec = kcalloc(pieces->img[type].sec_counter, sizeof(*sec), GFP_KERNEL); if (!sec) return -ENOMEM; drv->fw.img[type].sec = sec; drv->fw.img[type].num_sec = pieces->img[type].sec_counter; for (i = 0; i < pieces->img[type].sec_counter; i++) if (iwl_alloc_fw_desc(drv, &sec[i], get_sec(pieces, type, i))) return -ENOMEM; return 0; } static int validate_sec_sizes(struct iwl_drv *drv, struct iwl_firmware_pieces *pieces, const struct iwl_cfg *cfg) { IWL_DEBUG_INFO(drv, "f/w package hdr runtime inst size = %zd\n", get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST)); IWL_DEBUG_INFO(drv, "f/w package hdr runtime data size = %zd\n", get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA)); IWL_DEBUG_INFO(drv, "f/w package hdr init inst size = %zd\n", get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST)); IWL_DEBUG_INFO(drv, "f/w package hdr init data size = %zd\n", get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA)); /* Verify that uCode images will fit in card's SRAM. */ if (get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST) > cfg->max_inst_size) { IWL_ERR(drv, "uCode instr len %zd too large to fit in\n", get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST)); return -1; } if (get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA) > cfg->max_data_size) { IWL_ERR(drv, "uCode data len %zd too large to fit in\n", get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA)); return -1; } if (get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST) > cfg->max_inst_size) { IWL_ERR(drv, "uCode init instr len %zd too large to fit in\n", get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST)); return -1; } if (get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA) > cfg->max_data_size) { IWL_ERR(drv, "uCode init data len %zd too large to fit in\n", get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA)); return -1; } return 0; } static struct iwl_op_mode * _iwl_op_mode_start(struct iwl_drv *drv, struct iwlwifi_opmode_table *op) { const struct iwl_op_mode_ops *ops = op->ops; struct dentry *dbgfs_dir = NULL; struct iwl_op_mode *op_mode = NULL; int retry, max_retry = !!iwlwifi_mod_params.fw_restart * IWL_MAX_INIT_RETRY; for (retry = 0; retry <= max_retry; retry++) { #ifdef CONFIG_IWLWIFI_DEBUGFS drv->dbgfs_op_mode = debugfs_create_dir(op->name, drv->dbgfs_drv); dbgfs_dir = drv->dbgfs_op_mode; #endif op_mode = ops->start(drv->trans, drv->trans->cfg, &drv->fw, dbgfs_dir); if (op_mode) return op_mode; IWL_ERR(drv, "retry init count %d\n", retry); #ifdef CONFIG_IWLWIFI_DEBUGFS debugfs_remove_recursive(drv->dbgfs_op_mode); drv->dbgfs_op_mode = NULL; #endif } return NULL; } static void _iwl_op_mode_stop(struct iwl_drv *drv) { /* op_mode can be NULL if its start failed */ if (drv->op_mode) { iwl_op_mode_stop(drv->op_mode); drv->op_mode = NULL; #ifdef CONFIG_IWLWIFI_DEBUGFS debugfs_remove_recursive(drv->dbgfs_op_mode); drv->dbgfs_op_mode = NULL; #endif } } /* * iwl_req_fw_callback - callback when firmware was loaded * * If loaded successfully, copies the firmware into buffers * for the card to fetch (via DMA). 
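 * On a parse or version-check failure the raw image is released and
 * iwl_request_firmware(drv, false) is retried, stepping the requested
 * API version down one release at a time until cfg->ucode_api_min is
 * reached (the "try_again" path below).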
*/ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) { struct iwl_drv *drv = context; struct iwl_fw *fw = &drv->fw; const struct iwl_ucode_header *ucode; struct iwlwifi_opmode_table *op; int err; struct iwl_firmware_pieces *pieces; const unsigned int api_max = drv->trans->cfg->ucode_api_max; const unsigned int api_min = drv->trans->cfg->ucode_api_min; size_t trigger_tlv_sz[FW_DBG_TRIGGER_MAX]; u32 api_ver; int i; bool load_module = false; bool usniffer_images = false; bool failure = true; fw->ucode_capa.max_probe_length = IWL_DEFAULT_MAX_PROBE_LENGTH; fw->ucode_capa.standard_phy_calibration_size = IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE; fw->ucode_capa.n_scan_channels = IWL_DEFAULT_SCAN_CHANNELS; fw->ucode_capa.num_stations = IWL_MVM_STATION_COUNT_MAX; /* dump all fw memory areas by default */ fw->dbg.dump_mask = 0xffffffff; pieces = kzalloc(sizeof(*pieces), GFP_KERNEL); if (!pieces) goto out_free_fw; if (!ucode_raw) goto try_again; IWL_DEBUG_FW_INFO(drv, "Loaded firmware file '%s' (%zd bytes).\n", drv->firmware_name, ucode_raw->size); /* Make sure that we got at least the API version number */ if (ucode_raw->size < 4) { IWL_ERR(drv, "File size way too small!\n"); goto try_again; } /* Data from ucode file: header followed by uCode images */ ucode = (const struct iwl_ucode_header *)ucode_raw->data; if (ucode->ver) err = iwl_parse_v1_v2_firmware(drv, ucode_raw, pieces); else err = iwl_parse_tlv_firmware(drv, ucode_raw, pieces, &fw->ucode_capa, &usniffer_images); if (err) goto try_again; if (fw_has_api(&drv->fw.ucode_capa, IWL_UCODE_TLV_API_NEW_VERSION)) api_ver = drv->fw.ucode_ver; else api_ver = IWL_UCODE_API(drv->fw.ucode_ver); /* * api_ver should match the api version forming part of the * firmware filename ... but we don't check for that and only rely * on the API version read from firmware header from here on forward */ if (api_ver < api_min || api_ver > api_max) { IWL_ERR(drv, "Driver unable to support your firmware API. " "Driver supports v%u, firmware is v%u.\n", api_max, api_ver); goto try_again; } /* * In mvm uCode there is no difference between data and instructions * sections. */ if (fw->type == IWL_FW_DVM && validate_sec_sizes(drv, pieces, drv->trans->cfg)) goto try_again; /* Allocate ucode buffers for card's bus-master loading ... 
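 * (one struct fw_desc is allocated per parsed section in
 * iwl_alloc_ucode(); the payload itself is copied into a vmalloc()ed
 * buffer by iwl_alloc_fw_desc())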
*/ /* Runtime instructions and 2 copies of data: * 1) unmodified from disk * 2) backup cache for save/restore during power-downs */ for (i = 0; i < IWL_UCODE_TYPE_MAX; i++) if (iwl_alloc_ucode(drv, pieces, i)) goto out_free_fw; if (pieces->dbg_dest_tlv_init) { size_t dbg_dest_size = sizeof(*drv->fw.dbg.dest_tlv) + sizeof(drv->fw.dbg.dest_tlv->reg_ops[0]) * drv->fw.dbg.n_dest_reg; drv->fw.dbg.dest_tlv = kmalloc(dbg_dest_size, GFP_KERNEL); if (!drv->fw.dbg.dest_tlv) goto out_free_fw; if (*pieces->dbg_dest_ver == 0) { memcpy(drv->fw.dbg.dest_tlv, pieces->dbg_dest_tlv_v1, dbg_dest_size); } else { struct iwl_fw_dbg_dest_tlv_v1 *dest_tlv = drv->fw.dbg.dest_tlv; dest_tlv->version = pieces->dbg_dest_tlv->version; dest_tlv->monitor_mode = pieces->dbg_dest_tlv->monitor_mode; dest_tlv->size_power = pieces->dbg_dest_tlv->size_power; dest_tlv->wrap_count = pieces->dbg_dest_tlv->wrap_count; dest_tlv->write_ptr_reg = pieces->dbg_dest_tlv->write_ptr_reg; dest_tlv->base_shift = pieces->dbg_dest_tlv->base_shift; memcpy(dest_tlv->reg_ops, pieces->dbg_dest_tlv->reg_ops, sizeof(drv->fw.dbg.dest_tlv->reg_ops[0]) * drv->fw.dbg.n_dest_reg); /* In version 1 of the destination tlv, which is * relevant for internal buffer exclusively, * the base address is part of given with the length * of the buffer, and the size shift is give instead of * end shift. We now store these values in base_reg, * and end shift, and when dumping the data we'll * manipulate it for extracting both the length and * base address */ dest_tlv->base_reg = pieces->dbg_dest_tlv->cfg_reg; dest_tlv->end_shift = pieces->dbg_dest_tlv->size_shift; } } for (i = 0; i < ARRAY_SIZE(drv->fw.dbg.conf_tlv); i++) { if (pieces->dbg_conf_tlv[i]) { drv->fw.dbg.conf_tlv[i] = kmemdup(pieces->dbg_conf_tlv[i], pieces->dbg_conf_tlv_len[i], GFP_KERNEL); if (!drv->fw.dbg.conf_tlv[i]) goto out_free_fw; } } memset(&trigger_tlv_sz, 0xff, sizeof(trigger_tlv_sz)); trigger_tlv_sz[FW_DBG_TRIGGER_MISSED_BEACONS] = sizeof(struct iwl_fw_dbg_trigger_missed_bcon); trigger_tlv_sz[FW_DBG_TRIGGER_CHANNEL_SWITCH] = 0; trigger_tlv_sz[FW_DBG_TRIGGER_FW_NOTIF] = sizeof(struct iwl_fw_dbg_trigger_cmd); trigger_tlv_sz[FW_DBG_TRIGGER_MLME] = sizeof(struct iwl_fw_dbg_trigger_mlme); trigger_tlv_sz[FW_DBG_TRIGGER_STATS] = sizeof(struct iwl_fw_dbg_trigger_stats); trigger_tlv_sz[FW_DBG_TRIGGER_RSSI] = sizeof(struct iwl_fw_dbg_trigger_low_rssi); trigger_tlv_sz[FW_DBG_TRIGGER_TXQ_TIMERS] = sizeof(struct iwl_fw_dbg_trigger_txq_timer); trigger_tlv_sz[FW_DBG_TRIGGER_TIME_EVENT] = sizeof(struct iwl_fw_dbg_trigger_time_event); trigger_tlv_sz[FW_DBG_TRIGGER_BA] = sizeof(struct iwl_fw_dbg_trigger_ba); trigger_tlv_sz[FW_DBG_TRIGGER_TDLS] = sizeof(struct iwl_fw_dbg_trigger_tdls); for (i = 0; i < ARRAY_SIZE(drv->fw.dbg.trigger_tlv); i++) { if (pieces->dbg_trigger_tlv[i]) { /* * If the trigger isn't long enough, WARN and exit. * Someone is trying to debug something and he won't * be able to catch the bug he is trying to chase. * We'd better be noisy to be sure he knows what's * going on. 
*/ if (WARN_ON(pieces->dbg_trigger_tlv_len[i] < (trigger_tlv_sz[i] + sizeof(struct iwl_fw_dbg_trigger_tlv)))) goto out_free_fw; drv->fw.dbg.trigger_tlv_len[i] = pieces->dbg_trigger_tlv_len[i]; drv->fw.dbg.trigger_tlv[i] = kmemdup(pieces->dbg_trigger_tlv[i], drv->fw.dbg.trigger_tlv_len[i], GFP_KERNEL); if (!drv->fw.dbg.trigger_tlv[i]) goto out_free_fw; } } /* Now that we can no longer fail, copy information */ drv->fw.dbg.mem_tlv = pieces->dbg_mem_tlv; pieces->dbg_mem_tlv = NULL; drv->fw.dbg.n_mem_tlv = pieces->n_mem_tlv; /* * The (size - 16) / 12 formula is based on the information recorded * for each event, which is of mode 1 (including timestamp) for all * new microcodes that include this information. */ fw->init_evtlog_ptr = pieces->init_evtlog_ptr; if (pieces->init_evtlog_size) fw->init_evtlog_size = (pieces->init_evtlog_size - 16)/12; else fw->init_evtlog_size = drv->trans->trans_cfg->base_params->max_event_log_size; fw->init_errlog_ptr = pieces->init_errlog_ptr; fw->inst_evtlog_ptr = pieces->inst_evtlog_ptr; if (pieces->inst_evtlog_size) fw->inst_evtlog_size = (pieces->inst_evtlog_size - 16)/12; else fw->inst_evtlog_size = drv->trans->trans_cfg->base_params->max_event_log_size; fw->inst_errlog_ptr = pieces->inst_errlog_ptr; /* * figure out the offset of chain noise reset and gain commands * base on the size of standard phy calibration commands table size */ if (fw->ucode_capa.standard_phy_calibration_size > IWL_MAX_PHY_CALIBRATE_TBL_SIZE) fw->ucode_capa.standard_phy_calibration_size = IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE; /* We have our copies now, allow OS release its copies */ release_firmware(ucode_raw); iwl_dbg_tlv_load_bin(drv->trans->dev, drv->trans); mutex_lock(&iwlwifi_opmode_table_mtx); switch (fw->type) { case IWL_FW_DVM: op = &iwlwifi_opmode_table[DVM_OP_MODE]; break; default: WARN(1, "Invalid fw type %d\n", fw->type); fallthrough; case IWL_FW_MVM: op = &iwlwifi_opmode_table[MVM_OP_MODE]; break; } IWL_INFO(drv, "loaded firmware version %s op_mode %s\n", drv->fw.fw_version, op->name); /* add this device to the list of devices using this op_mode */ list_add_tail(&drv->list, &op->drv); if (op->ops) { drv->op_mode = _iwl_op_mode_start(drv, op); if (!drv->op_mode) { mutex_unlock(&iwlwifi_opmode_table_mtx); goto out_unbind; } } else { load_module = true; } mutex_unlock(&iwlwifi_opmode_table_mtx); /* * Complete the firmware request last so that * a driver unbind (stop) doesn't run while we * are doing the start() above. */ complete(&drv->request_firmware_complete); /* * Load the module last so we don't block anything * else from proceeding if the module fails to load * or hangs loading. 
*/ if (load_module) request_module("%s", op->name); failure = false; goto free; try_again: /* try next, if any */ release_firmware(ucode_raw); if (iwl_request_firmware(drv, false)) goto out_unbind; goto free; out_free_fw: release_firmware(ucode_raw); out_unbind: complete(&drv->request_firmware_complete); device_release_driver(drv->trans->dev); + /* drv has just been freed by the release */ + failure = false; free: if (failure) iwl_dealloc_ucode(drv); if (pieces) { for (i = 0; i < ARRAY_SIZE(pieces->img); i++) kfree(pieces->img[i].sec); kfree(pieces->dbg_mem_tlv); kfree(pieces); } } struct iwl_drv *iwl_drv_start(struct iwl_trans *trans) { struct iwl_drv *drv; int ret; drv = kzalloc(sizeof(*drv), GFP_KERNEL); if (!drv) { ret = -ENOMEM; goto err; } drv->trans = trans; drv->dev = trans->dev; init_completion(&drv->request_firmware_complete); INIT_LIST_HEAD(&drv->list); #ifdef CONFIG_IWLWIFI_DEBUGFS /* Create the device debugfs entries. */ drv->dbgfs_drv = debugfs_create_dir(dev_name(trans->dev), iwl_dbgfs_root); /* Create transport layer debugfs dir */ drv->trans->dbgfs_dir = debugfs_create_dir("trans", drv->dbgfs_drv); #endif drv->trans->dbg.domains_bitmap = IWL_TRANS_FW_DBG_DOMAIN(drv->trans); ret = iwl_request_firmware(drv, true); if (ret) { IWL_ERR(trans, "Couldn't request the fw\n"); goto err_fw; } return drv; err_fw: #ifdef CONFIG_IWLWIFI_DEBUGFS debugfs_remove_recursive(drv->dbgfs_drv); iwl_dbg_tlv_free(drv->trans); #endif kfree(drv); err: return ERR_PTR(ret); } void iwl_drv_stop(struct iwl_drv *drv) { wait_for_completion(&drv->request_firmware_complete); _iwl_op_mode_stop(drv); iwl_dealloc_ucode(drv); mutex_lock(&iwlwifi_opmode_table_mtx); /* * List is empty (this item wasn't added) * when firmware loading failed -- in that * case we can't remove it from any list. 
*/ if (!list_empty(&drv->list)) list_del(&drv->list); mutex_unlock(&iwlwifi_opmode_table_mtx); #ifdef CONFIG_IWLWIFI_DEBUGFS drv->trans->ops->debugfs_cleanup(drv->trans); debugfs_remove_recursive(drv->dbgfs_drv); #endif iwl_dbg_tlv_free(drv->trans); kfree(drv); } +#define ENABLE_INI (IWL_DBG_TLV_MAX_PRESET + 1) /* shared module parameters */ struct iwl_mod_params iwlwifi_mod_params = { .fw_restart = true, .bt_coex_active = true, .power_level = IWL_POWER_INDEX_1, .uapsd_disable = IWL_DISABLE_UAPSD_BSS | IWL_DISABLE_UAPSD_P2P_CLIENT, - .enable_ini = true, + .enable_ini = ENABLE_INI, #if defined(__FreeBSD__) .disable_11n = 1, .disable_11ac = true, .disable_11ax = true, #endif /* the rest are 0 by default */ }; IWL_EXPORT_SYMBOL(iwlwifi_mod_params); int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops) { int i; struct iwl_drv *drv; struct iwlwifi_opmode_table *op; mutex_lock(&iwlwifi_opmode_table_mtx); for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++) { op = &iwlwifi_opmode_table[i]; if (strcmp(op->name, name)) continue; op->ops = ops; /* TODO: need to handle exceptional case */ list_for_each_entry(drv, &op->drv, list) drv->op_mode = _iwl_op_mode_start(drv, op); mutex_unlock(&iwlwifi_opmode_table_mtx); return 0; } mutex_unlock(&iwlwifi_opmode_table_mtx); return -EIO; } IWL_EXPORT_SYMBOL(iwl_opmode_register); void iwl_opmode_deregister(const char *name) { int i; struct iwl_drv *drv; mutex_lock(&iwlwifi_opmode_table_mtx); for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++) { if (strcmp(iwlwifi_opmode_table[i].name, name)) continue; iwlwifi_opmode_table[i].ops = NULL; /* call the stop routine for all devices */ list_for_each_entry(drv, &iwlwifi_opmode_table[i].drv, list) _iwl_op_mode_stop(drv); mutex_unlock(&iwlwifi_opmode_table_mtx); return; } mutex_unlock(&iwlwifi_opmode_table_mtx); } IWL_EXPORT_SYMBOL(iwl_opmode_deregister); static int __init iwl_drv_init(void) { int i, err; for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++) INIT_LIST_HEAD(&iwlwifi_opmode_table[i].drv); pr_info(DRV_DESCRIPTION "\n"); #ifdef CONFIG_IWLWIFI_DEBUGFS /* Create the root of iwlwifi debugfs subsystem. 
*/ iwl_dbgfs_root = debugfs_create_dir(DRV_NAME, NULL); #endif err = iwl_pci_register_driver(); if (err) goto cleanup_debugfs; return 0; cleanup_debugfs: #ifdef CONFIG_IWLWIFI_DEBUGFS debugfs_remove_recursive(iwl_dbgfs_root); #endif return err; } module_init(iwl_drv_init); static void __exit iwl_drv_exit(void) { iwl_pci_unregister_driver(); #ifdef CONFIG_IWLWIFI_DEBUGFS debugfs_remove_recursive(iwl_dbgfs_root); #endif } module_exit(iwl_drv_exit); #ifdef CONFIG_IWLWIFI_DEBUG module_param_named(debug, iwlwifi_mod_params.debug_level, uint, 0644); MODULE_PARM_DESC(debug, "debug output mask"); #endif module_param_named(swcrypto, iwlwifi_mod_params.swcrypto, int, 0444); MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])"); module_param_named(11n_disable, iwlwifi_mod_params.disable_11n, uint, 0444); MODULE_PARM_DESC(11n_disable, "disable 11n functionality, bitmap: 1: full, 2: disable agg TX, 4: disable agg RX, 8 enable agg TX"); module_param_named(amsdu_size, iwlwifi_mod_params.amsdu_size, int, 0444); MODULE_PARM_DESC(amsdu_size, "amsdu size 0: 12K for multi Rx queue devices, 2K for AX210 devices, " "4K for other devices 1:4K 2:8K 3:12K (16K buffers) 4: 2K (default 0)"); module_param_named(fw_restart, iwlwifi_mod_params.fw_restart, bool, 0444); MODULE_PARM_DESC(fw_restart, "restart firmware in case of error (default true)"); module_param_named(nvm_file, iwlwifi_mod_params.nvm_file, charp, 0444); MODULE_PARM_DESC(nvm_file, "NVM file name"); module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable, uint, 0644); MODULE_PARM_DESC(uapsd_disable, "disable U-APSD functionality bitmap 1: BSS 2: P2P Client (default: 3)"); -module_param_named(enable_ini, iwlwifi_mod_params.enable_ini, - bool, S_IRUGO | S_IWUSR); + +#if defined(__linux__) +static int enable_ini_set(const char *arg, const struct kernel_param *kp) +{ + int ret = 0; + bool res; + __u32 new_enable_ini; + + /* in case the argument type is a number */ + ret = kstrtou32(arg, 0, &new_enable_ini); + if (!ret) { + if (new_enable_ini > ENABLE_INI) { + pr_err("enable_ini cannot be %d, in range 0-16\n", new_enable_ini); + return -EINVAL; + } + goto out; + } + + /* in case the argument type is boolean */ + ret = kstrtobool(arg, &res); + if (ret) + return ret; + new_enable_ini = (res ? ENABLE_INI : 0); + +out: + iwlwifi_mod_params.enable_ini = new_enable_ini; + return 0; +} + +static const struct kernel_param_ops enable_ini_ops = { + .set = enable_ini_set +}; + +module_param_cb(enable_ini, &enable_ini_ops, &iwlwifi_mod_params.enable_ini, 0644); MODULE_PARM_DESC(enable_ini, - "Enable debug INI TLV FW debug infrastructure (default: true"); + "0:disable, 1-15:FW_DBG_PRESET Values, 16:enabled without preset value defined," + "Debug INI TLV FW debug infrastructure (default: 16)"); +#endif /* * set bt_coex_active to true, uCode will do kill/defer * every time the priority line is asserted (BT is sending signals on the * priority line in the PCIx). * set bt_coex_active to false, uCode will ignore the BT activity and * perform the normal operation * * User might experience transmit issue on some platform due to WiFi/BT * co-exist problem. 
The possible behaviors are: * Able to scan and finding all the available AP * Not able to associate with any AP * On those platforms, WiFi communication can be restored by set * "bt_coex_active" module parameter to "false" * * default: bt_coex_active = true (BT_COEX_ENABLE) */ module_param_named(bt_coex_active, iwlwifi_mod_params.bt_coex_active, bool, 0444); MODULE_PARM_DESC(bt_coex_active, "enable wifi/bt co-exist (default: enable)"); module_param_named(led_mode, iwlwifi_mod_params.led_mode, int, 0444); MODULE_PARM_DESC(led_mode, "0=system default, " "1=On(RF On)/Off(RF Off), 2=blinking, 3=Off (default: 0)"); module_param_named(power_save, iwlwifi_mod_params.power_save, bool, 0444); MODULE_PARM_DESC(power_save, "enable WiFi power management (default: disable)"); module_param_named(power_level, iwlwifi_mod_params.power_level, int, 0444); MODULE_PARM_DESC(power_level, "default power save level (range from 1 - 5, default: 1)"); module_param_named(disable_11ac, iwlwifi_mod_params.disable_11ac, bool, 0444); MODULE_PARM_DESC(disable_11ac, "Disable VHT capabilities (default: false)"); module_param_named(remove_when_gone, iwlwifi_mod_params.remove_when_gone, bool, 0444); MODULE_PARM_DESC(remove_when_gone, "Remove dev from PCIe bus if it is deemed inaccessible (default: false)"); module_param_named(disable_11ax, iwlwifi_mod_params.disable_11ax, bool, S_IRUGO); MODULE_PARM_DESC(disable_11ax, "Disable HE capabilities (default: false)"); diff --git a/sys/contrib/dev/iwlwifi/iwl-modparams.h b/sys/contrib/dev/iwlwifi/iwl-modparams.h index 7e8cf33867b2..cb2b2e81b28c 100644 --- a/sys/contrib/dev/iwlwifi/iwl-modparams.h +++ b/sys/contrib/dev/iwlwifi/iwl-modparams.h @@ -1,114 +1,115 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014, 2018-2020 Intel Corporation + * Copyright (C) 2005-2014, 2018-2022 Intel Corporation */ #ifndef __iwl_modparams_h__ #define __iwl_modparams_h__ #include #include #include #ifdef CONFIG_IWLWIFI_DEBUG #include "iwl-debug.h" #endif extern struct iwl_mod_params iwlwifi_mod_params; enum iwl_power_level { IWL_POWER_INDEX_1, IWL_POWER_INDEX_2, IWL_POWER_INDEX_3, IWL_POWER_INDEX_4, IWL_POWER_INDEX_5, IWL_POWER_NUM }; enum iwl_disable_11n { IWL_DISABLE_HT_ALL = BIT(0), IWL_DISABLE_HT_TXAGG = BIT(1), IWL_DISABLE_HT_RXAGG = BIT(2), IWL_ENABLE_HT_TXAGG = BIT(3), }; enum iwl_amsdu_size { IWL_AMSDU_DEF = 0, IWL_AMSDU_4K = 1, IWL_AMSDU_8K = 2, IWL_AMSDU_12K = 3, /* Add 2K at the end to avoid breaking current API */ IWL_AMSDU_2K = 4, }; enum iwl_uapsd_disable { IWL_DISABLE_UAPSD_BSS = BIT(0), IWL_DISABLE_UAPSD_P2P_CLIENT = BIT(1), }; /** * struct iwl_mod_params * * Holds the module parameters * * @swcrypto: using hardware encryption, default = 0 * @disable_11n: disable 11n capabilities, default = 0, * use IWL_[DIS,EN]ABLE_HT_* constants * @amsdu_size: See &enum iwl_amsdu_size. * @fw_restart: restart firmware, default = 1 * @bt_coex_active: enable bt coex, default = true * @led_mode: system default, default = 0 * @power_save: enable power save, default = false * @power_level: power level, default = 1 * @debug_level: levels are IWL_DL_* * @nvm_file: specifies a external NVM file * @uapsd_disable: disable U-APSD, see &enum iwl_uapsd_disable, default = * IWL_DISABLE_UAPSD_BSS | IWL_DISABLE_UAPSD_P2P_CLIENT * @disable_11ac: disable VHT capabilities, default = false. * @remove_when_gone: remove an inaccessible device from the PCIe bus. 
* @enable_ini: enable new FW debug infratructure (INI TLVs) */ struct iwl_mod_params { int swcrypto; unsigned int disable_11n; int amsdu_size; bool fw_restart; bool bt_coex_active; int led_mode; bool power_save; int power_level; #ifdef CONFIG_IWLWIFI_DEBUG #if defined(__linux__) u32 debug_level; #elif defined(__FreeBSD__) enum iwl_dl debug_level; #endif #endif char *nvm_file; u32 uapsd_disable; bool disable_11ac; /** * @disable_11ax: disable HE capabilities, default = false */ bool disable_11ax; bool remove_when_gone; - bool enable_ini; + u32 enable_ini; + bool disable_11be; }; static inline bool iwl_enable_rx_ampdu(void) { if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG) return false; return true; } static inline bool iwl_enable_tx_ampdu(void) { if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG) return false; if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG) return true; /* enabled by default */ return true; } #endif /* #__iwl_modparams_h__ */ diff --git a/sys/contrib/dev/iwlwifi/iwl-nvm-parse.c b/sys/contrib/dev/iwlwifi/iwl-nvm-parse.c index f1688a0f6f2c..fdb823e55792 100644 --- a/sys/contrib/dev/iwlwifi/iwl-nvm-parse.c +++ b/sys/contrib/dev/iwlwifi/iwl-nvm-parse.c @@ -1,1827 +1,1826 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2005-2014, 2018-2021 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #include #include #include #include #include #include #include "iwl-drv.h" #include "iwl-modparams.h" #include "iwl-nvm-parse.h" #include "iwl-prph.h" #include "iwl-io.h" #include "iwl-csr.h" #include "fw/acpi.h" #include "fw/api/nvm-reg.h" #include "fw/api/commands.h" #include "fw/api/cmdhdr.h" #include "fw/img.h" #include "mei/iwl-mei.h" /* NVM offsets (in words) definitions */ enum nvm_offsets { /* NVM HW-Section offset (in words) definitions */ SUBSYSTEM_ID = 0x0A, HW_ADDR = 0x15, /* NVM SW-Section offset (in words) definitions */ NVM_SW_SECTION = 0x1C0, NVM_VERSION = 0, RADIO_CFG = 1, SKU = 2, N_HW_ADDRS = 3, NVM_CHANNELS = 0x1E0 - NVM_SW_SECTION, /* NVM calibration section offset (in words) definitions */ NVM_CALIB_SECTION = 0x2B8, XTAL_CALIB = 0x316 - NVM_CALIB_SECTION, /* NVM REGULATORY -Section offset (in words) definitions */ NVM_CHANNELS_SDP = 0, }; enum ext_nvm_offsets { /* NVM HW-Section offset (in words) definitions */ MAC_ADDRESS_OVERRIDE_EXT_NVM = 1, /* NVM SW-Section offset (in words) definitions */ NVM_VERSION_EXT_NVM = 0, N_HW_ADDRS_FAMILY_8000 = 3, /* NVM PHY_SKU-Section offset (in words) definitions */ RADIO_CFG_FAMILY_EXT_NVM = 0, SKU_FAMILY_8000 = 2, /* NVM REGULATORY -Section offset (in words) definitions */ NVM_CHANNELS_EXTENDED = 0, NVM_LAR_OFFSET_OLD = 0x4C7, NVM_LAR_OFFSET = 0x507, NVM_LAR_ENABLED = 0x7, }; /* SKU Capabilities (actual values from NVM definition) */ enum nvm_sku_bits { NVM_SKU_CAP_BAND_24GHZ = BIT(0), NVM_SKU_CAP_BAND_52GHZ = BIT(1), NVM_SKU_CAP_11N_ENABLE = BIT(2), NVM_SKU_CAP_11AC_ENABLE = BIT(3), NVM_SKU_CAP_MIMO_DISABLE = BIT(5), }; /* * These are the channel numbers in the order that they are stored in the NVM */ static const u16 iwl_nvm_channels[] = { /* 2.4 GHz */ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, /* 5 GHz */ 36, 40, 44 , 48, 52, 56, 60, 64, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144, 149, 153, 157, 161, 165 }; static const u16 iwl_ext_nvm_channels[] = { /* 2.4 GHz */ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, /* 5 GHz */ 36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92, 
96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144, 149, 153, 157, 161, 165, 169, 173, 177, 181 }; static const u16 iwl_uhb_nvm_channels[] = { /* 2.4 GHz */ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, /* 5 GHz */ 36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92, 96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144, 149, 153, 157, 161, 165, 169, 173, 177, 181, /* 6-7 GHz */ 1, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45, 49, 53, 57, 61, 65, 69, 73, 77, 81, 85, 89, 93, 97, 101, 105, 109, 113, 117, 121, 125, 129, 133, 137, 141, 145, 149, 153, 157, 161, 165, 169, 173, 177, 181, 185, 189, 193, 197, 201, 205, 209, 213, 217, 221, 225, 229, 233 }; #define IWL_NVM_NUM_CHANNELS ARRAY_SIZE(iwl_nvm_channels) #define IWL_NVM_NUM_CHANNELS_EXT ARRAY_SIZE(iwl_ext_nvm_channels) #define IWL_NVM_NUM_CHANNELS_UHB ARRAY_SIZE(iwl_uhb_nvm_channels) #define NUM_2GHZ_CHANNELS 14 #define NUM_5GHZ_CHANNELS 37 #define FIRST_2GHZ_HT_MINUS 5 #define LAST_2GHZ_HT_PLUS 9 #define N_HW_ADDR_MASK 0xF /* rate data (static) */ static struct ieee80211_rate iwl_cfg80211_rates[] = { { .bitrate = 1 * 10, .hw_value = 0, .hw_value_short = 0, }, { .bitrate = 2 * 10, .hw_value = 1, .hw_value_short = 1, .flags = IEEE80211_RATE_SHORT_PREAMBLE, }, { .bitrate = 5.5 * 10, .hw_value = 2, .hw_value_short = 2, .flags = IEEE80211_RATE_SHORT_PREAMBLE, }, { .bitrate = 11 * 10, .hw_value = 3, .hw_value_short = 3, .flags = IEEE80211_RATE_SHORT_PREAMBLE, }, { .bitrate = 6 * 10, .hw_value = 4, .hw_value_short = 4, }, { .bitrate = 9 * 10, .hw_value = 5, .hw_value_short = 5, }, { .bitrate = 12 * 10, .hw_value = 6, .hw_value_short = 6, }, { .bitrate = 18 * 10, .hw_value = 7, .hw_value_short = 7, }, { .bitrate = 24 * 10, .hw_value = 8, .hw_value_short = 8, }, { .bitrate = 36 * 10, .hw_value = 9, .hw_value_short = 9, }, { .bitrate = 48 * 10, .hw_value = 10, .hw_value_short = 10, }, { .bitrate = 54 * 10, .hw_value = 11, .hw_value_short = 11, }, }; #define RATES_24_OFFS 0 #define N_RATES_24 ARRAY_SIZE(iwl_cfg80211_rates) #define RATES_52_OFFS 4 #define N_RATES_52 (N_RATES_24 - RATES_52_OFFS) /** * enum iwl_nvm_channel_flags - channel flags in NVM * @NVM_CHANNEL_VALID: channel is usable for this SKU/geo * @NVM_CHANNEL_IBSS: usable as an IBSS channel * @NVM_CHANNEL_ACTIVE: active scanning allowed * @NVM_CHANNEL_RADAR: radar detection required * @NVM_CHANNEL_INDOOR_ONLY: only indoor use is allowed * @NVM_CHANNEL_GO_CONCURRENT: GO operation is allowed when connected to BSS * on same channel on 2.4 or same UNII band on 5.2 * @NVM_CHANNEL_UNIFORM: uniform spreading required * @NVM_CHANNEL_20MHZ: 20 MHz channel okay * @NVM_CHANNEL_40MHZ: 40 MHz channel okay * @NVM_CHANNEL_80MHZ: 80 MHz channel okay * @NVM_CHANNEL_160MHZ: 160 MHz channel okay * @NVM_CHANNEL_DC_HIGH: DC HIGH required/allowed (?) */ enum iwl_nvm_channel_flags { NVM_CHANNEL_VALID = BIT(0), NVM_CHANNEL_IBSS = BIT(1), NVM_CHANNEL_ACTIVE = BIT(3), NVM_CHANNEL_RADAR = BIT(4), NVM_CHANNEL_INDOOR_ONLY = BIT(5), NVM_CHANNEL_GO_CONCURRENT = BIT(6), NVM_CHANNEL_UNIFORM = BIT(7), NVM_CHANNEL_20MHZ = BIT(8), NVM_CHANNEL_40MHZ = BIT(9), NVM_CHANNEL_80MHZ = BIT(10), NVM_CHANNEL_160MHZ = BIT(11), NVM_CHANNEL_DC_HIGH = BIT(12), }; /** * enum iwl_reg_capa_flags - global flags applied for the whole regulatory * domain. * @REG_CAPA_BF_CCD_LOW_BAND: Beam-forming or Cyclic Delay Diversity in the * 2.4Ghz band is allowed. * @REG_CAPA_BF_CCD_HIGH_BAND: Beam-forming or Cyclic Delay Diversity in the * 5Ghz band is allowed. 
* @REG_CAPA_160MHZ_ALLOWED: 11ac channel with a width of 160Mhz is allowed * for this regulatory domain (valid only in 5Ghz). * @REG_CAPA_80MHZ_ALLOWED: 11ac channel with a width of 80Mhz is allowed * for this regulatory domain (valid only in 5Ghz). * @REG_CAPA_MCS_8_ALLOWED: 11ac with MCS 8 is allowed. * @REG_CAPA_MCS_9_ALLOWED: 11ac with MCS 9 is allowed. * @REG_CAPA_40MHZ_FORBIDDEN: 11n channel with a width of 40Mhz is forbidden * for this regulatory domain (valid only in 5Ghz). * @REG_CAPA_DC_HIGH_ENABLED: DC HIGH allowed. * @REG_CAPA_11AX_DISABLED: 11ax is forbidden for this regulatory domain. */ enum iwl_reg_capa_flags { REG_CAPA_BF_CCD_LOW_BAND = BIT(0), REG_CAPA_BF_CCD_HIGH_BAND = BIT(1), REG_CAPA_160MHZ_ALLOWED = BIT(2), REG_CAPA_80MHZ_ALLOWED = BIT(3), REG_CAPA_MCS_8_ALLOWED = BIT(4), REG_CAPA_MCS_9_ALLOWED = BIT(5), REG_CAPA_40MHZ_FORBIDDEN = BIT(7), REG_CAPA_DC_HIGH_ENABLED = BIT(9), REG_CAPA_11AX_DISABLED = BIT(10), }; /** * enum iwl_reg_capa_flags_v2 - global flags applied for the whole regulatory * domain (version 2). * @REG_CAPA_V2_STRADDLE_DISABLED: Straddle channels (144, 142, 138) are * disabled. * @REG_CAPA_V2_BF_CCD_LOW_BAND: Beam-forming or Cyclic Delay Diversity in the * 2.4Ghz band is allowed. * @REG_CAPA_V2_BF_CCD_HIGH_BAND: Beam-forming or Cyclic Delay Diversity in the * 5Ghz band is allowed. * @REG_CAPA_V2_160MHZ_ALLOWED: 11ac channel with a width of 160Mhz is allowed * for this regulatory domain (valid only in 5Ghz). * @REG_CAPA_V2_80MHZ_ALLOWED: 11ac channel with a width of 80Mhz is allowed * for this regulatory domain (valid only in 5Ghz). * @REG_CAPA_V2_MCS_8_ALLOWED: 11ac with MCS 8 is allowed. * @REG_CAPA_V2_MCS_9_ALLOWED: 11ac with MCS 9 is allowed. * @REG_CAPA_V2_WEATHER_DISABLED: Weather radar channels (120, 124, 128, 118, * 126, 122) are disabled. * @REG_CAPA_V2_40MHZ_ALLOWED: 11n channel with a width of 40Mhz is allowed * for this regulatory domain (uvalid only in 5Ghz). * @REG_CAPA_V2_11AX_DISABLED: 11ax is forbidden for this regulatory domain. */ enum iwl_reg_capa_flags_v2 { REG_CAPA_V2_STRADDLE_DISABLED = BIT(0), REG_CAPA_V2_BF_CCD_LOW_BAND = BIT(1), REG_CAPA_V2_BF_CCD_HIGH_BAND = BIT(2), REG_CAPA_V2_160MHZ_ALLOWED = BIT(3), REG_CAPA_V2_80MHZ_ALLOWED = BIT(4), REG_CAPA_V2_MCS_8_ALLOWED = BIT(5), REG_CAPA_V2_MCS_9_ALLOWED = BIT(6), REG_CAPA_V2_WEATHER_DISABLED = BIT(7), REG_CAPA_V2_40MHZ_ALLOWED = BIT(8), REG_CAPA_V2_11AX_DISABLED = BIT(10), }; /* * API v2 for reg_capa_flags is relevant from version 6 and onwards of the * MCC update command response. */ #define REG_CAPA_V2_RESP_VER 6 /** * struct iwl_reg_capa - struct for global regulatory capabilities, Used for * handling the different APIs of reg_capa_flags. * * @allow_40mhz: 11n channel with a width of 40Mhz is allowed * for this regulatory domain (valid only in 5Ghz). * @allow_80mhz: 11ac channel with a width of 80Mhz is allowed * for this regulatory domain (valid only in 5Ghz). * @allow_160mhz: 11ac channel with a width of 160Mhz is allowed * for this regulatory domain (valid only in 5Ghz). * @disable_11ax: 11ax is forbidden for this regulatory domain. */ struct iwl_reg_capa { u16 allow_40mhz; u16 allow_80mhz; u16 allow_160mhz; u16 disable_11ax; }; static inline void iwl_nvm_print_channel_flags(struct device *dev, u32 level, int chan, u32 flags) { #define CHECK_AND_PRINT_I(x) \ ((flags & NVM_CHANNEL_##x) ? " " #x : "") if (!(flags & NVM_CHANNEL_VALID)) { IWL_DEBUG_DEV(dev, level, "Ch. 
%d: 0x%x: No traffic\n", chan, flags); return; } /* Note: already can print up to 101 characters, 110 is the limit! */ IWL_DEBUG_DEV(dev, level, "Ch. %d: 0x%x:%s%s%s%s%s%s%s%s%s%s%s%s\n", chan, flags, CHECK_AND_PRINT_I(VALID), CHECK_AND_PRINT_I(IBSS), CHECK_AND_PRINT_I(ACTIVE), CHECK_AND_PRINT_I(RADAR), CHECK_AND_PRINT_I(INDOOR_ONLY), CHECK_AND_PRINT_I(GO_CONCURRENT), CHECK_AND_PRINT_I(UNIFORM), CHECK_AND_PRINT_I(20MHZ), CHECK_AND_PRINT_I(40MHZ), CHECK_AND_PRINT_I(80MHZ), CHECK_AND_PRINT_I(160MHZ), CHECK_AND_PRINT_I(DC_HIGH)); #undef CHECK_AND_PRINT_I } static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, enum nl80211_band band, u32 nvm_flags, const struct iwl_cfg *cfg) { u32 flags = IEEE80211_CHAN_NO_HT40; if (band == NL80211_BAND_2GHZ && (nvm_flags & NVM_CHANNEL_40MHZ)) { if (ch_num <= LAST_2GHZ_HT_PLUS) flags &= ~IEEE80211_CHAN_NO_HT40PLUS; if (ch_num >= FIRST_2GHZ_HT_MINUS) flags &= ~IEEE80211_CHAN_NO_HT40MINUS; } else if (nvm_flags & NVM_CHANNEL_40MHZ) { if ((ch_idx - NUM_2GHZ_CHANNELS) % 2 == 0) flags &= ~IEEE80211_CHAN_NO_HT40PLUS; else flags &= ~IEEE80211_CHAN_NO_HT40MINUS; } if (!(nvm_flags & NVM_CHANNEL_80MHZ)) flags |= IEEE80211_CHAN_NO_80MHZ; if (!(nvm_flags & NVM_CHANNEL_160MHZ)) flags |= IEEE80211_CHAN_NO_160MHZ; if (!(nvm_flags & NVM_CHANNEL_IBSS)) flags |= IEEE80211_CHAN_NO_IR; if (!(nvm_flags & NVM_CHANNEL_ACTIVE)) flags |= IEEE80211_CHAN_NO_IR; if (nvm_flags & NVM_CHANNEL_RADAR) flags |= IEEE80211_CHAN_RADAR; if (nvm_flags & NVM_CHANNEL_INDOOR_ONLY) flags |= IEEE80211_CHAN_INDOOR_ONLY; /* Set the GO concurrent flag only in case that NO_IR is set. * Otherwise it is meaningless */ if ((nvm_flags & NVM_CHANNEL_GO_CONCURRENT) && (flags & IEEE80211_CHAN_NO_IR)) flags |= IEEE80211_CHAN_IR_CONCURRENT; return flags; } static enum nl80211_band iwl_nl80211_band_from_channel_idx(int ch_idx) { if (ch_idx >= NUM_2GHZ_CHANNELS + NUM_5GHZ_CHANNELS) { return NL80211_BAND_6GHZ; } if (ch_idx >= NUM_2GHZ_CHANNELS) return NL80211_BAND_5GHZ; return NL80211_BAND_2GHZ; } static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, struct iwl_nvm_data *data, const void * const nvm_ch_flags, u32 sbands_flags, bool v4) { int ch_idx; int n_channels = 0; struct ieee80211_channel *channel; u32 ch_flags; int num_of_ch; const u16 *nvm_chan; if (cfg->uhb_supported) { num_of_ch = IWL_NVM_NUM_CHANNELS_UHB; nvm_chan = iwl_uhb_nvm_channels; } else if (cfg->nvm_type == IWL_NVM_EXT) { num_of_ch = IWL_NVM_NUM_CHANNELS_EXT; nvm_chan = iwl_ext_nvm_channels; } else { num_of_ch = IWL_NVM_NUM_CHANNELS; nvm_chan = iwl_nvm_channels; } for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) { enum nl80211_band band = iwl_nl80211_band_from_channel_idx(ch_idx); if (v4) ch_flags = __le32_to_cpup((const __le32 *)nvm_ch_flags + ch_idx); else ch_flags = __le16_to_cpup((const __le16 *)nvm_ch_flags + ch_idx); if (band == NL80211_BAND_5GHZ && !data->sku_cap_band_52ghz_enable) continue; /* workaround to disable wide channels in 5GHz */ if ((sbands_flags & IWL_NVM_SBANDS_FLAGS_NO_WIDE_IN_5GHZ) && band == NL80211_BAND_5GHZ) { ch_flags &= ~(NVM_CHANNEL_40MHZ | NVM_CHANNEL_80MHZ | NVM_CHANNEL_160MHZ); } if (ch_flags & NVM_CHANNEL_160MHZ) data->vht160_supported = true; if (!(sbands_flags & IWL_NVM_SBANDS_FLAGS_LAR) && !(ch_flags & NVM_CHANNEL_VALID)) { /* * Channels might become valid later if lar is * supported, hence we still want to add them to * the list of supported channels to cfg80211. 
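* (With LAR enabled the channel is therefore still registered below, just with its regulatory flags cleared; without LAR an invalid channel is only logged here and skipped.)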
*/ iwl_nvm_print_channel_flags(dev, IWL_DL_EEPROM, nvm_chan[ch_idx], ch_flags); continue; } channel = &data->channels[n_channels]; n_channels++; channel->hw_value = nvm_chan[ch_idx]; channel->band = band; channel->center_freq = ieee80211_channel_to_frequency( channel->hw_value, channel->band); /* Initialize regulatory-based run-time data */ /* * Default value - highest tx power value. max_power * is not used in mvm, and is used for backwards compatibility */ channel->max_power = IWL_DEFAULT_MAX_TX_POWER; /* don't put limitations in case we're using LAR */ if (!(sbands_flags & IWL_NVM_SBANDS_FLAGS_LAR)) channel->flags = iwl_get_channel_flags(nvm_chan[ch_idx], ch_idx, band, ch_flags, cfg); else channel->flags = 0; /* TODO: Don't put limitations on UHB devices as we still don't * have NVM for them */ if (cfg->uhb_supported) channel->flags = 0; iwl_nvm_print_channel_flags(dev, IWL_DL_EEPROM, channel->hw_value, ch_flags); IWL_DEBUG_EEPROM(dev, "Ch. %d: %ddBm\n", channel->hw_value, channel->max_power); } return n_channels; } static void iwl_init_vht_hw_capab(struct iwl_trans *trans, struct iwl_nvm_data *data, struct ieee80211_sta_vht_cap *vht_cap, u8 tx_chains, u8 rx_chains) { const struct iwl_cfg *cfg = trans->cfg; int num_rx_ants = num_of_ant(rx_chains); int num_tx_ants = num_of_ant(tx_chains); vht_cap->vht_supported = true; vht_cap->cap = IEEE80211_VHT_CAP_SHORT_GI_80 | IEEE80211_VHT_CAP_RXSTBC_1 | IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | 3 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT | IEEE80211_VHT_MAX_AMPDU_1024K << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT; if (data->vht160_supported) vht_cap->cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ | IEEE80211_VHT_CAP_SHORT_GI_160; if (cfg->vht_mu_mimo_supported) vht_cap->cap |= IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE; if (cfg->ht_params->ldpc) vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC; if (data->sku_cap_mimo_disabled) { num_rx_ants = 1; num_tx_ants = 1; } if (num_tx_ants > 1) vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC; else vht_cap->cap |= IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN; switch (iwlwifi_mod_params.amsdu_size) { case IWL_AMSDU_DEF: if (trans->trans_cfg->mq_rx_supported) vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454; else vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895; break; case IWL_AMSDU_2K: if (trans->trans_cfg->mq_rx_supported) vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454; else WARN(1, "RB size of 2K is not supported by this device\n"); break; case IWL_AMSDU_4K: vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895; break; case IWL_AMSDU_8K: vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991; break; case IWL_AMSDU_12K: vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454; break; default: break; } vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 | IEEE80211_VHT_MCS_SUPPORT_0_9 << 2 | IEEE80211_VHT_MCS_NOT_SUPPORTED << 4 | IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 | IEEE80211_VHT_MCS_NOT_SUPPORTED << 8 | IEEE80211_VHT_MCS_NOT_SUPPORTED << 10 | IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 | IEEE80211_VHT_MCS_NOT_SUPPORTED << 14); if (num_rx_ants == 1 || cfg->rx_with_siso_diversity) { vht_cap->cap |= IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN; /* this works because NOT_SUPPORTED == 3 */ vht_cap->vht_mcs.rx_mcs_map |= cpu_to_le16(IEEE80211_VHT_MCS_NOT_SUPPORTED << 2); } vht_cap->vht_mcs.tx_mcs_map = vht_cap->vht_mcs.rx_mcs_map; vht_cap->vht_mcs.tx_highest |= cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE); } static const u8 iwl_vendor_caps[] = { 0xdd, /* vendor element */ 0x06, /* length */ 
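/* the six payload bytes that follow: 3-byte OUI, 1-byte type, 2-byte capability bitmap */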
0x00, 0x17, 0x35, /* Intel OUI */ 0x08, /* type (Intel Capabilities) */ /* followed by 16 bits of capabilities */ #define IWL_VENDOR_CAP_IMPROVED_BF_FDBK_HE BIT(0) IWL_VENDOR_CAP_IMPROVED_BF_FDBK_HE, 0x00 }; static const struct ieee80211_sband_iftype_data iwl_he_capa[] = { { .types_mask = BIT(NL80211_IFTYPE_STATION), .he_cap = { .has_he = true, .he_cap_elem = { .mac_cap_info[0] = - IEEE80211_HE_MAC_CAP0_HTC_HE | - IEEE80211_HE_MAC_CAP0_TWT_REQ, + IEEE80211_HE_MAC_CAP0_HTC_HE, .mac_cap_info[1] = IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US | IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8, .mac_cap_info[2] = IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP, .mac_cap_info[3] = IEEE80211_HE_MAC_CAP3_OMI_CONTROL | IEEE80211_HE_MAC_CAP3_RX_CTRL_FRAME_TO_MULTIBSS, .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU | IEEE80211_HE_MAC_CAP4_MULTI_TID_AGG_TX_QOS_B39, .mac_cap_info[5] = IEEE80211_HE_MAC_CAP5_MULTI_TID_AGG_TX_QOS_B40 | IEEE80211_HE_MAC_CAP5_MULTI_TID_AGG_TX_QOS_B41 | IEEE80211_HE_MAC_CAP5_UL_2x996_TONE_RU | IEEE80211_HE_MAC_CAP5_HE_DYNAMIC_SM_PS | IEEE80211_HE_MAC_CAP5_HT_VHT_TRIG_FRAME_RX, .phy_cap_info[0] = IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G | IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G | IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G, .phy_cap_info[1] = IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK | IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A | IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD, .phy_cap_info[2] = IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US | IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ, .phy_cap_info[3] = IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_BPSK | IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1 | IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_BPSK | IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1, .phy_cap_info[4] = IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE | IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_8 | IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_8, .phy_cap_info[6] = IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB | IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB | IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT, .phy_cap_info[7] = IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_SUPP | IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI, .phy_cap_info[8] = IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI | IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G | IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU | IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU | IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_242, .phy_cap_info[9] = IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB | IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB | (IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_RESERVED << IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_POS), .phy_cap_info[10] = IEEE80211_HE_PHY_CAP10_HE_MU_M1RU_MAX_LTF, }, /* * Set default Tx/Rx HE MCS NSS Support field. 
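* (0xfffa packs two bits per spatial stream: NSS 1-2 are set to 2, i.e. HE MCS 0-11 supported, and NSS 3-8 to 3, i.e. not supported.)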
* Indicate support for up to 2 spatial streams and all * MCS, without any special cases */ .he_mcs_nss_supp = { .rx_mcs_80 = cpu_to_le16(0xfffa), .tx_mcs_80 = cpu_to_le16(0xfffa), .rx_mcs_160 = cpu_to_le16(0xfffa), .tx_mcs_160 = cpu_to_le16(0xfffa), .rx_mcs_80p80 = cpu_to_le16(0xffff), .tx_mcs_80p80 = cpu_to_le16(0xffff), }, /* * Set default PPE thresholds, with PPET16 set to 0, * PPET8 set to 7 */ .ppe_thres = {0x61, 0x1c, 0xc7, 0x71}, }, }, { .types_mask = BIT(NL80211_IFTYPE_AP), .he_cap = { .has_he = true, .he_cap_elem = { .mac_cap_info[0] = IEEE80211_HE_MAC_CAP0_HTC_HE, .mac_cap_info[1] = IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US | IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8, .mac_cap_info[3] = IEEE80211_HE_MAC_CAP3_OMI_CONTROL, .phy_cap_info[0] = IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G | IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G, .phy_cap_info[1] = IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD, .phy_cap_info[2] = IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ | IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US, .phy_cap_info[3] = IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_BPSK | IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1 | IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_BPSK | IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1, .phy_cap_info[6] = IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT, .phy_cap_info[7] = IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI, .phy_cap_info[8] = IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI | IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_242, .phy_cap_info[9] = IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_RESERVED << IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_POS, }, /* * Set default Tx/Rx HE MCS NSS Support field. * Indicate support for up to 2 spatial streams and all * MCS, without any special cases */ .he_mcs_nss_supp = { .rx_mcs_80 = cpu_to_le16(0xfffa), .tx_mcs_80 = cpu_to_le16(0xfffa), .rx_mcs_160 = cpu_to_le16(0xfffa), .tx_mcs_160 = cpu_to_le16(0xfffa), .rx_mcs_80p80 = cpu_to_le16(0xffff), .tx_mcs_80p80 = cpu_to_le16(0xffff), }, /* * Set default PPE thresholds, with PPET16 set to 0, * PPET8 set to 7 */ .ppe_thres = {0x61, 0x1c, 0xc7, 0x71}, }, }, }; static void iwl_init_he_6ghz_capa(struct iwl_trans *trans, struct iwl_nvm_data *data, struct ieee80211_supported_band *sband, u8 tx_chains, u8 rx_chains) { struct ieee80211_sta_ht_cap ht_cap; struct ieee80211_sta_vht_cap vht_cap = {}; struct ieee80211_sband_iftype_data *iftype_data; u16 he_6ghz_capa = 0; u32 exp; int i; if (sband->band != NL80211_BAND_6GHZ) return; /* grab HT/VHT capabilities and calculate HE 6 GHz capabilities */ iwl_init_ht_hw_capab(trans, data, &ht_cap, NL80211_BAND_5GHZ, tx_chains, rx_chains); WARN_ON(!ht_cap.ht_supported); iwl_init_vht_hw_capab(trans, data, &vht_cap, tx_chains, rx_chains); WARN_ON(!vht_cap.vht_supported); he_6ghz_capa |= u16_encode_bits(ht_cap.ampdu_density, IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START); exp = u32_get_bits(vht_cap.cap, IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK); he_6ghz_capa |= u16_encode_bits(exp, IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP); exp = u32_get_bits(vht_cap.cap, IEEE80211_VHT_CAP_MAX_MPDU_MASK); he_6ghz_capa |= u16_encode_bits(exp, IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN); /* we don't support extended_ht_cap_info anywhere, so no RD_RESPONDER */ if (vht_cap.cap & IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN) he_6ghz_capa |= IEEE80211_HE_6GHZ_CAP_TX_ANTPAT_CONS; if (vht_cap.cap & IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN) he_6ghz_capa |= IEEE80211_HE_6GHZ_CAP_RX_ANTPAT_CONS; IWL_DEBUG_EEPROM(trans->dev, "he_6ghz_capa=0x%x\n", he_6ghz_capa); /* we know it's writable - we 
set it before ourselves */ iftype_data = (void *)(uintptr_t)sband->iftype_data; for (i = 0; i < sband->n_iftype_data; i++) iftype_data[i].he_6ghz_capa.capa = cpu_to_le16(he_6ghz_capa); } static void iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans, struct ieee80211_supported_band *sband, struct ieee80211_sband_iftype_data *iftype_data, u8 tx_chains, u8 rx_chains, const struct iwl_fw *fw) { bool is_ap = iftype_data->types_mask & BIT(NL80211_IFTYPE_AP); /* Advertise an A-MPDU exponent extension based on * operating band */ if (sband->band != NL80211_BAND_2GHZ) iftype_data->he_cap.he_cap_elem.mac_cap_info[3] |= IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_1; else iftype_data->he_cap.he_cap_elem.mac_cap_info[3] |= IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3; if (is_ap && iwlwifi_mod_params.nvm_file) iftype_data->he_cap.he_cap_elem.phy_cap_info[0] |= IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G; if ((tx_chains & rx_chains) == ANT_AB) { iftype_data->he_cap.he_cap_elem.phy_cap_info[2] |= IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ; iftype_data->he_cap.he_cap_elem.phy_cap_info[5] |= IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 | IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2; if (!is_ap) iftype_data->he_cap.he_cap_elem.phy_cap_info[7] |= IEEE80211_HE_PHY_CAP7_MAX_NC_2; } else if (!is_ap) { /* If not 2x2, we need to indicate 1x1 in the * Midamble RX Max NSTS - but not for AP mode */ iftype_data->he_cap.he_cap_elem.phy_cap_info[1] &= ~IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS; iftype_data->he_cap.he_cap_elem.phy_cap_info[2] &= ~IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_TX_MAX_NSTS; iftype_data->he_cap.he_cap_elem.phy_cap_info[7] |= IEEE80211_HE_PHY_CAP7_MAX_NC_1; } switch (CSR_HW_RFID_TYPE(trans->hw_rf_id)) { case IWL_CFG_RF_TYPE_GF: case IWL_CFG_RF_TYPE_MR: case IWL_CFG_RF_TYPE_MS: iftype_data->he_cap.he_cap_elem.phy_cap_info[9] |= IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU; if (!is_ap) iftype_data->he_cap.he_cap_elem.phy_cap_info[9] |= IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU; break; } if (fw_has_capa(&fw->ucode_capa, IWL_UCODE_TLV_CAPA_BROADCAST_TWT)) iftype_data->he_cap.he_cap_elem.mac_cap_info[2] |= IEEE80211_HE_MAC_CAP2_BCAST_TWT; if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000 && !is_ap) { iftype_data->vendor_elems.data = iwl_vendor_caps; iftype_data->vendor_elems.len = ARRAY_SIZE(iwl_vendor_caps); } } static void iwl_init_he_hw_capab(struct iwl_trans *trans, struct iwl_nvm_data *data, struct ieee80211_supported_band *sband, u8 tx_chains, u8 rx_chains, const struct iwl_fw *fw) { struct ieee80211_sband_iftype_data *iftype_data; int i; /* should only initialize once */ if (WARN_ON(sband->iftype_data)) return; BUILD_BUG_ON(sizeof(data->iftd.low) != sizeof(iwl_he_capa)); BUILD_BUG_ON(sizeof(data->iftd.high) != sizeof(iwl_he_capa)); switch (sband->band) { case NL80211_BAND_2GHZ: iftype_data = data->iftd.low; break; case NL80211_BAND_5GHZ: case NL80211_BAND_6GHZ: iftype_data = data->iftd.high; break; default: WARN_ON(1); return; } memcpy(iftype_data, iwl_he_capa, sizeof(iwl_he_capa)); sband->iftype_data = iftype_data; sband->n_iftype_data = ARRAY_SIZE(iwl_he_capa); for (i = 0; i < sband->n_iftype_data; i++) iwl_nvm_fixup_sband_iftd(trans, sband, &iftype_data[i], tx_chains, rx_chains, fw); iwl_init_he_6ghz_capa(trans, data, sband, tx_chains, rx_chains); } static void iwl_init_sbands(struct iwl_trans *trans, struct iwl_nvm_data *data, const void *nvm_ch_flags, u8 tx_chains, u8 rx_chains, u32 sbands_flags, bool v4, const struct 
iwl_fw *fw) { struct device *dev = trans->dev; const struct iwl_cfg *cfg = trans->cfg; int n_channels; int n_used = 0; struct ieee80211_supported_band *sband; n_channels = iwl_init_channel_map(dev, cfg, data, nvm_ch_flags, sbands_flags, v4); sband = &data->bands[NL80211_BAND_2GHZ]; sband->band = NL80211_BAND_2GHZ; sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS]; sband->n_bitrates = N_RATES_24; n_used += iwl_init_sband_channels(data, sband, n_channels, NL80211_BAND_2GHZ); iwl_init_ht_hw_capab(trans, data, &sband->ht_cap, NL80211_BAND_2GHZ, tx_chains, rx_chains); if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax) iwl_init_he_hw_capab(trans, data, sband, tx_chains, rx_chains, fw); sband = &data->bands[NL80211_BAND_5GHZ]; sband->band = NL80211_BAND_5GHZ; sband->bitrates = &iwl_cfg80211_rates[RATES_52_OFFS]; sband->n_bitrates = N_RATES_52; n_used += iwl_init_sband_channels(data, sband, n_channels, NL80211_BAND_5GHZ); iwl_init_ht_hw_capab(trans, data, &sband->ht_cap, NL80211_BAND_5GHZ, tx_chains, rx_chains); if (data->sku_cap_11ac_enable && !iwlwifi_mod_params.disable_11ac) iwl_init_vht_hw_capab(trans, data, &sband->vht_cap, tx_chains, rx_chains); if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax) iwl_init_he_hw_capab(trans, data, sband, tx_chains, rx_chains, fw); /* 6GHz band. */ sband = &data->bands[NL80211_BAND_6GHZ]; sband->band = NL80211_BAND_6GHZ; /* use the same rates as 5GHz band */ sband->bitrates = &iwl_cfg80211_rates[RATES_52_OFFS]; sband->n_bitrates = N_RATES_52; n_used += iwl_init_sband_channels(data, sband, n_channels, NL80211_BAND_6GHZ); if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax) iwl_init_he_hw_capab(trans, data, sband, tx_chains, rx_chains, fw); else sband->n_channels = 0; if (n_channels != n_used) IWL_ERR_DEV(dev, "NVM: used only %d of %d channels\n", n_used, n_channels); } static int iwl_get_sku(const struct iwl_cfg *cfg, const __le16 *nvm_sw, const __le16 *phy_sku) { if (cfg->nvm_type != IWL_NVM_EXT) return le16_to_cpup(nvm_sw + SKU); return le32_to_cpup((const __le32 *)(phy_sku + SKU_FAMILY_8000)); } static int iwl_get_nvm_version(const struct iwl_cfg *cfg, const __le16 *nvm_sw) { if (cfg->nvm_type != IWL_NVM_EXT) return le16_to_cpup(nvm_sw + NVM_VERSION); else return le32_to_cpup((const __le32 *)(nvm_sw + NVM_VERSION_EXT_NVM)); } static int iwl_get_radio_cfg(const struct iwl_cfg *cfg, const __le16 *nvm_sw, const __le16 *phy_sku) { if (cfg->nvm_type != IWL_NVM_EXT) return le16_to_cpup(nvm_sw + RADIO_CFG); return le32_to_cpup((const __le32 *)(phy_sku + RADIO_CFG_FAMILY_EXT_NVM)); } static int iwl_get_n_hw_addrs(const struct iwl_cfg *cfg, const __le16 *nvm_sw) { int n_hw_addr; if (cfg->nvm_type != IWL_NVM_EXT) return le16_to_cpup(nvm_sw + N_HW_ADDRS); n_hw_addr = le32_to_cpup((const __le32 *)(nvm_sw + N_HW_ADDRS_FAMILY_8000)); return n_hw_addr & N_HW_ADDR_MASK; } static void iwl_set_radio_cfg(const struct iwl_cfg *cfg, struct iwl_nvm_data *data, u32 radio_cfg) { if (cfg->nvm_type != IWL_NVM_EXT) { data->radio_cfg_type = NVM_RF_CFG_TYPE_MSK(radio_cfg); data->radio_cfg_step = NVM_RF_CFG_STEP_MSK(radio_cfg); data->radio_cfg_dash = NVM_RF_CFG_DASH_MSK(radio_cfg); data->radio_cfg_pnum = NVM_RF_CFG_PNUM_MSK(radio_cfg); return; } /* set the radio configuration for family 8000 */ data->radio_cfg_type = EXT_NVM_RF_CFG_TYPE_MSK(radio_cfg); data->radio_cfg_step = EXT_NVM_RF_CFG_STEP_MSK(radio_cfg); data->radio_cfg_dash = EXT_NVM_RF_CFG_DASH_MSK(radio_cfg); data->radio_cfg_pnum = EXT_NVM_RF_CFG_FLAVOR_MSK(radio_cfg); 
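/* unlike the legacy layout above, the extended-NVM radio config word also carries the valid TX/RX antenna masks */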
data->valid_tx_ant = EXT_NVM_RF_CFG_TX_ANT_MSK(radio_cfg); data->valid_rx_ant = EXT_NVM_RF_CFG_RX_ANT_MSK(radio_cfg); } static void iwl_flip_hw_address(__le32 mac_addr0, __le32 mac_addr1, u8 *dest) { const u8 *hw_addr; hw_addr = (const u8 *)&mac_addr0; dest[0] = hw_addr[3]; dest[1] = hw_addr[2]; dest[2] = hw_addr[1]; dest[3] = hw_addr[0]; hw_addr = (const u8 *)&mac_addr1; dest[4] = hw_addr[1]; dest[5] = hw_addr[0]; } static void iwl_set_hw_address_from_csr(struct iwl_trans *trans, struct iwl_nvm_data *data) { __le32 mac_addr0 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR0_STRAP(trans))); __le32 mac_addr1 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR1_STRAP(trans))); iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr); /* * If the OEM fused a valid address, use it instead of the one in the * OTP */ if (is_valid_ether_addr(data->hw_addr)) return; mac_addr0 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR0_OTP(trans))); mac_addr1 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR1_OTP(trans))); iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr); } static void iwl_set_hw_address_family_8000(struct iwl_trans *trans, const struct iwl_cfg *cfg, struct iwl_nvm_data *data, const __le16 *mac_override, const __be16 *nvm_hw) { const u8 *hw_addr; if (mac_override) { static const u8 reserved_mac[] = { 0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00 }; hw_addr = (const u8 *)(mac_override + MAC_ADDRESS_OVERRIDE_EXT_NVM); /* * Store the MAC address from MAO section. * No byte swapping is required in MAO section */ memcpy(data->hw_addr, hw_addr, ETH_ALEN); /* * Force the use of the OTP MAC address in case of reserved MAC * address in the NVM, or if address is given but invalid. */ if (is_valid_ether_addr(data->hw_addr) && memcmp(reserved_mac, hw_addr, ETH_ALEN) != 0) return; IWL_ERR(trans, "mac address from nvm override section is not valid\n"); } if (nvm_hw) { /* read the mac address from WFMP registers */ __le32 mac_addr0 = cpu_to_le32(iwl_trans_read_prph(trans, WFMP_MAC_ADDR_0)); __le32 mac_addr1 = cpu_to_le32(iwl_trans_read_prph(trans, WFMP_MAC_ADDR_1)); iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr); return; } IWL_ERR(trans, "mac address is not found\n"); } static int iwl_set_hw_address(struct iwl_trans *trans, const struct iwl_cfg *cfg, struct iwl_nvm_data *data, const __be16 *nvm_hw, const __le16 *mac_override) { if (cfg->mac_addr_from_csr) { iwl_set_hw_address_from_csr(trans, data); } else if (cfg->nvm_type != IWL_NVM_EXT) { const u8 *hw_addr = (const u8 *)(nvm_hw + HW_ADDR); /* The byte order is little endian 16 bit, meaning 214365 */ data->hw_addr[0] = hw_addr[1]; data->hw_addr[1] = hw_addr[0]; data->hw_addr[2] = hw_addr[3]; data->hw_addr[3] = hw_addr[2]; data->hw_addr[4] = hw_addr[5]; data->hw_addr[5] = hw_addr[4]; } else { iwl_set_hw_address_family_8000(trans, cfg, data, mac_override, nvm_hw); } if (!is_valid_ether_addr(data->hw_addr)) { IWL_ERR(trans, "no valid mac address was found\n"); return -EINVAL; } if (!trans->csme_own) #if defined(__linux__) IWL_INFO(trans, "base HW address: %pM, OTP minor version: 0x%x\n", data->hw_addr, iwl_read_prph(trans, REG_OTP_MINOR)); #elif defined(__FreeBSD__) IWL_INFO(trans, "base HW address: %6D, OTP minor version: 0x%x\n", data->hw_addr, ":", iwl_read_prph(trans, REG_OTP_MINOR)); #endif return 0; } static bool iwl_nvm_no_wide_in_5ghz(struct iwl_trans *trans, const struct iwl_cfg *cfg, const __be16 *nvm_hw) { /* * Workaround a bug in Indonesia SKUs where the regulatory in * some 7000-family OTPs erroneously allow wide channels in * 5GHz. 
To check for Indonesia, we take the SKU value from * bits 1-4 in the subsystem ID and check if it is either 5 or * 9. In those cases, we need to force-disable wide channels * in 5GHz otherwise the FW will throw a sysassert when we try * to use them. */ if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000) { /* * Unlike the other sections in the NVM, the hw * section uses big-endian. */ u16 subsystem_id = be16_to_cpup(nvm_hw + SUBSYSTEM_ID); u8 sku = (subsystem_id & 0x1e) >> 1; if (sku == 5 || sku == 9) { IWL_DEBUG_EEPROM(trans->dev, "disabling wide channels in 5GHz (0x%0x %d)\n", subsystem_id, sku); return true; } } return false; } struct iwl_nvm_data * iwl_parse_mei_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, const struct iwl_mei_nvm *mei_nvm, const struct iwl_fw *fw) { struct iwl_nvm_data *data; u32 sbands_flags = 0; u8 rx_chains = fw->valid_rx_ant; u8 tx_chains = fw->valid_rx_ant; if (cfg->uhb_supported) data = kzalloc(struct_size(data, channels, IWL_NVM_NUM_CHANNELS_UHB), GFP_KERNEL); else data = kzalloc(struct_size(data, channels, IWL_NVM_NUM_CHANNELS_EXT), GFP_KERNEL); if (!data) return NULL; BUILD_BUG_ON(ARRAY_SIZE(mei_nvm->channels) != IWL_NVM_NUM_CHANNELS_UHB); data->nvm_version = mei_nvm->nvm_version; iwl_set_radio_cfg(cfg, data, mei_nvm->radio_cfg); if (data->valid_tx_ant) tx_chains &= data->valid_tx_ant; if (data->valid_rx_ant) rx_chains &= data->valid_rx_ant; data->sku_cap_mimo_disabled = false; data->sku_cap_band_24ghz_enable = true; data->sku_cap_band_52ghz_enable = true; data->sku_cap_11n_enable = !(iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL); data->sku_cap_11ac_enable = true; data->sku_cap_11ax_enable = mei_nvm->caps & MEI_NVM_CAPS_11AX_SUPPORT; data->lar_enabled = mei_nvm->caps & MEI_NVM_CAPS_LARI_SUPPORT; data->n_hw_addrs = mei_nvm->n_hw_addrs; /* If no valid mac address was found - bail out */ if (iwl_set_hw_address(trans, cfg, data, NULL, NULL)) { kfree(data); return NULL; } if (data->lar_enabled && fw_has_capa(&fw->ucode_capa, IWL_UCODE_TLV_CAPA_LAR_SUPPORT)) sbands_flags |= IWL_NVM_SBANDS_FLAGS_LAR; iwl_init_sbands(trans, data, mei_nvm->channels, tx_chains, rx_chains, sbands_flags, true, fw); return data; } IWL_EXPORT_SYMBOL(iwl_parse_mei_nvm_data); struct iwl_nvm_data * iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, const struct iwl_fw *fw, const __be16 *nvm_hw, const __le16 *nvm_sw, const __le16 *nvm_calib, const __le16 *regulatory, const __le16 *mac_override, const __le16 *phy_sku, u8 tx_chains, u8 rx_chains) { struct iwl_nvm_data *data; bool lar_enabled; u32 sku, radio_cfg; u32 sbands_flags = 0; u16 lar_config; const __le16 *ch_section; if (cfg->uhb_supported) data = kzalloc(struct_size(data, channels, IWL_NVM_NUM_CHANNELS_UHB), GFP_KERNEL); else if (cfg->nvm_type != IWL_NVM_EXT) data = kzalloc(struct_size(data, channels, IWL_NVM_NUM_CHANNELS), GFP_KERNEL); else data = kzalloc(struct_size(data, channels, IWL_NVM_NUM_CHANNELS_EXT), GFP_KERNEL); if (!data) return NULL; data->nvm_version = iwl_get_nvm_version(cfg, nvm_sw); radio_cfg = iwl_get_radio_cfg(cfg, nvm_sw, phy_sku); iwl_set_radio_cfg(cfg, data, radio_cfg); if (data->valid_tx_ant) tx_chains &= data->valid_tx_ant; if (data->valid_rx_ant) rx_chains &= data->valid_rx_ant; sku = iwl_get_sku(cfg, nvm_sw, phy_sku); data->sku_cap_band_24ghz_enable = sku & NVM_SKU_CAP_BAND_24GHZ; data->sku_cap_band_52ghz_enable = sku & NVM_SKU_CAP_BAND_52GHZ; data->sku_cap_11n_enable = sku & NVM_SKU_CAP_11N_ENABLE; if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL) 
data->sku_cap_11n_enable = false; data->sku_cap_11ac_enable = data->sku_cap_11n_enable && (sku & NVM_SKU_CAP_11AC_ENABLE); data->sku_cap_mimo_disabled = sku & NVM_SKU_CAP_MIMO_DISABLE; data->n_hw_addrs = iwl_get_n_hw_addrs(cfg, nvm_sw); if (cfg->nvm_type != IWL_NVM_EXT) { /* Checking for required sections */ if (!nvm_calib) { IWL_ERR(trans, "Can't parse empty Calib NVM sections\n"); kfree(data); return NULL; } ch_section = cfg->nvm_type == IWL_NVM_SDP ? &regulatory[NVM_CHANNELS_SDP] : &nvm_sw[NVM_CHANNELS]; /* in family 8000 Xtal calibration values moved to OTP */ data->xtal_calib[0] = *(nvm_calib + XTAL_CALIB); data->xtal_calib[1] = *(nvm_calib + XTAL_CALIB + 1); lar_enabled = true; } else { u16 lar_offset = data->nvm_version < 0xE39 ? NVM_LAR_OFFSET_OLD : NVM_LAR_OFFSET; lar_config = le16_to_cpup(regulatory + lar_offset); data->lar_enabled = !!(lar_config & NVM_LAR_ENABLED); lar_enabled = data->lar_enabled; ch_section = &regulatory[NVM_CHANNELS_EXTENDED]; } /* If no valid mac address was found - bail out */ if (iwl_set_hw_address(trans, cfg, data, nvm_hw, mac_override)) { kfree(data); return NULL; } if (lar_enabled && fw_has_capa(&fw->ucode_capa, IWL_UCODE_TLV_CAPA_LAR_SUPPORT)) sbands_flags |= IWL_NVM_SBANDS_FLAGS_LAR; if (iwl_nvm_no_wide_in_5ghz(trans, cfg, nvm_hw)) sbands_flags |= IWL_NVM_SBANDS_FLAGS_NO_WIDE_IN_5GHZ; iwl_init_sbands(trans, data, ch_section, tx_chains, rx_chains, sbands_flags, false, fw); data->calib_version = 255; return data; } IWL_EXPORT_SYMBOL(iwl_parse_nvm_data); static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan, int ch_idx, u16 nvm_flags, struct iwl_reg_capa reg_capa, const struct iwl_cfg *cfg) { u32 flags = NL80211_RRF_NO_HT40; if (ch_idx < NUM_2GHZ_CHANNELS && (nvm_flags & NVM_CHANNEL_40MHZ)) { if (nvm_chan[ch_idx] <= LAST_2GHZ_HT_PLUS) flags &= ~NL80211_RRF_NO_HT40PLUS; if (nvm_chan[ch_idx] >= FIRST_2GHZ_HT_MINUS) flags &= ~NL80211_RRF_NO_HT40MINUS; } else if (nvm_flags & NVM_CHANNEL_40MHZ) { if ((ch_idx - NUM_2GHZ_CHANNELS) % 2 == 0) flags &= ~NL80211_RRF_NO_HT40PLUS; else flags &= ~NL80211_RRF_NO_HT40MINUS; } if (!(nvm_flags & NVM_CHANNEL_80MHZ)) flags |= NL80211_RRF_NO_80MHZ; if (!(nvm_flags & NVM_CHANNEL_160MHZ)) flags |= NL80211_RRF_NO_160MHZ; if (!(nvm_flags & NVM_CHANNEL_ACTIVE)) flags |= NL80211_RRF_NO_IR; if (nvm_flags & NVM_CHANNEL_RADAR) flags |= NL80211_RRF_DFS; if (nvm_flags & NVM_CHANNEL_INDOOR_ONLY) flags |= NL80211_RRF_NO_OUTDOOR; /* Set the GO concurrent flag only in case that NO_IR is set. 
* Otherwise it is meaningless */ if ((nvm_flags & NVM_CHANNEL_GO_CONCURRENT) && (flags & NL80211_RRF_NO_IR)) flags |= NL80211_RRF_GO_CONCURRENT; /* * reg_capa is per regulatory domain so apply it for every channel */ if (ch_idx >= NUM_2GHZ_CHANNELS) { if (!reg_capa.allow_40mhz) flags |= NL80211_RRF_NO_HT40; if (!reg_capa.allow_80mhz) flags |= NL80211_RRF_NO_80MHZ; if (!reg_capa.allow_160mhz) flags |= NL80211_RRF_NO_160MHZ; } if (reg_capa.disable_11ax) flags |= NL80211_RRF_NO_HE; return flags; } static struct iwl_reg_capa iwl_get_reg_capa(u16 flags, u8 resp_ver) { struct iwl_reg_capa reg_capa; if (resp_ver >= REG_CAPA_V2_RESP_VER) { reg_capa.allow_40mhz = flags & REG_CAPA_V2_40MHZ_ALLOWED; reg_capa.allow_80mhz = flags & REG_CAPA_V2_80MHZ_ALLOWED; reg_capa.allow_160mhz = flags & REG_CAPA_V2_160MHZ_ALLOWED; reg_capa.disable_11ax = flags & REG_CAPA_V2_11AX_DISABLED; } else { reg_capa.allow_40mhz = !(flags & REG_CAPA_40MHZ_FORBIDDEN); reg_capa.allow_80mhz = flags & REG_CAPA_80MHZ_ALLOWED; reg_capa.allow_160mhz = flags & REG_CAPA_160MHZ_ALLOWED; reg_capa.disable_11ax = flags & REG_CAPA_11AX_DISABLED; } return reg_capa; } struct ieee80211_regdomain * iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, int num_of_ch, __le32 *channels, u16 fw_mcc, u16 geo_info, u16 cap, u8 resp_ver) { int ch_idx; u16 ch_flags; u32 reg_rule_flags, prev_reg_rule_flags = 0; const u16 *nvm_chan; struct ieee80211_regdomain *regd, *copy_rd; struct ieee80211_reg_rule *rule; enum nl80211_band band; int center_freq, prev_center_freq = 0; int valid_rules = 0; bool new_rule; int max_num_ch; struct iwl_reg_capa reg_capa; if (cfg->uhb_supported) { max_num_ch = IWL_NVM_NUM_CHANNELS_UHB; nvm_chan = iwl_uhb_nvm_channels; } else if (cfg->nvm_type == IWL_NVM_EXT) { max_num_ch = IWL_NVM_NUM_CHANNELS_EXT; nvm_chan = iwl_ext_nvm_channels; } else { max_num_ch = IWL_NVM_NUM_CHANNELS; nvm_chan = iwl_nvm_channels; } if (num_of_ch > max_num_ch) { IWL_DEBUG_DEV(dev, IWL_DL_LAR, "Num of channels (%d) is greater than expected. Truncating to %d\n", num_of_ch, max_num_ch); num_of_ch = max_num_ch; } if (WARN_ON_ONCE(num_of_ch > NL80211_MAX_SUPP_REG_RULES)) return ERR_PTR(-EINVAL); IWL_DEBUG_DEV(dev, IWL_DL_LAR, "building regdom for %d channels\n", num_of_ch); /* build a regdomain rule for every valid channel */ regd = kzalloc(struct_size(regd, reg_rules, num_of_ch), GFP_KERNEL); if (!regd) return ERR_PTR(-ENOMEM); /* set alpha2 from FW. 
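* fw_mcc packs the two ASCII country-code characters into one 16-bit value, high byte first (e.g. 0x5553 is "US").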
*/ regd->alpha2[0] = fw_mcc >> 8; regd->alpha2[1] = fw_mcc & 0xff; /* parse regulatory capability flags */ reg_capa = iwl_get_reg_capa(cap, resp_ver); for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) { ch_flags = (u16)__le32_to_cpup(channels + ch_idx); band = iwl_nl80211_band_from_channel_idx(ch_idx); center_freq = ieee80211_channel_to_frequency(nvm_chan[ch_idx], band); new_rule = false; if (!(ch_flags & NVM_CHANNEL_VALID)) { iwl_nvm_print_channel_flags(dev, IWL_DL_LAR, nvm_chan[ch_idx], ch_flags); continue; } reg_rule_flags = iwl_nvm_get_regdom_bw_flags(nvm_chan, ch_idx, ch_flags, reg_capa, cfg); /* we can't continue the same rule */ if (ch_idx == 0 || prev_reg_rule_flags != reg_rule_flags || center_freq - prev_center_freq > 20) { valid_rules++; new_rule = true; } rule = &regd->reg_rules[valid_rules - 1]; if (new_rule) rule->freq_range.start_freq_khz = MHZ_TO_KHZ(center_freq - 10); rule->freq_range.end_freq_khz = MHZ_TO_KHZ(center_freq + 10); /* this doesn't matter - not used by FW */ rule->power_rule.max_antenna_gain = DBI_TO_MBI(6); rule->power_rule.max_eirp = DBM_TO_MBM(IWL_DEFAULT_MAX_TX_POWER); rule->flags = reg_rule_flags; /* rely on auto-calculation to merge BW of contiguous chans */ rule->flags |= NL80211_RRF_AUTO_BW; rule->freq_range.max_bandwidth_khz = 0; prev_center_freq = center_freq; prev_reg_rule_flags = reg_rule_flags; iwl_nvm_print_channel_flags(dev, IWL_DL_LAR, nvm_chan[ch_idx], ch_flags); if (!(geo_info & GEO_WMM_ETSI_5GHZ_INFO) || band == NL80211_BAND_2GHZ) continue; reg_query_regdb_wmm(regd->alpha2, center_freq, rule); } /* * Certain firmware versions might report no valid channels * if booted in RF-kill, i.e. not all calibrations etc. are * running. We'll get out of this situation later when the * rfkill is removed and we update the regdomain again, but * since cfg80211 doesn't accept an empty regdomain, add a * dummy (unusable) rule here in this case so we can init. */ if (!valid_rules) { valid_rules = 1; rule = &regd->reg_rules[valid_rules - 1]; rule->freq_range.start_freq_khz = MHZ_TO_KHZ(2412); rule->freq_range.end_freq_khz = MHZ_TO_KHZ(2413); rule->freq_range.max_bandwidth_khz = MHZ_TO_KHZ(1); rule->power_rule.max_antenna_gain = DBI_TO_MBI(6); rule->power_rule.max_eirp = DBM_TO_MBM(IWL_DEFAULT_MAX_TX_POWER); } regd->n_reg_rules = valid_rules; /* * Narrow down regdom for unused regulatory rules to prevent hole * between reg rules to wmm rules. */ copy_rd = kmemdup(regd, struct_size(regd, reg_rules, valid_rules), GFP_KERNEL); if (!copy_rd) copy_rd = ERR_PTR(-ENOMEM); kfree(regd); return copy_rd; } IWL_EXPORT_SYMBOL(iwl_parse_nvm_mcc_info); #define IWL_MAX_NVM_SECTION_SIZE 0x1b58 #define IWL_MAX_EXT_NVM_SECTION_SIZE 0x1ffc #define MAX_NVM_FILE_LEN 16384 void iwl_nvm_fixups(u32 hw_id, unsigned int section, u8 *data, unsigned int len) { #define IWL_4165_DEVICE_ID 0x5501 #define NVM_SKU_CAP_MIMO_DISABLE BIT(5) if (section == NVM_SECTION_TYPE_PHY_SKU && hw_id == IWL_4165_DEVICE_ID && data && len >= 5 && (data[4] & NVM_SKU_CAP_MIMO_DISABLE)) /* OTP 0x52 bug work around: it's a 1x1 device */ data[3] = ANT_B | (ANT_B << 4); } IWL_EXPORT_SYMBOL(iwl_nvm_fixups); /* * Reads external NVM from a file into mvm->nvm_sections * * HOW TO CREATE THE NVM FILE FORMAT: * ------------------------------ * 1. create hex file, format: * 3800 -> header * 0000 -> header * 5a40 -> data * * rev - 6 bit (word1) * len - 10 bit (word1) * id - 4 bit (word2) * rsv - 12 bit (word2) * * 2. flip 8bits with 8 bits per line to get the right NVM file format * * 3. create binary file from the hex file * * 4. 
save as "iNVM_xxx.bin" under /lib/firmware */ int iwl_read_external_nvm(struct iwl_trans *trans, const char *nvm_file_name, struct iwl_nvm_section *nvm_sections) { int ret, section_size; u16 section_id; const struct firmware *fw_entry; const struct { __le16 word1; __le16 word2; u8 data[]; } *file_sec; const u8 *eof; u8 *temp; int max_section_size; const __le32 *dword_buff; #define NVM_WORD1_LEN(x) (8 * (x & 0x03FF)) #define NVM_WORD2_ID(x) (x >> 12) #define EXT_NVM_WORD2_LEN(x) (2 * (((x) & 0xFF) << 8 | (x) >> 8)) #define EXT_NVM_WORD1_ID(x) ((x) >> 4) #define NVM_HEADER_0 (0x2A504C54) #define NVM_HEADER_1 (0x4E564D2A) #define NVM_HEADER_SIZE (4 * sizeof(u32)) IWL_DEBUG_EEPROM(trans->dev, "Read from external NVM\n"); /* Maximal size depends on NVM version */ if (trans->cfg->nvm_type != IWL_NVM_EXT) max_section_size = IWL_MAX_NVM_SECTION_SIZE; else max_section_size = IWL_MAX_EXT_NVM_SECTION_SIZE; /* * Obtain NVM image via request_firmware. Since we already used * request_firmware_nowait() for the firmware binary load and only * get here after that we assume the NVM request can be satisfied * synchronously. */ ret = request_firmware(&fw_entry, nvm_file_name, trans->dev); if (ret) { IWL_ERR(trans, "ERROR: %s isn't available %d\n", nvm_file_name, ret); return ret; } IWL_INFO(trans, "Loaded NVM file %s (%zu bytes)\n", nvm_file_name, fw_entry->size); if (fw_entry->size > MAX_NVM_FILE_LEN) { IWL_ERR(trans, "NVM file too large\n"); ret = -EINVAL; goto out; } eof = fw_entry->data + fw_entry->size; dword_buff = (const __le32 *)fw_entry->data; /* some NVM file will contain a header. * The header is identified by 2 dwords header as follow: * dword[0] = 0x2A504C54 * dword[1] = 0x4E564D2A * * This header must be skipped when providing the NVM data to the FW. */ if (fw_entry->size > NVM_HEADER_SIZE && dword_buff[0] == cpu_to_le32(NVM_HEADER_0) && dword_buff[1] == cpu_to_le32(NVM_HEADER_1)) { file_sec = (const void *)(fw_entry->data + NVM_HEADER_SIZE); IWL_INFO(trans, "NVM Version %08X\n", le32_to_cpu(dword_buff[2])); IWL_INFO(trans, "NVM Manufacturing date %08X\n", le32_to_cpu(dword_buff[3])); /* nvm file validation, dword_buff[2] holds the file version */ if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_8000 && trans->hw_rev_step == SILICON_C_STEP && le32_to_cpu(dword_buff[2]) < 0xE4A) { ret = -EFAULT; goto out; } } else { file_sec = (const void *)fw_entry->data; } while (true) { if (file_sec->data > eof) { IWL_ERR(trans, "ERROR - NVM file too short for section header\n"); ret = -EINVAL; break; } /* check for EOF marker */ if (!file_sec->word1 && !file_sec->word2) { ret = 0; break; } if (trans->cfg->nvm_type != IWL_NVM_EXT) { section_size = 2 * NVM_WORD1_LEN(le16_to_cpu(file_sec->word1)); section_id = NVM_WORD2_ID(le16_to_cpu(file_sec->word2)); } else { section_size = 2 * EXT_NVM_WORD2_LEN( le16_to_cpu(file_sec->word2)); section_id = EXT_NVM_WORD1_ID( le16_to_cpu(file_sec->word1)); } if (section_size > max_section_size) { IWL_ERR(trans, "ERROR - section too large (%d)\n", section_size); ret = -EINVAL; break; } if (!section_size) { IWL_ERR(trans, "ERROR - section empty\n"); ret = -EINVAL; break; } if (file_sec->data + section_size > eof) { IWL_ERR(trans, "ERROR - NVM file too short for section (%d bytes)\n", section_size); ret = -EINVAL; break; } if (WARN(section_id >= NVM_MAX_NUM_SECTIONS, "Invalid NVM section ID %d\n", section_id)) { ret = -EINVAL; break; } temp = kmemdup(file_sec->data, section_size, GFP_KERNEL); if (!temp) { ret = -ENOMEM; break; } iwl_nvm_fixups(trans->hw_id, section_id, 
temp, section_size); kfree(nvm_sections[section_id].data); nvm_sections[section_id].data = temp; nvm_sections[section_id].length = section_size; /* advance to the next section */ file_sec = (const void *)(file_sec->data + section_size); } out: release_firmware(fw_entry); return ret; } IWL_EXPORT_SYMBOL(iwl_read_external_nvm); struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans, const struct iwl_fw *fw) { struct iwl_nvm_get_info cmd = {}; struct iwl_nvm_data *nvm; struct iwl_host_cmd hcmd = { .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL, .data = { &cmd, }, .len = { sizeof(cmd) }, .id = WIDE_ID(REGULATORY_AND_NVM_GROUP, NVM_GET_INFO) }; int ret; bool empty_otp; u32 mac_flags; u32 sbands_flags = 0; /* * All the values in iwl_nvm_get_info_rsp v4 are the same as * in v3, except for the channel profile part of the * regulatory. So we can just access the new struct, with the * exception of the latter. */ struct iwl_nvm_get_info_rsp *rsp; struct iwl_nvm_get_info_rsp_v3 *rsp_v3; bool v4 = fw_has_api(&fw->ucode_capa, IWL_UCODE_TLV_API_REGULATORY_NVM_INFO); size_t rsp_size = v4 ? sizeof(*rsp) : sizeof(*rsp_v3); void *channel_profile; ret = iwl_trans_send_cmd(trans, &hcmd); if (ret) return ERR_PTR(ret); if (WARN(iwl_rx_packet_payload_len(hcmd.resp_pkt) != rsp_size, "Invalid payload len in NVM response from FW %d", iwl_rx_packet_payload_len(hcmd.resp_pkt))) { ret = -EINVAL; goto out; } rsp = (void *)hcmd.resp_pkt->data; empty_otp = !!(le32_to_cpu(rsp->general.flags) & NVM_GENERAL_FLAGS_EMPTY_OTP); if (empty_otp) IWL_INFO(trans, "OTP is empty\n"); nvm = kzalloc(struct_size(nvm, channels, IWL_NUM_CHANNELS), GFP_KERNEL); if (!nvm) { ret = -ENOMEM; goto out; } iwl_set_hw_address_from_csr(trans, nvm); /* TODO: if platform NVM has MAC address - override it here */ if (!is_valid_ether_addr(nvm->hw_addr)) { IWL_ERR(trans, "no valid mac address was found\n"); ret = -EINVAL; goto err_free; } #if defined(__linux__) IWL_INFO(trans, "base HW address: %pM\n", nvm->hw_addr); #elif defined(__FreeBSD__) IWL_INFO(trans, "base HW address: %6D\n", nvm->hw_addr, ":"); #endif /* Initialize general data */ nvm->nvm_version = le16_to_cpu(rsp->general.nvm_version); nvm->n_hw_addrs = rsp->general.n_hw_addrs; if (nvm->n_hw_addrs == 0) IWL_WARN(trans, "Firmware declares no reserved mac addresses. OTP is empty: %d\n", empty_otp); /* Initialize MAC sku data */ mac_flags = le32_to_cpu(rsp->mac_sku.mac_sku_flags); nvm->sku_cap_11ac_enable = !!(mac_flags & NVM_MAC_SKU_FLAGS_802_11AC_ENABLED); nvm->sku_cap_11n_enable = !!(mac_flags & NVM_MAC_SKU_FLAGS_802_11N_ENABLED); nvm->sku_cap_11ax_enable = !!(mac_flags & NVM_MAC_SKU_FLAGS_802_11AX_ENABLED); nvm->sku_cap_band_24ghz_enable = !!(mac_flags & NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED); nvm->sku_cap_band_52ghz_enable = !!(mac_flags & NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED); nvm->sku_cap_mimo_disabled = !!(mac_flags & NVM_MAC_SKU_FLAGS_MIMO_DISABLED); /* Initialize PHY sku data */ nvm->valid_tx_ant = (u8)le32_to_cpu(rsp->phy_sku.tx_chains); nvm->valid_rx_ant = (u8)le32_to_cpu(rsp->phy_sku.rx_chains); if (le32_to_cpu(rsp->regulatory.lar_enabled) && fw_has_capa(&fw->ucode_capa, IWL_UCODE_TLV_CAPA_LAR_SUPPORT)) { nvm->lar_enabled = true; sbands_flags |= IWL_NVM_SBANDS_FLAGS_LAR; } rsp_v3 = (void *)rsp; channel_profile = v4 ? 
(void *)rsp->regulatory.channel_profile : (void *)rsp_v3->regulatory.channel_profile; iwl_init_sbands(trans, nvm, channel_profile, nvm->valid_tx_ant & fw->valid_tx_ant, nvm->valid_rx_ant & fw->valid_rx_ant, sbands_flags, v4, fw); iwl_free_resp(&hcmd); return nvm; err_free: kfree(nvm); out: iwl_free_resp(&hcmd); return ERR_PTR(ret); } IWL_EXPORT_SYMBOL(iwl_get_nvm); diff --git a/sys/contrib/dev/iwlwifi/mvm/debugfs.c b/sys/contrib/dev/iwlwifi/mvm/debugfs.c index fecd7d4a7bdc..49898fd99594 100644 --- a/sys/contrib/dev/iwlwifi/mvm/debugfs.c +++ b/sys/contrib/dev/iwlwifi/mvm/debugfs.c @@ -1,2145 +1,1947 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2012-2014, 2018-2021 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #include +#include #include #include #include "mvm.h" #include "sta.h" #include "iwl-io.h" #include "debugfs.h" #include "iwl-modparams.h" #include "fw/error-dump.h" static ssize_t iwl_dbgfs_ctdp_budget_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; char buf[16]; int pos, budget; if (!iwl_mvm_is_ctdp_supported(mvm)) return -EOPNOTSUPP; if (!iwl_mvm_firmware_running(mvm) || mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) return -EIO; mutex_lock(&mvm->mutex); budget = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_REPORT, 0); mutex_unlock(&mvm->mutex); if (budget < 0) return budget; pos = scnprintf(buf, sizeof(buf), "%d\n", budget); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } static ssize_t iwl_dbgfs_stop_ctdp_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { int ret; if (!iwl_mvm_is_ctdp_supported(mvm)) return -EOPNOTSUPP; if (!iwl_mvm_firmware_running(mvm) || mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) return -EIO; mutex_lock(&mvm->mutex); ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_STOP, 0); mutex_unlock(&mvm->mutex); return ret ?: count; } static ssize_t iwl_dbgfs_force_ctkill_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { if (!iwl_mvm_firmware_running(mvm) || mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) return -EIO; iwl_mvm_enter_ctkill(mvm); return count; } static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { int ret; u32 flush_arg; if (!iwl_mvm_firmware_running(mvm) || mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) return -EIO; if (kstrtou32(buf, 0, &flush_arg)) return -EINVAL; if (iwl_mvm_has_new_tx_api(mvm)) { IWL_DEBUG_TX_QUEUES(mvm, "FLUSHING all tids queues on sta_id = %d\n", flush_arg); mutex_lock(&mvm->mutex); ret = iwl_mvm_flush_sta_tids(mvm, flush_arg, 0xFFFF) ? : count; mutex_unlock(&mvm->mutex); return ret; } IWL_DEBUG_TX_QUEUES(mvm, "FLUSHING queues mask to flush = 0x%x\n", flush_arg); mutex_lock(&mvm->mutex); ret = iwl_mvm_flush_tx_path(mvm, flush_arg) ? 
: count; mutex_unlock(&mvm->mutex); return ret; } static ssize_t iwl_dbgfs_sta_drain_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { struct iwl_mvm_sta *mvmsta; int sta_id, drain, ret; if (!iwl_mvm_firmware_running(mvm) || mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) return -EIO; if (sscanf(buf, "%d %d", &sta_id, &drain) != 2) return -EINVAL; if (sta_id < 0 || sta_id >= mvm->fw->ucode_capa.num_stations) return -EINVAL; if (drain < 0 || drain > 1) return -EINVAL; mutex_lock(&mvm->mutex); mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id); if (!mvmsta) ret = -ENOENT; else ret = iwl_mvm_drain_sta(mvm, mvmsta, drain) ? : count; mutex_unlock(&mvm->mutex); return ret; } static ssize_t iwl_dbgfs_sram_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; const struct fw_img *img; unsigned int ofs, len; size_t ret; u8 *ptr; if (!iwl_mvm_firmware_running(mvm)) return -EINVAL; /* default is to dump the entire data segment */ img = &mvm->fw->img[mvm->fwrt.cur_fw_img]; ofs = img->sec[IWL_UCODE_SECTION_DATA].offset; len = img->sec[IWL_UCODE_SECTION_DATA].len; if (mvm->dbgfs_sram_len) { ofs = mvm->dbgfs_sram_offset; len = mvm->dbgfs_sram_len; } ptr = kzalloc(len, GFP_KERNEL); if (!ptr) return -ENOMEM; iwl_trans_read_mem_bytes(mvm->trans, ofs, ptr, len); ret = simple_read_from_buffer(user_buf, count, ppos, ptr, len); kfree(ptr); return ret; } static ssize_t iwl_dbgfs_sram_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { const struct fw_img *img; u32 offset, len; u32 img_offset, img_len; if (!iwl_mvm_firmware_running(mvm)) return -EINVAL; img = &mvm->fw->img[mvm->fwrt.cur_fw_img]; img_offset = img->sec[IWL_UCODE_SECTION_DATA].offset; img_len = img->sec[IWL_UCODE_SECTION_DATA].len; if (sscanf(buf, "%x,%x", &offset, &len) == 2) { if ((offset & 0x3) || (len & 0x3)) return -EINVAL; if (offset + len > img_offset + img_len) return -EINVAL; mvm->dbgfs_sram_offset = offset; mvm->dbgfs_sram_len = len; } else { mvm->dbgfs_sram_offset = 0; mvm->dbgfs_sram_len = 0; } return count; } static ssize_t iwl_dbgfs_set_nic_temperature_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; char buf[16]; int pos; if (!mvm->temperature_test) pos = scnprintf(buf , sizeof(buf), "disabled\n"); else pos = scnprintf(buf , sizeof(buf), "%d\n", mvm->temperature); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } /* * Set NIC Temperature * Cause the driver to ignore the actual NIC temperature reported by the FW * Enable: any value between IWL_MVM_DEBUG_SET_TEMPERATURE_MIN - * IWL_MVM_DEBUG_SET_TEMPERATURE_MAX * Disable: IWL_MVM_DEBUG_SET_TEMPERATURE_DISABLE */ static ssize_t iwl_dbgfs_set_nic_temperature_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { int temperature; if (!iwl_mvm_firmware_running(mvm) && !mvm->temperature_test) return -EIO; if (kstrtoint(buf, 10, &temperature)) return -EINVAL; /* not a legal temperature */ if ((temperature > IWL_MVM_DEBUG_SET_TEMPERATURE_MAX && temperature != IWL_MVM_DEBUG_SET_TEMPERATURE_DISABLE) || temperature < IWL_MVM_DEBUG_SET_TEMPERATURE_MIN) return -EINVAL; mutex_lock(&mvm->mutex); if (temperature == IWL_MVM_DEBUG_SET_TEMPERATURE_DISABLE) { if (!mvm->temperature_test) goto out; mvm->temperature_test = false; /* Since we can't read the temp while awake, just set * it to zero until we get the next RX stats from the * firmware. 
*/ mvm->temperature = 0; } else { mvm->temperature_test = true; mvm->temperature = temperature; } IWL_DEBUG_TEMP(mvm, "%sabling debug set temperature (temp = %d)\n", mvm->temperature_test ? "En" : "Dis" , mvm->temperature); /* handle the temperature change */ iwl_mvm_tt_handler(mvm); out: mutex_unlock(&mvm->mutex); return count; } static ssize_t iwl_dbgfs_nic_temp_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; char buf[16]; int pos, ret; s32 temp; if (!iwl_mvm_firmware_running(mvm)) return -EIO; mutex_lock(&mvm->mutex); ret = iwl_mvm_get_temp(mvm, &temp); mutex_unlock(&mvm->mutex); if (ret) return -EIO; pos = scnprintf(buf , sizeof(buf), "%d\n", temp); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } #ifdef CONFIG_ACPI static ssize_t iwl_dbgfs_sar_geo_profile_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; char buf[256]; int pos = 0; int bufsz = sizeof(buf); int tbl_idx; if (!iwl_mvm_firmware_running(mvm)) return -EIO; mutex_lock(&mvm->mutex); tbl_idx = iwl_mvm_get_sar_geo_profile(mvm); if (tbl_idx < 0) { mutex_unlock(&mvm->mutex); return tbl_idx; } if (!tbl_idx) { pos = scnprintf(buf, bufsz, "SAR geographic profile disabled\n"); } else { pos += scnprintf(buf + pos, bufsz - pos, "Use geographic profile %d\n", tbl_idx); pos += scnprintf(buf + pos, bufsz - pos, "2.4GHz:\n\tChain A offset: %hhu dBm\n\tChain B offset: %hhu dBm\n\tmax tx power: %hhu dBm\n", mvm->fwrt.geo_profiles[tbl_idx - 1].bands[0].chains[0], mvm->fwrt.geo_profiles[tbl_idx - 1].bands[0].chains[1], mvm->fwrt.geo_profiles[tbl_idx - 1].bands[0].max); pos += scnprintf(buf + pos, bufsz - pos, "5.2GHz:\n\tChain A offset: %hhu dBm\n\tChain B offset: %hhu dBm\n\tmax tx power: %hhu dBm\n", mvm->fwrt.geo_profiles[tbl_idx - 1].bands[1].chains[0], mvm->fwrt.geo_profiles[tbl_idx - 1].bands[1].chains[1], mvm->fwrt.geo_profiles[tbl_idx - 1].bands[1].max); } mutex_unlock(&mvm->mutex); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } #endif static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; struct ieee80211_sta *sta; char buf[400]; int i, pos = 0, bufsz = sizeof(buf); mutex_lock(&mvm->mutex); for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) { pos += scnprintf(buf + pos, bufsz - pos, "%.2d: ", i); sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], lockdep_is_held(&mvm->mutex)); if (!sta) pos += scnprintf(buf + pos, bufsz - pos, "N/A\n"); else if (IS_ERR(sta)) pos += scnprintf(buf + pos, bufsz - pos, "%ld\n", PTR_ERR(sta)); else pos += scnprintf(buf + pos, bufsz - pos, "%pM\n", sta->addr); } mutex_unlock(&mvm->mutex); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } static ssize_t iwl_dbgfs_rs_data_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ieee80211_sta *sta = file->private_data; struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw; struct iwl_mvm *mvm = lq_sta->pers.drv; static const size_t bufsz = 2048; char *buff; int desc = 0; ssize_t ret; buff = kmalloc(bufsz, GFP_KERNEL); if (!buff) return -ENOMEM; mutex_lock(&mvm->mutex); desc += scnprintf(buff + desc, bufsz - desc, "sta_id %d\n", lq_sta->pers.sta_id); desc += scnprintf(buff + desc, bufsz - desc, "fixed rate 0x%X\n", lq_sta->pers.dbg_fixed_rate); desc += scnprintf(buff + desc, 
bufsz - desc, "A-MPDU size limit %d\n", lq_sta->pers.dbg_agg_frame_count_lim); desc += scnprintf(buff + desc, bufsz - desc, "valid_tx_ant %s%s\n", (iwl_mvm_get_valid_tx_ant(mvm) & ANT_A) ? "ANT_A," : "", (iwl_mvm_get_valid_tx_ant(mvm) & ANT_B) ? "ANT_B," : ""); desc += scnprintf(buff + desc, bufsz - desc, "last tx rate=0x%X ", lq_sta->last_rate_n_flags); desc += rs_pretty_print_rate(buff + desc, bufsz - desc, lq_sta->last_rate_n_flags); if (desc < bufsz - 1) buff[desc++] = '\n'; mutex_unlock(&mvm->mutex); ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc); kfree(buff); return ret; } static ssize_t iwl_dbgfs_amsdu_len_write(struct ieee80211_sta *sta, char *buf, size_t count, loff_t *ppos) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); int i; u16 amsdu_len; if (kstrtou16(buf, 0, &amsdu_len)) return -EINVAL; /* only change from debug set <-> debug unset */ if (amsdu_len && mvmsta->orig_amsdu_len) return -EBUSY; if (amsdu_len) { mvmsta->orig_amsdu_len = sta->max_amsdu_len; sta->max_amsdu_len = amsdu_len; for (i = 0; i < ARRAY_SIZE(sta->max_tid_amsdu_len); i++) sta->max_tid_amsdu_len[i] = amsdu_len; } else { sta->max_amsdu_len = mvmsta->orig_amsdu_len; mvmsta->orig_amsdu_len = 0; } return count; } static ssize_t iwl_dbgfs_amsdu_len_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ieee80211_sta *sta = file->private_data; struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); char buf[32]; int pos; pos = scnprintf(buf, sizeof(buf), "current %d ", sta->max_amsdu_len); pos += scnprintf(buf + pos, sizeof(buf) - pos, "stored %d\n", mvmsta->orig_amsdu_len); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } static ssize_t iwl_dbgfs_disable_power_off_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; char buf[64]; int bufsz = sizeof(buf); int pos = 0; pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off_d0=%d\n", mvm->disable_power_off); pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off_d3=%d\n", mvm->disable_power_off_d3); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } static ssize_t iwl_dbgfs_disable_power_off_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { int ret, val; if (!iwl_mvm_firmware_running(mvm)) return -EIO; if (!strncmp("disable_power_off_d0=", buf, 21)) { if (sscanf(buf + 21, "%d", &val) != 1) return -EINVAL; mvm->disable_power_off = val; } else if (!strncmp("disable_power_off_d3=", buf, 21)) { if (sscanf(buf + 21, "%d", &val) != 1) return -EINVAL; mvm->disable_power_off_d3 = val; } else { return -EINVAL; } mutex_lock(&mvm->mutex); ret = iwl_mvm_power_update_device(mvm); mutex_unlock(&mvm->mutex); return ret ?: count; } static int iwl_mvm_coex_dump_mbox(struct iwl_bt_coex_profile_notif *notif, char *buf, int pos, int bufsz) { pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw0:\n"); BT_MBOX_PRINT(0, LE_SLAVE_LAT, false); BT_MBOX_PRINT(0, LE_PROF1, false); BT_MBOX_PRINT(0, LE_PROF2, false); BT_MBOX_PRINT(0, LE_PROF_OTHER, false); BT_MBOX_PRINT(0, CHL_SEQ_N, false); BT_MBOX_PRINT(0, INBAND_S, false); BT_MBOX_PRINT(0, LE_MIN_RSSI, false); BT_MBOX_PRINT(0, LE_SCAN, false); BT_MBOX_PRINT(0, LE_ADV, false); BT_MBOX_PRINT(0, LE_MAX_TX_POWER, false); BT_MBOX_PRINT(0, OPEN_CON_1, true); pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw1:\n"); BT_MBOX_PRINT(1, BR_MAX_TX_POWER, false); BT_MBOX_PRINT(1, IP_SR, false); BT_MBOX_PRINT(1, LE_MSTR, false); BT_MBOX_PRINT(1, AGGR_TRFC_LD, false); BT_MBOX_PRINT(1, 
MSG_TYPE, false); BT_MBOX_PRINT(1, SSN, true); pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw2:\n"); BT_MBOX_PRINT(2, SNIFF_ACT, false); BT_MBOX_PRINT(2, PAG, false); BT_MBOX_PRINT(2, INQUIRY, false); BT_MBOX_PRINT(2, CONN, false); BT_MBOX_PRINT(2, SNIFF_INTERVAL, false); BT_MBOX_PRINT(2, DISC, false); BT_MBOX_PRINT(2, SCO_TX_ACT, false); BT_MBOX_PRINT(2, SCO_RX_ACT, false); BT_MBOX_PRINT(2, ESCO_RE_TX, false); BT_MBOX_PRINT(2, SCO_DURATION, true); pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw3:\n"); BT_MBOX_PRINT(3, SCO_STATE, false); BT_MBOX_PRINT(3, SNIFF_STATE, false); BT_MBOX_PRINT(3, A2DP_STATE, false); BT_MBOX_PRINT(3, A2DP_SRC, false); BT_MBOX_PRINT(3, ACL_STATE, false); BT_MBOX_PRINT(3, MSTR_STATE, false); BT_MBOX_PRINT(3, OBX_STATE, false); BT_MBOX_PRINT(3, OPEN_CON_2, false); BT_MBOX_PRINT(3, TRAFFIC_LOAD, false); BT_MBOX_PRINT(3, CHL_SEQN_LSB, false); BT_MBOX_PRINT(3, INBAND_P, false); BT_MBOX_PRINT(3, MSG_TYPE_2, false); BT_MBOX_PRINT(3, SSN_2, false); BT_MBOX_PRINT(3, UPDATE_REQUEST, true); return pos; } static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; struct iwl_bt_coex_profile_notif *notif = &mvm->last_bt_notif; char *buf; int ret, pos = 0, bufsz = sizeof(char) * 1024; buf = kmalloc(bufsz, GFP_KERNEL); if (!buf) return -ENOMEM; mutex_lock(&mvm->mutex); pos += iwl_mvm_coex_dump_mbox(notif, buf, pos, bufsz); pos += scnprintf(buf + pos, bufsz - pos, "bt_ci_compliance = %d\n", notif->bt_ci_compliance); pos += scnprintf(buf + pos, bufsz - pos, "primary_ch_lut = %d\n", le32_to_cpu(notif->primary_ch_lut)); pos += scnprintf(buf + pos, bufsz - pos, "secondary_ch_lut = %d\n", le32_to_cpu(notif->secondary_ch_lut)); pos += scnprintf(buf + pos, bufsz - pos, "bt_activity_grading = %d\n", le32_to_cpu(notif->bt_activity_grading)); pos += scnprintf(buf + pos, bufsz - pos, "bt_rrc = %d\n", notif->rrc_status & 0xF); pos += scnprintf(buf + pos, bufsz - pos, "bt_ttc = %d\n", notif->ttc_status & 0xF); pos += scnprintf(buf + pos, bufsz - pos, "sync_sco = %d\n", IWL_MVM_BT_COEX_SYNC2SCO); pos += scnprintf(buf + pos, bufsz - pos, "mplut = %d\n", IWL_MVM_BT_COEX_MPLUT); mutex_unlock(&mvm->mutex); ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); kfree(buf); return ret; } #undef BT_MBOX_PRINT static ssize_t iwl_dbgfs_bt_cmd_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; struct iwl_bt_coex_ci_cmd *cmd = &mvm->last_bt_ci_cmd; char buf[256]; int bufsz = sizeof(buf); int pos = 0; mutex_lock(&mvm->mutex); pos += scnprintf(buf + pos, bufsz - pos, "Channel inhibition CMD\n"); pos += scnprintf(buf + pos, bufsz - pos, "\tPrimary Channel Bitmap 0x%016llx\n", le64_to_cpu(cmd->bt_primary_ci)); pos += scnprintf(buf + pos, bufsz - pos, "\tSecondary Channel Bitmap 0x%016llx\n", le64_to_cpu(cmd->bt_secondary_ci)); mutex_unlock(&mvm->mutex); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } static ssize_t iwl_dbgfs_bt_tx_prio_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { u32 bt_tx_prio; if (sscanf(buf, "%u", &bt_tx_prio) != 1) return -EINVAL; if (bt_tx_prio > 4) return -EINVAL; mvm->bt_tx_prio = bt_tx_prio; return count; } static ssize_t iwl_dbgfs_bt_force_ant_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { static const char * const modes_str[BT_FORCE_ANT_MAX] = { [BT_FORCE_ANT_DIS] = "dis", [BT_FORCE_ANT_AUTO] = "auto", [BT_FORCE_ANT_BT] = "bt", [BT_FORCE_ANT_WIFI] = 
"wifi", }; int ret, bt_force_ant_mode; ret = match_string(modes_str, ARRAY_SIZE(modes_str), buf); if (ret < 0) return ret; bt_force_ant_mode = ret; ret = 0; mutex_lock(&mvm->mutex); if (mvm->bt_force_ant_mode == bt_force_ant_mode) goto out; mvm->bt_force_ant_mode = bt_force_ant_mode; IWL_DEBUG_COEX(mvm, "Force mode: %s\n", modes_str[mvm->bt_force_ant_mode]); if (iwl_mvm_firmware_running(mvm)) ret = iwl_mvm_send_bt_init_conf(mvm); else ret = 0; out: mutex_unlock(&mvm->mutex); return ret ?: count; } static ssize_t iwl_dbgfs_fw_ver_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; char *buff, *pos, *endpos; static const size_t bufsz = 1024; int ret; buff = kmalloc(bufsz, GFP_KERNEL); if (!buff) return -ENOMEM; pos = buff; endpos = pos + bufsz; pos += scnprintf(pos, endpos - pos, "FW prefix: %s\n", mvm->trans->cfg->fw_name_pre); pos += scnprintf(pos, endpos - pos, "FW: %s\n", mvm->fwrt.fw->human_readable); pos += scnprintf(pos, endpos - pos, "Device: %s\n", mvm->fwrt.trans->name); pos += scnprintf(pos, endpos - pos, "Bus: %s\n", mvm->fwrt.dev->bus->name); ret = simple_read_from_buffer(user_buf, count, ppos, buff, pos - buff); kfree(buff); return ret; } static ssize_t iwl_dbgfs_phy_integration_ver_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; char *buf; size_t bufsz; int pos; ssize_t ret; bufsz = mvm->fw->phy_integration_ver_len + 2; buf = kmalloc(bufsz, GFP_KERNEL); if (!buf) return -ENOMEM; pos = scnprintf(buf, bufsz, "%.*s\n", mvm->fw->phy_integration_ver_len, mvm->fw->phy_integration_ver); ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); kfree(buf); return ret; } #define PRINT_STATS_LE32(_struct, _memb) \ pos += scnprintf(buf + pos, bufsz - pos, \ fmt_table, #_memb, \ le32_to_cpu(_struct->_memb)) static ssize_t iwl_dbgfs_fw_rx_stats_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; static const char *fmt_table = "\t%-30s %10u\n"; static const char *fmt_header = "%-32s\n"; int pos = 0; char *buf; int ret; size_t bufsz; if (iwl_mvm_has_new_rx_stats_api(mvm)) bufsz = ((sizeof(struct mvm_statistics_rx) / sizeof(__le32)) * 43) + (4 * 33) + 1; else /* 43 = size of each data line; 33 = size of each header */ bufsz = ((sizeof(struct mvm_statistics_rx_v3) / sizeof(__le32)) * 43) + (4 * 33) + 1; buf = kzalloc(bufsz, GFP_KERNEL); if (!buf) return -ENOMEM; mutex_lock(&mvm->mutex); if (iwl_mvm_firmware_running(mvm)) iwl_mvm_request_statistics(mvm, false); pos += scnprintf(buf + pos, bufsz - pos, fmt_header, "Statistics_Rx - OFDM"); if (!iwl_mvm_has_new_rx_stats_api(mvm)) { struct mvm_statistics_rx_phy_v2 *ofdm = &mvm->rx_stats_v3.ofdm; PRINT_STATS_LE32(ofdm, ina_cnt); PRINT_STATS_LE32(ofdm, fina_cnt); PRINT_STATS_LE32(ofdm, plcp_err); PRINT_STATS_LE32(ofdm, crc32_err); PRINT_STATS_LE32(ofdm, overrun_err); PRINT_STATS_LE32(ofdm, early_overrun_err); PRINT_STATS_LE32(ofdm, crc32_good); PRINT_STATS_LE32(ofdm, false_alarm_cnt); PRINT_STATS_LE32(ofdm, fina_sync_err_cnt); PRINT_STATS_LE32(ofdm, sfd_timeout); PRINT_STATS_LE32(ofdm, fina_timeout); PRINT_STATS_LE32(ofdm, unresponded_rts); PRINT_STATS_LE32(ofdm, rxe_frame_lmt_overrun); PRINT_STATS_LE32(ofdm, sent_ack_cnt); PRINT_STATS_LE32(ofdm, sent_cts_cnt); PRINT_STATS_LE32(ofdm, sent_ba_rsp_cnt); PRINT_STATS_LE32(ofdm, dsp_self_kill); PRINT_STATS_LE32(ofdm, mh_format_err); PRINT_STATS_LE32(ofdm, re_acq_main_rssi_sum); 
PRINT_STATS_LE32(ofdm, reserved); } else { struct mvm_statistics_rx_phy *ofdm = &mvm->rx_stats.ofdm; PRINT_STATS_LE32(ofdm, unresponded_rts); PRINT_STATS_LE32(ofdm, rxe_frame_lmt_overrun); PRINT_STATS_LE32(ofdm, sent_ba_rsp_cnt); PRINT_STATS_LE32(ofdm, dsp_self_kill); PRINT_STATS_LE32(ofdm, reserved); } pos += scnprintf(buf + pos, bufsz - pos, fmt_header, "Statistics_Rx - CCK"); if (!iwl_mvm_has_new_rx_stats_api(mvm)) { struct mvm_statistics_rx_phy_v2 *cck = &mvm->rx_stats_v3.cck; PRINT_STATS_LE32(cck, ina_cnt); PRINT_STATS_LE32(cck, fina_cnt); PRINT_STATS_LE32(cck, plcp_err); PRINT_STATS_LE32(cck, crc32_err); PRINT_STATS_LE32(cck, overrun_err); PRINT_STATS_LE32(cck, early_overrun_err); PRINT_STATS_LE32(cck, crc32_good); PRINT_STATS_LE32(cck, false_alarm_cnt); PRINT_STATS_LE32(cck, fina_sync_err_cnt); PRINT_STATS_LE32(cck, sfd_timeout); PRINT_STATS_LE32(cck, fina_timeout); PRINT_STATS_LE32(cck, unresponded_rts); PRINT_STATS_LE32(cck, rxe_frame_lmt_overrun); PRINT_STATS_LE32(cck, sent_ack_cnt); PRINT_STATS_LE32(cck, sent_cts_cnt); PRINT_STATS_LE32(cck, sent_ba_rsp_cnt); PRINT_STATS_LE32(cck, dsp_self_kill); PRINT_STATS_LE32(cck, mh_format_err); PRINT_STATS_LE32(cck, re_acq_main_rssi_sum); PRINT_STATS_LE32(cck, reserved); } else { struct mvm_statistics_rx_phy *cck = &mvm->rx_stats.cck; PRINT_STATS_LE32(cck, unresponded_rts); PRINT_STATS_LE32(cck, rxe_frame_lmt_overrun); PRINT_STATS_LE32(cck, sent_ba_rsp_cnt); PRINT_STATS_LE32(cck, dsp_self_kill); PRINT_STATS_LE32(cck, reserved); } pos += scnprintf(buf + pos, bufsz - pos, fmt_header, "Statistics_Rx - GENERAL"); if (!iwl_mvm_has_new_rx_stats_api(mvm)) { struct mvm_statistics_rx_non_phy_v3 *general = &mvm->rx_stats_v3.general; PRINT_STATS_LE32(general, bogus_cts); PRINT_STATS_LE32(general, bogus_ack); PRINT_STATS_LE32(general, non_bssid_frames); PRINT_STATS_LE32(general, filtered_frames); PRINT_STATS_LE32(general, non_channel_beacons); PRINT_STATS_LE32(general, channel_beacons); PRINT_STATS_LE32(general, num_missed_bcon); PRINT_STATS_LE32(general, adc_rx_saturation_time); PRINT_STATS_LE32(general, ina_detection_search_time); PRINT_STATS_LE32(general, beacon_silence_rssi_a); PRINT_STATS_LE32(general, beacon_silence_rssi_b); PRINT_STATS_LE32(general, beacon_silence_rssi_c); PRINT_STATS_LE32(general, interference_data_flag); PRINT_STATS_LE32(general, channel_load); PRINT_STATS_LE32(general, dsp_false_alarms); PRINT_STATS_LE32(general, beacon_rssi_a); PRINT_STATS_LE32(general, beacon_rssi_b); PRINT_STATS_LE32(general, beacon_rssi_c); PRINT_STATS_LE32(general, beacon_energy_a); PRINT_STATS_LE32(general, beacon_energy_b); PRINT_STATS_LE32(general, beacon_energy_c); PRINT_STATS_LE32(general, num_bt_kills); PRINT_STATS_LE32(general, mac_id); PRINT_STATS_LE32(general, directed_data_mpdu); } else { struct mvm_statistics_rx_non_phy *general = &mvm->rx_stats.general; PRINT_STATS_LE32(general, bogus_cts); PRINT_STATS_LE32(general, bogus_ack); PRINT_STATS_LE32(general, non_channel_beacons); PRINT_STATS_LE32(general, channel_beacons); PRINT_STATS_LE32(general, num_missed_bcon); PRINT_STATS_LE32(general, adc_rx_saturation_time); PRINT_STATS_LE32(general, ina_detection_search_time); PRINT_STATS_LE32(general, beacon_silence_rssi_a); PRINT_STATS_LE32(general, beacon_silence_rssi_b); PRINT_STATS_LE32(general, beacon_silence_rssi_c); PRINT_STATS_LE32(general, interference_data_flag); PRINT_STATS_LE32(general, channel_load); PRINT_STATS_LE32(general, beacon_rssi_a); PRINT_STATS_LE32(general, beacon_rssi_b); PRINT_STATS_LE32(general, beacon_rssi_c); 
PRINT_STATS_LE32(general, beacon_energy_a); PRINT_STATS_LE32(general, beacon_energy_b); PRINT_STATS_LE32(general, beacon_energy_c); PRINT_STATS_LE32(general, num_bt_kills); PRINT_STATS_LE32(general, mac_id); } pos += scnprintf(buf + pos, bufsz - pos, fmt_header, "Statistics_Rx - HT"); if (!iwl_mvm_has_new_rx_stats_api(mvm)) { struct mvm_statistics_rx_ht_phy_v1 *ht = &mvm->rx_stats_v3.ofdm_ht; PRINT_STATS_LE32(ht, plcp_err); PRINT_STATS_LE32(ht, overrun_err); PRINT_STATS_LE32(ht, early_overrun_err); PRINT_STATS_LE32(ht, crc32_good); PRINT_STATS_LE32(ht, crc32_err); PRINT_STATS_LE32(ht, mh_format_err); PRINT_STATS_LE32(ht, agg_crc32_good); PRINT_STATS_LE32(ht, agg_mpdu_cnt); PRINT_STATS_LE32(ht, agg_cnt); PRINT_STATS_LE32(ht, unsupport_mcs); } else { struct mvm_statistics_rx_ht_phy *ht = &mvm->rx_stats.ofdm_ht; PRINT_STATS_LE32(ht, mh_format_err); PRINT_STATS_LE32(ht, agg_mpdu_cnt); PRINT_STATS_LE32(ht, agg_cnt); PRINT_STATS_LE32(ht, unsupport_mcs); } mutex_unlock(&mvm->mutex); ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); kfree(buf); return ret; } #undef PRINT_STAT_LE32 static ssize_t iwl_dbgfs_frame_stats_read(struct iwl_mvm *mvm, char __user *user_buf, size_t count, loff_t *ppos, struct iwl_mvm_frame_stats *stats) { char *buff, *pos, *endpos; int idx, i; int ret; static const size_t bufsz = 1024; buff = kmalloc(bufsz, GFP_KERNEL); if (!buff) return -ENOMEM; spin_lock_bh(&mvm->drv_stats_lock); pos = buff; endpos = pos + bufsz; pos += scnprintf(pos, endpos - pos, "Legacy/HT/VHT\t:\t%d/%d/%d\n", stats->legacy_frames, stats->ht_frames, stats->vht_frames); pos += scnprintf(pos, endpos - pos, "20/40/80\t:\t%d/%d/%d\n", stats->bw_20_frames, stats->bw_40_frames, stats->bw_80_frames); pos += scnprintf(pos, endpos - pos, "NGI/SGI\t\t:\t%d/%d\n", stats->ngi_frames, stats->sgi_frames); pos += scnprintf(pos, endpos - pos, "SISO/MIMO2\t:\t%d/%d\n", stats->siso_frames, stats->mimo2_frames); pos += scnprintf(pos, endpos - pos, "FAIL/SCSS\t:\t%d/%d\n", stats->fail_frames, stats->success_frames); pos += scnprintf(pos, endpos - pos, "MPDUs agg\t:\t%d\n", stats->agg_frames); pos += scnprintf(pos, endpos - pos, "A-MPDUs\t\t:\t%d\n", stats->ampdu_count); pos += scnprintf(pos, endpos - pos, "Avg MPDUs/A-MPDU:\t%d\n", stats->ampdu_count > 0 ? 
(stats->agg_frames / stats->ampdu_count) : 0); pos += scnprintf(pos, endpos - pos, "Last Rates\n"); idx = stats->last_frame_idx - 1; for (i = 0; i < ARRAY_SIZE(stats->last_rates); i++) { idx = (idx + 1) % ARRAY_SIZE(stats->last_rates); if (stats->last_rates[idx] == 0) continue; pos += scnprintf(pos, endpos - pos, "Rate[%d]: ", (int)(ARRAY_SIZE(stats->last_rates) - i)); pos += rs_pretty_print_rate_v1(pos, endpos - pos, stats->last_rates[idx]); if (pos < endpos - 1) *pos++ = '\n'; } spin_unlock_bh(&mvm->drv_stats_lock); ret = simple_read_from_buffer(user_buf, count, ppos, buff, pos - buff); kfree(buff); return ret; } static ssize_t iwl_dbgfs_drv_rx_stats_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; return iwl_dbgfs_frame_stats_read(mvm, user_buf, count, ppos, &mvm->drv_rx_stats); } static ssize_t iwl_dbgfs_fw_restart_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { int __maybe_unused ret; if (!iwl_mvm_firmware_running(mvm)) return -EIO; mutex_lock(&mvm->mutex); /* allow one more restart that we're provoking here */ if (mvm->fw_restart >= 0) mvm->fw_restart++; if (count == 6 && !strcmp(buf, "nolog\n")) { set_bit(IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE, &mvm->status); set_bit(STATUS_SUPPRESS_CMD_ERROR_ONCE, &mvm->trans->status); } /* take the return value to make compiler happy - it will fail anyway */ ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(LONG_GROUP, REPLY_ERROR), 0, 0, NULL); mutex_unlock(&mvm->mutex); return count; } static ssize_t iwl_dbgfs_fw_nmi_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { if (!iwl_mvm_firmware_running(mvm)) return -EIO; if (count == 6 && !strcmp(buf, "nolog\n")) set_bit(IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE, &mvm->status); iwl_force_nmi(mvm->trans); return count; } static ssize_t iwl_dbgfs_scan_ant_rxchain_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; int pos = 0; char buf[32]; const size_t bufsz = sizeof(buf); /* print which antennas were set for the scan command by the user */ pos += scnprintf(buf + pos, bufsz - pos, "Antennas for scan: "); if (mvm->scan_rx_ant & ANT_A) pos += scnprintf(buf + pos, bufsz - pos, "A"); if (mvm->scan_rx_ant & ANT_B) pos += scnprintf(buf + pos, bufsz - pos, "B"); pos += scnprintf(buf + pos, bufsz - pos, " (%hhx)\n", mvm->scan_rx_ant); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } static ssize_t iwl_dbgfs_scan_ant_rxchain_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { u8 scan_rx_ant; if (!iwl_mvm_firmware_running(mvm)) return -EIO; if (sscanf(buf, "%hhx", &scan_rx_ant) != 1) return -EINVAL; if (scan_rx_ant > ANT_ABC) return -EINVAL; if (scan_rx_ant & ~(iwl_mvm_get_valid_rx_ant(mvm))) return -EINVAL; if (mvm->scan_rx_ant != scan_rx_ant) { mvm->scan_rx_ant = scan_rx_ant; if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) iwl_mvm_config_scan(mvm); } return count; } static ssize_t iwl_dbgfs_indirection_tbl_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { struct iwl_rss_config_cmd cmd = { .flags = cpu_to_le32(IWL_RSS_ENABLE), .hash_mask = IWL_RSS_HASH_TYPE_IPV4_TCP | IWL_RSS_HASH_TYPE_IPV4_UDP | IWL_RSS_HASH_TYPE_IPV4_PAYLOAD | IWL_RSS_HASH_TYPE_IPV6_TCP | IWL_RSS_HASH_TYPE_IPV6_UDP | IWL_RSS_HASH_TYPE_IPV6_PAYLOAD, }; int ret, i, num_repeats, nbytes = count / 2; ret = hex2bin(cmd.indirection_table, buf, nbytes); if (ret) return ret; /* * The input is the redirection table, 
partial or full. * Repeat the pattern if needed. * For example, input of 01020F will be repeated 42 times, * indirecting RSS hash results to queues 1, 2, 15 (skipping * queues 3 - 14). */ num_repeats = ARRAY_SIZE(cmd.indirection_table) / nbytes; for (i = 1; i < num_repeats; i++) memcpy(&cmd.indirection_table[i * nbytes], cmd.indirection_table, nbytes); /* handle cut in the middle pattern for the last places */ memcpy(&cmd.indirection_table[i * nbytes], cmd.indirection_table, ARRAY_SIZE(cmd.indirection_table) % nbytes); netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key)); mutex_lock(&mvm->mutex); if (iwl_mvm_firmware_running(mvm)) ret = iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd); else ret = 0; mutex_unlock(&mvm->mutex); return ret ?: count; } static ssize_t iwl_dbgfs_inject_packet_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { struct iwl_op_mode *opmode = container_of((void *)mvm, struct iwl_op_mode, op_mode_specific); struct iwl_rx_cmd_buffer rxb = { ._rx_page_order = 0, .truesize = 0, /* not used */ ._offset = 0, }; struct iwl_rx_packet *pkt; int bin_len = count / 2; int ret = -EINVAL; if (!iwl_mvm_firmware_running(mvm)) return -EIO; /* supporting only MQ RX */ if (!mvm->trans->trans_cfg->mq_rx_supported) return -ENOTSUPP; rxb._page = alloc_pages(GFP_ATOMIC, 0); if (!rxb._page) return -ENOMEM; pkt = rxb_addr(&rxb); ret = hex2bin(page_address(rxb._page), buf, bin_len); if (ret) goto out; /* avoid invalid memory access and malformed packet */ if (bin_len < sizeof(*pkt) || bin_len != sizeof(*pkt) + iwl_rx_packet_payload_len(pkt)) goto out; local_bh_disable(); iwl_mvm_rx_mq(opmode, NULL, &rxb); local_bh_enable(); ret = 0; out: iwl_free_rxb(&rxb); return ret ?: count; } static int _iwl_dbgfs_inject_beacon_ie(struct iwl_mvm *mvm, char *bin, int len) { struct ieee80211_vif *vif; struct iwl_mvm_vif *mvmvif; struct sk_buff *beacon; struct ieee80211_tx_info *info; struct iwl_mac_beacon_cmd beacon_cmd = {}; u8 rate; int i; len /= 2; /* Element len should be represented by u8 */ if (len >= U8_MAX) return -EINVAL; if (!iwl_mvm_firmware_running(mvm)) return -EIO; if (!iwl_mvm_has_new_tx_api(mvm) && !fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_NEW_BEACON_TEMPLATE)) return -EINVAL; mutex_lock(&mvm->mutex); for (i = 0; i < NUM_MAC_INDEX_DRIVER; i++) { vif = iwl_mvm_rcu_dereference_vif_id(mvm, i, false); if (!vif) continue; if (vif->type == NL80211_IFTYPE_AP) break; } if (i == NUM_MAC_INDEX_DRIVER || !vif) goto out_err; mvm->hw->extra_beacon_tailroom = len; beacon = ieee80211_beacon_get_template(mvm->hw, vif, NULL); if (!beacon) goto out_err; if (len && hex2bin(skb_put_zero(beacon, len), bin, len)) { dev_kfree_skb(beacon); goto out_err; } mvm->beacon_inject_active = true; mvmvif = iwl_mvm_vif_from_mac80211(vif); info = IEEE80211_SKB_CB(beacon); rate = iwl_mvm_mac_ctxt_get_lowest_rate(info, vif); beacon_cmd.flags = cpu_to_le16(iwl_mvm_mac_ctxt_get_beacon_flags(mvm->fw, rate)); beacon_cmd.byte_cnt = cpu_to_le16((u16)beacon->len); beacon_cmd.template_id = cpu_to_le32((u32)mvmvif->id); iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd.tim_idx, &beacon_cmd.tim_size, beacon->data, beacon->len); iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd, sizeof(beacon_cmd)); mutex_unlock(&mvm->mutex); dev_kfree_skb(beacon); return 0; out_err: mutex_unlock(&mvm->mutex); return -EINVAL; } static ssize_t iwl_dbgfs_inject_beacon_ie_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { int ret = _iwl_dbgfs_inject_beacon_ie(mvm, buf, count); 
mvm->hw->extra_beacon_tailroom = 0; return ret ?: count; } static ssize_t iwl_dbgfs_inject_beacon_ie_restore_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { int ret = _iwl_dbgfs_inject_beacon_ie(mvm, NULL, 0); mvm->hw->extra_beacon_tailroom = 0; mvm->beacon_inject_active = false; return ret ?: count; } static ssize_t iwl_dbgfs_fw_dbg_conf_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; int conf; char buf[8]; const size_t bufsz = sizeof(buf); int pos = 0; mutex_lock(&mvm->mutex); conf = mvm->fwrt.dump.conf; mutex_unlock(&mvm->mutex); pos += scnprintf(buf + pos, bufsz - pos, "%d\n", conf); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } static ssize_t iwl_dbgfs_fw_dbg_conf_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { unsigned int conf_id; int ret; if (!iwl_mvm_firmware_running(mvm)) return -EIO; ret = kstrtouint(buf, 0, &conf_id); if (ret) return ret; if (WARN_ON(conf_id >= FW_DBG_CONF_MAX)) return -EINVAL; mutex_lock(&mvm->mutex); ret = iwl_fw_start_dbg_conf(&mvm->fwrt, conf_id); mutex_unlock(&mvm->mutex); return ret ?: count; } static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { if (count == 0) return 0; iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_USER_TRIGGER, NULL); iwl_fw_dbg_collect(&mvm->fwrt, FW_DBG_TRIGGER_USER, buf, (count - 1), NULL); return count; } static ssize_t iwl_dbgfs_dbg_time_point_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { u32 timepoint; if (kstrtou32(buf, 0, &timepoint)) return -EINVAL; if (timepoint == IWL_FW_INI_TIME_POINT_INVALID || timepoint >= IWL_FW_INI_TIME_POINT_NUM) return -EINVAL; iwl_dbg_tlv_time_point(&mvm->fwrt, timepoint, NULL); return count; } -#define ADD_TEXT(...) pos += scnprintf(buf + pos, bufsz - pos, __VA_ARGS__) -#ifdef CONFIG_IWLWIFI_BCAST_FILTERING -static ssize_t iwl_dbgfs_bcast_filters_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_mvm *mvm = file->private_data; - struct iwl_bcast_filter_cmd cmd; - const struct iwl_fw_bcast_filter *filter; - char *buf; - int bufsz = 1024; - int i, j, pos = 0; - ssize_t ret; - - buf = kzalloc(bufsz, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - mutex_lock(&mvm->mutex); - if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) { - ADD_TEXT("None\n"); - mutex_unlock(&mvm->mutex); - goto out; - } - mutex_unlock(&mvm->mutex); - - for (i = 0; cmd.filters[i].attrs[0].mask; i++) { - filter = &cmd.filters[i]; - - ADD_TEXT("Filter [%d]:\n", i); - ADD_TEXT("\tDiscard=%d\n", filter->discard); - ADD_TEXT("\tFrame Type: %s\n", - filter->frame_type ? "IPv4" : "Generic"); - - for (j = 0; j < ARRAY_SIZE(filter->attrs); j++) { - const struct iwl_fw_bcast_filter_attr *attr; - - attr = &filter->attrs[j]; - if (!attr->mask) - break; - - ADD_TEXT("\tAttr [%d]: offset=%d (from %s), mask=0x%x, value=0x%x reserved=0x%x\n", - j, attr->offset, - attr->offset_type ? 
"IP End" : - "Payload Start", - be32_to_cpu(attr->mask), - be32_to_cpu(attr->val), - le16_to_cpu(attr->reserved1)); - } - } -out: - ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); - kfree(buf); - return ret; -} - -static ssize_t iwl_dbgfs_bcast_filters_write(struct iwl_mvm *mvm, char *buf, - size_t count, loff_t *ppos) -{ - int pos, next_pos; - struct iwl_fw_bcast_filter filter = {}; - struct iwl_bcast_filter_cmd cmd; - u32 filter_id, attr_id, mask, value; - int err = 0; - - if (sscanf(buf, "%d %hhi %hhi %n", &filter_id, &filter.discard, - &filter.frame_type, &pos) != 3) - return -EINVAL; - - if (filter_id >= ARRAY_SIZE(mvm->dbgfs_bcast_filtering.cmd.filters) || - filter.frame_type > BCAST_FILTER_FRAME_TYPE_IPV4) - return -EINVAL; - - for (attr_id = 0; attr_id < ARRAY_SIZE(filter.attrs); - attr_id++) { - struct iwl_fw_bcast_filter_attr *attr = - &filter.attrs[attr_id]; - - if (pos >= count) - break; - - if (sscanf(&buf[pos], "%hhi %hhi %i %i %n", - &attr->offset, &attr->offset_type, - &mask, &value, &next_pos) != 4) - return -EINVAL; - - attr->mask = cpu_to_be32(mask); - attr->val = cpu_to_be32(value); - if (mask) - filter.num_attrs++; - - pos += next_pos; - } - - mutex_lock(&mvm->mutex); - memcpy(&mvm->dbgfs_bcast_filtering.cmd.filters[filter_id], - &filter, sizeof(filter)); - - /* send updated bcast filtering configuration */ - if (iwl_mvm_firmware_running(mvm) && - mvm->dbgfs_bcast_filtering.override && - iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) - err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0, - sizeof(cmd), &cmd); - mutex_unlock(&mvm->mutex); - - return err ?: count; -} - -static ssize_t iwl_dbgfs_bcast_filters_macs_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_mvm *mvm = file->private_data; - struct iwl_bcast_filter_cmd cmd; - char *buf; - int bufsz = 1024; - int i, pos = 0; - ssize_t ret; - - buf = kzalloc(bufsz, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - mutex_lock(&mvm->mutex); - if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) { - ADD_TEXT("None\n"); - mutex_unlock(&mvm->mutex); - goto out; - } - mutex_unlock(&mvm->mutex); - - for (i = 0; i < ARRAY_SIZE(cmd.macs); i++) { - const struct iwl_fw_bcast_mac *mac = &cmd.macs[i]; - - ADD_TEXT("Mac [%d]: discard=%d attached_filters=0x%x\n", - i, mac->default_discard, mac->attached_filters); - } -out: - ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); - kfree(buf); - return ret; -} - -static ssize_t iwl_dbgfs_bcast_filters_macs_write(struct iwl_mvm *mvm, - char *buf, size_t count, - loff_t *ppos) -{ - struct iwl_bcast_filter_cmd cmd; - struct iwl_fw_bcast_mac mac = {}; - u32 mac_id, attached_filters; - int err = 0; - - if (!mvm->bcast_filters) - return -ENOENT; - - if (sscanf(buf, "%d %hhi %i", &mac_id, &mac.default_discard, - &attached_filters) != 3) - return -EINVAL; - - if (mac_id >= ARRAY_SIZE(cmd.macs) || - mac.default_discard > 1 || - attached_filters >= BIT(ARRAY_SIZE(cmd.filters))) - return -EINVAL; - - mac.attached_filters = cpu_to_le16(attached_filters); - - mutex_lock(&mvm->mutex); - memcpy(&mvm->dbgfs_bcast_filtering.cmd.macs[mac_id], - &mac, sizeof(mac)); - - /* send updated bcast filtering configuration */ - if (iwl_mvm_firmware_running(mvm) && - mvm->dbgfs_bcast_filtering.override && - iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) - err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0, - sizeof(cmd), &cmd); - mutex_unlock(&mvm->mutex); - - return err ?: count; -} -#endif - #define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \ 
_MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct iwl_mvm) #define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \ _MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct iwl_mvm) #define MVM_DEBUGFS_ADD_FILE_ALIAS(alias, name, parent, mode) do { \ debugfs_create_file(alias, mode, parent, mvm, \ &iwl_dbgfs_##name##_ops); \ } while (0) #define MVM_DEBUGFS_ADD_FILE(name, parent, mode) \ MVM_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode) #define MVM_DEBUGFS_WRITE_STA_FILE_OPS(name, bufsz) \ _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_sta) #define MVM_DEBUGFS_READ_WRITE_STA_FILE_OPS(name, bufsz) \ _MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct ieee80211_sta) #define MVM_DEBUGFS_ADD_STA_FILE_ALIAS(alias, name, parent, mode) do { \ debugfs_create_file(alias, mode, parent, sta, \ &iwl_dbgfs_##name##_ops); \ } while (0) #define MVM_DEBUGFS_ADD_STA_FILE(name, parent, mode) \ MVM_DEBUGFS_ADD_STA_FILE_ALIAS(#name, name, parent, mode) static ssize_t iwl_dbgfs_prph_reg_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; int pos = 0; char buf[32]; const size_t bufsz = sizeof(buf); if (!mvm->dbgfs_prph_reg_addr) return -EINVAL; pos += scnprintf(buf + pos, bufsz - pos, "Reg 0x%x: (0x%x)\n", mvm->dbgfs_prph_reg_addr, iwl_read_prph(mvm->trans, mvm->dbgfs_prph_reg_addr)); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } static ssize_t iwl_dbgfs_prph_reg_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { u8 args; u32 value; args = sscanf(buf, "%i %i", &mvm->dbgfs_prph_reg_addr, &value); /* if we only want to set the reg address - nothing more to do */ if (args == 1) goto out; /* otherwise, make sure we have both address and value */ if (args != 2) return -EINVAL; iwl_write_prph(mvm->trans, mvm->dbgfs_prph_reg_addr, value); out: return count; } static ssize_t iwl_dbgfs_send_echo_cmd_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { int ret; if (!iwl_mvm_firmware_running(mvm)) return -EIO; mutex_lock(&mvm->mutex); ret = iwl_mvm_send_cmd_pdu(mvm, ECHO_CMD, 0, 0, NULL); mutex_unlock(&mvm->mutex); return ret ?: count; } struct iwl_mvm_sniffer_apply { struct iwl_mvm *mvm; u8 *bssid; u16 aid; }; static bool iwl_mvm_sniffer_apply(struct iwl_notif_wait_data *notif_data, struct iwl_rx_packet *pkt, void *data) { struct iwl_mvm_sniffer_apply *apply = data; apply->mvm->cur_aid = cpu_to_le16(apply->aid); memcpy(apply->mvm->cur_bssid, apply->bssid, sizeof(apply->mvm->cur_bssid)); return true; } static ssize_t iwl_dbgfs_he_sniffer_params_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { struct iwl_notification_wait wait; struct iwl_he_monitor_cmd he_mon_cmd = {}; struct iwl_mvm_sniffer_apply apply = { .mvm = mvm, }; u16 wait_cmds[] = { WIDE_ID(DATA_PATH_GROUP, HE_AIR_SNIFFER_CONFIG_CMD), }; u32 aid; int ret; if (!iwl_mvm_firmware_running(mvm)) return -EIO; ret = sscanf(buf, "%x %2hhx:%2hhx:%2hhx:%2hhx:%2hhx:%2hhx", &aid, &he_mon_cmd.bssid[0], &he_mon_cmd.bssid[1], &he_mon_cmd.bssid[2], &he_mon_cmd.bssid[3], &he_mon_cmd.bssid[4], &he_mon_cmd.bssid[5]); if (ret != 7) return -EINVAL; he_mon_cmd.aid = cpu_to_le16(aid); apply.aid = aid; apply.bssid = (void *)he_mon_cmd.bssid; mutex_lock(&mvm->mutex); /* * Use the notification waiter to get our function triggered * in sequence with other RX. 
This ensures that frames we get * on the RX queue _before_ the new configuration is applied * still have mvm->cur_aid pointing to the old AID, and that * frames on the RX queue _after_ the firmware processed the * new configuration (and sent the response, synchronously) * get mvm->cur_aid correctly set to the new AID. */ iwl_init_notification_wait(&mvm->notif_wait, &wait, wait_cmds, ARRAY_SIZE(wait_cmds), iwl_mvm_sniffer_apply, &apply); ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(DATA_PATH_GROUP, HE_AIR_SNIFFER_CONFIG_CMD), 0, sizeof(he_mon_cmd), &he_mon_cmd); /* no need to really wait, we already did anyway */ iwl_remove_notification(&mvm->notif_wait, &wait); mutex_unlock(&mvm->mutex); return ret ?: count; } static ssize_t iwl_dbgfs_he_sniffer_params_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; u8 buf[32]; int len; len = scnprintf(buf, sizeof(buf), "%d %02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx\n", le16_to_cpu(mvm->cur_aid), mvm->cur_bssid[0], mvm->cur_bssid[1], mvm->cur_bssid[2], mvm->cur_bssid[3], mvm->cur_bssid[4], mvm->cur_bssid[5]); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static ssize_t iwl_dbgfs_uapsd_noagg_bssids_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; u8 buf[IWL_MVM_UAPSD_NOAGG_BSSIDS_NUM * ETH_ALEN * 3 + 1]; unsigned int pos = 0; size_t bufsz = sizeof(buf); int i; mutex_lock(&mvm->mutex); for (i = 0; i < IWL_MVM_UAPSD_NOAGG_LIST_LEN; i++) pos += scnprintf(buf + pos, bufsz - pos, "%pM\n", mvm->uapsd_noagg_bssids[i].addr); mutex_unlock(&mvm->mutex); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } static ssize_t iwl_dbgfs_ltr_config_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { int ret; struct iwl_ltr_config_cmd ltr_config = {0}; if (!iwl_mvm_firmware_running(mvm)) return -EIO; if (sscanf(buf, "%x,%x,%x,%x,%x,%x,%x", <r_config.flags, <r_config.static_long, <r_config.static_short, <r_config.ltr_cfg_values[0], <r_config.ltr_cfg_values[1], <r_config.ltr_cfg_values[2], <r_config.ltr_cfg_values[3]) != 7) { return -EINVAL; } mutex_lock(&mvm->mutex); ret = iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0, sizeof(ltr_config), <r_config); mutex_unlock(&mvm->mutex); if (ret) IWL_ERR(mvm, "failed to send ltr configuration cmd\n"); return ret ?: count; } static ssize_t iwl_dbgfs_rfi_freq_table_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { int ret = 0; u16 op_id; if (kstrtou16(buf, 10, &op_id)) return -EINVAL; /* value zero triggers re-sending the default table to the device */ if (!op_id) { mutex_lock(&mvm->mutex); ret = iwl_rfi_send_config_cmd(mvm, NULL); mutex_unlock(&mvm->mutex); } else { ret = -EOPNOTSUPP; /* in the future a new table will be added */ } return ret ?: count; } /* The size computation is as follows: * each number needs at most 3 characters, number of rows is the size of * the table; So, need 5 chars for the "freq: " part and each tuple afterwards * needs 6 characters for numbers and 5 for the punctuation around. 
*/ #define IWL_RFI_BUF_SIZE (IWL_RFI_LUT_INSTALLED_SIZE *\ (5 + IWL_RFI_LUT_ENTRY_CHANNELS_NUM * (6 + 5))) static ssize_t iwl_dbgfs_rfi_freq_table_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; struct iwl_rfi_freq_table_resp_cmd *resp; u32 status; char buf[IWL_RFI_BUF_SIZE]; int i, j, pos = 0; resp = iwl_rfi_get_freq_table(mvm); if (IS_ERR(resp)) return PTR_ERR(resp); status = le32_to_cpu(resp->status); if (status != RFI_FREQ_TABLE_OK) { scnprintf(buf, IWL_RFI_BUF_SIZE, "status = %d\n", status); goto out; } for (i = 0; i < ARRAY_SIZE(resp->table); i++) { pos += scnprintf(buf + pos, IWL_RFI_BUF_SIZE - pos, "%d: ", resp->table[i].freq); for (j = 0; j < ARRAY_SIZE(resp->table[i].channels); j++) pos += scnprintf(buf + pos, IWL_RFI_BUF_SIZE - pos, "(%d, %d) ", resp->table[i].channels[j], resp->table[i].bands[j]); pos += scnprintf(buf + pos, IWL_RFI_BUF_SIZE - pos, "\n"); } out: kfree(resp); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } MVM_DEBUGFS_READ_WRITE_FILE_OPS(prph_reg, 64); /* Device wide debugfs entries */ MVM_DEBUGFS_READ_FILE_OPS(ctdp_budget); MVM_DEBUGFS_WRITE_FILE_OPS(stop_ctdp, 8); MVM_DEBUGFS_WRITE_FILE_OPS(force_ctkill, 8); MVM_DEBUGFS_WRITE_FILE_OPS(tx_flush, 16); MVM_DEBUGFS_WRITE_FILE_OPS(sta_drain, 8); MVM_DEBUGFS_WRITE_FILE_OPS(send_echo_cmd, 8); MVM_DEBUGFS_READ_WRITE_FILE_OPS(sram, 64); MVM_DEBUGFS_READ_WRITE_FILE_OPS(set_nic_temperature, 64); MVM_DEBUGFS_READ_FILE_OPS(nic_temp); MVM_DEBUGFS_READ_FILE_OPS(stations); MVM_DEBUGFS_READ_FILE_OPS(rs_data); MVM_DEBUGFS_READ_FILE_OPS(bt_notif); MVM_DEBUGFS_READ_FILE_OPS(bt_cmd); MVM_DEBUGFS_READ_WRITE_FILE_OPS(disable_power_off, 64); MVM_DEBUGFS_READ_FILE_OPS(fw_rx_stats); MVM_DEBUGFS_READ_FILE_OPS(drv_rx_stats); MVM_DEBUGFS_READ_FILE_OPS(fw_ver); MVM_DEBUGFS_READ_FILE_OPS(phy_integration_ver); MVM_DEBUGFS_WRITE_FILE_OPS(fw_restart, 10); MVM_DEBUGFS_WRITE_FILE_OPS(fw_nmi, 10); MVM_DEBUGFS_WRITE_FILE_OPS(bt_tx_prio, 10); MVM_DEBUGFS_WRITE_FILE_OPS(bt_force_ant, 10); MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain, 8); MVM_DEBUGFS_READ_WRITE_FILE_OPS(fw_dbg_conf, 8); MVM_DEBUGFS_WRITE_FILE_OPS(fw_dbg_collect, 64); MVM_DEBUGFS_WRITE_FILE_OPS(dbg_time_point, 64); MVM_DEBUGFS_WRITE_FILE_OPS(indirection_tbl, (IWL_RSS_INDIRECTION_TABLE_SIZE * 2)); MVM_DEBUGFS_WRITE_FILE_OPS(inject_packet, 512); MVM_DEBUGFS_WRITE_FILE_OPS(inject_beacon_ie, 512); MVM_DEBUGFS_WRITE_FILE_OPS(inject_beacon_ie_restore, 512); MVM_DEBUGFS_READ_FILE_OPS(uapsd_noagg_bssids); -#ifdef CONFIG_IWLWIFI_BCAST_FILTERING -MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters, 256); -MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters_macs, 256); -#endif - #ifdef CONFIG_ACPI MVM_DEBUGFS_READ_FILE_OPS(sar_geo_profile); #endif MVM_DEBUGFS_READ_WRITE_STA_FILE_OPS(amsdu_len, 16); MVM_DEBUGFS_READ_WRITE_FILE_OPS(he_sniffer_params, 32); MVM_DEBUGFS_WRITE_FILE_OPS(ltr_config, 512); MVM_DEBUGFS_READ_WRITE_FILE_OPS(rfi_freq_table, 16); static ssize_t iwl_dbgfs_mem_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; struct iwl_dbg_mem_access_cmd cmd = {}; struct iwl_dbg_mem_access_rsp *rsp; struct iwl_host_cmd hcmd = { .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL, .data = { &cmd, }, .len = { sizeof(cmd) }, }; size_t delta; ssize_t ret, len; if (!iwl_mvm_firmware_running(mvm)) return -EIO; hcmd.id = WIDE_ID(DEBUG_GROUP, *ppos >> 24 ? 
UMAC_RD_WR : LMAC_RD_WR); cmd.op = cpu_to_le32(DEBUG_MEM_OP_READ); /* Take care of alignment of both the position and the length */ delta = *ppos & 0x3; cmd.addr = cpu_to_le32(*ppos - delta); cmd.len = cpu_to_le32(min(ALIGN(count + delta, 4) / 4, (size_t)DEBUG_MEM_MAX_SIZE_DWORDS)); mutex_lock(&mvm->mutex); ret = iwl_mvm_send_cmd(mvm, &hcmd); mutex_unlock(&mvm->mutex); if (ret < 0) return ret; rsp = (void *)hcmd.resp_pkt->data; if (le32_to_cpu(rsp->status) != DEBUG_MEM_STATUS_SUCCESS) { ret = -ENXIO; goto out; } len = min((size_t)le32_to_cpu(rsp->len) << 2, iwl_rx_packet_payload_len(hcmd.resp_pkt) - sizeof(*rsp)); len = min(len - delta, count); if (len < 0) { ret = -EFAULT; goto out; } ret = len - copy_to_user(user_buf, (u8 *)rsp->data + delta, len); *ppos += ret; out: iwl_free_resp(&hcmd); return ret; } static ssize_t iwl_dbgfs_mem_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; struct iwl_dbg_mem_access_cmd *cmd; struct iwl_dbg_mem_access_rsp *rsp; struct iwl_host_cmd hcmd = {}; size_t cmd_size; size_t data_size; u32 op, len; ssize_t ret; if (!iwl_mvm_firmware_running(mvm)) return -EIO; hcmd.id = WIDE_ID(DEBUG_GROUP, *ppos >> 24 ? UMAC_RD_WR : LMAC_RD_WR); if (*ppos & 0x3 || count < 4) { op = DEBUG_MEM_OP_WRITE_BYTES; len = min(count, (size_t)(4 - (*ppos & 0x3))); data_size = len; } else { op = DEBUG_MEM_OP_WRITE; len = min(count >> 2, (size_t)DEBUG_MEM_MAX_SIZE_DWORDS); data_size = len << 2; } cmd_size = sizeof(*cmd) + ALIGN(data_size, 4); cmd = kzalloc(cmd_size, GFP_KERNEL); if (!cmd) return -ENOMEM; cmd->op = cpu_to_le32(op); cmd->len = cpu_to_le32(len); cmd->addr = cpu_to_le32(*ppos); if (copy_from_user((void *)cmd->data, user_buf, data_size)) { kfree(cmd); return -EFAULT; } hcmd.flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL, hcmd.data[0] = (void *)cmd; hcmd.len[0] = cmd_size; mutex_lock(&mvm->mutex); ret = iwl_mvm_send_cmd(mvm, &hcmd); mutex_unlock(&mvm->mutex); kfree(cmd); if (ret < 0) return ret; rsp = (void *)hcmd.resp_pkt->data; if (rsp->status != DEBUG_MEM_STATUS_SUCCESS) { ret = -ENXIO; goto out; } ret = data_size; *ppos += ret; out: iwl_free_resp(&hcmd); return ret; } static const struct file_operations iwl_dbgfs_mem_ops = { .read = iwl_dbgfs_mem_read, .write = iwl_dbgfs_mem_write, .open = simple_open, .llseek = default_llseek, }; void iwl_mvm_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct dentry *dir) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); if (iwl_mvm_has_tlc_offload(mvm)) { MVM_DEBUGFS_ADD_STA_FILE(rs_data, dir, 0400); } MVM_DEBUGFS_ADD_STA_FILE(amsdu_len, dir, 0600); } void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm) { struct dentry *bcast_dir __maybe_unused; - char buf[100]; spin_lock_init(&mvm->drv_stats_lock); MVM_DEBUGFS_ADD_FILE(tx_flush, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(sta_drain, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(sram, mvm->debugfs_dir, 0600); MVM_DEBUGFS_ADD_FILE(set_nic_temperature, mvm->debugfs_dir, 0600); MVM_DEBUGFS_ADD_FILE(nic_temp, mvm->debugfs_dir, 0400); MVM_DEBUGFS_ADD_FILE(ctdp_budget, mvm->debugfs_dir, 0400); MVM_DEBUGFS_ADD_FILE(stop_ctdp, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(force_ctkill, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(stations, mvm->debugfs_dir, 0400); MVM_DEBUGFS_ADD_FILE(bt_notif, mvm->debugfs_dir, 0400); MVM_DEBUGFS_ADD_FILE(bt_cmd, mvm->debugfs_dir, 0400); MVM_DEBUGFS_ADD_FILE(disable_power_off, mvm->debugfs_dir, 0600); MVM_DEBUGFS_ADD_FILE(fw_ver, 
mvm->debugfs_dir, 0400); MVM_DEBUGFS_ADD_FILE(fw_rx_stats, mvm->debugfs_dir, 0400); MVM_DEBUGFS_ADD_FILE(drv_rx_stats, mvm->debugfs_dir, 0400); MVM_DEBUGFS_ADD_FILE(fw_restart, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(fw_nmi, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(bt_tx_prio, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(bt_force_ant, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(scan_ant_rxchain, mvm->debugfs_dir, 0600); MVM_DEBUGFS_ADD_FILE(prph_reg, mvm->debugfs_dir, 0600); MVM_DEBUGFS_ADD_FILE(fw_dbg_conf, mvm->debugfs_dir, 0600); MVM_DEBUGFS_ADD_FILE(fw_dbg_collect, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(dbg_time_point, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(send_echo_cmd, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(indirection_tbl, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(inject_packet, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(inject_beacon_ie, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(inject_beacon_ie_restore, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(rfi_freq_table, mvm->debugfs_dir, 0600); if (mvm->fw->phy_integration_ver) MVM_DEBUGFS_ADD_FILE(phy_integration_ver, mvm->debugfs_dir, 0400); #ifdef CONFIG_ACPI MVM_DEBUGFS_ADD_FILE(sar_geo_profile, mvm->debugfs_dir, 0400); #endif MVM_DEBUGFS_ADD_FILE(he_sniffer_params, mvm->debugfs_dir, 0600); if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_LTR_GEN2)) MVM_DEBUGFS_ADD_FILE(ltr_config, mvm->debugfs_dir, 0200); debugfs_create_bool("enable_scan_iteration_notif", 0600, mvm->debugfs_dir, &mvm->scan_iter_notif_enabled); debugfs_create_bool("drop_bcn_ap_mode", 0600, mvm->debugfs_dir, &mvm->drop_bcn_ap_mode); MVM_DEBUGFS_ADD_FILE(uapsd_noagg_bssids, mvm->debugfs_dir, S_IRUSR); -#ifdef CONFIG_IWLWIFI_BCAST_FILTERING - if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING) { - bcast_dir = debugfs_create_dir("bcast_filtering", - mvm->debugfs_dir); - - debugfs_create_bool("override", 0600, bcast_dir, - &mvm->dbgfs_bcast_filtering.override); - - MVM_DEBUGFS_ADD_FILE_ALIAS("filters", bcast_filters, - bcast_dir, 0600); - MVM_DEBUGFS_ADD_FILE_ALIAS("macs", bcast_filters_macs, - bcast_dir, 0600); - } -#endif - #ifdef CONFIG_PM_SLEEP MVM_DEBUGFS_ADD_FILE(d3_test, mvm->debugfs_dir, 0400); debugfs_create_bool("d3_wake_sysassert", 0600, mvm->debugfs_dir, &mvm->d3_wake_sysassert); debugfs_create_u32("last_netdetect_scans", 0400, mvm->debugfs_dir, &mvm->last_netdetect_scans); #endif debugfs_create_u8("ps_disabled", 0400, mvm->debugfs_dir, &mvm->ps_disabled); debugfs_create_blob("nvm_hw", 0400, mvm->debugfs_dir, &mvm->nvm_hw_blob); debugfs_create_blob("nvm_sw", 0400, mvm->debugfs_dir, &mvm->nvm_sw_blob); debugfs_create_blob("nvm_calib", 0400, mvm->debugfs_dir, &mvm->nvm_calib_blob); debugfs_create_blob("nvm_prod", 0400, mvm->debugfs_dir, &mvm->nvm_prod_blob); debugfs_create_blob("nvm_phy_sku", 0400, mvm->debugfs_dir, &mvm->nvm_phy_sku_blob); debugfs_create_blob("nvm_reg", S_IRUSR, mvm->debugfs_dir, &mvm->nvm_reg_blob); debugfs_create_file("mem", 0600, mvm->debugfs_dir, mvm, &iwl_dbgfs_mem_ops); /* * Create a symlink with mac80211. It will be removed when mac80211 * exists (before the opmode exists which removes the target.) 
*/ - snprintf(buf, 100, "../../%pd2", mvm->debugfs_dir->d_parent); - debugfs_create_symlink("iwlwifi", mvm->hw->wiphy->debugfsdir, buf); + if (!IS_ERR(mvm->debugfs_dir)) { + char buf[100]; + + snprintf(buf, 100, "../../%pd2", mvm->debugfs_dir->d_parent); + debugfs_create_symlink("iwlwifi", mvm->hw->wiphy->debugfsdir, + buf); + } } diff --git a/sys/contrib/dev/iwlwifi/mvm/fw.c b/sys/contrib/dev/iwlwifi/mvm/fw.c index 7da7b1c00811..747d03471f0d 100644 --- a/sys/contrib/dev/iwlwifi/mvm/fw.c +++ b/sys/contrib/dev/iwlwifi/mvm/fw.c @@ -1,1893 +1,1742 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2012-2014, 2018-2022 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #include #include #include #include "iwl-trans.h" #include "iwl-op-mode.h" #include "fw/img.h" #include "iwl-debug.h" #include "iwl-prph.h" #include "fw/acpi.h" #include "fw/pnvm.h" #include "mvm.h" #include "fw/dbg.h" #include "iwl-phy-db.h" #include "iwl-modparams.h" #include "iwl-nvm-parse.h" #define MVM_UCODE_ALIVE_TIMEOUT (HZ) #define MVM_UCODE_CALIB_TIMEOUT (2 * HZ) -#define IWL_PPAG_MASK 3 -#define IWL_PPAG_ETSI_MASK BIT(0) - #define IWL_TAS_US_MCC 0x5553 #define IWL_TAS_CANADA_MCC 0x4341 struct iwl_mvm_alive_data { bool valid; u32 scd_base_addr; }; static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant) { struct iwl_tx_ant_cfg_cmd tx_ant_cmd = { .valid = cpu_to_le32(valid_tx_ant), }; IWL_DEBUG_FW(mvm, "select valid tx ant: %u\n", valid_tx_ant); return iwl_mvm_send_cmd_pdu(mvm, TX_ANT_CONFIGURATION_CMD, 0, sizeof(tx_ant_cmd), &tx_ant_cmd); } static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm) { int i; struct iwl_rss_config_cmd cmd = { .flags = cpu_to_le32(IWL_RSS_ENABLE), .hash_mask = BIT(IWL_RSS_HASH_TYPE_IPV4_TCP) | BIT(IWL_RSS_HASH_TYPE_IPV4_UDP) | BIT(IWL_RSS_HASH_TYPE_IPV4_PAYLOAD) | BIT(IWL_RSS_HASH_TYPE_IPV6_TCP) | BIT(IWL_RSS_HASH_TYPE_IPV6_UDP) | BIT(IWL_RSS_HASH_TYPE_IPV6_PAYLOAD), }; if (mvm->trans->num_rx_queues == 1) return 0; /* Do not direct RSS traffic to Q 0 which is our fallback queue */ for (i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++) cmd.indirection_table[i] = 1 + (i % (mvm->trans->num_rx_queues - 1)); netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key)); return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd); } static int iwl_mvm_send_dqa_cmd(struct iwl_mvm *mvm) { struct iwl_dqa_enable_cmd dqa_cmd = { .cmd_queue = cpu_to_le32(IWL_MVM_DQA_CMD_QUEUE), }; u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, DQA_ENABLE_CMD); int ret; ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd); if (ret) IWL_ERR(mvm, "Failed to send DQA enabling command: %d\n", ret); else IWL_DEBUG_FW(mvm, "Working in DQA mode\n"); return ret; } void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_mfu_assert_dump_notif *mfu_dump_notif = (void *)pkt->data; __le32 *dump_data = mfu_dump_notif->data; int n_words = le32_to_cpu(mfu_dump_notif->data_size) / sizeof(__le32); int i; if (mfu_dump_notif->index_num == 0) IWL_INFO(mvm, "MFUART assert id 0x%x occurred\n", le32_to_cpu(mfu_dump_notif->assert_id)); for (i = 0; i < n_words; i++) IWL_DEBUG_INFO(mvm, "MFUART assert dump, dword %u: 0x%08x\n", le16_to_cpu(mfu_dump_notif->index_num) * n_words + i, le32_to_cpu(dump_data[i])); } static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait, struct iwl_rx_packet *pkt, void *data) { unsigned int pkt_len = 
iwl_rx_packet_payload_len(pkt); struct iwl_mvm *mvm = container_of(notif_wait, struct iwl_mvm, notif_wait); struct iwl_mvm_alive_data *alive_data = data; struct iwl_umac_alive *umac; struct iwl_lmac_alive *lmac1; struct iwl_lmac_alive *lmac2 = NULL; u16 status; u32 lmac_error_event_table, umac_error_table; u32 version = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP, UCODE_ALIVE_NTFY, 0); + u32 i; if (version == 6) { struct iwl_alive_ntf_v6 *palive; if (pkt_len < sizeof(*palive)) return false; palive = (void *)pkt->data; mvm->trans->dbg.imr_data.imr_enable = le32_to_cpu(palive->imr.enabled); mvm->trans->dbg.imr_data.imr_size = le32_to_cpu(palive->imr.size); mvm->trans->dbg.imr_data.imr2sram_remainbyte = mvm->trans->dbg.imr_data.imr_size; mvm->trans->dbg.imr_data.imr_base_addr = palive->imr.base_addr; mvm->trans->dbg.imr_data.imr_curr_addr = le64_to_cpu(mvm->trans->dbg.imr_data.imr_base_addr); IWL_DEBUG_FW(mvm, "IMR Enabled: 0x0%x size 0x0%x Address 0x%016llx\n", mvm->trans->dbg.imr_data.imr_enable, mvm->trans->dbg.imr_data.imr_size, le64_to_cpu(mvm->trans->dbg.imr_data.imr_base_addr)); + + if (!mvm->trans->dbg.imr_data.imr_enable) { + for (i = 0; i < ARRAY_SIZE(mvm->trans->dbg.active_regions); i++) { + struct iwl_ucode_tlv *reg_tlv; + struct iwl_fw_ini_region_tlv *reg; + + reg_tlv = mvm->trans->dbg.active_regions[i]; + if (!reg_tlv) + continue; + + reg = (void *)reg_tlv->data; + /* + * We have only one DRAM IMR region, so we + * can break as soon as we find the first + * one. + */ + if (reg->type == IWL_FW_INI_REGION_DRAM_IMR) { + mvm->trans->dbg.unsupported_region_msk |= BIT(i); + break; + } + } + } } if (version >= 5) { struct iwl_alive_ntf_v5 *palive; if (pkt_len < sizeof(*palive)) return false; palive = (void *)pkt->data; umac = &palive->umac_data; lmac1 = &palive->lmac_data[0]; lmac2 = &palive->lmac_data[1]; status = le16_to_cpu(palive->status); mvm->trans->sku_id[0] = le32_to_cpu(palive->sku_id.data[0]); mvm->trans->sku_id[1] = le32_to_cpu(palive->sku_id.data[1]); mvm->trans->sku_id[2] = le32_to_cpu(palive->sku_id.data[2]); IWL_DEBUG_FW(mvm, "Got sku_id: 0x0%x 0x0%x 0x0%x\n", mvm->trans->sku_id[0], mvm->trans->sku_id[1], mvm->trans->sku_id[2]); } else if (iwl_rx_packet_payload_len(pkt) == sizeof(struct iwl_alive_ntf_v4)) { struct iwl_alive_ntf_v4 *palive; if (pkt_len < sizeof(*palive)) return false; palive = (void *)pkt->data; umac = &palive->umac_data; lmac1 = &palive->lmac_data[0]; lmac2 = &palive->lmac_data[1]; status = le16_to_cpu(palive->status); } else if (iwl_rx_packet_payload_len(pkt) == sizeof(struct iwl_alive_ntf_v3)) { struct iwl_alive_ntf_v3 *palive3; if (pkt_len < sizeof(*palive3)) return false; palive3 = (void *)pkt->data; umac = &palive3->umac_data; lmac1 = &palive3->lmac_data; status = le16_to_cpu(palive3->status); } else { WARN(1, "unsupported alive notification (size %d)\n", iwl_rx_packet_payload_len(pkt)); /* get timeout later */ return false; } lmac_error_event_table = le32_to_cpu(lmac1->dbg_ptrs.error_event_table_ptr); iwl_fw_lmac1_set_alive_err_table(mvm->trans, lmac_error_event_table); if (lmac2) mvm->trans->dbg.lmac_error_event_table[1] = le32_to_cpu(lmac2->dbg_ptrs.error_event_table_ptr); umac_error_table = le32_to_cpu(umac->dbg_ptrs.error_info_addr) & ~FW_ADDR_CACHE_CONTROL; if (umac_error_table) { if (umac_error_table >= mvm->trans->cfg->min_umac_error_event_table) { iwl_fw_umac_set_alive_err_table(mvm->trans, umac_error_table); } else { IWL_ERR(mvm, "Not valid error log pointer 0x%08X for %s uCode\n", umac_error_table, (mvm->fwrt.cur_fw_img == 
IWL_UCODE_INIT) ? "Init" : "RT"); } } alive_data->scd_base_addr = le32_to_cpu(lmac1->dbg_ptrs.scd_base_ptr); alive_data->valid = status == IWL_ALIVE_STATUS_OK; IWL_DEBUG_FW(mvm, "Alive ucode status 0x%04x revision 0x%01X 0x%01X\n", status, lmac1->ver_type, lmac1->ver_subtype); if (lmac2) IWL_DEBUG_FW(mvm, "Alive ucode CDB\n"); IWL_DEBUG_FW(mvm, "UMAC version: Major - 0x%x, Minor - 0x%x\n", le32_to_cpu(umac->umac_major), le32_to_cpu(umac->umac_minor)); iwl_fwrt_update_fw_versions(&mvm->fwrt, lmac1, umac); return true; } static bool iwl_wait_init_complete(struct iwl_notif_wait_data *notif_wait, struct iwl_rx_packet *pkt, void *data) { WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF); return true; } static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait, struct iwl_rx_packet *pkt, void *data) { struct iwl_phy_db *phy_db = data; if (pkt->hdr.cmd != CALIB_RES_NOTIF_PHY_DB) { WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF); return true; } WARN_ON(iwl_phy_db_set_section(phy_db, pkt)); return false; } static void iwl_mvm_print_pd_notification(struct iwl_mvm *mvm) { struct iwl_trans *trans = mvm->trans; enum iwl_device_family device_family = trans->trans_cfg->device_family; if (device_family < IWL_DEVICE_FAMILY_8000) return; if (device_family <= IWL_DEVICE_FAMILY_9000) IWL_ERR(mvm, "WFPM_ARC1_PD_NOTIFICATION: 0x%x\n", iwl_read_umac_prph(trans, WFPM_ARC1_PD_NOTIFICATION)); else IWL_ERR(mvm, "WFPM_LMAC1_PD_NOTIFICATION: 0x%x\n", iwl_read_umac_prph(trans, WFPM_LMAC1_PD_NOTIFICATION)); IWL_ERR(mvm, "HPM_SECONDARY_DEVICE_STATE: 0x%x\n", iwl_read_umac_prph(trans, HPM_SECONDARY_DEVICE_STATE)); } static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, enum iwl_ucode_type ucode_type) { struct iwl_notification_wait alive_wait; struct iwl_mvm_alive_data alive_data = {}; const struct fw_img *fw; int ret; enum iwl_ucode_type old_type = mvm->fwrt.cur_fw_img; static const u16 alive_cmd[] = { UCODE_ALIVE_NTFY }; bool run_in_rfkill = ucode_type == IWL_UCODE_INIT || iwl_mvm_has_unified_ucode(mvm); if (ucode_type == IWL_UCODE_REGULAR && iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE) && !(fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED))) fw = iwl_get_ucode_image(mvm->fw, IWL_UCODE_REGULAR_USNIFFER); else fw = iwl_get_ucode_image(mvm->fw, ucode_type); if (WARN_ON(!fw)) return -EINVAL; iwl_fw_set_current_image(&mvm->fwrt, ucode_type); clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status); iwl_init_notification_wait(&mvm->notif_wait, &alive_wait, alive_cmd, ARRAY_SIZE(alive_cmd), iwl_alive_fn, &alive_data); /* * We want to load the INIT firmware even in RFKILL * For the unified firmware case, the ucode_type is not * INIT, but we still need to run it. */ ret = iwl_trans_start_fw(mvm->trans, fw, run_in_rfkill); if (ret) { iwl_fw_set_current_image(&mvm->fwrt, old_type); iwl_remove_notification(&mvm->notif_wait, &alive_wait); return ret; } /* * Some things may run in the background now, but we * just wait for the ALIVE notification here. 
*/ ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait, MVM_UCODE_ALIVE_TIMEOUT); if (ret) { struct iwl_trans *trans = mvm->trans; /* SecBoot info */ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000) { IWL_ERR(mvm, "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n", iwl_read_umac_prph(trans, UMAG_SB_CPU_1_STATUS), iwl_read_umac_prph(trans, UMAG_SB_CPU_2_STATUS)); } else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000) { IWL_ERR(mvm, "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n", iwl_read_prph(trans, SB_CPU_1_STATUS), iwl_read_prph(trans, SB_CPU_2_STATUS)); } iwl_mvm_print_pd_notification(mvm); /* LMAC/UMAC PC info */ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_9000) { IWL_ERR(mvm, "UMAC PC: 0x%x\n", iwl_read_umac_prph(trans, UREG_UMAC_CURRENT_PC)); IWL_ERR(mvm, "LMAC PC: 0x%x\n", iwl_read_umac_prph(trans, UREG_LMAC1_CURRENT_PC)); if (iwl_mvm_is_cdb_supported(mvm)) IWL_ERR(mvm, "LMAC2 PC: 0x%x\n", iwl_read_umac_prph(trans, UREG_LMAC2_CURRENT_PC)); } if (ret == -ETIMEDOUT) iwl_fw_dbg_error_collect(&mvm->fwrt, FW_DBG_TRIGGER_ALIVE_TIMEOUT); iwl_fw_set_current_image(&mvm->fwrt, old_type); return ret; } if (!alive_data.valid) { IWL_ERR(mvm, "Loaded ucode is not valid!\n"); iwl_fw_set_current_image(&mvm->fwrt, old_type); return -EIO; } ret = iwl_pnvm_load(mvm->trans, &mvm->notif_wait); if (ret) { IWL_ERR(mvm, "Timeout waiting for PNVM load!\n"); iwl_fw_set_current_image(&mvm->fwrt, old_type); return ret; } iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr); /* * Note: all the queues are enabled as part of the interface * initialization, but in firmware restart scenarios they * could be stopped, so wake them up. In firmware restart, * mac80211 will have the queues stopped as well until the * reconfiguration completes. During normal startup, they * will be empty. */ memset(&mvm->queue_info, 0, sizeof(mvm->queue_info)); /* * Set a 'fake' TID for the command queue, since we use the * hweight() of the tid_bitmap as a refcount now. Not that * we ever even consider the command queue as one we might * want to reuse, but be safe nevertheless. */ mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].tid_bitmap = BIT(IWL_MAX_TID_COUNT + 2); set_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status); #ifdef CONFIG_IWLWIFI_DEBUGFS iwl_fw_set_dbg_rec_on(&mvm->fwrt); #endif /* * All the BSSes in the BSS table include the GP2 in the system * at the beacon Rx time, this is of course no longer relevant * since we are resetting the firmware. * Purge all the BSS table. 
*/ cfg80211_bss_flush(mvm->hw->wiphy); return 0; } static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm) { struct iwl_notification_wait init_wait; struct iwl_nvm_access_complete_cmd nvm_complete = {}; struct iwl_init_extended_cfg_cmd init_cfg = { .init_flags = cpu_to_le32(BIT(IWL_INIT_NVM)), }; static const u16 init_complete[] = { INIT_COMPLETE_NOTIF, }; int ret; if (mvm->trans->cfg->tx_with_siso_diversity) init_cfg.init_flags |= cpu_to_le32(BIT(IWL_INIT_PHY)); lockdep_assert_held(&mvm->mutex); mvm->rfkill_safe_init_done = false; iwl_init_notification_wait(&mvm->notif_wait, &init_wait, init_complete, ARRAY_SIZE(init_complete), iwl_wait_init_complete, NULL); iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_EARLY, NULL); /* Will also start the device */ ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR); if (ret) { IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret); goto error; } iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_AFTER_ALIVE, NULL); /* Send init config command to mark that we are sending NVM access * commands */ ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(SYSTEM_GROUP, INIT_EXTENDED_CFG_CMD), CMD_SEND_IN_RFKILL, sizeof(init_cfg), &init_cfg); if (ret) { IWL_ERR(mvm, "Failed to run init config command: %d\n", ret); goto error; } /* Load NVM to NIC if needed */ if (mvm->nvm_file_name) { ret = iwl_read_external_nvm(mvm->trans, mvm->nvm_file_name, mvm->nvm_sections); if (ret) goto error; ret = iwl_mvm_load_nvm_to_nic(mvm); if (ret) goto error; } if (IWL_MVM_PARSE_NVM && !mvm->nvm_data) { ret = iwl_nvm_init(mvm); if (ret) { IWL_ERR(mvm, "Failed to read NVM: %d\n", ret); goto error; } } ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP, NVM_ACCESS_COMPLETE), CMD_SEND_IN_RFKILL, sizeof(nvm_complete), &nvm_complete); if (ret) { IWL_ERR(mvm, "Failed to run complete NVM access: %d\n", ret); goto error; } /* We wait for the INIT complete notification */ ret = iwl_wait_notification(&mvm->notif_wait, &init_wait, MVM_UCODE_ALIVE_TIMEOUT); if (ret) return ret; /* Read the NVM only at driver load time, no need to do this twice */ if (!IWL_MVM_PARSE_NVM && !mvm->nvm_data) { mvm->nvm_data = iwl_get_nvm(mvm->trans, mvm->fw); if (IS_ERR(mvm->nvm_data)) { ret = PTR_ERR(mvm->nvm_data); mvm->nvm_data = NULL; IWL_ERR(mvm, "Failed to read NVM: %d\n", ret); return ret; } } mvm->rfkill_safe_init_done = true; return 0; error: iwl_remove_notification(&mvm->notif_wait, &init_wait); return ret; } #ifdef CONFIG_ACPI static void iwl_mvm_phy_filter_init(struct iwl_mvm *mvm, struct iwl_phy_specific_cfg *phy_filters) { /* * TODO: read specific phy config from BIOS * ACPI table for this feature has not been defined yet, * so for now we use hardcoded values. 
*/ if (IWL_MVM_PHY_FILTER_CHAIN_A) { phy_filters->filter_cfg_chain_a = cpu_to_le32(IWL_MVM_PHY_FILTER_CHAIN_A); } if (IWL_MVM_PHY_FILTER_CHAIN_B) { phy_filters->filter_cfg_chain_b = cpu_to_le32(IWL_MVM_PHY_FILTER_CHAIN_B); } if (IWL_MVM_PHY_FILTER_CHAIN_C) { phy_filters->filter_cfg_chain_c = cpu_to_le32(IWL_MVM_PHY_FILTER_CHAIN_C); } if (IWL_MVM_PHY_FILTER_CHAIN_D) { phy_filters->filter_cfg_chain_d = cpu_to_le32(IWL_MVM_PHY_FILTER_CHAIN_D); } } #else /* CONFIG_ACPI */ static void iwl_mvm_phy_filter_init(struct iwl_mvm *mvm, struct iwl_phy_specific_cfg *phy_filters) { } #endif /* CONFIG_ACPI */ #if defined(CONFIG_ACPI) && defined(CONFIG_EFI) static int iwl_mvm_sgom_init(struct iwl_mvm *mvm) { u8 cmd_ver; int ret; struct iwl_host_cmd cmd = { .id = WIDE_ID(REGULATORY_AND_NVM_GROUP, SAR_OFFSET_MAPPING_TABLE_CMD), .flags = 0, .data[0] = &mvm->fwrt.sgom_table, .len[0] = sizeof(mvm->fwrt.sgom_table), .dataflags[0] = IWL_HCMD_DFL_NOCOPY, }; if (!mvm->fwrt.sgom_enabled) { IWL_DEBUG_RADIO(mvm, "SGOM table is disabled\n"); return 0; } cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd.id, IWL_FW_CMD_VER_UNKNOWN); if (cmd_ver != 2) { IWL_DEBUG_RADIO(mvm, "command version is unsupported. version = %d\n", cmd_ver); return 0; } ret = iwl_mvm_send_cmd(mvm, &cmd); if (ret < 0) IWL_ERR(mvm, "failed to send SAR_OFFSET_MAPPING_CMD (%d)\n", ret); return ret; } #else static int iwl_mvm_sgom_init(struct iwl_mvm *mvm) { return 0; } #endif static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm) { u32 cmd_id = PHY_CONFIGURATION_CMD; struct iwl_phy_cfg_cmd_v3 phy_cfg_cmd; enum iwl_ucode_type ucode_type = mvm->fwrt.cur_fw_img; struct iwl_phy_specific_cfg phy_filters = {}; u8 cmd_ver; size_t cmd_size; if (iwl_mvm_has_unified_ucode(mvm) && !mvm->trans->cfg->tx_with_siso_diversity) return 0; if (mvm->trans->cfg->tx_with_siso_diversity) { /* * TODO: currently we don't set the antenna but letting the NIC * to decide which antenna to use. This should come from BIOS. */ phy_cfg_cmd.phy_cfg = cpu_to_le32(FW_PHY_CFG_CHAIN_SAD_ENABLED); } /* Set parameters */ phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_get_phy_config(mvm)); /* set flags extra PHY configuration flags from the device's cfg */ phy_cfg_cmd.phy_cfg |= cpu_to_le32(mvm->trans->trans_cfg->extra_phy_cfg_flags); phy_cfg_cmd.calib_control.event_trigger = mvm->fw->default_calib[ucode_type].event_trigger; phy_cfg_cmd.calib_control.flow_trigger = mvm->fw->default_calib[ucode_type].flow_trigger; cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, IWL_FW_CMD_VER_UNKNOWN); if (cmd_ver == 3) { iwl_mvm_phy_filter_init(mvm, &phy_filters); memcpy(&phy_cfg_cmd.phy_specific_cfg, &phy_filters, sizeof(struct iwl_phy_specific_cfg)); } IWL_DEBUG_INFO(mvm, "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg); cmd_size = (cmd_ver == 3) ? 
sizeof(struct iwl_phy_cfg_cmd_v3) : sizeof(struct iwl_phy_cfg_cmd_v1); return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, cmd_size, &phy_cfg_cmd); } int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm) { struct iwl_notification_wait calib_wait; static const u16 init_complete[] = { INIT_COMPLETE_NOTIF, CALIB_RES_NOTIF_PHY_DB }; int ret; if (iwl_mvm_has_unified_ucode(mvm)) return iwl_run_unified_mvm_ucode(mvm); lockdep_assert_held(&mvm->mutex); mvm->rfkill_safe_init_done = false; iwl_init_notification_wait(&mvm->notif_wait, &calib_wait, init_complete, ARRAY_SIZE(init_complete), iwl_wait_phy_db_entry, mvm->phy_db); iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_EARLY, NULL); /* Will also start the device */ ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_INIT); if (ret) { IWL_ERR(mvm, "Failed to start INIT ucode: %d\n", ret); goto remove_notif; } if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000) { ret = iwl_mvm_send_bt_init_conf(mvm); if (ret) goto remove_notif; } /* Read the NVM only at driver load time, no need to do this twice */ if (!mvm->nvm_data) { ret = iwl_nvm_init(mvm); if (ret) { IWL_ERR(mvm, "Failed to read NVM: %d\n", ret); goto remove_notif; } } /* In case we read the NVM from external file, load it to the NIC */ if (mvm->nvm_file_name) { ret = iwl_mvm_load_nvm_to_nic(mvm); if (ret) goto remove_notif; } WARN_ONCE(mvm->nvm_data->nvm_version < mvm->trans->cfg->nvm_ver, "Too old NVM version (0x%0x, required = 0x%0x)", mvm->nvm_data->nvm_version, mvm->trans->cfg->nvm_ver); /* * abort after reading the nvm in case RF Kill is on, we will complete * the init seq later when RF kill will switch to off */ if (iwl_mvm_is_radio_hw_killed(mvm)) { IWL_DEBUG_RF_KILL(mvm, "jump over all phy activities due to RF kill\n"); goto remove_notif; } mvm->rfkill_safe_init_done = true; /* Send TX valid antennas before triggering calibrations */ ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm)); if (ret) goto remove_notif; ret = iwl_send_phy_cfg_cmd(mvm); if (ret) { IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n", ret); goto remove_notif; } /* * Some things may run in the background now, but we * just wait for the calibration complete notification. 
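 * The wait below is bounded by MVM_UCODE_CALIB_TIMEOUT; if RF-kill is
 * asserted in the meantime the timeout is not treated as a failure and
 * the remainder of the init sequence runs later, once RF-kill is
 * released (see the iwl_mvm_is_radio_hw_killed() check below).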
*/ ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait, MVM_UCODE_CALIB_TIMEOUT); if (!ret) goto out; if (iwl_mvm_is_radio_hw_killed(mvm)) { IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n"); ret = 0; } else { IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n", ret); } goto out; remove_notif: iwl_remove_notification(&mvm->notif_wait, &calib_wait); out: mvm->rfkill_safe_init_done = false; if (iwlmvm_mod_params.init_dbg && !mvm->nvm_data) { /* we want to debug INIT and we have no NVM - fake */ mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) + sizeof(struct ieee80211_channel) + sizeof(struct ieee80211_rate), GFP_KERNEL); if (!mvm->nvm_data) return -ENOMEM; mvm->nvm_data->bands[0].channels = mvm->nvm_data->channels; mvm->nvm_data->bands[0].n_channels = 1; mvm->nvm_data->bands[0].n_bitrates = 1; mvm->nvm_data->bands[0].bitrates = (void *)(mvm->nvm_data->channels + 1); mvm->nvm_data->bands[0].bitrates->hw_value = 10; } return ret; } static int iwl_mvm_config_ltr(struct iwl_mvm *mvm) { struct iwl_ltr_config_cmd cmd = { .flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE), }; if (!mvm->trans->ltr_enabled) return 0; return iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0, sizeof(cmd), &cmd); } #ifdef CONFIG_ACPI int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b) { u32 cmd_id = REDUCE_TX_POWER_CMD; struct iwl_dev_tx_power_cmd cmd = { .common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS), }; __le16 *per_chain; int ret; u16 len = 0; u32 n_subbands; u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, IWL_FW_CMD_VER_UNKNOWN); - - if (cmd_ver == 6) { + if (cmd_ver == 7) { + len = sizeof(cmd.v7); + n_subbands = IWL_NUM_SUB_BANDS_V2; + per_chain = cmd.v7.per_chain[0][0]; + cmd.v7.flags = cpu_to_le32(mvm->fwrt.reduced_power_flags); + } else if (cmd_ver == 6) { len = sizeof(cmd.v6); n_subbands = IWL_NUM_SUB_BANDS_V2; per_chain = cmd.v6.per_chain[0][0]; } else if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_REDUCE_TX_POWER)) { len = sizeof(cmd.v5); n_subbands = IWL_NUM_SUB_BANDS_V1; per_chain = cmd.v5.per_chain[0][0]; } else if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TX_POWER_ACK)) { len = sizeof(cmd.v4); n_subbands = IWL_NUM_SUB_BANDS_V1; per_chain = cmd.v4.per_chain[0][0]; } else { len = sizeof(cmd.v3); n_subbands = IWL_NUM_SUB_BANDS_V1; per_chain = cmd.v3.per_chain[0][0]; } /* all structs have the same common part, add it */ len += sizeof(cmd.common); ret = iwl_sar_select_profile(&mvm->fwrt, per_chain, IWL_NUM_CHAIN_TABLES, n_subbands, prof_a, prof_b); /* return on error or if the profile is disabled (positive number) */ if (ret) return ret; iwl_mei_set_power_limit(per_chain); IWL_DEBUG_RADIO(mvm, "Sending REDUCE_TX_POWER_CMD per chain\n"); return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, &cmd); } int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm) { union iwl_geo_tx_power_profiles_cmd geo_tx_cmd; struct iwl_geo_tx_power_profiles_resp *resp; u16 len; int ret; struct iwl_host_cmd cmd = { .id = WIDE_ID(PHY_OPS_GROUP, PER_CHAIN_LIMIT_OFFSET_CMD), .flags = CMD_WANT_SKB, .data = { &geo_tx_cmd }, }; u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd.id, IWL_FW_CMD_VER_UNKNOWN); /* the ops field is at the same spot for all versions, so set in v1 */ geo_tx_cmd.v1.ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE); if (cmd_ver == 5) len = sizeof(geo_tx_cmd.v5); else if (cmd_ver == 4) len = sizeof(geo_tx_cmd.v4); else if (cmd_ver == 3) len = sizeof(geo_tx_cmd.v3); else if (fw_has_api(&mvm->fwrt.fw->ucode_capa, IWL_UCODE_TLV_API_SAR_TABLE_VER)) len = 
sizeof(geo_tx_cmd.v2); else len = sizeof(geo_tx_cmd.v1); if (!iwl_sar_geo_support(&mvm->fwrt)) return -EOPNOTSUPP; cmd.len[0] = len; ret = iwl_mvm_send_cmd(mvm, &cmd); if (ret) { IWL_ERR(mvm, "Failed to get geographic profile info %d\n", ret); return ret; } resp = (void *)cmd.resp_pkt->data; ret = le32_to_cpu(resp->profile_idx); if (WARN_ON(ret > ACPI_NUM_GEO_PROFILES_REV3)) ret = -EIO; iwl_free_resp(&cmd); return ret; } static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm) { u32 cmd_id = WIDE_ID(PHY_OPS_GROUP, PER_CHAIN_LIMIT_OFFSET_CMD); union iwl_geo_tx_power_profiles_cmd cmd; u16 len; u32 n_bands; u32 n_profiles; u32 sk = 0; int ret; u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, IWL_FW_CMD_VER_UNKNOWN); BUILD_BUG_ON(offsetof(struct iwl_geo_tx_power_profiles_cmd_v1, ops) != offsetof(struct iwl_geo_tx_power_profiles_cmd_v2, ops) || offsetof(struct iwl_geo_tx_power_profiles_cmd_v2, ops) != offsetof(struct iwl_geo_tx_power_profiles_cmd_v3, ops) || offsetof(struct iwl_geo_tx_power_profiles_cmd_v3, ops) != offsetof(struct iwl_geo_tx_power_profiles_cmd_v4, ops) || offsetof(struct iwl_geo_tx_power_profiles_cmd_v4, ops) != offsetof(struct iwl_geo_tx_power_profiles_cmd_v5, ops)); /* the ops field is at the same spot for all versions, so set in v1 */ cmd.v1.ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_SET_TABLES); if (cmd_ver == 5) { len = sizeof(cmd.v5); n_bands = ARRAY_SIZE(cmd.v5.table[0]); n_profiles = ACPI_NUM_GEO_PROFILES_REV3; } else if (cmd_ver == 4) { len = sizeof(cmd.v4); n_bands = ARRAY_SIZE(cmd.v4.table[0]); n_profiles = ACPI_NUM_GEO_PROFILES_REV3; } else if (cmd_ver == 3) { len = sizeof(cmd.v3); n_bands = ARRAY_SIZE(cmd.v3.table[0]); n_profiles = ACPI_NUM_GEO_PROFILES; } else if (fw_has_api(&mvm->fwrt.fw->ucode_capa, IWL_UCODE_TLV_API_SAR_TABLE_VER)) { len = sizeof(cmd.v2); n_bands = ARRAY_SIZE(cmd.v2.table[0]); n_profiles = ACPI_NUM_GEO_PROFILES; } else { len = sizeof(cmd.v1); n_bands = ARRAY_SIZE(cmd.v1.table[0]); n_profiles = ACPI_NUM_GEO_PROFILES; } BUILD_BUG_ON(offsetof(struct iwl_geo_tx_power_profiles_cmd_v1, table) != offsetof(struct iwl_geo_tx_power_profiles_cmd_v2, table) || offsetof(struct iwl_geo_tx_power_profiles_cmd_v2, table) != offsetof(struct iwl_geo_tx_power_profiles_cmd_v3, table) || offsetof(struct iwl_geo_tx_power_profiles_cmd_v3, table) != offsetof(struct iwl_geo_tx_power_profiles_cmd_v4, table) || offsetof(struct iwl_geo_tx_power_profiles_cmd_v4, table) != offsetof(struct iwl_geo_tx_power_profiles_cmd_v5, table)); /* the table is at the same position for all versions, so set use v1 */ ret = iwl_sar_geo_init(&mvm->fwrt, &cmd.v1.table[0][0], n_bands, n_profiles); /* * It is a valid scenario to not support SAR, or miss wgds table, * but in that case there is no need to send the command. */ if (ret) return 0; /* Only set to South Korea if the table revision is 1 */ if (mvm->fwrt.geo_rev == 1) sk = 1; /* * Set the table_revision to South Korea (1) or not (0). The * element name is misleading, as it doesn't contain the table * revision number, but whether the South Korea variation * should be used. * This must be done after calling iwl_sar_geo_init(). 
*/ if (cmd_ver == 5) cmd.v5.table_revision = cpu_to_le32(sk); else if (cmd_ver == 4) cmd.v4.table_revision = cpu_to_le32(sk); else if (cmd_ver == 3) cmd.v3.table_revision = cpu_to_le32(sk); else if (fw_has_api(&mvm->fwrt.fw->ucode_capa, IWL_UCODE_TLV_API_SAR_TABLE_VER)) cmd.v2.table_revision = cpu_to_le32(sk); return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, &cmd); } -static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm) -{ - union acpi_object *wifi_pkg, *data, *flags; - int i, j, ret, tbl_rev, num_sub_bands; - int idx = 2; - - mvm->fwrt.ppag_flags = 0; - - data = iwl_acpi_get_object(mvm->dev, ACPI_PPAG_METHOD); - if (IS_ERR(data)) - return PTR_ERR(data); - - /* try to read ppag table rev 2 or 1 (both have the same data size) */ - wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data, - ACPI_PPAG_WIFI_DATA_SIZE_V2, &tbl_rev); - if (!IS_ERR(wifi_pkg)) { - if (tbl_rev == 1 || tbl_rev == 2) { - num_sub_bands = IWL_NUM_SUB_BANDS_V2; - IWL_DEBUG_RADIO(mvm, - "Reading PPAG table v2 (tbl_rev=%d)\n", - tbl_rev); - goto read_table; - } else { - ret = -EINVAL; - goto out_free; - } - } - - /* try to read ppag table revision 0 */ - wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data, - ACPI_PPAG_WIFI_DATA_SIZE_V1, &tbl_rev); - if (!IS_ERR(wifi_pkg)) { - if (tbl_rev != 0) { - ret = -EINVAL; - goto out_free; - } - num_sub_bands = IWL_NUM_SUB_BANDS_V1; - IWL_DEBUG_RADIO(mvm, "Reading PPAG table v1 (tbl_rev=0)\n"); - goto read_table; - } - ret = PTR_ERR(wifi_pkg); - goto out_free; - -read_table: - mvm->fwrt.ppag_ver = tbl_rev; - flags = &wifi_pkg->package.elements[1]; - - if (flags->type != ACPI_TYPE_INTEGER) { - ret = -EINVAL; - goto out_free; - } - - mvm->fwrt.ppag_flags = flags->integer.value & IWL_PPAG_MASK; - - if (!mvm->fwrt.ppag_flags) { - ret = 0; - goto out_free; - } - - /* - * read, verify gain values and save them into the PPAG table. - * first sub-band (j=0) corresponds to Low-Band (2.4GHz), and the - * following sub-bands to High-Band (5GHz). - */ - for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) { - for (j = 0; j < num_sub_bands; j++) { - union acpi_object *ent; - - ent = &wifi_pkg->package.elements[idx++]; - if (ent->type != ACPI_TYPE_INTEGER) { - ret = -EINVAL; - goto out_free; - } - - mvm->fwrt.ppag_chains[i].subbands[j] = ent->integer.value; - - if ((j == 0 && - (mvm->fwrt.ppag_chains[i].subbands[j] > ACPI_PPAG_MAX_LB || - mvm->fwrt.ppag_chains[i].subbands[j] < ACPI_PPAG_MIN_LB)) || - (j != 0 && - (mvm->fwrt.ppag_chains[i].subbands[j] > ACPI_PPAG_MAX_HB || - mvm->fwrt.ppag_chains[i].subbands[j] < ACPI_PPAG_MIN_HB))) { - mvm->fwrt.ppag_flags = 0; - ret = -EINVAL; - goto out_free; - } - } - } - - ret = 0; -out_free: - kfree(data); - return ret; -} - int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm) { union iwl_ppag_table_cmd cmd; - u8 cmd_ver; - int i, j, ret, num_sub_bands, cmd_size; - s8 *gain; + int ret, cmd_size; - if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_PPAG)) { - IWL_DEBUG_RADIO(mvm, - "PPAG capability not supported by FW, command not sent.\n"); - return 0; - } - if (!mvm->fwrt.ppag_flags) { - IWL_DEBUG_RADIO(mvm, "PPAG not enabled, command not sent.\n"); - return 0; - } - - /* The 'flags' field is the same in v1 and in v2 so we can just - * use v1 to access it. 
- */ - cmd.v1.flags = cpu_to_le32(mvm->fwrt.ppag_flags); - cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, - WIDE_ID(PHY_OPS_GROUP, PER_PLATFORM_ANT_GAIN_CMD), - IWL_FW_CMD_VER_UNKNOWN); - if (cmd_ver == 1) { - num_sub_bands = IWL_NUM_SUB_BANDS_V1; - gain = cmd.v1.gain[0]; - cmd_size = sizeof(cmd.v1); - if (mvm->fwrt.ppag_ver == 1 || mvm->fwrt.ppag_ver == 2) { - IWL_DEBUG_RADIO(mvm, - "PPAG table rev is %d but FW supports v1, sending truncated table\n", - mvm->fwrt.ppag_ver); - cmd.v1.flags &= cpu_to_le32(IWL_PPAG_ETSI_MASK); - } - } else if (cmd_ver == 2 || cmd_ver == 3) { - num_sub_bands = IWL_NUM_SUB_BANDS_V2; - gain = cmd.v2.gain[0]; - cmd_size = sizeof(cmd.v2); - if (mvm->fwrt.ppag_ver == 0) { - IWL_DEBUG_RADIO(mvm, - "PPAG table is v1 but FW supports v2, sending padded table\n"); - } else if (cmd_ver == 2 && mvm->fwrt.ppag_ver == 2) { - IWL_DEBUG_RADIO(mvm, - "PPAG table is v3 but FW supports v2, sending partial bitmap.\n"); - cmd.v1.flags &= cpu_to_le32(IWL_PPAG_ETSI_MASK); - } - } else { - IWL_DEBUG_RADIO(mvm, "Unsupported PPAG command version\n"); - return 0; - } + ret = iwl_read_ppag_table(&mvm->fwrt, &cmd, &cmd_size); + if(ret < 0) + return ret; - for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) { - for (j = 0; j < num_sub_bands; j++) { - gain[i * num_sub_bands + j] = - mvm->fwrt.ppag_chains[i].subbands[j]; - IWL_DEBUG_RADIO(mvm, - "PPAG table: chain[%d] band[%d]: gain = %d\n", - i, j, gain[i * num_sub_bands + j]); - } - } IWL_DEBUG_RADIO(mvm, "Sending PER_PLATFORM_ANT_GAIN_CMD\n"); ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(PHY_OPS_GROUP, PER_PLATFORM_ANT_GAIN_CMD), 0, cmd_size, &cmd); if (ret < 0) IWL_ERR(mvm, "failed to send PER_PLATFORM_ANT_GAIN_CMD (%d)\n", ret); return ret; } -static const struct dmi_system_id dmi_ppag_approved_list[] = { - { .ident = "HP", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "HP"), - }, - }, - { .ident = "SAMSUNG", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"), - }, - }, - { .ident = "MSFT", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), - }, - }, - { .ident = "ASUS", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek COMPUTER INC."), - }, - }, - {} -}; - static int iwl_mvm_ppag_init(struct iwl_mvm *mvm) { /* no need to read the table, done in INIT stage */ - if (!dmi_check_system(dmi_ppag_approved_list)) { - IWL_DEBUG_RADIO(mvm, - "System vendor '%s' is not in the approved list, disabling PPAG.\n", - dmi_get_system_info(DMI_SYS_VENDOR)); - mvm->fwrt.ppag_flags = 0; + if (!(iwl_acpi_is_ppag_approved(&mvm->fwrt))) return 0; - } return iwl_mvm_ppag_send_cmd(mvm); } static const struct dmi_system_id dmi_tas_approved_list[] = { { .ident = "HP", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "HP"), }, }, { .ident = "SAMSUNG", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"), }, }, { .ident = "LENOVO", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Lenovo"), }, }, { .ident = "DELL", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), }, }, /* keep last */ {} }; static bool iwl_mvm_add_to_tas_block_list(__le32 *list, __le32 *le_size, unsigned int mcc) { int i; u32 size = le32_to_cpu(*le_size); /* Verify that there is room for another country */ if (size >= IWL_TAS_BLOCK_LIST_MAX) return false; for (i = 0; i < size; i++) { if (list[i] == cpu_to_le32(mcc)) return true; } list[size++] = cpu_to_le32(mcc); *le_size = cpu_to_le32(size); return true; } static void iwl_mvm_tas_init(struct iwl_mvm *mvm) { u32 cmd_id = WIDE_ID(REGULATORY_AND_NVM_GROUP, TAS_CONFIG); int ret; union iwl_tas_config_cmd cmd = {}; int cmd_size, 
fw_ver; BUILD_BUG_ON(ARRAY_SIZE(cmd.v3.block_list_array) < APCI_WTAS_BLACK_LIST_MAX); if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TAS_CFG)) { IWL_DEBUG_RADIO(mvm, "TAS not enabled in FW\n"); return; } fw_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, IWL_FW_CMD_VER_UNKNOWN); ret = iwl_acpi_get_tas(&mvm->fwrt, &cmd, fw_ver); if (ret < 0) { IWL_DEBUG_RADIO(mvm, "TAS table invalid or unavailable. (%d)\n", ret); return; } if (ret == 0) return; if (!dmi_check_system(dmi_tas_approved_list)) { IWL_DEBUG_RADIO(mvm, "System vendor '%s' is not in the approved list, disabling TAS in US and Canada.\n", dmi_get_system_info(DMI_SYS_VENDOR)); if ((!iwl_mvm_add_to_tas_block_list(cmd.v4.block_list_array, &cmd.v4.block_list_size, IWL_TAS_US_MCC)) || (!iwl_mvm_add_to_tas_block_list(cmd.v4.block_list_array, &cmd.v4.block_list_size, IWL_TAS_CANADA_MCC))) { IWL_DEBUG_RADIO(mvm, "Unable to add US/Canada to TAS block list, disabling TAS\n"); return; } } /* v4 is the same size as v3, so no need to differentiate here */ cmd_size = fw_ver < 3 ? sizeof(struct iwl_tas_config_cmd_v2) : sizeof(struct iwl_tas_config_cmd_v3); ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, cmd_size, &cmd); if (ret < 0) IWL_DEBUG_RADIO(mvm, "failed to send TAS_CONFIG (%d)\n", ret); } static u8 iwl_mvm_eval_dsm_rfi(struct iwl_mvm *mvm) { u8 value; int ret = iwl_acpi_get_dsm_u8(mvm->fwrt.dev, 0, DSM_RFI_FUNC_ENABLE, &iwl_rfi_guid, &value); if (ret < 0) { IWL_DEBUG_RADIO(mvm, "Failed to get DSM RFI, ret=%d\n", ret); } else if (value >= DSM_VALUE_RFI_MAX) { IWL_DEBUG_RADIO(mvm, "DSM RFI got invalid value, ret=%d\n", value); } else if (value == DSM_VALUE_RFI_ENABLE) { IWL_DEBUG_RADIO(mvm, "DSM RFI is evaluated to enable\n"); return DSM_VALUE_RFI_ENABLE; } IWL_DEBUG_RADIO(mvm, "DSM RFI is disabled\n"); /* default behaviour is disabled */ return DSM_VALUE_RFI_DISABLE; } static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm) { int ret; u32 value; struct iwl_lari_config_change_cmd_v6 cmd = {}; cmd.config_bitmap = iwl_acpi_get_lari_config_bitmap(&mvm->fwrt); ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0, DSM_FUNC_11AX_ENABLEMENT, &iwl_guid, &value); if (!ret) cmd.oem_11ax_allow_bitmap = cpu_to_le32(value); ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0, DSM_FUNC_ENABLE_UNII4_CHAN, &iwl_guid, &value); if (!ret) cmd.oem_unii4_allow_bitmap = cpu_to_le32(value); ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0, DSM_FUNC_ACTIVATE_CHANNEL, &iwl_guid, &value); if (!ret) cmd.chan_state_active_bitmap = cpu_to_le32(value); ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0, DSM_FUNC_ENABLE_6E, &iwl_guid, &value); if (!ret) cmd.oem_uhb_allow_bitmap = cpu_to_le32(value); ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0, DSM_FUNC_FORCE_DISABLE_CHANNELS, &iwl_guid, &value); if (!ret) cmd.force_disable_channels_bitmap = cpu_to_le32(value); if (cmd.config_bitmap || cmd.oem_uhb_allow_bitmap || cmd.oem_11ax_allow_bitmap || cmd.oem_unii4_allow_bitmap || cmd.chan_state_active_bitmap || cmd.force_disable_channels_bitmap) { size_t cmd_size; u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(REGULATORY_AND_NVM_GROUP, LARI_CONFIG_CHANGE), 1); switch (cmd_ver) { case 6: cmd_size = sizeof(struct iwl_lari_config_change_cmd_v6); break; case 5: cmd_size = sizeof(struct iwl_lari_config_change_cmd_v5); break; case 4: cmd_size = sizeof(struct iwl_lari_config_change_cmd_v4); break; case 3: cmd_size = sizeof(struct iwl_lari_config_change_cmd_v3); break; case 2: cmd_size = sizeof(struct iwl_lari_config_change_cmd_v2); break; default: cmd_size = sizeof(struct iwl_lari_config_change_cmd_v1); break; } 
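/*
 * Note: the command buffer always uses the newest (v6) layout here, but
 * only cmd_size bytes are sent, so fields added in later versions are
 * simply not transmitted when the firmware advertises an older
 * LARI_CONFIG_CHANGE version.
 */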
IWL_DEBUG_RADIO(mvm, "sending LARI_CONFIG_CHANGE, config_bitmap=0x%x, oem_11ax_allow_bitmap=0x%x\n", le32_to_cpu(cmd.config_bitmap), le32_to_cpu(cmd.oem_11ax_allow_bitmap)); IWL_DEBUG_RADIO(mvm, "sending LARI_CONFIG_CHANGE, oem_unii4_allow_bitmap=0x%x, chan_state_active_bitmap=0x%x, cmd_ver=%d\n", le32_to_cpu(cmd.oem_unii4_allow_bitmap), le32_to_cpu(cmd.chan_state_active_bitmap), cmd_ver); IWL_DEBUG_RADIO(mvm, "sending LARI_CONFIG_CHANGE, oem_uhb_allow_bitmap=0x%x, force_disable_channels_bitmap=0x%x\n", le32_to_cpu(cmd.oem_uhb_allow_bitmap), le32_to_cpu(cmd.force_disable_channels_bitmap)); ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP, LARI_CONFIG_CHANGE), 0, cmd_size, &cmd); if (ret < 0) IWL_DEBUG_RADIO(mvm, "Failed to send LARI_CONFIG_CHANGE (%d)\n", ret); } } void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm) { int ret; /* read PPAG table */ - ret = iwl_mvm_get_ppag_table(mvm); + ret = iwl_acpi_get_ppag_table(&mvm->fwrt); if (ret < 0) { IWL_DEBUG_RADIO(mvm, "PPAG BIOS table invalid or unavailable. (%d)\n", ret); } /* read SAR tables */ ret = iwl_sar_get_wrds_table(&mvm->fwrt); if (ret < 0) { IWL_DEBUG_RADIO(mvm, "WRDS SAR BIOS table invalid or unavailable. (%d)\n", ret); /* * If not available, don't fail and don't bother with EWRD and * WGDS */ if (!iwl_sar_get_wgds_table(&mvm->fwrt)) { /* * If basic SAR is not available, we check for WGDS, * which should *not* be available either. If it is * available, issue an error, because we can't use SAR * Geo without basic SAR. */ IWL_ERR(mvm, "BIOS contains WGDS but no WRDS\n"); } } else { ret = iwl_sar_get_ewrd_table(&mvm->fwrt); /* if EWRD is not available, we can still use * WRDS, so don't fail */ if (ret < 0) IWL_DEBUG_RADIO(mvm, "EWRD SAR BIOS table invalid or unavailable. (%d)\n", ret); /* read geo SAR table */ if (iwl_sar_geo_support(&mvm->fwrt)) { ret = iwl_sar_get_wgds_table(&mvm->fwrt); if (ret < 0) IWL_DEBUG_RADIO(mvm, "Geo SAR BIOS table invalid or unavailable. 
(%d)\n", ret); /* we don't fail if the table is not available */ } } } #else /* CONFIG_ACPI */ inline int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b) { return 1; } inline int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm) { return -ENOENT; } static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm) { return 0; } int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm) { return -ENOENT; } static int iwl_mvm_ppag_init(struct iwl_mvm *mvm) { return 0; } static void iwl_mvm_tas_init(struct iwl_mvm *mvm) { } static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm) { } static u8 iwl_mvm_eval_dsm_rfi(struct iwl_mvm *mvm) { return DSM_VALUE_RFI_DISABLE; } void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm) { } #endif /* CONFIG_ACPI */ void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags) { u32 error_log_size = mvm->fw->ucode_capa.error_log_size; int ret; u32 resp; struct iwl_fw_error_recovery_cmd recovery_cmd = { .flags = cpu_to_le32(flags), .buf_size = 0, }; struct iwl_host_cmd host_cmd = { .id = WIDE_ID(SYSTEM_GROUP, FW_ERROR_RECOVERY_CMD), .flags = CMD_WANT_SKB, .data = {&recovery_cmd, }, .len = {sizeof(recovery_cmd), }, }; /* no error log was defined in TLV */ if (!error_log_size) return; if (flags & ERROR_RECOVERY_UPDATE_DB) { /* no buf was allocated while HW reset */ if (!mvm->error_recovery_buf) return; host_cmd.data[1] = mvm->error_recovery_buf; host_cmd.len[1] = error_log_size; host_cmd.dataflags[1] = IWL_HCMD_DFL_NOCOPY; recovery_cmd.buf_size = cpu_to_le32(error_log_size); } ret = iwl_mvm_send_cmd(mvm, &host_cmd); kfree(mvm->error_recovery_buf); mvm->error_recovery_buf = NULL; if (ret) { IWL_ERR(mvm, "Failed to send recovery cmd %d\n", ret); return; } /* skb respond is only relevant in ERROR_RECOVERY_UPDATE_DB */ if (flags & ERROR_RECOVERY_UPDATE_DB) { resp = le32_to_cpu(*(__le32 *)host_cmd.resp_pkt->data); if (resp) IWL_ERR(mvm, "Failed to send recovery cmd blob was invalid %d\n", resp); } } static int iwl_mvm_sar_init(struct iwl_mvm *mvm) { return iwl_mvm_sar_select_profile(mvm, 1, 1); } static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm) { int ret; if (iwl_mvm_has_unified_ucode(mvm)) return iwl_run_unified_mvm_ucode(mvm); ret = iwl_run_init_mvm_ucode(mvm); if (ret) { IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret); if (iwlmvm_mod_params.init_dbg) return 0; return ret; } iwl_fw_dbg_stop_sync(&mvm->fwrt); iwl_trans_stop_device(mvm->trans); ret = iwl_trans_start_hw(mvm->trans); if (ret) return ret; mvm->rfkill_safe_init_done = false; ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR); if (ret) return ret; mvm->rfkill_safe_init_done = true; iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_AFTER_ALIVE, NULL); return iwl_init_paging(&mvm->fwrt, mvm->fwrt.cur_fw_img); } int iwl_mvm_up(struct iwl_mvm *mvm) { int ret, i; struct ieee80211_channel *chan; struct cfg80211_chan_def chandef; struct ieee80211_supported_band *sband = NULL; lockdep_assert_held(&mvm->mutex); ret = iwl_trans_start_hw(mvm->trans); if (ret) return ret; ret = iwl_mvm_load_rt_fw(mvm); if (ret) { IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret); if (ret != -ERFKILL) iwl_fw_dbg_error_collect(&mvm->fwrt, FW_DBG_TRIGGER_DRIVER); goto error; } iwl_get_shared_mem_conf(&mvm->fwrt); ret = iwl_mvm_sf_update(mvm, NULL, false); if (ret) IWL_ERR(mvm, "Failed to initialize Smart Fifo\n"); if (!iwl_trans_dbg_ini_valid(mvm->trans)) { mvm->fwrt.dump.conf = FW_DBG_INVALID; /* if we have a destination, assume EARLY START */ if (mvm->fw->dbg.dest_tlv) mvm->fwrt.dump.conf = FW_DBG_START_FROM_ALIVE; 
iwl_fw_start_dbg_conf(&mvm->fwrt, FW_DBG_START_FROM_ALIVE); } ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm)); if (ret) goto error; if (!iwl_mvm_has_unified_ucode(mvm)) { /* Send phy db control command and then phy db calibration */ ret = iwl_send_phy_db_data(mvm->phy_db); if (ret) goto error; } ret = iwl_send_phy_cfg_cmd(mvm); if (ret) goto error; ret = iwl_mvm_send_bt_init_conf(mvm); if (ret) goto error; if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SOC_LATENCY_SUPPORT)) { ret = iwl_set_soc_latency(&mvm->fwrt); if (ret) goto error; } /* Init RSS configuration */ ret = iwl_configure_rxq(&mvm->fwrt); if (ret) goto error; if (iwl_mvm_has_new_rx_api(mvm)) { ret = iwl_send_rss_cfg_cmd(mvm); if (ret) { IWL_ERR(mvm, "Failed to configure RSS queues: %d\n", ret); goto error; } } /* init the fw <-> mac80211 STA mapping */ for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL); mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA; /* reset quota debouncing buffer - 0xff will yield invalid data */ memset(&mvm->last_quota_cmd, 0xff, sizeof(mvm->last_quota_cmd)); if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_DQA_SUPPORT)) { ret = iwl_mvm_send_dqa_cmd(mvm); if (ret) goto error; } /* * Add auxiliary station for scanning. * Newer versions of this command implies that the fw uses * internal aux station for all aux activities that don't * requires a dedicated data queue. */ if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) < 12) { /* * In old version the aux station uses mac id like other * station and not lmac id */ ret = iwl_mvm_add_aux_sta(mvm, MAC_INDEX_AUX); if (ret) goto error; } /* Add all the PHY contexts */ i = 0; while (!sband && i < NUM_NL80211_BANDS) sband = mvm->hw->wiphy->bands[i++]; if (WARN_ON_ONCE(!sband)) { ret = -ENODEV; goto error; } chan = &sband->channels[0]; cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT); for (i = 0; i < NUM_PHY_CTX; i++) { /* * The channel used here isn't relevant as it's * going to be overwritten in the other flows. * For now use the first channel we have. */ ret = iwl_mvm_phy_ctxt_add(mvm, &mvm->phy_ctxts[i], &chandef, 1, 1); if (ret) goto error; } if (iwl_mvm_is_tt_in_fw(mvm)) { /* in order to give the responsibility of ct-kill and * TX backoff to FW we need to send empty temperature reporting * cmd during init time */ iwl_mvm_send_temp_report_ths_cmd(mvm); } else { /* Initialize tx backoffs to the minimal possible */ iwl_mvm_tt_tx_backoff(mvm, 0); } #ifdef CONFIG_THERMAL /* TODO: read the budget from BIOS / Platform NVM */ /* * In case there is no budget from BIOS / Platform NVM the default * budget should be 2000mW (cooling state 0). */ if (iwl_mvm_is_ctdp_supported(mvm)) { ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START, mvm->cooling_dev.cur_state); if (ret) goto error; } #endif if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_LTR_GEN2)) WARN_ON(iwl_mvm_config_ltr(mvm)); ret = iwl_mvm_power_update_device(mvm); if (ret) goto error; iwl_mvm_lari_cfg(mvm); /* * RTNL is not taken during Ct-kill, but we don't need to scan/Tx * anyway, so don't init MCC. 
*/ if (!test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status)) { ret = iwl_mvm_init_mcc(mvm); if (ret) goto error; } if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) { mvm->scan_type = IWL_SCAN_TYPE_NOT_SET; mvm->hb_scan_type = IWL_SCAN_TYPE_NOT_SET; ret = iwl_mvm_config_scan(mvm); if (ret) goto error; } if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) iwl_mvm_send_recovery_cmd(mvm, ERROR_RECOVERY_UPDATE_DB); if (iwl_acpi_get_eckv(mvm->dev, &mvm->ext_clock_valid)) IWL_DEBUG_INFO(mvm, "ECKV table doesn't exist in BIOS\n"); ret = iwl_mvm_ppag_init(mvm); if (ret) goto error; ret = iwl_mvm_sar_init(mvm); if (ret == 0) ret = iwl_mvm_sar_geo_init(mvm); - else if (ret < 0) + if (ret < 0) goto error; ret = iwl_mvm_sgom_init(mvm); if (ret) goto error; iwl_mvm_tas_init(mvm); iwl_mvm_leds_sync(mvm); iwl_mvm_ftm_initiator_smooth_config(mvm); if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_RFIM_SUPPORT)) { if (iwl_mvm_eval_dsm_rfi(mvm) == DSM_VALUE_RFI_ENABLE) iwl_rfi_send_config_cmd(mvm, NULL); } IWL_DEBUG_INFO(mvm, "RT uCode started.\n"); return 0; error: if (!iwlmvm_mod_params.init_dbg || !ret) iwl_mvm_stop_device(mvm); return ret; } int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm) { int ret, i; lockdep_assert_held(&mvm->mutex); ret = iwl_trans_start_hw(mvm->trans); if (ret) return ret; ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_WOWLAN); if (ret) { IWL_ERR(mvm, "Failed to start WoWLAN firmware: %d\n", ret); goto error; } ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm)); if (ret) goto error; /* Send phy db control command and then phy db calibration*/ ret = iwl_send_phy_db_data(mvm->phy_db); if (ret) goto error; ret = iwl_send_phy_cfg_cmd(mvm); if (ret) goto error; /* init the fw <-> mac80211 STA mapping */ for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL); if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) < 12) { /* * Add auxiliary station for scanning. * Newer versions of this command implies that the fw uses * internal aux station for all aux activities that don't * requires a dedicated data queue. 
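 * The cut-off is ADD_STA command version 12 (the same check is done
 * for the regular firmware image in iwl_mvm_up()).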
* In old version the aux station uses mac id like other * station and not lmac id */ ret = iwl_mvm_add_aux_sta(mvm, MAC_INDEX_AUX); if (ret) goto error; } return 0; error: iwl_mvm_stop_device(mvm); return ret; } void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_mfuart_load_notif *mfuart_notif = (void *)pkt->data; IWL_DEBUG_INFO(mvm, "MFUART: installed ver: 0x%08x, external ver: 0x%08x, status: 0x%08x, duration: 0x%08x\n", le32_to_cpu(mfuart_notif->installed_ver), le32_to_cpu(mfuart_notif->external_ver), le32_to_cpu(mfuart_notif->status), le32_to_cpu(mfuart_notif->duration)); if (iwl_rx_packet_payload_len(pkt) == sizeof(*mfuart_notif)) IWL_DEBUG_INFO(mvm, "MFUART: image size: 0x%08x\n", le32_to_cpu(mfuart_notif->image_size)); } diff --git a/sys/contrib/dev/iwlwifi/mvm/mac80211.c b/sys/contrib/dev/iwlwifi/mvm/mac80211.c index 5a403171bc79..a7798713788e 100644 --- a/sys/contrib/dev/iwlwifi/mvm/mac80211.c +++ b/sys/contrib/dev/iwlwifi/mvm/mac80211.c @@ -1,5757 +1,5491 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2012-2014, 2018-2022 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #include #include #include #include #include #include #include #include #if defined(__FreeBSD__) #include #endif #include #include #include #if defined(__FreeBSD__) #include #endif #include "iwl-drv.h" #include "iwl-op-mode.h" #include "iwl-io.h" #include "mvm.h" #include "sta.h" #include "time-event.h" #include "iwl-eeprom-parse.h" #include "iwl-phy-db.h" #ifdef CONFIG_NL80211_TESTMODE #include "testmode.h" #endif #include "fw/error-dump.h" #include "iwl-prph.h" #include "iwl-nvm-parse.h" static const struct ieee80211_iface_limit iwl_mvm_limits[] = { { .max = 1, .types = BIT(NL80211_IFTYPE_STATION), }, { .max = 1, .types = BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO), }, { .max = 1, .types = BIT(NL80211_IFTYPE_P2P_DEVICE), }, }; static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = { { .num_different_channels = 2, .max_interfaces = 3, .limits = iwl_mvm_limits, .n_limits = ARRAY_SIZE(iwl_mvm_limits), }, }; -#ifdef CONFIG_IWLWIFI_BCAST_FILTERING -/* - * Use the reserved field to indicate magic values. - * these values will only be used internally by the driver, - * and won't make it to the fw (reserved will be 0). - * BC_FILTER_MAGIC_IP - configure the val of this attribute to - * be the vif's ip address. in case there is not a single - * ip address (0, or more than 1), this attribute will - * be skipped. 
- * BC_FILTER_MAGIC_MAC - set the val of this attribute to - * the LSB bytes of the vif's mac address - */ -enum { - BC_FILTER_MAGIC_NONE = 0, - BC_FILTER_MAGIC_IP, - BC_FILTER_MAGIC_MAC, -}; - -static const struct iwl_fw_bcast_filter iwl_mvm_default_bcast_filters[] = { - { - /* arp */ - .discard = 0, - .frame_type = BCAST_FILTER_FRAME_TYPE_ALL, - .attrs = { - { - /* frame type - arp, hw type - ethernet */ - .offset_type = - BCAST_FILTER_OFFSET_PAYLOAD_START, - .offset = sizeof(rfc1042_header), - .val = cpu_to_be32(0x08060001), - .mask = cpu_to_be32(0xffffffff), - }, - { - /* arp dest ip */ - .offset_type = - BCAST_FILTER_OFFSET_PAYLOAD_START, - .offset = sizeof(rfc1042_header) + 2 + - sizeof(struct arphdr) + - ETH_ALEN + sizeof(__be32) + - ETH_ALEN, - .mask = cpu_to_be32(0xffffffff), - /* mark it as special field */ - .reserved1 = cpu_to_le16(BC_FILTER_MAGIC_IP), - }, - }, - }, - { - /* dhcp offer bcast */ - .discard = 0, - .frame_type = BCAST_FILTER_FRAME_TYPE_IPV4, - .attrs = { - { - /* udp dest port - 68 (bootp client)*/ - .offset_type = BCAST_FILTER_OFFSET_IP_END, - .offset = offsetof(struct udphdr, dest), - .val = cpu_to_be32(0x00440000), - .mask = cpu_to_be32(0xffff0000), - }, - { - /* dhcp - lsb bytes of client hw address */ - .offset_type = BCAST_FILTER_OFFSET_IP_END, - .offset = 38, - .mask = cpu_to_be32(0xffffffff), - /* mark it as special field */ - .reserved1 = cpu_to_le16(BC_FILTER_MAGIC_MAC), - }, - }, - }, - /* last filter must be empty */ - {}, -}; -#endif - static const struct cfg80211_pmsr_capabilities iwl_mvm_pmsr_capa = { .max_peers = IWL_MVM_TOF_MAX_APS, .report_ap_tsf = 1, .randomize_mac_addr = 1, .ftm = { .supported = 1, .asap = 1, .non_asap = 1, .request_lci = 1, .request_civicloc = 1, .trigger_based = 1, .non_trigger_based = 1, .max_bursts_exponent = -1, /* all supported */ .max_ftms_per_burst = 0, /* no limits */ .bandwidths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | BIT(NL80211_CHAN_WIDTH_20) | BIT(NL80211_CHAN_WIDTH_40) | BIT(NL80211_CHAN_WIDTH_80) | BIT(NL80211_CHAN_WIDTH_160), .preambles = BIT(NL80211_PREAMBLE_LEGACY) | BIT(NL80211_PREAMBLE_HT) | BIT(NL80211_PREAMBLE_VHT) | BIT(NL80211_PREAMBLE_HE), }, }; static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key); static void iwl_mvm_reset_phy_ctxts(struct iwl_mvm *mvm) { int i; memset(mvm->phy_ctxts, 0, sizeof(mvm->phy_ctxts)); for (i = 0; i < NUM_PHY_CTX; i++) { mvm->phy_ctxts[i].id = i; mvm->phy_ctxts[i].ref = 0; } } struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy, const char *alpha2, enum iwl_mcc_source src_id, bool *changed) { struct ieee80211_regdomain *regd = NULL; struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mcc_update_resp *resp; u8 resp_ver; IWL_DEBUG_LAR(mvm, "Getting regdomain data for %s from FW\n", alpha2); lockdep_assert_held(&mvm->mutex); resp = iwl_mvm_update_mcc(mvm, alpha2, src_id); if (IS_ERR_OR_NULL(resp)) { IWL_DEBUG_LAR(mvm, "Could not get update from FW %d\n", PTR_ERR_OR_ZERO(resp)); resp = NULL; goto out; } if (changed) { u32 status = le32_to_cpu(resp->status); *changed = (status == MCC_RESP_NEW_CHAN_PROFILE || status == MCC_RESP_ILLEGAL); } resp_ver = iwl_fw_lookup_notif_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP, MCC_UPDATE_CMD, 0); IWL_DEBUG_LAR(mvm, "MCC update response version: %d\n", resp_ver); regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg, __le32_to_cpu(resp->n_channels), resp->channels, 
__le16_to_cpu(resp->mcc), __le16_to_cpu(resp->geo_info), __le16_to_cpu(resp->cap), resp_ver); /* Store the return source id */ src_id = resp->source_id; if (IS_ERR_OR_NULL(regd)) { IWL_DEBUG_LAR(mvm, "Could not get parse update from FW %d\n", PTR_ERR_OR_ZERO(regd)); goto out; } IWL_DEBUG_LAR(mvm, "setting alpha2 from FW to %s (0x%x, 0x%x) src=%d\n", regd->alpha2, regd->alpha2[0], regd->alpha2[1], src_id); mvm->lar_regdom_set = true; mvm->mcc_src = src_id; iwl_mei_set_country_code(__le16_to_cpu(resp->mcc)); out: kfree(resp); return regd; } void iwl_mvm_update_changed_regdom(struct iwl_mvm *mvm) { bool changed; struct ieee80211_regdomain *regd; if (!iwl_mvm_is_lar_supported(mvm)) return; regd = iwl_mvm_get_current_regdomain(mvm, &changed); if (!IS_ERR_OR_NULL(regd)) { /* only update the regulatory core if changed */ if (changed) regulatory_set_wiphy_regd(mvm->hw->wiphy, regd); kfree(regd); } } struct ieee80211_regdomain *iwl_mvm_get_current_regdomain(struct iwl_mvm *mvm, bool *changed) { return iwl_mvm_get_regdomain(mvm->hw->wiphy, "ZZ", iwl_mvm_is_wifi_mcc_supported(mvm) ? MCC_SOURCE_GET_CURRENT : MCC_SOURCE_OLD_FW, changed); } int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm) { enum iwl_mcc_source used_src; struct ieee80211_regdomain *regd; int ret; bool changed; const struct ieee80211_regdomain *r = wiphy_dereference(mvm->hw->wiphy, mvm->hw->wiphy->regd); if (!r) return -ENOENT; /* save the last source in case we overwrite it below */ used_src = mvm->mcc_src; if (iwl_mvm_is_wifi_mcc_supported(mvm)) { /* Notify the firmware we support wifi location updates */ regd = iwl_mvm_get_current_regdomain(mvm, NULL); if (!IS_ERR_OR_NULL(regd)) kfree(regd); } /* Now set our last stored MCC and source */ regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, r->alpha2, used_src, &changed); if (IS_ERR_OR_NULL(regd)) return -EIO; /* update cfg80211 if the regdomain was changed */ if (changed) ret = regulatory_set_wiphy_regd_sync(mvm->hw->wiphy, regd); else ret = 0; kfree(regd); return ret; } static const u8 he_if_types_ext_capa_sta[] = { [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING, [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT, [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF, - [9] = WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT, }; static const struct wiphy_iftype_ext_capab he_iftypes_ext_capa[] = { { .iftype = NL80211_IFTYPE_STATION, .extended_capabilities = he_if_types_ext_capa_sta, .extended_capabilities_mask = he_if_types_ext_capa_sta, .extended_capabilities_len = sizeof(he_if_types_ext_capa_sta), }, }; static int iwl_mvm_op_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); *tx_ant = iwl_mvm_get_valid_tx_ant(mvm); *rx_ant = iwl_mvm_get_valid_rx_ant(mvm); return 0; } int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) { struct ieee80211_hw *hw = mvm->hw; int num_mac, ret, i; static const u32 mvm_ciphers[] = { WLAN_CIPHER_SUITE_WEP40, WLAN_CIPHER_SUITE_WEP104, WLAN_CIPHER_SUITE_TKIP, WLAN_CIPHER_SUITE_CCMP, }; #ifdef CONFIG_PM_SLEEP bool unified = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); #endif /* Tell mac80211 our characteristics */ ieee80211_hw_set(hw, SIGNAL_DBM); ieee80211_hw_set(hw, SPECTRUM_MGMT); ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS); ieee80211_hw_set(hw, WANT_MONITOR_VIF); ieee80211_hw_set(hw, SUPPORTS_PS); ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS); ieee80211_hw_set(hw, AMPDU_AGGREGATION); ieee80211_hw_set(hw, TIMING_BEACON_ONLY); ieee80211_hw_set(hw, CONNECTION_MONITOR); ieee80211_hw_set(hw, CHANCTX_STA_CSA); ieee80211_hw_set(hw, 
SUPPORT_FAST_XMIT); ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS); ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU); ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR); ieee80211_hw_set(hw, DEAUTH_NEED_MGD_TX_PREP); ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW); ieee80211_hw_set(hw, BUFF_MMPDU_TXQ); ieee80211_hw_set(hw, STA_MMPDU_TXQ); /* * On older devices, enabling TX A-MSDU occasionally leads to * something getting messed up, the command read from the FIFO * gets out of sync and isn't a TX command, so that we have an * assert EDC. * * It's not clear where the bug is, but since we didn't used to * support A-MSDU until moving the mac80211 iTXQs, just leave it * for older devices. We also don't see this issue on any newer * devices. */ if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_9000) ieee80211_hw_set(hw, TX_AMSDU); ieee80211_hw_set(hw, TX_FRAG_LIST); if (iwl_mvm_has_tlc_offload(mvm)) { ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW); ieee80211_hw_set(hw, HAS_RATE_CONTROL); } if (iwl_mvm_has_new_rx_api(mvm)) ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER); if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_STA_PM_NOTIF)) { ieee80211_hw_set(hw, AP_LINK_PS); } else if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) { /* * we absolutely need this for the new TX API since that comes * with many more queues than the current code can deal with * for station powersave */ return -EINVAL; } if (mvm->trans->num_rx_queues > 1) ieee80211_hw_set(hw, USES_RSS); if (mvm->trans->max_skb_frags) hw->netdev_features = NETIF_F_HIGHDMA | NETIF_F_SG; hw->queues = IEEE80211_NUM_ACS; hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE; hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC | IEEE80211_RADIOTAP_MCS_HAVE_STBC; hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC | IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED; hw->radiotap_timestamp.units_pos = IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US | IEEE80211_RADIOTAP_TIMESTAMP_SPOS_PLCP_SIG_ACQ; /* this is the case for CCK frames, it's better (only 8) for OFDM */ hw->radiotap_timestamp.accuracy = 22; if (!iwl_mvm_has_tlc_offload(mvm)) hw->rate_control_algorithm = RS_NAME; hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES; hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP; hw->max_tx_fragments = mvm->trans->max_skb_frags; BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 6); memcpy(mvm->ciphers, mvm_ciphers, sizeof(mvm_ciphers)); hw->wiphy->n_cipher_suites = ARRAY_SIZE(mvm_ciphers); hw->wiphy->cipher_suites = mvm->ciphers; if (iwl_mvm_has_new_rx_api(mvm)) { mvm->ciphers[hw->wiphy->n_cipher_suites] = WLAN_CIPHER_SUITE_GCMP; hw->wiphy->n_cipher_suites++; mvm->ciphers[hw->wiphy->n_cipher_suites] = WLAN_CIPHER_SUITE_GCMP_256; hw->wiphy->n_cipher_suites++; } if (iwlwifi_mod_params.swcrypto) IWL_ERR(mvm, "iwlmvm doesn't allow to disable HW crypto, check swcrypto module parameter\n"); if (!iwlwifi_mod_params.bt_coex_active) IWL_ERR(mvm, "iwlmvm doesn't allow to disable BT Coex, check bt_coex_active module parameter\n"); ieee80211_hw_set(hw, MFP_CAPABLE); mvm->ciphers[hw->wiphy->n_cipher_suites] = WLAN_CIPHER_SUITE_AES_CMAC; hw->wiphy->n_cipher_suites++; if (iwl_mvm_has_new_rx_api(mvm)) { mvm->ciphers[hw->wiphy->n_cipher_suites] = WLAN_CIPHER_SUITE_BIP_GMAC_128; hw->wiphy->n_cipher_suites++; mvm->ciphers[hw->wiphy->n_cipher_suites] = WLAN_CIPHER_SUITE_BIP_GMAC_256; hw->wiphy->n_cipher_suites++; } - /* currently FW API supports only one optional cipher scheme */ - if (mvm->fw->cs[0].cipher) { - const struct iwl_fw_cipher_scheme *fwcs = &mvm->fw->cs[0]; - struct 
ieee80211_cipher_scheme *cs = &mvm->cs[0]; - - mvm->hw->n_cipher_schemes = 1; - - cs->cipher = le32_to_cpu(fwcs->cipher); - cs->iftype = BIT(NL80211_IFTYPE_STATION); - cs->hdr_len = fwcs->hdr_len; - cs->pn_len = fwcs->pn_len; - cs->pn_off = fwcs->pn_off; - cs->key_idx_off = fwcs->key_idx_off; - cs->key_idx_mask = fwcs->key_idx_mask; - cs->key_idx_shift = fwcs->key_idx_shift; - cs->mic_len = fwcs->mic_len; - - mvm->hw->cipher_schemes = mvm->cs; - mvm->ciphers[hw->wiphy->n_cipher_suites] = cs->cipher; - hw->wiphy->n_cipher_suites++; - } - if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_FTM_CALIBRATED)) { wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER); hw->wiphy->pmsr_capa = &iwl_mvm_pmsr_capa; } if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BIGTK_SUPPORT)) wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_BEACON_PROTECTION_CLIENT); ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS); hw->wiphy->features |= NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR | NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR | NL80211_FEATURE_ND_RANDOM_MAC_ADDR; hw->sta_data_size = sizeof(struct iwl_mvm_sta); hw->vif_data_size = sizeof(struct iwl_mvm_vif); hw->chanctx_data_size = sizeof(u16); hw->txq_data_size = sizeof(struct iwl_mvm_txq); hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_P2P_GO) | BIT(NL80211_IFTYPE_P2P_DEVICE) | BIT(NL80211_IFTYPE_ADHOC); hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_VHT_IBSS); /* The new Tx API does not allow to pass the key or keyid of a MPDU to * the hw, preventing us to control which key(id) to use per MPDU. * Till that's fixed we can't use Extended Key ID for the newer cards. */ if (!iwl_mvm_has_new_tx_api(mvm)) wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_EXT_KEY_ID); hw->wiphy->features |= NL80211_FEATURE_HT_IBSS; hw->wiphy->regulatory_flags |= REGULATORY_ENABLE_RELAX_NO_IR; if (iwl_mvm_is_lar_supported(mvm)) hw->wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED; else hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG | REGULATORY_DISABLE_BEACON_HINTS; hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD; hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH; hw->wiphy->flags |= WIPHY_FLAG_SPLIT_SCAN_6GHZ; hw->wiphy->iface_combinations = iwl_mvm_iface_combinations; hw->wiphy->n_iface_combinations = ARRAY_SIZE(iwl_mvm_iface_combinations); hw->wiphy->max_remain_on_channel_duration = 10000; hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL; /* Extract MAC address */ memcpy(mvm->addresses[0].addr, mvm->nvm_data->hw_addr, ETH_ALEN); hw->wiphy->addresses = mvm->addresses; hw->wiphy->n_addresses = 1; /* Extract additional MAC addresses if available */ num_mac = (mvm->nvm_data->n_hw_addrs > 1) ? 
min(IWL_MVM_MAX_ADDRESSES, mvm->nvm_data->n_hw_addrs) : 1; for (i = 1; i < num_mac; i++) { memcpy(mvm->addresses[i].addr, mvm->addresses[i-1].addr, ETH_ALEN); mvm->addresses[i].addr[5]++; hw->wiphy->n_addresses++; } iwl_mvm_reset_phy_ctxts(mvm); hw->wiphy->max_scan_ie_len = iwl_mvm_max_scan_ie_len(mvm); hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX; BUILD_BUG_ON(IWL_MVM_SCAN_STOPPING_MASK & IWL_MVM_SCAN_MASK); BUILD_BUG_ON(IWL_MVM_MAX_UMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK) || IWL_MVM_MAX_LMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK)); if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) mvm->max_scans = IWL_MVM_MAX_UMAC_SCANS; else mvm->max_scans = IWL_MVM_MAX_LMAC_SCANS; if (mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels) hw->wiphy->bands[NL80211_BAND_2GHZ] = &mvm->nvm_data->bands[NL80211_BAND_2GHZ]; if (mvm->nvm_data->bands[NL80211_BAND_5GHZ].n_channels) { hw->wiphy->bands[NL80211_BAND_5GHZ] = &mvm->nvm_data->bands[NL80211_BAND_5GHZ]; if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BEAMFORMER) && fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_LQ_SS_PARAMS)) hw->wiphy->bands[NL80211_BAND_5GHZ]->vht_cap.cap |= IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE; } if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_PSC_CHAN_SUPPORT) && mvm->nvm_data->bands[NL80211_BAND_6GHZ].n_channels) hw->wiphy->bands[NL80211_BAND_6GHZ] = &mvm->nvm_data->bands[NL80211_BAND_6GHZ]; hw->wiphy->hw_version = mvm->trans->hw_id; if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM) hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT; else hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; hw->wiphy->max_sched_scan_reqs = 1; hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX; hw->wiphy->max_match_sets = iwl_umac_scan_get_max_profiles(mvm->fw); /* we create the 802.11 header and zero length SSID IE. */ hw->wiphy->max_sched_scan_ie_len = SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2; hw->wiphy->max_sched_scan_plans = IWL_MAX_SCHED_SCAN_PLANS; hw->wiphy->max_sched_scan_plan_interval = U16_MAX; /* * the firmware uses u8 for num of iterations, but 0xff is saved for * infinite loop, so the maximum number of iterations is actually 254. 
*/ hw->wiphy->max_sched_scan_plan_iterations = 254; hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN | NL80211_FEATURE_LOW_PRIORITY_SCAN | NL80211_FEATURE_P2P_GO_OPPPS | NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE | NL80211_FEATURE_DYNAMIC_SMPS | NL80211_FEATURE_STATIC_SMPS | NL80211_FEATURE_SUPPORTS_WMM_ADMISSION; if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT)) hw->wiphy->features |= NL80211_FEATURE_TX_POWER_INSERTION; if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT)) hw->wiphy->features |= NL80211_FEATURE_QUIET; if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) hw->wiphy->features |= NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES; if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT)) hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES; if (iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_KEK_KCK_MATERIAL, IWL_FW_CMD_VER_UNKNOWN) == 3) hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK; if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_SCAN_TSF_REPORT)) { wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_SCAN_START_TIME); wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_BSS_PARENT_TSF); } if (iwl_mvm_is_oce_supported(mvm)) { u8 scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, SCAN_REQ_UMAC, 0); wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_ACCEPT_BCAST_PROBE_RESP); wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_FILS_MAX_CHANNEL_TIME); wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE); /* Old firmware also supports probe deferral and suppression */ if (scan_ver < 15) wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION); } if (mvm->nvm_data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax) { hw->wiphy->iftype_ext_capab = he_iftypes_ext_capa; hw->wiphy->num_iftype_ext_capab = ARRAY_SIZE(he_iftypes_ext_capa); ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID); ieee80211_hw_set(hw, SUPPORTS_ONLY_HE_MULTI_BSSID); } mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD; #ifdef CONFIG_PM_SLEEP if ((unified || mvm->fw->img[IWL_UCODE_WOWLAN].num_sec) && mvm->trans->ops->d3_suspend && mvm->trans->ops->d3_resume && device_can_wakeup(mvm->trans->dev)) { mvm->wowlan.flags |= WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT | WIPHY_WOWLAN_EAP_IDENTITY_REQ | WIPHY_WOWLAN_RFKILL_RELEASE | WIPHY_WOWLAN_NET_DETECT; mvm->wowlan.flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY | WIPHY_WOWLAN_GTK_REKEY_FAILURE | WIPHY_WOWLAN_4WAY_HANDSHAKE; mvm->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS; mvm->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN; mvm->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN; mvm->wowlan.max_nd_match_sets = iwl_umac_scan_get_max_profiles(mvm->fw); hw->wiphy->wowlan = &mvm->wowlan; } #endif -#ifdef CONFIG_IWLWIFI_BCAST_FILTERING - /* assign default bcast filtering configuration */ - mvm->bcast_filters = iwl_mvm_default_bcast_filters; -#endif - ret = iwl_mvm_leds_init(mvm); if (ret) return ret; if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TDLS_SUPPORT)) { IWL_DEBUG_TDLS(mvm, "TDLS supported\n"); hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS; ieee80211_hw_set(hw, TDLS_WIDER_BW); } if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH)) { IWL_DEBUG_TDLS(mvm, "TDLS channel switch supported\n"); hw->wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH; } hw->netdev_features |= mvm->cfg->features; if (!iwl_mvm_is_csum_supported(mvm)) hw->netdev_features &= 
~IWL_CSUM_NETIF_FLAGS_MASK; if (mvm->cfg->vht_mu_mimo_supported) wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER); if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_PROTECTED_TWT)) wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_PROTECTED_TWT); iwl_mvm_vendor_cmds_register(mvm); hw->wiphy->available_antennas_tx = iwl_mvm_get_valid_tx_ant(mvm); hw->wiphy->available_antennas_rx = iwl_mvm_get_valid_rx_ant(mvm); ret = ieee80211_register_hw(mvm->hw); if (ret) { iwl_mvm_leds_exit(mvm); } return ret; } static void iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb, struct ieee80211_sta *sta) { if (likely(sta)) { if (likely(iwl_mvm_tx_skb_sta(mvm, skb, sta) == 0)) return; } else { if (likely(iwl_mvm_tx_skb_non_sta(mvm, skb) == 0)) return; } ieee80211_free_txskb(mvm->hw, skb); } static void iwl_mvm_mac_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, struct sk_buff *skb) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct ieee80211_sta *sta = control->sta; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_hdr *hdr = (void *)skb->data; bool offchannel = IEEE80211_SKB_CB(skb)->flags & IEEE80211_TX_CTL_TX_OFFCHAN; if (iwl_mvm_is_radio_killed(mvm)) { IWL_DEBUG_DROP(mvm, "Dropping - RF/CT KILL\n"); goto drop; } if (offchannel && !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status) && !test_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) goto drop; /* * bufferable MMPDUs or MMPDUs on STA interfaces come via TXQs * so we treat the others as broadcast */ if (ieee80211_is_mgmt(hdr->frame_control)) sta = NULL; /* If there is no sta, and it's not offchannel - send through AP */ if (!sta && info->control.vif->type == NL80211_IFTYPE_STATION && !offchannel) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(info->control.vif); u8 ap_sta_id = READ_ONCE(mvmvif->ap_sta_id); if (ap_sta_id < mvm->fw->ucode_capa.num_stations) { /* mac80211 holds rcu read lock */ sta = rcu_dereference(mvm->fw_id_to_mac_id[ap_sta_id]); if (IS_ERR_OR_NULL(sta)) goto drop; } } iwl_mvm_tx_skb(mvm, skb, sta); return; drop: ieee80211_free_txskb(hw, skb); } void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq); struct sk_buff *skb = NULL; /* * No need for threads to be pending here, they can leave the first * taker all the work. * * mvmtxq->tx_request logic: * * If 0, no one is currently TXing, set to 1 to indicate current thread * will now start TX and other threads should quit. * * If 1, another thread is currently TXing, set to 2 to indicate to * that thread that there was another request. Since that request may * have raced with the check whether the queue is empty, the TXing * thread should check the queue's status one more time before leaving. * This check is done in order to not leave any TX hanging in the queue * until the next TX invocation (which may not even happen). * * If 2, another thread is currently TXing, and it will already double * check the queue, so do nothing. 
*/ if (atomic_fetch_add_unless(&mvmtxq->tx_request, 1, 2)) return; rcu_read_lock(); do { while (likely(!mvmtxq->stopped && !test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status))) { skb = ieee80211_tx_dequeue(hw, txq); if (!skb) { if (txq->sta) IWL_DEBUG_TX(mvm, "TXQ of sta %pM tid %d is now empty\n", txq->sta->addr, txq->tid); break; } iwl_mvm_tx_skb(mvm, skb, txq->sta); } } while (atomic_dec_return(&mvmtxq->tx_request)); rcu_read_unlock(); } static void iwl_mvm_mac_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq); /* * Please note that racing is handled very carefully here: * mvmtxq->txq_id is updated during allocation, and mvmtxq->list is * deleted afterwards. * This means that if: * mvmtxq->txq_id != INVALID_QUEUE && list_empty(&mvmtxq->list): * queue is allocated and we can TX. * mvmtxq->txq_id != INVALID_QUEUE && !list_empty(&mvmtxq->list): * a race, should defer the frame. * mvmtxq->txq_id == INVALID_QUEUE && list_empty(&mvmtxq->list): * need to allocate the queue and defer the frame. * mvmtxq->txq_id == INVALID_QUEUE && !list_empty(&mvmtxq->list): * queue is already scheduled for allocation, no need to allocate, * should defer the frame. */ /* If the queue is allocated TX and return. */ if (!txq->sta || mvmtxq->txq_id != IWL_MVM_INVALID_QUEUE) { /* * Check that list is empty to avoid a race where txq_id is * already updated, but the queue allocation work wasn't * finished */ if (unlikely(txq->sta && !list_empty(&mvmtxq->list))) return; iwl_mvm_mac_itxq_xmit(hw, txq); return; } /* The list is being deleted only after the queue is fully allocated. */ if (!list_empty(&mvmtxq->list)) return; list_add_tail(&mvmtxq->list, &mvm->add_stream_txqs); schedule_work(&mvm->add_stream_wk); } #define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...) 
\ do { \ if (!(le16_to_cpu(_tid_bm) & BIT(_tid))) \ break; \ iwl_fw_dbg_collect_trig(&(_mvm)->fwrt, _trig, _fmt); \ } while (0) static void iwl_mvm_ampdu_check_trigger(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u16 tid, u16 rx_ba_ssn, enum ieee80211_ampdu_mlme_action action) { struct iwl_fw_dbg_trigger_tlv *trig; struct iwl_fw_dbg_trigger_ba *ba_trig; trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), FW_DBG_TRIGGER_BA); if (!trig) return; ba_trig = (void *)trig->data; switch (action) { case IEEE80211_AMPDU_TX_OPERATIONAL: { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_start, tid, "TX AGG START: MAC %pM tid %d ssn %d\n", sta->addr, tid, tid_data->ssn); break; } case IEEE80211_AMPDU_TX_STOP_CONT: CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_stop, tid, "TX AGG STOP: MAC %pM tid %d\n", sta->addr, tid); break; case IEEE80211_AMPDU_RX_START: CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_start, tid, "RX AGG START: MAC %pM tid %d ssn %d\n", sta->addr, tid, rx_ba_ssn); break; case IEEE80211_AMPDU_RX_STOP: CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_stop, tid, "RX AGG STOP: MAC %pM tid %d\n", sta->addr, tid); break; default: break; } } static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_ampdu_params *params) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; struct ieee80211_sta *sta = params->sta; enum ieee80211_ampdu_mlme_action action = params->action; u16 tid = params->tid; u16 *ssn = ¶ms->ssn; u16 buf_size = params->buf_size; bool amsdu = params->amsdu; u16 timeout = params->timeout; IWL_DEBUG_HT(mvm, "A-MPDU action on addr %pM tid %d: action %d\n", sta->addr, tid, action); if (!(mvm->nvm_data->sku_cap_11n_enable)) return -EACCES; mutex_lock(&mvm->mutex); switch (action) { case IEEE80211_AMPDU_RX_START: if (iwl_mvm_vif_from_mac80211(vif)->ap_sta_id == iwl_mvm_sta_from_mac80211(sta)->sta_id) { struct iwl_mvm_vif *mvmvif; u16 macid = iwl_mvm_vif_from_mac80211(vif)->id; struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[macid]; mdata->opened_rx_ba_sessions = true; mvmvif = iwl_mvm_vif_from_mac80211(vif); cancel_delayed_work(&mvmvif->uapsd_nonagg_detected_wk); } if (!iwl_enable_rx_ampdu()) { ret = -EINVAL; break; } ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, *ssn, true, buf_size, timeout); break; case IEEE80211_AMPDU_RX_STOP: ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false, buf_size, timeout); break; case IEEE80211_AMPDU_TX_START: if (!iwl_enable_tx_ampdu()) { ret = -EINVAL; break; } ret = iwl_mvm_sta_tx_agg_start(mvm, vif, sta, tid, ssn); break; case IEEE80211_AMPDU_TX_STOP_CONT: ret = iwl_mvm_sta_tx_agg_stop(mvm, vif, sta, tid); break; case IEEE80211_AMPDU_TX_STOP_FLUSH: case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: ret = iwl_mvm_sta_tx_agg_flush(mvm, vif, sta, tid); break; case IEEE80211_AMPDU_TX_OPERATIONAL: ret = iwl_mvm_sta_tx_agg_oper(mvm, vif, sta, tid, buf_size, amsdu); break; default: WARN_ON_ONCE(1); ret = -EINVAL; break; } if (!ret) { u16 rx_ba_ssn = 0; if (action == IEEE80211_AMPDU_RX_START) rx_ba_ssn = *ssn; iwl_mvm_ampdu_check_trigger(mvm, vif, sta, tid, rx_ba_ssn, action); } mutex_unlock(&mvm->mutex); return ret; } static void iwl_mvm_cleanup_iterator(void *data, u8 *mac, struct ieee80211_vif *vif) { struct iwl_mvm *mvm = data; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); mvmvif->uploaded = false; mvmvif->ap_sta_id = IWL_MVM_INVALID_STA; 
spin_lock_bh(&mvm->time_event_lock); iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data); spin_unlock_bh(&mvm->time_event_lock); mvmvif->phy_ctxt = NULL; memset(&mvmvif->bf_data, 0, sizeof(mvmvif->bf_data)); memset(&mvmvif->probe_resp_data, 0, sizeof(mvmvif->probe_resp_data)); } static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) { iwl_mvm_stop_device(mvm); mvm->cur_aid = 0; mvm->scan_status = 0; mvm->ps_disabled = false; mvm->rfkill_safe_init_done = false; /* just in case one was running */ iwl_mvm_cleanup_roc_te(mvm); ieee80211_remain_on_channel_expired(mvm->hw); iwl_mvm_ftm_restart(mvm); /* * cleanup all interfaces, even inactive ones, as some might have * gone down during the HW restart */ ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_cleanup_iterator, mvm); mvm->p2p_device_vif = NULL; iwl_mvm_reset_phy_ctxts(mvm); memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table)); memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif)); memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd)); ieee80211_wake_queues(mvm->hw); mvm->vif_count = 0; mvm->rx_ba_sessions = 0; mvm->fwrt.dump.conf = FW_DBG_INVALID; mvm->monitor_on = false; /* keep statistics ticking */ iwl_mvm_accu_radio_stats(mvm); } int __iwl_mvm_mac_start(struct iwl_mvm *mvm) { int ret; lockdep_assert_held(&mvm->mutex); ret = iwl_mvm_mei_get_ownership(mvm); if (ret) return ret; if (mvm->mei_nvm_data) { /* We got the NIC, we can now free the MEI NVM data */ kfree(mvm->mei_nvm_data); mvm->mei_nvm_data = NULL; /* * We can't free the nvm_data we allocated based on the SAP * data because we registered to cfg80211 with the channels * allocated on mvm->nvm_data. Keep a pointer in temp_nvm_data * just in order to be able free it later. * NULLify nvm_data so that we will read the NVM from the * firmware this time. */ mvm->temp_nvm_data = mvm->nvm_data; mvm->nvm_data = NULL; } if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status)) { /* * Now convert the HW_RESTART_REQUESTED flag to IN_HW_RESTART * so later code will - from now on - see that we're doing it. */ set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); clear_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status); /* Clean up some internal and mac80211 state on restart */ iwl_mvm_restart_cleanup(mvm); } ret = iwl_mvm_up(mvm); iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_POST_INIT, NULL); iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_PERIODIC, NULL); mvm->last_reset_or_resume_time_jiffies = jiffies; if (ret && test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { /* Something went wrong - we need to finish some cleanup * that normally iwl_mvm_mac_restart_complete() below * would do. 
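 *
 * Editorial illustration (not part of the driver): the restart flags used
 * here follow a small hand-off -- "requested" is converted into
 * "in progress" before bring-up, and "in progress" is cleared either by
 * the restart-complete path or, on failure, right here.  The toy_* names
 * below are invented; C11 atomics stand in for the kernel bitops.
 */
#if 0 /* stand-alone sketch, not driver code */
#include <stdatomic.h>

enum {
	TOY_RESTART_REQUESTED	= 1u << 0,
	TOY_IN_RESTART		= 1u << 1,
};

static atomic_uint toy_status;

static int toy_fw_up(void)
{
	return 0;	/* pretend the firmware came up fine */
}

static int toy_start(void)
{
	int ret;

	if (atomic_load(&toy_status) & TOY_RESTART_REQUESTED) {
		/* convert "requested" into "in progress" before bring-up */
		atomic_fetch_or(&toy_status, TOY_IN_RESTART);
		atomic_fetch_and(&toy_status, ~TOY_RESTART_REQUESTED);
	}

	ret = toy_fw_up();

	/* on failure the restart-complete path never runs, so drop the
	 * "in progress" bit ourselves */
	if (ret && (atomic_load(&toy_status) & TOY_IN_RESTART))
		atomic_fetch_and(&toy_status, ~TOY_IN_RESTART);

	return ret;
}
#endif
/*
 * (end of editorial sketch; the original driver comment closes below)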
*/ clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); } return ret; } static int iwl_mvm_mac_start(struct ieee80211_hw *hw) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; int retry, max_retry = 0; mutex_lock(&mvm->mutex); /* we are starting the mac not in error flow, and restart is enabled */ if (!test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status) && iwlwifi_mod_params.fw_restart) { max_retry = IWL_MAX_INIT_RETRY; /* * This will prevent mac80211 recovery flows to trigger during * init failures */ set_bit(IWL_MVM_STATUS_STARTING, &mvm->status); } for (retry = 0; retry <= max_retry; retry++) { ret = __iwl_mvm_mac_start(mvm); if (!ret) break; IWL_ERR(mvm, "mac start retry %d\n", retry); } clear_bit(IWL_MVM_STATUS_STARTING, &mvm->status); mutex_unlock(&mvm->mutex); iwl_mvm_mei_set_sw_rfkill_state(mvm); return ret; } static void iwl_mvm_restart_complete(struct iwl_mvm *mvm) { int ret; mutex_lock(&mvm->mutex); clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); ret = iwl_mvm_update_quotas(mvm, true, NULL); if (ret) IWL_ERR(mvm, "Failed to update quotas after restart (%d)\n", ret); iwl_mvm_send_recovery_cmd(mvm, ERROR_RECOVERY_END_OF_RECOVERY); /* * If we have TDLS peers, remove them. We don't know the last seqno/PN * of packets the FW sent out, so we must reconnect. */ iwl_mvm_teardown_tdls_peers(mvm); mutex_unlock(&mvm->mutex); } static void iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw, enum ieee80211_reconfig_type reconfig_type) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); switch (reconfig_type) { case IEEE80211_RECONFIG_TYPE_RESTART: iwl_mvm_restart_complete(mvm); break; case IEEE80211_RECONFIG_TYPE_SUSPEND: break; } } void __iwl_mvm_mac_stop(struct iwl_mvm *mvm) { lockdep_assert_held(&mvm->mutex); iwl_mvm_ftm_initiator_smooth_stop(mvm); /* firmware counters are obviously reset now, but we shouldn't * partially track so also clear the fw_reset_accu counters. */ memset(&mvm->accu_radio_stats, 0, sizeof(mvm->accu_radio_stats)); /* async_handlers_wk is now blocked */ if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) < 12) iwl_mvm_rm_aux_sta(mvm); iwl_mvm_stop_device(mvm); iwl_mvm_async_handlers_purge(mvm); /* async_handlers_list is empty and will stay empty: HW is stopped */ /* * Clear IN_HW_RESTART and HW_RESTART_REQUESTED flag when stopping the * hw (as restart_complete() won't be called in this case) and mac80211 * won't execute the restart. * But make sure to cleanup interfaces that have gone down before/during * HW restart was requested. */ if (test_and_clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) || test_and_clear_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status)) ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_cleanup_iterator, mvm); /* We shouldn't have any UIDs still set. Loop over all the UIDs to * make sure there's nothing left there and warn if any is found. */ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) { int i; for (i = 0; i < mvm->max_scans; i++) { if (WARN_ONCE(mvm->scan_uid_status[i], "UMAC scan UID %d status was not cleaned\n", i)) mvm->scan_uid_status[i] = 0; } } } static void iwl_mvm_mac_stop(struct ieee80211_hw *hw) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); flush_work(&mvm->async_handlers_wk); flush_work(&mvm->add_stream_wk); /* * Lock and clear the firmware running bit here already, so that * new commands coming in elsewhere, e.g. from debugfs, will not * be able to proceed. 
This is important here because one of those * debugfs files causes the firmware dump to be triggered, and if we * don't stop debugfs accesses before canceling that it could be * retriggered after we flush it but before we've cleared the bit. */ clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status); cancel_delayed_work_sync(&mvm->cs_tx_unblock_dwork); cancel_delayed_work_sync(&mvm->scan_timeout_dwork); /* * The work item could be running or queued if the * ROC time event stops just as we get here. */ flush_work(&mvm->roc_done_wk); iwl_mvm_mei_set_sw_rfkill_state(mvm); mutex_lock(&mvm->mutex); __iwl_mvm_mac_stop(mvm); mutex_unlock(&mvm->mutex); /* * The worker might have been waiting for the mutex, let it run and * discover that its list is now empty. */ cancel_work_sync(&mvm->async_handlers_wk); } static struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm) { u16 i; lockdep_assert_held(&mvm->mutex); for (i = 0; i < NUM_PHY_CTX; i++) if (!mvm->phy_ctxts[i].ref) return &mvm->phy_ctxts[i]; IWL_ERR(mvm, "No available PHY context\n"); return NULL; } static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif, s16 tx_power) { u32 cmd_id = REDUCE_TX_POWER_CMD; int len; struct iwl_dev_tx_power_cmd cmd = { .common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC), .common.mac_context_id = cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id), .common.pwr_restriction = cpu_to_le16(8 * tx_power), }; u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, IWL_FW_CMD_VER_UNKNOWN); if (tx_power == IWL_DEFAULT_MAX_TX_POWER) cmd.common.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER); - if (cmd_ver == 6) + if (cmd_ver == 7) + len = sizeof(cmd.v7); + else if (cmd_ver == 6) len = sizeof(cmd.v6); else if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_REDUCE_TX_POWER)) len = sizeof(cmd.v5); else if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TX_POWER_ACK)) len = sizeof(cmd.v4); else len = sizeof(cmd.v3); /* all structs have the same common part, add it */ len += sizeof(cmd.common); return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, &cmd); } static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; mutex_lock(&mvm->mutex); if (vif->type == NL80211_IFTYPE_STATION) { struct iwl_mvm_sta *mvmsta; mvmvif->csa_bcn_pending = false; mvmsta = iwl_mvm_sta_from_staid_protected(mvm, mvmvif->ap_sta_id); if (WARN_ON(!mvmsta)) { ret = -EIO; goto out_unlock; } iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false); iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) { ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0); if (ret) goto out_unlock; iwl_mvm_stop_session_protection(mvm, vif); } } mvmvif->ps_disabled = false; ret = iwl_mvm_power_update_ps(mvm); out_unlock: if (mvmvif->csa_failed) ret = -EIO; mutex_unlock(&mvm->mutex); return ret; } static void iwl_mvm_abort_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_chan_switch_te_cmd cmd = { .mac_id = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)), .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE), }; /* * In the new flow since FW is in charge of the timing, * if driver has canceled the channel switch he will receive the * CHANNEL_SWITCH_START_NOTIF notification from FW 
and then cancel it */ if (iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP, CHANNEL_SWITCH_ERROR_NOTIF, 0)) return; IWL_DEBUG_MAC80211(mvm, "Abort CSA on mac %d\n", mvmvif->id); mutex_lock(&mvm->mutex); if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) iwl_mvm_remove_csa_period(mvm, vif); else WARN_ON(iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, CHANNEL_SWITCH_TIME_EVENT_CMD), 0, sizeof(cmd), &cmd)); mvmvif->csa_failed = true; mutex_unlock(&mvm->mutex); iwl_mvm_post_channel_switch(hw, vif); } static void iwl_mvm_channel_switch_disconnect_wk(struct work_struct *wk) { struct iwl_mvm_vif *mvmvif; struct ieee80211_vif *vif; mvmvif = container_of(wk, struct iwl_mvm_vif, csa_work.work); vif = container_of((void *)mvmvif, struct ieee80211_vif, drv_priv); /* Trigger disconnect (should clear the CSA state) */ ieee80211_chswitch_done(vif, false); } static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ret; mvmvif->mvm = mvm; RCU_INIT_POINTER(mvmvif->probe_resp_data, NULL); /* * Not much to do here. The stack will not allow interface * types or combinations that we didn't advertise, so we * don't really have to check the types. */ mutex_lock(&mvm->mutex); /* make sure that beacon statistics don't go backwards with FW reset */ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) mvmvif->beacon_stats.accu_num_beacons += mvmvif->beacon_stats.num_beacons; /* Allocate resources for the MAC context, and add it to the fw */ ret = iwl_mvm_mac_ctxt_init(mvm, vif); if (ret) goto out_unlock; rcu_assign_pointer(mvm->vif_id_to_mac[mvmvif->id], vif); /* Counting number of interfaces is needed for legacy PM */ if (vif->type != NL80211_IFTYPE_P2P_DEVICE) mvm->vif_count++; /* * The AP binding flow can be done only after the beacon * template is configured (which happens only in the mac80211 * start_ap() flow), and adding the broadcast station can happen * only after the binding. * In addition, since modifying the MAC before adding a bcast * station is not allowed by the FW, delay the adding of MAC context to * the point where we can also add the bcast station. * In short: there's not much we can do at this point, other than * allocating resources :) */ if (vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_ADHOC) { ret = iwl_mvm_alloc_bcast_sta(mvm, vif); if (ret) { IWL_ERR(mvm, "Failed to allocate bcast sta\n"); goto out_release; } /* * Only queue for this station is the mcast queue, * which shouldn't be in TFD mask anyway */ ret = iwl_mvm_allocate_int_sta(mvm, &mvmvif->mcast_sta, 0, vif->type, IWL_STA_MULTICAST); if (ret) goto out_release; iwl_mvm_vif_dbgfs_register(mvm, vif); goto out_unlock; } mvmvif->features |= hw->netdev_features; ret = iwl_mvm_mac_ctxt_add(mvm, vif); if (ret) goto out_release; ret = iwl_mvm_power_update_mac(mvm); if (ret) goto out_remove_mac; /* beacon filtering */ ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0); if (ret) goto out_remove_mac; if (!mvm->bf_allowed_vif && vif->type == NL80211_IFTYPE_STATION && !vif->p2p) { mvm->bf_allowed_vif = mvmvif; vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER | IEEE80211_VIF_SUPPORTS_CQM_RSSI; } /* * P2P_DEVICE interface does not have a channel context assigned to it, * so a dedicated PHY context is allocated to it and the corresponding * MAC context is bound to it at this stage. 
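 *
 * Editorial illustration (not part of the driver): the P2P_DEVICE setup
 * below acquires several resources in order (PHY context, binding,
 * broadcast station) and unwinds them in reverse order through goto
 * labels when a later step fails.  All toy_* helpers and labels are
 * invented for the sketch.
 */
#if 0 /* stand-alone sketch, not driver code */
static int toy_alloc_phy(void)	{ return 0; }
static void toy_free_phy(void)	{ }
static int toy_bind(void)	{ return 0; }
static void toy_unbind(void)	{ }
static int toy_add_sta(void)	{ return 0; }

static int toy_setup_p2p_device(void)
{
	int ret;

	ret = toy_alloc_phy();
	if (ret)
		return ret;

	ret = toy_bind();
	if (ret)
		goto out_free_phy;

	ret = toy_add_sta();
	if (ret)
		goto out_unbind;

	return 0;		/* fully set up */

out_unbind:
	toy_unbind();
out_free_phy:
	toy_free_phy();
	return ret;
}
#endif
/*
 * (end of editorial sketch; the original driver comment closes below)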
*/ if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { mvmvif->phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm); if (!mvmvif->phy_ctxt) { ret = -ENOSPC; goto out_free_bf; } iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt); ret = iwl_mvm_binding_add_vif(mvm, vif); if (ret) goto out_unref_phy; ret = iwl_mvm_add_p2p_bcast_sta(mvm, vif); if (ret) goto out_unbind; /* Save a pointer to p2p device vif, so it can later be used to * update the p2p device MAC when a GO is started/stopped */ mvm->p2p_device_vif = vif; } iwl_mvm_tcm_add_vif(mvm, vif); INIT_DELAYED_WORK(&mvmvif->csa_work, iwl_mvm_channel_switch_disconnect_wk); if (vif->type == NL80211_IFTYPE_MONITOR) mvm->monitor_on = true; iwl_mvm_vif_dbgfs_register(mvm, vif); if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && vif->type == NL80211_IFTYPE_STATION && !vif->p2p && !mvm->csme_vif && mvm->mei_registered) { iwl_mei_set_nic_info(vif->addr, mvm->nvm_data->hw_addr); iwl_mei_set_netdev(ieee80211_vif_to_wdev(vif)->netdev); mvm->csme_vif = vif; } goto out_unlock; out_unbind: iwl_mvm_binding_remove_vif(mvm, vif); out_unref_phy: iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt); out_free_bf: if (mvm->bf_allowed_vif == mvmvif) { mvm->bf_allowed_vif = NULL; vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER | IEEE80211_VIF_SUPPORTS_CQM_RSSI); } out_remove_mac: mvmvif->phy_ctxt = NULL; iwl_mvm_mac_ctxt_remove(mvm, vif); out_release: if (vif->type != NL80211_IFTYPE_P2P_DEVICE) mvm->vif_count--; out_unlock: mutex_unlock(&mvm->mutex); return ret; } static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { /* * Flush the ROC worker which will flush the OFFCHANNEL queue. * We assume here that all the packets sent to the OFFCHANNEL * queue are sent in ROC session. */ flush_work(&mvm->roc_done_wk); } } static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_probe_resp_data *probe_data; iwl_mvm_prepare_mac_removal(mvm, vif); if (!(vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_ADHOC)) iwl_mvm_tcm_rm_vif(mvm, vif); mutex_lock(&mvm->mutex); if (vif == mvm->csme_vif) { iwl_mei_set_netdev(NULL); mvm->csme_vif = NULL; } probe_data = rcu_dereference_protected(mvmvif->probe_resp_data, lockdep_is_held(&mvm->mutex)); RCU_INIT_POINTER(mvmvif->probe_resp_data, NULL); if (probe_data) kfree_rcu(probe_data, rcu_head); if (mvm->bf_allowed_vif == mvmvif) { mvm->bf_allowed_vif = NULL; vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER | IEEE80211_VIF_SUPPORTS_CQM_RSSI); } if (vif->bss_conf.ftm_responder) memset(&mvm->ftm_resp_stats, 0, sizeof(mvm->ftm_resp_stats)); iwl_mvm_vif_dbgfs_clean(mvm, vif); /* * For AP/GO interface, the tear down of the resources allocated to the * interface is be handled as part of the stop_ap flow. 
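 *
 * Editorial illustration (not part of the driver): the probe_resp_data
 * teardown a few lines above follows the usual pattern for retiring an
 * RCU-published object whose writers are serialised by a mutex.  The
 * toy_* names are invented, and the sketch assumes a kernel build
 * environment for the RCU and mutex primitives.
 */
#if 0 /* kernel-style sketch, not driver code */
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct toy_obj {
	struct rcu_head rcu_head;
	/* ... payload ... */
};

static struct toy_obj __rcu *toy_ptr;
static DEFINE_MUTEX(toy_lock);

static void toy_retire(void)
{
	struct toy_obj *old;

	mutex_lock(&toy_lock);
	/* writers hold toy_lock, so rcu_dereference_protected() suffices */
	old = rcu_dereference_protected(toy_ptr, lockdep_is_held(&toy_lock));
	RCU_INIT_POINTER(toy_ptr, NULL);
	if (old)
		kfree_rcu(old, rcu_head);	/* freed after a grace period */
	mutex_unlock(&toy_lock);
}
#endif
/*
 * (end of editorial sketch; the original driver comment closes below)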
*/ if (vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_ADHOC) { #ifdef CONFIG_NL80211_TESTMODE if (vif == mvm->noa_vif) { mvm->noa_vif = NULL; mvm->noa_duration = 0; } #endif iwl_mvm_dealloc_int_sta(mvm, &mvmvif->mcast_sta); iwl_mvm_dealloc_bcast_sta(mvm, vif); goto out_release; } if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { mvm->p2p_device_vif = NULL; iwl_mvm_rm_p2p_bcast_sta(mvm, vif); iwl_mvm_binding_remove_vif(mvm, vif); iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt); mvmvif->phy_ctxt = NULL; } if (mvm->vif_count && vif->type != NL80211_IFTYPE_P2P_DEVICE) mvm->vif_count--; iwl_mvm_power_update_mac(mvm); iwl_mvm_mac_ctxt_remove(mvm, vif); RCU_INIT_POINTER(mvm->vif_id_to_mac[mvmvif->id], NULL); if (vif->type == NL80211_IFTYPE_MONITOR) mvm->monitor_on = false; out_release: mutex_unlock(&mvm->mutex); } static int iwl_mvm_mac_config(struct ieee80211_hw *hw, u32 changed) { return 0; } struct iwl_mvm_mc_iter_data { struct iwl_mvm *mvm; int port_id; }; static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif) { struct iwl_mvm_mc_iter_data *data = _data; struct iwl_mvm *mvm = data->mvm; struct iwl_mcast_filter_cmd *cmd = mvm->mcast_filter_cmd; struct iwl_host_cmd hcmd = { .id = MCAST_FILTER_CMD, .flags = CMD_ASYNC, .dataflags[0] = IWL_HCMD_DFL_NOCOPY, }; int ret, len; /* if we don't have free ports, mcast frames will be dropped */ if (WARN_ON_ONCE(data->port_id >= MAX_PORT_ID_NUM)) return; if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc) return; cmd->port_id = data->port_id++; memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN); len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4); hcmd.len[0] = len; hcmd.data[0] = cmd; ret = iwl_mvm_send_cmd(mvm, &hcmd); if (ret) IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret); } static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm) { struct iwl_mvm_mc_iter_data iter_data = { .mvm = mvm, }; int ret; lockdep_assert_held(&mvm->mutex); if (WARN_ON_ONCE(!mvm->mcast_filter_cmd)) return; ieee80211_iterate_active_interfaces_atomic( mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_mc_iface_iterator, &iter_data); /* * Send a (synchronous) ech command so that we wait for the * multiple asynchronous MCAST_FILTER_CMD commands sent by * the interface iterator. Otherwise, we might get here over * and over again (by userspace just sending a lot of these) * and the CPU can send them faster than the firmware can * process them. * Note that the CPU is still faster - but with this we'll * actually send fewer commands overall because the CPU will * not schedule the work in mac80211 as frequently if it's * still running when rescheduled (possibly multiple times). 
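 *
 * Editorial illustration (not part of the driver): waiting for a
 * synchronous no-op on a FIFO command queue acts as a barrier for every
 * command submitted before it, which is what the ECHO_CMD below relies
 * on.  Everything named toy_* is invented for the sketch; pthreads stand
 * in for the firmware command queue.
 */
#if 0 /* stand-alone sketch, not driver code */
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct toy_cmd {
	struct toy_cmd *next;
	bool *done;			/* NULL for asynchronous commands */
};

static struct toy_cmd *toy_head;
static struct toy_cmd **toy_tail = &toy_head;
static pthread_mutex_t toy_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t toy_cv = PTHREAD_COND_INITIALIZER;

static void toy_submit(struct toy_cmd *cmd)
{
	pthread_mutex_lock(&toy_lock);
	cmd->next = NULL;
	*toy_tail = cmd;
	toy_tail = &cmd->next;
	pthread_cond_broadcast(&toy_cv);
	pthread_mutex_unlock(&toy_lock);
}

static void *toy_worker(void *arg)
{
	pthread_mutex_lock(&toy_lock);
	for (;;) {
		struct toy_cmd *cmd = toy_head;

		if (!cmd) {
			pthread_cond_wait(&toy_cv, &toy_lock);
			continue;
		}
		toy_head = cmd->next;
		if (!toy_head)
			toy_tail = &toy_head;
		/* ... handle the command here ... */
		if (cmd->done) {
			*cmd->done = true;
			pthread_cond_broadcast(&toy_cv);
		}
	}
	return arg;
}

static void toy_flush(void)
{
	bool done = false;
	struct toy_cmd echo = { .next = NULL, .done = &done };

	toy_submit(&echo);		/* the "echo" barrier */
	pthread_mutex_lock(&toy_lock);
	while (!done)
		pthread_cond_wait(&toy_cv, &toy_lock);
	pthread_mutex_unlock(&toy_lock);
}
#endif
/*
 * (end of editorial sketch; the original driver comment closes below)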
*/ ret = iwl_mvm_send_cmd_pdu(mvm, ECHO_CMD, 0, 0, NULL); if (ret) IWL_ERR(mvm, "Failed to synchronize multicast groups update\n"); } static u64 iwl_mvm_prepare_multicast(struct ieee80211_hw *hw, struct netdev_hw_addr_list *mc_list) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mcast_filter_cmd *cmd; struct netdev_hw_addr *addr; int addr_count; bool pass_all; int len; addr_count = netdev_hw_addr_list_count(mc_list); pass_all = addr_count > MAX_MCAST_FILTERING_ADDRESSES || IWL_MVM_FW_MCAST_FILTER_PASS_ALL; if (pass_all) addr_count = 0; len = roundup(sizeof(*cmd) + addr_count * ETH_ALEN, 4); cmd = kzalloc(len, GFP_ATOMIC); if (!cmd) return 0; if (pass_all) { cmd->pass_all = 1; #if defined(__linux__) return (u64)(unsigned long)cmd; #elif defined(__FreeBSD__) return (u64)(uintptr_t)cmd; #endif } netdev_hw_addr_list_for_each(addr, mc_list) { #if defined(__linux__) IWL_DEBUG_MAC80211(mvm, "mcast addr (%d): %pM\n", cmd->count, addr->addr); #elif defined(__FreeBSD__) IWL_DEBUG_MAC80211(mvm, "mcast addr (%d): %6D\n", cmd->count, addr->addr, ":"); #endif memcpy(&cmd->addr_list[cmd->count * ETH_ALEN], addr->addr, ETH_ALEN); cmd->count++; } #if defined(__linux__) return (u64)(unsigned long)cmd; #elif defined(__FreeBSD__) return (u64)(uintptr_t)cmd; #endif } static void iwl_mvm_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags, unsigned int *total_flags, u64 multicast) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); #if defined(__linux__) struct iwl_mcast_filter_cmd *cmd = (void *)(unsigned long)multicast; #elif defined(__FreeBSD__) struct iwl_mcast_filter_cmd *cmd = (void *)(uintptr_t)multicast; #endif mutex_lock(&mvm->mutex); /* replace previous configuration */ kfree(mvm->mcast_filter_cmd); mvm->mcast_filter_cmd = cmd; if (!cmd) goto out; if (changed_flags & FIF_ALLMULTI) cmd->pass_all = !!(*total_flags & FIF_ALLMULTI); if (cmd->pass_all) cmd->count = 0; iwl_mvm_recalc_multicast(mvm); out: mutex_unlock(&mvm->mutex); *total_flags = 0; } static void iwl_mvm_config_iface_filter(struct ieee80211_hw *hw, struct ieee80211_vif *vif, unsigned int filter_flags, unsigned int changed_flags) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); /* We support only filter for probe requests */ if (!(changed_flags & FIF_PROBE_REQ)) return; /* Supported only for p2p client interfaces */ if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc || !vif->p2p) return; mutex_lock(&mvm->mutex); iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); mutex_unlock(&mvm->mutex); } -#ifdef CONFIG_IWLWIFI_BCAST_FILTERING -struct iwl_bcast_iter_data { - struct iwl_mvm *mvm; - struct iwl_bcast_filter_cmd *cmd; - u8 current_filter; -}; - -static void -iwl_mvm_set_bcast_filter(struct ieee80211_vif *vif, - const struct iwl_fw_bcast_filter *in_filter, - struct iwl_fw_bcast_filter *out_filter) -{ - struct iwl_fw_bcast_filter_attr *attr; - int i; - - memcpy(out_filter, in_filter, sizeof(*out_filter)); - - for (i = 0; i < ARRAY_SIZE(out_filter->attrs); i++) { - attr = &out_filter->attrs[i]; - - if (!attr->mask) - break; - - switch (attr->reserved1) { - case cpu_to_le16(BC_FILTER_MAGIC_IP): - if (vif->bss_conf.arp_addr_cnt != 1) { - attr->mask = 0; - continue; - } - - attr->val = vif->bss_conf.arp_addr_list[0]; - break; - case cpu_to_le16(BC_FILTER_MAGIC_MAC): - attr->val = *(__be32 *)&vif->addr[2]; - break; - default: - break; - } - attr->reserved1 = 0; - out_filter->num_attrs++; - } -} - -static void iwl_mvm_bcast_filter_iterator(void *_data, u8 *mac, - struct ieee80211_vif *vif) -{ - struct 
iwl_bcast_iter_data *data = _data; - struct iwl_mvm *mvm = data->mvm; - struct iwl_bcast_filter_cmd *cmd = data->cmd; - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_fw_bcast_mac *bcast_mac; - int i; - - if (WARN_ON(mvmvif->id >= ARRAY_SIZE(cmd->macs))) - return; - - bcast_mac = &cmd->macs[mvmvif->id]; - - /* - * enable filtering only for associated stations, but not for P2P - * Clients - */ - if (vif->type != NL80211_IFTYPE_STATION || vif->p2p || - !vif->bss_conf.assoc) - return; - - bcast_mac->default_discard = 1; - - /* copy all configured filters */ - for (i = 0; mvm->bcast_filters[i].attrs[0].mask; i++) { - /* - * Make sure we don't exceed our filters limit. - * if there is still a valid filter to be configured, - * be on the safe side and just allow bcast for this mac. - */ - if (WARN_ON_ONCE(data->current_filter >= - ARRAY_SIZE(cmd->filters))) { - bcast_mac->default_discard = 0; - bcast_mac->attached_filters = 0; - break; - } - - iwl_mvm_set_bcast_filter(vif, - &mvm->bcast_filters[i], - &cmd->filters[data->current_filter]); - - /* skip current filter if it contains no attributes */ - if (!cmd->filters[data->current_filter].num_attrs) - continue; - - /* attach the filter to current mac */ - bcast_mac->attached_filters |= - cpu_to_le16(BIT(data->current_filter)); - - data->current_filter++; - } -} - -bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm, - struct iwl_bcast_filter_cmd *cmd) -{ - struct iwl_bcast_iter_data iter_data = { - .mvm = mvm, - .cmd = cmd, - }; - - if (IWL_MVM_FW_BCAST_FILTER_PASS_ALL) - return false; - - memset(cmd, 0, sizeof(*cmd)); - cmd->max_bcast_filters = ARRAY_SIZE(cmd->filters); - cmd->max_macs = ARRAY_SIZE(cmd->macs); - -#ifdef CONFIG_IWLWIFI_DEBUGFS - /* use debugfs filters/macs if override is configured */ - if (mvm->dbgfs_bcast_filtering.override) { - memcpy(cmd->filters, &mvm->dbgfs_bcast_filtering.cmd.filters, - sizeof(cmd->filters)); - memcpy(cmd->macs, &mvm->dbgfs_bcast_filtering.cmd.macs, - sizeof(cmd->macs)); - return true; - } -#endif - - /* if no filters are configured, do nothing */ - if (!mvm->bcast_filters) - return false; - - /* configure and attach these filters for each associated sta vif */ - ieee80211_iterate_active_interfaces( - mvm->hw, IEEE80211_IFACE_ITER_NORMAL, - iwl_mvm_bcast_filter_iterator, &iter_data); - - return true; -} - -static int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm) -{ - struct iwl_bcast_filter_cmd cmd; - - if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING)) - return 0; - - if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) - return 0; - - return iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0, - sizeof(cmd), &cmd); -} -#else -static inline int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm) -{ - return 0; -} -#endif - static int iwl_mvm_update_mu_groups(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mu_group_mgmt_cmd cmd = {}; memcpy(cmd.membership_status, vif->bss_conf.mu_group.membership, WLAN_MEMBERSHIP_LEN); memcpy(cmd.user_position, vif->bss_conf.mu_group.position, WLAN_USER_POSITION_LEN); return iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(DATA_PATH_GROUP, UPDATE_MU_GROUPS_CMD), 0, sizeof(cmd), &cmd); } static void iwl_mvm_mu_mimo_iface_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif) { if (vif->mu_mimo_owner) { struct iwl_mu_group_mgmt_notif *notif = _data; /* * MU-MIMO Group Id action frame is little endian. We treat * the data received from firmware as if it came from the * action frame, so no conversion is needed. 
*/ ieee80211_update_mu_groups(vif, (u8 *)¬if->membership_status, (u8 *)¬if->user_position); } } void iwl_mvm_mu_mimo_grp_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_mu_group_mgmt_notif *notif = (void *)pkt->data; ieee80211_iterate_active_interfaces_atomic( mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_mu_mimo_iface_iterator, notif); } static u8 iwl_mvm_he_get_ppe_val(u8 *ppe, u8 ppe_pos_bit) { u8 byte_num = ppe_pos_bit / 8; u8 bit_num = ppe_pos_bit % 8; u8 residue_bits; u8 res; if (bit_num <= 5) return (ppe[byte_num] >> bit_num) & (BIT(IEEE80211_PPE_THRES_INFO_PPET_SIZE) - 1); /* * If bit_num > 5, we have to combine bits with next byte. * Calculate how many bits we need to take from current byte (called * here "residue_bits"), and add them to bits from next byte. */ residue_bits = 8 - bit_num; res = (ppe[byte_num + 1] & (BIT(IEEE80211_PPE_THRES_INFO_PPET_SIZE - residue_bits) - 1)) << residue_bits; res += (ppe[byte_num] >> bit_num) & (BIT(residue_bits) - 1); return res; } static void iwl_mvm_parse_ppe(struct iwl_mvm *mvm, struct iwl_he_pkt_ext_v2 *pkt_ext, u8 nss, u8 ru_index_bitmap, u8 *ppe, u8 ppe_pos_bit) { int i; /* * FW currently supports only nss == MAX_HE_SUPP_NSS * * If nss > MAX: we can ignore values we don't support * If nss < MAX: we can set zeros in other streams */ if (nss > MAX_HE_SUPP_NSS) { IWL_INFO(mvm, "Got NSS = %d - trimming to %d\n", nss, MAX_HE_SUPP_NSS); nss = MAX_HE_SUPP_NSS; } for (i = 0; i < nss; i++) { u8 ru_index_tmp = ru_index_bitmap << 1; u8 low_th = IWL_HE_PKT_EXT_NONE, high_th = IWL_HE_PKT_EXT_NONE; u8 bw; for (bw = 0; bw < ARRAY_SIZE(pkt_ext->pkt_ext_qam_th[i]); bw++) { ru_index_tmp >>= 1; if (!(ru_index_tmp & 1)) continue; high_th = iwl_mvm_he_get_ppe_val(ppe, ppe_pos_bit); ppe_pos_bit += IEEE80211_PPE_THRES_INFO_PPET_SIZE; low_th = iwl_mvm_he_get_ppe_val(ppe, ppe_pos_bit); ppe_pos_bit += IEEE80211_PPE_THRES_INFO_PPET_SIZE; pkt_ext->pkt_ext_qam_th[i][bw][0] = low_th; pkt_ext->pkt_ext_qam_th[i][bw][1] = high_th; } } } static void iwl_mvm_set_pkt_ext_from_he_ppe(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct iwl_he_pkt_ext_v2 *pkt_ext) { u8 nss = (sta->he_cap.ppe_thres[0] & IEEE80211_PPE_THRES_NSS_MASK) + 1; u8 *ppe = &sta->he_cap.ppe_thres[0]; u8 ru_index_bitmap = u8_get_bits(*ppe, IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK); /* Starting after PPE header */ u8 ppe_pos_bit = IEEE80211_HE_PPE_THRES_INFO_HEADER_SIZE; iwl_mvm_parse_ppe(mvm, pkt_ext, nss, ru_index_bitmap, ppe, ppe_pos_bit); } static void iwl_mvm_set_pkt_ext_from_nominal_padding(struct iwl_he_pkt_ext_v2 *pkt_ext, u8 nominal_padding, u32 *flags) { int low_th = -1; int high_th = -1; int i; switch (nominal_padding) { case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_0US: low_th = IWL_HE_PKT_EXT_NONE; high_th = IWL_HE_PKT_EXT_NONE; break; case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_8US: low_th = IWL_HE_PKT_EXT_BPSK; high_th = IWL_HE_PKT_EXT_NONE; break; case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US: low_th = IWL_HE_PKT_EXT_NONE; high_th = IWL_HE_PKT_EXT_BPSK; break; } /* Set the PPE thresholds accordingly */ if (low_th >= 0 && high_th >= 0) { for (i = 0; i < MAX_HE_SUPP_NSS; i++) { u8 bw; for (bw = 0; bw < ARRAY_SIZE(pkt_ext->pkt_ext_qam_th[i]); bw++) { pkt_ext->pkt_ext_qam_th[i][bw][0] = low_th; pkt_ext->pkt_ext_qam_th[i][bw][1] = high_th; } } *flags |= STA_CTXT_HE_PACKET_EXT; } } static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u8 sta_id) { struct iwl_mvm_vif *mvmvif = 
iwl_mvm_vif_from_mac80211(vif); struct iwl_he_sta_context_cmd_v3 sta_ctxt_cmd = { .sta_id = sta_id, .tid_limit = IWL_MAX_TID_COUNT, .bss_color = vif->bss_conf.he_bss_color.color, .htc_trig_based_pkt_ext = vif->bss_conf.htc_trig_based_pkt_ext, .frame_time_rts_th = cpu_to_le16(vif->bss_conf.frame_time_rts_th), }; struct iwl_he_sta_context_cmd_v2 sta_ctxt_cmd_v2 = {}; u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, STA_HE_CTXT_CMD); u8 ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 2); int size; struct ieee80211_sta *sta; u32 flags; int i; const struct ieee80211_sta_he_cap *own_he_cap = NULL; struct ieee80211_chanctx_conf *chanctx_conf; const struct ieee80211_supported_band *sband; void *cmd; if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_MBSSID_HE)) ver = 1; switch (ver) { case 1: /* same layout as v2 except some data at the end */ cmd = &sta_ctxt_cmd_v2; size = sizeof(struct iwl_he_sta_context_cmd_v1); break; case 2: cmd = &sta_ctxt_cmd_v2; size = sizeof(struct iwl_he_sta_context_cmd_v2); break; case 3: cmd = &sta_ctxt_cmd; size = sizeof(struct iwl_he_sta_context_cmd_v3); break; default: IWL_ERR(mvm, "bad STA_HE_CTXT_CMD version %d\n", ver); return; } rcu_read_lock(); chanctx_conf = rcu_dereference(vif->chanctx_conf); if (WARN_ON(!chanctx_conf)) { rcu_read_unlock(); return; } sband = mvm->hw->wiphy->bands[chanctx_conf->def.chan->band]; own_he_cap = ieee80211_get_he_iftype_cap(sband, ieee80211_vif_type_p2p(vif)); sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_ctxt_cmd.sta_id]); if (IS_ERR_OR_NULL(sta)) { rcu_read_unlock(); WARN(1, "Can't find STA to configure HE\n"); return; } if (!sta->he_cap.has_he) { rcu_read_unlock(); return; } flags = 0; /* Block 26-tone RU OFDMA transmissions */ if (mvmvif->he_ru_2mhz_block) flags |= STA_CTXT_HE_RU_2MHZ_BLOCK; /* HTC flags */ if (sta->he_cap.he_cap_elem.mac_cap_info[0] & IEEE80211_HE_MAC_CAP0_HTC_HE) sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_SUPPORT); if ((sta->he_cap.he_cap_elem.mac_cap_info[1] & IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION) || (sta->he_cap.he_cap_elem.mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION)) { u8 link_adap = ((sta->he_cap.he_cap_elem.mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION) << 1) + (sta->he_cap.he_cap_elem.mac_cap_info[1] & IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION); if (link_adap == 2) sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_LINK_ADAP_UNSOLICITED); else if (link_adap == 3) sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_LINK_ADAP_BOTH); } if (sta->he_cap.he_cap_elem.mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_BSR) sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_BSR_SUPP); if (sta->he_cap.he_cap_elem.mac_cap_info[3] & IEEE80211_HE_MAC_CAP3_OMI_CONTROL) sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_OMI_SUPP); if (sta->he_cap.he_cap_elem.mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_BQR) sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_BQR_SUPP); /* * Initialize the PPE thresholds to "None" (7), as described in Table * 9-262ac of 80211.ax/D3.0. */ memset(&sta_ctxt_cmd.pkt_ext, IWL_HE_PKT_EXT_NONE, sizeof(sta_ctxt_cmd.pkt_ext)); /* If PPE Thresholds exist, parse them into a FW-familiar format. */ if (sta->he_cap.he_cap_elem.phy_cap_info[6] & IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) { iwl_mvm_set_pkt_ext_from_he_ppe(mvm, sta, &sta_ctxt_cmd.pkt_ext); flags |= STA_CTXT_HE_PACKET_EXT; /* PPE Thresholds doesn't exist - set the API PPE values * according to Common Nominal Packet Padding fiels. 
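 *
 * Editorial illustration (not part of the driver): iwl_mvm_he_get_ppe_val()
 * above reads 3-bit PPET values that may straddle a byte boundary.  The
 * generic form of that extraction is sketched below; toy_get_bits() is an
 * invented name and, for brevity, assumes one spare byte past the field.
 */
#if 0 /* stand-alone sketch, not driver code */
#include <stdint.h>

static uint8_t toy_get_bits(const uint8_t *buf, unsigned int bit_pos,
			    unsigned int width)
{
	unsigned int byte = bit_pos / 8;
	unsigned int shift = bit_pos % 8;
	/* widen to 16 bits so the part spilling into the next byte is kept */
	uint16_t window = buf[byte] | ((uint16_t)buf[byte + 1] << 8);

	return (window >> shift) & ((1u << width) - 1);
}
#endif
/*
 * (end of editorial sketch; the original driver comment closes below)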
*/ } else { u8 nominal_padding = u8_get_bits(sta->he_cap.he_cap_elem.phy_cap_info[9], IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK); if (nominal_padding != IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_RESERVED) iwl_mvm_set_pkt_ext_from_nominal_padding(&sta_ctxt_cmd.pkt_ext, nominal_padding, &flags); } if (sta->he_cap.he_cap_elem.mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP) flags |= STA_CTXT_HE_32BIT_BA_BITMAP; if (sta->he_cap.he_cap_elem.mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_ACK_EN) flags |= STA_CTXT_HE_ACK_ENABLED; rcu_read_unlock(); /* Mark MU EDCA as enabled, unless none detected on some AC */ flags |= STA_CTXT_HE_MU_EDCA_CW; for (i = 0; i < IEEE80211_NUM_ACS; i++) { struct ieee80211_he_mu_edca_param_ac_rec *mu_edca = &mvmvif->queue_params[i].mu_edca_param_rec; u8 ac = iwl_mvm_mac80211_ac_to_ucode_ac(i); if (!mvmvif->queue_params[i].mu_edca) { flags &= ~STA_CTXT_HE_MU_EDCA_CW; break; } sta_ctxt_cmd.trig_based_txf[ac].cwmin = cpu_to_le16(mu_edca->ecw_min_max & 0xf); sta_ctxt_cmd.trig_based_txf[ac].cwmax = cpu_to_le16((mu_edca->ecw_min_max & 0xf0) >> 4); sta_ctxt_cmd.trig_based_txf[ac].aifsn = cpu_to_le16(mu_edca->aifsn); sta_ctxt_cmd.trig_based_txf[ac].mu_time = cpu_to_le16(mu_edca->mu_edca_timer); } if (vif->bss_conf.uora_exists) { flags |= STA_CTXT_HE_TRIG_RND_ALLOC; sta_ctxt_cmd.rand_alloc_ecwmin = vif->bss_conf.uora_ocw_range & 0x7; sta_ctxt_cmd.rand_alloc_ecwmax = (vif->bss_conf.uora_ocw_range >> 3) & 0x7; } if (own_he_cap && !(own_he_cap->he_cap_elem.mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_ACK_EN)) flags |= STA_CTXT_HE_NIC_NOT_ACK_ENABLED; if (vif->bss_conf.nontransmitted) { flags |= STA_CTXT_HE_REF_BSSID_VALID; ether_addr_copy(sta_ctxt_cmd.ref_bssid_addr, vif->bss_conf.transmitter_bssid); sta_ctxt_cmd.max_bssid_indicator = vif->bss_conf.bssid_indicator; sta_ctxt_cmd.bssid_index = vif->bss_conf.bssid_index; sta_ctxt_cmd.ema_ap = vif->bss_conf.ema_ap; sta_ctxt_cmd.profile_periodicity = vif->bss_conf.profile_periodicity; } sta_ctxt_cmd.flags = cpu_to_le32(flags); if (ver < 3) { /* fields before pkt_ext */ BUILD_BUG_ON(offsetof(typeof(sta_ctxt_cmd), pkt_ext) != offsetof(typeof(sta_ctxt_cmd_v2), pkt_ext)); memcpy(&sta_ctxt_cmd_v2, &sta_ctxt_cmd, offsetof(typeof(sta_ctxt_cmd), pkt_ext)); /* pkt_ext */ for (i = 0; i < ARRAY_SIZE(sta_ctxt_cmd_v2.pkt_ext.pkt_ext_qam_th); i++) { u8 bw; for (bw = 0; bw < ARRAY_SIZE(sta_ctxt_cmd_v2.pkt_ext.pkt_ext_qam_th[i]); bw++) { BUILD_BUG_ON(sizeof(sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw]) != sizeof(sta_ctxt_cmd_v2.pkt_ext.pkt_ext_qam_th[i][bw])); memcpy(&sta_ctxt_cmd_v2.pkt_ext.pkt_ext_qam_th[i][bw], &sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw], sizeof(sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw])); } } /* fields after pkt_ext */ BUILD_BUG_ON(sizeof(sta_ctxt_cmd) - offsetofend(typeof(sta_ctxt_cmd), pkt_ext) != sizeof(sta_ctxt_cmd_v2) - offsetofend(typeof(sta_ctxt_cmd_v2), pkt_ext)); memcpy((u8 *)&sta_ctxt_cmd_v2 + offsetofend(typeof(sta_ctxt_cmd_v2), pkt_ext), (u8 *)&sta_ctxt_cmd + offsetofend(typeof(sta_ctxt_cmd), pkt_ext), sizeof(sta_ctxt_cmd) - offsetofend(typeof(sta_ctxt_cmd), pkt_ext)); sta_ctxt_cmd_v2.reserved3 = 0; } if (iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, size, cmd)) IWL_ERR(mvm, "Failed to config FW to work HE!\n"); } static void iwl_mvm_protect_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u32 duration_override) { u32 duration = IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS; u32 min_duration = IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS; if (duration_override > duration) duration = duration_override; /* Try really hard to 
protect the session and hear a beacon * The new session protection command allows us to protect the * session for a much longer time since the firmware will internally * create two events: a 300TU one with a very high priority that * won't be fragmented which should be enough for 99% of the cases, * and another one (which we configure here to be 900TU long) which * will have a slightly lower priority, but more importantly, can be * fragmented so that it'll allow other activities to run. */ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) iwl_mvm_schedule_session_protection(mvm, vif, 900, min_duration, false); else iwl_mvm_protect_session(mvm, vif, duration, min_duration, 500, false); } static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_bss_conf *bss_conf, u32 changes) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ret; /* * Re-calculate the tsf id, as the leader-follower relations depend * on the beacon interval, which was not known when the station * interface was added. */ if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc) { if (vif->bss_conf.he_support && !iwlwifi_mod_params.disable_11ax) iwl_mvm_cfg_he_sta(mvm, vif, mvmvif->ap_sta_id); iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif); } /* Update MU EDCA params */ if (changes & BSS_CHANGED_QOS && mvmvif->associated && bss_conf->assoc && vif->bss_conf.he_support && !iwlwifi_mod_params.disable_11ax) iwl_mvm_cfg_he_sta(mvm, vif, mvmvif->ap_sta_id); /* * If we're not associated yet, take the (new) BSSID before associating * so the firmware knows. If we're already associated, then use the old * BSSID here, and we'll send a cleared one later in the CHANGED_ASSOC * branch for disassociation below. */ if (changes & BSS_CHANGED_BSSID && !mvmvif->associated) memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN); ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, mvmvif->bssid); if (ret) IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr); /* after sending it once, adopt mac80211 data */ memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN); mvmvif->associated = bss_conf->assoc; if (changes & BSS_CHANGED_ASSOC) { if (bss_conf->assoc) { /* clear statistics to get clean beacon counter */ iwl_mvm_request_statistics(mvm, true); memset(&mvmvif->beacon_stats, 0, sizeof(mvmvif->beacon_stats)); /* add quota for this interface */ ret = iwl_mvm_update_quotas(mvm, true, NULL); if (ret) { IWL_ERR(mvm, "failed to update quotas\n"); return; } if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && !fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) { /* * If we're restarting then the firmware will * obviously have lost synchronisation with * the AP. It will attempt to synchronise by * itself, but we can make it more reliable by * scheduling a session protection time event. * * The firmware needs to receive a beacon to * catch up with synchronisation, use 110% of * the beacon interval. * * Set a large maximum delay to allow for more * than a single interface. * * For new firmware versions, rely on the * firmware. This is relevant for DCM scenarios * only anyway. 
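 *
 * (Editorial worked example: with the common beacon interval of 100 TU the
 *  line below yields dur = (11 * 100) / 10 = 110 TU, i.e. 110% of the
 *  beacon interval, and the maximum-delay argument passed to
 *  iwl_mvm_protect_session() becomes 5 * dur = 550 TU.)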
*/ u32 dur = (11 * vif->bss_conf.beacon_int) / 10; iwl_mvm_protect_session(mvm, vif, dur, dur, 5 * dur, false); } else if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && !vif->bss_conf.dtim_period) { /* * If we're not restarting and still haven't * heard a beacon (dtim period unknown) then * make sure we still have enough minimum time * remaining in the time event, since the auth * might actually have taken quite a while * (especially for SAE) and so the remaining * time could be small without us having heard * a beacon yet. */ iwl_mvm_protect_assoc(mvm, vif, 0); } iwl_mvm_sf_update(mvm, vif, false); iwl_mvm_power_vif_assoc(mvm, vif); if (vif->p2p) { iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_PROT, IEEE80211_SMPS_DYNAMIC); } } else if (mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) { iwl_mvm_mei_host_disassociated(mvm); /* * If update fails - SF might be running in associated * mode while disassociated - which is forbidden. */ ret = iwl_mvm_sf_update(mvm, vif, false); WARN_ONCE(ret && !test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status), "Failed to update SF upon disassociation\n"); /* * If we get an assert during the connection (after the * station has been added, but before the vif is set * to associated), mac80211 will re-add the station and * then configure the vif. Since the vif is not * associated, we would remove the station here and * this would fail the recovery. */ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { /* * Remove AP station now that * the MAC is unassoc */ ret = iwl_mvm_rm_sta_id(mvm, vif, mvmvif->ap_sta_id); if (ret) IWL_ERR(mvm, "failed to remove AP station\n"); mvmvif->ap_sta_id = IWL_MVM_INVALID_STA; } /* remove quota for this interface */ ret = iwl_mvm_update_quotas(mvm, false, NULL); if (ret) IWL_ERR(mvm, "failed to update quotas\n"); /* this will take the cleared BSSID from bss_conf */ ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); if (ret) IWL_ERR(mvm, "failed to update MAC %pM (clear after unassoc)\n", vif->addr); } /* * The firmware tracks the MU-MIMO group on its own. * However, on HW restart we should restore this data. */ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && (changes & BSS_CHANGED_MU_GROUPS) && vif->mu_mimo_owner) { ret = iwl_mvm_update_mu_groups(mvm, vif); if (ret) IWL_ERR(mvm, "failed to update VHT MU_MIMO groups\n"); } iwl_mvm_recalc_multicast(mvm); - iwl_mvm_configure_bcast_filter(mvm); /* reset rssi values */ mvmvif->bf_data.ave_beacon_signal = 0; iwl_mvm_bt_coex_vif_change(mvm); iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_TT, IEEE80211_SMPS_AUTOMATIC); if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) iwl_mvm_config_scan(mvm); } if (changes & BSS_CHANGED_BEACON_INFO) { /* * We received a beacon from the associated AP so * remove the session protection. */ iwl_mvm_stop_session_protection(mvm, vif); iwl_mvm_sf_update(mvm, vif, false); WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); } if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | BSS_CHANGED_QOS | /* * Send power command on every beacon change, * because we may have not enabled beacon abort yet. 
*/ BSS_CHANGED_BEACON_INFO)) { ret = iwl_mvm_power_update_mac(mvm); if (ret) IWL_ERR(mvm, "failed to update power mode\n"); } if (changes & BSS_CHANGED_CQM) { IWL_DEBUG_MAC80211(mvm, "cqm info_changed\n"); /* reset cqm events tracking */ mvmvif->bf_data.last_cqm_event = 0; if (mvmvif->bf_data.bf_enabled) { ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0); if (ret) IWL_ERR(mvm, "failed to update CQM thresholds\n"); } } - if (changes & BSS_CHANGED_ARP_FILTER) { - IWL_DEBUG_MAC80211(mvm, "arp filter changed\n"); - iwl_mvm_configure_bcast_filter(mvm); - } - if (changes & BSS_CHANGED_BANDWIDTH) iwl_mvm_apply_fw_smps_request(vif); } static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ret, i; mutex_lock(&mvm->mutex); /* Send the beacon template */ ret = iwl_mvm_mac_ctxt_beacon_changed(mvm, vif); if (ret) goto out_unlock; /* * Re-calculate the tsf id, as the leader-follower relations depend on * the beacon interval, which was not known when the AP interface * was added. */ if (vif->type == NL80211_IFTYPE_AP) iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif); mvmvif->ap_assoc_sta_count = 0; /* Add the mac context */ ret = iwl_mvm_mac_ctxt_add(mvm, vif); if (ret) goto out_unlock; /* Perform the binding */ ret = iwl_mvm_binding_add_vif(mvm, vif); if (ret) goto out_remove; /* * This is not very nice, but the simplest: * For older FWs adding the mcast sta before the bcast station may * cause assert 0x2b00. * This is fixed in later FW so make the order of removal depend on * the TLV */ if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) { ret = iwl_mvm_add_mcast_sta(mvm, vif); if (ret) goto out_unbind; /* * Send the bcast station. At this stage the TBTT and DTIM time * events are added and applied to the scheduler */ ret = iwl_mvm_send_add_bcast_sta(mvm, vif); if (ret) { iwl_mvm_rm_mcast_sta(mvm, vif); goto out_unbind; } } else { /* * Send the bcast station. 
At this stage the TBTT and DTIM time * events are added and applied to the scheduler */ ret = iwl_mvm_send_add_bcast_sta(mvm, vif); if (ret) goto out_unbind; ret = iwl_mvm_add_mcast_sta(mvm, vif); if (ret) { iwl_mvm_send_rm_bcast_sta(mvm, vif); goto out_unbind; } } /* must be set before quota calculations */ mvmvif->ap_ibss_active = true; /* send all the early keys to the device now */ for (i = 0; i < ARRAY_SIZE(mvmvif->ap_early_keys); i++) { struct ieee80211_key_conf *key = mvmvif->ap_early_keys[i]; if (!key) continue; mvmvif->ap_early_keys[i] = NULL; ret = __iwl_mvm_mac_set_key(hw, SET_KEY, vif, NULL, key); if (ret) goto out_quota_failed; } if (vif->type == NL80211_IFTYPE_AP && !vif->p2p) { iwl_mvm_vif_set_low_latency(mvmvif, true, LOW_LATENCY_VIF_TYPE); iwl_mvm_send_low_latency_cmd(mvm, true, mvmvif->id); } /* power updated needs to be done before quotas */ iwl_mvm_power_update_mac(mvm); ret = iwl_mvm_update_quotas(mvm, false, NULL); if (ret) goto out_quota_failed; /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */ if (vif->p2p && mvm->p2p_device_vif) iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL); iwl_mvm_bt_coex_vif_change(mvm); /* we don't support TDLS during DCM */ if (iwl_mvm_phy_ctx_count(mvm) > 1) iwl_mvm_teardown_tdls_peers(mvm); iwl_mvm_ftm_restart_responder(mvm, vif); goto out_unlock; out_quota_failed: iwl_mvm_power_update_mac(mvm); mvmvif->ap_ibss_active = false; iwl_mvm_send_rm_bcast_sta(mvm, vif); iwl_mvm_rm_mcast_sta(mvm, vif); out_unbind: iwl_mvm_binding_remove_vif(mvm, vif); out_remove: iwl_mvm_mac_ctxt_remove(mvm, vif); out_unlock: mutex_unlock(&mvm->mutex); return ret; } static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); iwl_mvm_prepare_mac_removal(mvm, vif); mutex_lock(&mvm->mutex); /* Handle AP stop while in CSA */ if (rcu_access_pointer(mvm->csa_vif) == vif) { iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data); RCU_INIT_POINTER(mvm->csa_vif, NULL); mvmvif->csa_countdown = false; } if (rcu_access_pointer(mvm->csa_tx_blocked_vif) == vif) { RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL); mvm->csa_tx_block_bcn_timeout = 0; } mvmvif->ap_ibss_active = false; mvm->ap_last_beacon_gp2 = 0; if (vif->type == NL80211_IFTYPE_AP && !vif->p2p) { iwl_mvm_vif_set_low_latency(mvmvif, false, LOW_LATENCY_VIF_TYPE); iwl_mvm_send_low_latency_cmd(mvm, false, mvmvif->id); } iwl_mvm_bt_coex_vif_change(mvm); /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */ if (vif->p2p && mvm->p2p_device_vif) iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL); iwl_mvm_update_quotas(mvm, false, NULL); iwl_mvm_ftm_responder_clear(mvm, vif); /* * This is not very nice, but the simplest: * For older FWs removing the mcast sta before the bcast station may * cause assert 0x2b00. * This is fixed in later FW (which will stop beaconing when removing * bcast station). 
* So make the order of removal depend on the TLV */ if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) iwl_mvm_rm_mcast_sta(mvm, vif); iwl_mvm_send_rm_bcast_sta(mvm, vif); if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) iwl_mvm_rm_mcast_sta(mvm, vif); iwl_mvm_binding_remove_vif(mvm, vif); iwl_mvm_power_update_mac(mvm); iwl_mvm_mac_ctxt_remove(mvm, vif); mutex_unlock(&mvm->mutex); } static void iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_bss_conf *bss_conf, u32 changes) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); /* Changes will be applied when the AP/IBSS is started */ if (!mvmvif->ap_ibss_active) return; if (changes & (BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_HT | BSS_CHANGED_BANDWIDTH | BSS_CHANGED_QOS) && iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL)) IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr); /* Need to send a new beacon template to the FW */ if (changes & BSS_CHANGED_BEACON && iwl_mvm_mac_ctxt_beacon_changed(mvm, vif)) IWL_WARN(mvm, "Failed updating beacon data\n"); if (changes & BSS_CHANGED_FTM_RESPONDER) { int ret = iwl_mvm_ftm_start_responder(mvm, vif); if (ret) IWL_WARN(mvm, "Failed to enable FTM responder (%d)\n", ret); } } static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *bss_conf, u32 changes) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); mutex_lock(&mvm->mutex); if (changes & BSS_CHANGED_IDLE && !bss_conf->idle) iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true); switch (vif->type) { case NL80211_IFTYPE_STATION: iwl_mvm_bss_info_changed_station(mvm, vif, bss_conf, changes); break; case NL80211_IFTYPE_AP: case NL80211_IFTYPE_ADHOC: iwl_mvm_bss_info_changed_ap_ibss(mvm, vif, bss_conf, changes); break; case NL80211_IFTYPE_MONITOR: if (changes & BSS_CHANGED_MU_GROUPS) iwl_mvm_update_mu_groups(mvm, vif); break; default: /* shouldn't happen */ WARN_ON_ONCE(1); } if (changes & BSS_CHANGED_TXPOWER) { IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d dBm\n", bss_conf->txpower); iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower); } mutex_unlock(&mvm->mutex); } static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_scan_request *hw_req) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; if (hw_req->req.n_channels == 0 || hw_req->req.n_channels > mvm->fw->ucode_capa.n_scan_channels) return -EINVAL; mutex_lock(&mvm->mutex); ret = iwl_mvm_reg_scan_start(mvm, vif, &hw_req->req, &hw_req->ies); mutex_unlock(&mvm->mutex); return ret; } static void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); mutex_lock(&mvm->mutex); /* Due to a race condition, it's possible that mac80211 asks * us to stop a hw_scan when it's already stopped. This can * happen, for instance, if we stopped the scan ourselves, * called ieee80211_scan_completed() and the userspace called * cancel scan scan before ieee80211_scan_work() could run. * To handle that, simply return if the scan is not running. 
*/ if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true); mutex_unlock(&mvm->mutex); } static void iwl_mvm_mac_allow_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta, u16 tids, int num_frames, enum ieee80211_frame_release_type reason, bool more_data) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); /* Called when we need to transmit (a) frame(s) from mac80211 */ iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames, tids, more_data, false); } static void iwl_mvm_mac_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta, u16 tids, int num_frames, enum ieee80211_frame_release_type reason, bool more_data) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); /* Called when we need to transmit (a) frame(s) from agg or dqa queue */ iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames, tids, more_data, true); } static void __iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw, enum sta_notify_cmd cmd, struct ieee80211_sta *sta) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); unsigned long txqs = 0, tids = 0; int tid; /* * If we have TVQM then we get too high queue numbers - luckily * we really shouldn't get here with that because such hardware * should have firmware supporting buffer station offload. */ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return; spin_lock_bh(&mvmsta->lock); for (tid = 0; tid < ARRAY_SIZE(mvmsta->tid_data); tid++) { struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE) continue; __set_bit(tid_data->txq_id, &txqs); if (iwl_mvm_tid_queued(mvm, tid_data) == 0) continue; __set_bit(tid, &tids); } switch (cmd) { case STA_NOTIFY_SLEEP: for_each_set_bit(tid, &tids, IWL_MAX_TID_COUNT) ieee80211_sta_set_buffered(sta, tid, true); if (txqs) iwl_trans_freeze_txq_timer(mvm->trans, txqs, true); /* * The fw updates the STA to be asleep. Tx packets on the Tx * queues to this station will not be transmitted. The fw will * send a Tx response with TX_STATUS_FAIL_DEST_PS. */ break; case STA_NOTIFY_AWAKE: if (WARN_ON(mvmsta->sta_id == IWL_MVM_INVALID_STA)) break; if (txqs) iwl_trans_freeze_txq_timer(mvm->trans, txqs, false); iwl_mvm_sta_modify_ps_wake(mvm, sta); break; default: break; } spin_unlock_bh(&mvmsta->lock); } static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif, enum sta_notify_cmd cmd, struct ieee80211_sta *sta) { __iwl_mvm_mac_sta_notify(hw, cmd, sta); } void iwl_mvm_sta_pm_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_mvm_pm_state_notification *notif = (void *)pkt->data; struct ieee80211_sta *sta; struct iwl_mvm_sta *mvmsta; bool sleeping = (notif->type != IWL_MVM_PM_EVENT_AWAKE); if (WARN_ON(notif->sta_id >= mvm->fw->ucode_capa.num_stations)) return; rcu_read_lock(); sta = rcu_dereference(mvm->fw_id_to_mac_id[notif->sta_id]); if (WARN_ON(IS_ERR_OR_NULL(sta))) { rcu_read_unlock(); return; } mvmsta = iwl_mvm_sta_from_mac80211(sta); if (!mvmsta->vif || mvmsta->vif->type != NL80211_IFTYPE_AP) { rcu_read_unlock(); return; } if (mvmsta->sleeping != sleeping) { mvmsta->sleeping = sleeping; __iwl_mvm_mac_sta_notify(mvm->hw, sleeping ? 
STA_NOTIFY_SLEEP : STA_NOTIFY_AWAKE, sta); ieee80211_sta_ps_transition(sta, sleeping); } if (sleeping) { switch (notif->type) { case IWL_MVM_PM_EVENT_AWAKE: case IWL_MVM_PM_EVENT_ASLEEP: break; case IWL_MVM_PM_EVENT_UAPSD: ieee80211_sta_uapsd_trigger(sta, IEEE80211_NUM_TIDS); break; case IWL_MVM_PM_EVENT_PS_POLL: ieee80211_sta_pspoll(sta); break; default: break; } } rcu_read_unlock(); } static void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); /* * This is called before mac80211 does RCU synchronisation, * so here we already invalidate our internal RCU-protected * station pointer. The rest of the code will thus no longer * be able to find the station this way, and we don't rely * on further RCU synchronisation after the sta_state() * callback deleted the station. */ mutex_lock(&mvm->mutex); if (sta == rcu_access_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id])) rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id], ERR_PTR(-ENOENT)); mutex_unlock(&mvm->mutex); } static void iwl_mvm_check_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, const u8 *bssid) { int i; if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { struct iwl_mvm_tcm_mac *mdata; mdata = &mvm->tcm.data[iwl_mvm_vif_from_mac80211(vif)->id]; ewma_rate_init(&mdata->uapsd_nonagg_detect.rate); mdata->opened_rx_ba_sessions = false; } if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT)) return; if (vif->p2p && !iwl_mvm_is_p2p_scm_uapsd_supported(mvm)) { vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD; return; } if (!vif->p2p && (iwlwifi_mod_params.uapsd_disable & IWL_DISABLE_UAPSD_BSS)) { vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD; return; } for (i = 0; i < IWL_MVM_UAPSD_NOAGG_LIST_LEN; i++) { if (ether_addr_equal(mvm->uapsd_noagg_bssids[i].addr, bssid)) { vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD; return; } } vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD; } static void iwl_mvm_tdls_check_trigger(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u8 *peer_addr, enum nl80211_tdls_operation action) { struct iwl_fw_dbg_trigger_tlv *trig; struct iwl_fw_dbg_trigger_tdls *tdls_trig; trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), FW_DBG_TRIGGER_TDLS); if (!trig) return; tdls_trig = (void *)trig->data; if (!(tdls_trig->action_bitmap & BIT(action))) return; if (tdls_trig->peer_mode && memcmp(tdls_trig->peer, peer_addr, ETH_ALEN) != 0) return; iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, "TDLS event occurred, peer %pM, action %d", peer_addr, action); } struct iwl_mvm_he_obss_narrow_bw_ru_data { bool tolerated; }; static void iwl_mvm_check_he_obss_narrow_bw_ru_iter(struct wiphy *wiphy, struct cfg80211_bss *bss, void *_data) { struct iwl_mvm_he_obss_narrow_bw_ru_data *data = _data; const struct cfg80211_bss_ies *ies; const struct element *elem; rcu_read_lock(); ies = rcu_dereference(bss->ies); elem = cfg80211_find_elem(WLAN_EID_EXT_CAPABILITY, ies->data, ies->len); if (!elem || elem->datalen < 10 || !(elem->data[10] & WLAN_EXT_CAPA10_OBSS_NARROW_BW_RU_TOLERANCE_SUPPORT)) { data->tolerated = false; } rcu_read_unlock(); } static void iwl_mvm_check_he_obss_narrow_bw_ru(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_he_obss_narrow_bw_ru_data iter_data = { .tolerated = true, }; if (!(vif->bss_conf.chandef.chan->flags & 
IEEE80211_CHAN_RADAR)) { mvmvif->he_ru_2mhz_block = false; return; } cfg80211_bss_iter(hw->wiphy, &vif->bss_conf.chandef, iwl_mvm_check_he_obss_narrow_bw_ru_iter, &iter_data); /* * If there is at least one AP on radar channel that cannot * tolerate 26-tone RU UL OFDMA transmissions using HE TB PPDU. */ mvmvif->he_ru_2mhz_block = !iter_data.tolerated; } static void iwl_mvm_reset_cca_40mhz_workaround(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct ieee80211_supported_band *sband; const struct ieee80211_sta_he_cap *he_cap; if (vif->type != NL80211_IFTYPE_STATION) return; if (!mvm->cca_40mhz_workaround) return; /* decrement and check that we reached zero */ mvm->cca_40mhz_workaround--; if (mvm->cca_40mhz_workaround) return; sband = mvm->hw->wiphy->bands[NL80211_BAND_2GHZ]; sband->ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; he_cap = ieee80211_get_he_iftype_cap(sband, ieee80211_vif_type_p2p(vif)); if (he_cap) { /* we know that ours is writable */ struct ieee80211_sta_he_cap *he = (void *)(uintptr_t)he_cap; he->he_cap_elem.phy_cap_info[0] |= IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G; } } static void iwl_mvm_mei_host_associated(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mvm_sta *mvm_sta) { #if IS_ENABLED(CONFIG_IWLMEI) struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mei_conn_info conn_info = { .ssid_len = vif->bss_conf.ssid_len, .channel = vif->bss_conf.chandef.chan->hw_value, }; if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) return; if (!mvm->mei_registered) return; switch (mvm_sta->pairwise_cipher) { case WLAN_CIPHER_SUITE_CCMP: conn_info.pairwise_cipher = IWL_MEI_CIPHER_CCMP; break; case WLAN_CIPHER_SUITE_GCMP: conn_info.pairwise_cipher = IWL_MEI_CIPHER_GCMP; break; case WLAN_CIPHER_SUITE_GCMP_256: conn_info.pairwise_cipher = IWL_MEI_CIPHER_GCMP_256; break; case 0: /* open profile */ break; default: /* cipher not supported, don't send anything to iwlmei */ return; } switch (mvmvif->rekey_data.akm) { case WLAN_AKM_SUITE_SAE & 0xff: conn_info.auth_mode = IWL_MEI_AKM_AUTH_SAE; break; case WLAN_AKM_SUITE_PSK & 0xff: conn_info.auth_mode = IWL_MEI_AKM_AUTH_RSNA_PSK; break; case WLAN_AKM_SUITE_8021X & 0xff: conn_info.auth_mode = IWL_MEI_AKM_AUTH_RSNA; break; case 0: /* open profile */ conn_info.auth_mode = IWL_MEI_AKM_AUTH_OPEN; break; default: /* auth method / AKM not supported */ /* TODO: All the FT vesions of these? */ return; } memcpy(conn_info.ssid, vif->bss_conf.ssid, vif->bss_conf.ssid_len); memcpy(conn_info.bssid, vif->bss_conf.bssid, ETH_ALEN); /* TODO: add support for collocated AP data */ iwl_mei_host_associated(&conn_info, NULL); #endif } static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, enum ieee80211_sta_state old_state, enum ieee80211_sta_state new_state) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); int ret; IWL_DEBUG_MAC80211(mvm, "station %pM state change %d->%d\n", sta->addr, old_state, new_state); /* this would be a mac80211 bug ... but don't crash */ if (WARN_ON_ONCE(!mvmvif->phy_ctxt)) return test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status) ? 0 : -EINVAL; /* * If we are in a STA removal flow and in DQA mode: * * This is after the sync_rcu part, so the queues have already been * flushed. No more TXs on their way in mac80211's path, and no more in * the queues. 
* Also, we won't be getting any new TX frames for this station. * What we might have are deferred TX frames that need to be taken care * of. * * Drop any still-queued deferred-frame before removing the STA, and * make sure the worker is no longer handling frames for this STA. */ if (old_state == IEEE80211_STA_NONE && new_state == IEEE80211_STA_NOTEXIST) { flush_work(&mvm->add_stream_wk); /* * No need to make sure deferred TX indication is off since the * worker will already remove it if it was on */ /* * Additionally, reset the 40 MHz capability if we disconnected * from the AP now. */ iwl_mvm_reset_cca_40mhz_workaround(mvm, vif); } mutex_lock(&mvm->mutex); /* track whether or not the station is associated */ mvm_sta->sta_state = new_state; if (old_state == IEEE80211_STA_NOTEXIST && new_state == IEEE80211_STA_NONE) { /* * Firmware bug - it'll crash if the beacon interval is less * than 16. We can't avoid connecting at all, so refuse the * station state change, this will cause mac80211 to abandon * attempts to connect to this AP, and eventually wpa_s will * blocklist the AP... */ if (vif->type == NL80211_IFTYPE_STATION && vif->bss_conf.beacon_int < 16) { IWL_ERR(mvm, "AP %pM beacon interval is %d, refusing due to firmware bug!\n", sta->addr, vif->bss_conf.beacon_int); ret = -EINVAL; goto out_unlock; } if (vif->type == NL80211_IFTYPE_STATION) vif->bss_conf.he_support = sta->he_cap.has_he; if (sta->tdls && (vif->p2p || iwl_mvm_tdls_sta_count(mvm, NULL) == IWL_MVM_TDLS_STA_COUNT || iwl_mvm_phy_ctx_count(mvm) > 1)) { IWL_DEBUG_MAC80211(mvm, "refusing TDLS sta\n"); ret = -EBUSY; goto out_unlock; } ret = iwl_mvm_add_sta(mvm, vif, sta); if (sta->tdls && ret == 0) { iwl_mvm_recalc_tdls_state(mvm, vif, true); iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr, NL80211_TDLS_SETUP); } sta->max_rc_amsdu_len = 1; } else if (old_state == IEEE80211_STA_NONE && new_state == IEEE80211_STA_AUTH) { /* * EBS may be disabled due to previous failures reported by FW. * Reset EBS status here assuming environment has been changed. 
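/*
 * [Editor's illustrative sketch - not part of this patch.] The
 * NOTEXIST->NONE branch above refuses to add an AP station whose beacon
 * interval is below 16 TU, because the firmware would crash otherwise.
 * A trivial standalone version of that sanity check (names hypothetical):
 */
#include <stdio.h>
#include <errno.h>

#define MIN_BEACON_INT_TU 16    /* limit quoted in the comment above */

static int check_beacon_int(unsigned int beacon_int_tu)
{
        if (beacon_int_tu < MIN_BEACON_INT_TU)
                return -EINVAL; /* refuse the state change, as the driver does */
        return 0;
}

int main(void)
{
        printf("beacon_int=10  -> %d\n", check_beacon_int(10));  /* refused (-EINVAL) */
        printf("beacon_int=100 -> %d\n", check_beacon_int(100)); /* 0: accepted */
        return 0;
}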
*/ mvm->last_ebs_successful = true; iwl_mvm_check_uapsd(mvm, vif, sta->addr); ret = 0; } else if (old_state == IEEE80211_STA_AUTH && new_state == IEEE80211_STA_ASSOC) { if (vif->type == NL80211_IFTYPE_AP) { vif->bss_conf.he_support = sta->he_cap.has_he; mvmvif->ap_assoc_sta_count++; iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); if (vif->bss_conf.he_support && !iwlwifi_mod_params.disable_11ax) iwl_mvm_cfg_he_sta(mvm, vif, mvm_sta->sta_id); } else if (vif->type == NL80211_IFTYPE_STATION) { vif->bss_conf.he_support = sta->he_cap.has_he; mvmvif->he_ru_2mhz_block = false; if (sta->he_cap.has_he) iwl_mvm_check_he_obss_narrow_bw_ru(hw, vif); iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); } iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, false); ret = iwl_mvm_update_sta(mvm, vif, sta); } else if (old_state == IEEE80211_STA_ASSOC && new_state == IEEE80211_STA_AUTHORIZED) { ret = 0; /* we don't support TDLS during DCM */ if (iwl_mvm_phy_ctx_count(mvm) > 1) iwl_mvm_teardown_tdls_peers(mvm); if (sta->tdls) { iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr, NL80211_TDLS_ENABLE_LINK); } else { /* enable beacon filtering */ WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); mvmvif->authorized = 1; /* * Now that the station is authorized, i.e., keys were already * installed, need to indicate to the FW that * multicast data frames can be forwarded to the driver */ iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); iwl_mvm_mei_host_associated(mvm, vif, mvm_sta); } iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, true); } else if (old_state == IEEE80211_STA_AUTHORIZED && new_state == IEEE80211_STA_ASSOC) { /* once we move into assoc state, need to update rate scale to * disable using wide bandwidth */ iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, false); if (!sta->tdls) { /* Multicast data frames are no longer allowed */ iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); /* * Set this after the above iwl_mvm_mac_ctxt_changed() * to avoid sending high prio again for a little time. 
*/ mvmvif->authorized = 0; /* disable beacon filtering */ ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0); WARN_ON(ret && !test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status)); } ret = 0; } else if (old_state == IEEE80211_STA_ASSOC && new_state == IEEE80211_STA_AUTH) { if (vif->type == NL80211_IFTYPE_AP) { mvmvif->ap_assoc_sta_count--; iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); } else if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) iwl_mvm_stop_session_protection(mvm, vif); ret = 0; } else if (old_state == IEEE80211_STA_AUTH && new_state == IEEE80211_STA_NONE) { ret = 0; } else if (old_state == IEEE80211_STA_NONE && new_state == IEEE80211_STA_NOTEXIST) { if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) iwl_mvm_stop_session_protection(mvm, vif); ret = iwl_mvm_rm_sta(mvm, vif, sta); if (sta->tdls) { iwl_mvm_recalc_tdls_state(mvm, vif, false); iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr, NL80211_TDLS_DISABLE_LINK); } if (unlikely(ret && test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status))) ret = 0; } else { ret = -EIO; } out_unlock: mutex_unlock(&mvm->mutex); if (sta->tdls && ret == 0) { if (old_state == IEEE80211_STA_NOTEXIST && new_state == IEEE80211_STA_NONE) ieee80211_reserve_tid(sta, IWL_MVM_TDLS_FW_TID); else if (old_state == IEEE80211_STA_NONE && new_state == IEEE80211_STA_NOTEXIST) ieee80211_unreserve_tid(sta, IWL_MVM_TDLS_FW_TID); } return ret; } static int iwl_mvm_mac_set_rts_threshold(struct ieee80211_hw *hw, u32 value) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); mvm->rts_threshold = value; return 0; } static void iwl_mvm_sta_rc_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u32 changed) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); if (changed & (IEEE80211_RC_BW_CHANGED | IEEE80211_RC_SUPP_RATES_CHANGED | IEEE80211_RC_NSS_CHANGED)) iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, true); if (vif->type == NL80211_IFTYPE_STATION && changed & IEEE80211_RC_NSS_CHANGED) iwl_mvm_sf_update(mvm, vif, false); } static int iwl_mvm_mac_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 ac, const struct ieee80211_tx_queue_params *params) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); mvmvif->queue_params[ac] = *params; /* * No need to update right away, we'll get BSS_CHANGED_QOS * The exception is P2P_DEVICE interface which needs immediate update. 
*/ if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { int ret; mutex_lock(&mvm->mutex); ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); mutex_unlock(&mvm->mutex); return ret; } return 0; } static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_prep_tx_info *info) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); mutex_lock(&mvm->mutex); iwl_mvm_protect_assoc(mvm, vif, info->duration); mutex_unlock(&mvm->mutex); } static void iwl_mvm_mac_mgd_complete_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_prep_tx_info *info) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); /* for successful cases (auth/assoc), don't cancel session protection */ if (info->success) return; mutex_lock(&mvm->mutex); iwl_mvm_stop_session_protection(mvm, vif); mutex_unlock(&mvm->mutex); } static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct cfg80211_sched_scan_request *req, struct ieee80211_scan_ies *ies) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; mutex_lock(&mvm->mutex); if (!vif->bss_conf.idle) { ret = -EBUSY; goto out; } ret = iwl_mvm_sched_scan_start(mvm, vif, req, ies, IWL_MVM_SCAN_SCHED); out: mutex_unlock(&mvm->mutex); return ret; } static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; mutex_lock(&mvm->mutex); /* Due to a race condition, it's possible that mac80211 asks * us to stop a sched_scan when it's already stopped. This * can happen, for instance, if we stopped the scan ourselves, * called ieee80211_sched_scan_stopped() and the userspace called * stop sched scan scan before ieee80211_sched_scan_stopped_work() * could run. To handle this, simply return if the scan is * not running. 
*/ if (!(mvm->scan_status & IWL_MVM_SCAN_SCHED)) { mutex_unlock(&mvm->mutex); return 0; } ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, false); mutex_unlock(&mvm->mutex); iwl_mvm_wait_for_async_handlers(mvm); return ret; } static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_sta *mvmsta = NULL; struct iwl_mvm_key_pn *ptk_pn; int keyidx = key->keyidx; int ret, i; u8 key_offset; if (sta) mvmsta = iwl_mvm_sta_from_mac80211(sta); switch (key->cipher) { case WLAN_CIPHER_SUITE_TKIP: if (!mvm->trans->trans_cfg->gen2) { key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE; } else if (vif->type == NL80211_IFTYPE_STATION) { key->flags |= IEEE80211_KEY_FLAG_PUT_MIC_SPACE; } else { IWL_DEBUG_MAC80211(mvm, "Use SW encryption for TKIP\n"); return -EOPNOTSUPP; } break; case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: if (!iwl_mvm_has_new_tx_api(mvm)) key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE; break; case WLAN_CIPHER_SUITE_AES_CMAC: case WLAN_CIPHER_SUITE_BIP_GMAC_128: case WLAN_CIPHER_SUITE_BIP_GMAC_256: WARN_ON_ONCE(!ieee80211_hw_check(hw, MFP_CAPABLE)); break; case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: if (vif->type == NL80211_IFTYPE_STATION) break; if (iwl_mvm_has_new_tx_api(mvm)) return -EOPNOTSUPP; /* support HW crypto on TX */ return 0; default: - /* currently FW supports only one optional cipher scheme */ - if (hw->n_cipher_schemes && - hw->cipher_schemes->cipher == key->cipher) - key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE; - else - return -EOPNOTSUPP; + return -EOPNOTSUPP; } switch (cmd) { case SET_KEY: if (keyidx == 6 || keyidx == 7) rcu_assign_pointer(mvmvif->bcn_prot.keys[keyidx - 6], key); if ((vif->type == NL80211_IFTYPE_ADHOC || vif->type == NL80211_IFTYPE_AP) && !sta) { /* * GTK on AP interface is a TX-only key, return 0; * on IBSS they're per-station and because we're lazy * we don't support them for RX, so do the same. * CMAC/GMAC in AP/IBSS modes must be done in software. */ if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC || key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) { ret = -EOPNOTSUPP; break; } if (key->cipher != WLAN_CIPHER_SUITE_GCMP && key->cipher != WLAN_CIPHER_SUITE_GCMP_256 && !iwl_mvm_has_new_tx_api(mvm)) { key->hw_key_idx = STA_KEY_IDX_INVALID; ret = 0; break; } if (!mvmvif->ap_ibss_active) { for (i = 0; i < ARRAY_SIZE(mvmvif->ap_early_keys); i++) { if (!mvmvif->ap_early_keys[i]) { mvmvif->ap_early_keys[i] = key; break; } } if (i >= ARRAY_SIZE(mvmvif->ap_early_keys)) ret = -ENOSPC; else ret = 0; break; } } /* During FW restart, in order to restore the state as it was, * don't try to reprogram keys we previously failed for. 
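/*
 * [Editor's illustrative sketch - not part of this patch.] Before the
 * AP/IBSS interface is active, the SET_KEY path above just parks the key in
 * a small fixed-size array (ap_early_keys) and returns -ENOSPC when all
 * slots are taken; DISABLE_KEY, handled further below, removes it again.
 * The standalone fragment here mirrors that "early keys" bookkeeping with
 * hypothetical types and names.
 */
#include <stdio.h>
#include <stddef.h>
#include <errno.h>

#define MAX_EARLY_KEYS 4        /* arbitrary size for the sketch */

struct key { int idx; };

static struct key *early_keys[MAX_EARLY_KEYS];

/* Park a key until the interface starts; -ENOSPC if all slots are taken. */
static int stash_early_key(struct key *key)
{
        size_t i;

        for (i = 0; i < MAX_EARLY_KEYS; i++) {
                if (!early_keys[i]) {
                        early_keys[i] = key;
                        return 0;
                }
        }
        return -ENOSPC;
}

/* Drop a parked key; returns 0 if it was found in the pending list. */
static int unstash_early_key(struct key *key)
{
        size_t i;
        int ret = -ENOENT;

        for (i = 0; i < MAX_EARLY_KEYS; i++) {
                if (early_keys[i] == key) {
                        early_keys[i] = NULL;
                        ret = 0;
                }
        }
        return ret;
}

int main(void)
{
        struct key k = { .idx = 0 };

        printf("stash:   %d\n", stash_early_key(&k));   /* 0 */
        printf("unstash: %d\n", unstash_early_key(&k)); /* 0 */
        printf("unstash: %d\n", unstash_early_key(&k)); /* -ENOENT: already gone */
        return 0;
}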
*/ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && key->hw_key_idx == STA_KEY_IDX_INVALID) { IWL_DEBUG_MAC80211(mvm, "skip invalid idx key programming during restart\n"); ret = 0; break; } if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && mvmsta && iwl_mvm_has_new_rx_api(mvm) && key->flags & IEEE80211_KEY_FLAG_PAIRWISE && (key->cipher == WLAN_CIPHER_SUITE_CCMP || key->cipher == WLAN_CIPHER_SUITE_GCMP || key->cipher == WLAN_CIPHER_SUITE_GCMP_256)) { struct ieee80211_key_seq seq; int tid, q; WARN_ON(rcu_access_pointer(mvmsta->ptk_pn[keyidx])); ptk_pn = kzalloc(struct_size(ptk_pn, q, mvm->trans->num_rx_queues), GFP_KERNEL); if (!ptk_pn) { ret = -ENOMEM; break; } for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) { ieee80211_get_key_rx_seq(key, tid, &seq); for (q = 0; q < mvm->trans->num_rx_queues; q++) memcpy(ptk_pn->q[q].pn[tid], seq.ccmp.pn, IEEE80211_CCMP_PN_LEN); } rcu_assign_pointer(mvmsta->ptk_pn[keyidx], ptk_pn); } /* in HW restart reuse the index, otherwise request a new one */ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) key_offset = key->hw_key_idx; else key_offset = STA_KEY_IDX_INVALID; if (mvmsta && key->flags & IEEE80211_KEY_FLAG_PAIRWISE) mvmsta->pairwise_cipher = key->cipher; IWL_DEBUG_MAC80211(mvm, "set hwcrypto key\n"); ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, key_offset); if (ret) { IWL_WARN(mvm, "set key failed\n"); key->hw_key_idx = STA_KEY_IDX_INVALID; /* * can't add key for RX, but we don't need it * in the device for TX so still return 0, * unless we have new TX API where we cannot * put key material into the TX_CMD */ if (iwl_mvm_has_new_tx_api(mvm)) ret = -EOPNOTSUPP; else ret = 0; } break; case DISABLE_KEY: if (keyidx == 6 || keyidx == 7) RCU_INIT_POINTER(mvmvif->bcn_prot.keys[keyidx - 6], NULL); ret = -ENOENT; for (i = 0; i < ARRAY_SIZE(mvmvif->ap_early_keys); i++) { if (mvmvif->ap_early_keys[i] == key) { mvmvif->ap_early_keys[i] = NULL; ret = 0; } } /* found in pending list - don't do anything else */ if (ret == 0) break; if (key->hw_key_idx == STA_KEY_IDX_INVALID) { ret = 0; break; } if (mvmsta && iwl_mvm_has_new_rx_api(mvm) && key->flags & IEEE80211_KEY_FLAG_PAIRWISE && (key->cipher == WLAN_CIPHER_SUITE_CCMP || key->cipher == WLAN_CIPHER_SUITE_GCMP || key->cipher == WLAN_CIPHER_SUITE_GCMP_256)) { ptk_pn = rcu_dereference_protected( mvmsta->ptk_pn[keyidx], lockdep_is_held(&mvm->mutex)); RCU_INIT_POINTER(mvmsta->ptk_pn[keyidx], NULL); if (ptk_pn) kfree_rcu(ptk_pn, rcu_head); } IWL_DEBUG_MAC80211(mvm, "disable hwcrypto key\n"); ret = iwl_mvm_remove_sta_key(mvm, vif, sta, key); break; default: ret = -EINVAL; } return ret; } static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; mutex_lock(&mvm->mutex); ret = __iwl_mvm_mac_set_key(hw, cmd, vif, sta, key); mutex_unlock(&mvm->mutex); return ret; } static void iwl_mvm_mac_update_tkip_key(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_key_conf *keyconf, struct ieee80211_sta *sta, u32 iv32, u16 *phase1key) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); if (keyconf->hw_key_idx == STA_KEY_IDX_INVALID) return; iwl_mvm_update_tkip_key(mvm, vif, keyconf, sta, iv32, phase1key); } static bool iwl_mvm_rx_aux_roc(struct iwl_notif_wait_data *notif_wait, struct iwl_rx_packet *pkt, void *data) { struct iwl_mvm *mvm = container_of(notif_wait, struct iwl_mvm, notif_wait); struct iwl_hs20_roc_res *resp; int 
resp_len = iwl_rx_packet_payload_len(pkt); struct iwl_mvm_time_event_data *te_data = data; if (WARN_ON(pkt->hdr.cmd != HOT_SPOT_CMD)) return true; if (WARN_ON_ONCE(resp_len != sizeof(*resp))) { IWL_ERR(mvm, "Invalid HOT_SPOT_CMD response\n"); return true; } resp = (void *)pkt->data; IWL_DEBUG_TE(mvm, "Aux ROC: Received response from ucode: status=%d uid=%d\n", resp->status, resp->event_unique_id); te_data->uid = le32_to_cpu(resp->event_unique_id); IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n", te_data->uid); spin_lock_bh(&mvm->time_event_lock); list_add_tail(&te_data->list, &mvm->aux_roc_te_list); spin_unlock_bh(&mvm->time_event_lock); return true; } #define AUX_ROC_MIN_DURATION MSEC_TO_TU(100) #define AUX_ROC_MIN_DELAY MSEC_TO_TU(200) #define AUX_ROC_MAX_DELAY MSEC_TO_TU(600) #define AUX_ROC_SAFETY_BUFFER MSEC_TO_TU(20) #define AUX_ROC_MIN_SAFETY_BUFFER MSEC_TO_TU(10) static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm, struct ieee80211_channel *channel, struct ieee80211_vif *vif, int duration) { int res; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_time_event_data *te_data = &mvmvif->hs_time_event_data; static const u16 time_event_response[] = { HOT_SPOT_CMD }; struct iwl_notification_wait wait_time_event; u32 dtim_interval = vif->bss_conf.dtim_period * vif->bss_conf.beacon_int; u32 req_dur, delay; struct iwl_hs20_roc_req aux_roc_req = { .action = cpu_to_le32(FW_CTXT_ACTION_ADD), .id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(MAC_INDEX_AUX, 0)), .sta_id_and_color = cpu_to_le32(mvm->aux_sta.sta_id), }; struct iwl_hs20_roc_req_tail *tail = iwl_mvm_chan_info_cmd_tail(mvm, &aux_roc_req.channel_info); u16 len = sizeof(aux_roc_req) - iwl_mvm_chan_info_padding(mvm); /* Set the channel info data */ iwl_mvm_set_chan_info(mvm, &aux_roc_req.channel_info, channel->hw_value, iwl_mvm_phy_band_from_nl80211(channel->band), PHY_VHT_CHANNEL_MODE20, 0); /* Set the time and duration */ tail->apply_time = cpu_to_le32(iwl_mvm_get_systime(mvm)); delay = AUX_ROC_MIN_DELAY; req_dur = MSEC_TO_TU(duration); /* * If we are associated we want the delay time to be at least one * dtim interval so that the FW can wait until after the DTIM and * then start the time event, this will potentially allow us to * remain off-channel for the max duration. * Since we want to use almost a whole dtim interval we would also * like the delay to be for 2-3 dtim intervals, in case there are * other time events with higher priority. 
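/*
 * [Editor's illustrative sketch - not part of this patch.] A standalone
 * rendering of the delay/duration arithmetic used just below for the aux ROC
 * request while associated, assuming 1 TU = 1024 us for MSEC_TO_TU(). All
 * names are local to the sketch; the numeric constants mirror the AUX_ROC_*
 * defines above.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t msec_to_tu(uint32_t msec)
{
        return msec * 1000 / 1024;      /* 1 TU = 1024 microseconds */
}

static uint32_t min_u32(uint32_t a, uint32_t b)
{
        return a < b ? a : b;
}

/* Compute requested duration and start delay (both in TU) for an off-channel
 * request of @duration_ms while associated with DTIM interval @dtim_tu. */
static void aux_roc_timing(uint32_t duration_ms, uint32_t dtim_tu,
                           uint32_t *req_dur_tu, uint32_t *delay_tu)
{
        uint32_t req_dur = msec_to_tu(duration_ms);
        /* Leave room for 2-3 DTIMs so higher-priority events can fit first. */
        uint32_t delay = min_u32(dtim_tu * 3, msec_to_tu(600));

        /* Never stay off-channel longer than one DTIM interval. */
        if (dtim_tu <= req_dur) {
                req_dur = dtim_tu - msec_to_tu(20);
                if (req_dur <= msec_to_tu(100))
                        req_dur = dtim_tu - msec_to_tu(10);
        }

        *req_dur_tu = req_dur;
        *delay_tu = delay;
}

int main(void)
{
        uint32_t dur, delay;

        /* e.g. beacon_int = 100 TU, dtim_period = 1 -> dtim_tu = 100 */
        aux_roc_timing(500, 100, &dur, &delay);
        printf("req_dur=%u TU, delay=%u TU\n", dur, delay);
        return 0;
}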
*/ if (vif->bss_conf.assoc) { delay = min_t(u32, dtim_interval * 3, AUX_ROC_MAX_DELAY); /* We cannot remain off-channel longer than the DTIM interval */ if (dtim_interval <= req_dur) { req_dur = dtim_interval - AUX_ROC_SAFETY_BUFFER; if (req_dur <= AUX_ROC_MIN_DURATION) req_dur = dtim_interval - AUX_ROC_MIN_SAFETY_BUFFER; } } tail->duration = cpu_to_le32(req_dur); tail->apply_time_max_delay = cpu_to_le32(delay); IWL_DEBUG_TE(mvm, "ROC: Requesting to remain on channel %u for %ums\n", channel->hw_value, req_dur); IWL_DEBUG_TE(mvm, "\t(requested = %ums, max_delay = %ums, dtim_interval = %ums)\n", duration, delay, dtim_interval); /* Set the node address */ memcpy(tail->node_addr, vif->addr, ETH_ALEN); lockdep_assert_held(&mvm->mutex); spin_lock_bh(&mvm->time_event_lock); if (WARN_ON(te_data->id == HOT_SPOT_CMD)) { spin_unlock_bh(&mvm->time_event_lock); return -EIO; } te_data->vif = vif; te_data->duration = duration; te_data->id = HOT_SPOT_CMD; spin_unlock_bh(&mvm->time_event_lock); /* * Use a notification wait, which really just processes the * command response and doesn't wait for anything, in order * to be able to process the response and get the UID inside * the RX path. Using CMD_WANT_SKB doesn't work because it * stores the buffer and then wakes up this thread, by which * time another notification (that the time event started) * might already be processed unsuccessfully. */ iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event, time_event_response, ARRAY_SIZE(time_event_response), iwl_mvm_rx_aux_roc, te_data); res = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0, len, &aux_roc_req); if (res) { IWL_ERR(mvm, "Couldn't send HOT_SPOT_CMD: %d\n", res); iwl_remove_notification(&mvm->notif_wait, &wait_time_event); goto out_clear_te; } /* No need to wait for anything, so just pass 1 (0 isn't valid) */ res = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1); /* should never fail */ WARN_ON_ONCE(res); if (res) { out_clear_te: spin_lock_bh(&mvm->time_event_lock); iwl_mvm_te_clear_data(mvm, te_data); spin_unlock_bh(&mvm->time_event_lock); } return res; } static int iwl_mvm_roc(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_channel *channel, int duration, enum ieee80211_roc_type type) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct cfg80211_chan_def chandef; struct iwl_mvm_phy_ctxt *phy_ctxt; bool band_change_removal; int ret, i; IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value, duration, type); /* * Flush the done work, just in case it's still pending, so that * the work it does can complete and we can accept new frames. 
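/*
 * [Editor's illustrative sketch - not part of this patch.] The HOT_SPOT_CMD
 * flow above registers a notification waiter *before* sending the command so
 * the RX path can parse the response (and record the UID) even if it arrives
 * immediately. A minimal userspace analogue of that pattern, using pthreads
 * and hypothetical names, is sketched here; build with -pthread.
 */
#include <stdio.h>
#include <pthread.h>
#include <unistd.h>

struct notif_wait {
        pthread_mutex_t lock;
        pthread_cond_t cond;
        int triggered;
        unsigned int uid;       /* filled in by the "RX path" */
};

/* Called from the RX side when the response arrives. */
static void notif_trigger(struct notif_wait *w, unsigned int uid)
{
        pthread_mutex_lock(&w->lock);
        w->uid = uid;           /* parse the response while still in RX context */
        w->triggered = 1;
        pthread_cond_signal(&w->cond);
        pthread_mutex_unlock(&w->lock);
}

static void *rx_thread(void *arg)
{
        usleep(1000);           /* pretend the firmware answers a little later */
        notif_trigger(arg, 0x2a);
        return NULL;
}

int main(void)
{
        struct notif_wait w = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .cond = PTHREAD_COND_INITIALIZER,
        };
        pthread_t rx;

        /* Register the waiter first, then "send the command", then wait:
         * the response cannot be missed even if it races with us. */
        pthread_create(&rx, NULL, rx_thread, &w);

        pthread_mutex_lock(&w.lock);
        while (!w.triggered)
                pthread_cond_wait(&w.cond, &w.lock);
        pthread_mutex_unlock(&w.lock);

        pthread_join(rx, NULL);
        printf("got UID 0x%x from the response\n", w.uid);
        return 0;
}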
*/ flush_work(&mvm->roc_done_wk); mutex_lock(&mvm->mutex); switch (vif->type) { case NL80211_IFTYPE_STATION: if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT)) { /* Use aux roc framework (HS20) */ if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) >= 12) { u32 lmac_id; lmac_id = iwl_mvm_get_lmac_id(mvm->fw, channel->band); ret = iwl_mvm_add_aux_sta(mvm, lmac_id); if (WARN(ret, "Failed to allocate aux station")) goto out_unlock; } ret = iwl_mvm_send_aux_roc_cmd(mvm, channel, vif, duration); goto out_unlock; } IWL_ERR(mvm, "hotspot not supported\n"); ret = -EINVAL; goto out_unlock; case NL80211_IFTYPE_P2P_DEVICE: /* handle below */ break; default: IWL_ERR(mvm, "vif isn't P2P_DEVICE: %d\n", vif->type); ret = -EINVAL; goto out_unlock; } for (i = 0; i < NUM_PHY_CTX; i++) { phy_ctxt = &mvm->phy_ctxts[i]; if (phy_ctxt->ref == 0 || mvmvif->phy_ctxt == phy_ctxt) continue; if (phy_ctxt->ref && channel == phy_ctxt->channel) { /* * Unbind the P2P_DEVICE from the current PHY context, * and if the PHY context is not used remove it. */ ret = iwl_mvm_binding_remove_vif(mvm, vif); if (WARN(ret, "Failed unbinding P2P_DEVICE\n")) goto out_unlock; iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt); /* Bind the P2P_DEVICE to the current PHY Context */ mvmvif->phy_ctxt = phy_ctxt; ret = iwl_mvm_binding_add_vif(mvm, vif); if (WARN(ret, "Failed binding P2P_DEVICE\n")) goto out_unlock; iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt); goto schedule_time_event; } } /* Need to update the PHY context only if the ROC channel changed */ if (channel == mvmvif->phy_ctxt->channel) goto schedule_time_event; cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT); /* * Check if the remain-on-channel is on a different band and that * requires context removal, see iwl_mvm_phy_ctxt_changed(). If * so, we'll need to release and then re-configure here, since we * must not remove a PHY context that's part of a binding. */ band_change_removal = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT) && mvmvif->phy_ctxt->channel->band != chandef.chan->band; if (mvmvif->phy_ctxt->ref == 1 && !band_change_removal) { /* * Change the PHY context configuration as it is currently * referenced only by the P2P Device MAC (and we can modify it) */ ret = iwl_mvm_phy_ctxt_changed(mvm, mvmvif->phy_ctxt, &chandef, 1, 1); if (ret) goto out_unlock; } else { /* * The PHY context is shared with other MACs (or we're trying to * switch bands), so remove the P2P Device from the binding, * allocate an new PHY context and create a new binding. 
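/*
 * [Editor's illustrative sketch - not part of this patch.] The choice made
 * just above - retune the current PHY context in place, or unbind and
 * allocate a fresh one - boils down to a small decision: in-place changes
 * are only safe when the P2P Device is the context's sole user and no band
 * change forces a context removal. A standalone rendering of that decision
 * with hypothetical names:
 */
#include <stdio.h>
#include <stdbool.h>

enum roc_phy_action {
        ROC_MODIFY_IN_PLACE,    /* reuse and retune the current PHY context */
        ROC_REBIND_NEW_CTX,     /* unbind, allocate a fresh context, rebind */
};

static enum roc_phy_action pick_action(unsigned int refcount,
                                       bool fw_removes_on_band_change,
                                       bool band_changes)
{
        bool band_change_removal = fw_removes_on_band_change && band_changes;

        if (refcount == 1 && !band_change_removal)
                return ROC_MODIFY_IN_PLACE;
        return ROC_REBIND_NEW_CTX;
}

int main(void)
{
        printf("%d\n", pick_action(1, true, false));    /* 0: modify in place */
        printf("%d\n", pick_action(2, true, false));    /* 1: shared, rebind */
        printf("%d\n", pick_action(1, true, true));     /* 1: band change, rebind */
        return 0;
}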
*/ phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm); if (!phy_ctxt) { ret = -ENOSPC; goto out_unlock; } ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chandef, 1, 1); if (ret) { IWL_ERR(mvm, "Failed to change PHY context\n"); goto out_unlock; } /* Unbind the P2P_DEVICE from the current PHY context */ ret = iwl_mvm_binding_remove_vif(mvm, vif); if (WARN(ret, "Failed unbinding P2P_DEVICE\n")) goto out_unlock; iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt); /* Bind the P2P_DEVICE to the new allocated PHY context */ mvmvif->phy_ctxt = phy_ctxt; ret = iwl_mvm_binding_add_vif(mvm, vif); if (WARN(ret, "Failed binding P2P_DEVICE\n")) goto out_unlock; iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt); } schedule_time_event: /* Schedule the time events */ ret = iwl_mvm_start_p2p_roc(mvm, vif, duration, type); out_unlock: mutex_unlock(&mvm->mutex); IWL_DEBUG_MAC80211(mvm, "leave\n"); return ret; } static int iwl_mvm_cancel_roc(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); IWL_DEBUG_MAC80211(mvm, "enter\n"); mutex_lock(&mvm->mutex); iwl_mvm_stop_roc(mvm, vif); mutex_unlock(&mvm->mutex); IWL_DEBUG_MAC80211(mvm, "leave\n"); return 0; } struct iwl_mvm_ftm_responder_iter_data { bool responder; struct ieee80211_chanctx_conf *ctx; }; static void iwl_mvm_ftm_responder_chanctx_iter(void *_data, u8 *mac, struct ieee80211_vif *vif) { struct iwl_mvm_ftm_responder_iter_data *data = _data; if (rcu_access_pointer(vif->chanctx_conf) == data->ctx && vif->type == NL80211_IFTYPE_AP && vif->bss_conf.ftmr_params) data->responder = true; } static bool iwl_mvm_is_ftm_responder_chanctx(struct iwl_mvm *mvm, struct ieee80211_chanctx_conf *ctx) { struct iwl_mvm_ftm_responder_iter_data data = { .responder = false, .ctx = ctx, }; ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_ftm_responder_chanctx_iter, &data); return data.responder; } static int __iwl_mvm_add_chanctx(struct iwl_mvm *mvm, struct ieee80211_chanctx_conf *ctx) { u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; struct iwl_mvm_phy_ctxt *phy_ctxt; bool responder = iwl_mvm_is_ftm_responder_chanctx(mvm, ctx); struct cfg80211_chan_def *def = responder ? 
&ctx->def : &ctx->min_def; int ret; lockdep_assert_held(&mvm->mutex); IWL_DEBUG_MAC80211(mvm, "Add channel context\n"); phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm); if (!phy_ctxt) { ret = -ENOSPC; goto out; } ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, def, ctx->rx_chains_static, ctx->rx_chains_dynamic); if (ret) { IWL_ERR(mvm, "Failed to add PHY context\n"); goto out; } iwl_mvm_phy_ctxt_ref(mvm, phy_ctxt); *phy_ctxt_id = phy_ctxt->id; out: return ret; } static int iwl_mvm_add_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *ctx) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; mutex_lock(&mvm->mutex); ret = __iwl_mvm_add_chanctx(mvm, ctx); mutex_unlock(&mvm->mutex); return ret; } static void __iwl_mvm_remove_chanctx(struct iwl_mvm *mvm, struct ieee80211_chanctx_conf *ctx) { u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id]; lockdep_assert_held(&mvm->mutex); iwl_mvm_phy_ctxt_unref(mvm, phy_ctxt); } static void iwl_mvm_remove_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *ctx) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); mutex_lock(&mvm->mutex); __iwl_mvm_remove_chanctx(mvm, ctx); mutex_unlock(&mvm->mutex); } static void iwl_mvm_change_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *ctx, u32 changed) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id]; bool responder = iwl_mvm_is_ftm_responder_chanctx(mvm, ctx); struct cfg80211_chan_def *def = responder ? &ctx->def : &ctx->min_def; if (WARN_ONCE((phy_ctxt->ref > 1) && (changed & ~(IEEE80211_CHANCTX_CHANGE_WIDTH | IEEE80211_CHANCTX_CHANGE_RX_CHAINS | IEEE80211_CHANCTX_CHANGE_RADAR | IEEE80211_CHANCTX_CHANGE_MIN_WIDTH)), "Cannot change PHY. Ref=%d, changed=0x%X\n", phy_ctxt->ref, changed)) return; mutex_lock(&mvm->mutex); /* we are only changing the min_width, may be a noop */ if (changed == IEEE80211_CHANCTX_CHANGE_MIN_WIDTH) { if (phy_ctxt->width == def->width) goto out_unlock; /* we are just toggling between 20_NOHT and 20 */ if (phy_ctxt->width <= NL80211_CHAN_WIDTH_20 && def->width <= NL80211_CHAN_WIDTH_20) goto out_unlock; } iwl_mvm_bt_coex_vif_change(mvm); iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, def, ctx->rx_chains_static, ctx->rx_chains_dynamic); out_unlock: mutex_unlock(&mvm->mutex); } static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_chanctx_conf *ctx, bool switching_chanctx) { u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id]; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ret; lockdep_assert_held(&mvm->mutex); mvmvif->phy_ctxt = phy_ctxt; switch (vif->type) { case NL80211_IFTYPE_AP: /* only needed if we're switching chanctx (i.e. during CSA) */ if (switching_chanctx) { mvmvif->ap_ibss_active = true; break; } fallthrough; case NL80211_IFTYPE_ADHOC: /* * The AP binding flow is handled as part of the start_ap flow * (in bss_info_changed), similarly for IBSS. */ ret = 0; goto out; case NL80211_IFTYPE_STATION: mvmvif->csa_bcn_pending = false; break; case NL80211_IFTYPE_MONITOR: /* always disable PS when a monitor interface is active */ mvmvif->ps_disabled = true; break; default: ret = -EINVAL; goto out; } ret = iwl_mvm_binding_add_vif(mvm, vif); if (ret) goto out; /* * Power state must be updated before quotas, * otherwise fw will complain. 
*/ iwl_mvm_power_update_mac(mvm); /* Setting the quota at this stage is only required for monitor * interfaces. For the other types, the bss_info changed flow * will handle quota settings. */ if (vif->type == NL80211_IFTYPE_MONITOR) { mvmvif->monitor_active = true; ret = iwl_mvm_update_quotas(mvm, false, NULL); if (ret) goto out_remove_binding; ret = iwl_mvm_add_snif_sta(mvm, vif); if (ret) goto out_remove_binding; } /* Handle binding during CSA */ if (vif->type == NL80211_IFTYPE_AP) { iwl_mvm_update_quotas(mvm, false, NULL); iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); } if (switching_chanctx && vif->type == NL80211_IFTYPE_STATION) { mvmvif->csa_bcn_pending = true; if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) { u32 duration = 3 * vif->bss_conf.beacon_int; /* Protect the session to make sure we hear the first * beacon on the new channel. */ iwl_mvm_protect_session(mvm, vif, duration, duration, vif->bss_conf.beacon_int / 2, true); } iwl_mvm_update_quotas(mvm, false, NULL); } goto out; out_remove_binding: iwl_mvm_binding_remove_vif(mvm, vif); iwl_mvm_power_update_mac(mvm); out: if (ret) mvmvif->phy_ctxt = NULL; return ret; } static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_chanctx_conf *ctx) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; mutex_lock(&mvm->mutex); ret = __iwl_mvm_assign_vif_chanctx(mvm, vif, ctx, false); mutex_unlock(&mvm->mutex); return ret; } static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_chanctx_conf *ctx, bool switching_chanctx) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct ieee80211_vif *disabled_vif = NULL; lockdep_assert_held(&mvm->mutex); iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data); switch (vif->type) { case NL80211_IFTYPE_ADHOC: goto out; case NL80211_IFTYPE_MONITOR: mvmvif->monitor_active = false; mvmvif->ps_disabled = false; iwl_mvm_rm_snif_sta(mvm, vif); break; case NL80211_IFTYPE_AP: /* This part is triggered only during CSA */ if (!switching_chanctx || !mvmvif->ap_ibss_active) goto out; mvmvif->csa_countdown = false; /* Set CS bit on all the stations */ iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, true); /* Save blocked iface, the timeout is set on the next beacon */ rcu_assign_pointer(mvm->csa_tx_blocked_vif, vif); mvmvif->ap_ibss_active = false; break; case NL80211_IFTYPE_STATION: if (!switching_chanctx) break; disabled_vif = vif; if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) iwl_mvm_mac_ctxt_changed(mvm, vif, true, NULL); break; default: break; } iwl_mvm_update_quotas(mvm, false, disabled_vif); iwl_mvm_binding_remove_vif(mvm, vif); out: if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD) && switching_chanctx) return; mvmvif->phy_ctxt = NULL; iwl_mvm_power_update_mac(mvm); } static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_chanctx_conf *ctx) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); mutex_lock(&mvm->mutex); __iwl_mvm_unassign_vif_chanctx(mvm, vif, ctx, false); mutex_unlock(&mvm->mutex); } static int iwl_mvm_switch_vif_chanctx_swap(struct iwl_mvm *mvm, struct ieee80211_vif_chanctx_switch *vifs) { int ret; mutex_lock(&mvm->mutex); __iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true); __iwl_mvm_remove_chanctx(mvm, vifs[0].old_ctx); ret = __iwl_mvm_add_chanctx(mvm, vifs[0].new_ctx); if (ret) { IWL_ERR(mvm, 
"failed to add new_ctx during channel switch\n"); goto out_reassign; } ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx, true); if (ret) { IWL_ERR(mvm, "failed to assign new_ctx during channel switch\n"); goto out_remove; } /* we don't support TDLS during DCM - can be caused by channel switch */ if (iwl_mvm_phy_ctx_count(mvm) > 1) iwl_mvm_teardown_tdls_peers(mvm); goto out; out_remove: __iwl_mvm_remove_chanctx(mvm, vifs[0].new_ctx); out_reassign: if (__iwl_mvm_add_chanctx(mvm, vifs[0].old_ctx)) { IWL_ERR(mvm, "failed to add old_ctx back after failure.\n"); goto out_restart; } if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true)) { IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n"); goto out_restart; } goto out; out_restart: /* things keep failing, better restart the hw */ iwl_mvm_nic_restart(mvm, false); out: mutex_unlock(&mvm->mutex); return ret; } static int iwl_mvm_switch_vif_chanctx_reassign(struct iwl_mvm *mvm, struct ieee80211_vif_chanctx_switch *vifs) { int ret; mutex_lock(&mvm->mutex); __iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true); ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx, true); if (ret) { IWL_ERR(mvm, "failed to assign new_ctx during channel switch\n"); goto out_reassign; } goto out; out_reassign: if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true)) { IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n"); goto out_restart; } goto out; out_restart: /* things keep failing, better restart the hw */ iwl_mvm_nic_restart(mvm, false); out: mutex_unlock(&mvm->mutex); return ret; } static int iwl_mvm_switch_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif_chanctx_switch *vifs, int n_vifs, enum ieee80211_chanctx_switch_mode mode) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; /* we only support a single-vif right now */ if (n_vifs > 1) return -EOPNOTSUPP; switch (mode) { case CHANCTX_SWMODE_SWAP_CONTEXTS: ret = iwl_mvm_switch_vif_chanctx_swap(mvm, vifs); break; case CHANCTX_SWMODE_REASSIGN_VIF: ret = iwl_mvm_switch_vif_chanctx_reassign(mvm, vifs); break; default: ret = -EOPNOTSUPP; break; } return ret; } static int iwl_mvm_tx_last_beacon(struct ieee80211_hw *hw) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); return mvm->ibss_manager; } static int iwl_mvm_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); if (!mvm_sta || !mvm_sta->vif) { IWL_ERR(mvm, "Station is not associated to a vif\n"); return -EINVAL; } return iwl_mvm_mac_ctxt_beacon_changed(mvm, mvm_sta->vif); } #ifdef CONFIG_NL80211_TESTMODE static const struct nla_policy iwl_mvm_tm_policy[IWL_MVM_TM_ATTR_MAX + 1] = { [IWL_MVM_TM_ATTR_CMD] = { .type = NLA_U32 }, [IWL_MVM_TM_ATTR_NOA_DURATION] = { .type = NLA_U32 }, [IWL_MVM_TM_ATTR_BEACON_FILTER_STATE] = { .type = NLA_U32 }, }; static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, void *data, int len) { struct nlattr *tb[IWL_MVM_TM_ATTR_MAX + 1]; int err; u32 noa_duration; err = nla_parse_deprecated(tb, IWL_MVM_TM_ATTR_MAX, data, len, iwl_mvm_tm_policy, NULL); if (err) return err; if (!tb[IWL_MVM_TM_ATTR_CMD]) return -EINVAL; switch (nla_get_u32(tb[IWL_MVM_TM_ATTR_CMD])) { case IWL_MVM_TM_CMD_SET_NOA: if (!vif || vif->type != NL80211_IFTYPE_AP || !vif->p2p || !vif->bss_conf.enable_beacon || !tb[IWL_MVM_TM_ATTR_NOA_DURATION]) return -EINVAL; noa_duration = 
nla_get_u32(tb[IWL_MVM_TM_ATTR_NOA_DURATION]); if (noa_duration >= vif->bss_conf.beacon_int) return -EINVAL; mvm->noa_duration = noa_duration; mvm->noa_vif = vif; return iwl_mvm_update_quotas(mvm, true, NULL); case IWL_MVM_TM_CMD_SET_BEACON_FILTER: /* must be associated client vif - ignore authorized */ if (!vif || vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc || !vif->bss_conf.dtim_period || !tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE]) return -EINVAL; if (nla_get_u32(tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE])) return iwl_mvm_enable_beacon_filter(mvm, vif, 0); return iwl_mvm_disable_beacon_filter(mvm, vif, 0); } return -EOPNOTSUPP; } static int iwl_mvm_mac_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif, void *data, int len) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int err; mutex_lock(&mvm->mutex); err = __iwl_mvm_mac_testmode_cmd(mvm, vif, data, len); mutex_unlock(&mvm->mutex); return err; } #endif static void iwl_mvm_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_channel_switch *chsw) { /* By implementing this operation, we prevent mac80211 from * starting its own channel switch timer, so that we can call * ieee80211_chswitch_done() ourselves at the right time * (which is when the absence time event starts). */ IWL_DEBUG_MAC80211(IWL_MAC80211_GET_MVM(hw), "dummy channel switch op\n"); } static int iwl_mvm_schedule_client_csa(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_channel_switch *chsw) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_chan_switch_te_cmd cmd = { .mac_id = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)), .action = cpu_to_le32(FW_CTXT_ACTION_ADD), .tsf = cpu_to_le32(chsw->timestamp), .cs_count = chsw->count, .cs_mode = chsw->block_tx, }; lockdep_assert_held(&mvm->mutex); if (chsw->delay) cmd.cs_delayed_bcn_count = DIV_ROUND_UP(chsw->delay, vif->bss_conf.beacon_int); return iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, CHANNEL_SWITCH_TIME_EVENT_CMD), 0, sizeof(cmd), &cmd); } static int iwl_mvm_old_pre_chan_sw_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_channel_switch *chsw) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); u32 apply_time; /* Schedule the time event to a bit before beacon 1, * to make sure we're in the new channel when the * GO/AP arrives. In case count <= 1 immediately schedule the * TE (this might result with some packet loss or connection * loss). 
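/*
 * [Editor's illustrative sketch - not part of this patch.] The legacy
 * (pre-CS-command) path below schedules the absence time event a little
 * before beacon 1. The arithmetic, reproduced standalone with hypothetical
 * names, multiplies by 1024 as the code does (1 TU = 1024 us); the
 * "switch time" margin stands in for IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT.
 */
#include <stdio.h>
#include <stdint.h>

/* Absolute device time (us) at which to start the channel-switch time event. */
static uint32_t csa_apply_time(uint32_t device_timestamp_us,
                               uint32_t beacon_int_tu, uint8_t count,
                               uint32_t switch_time_tu)
{
        if (count <= 1)
                return 0;       /* schedule immediately, possibly losing a few frames */

        return device_timestamp_us +
               (beacon_int_tu * (count - 1) - switch_time_tu) * 1024;
}

int main(void)
{
        /* e.g. beacon_int = 100 TU, count = 5, 10 TU of margin */
        printf("apply_time = %u us\n", csa_apply_time(1000000, 100, 5, 10));
        return 0;
}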
*/ if (chsw->count <= 1) apply_time = 0; else apply_time = chsw->device_timestamp + ((vif->bss_conf.beacon_int * (chsw->count - 1) - IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT) * 1024); if (chsw->block_tx) iwl_mvm_csa_client_absent(mvm, vif); if (mvmvif->bf_data.bf_enabled) { int ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0); if (ret) return ret; } iwl_mvm_schedule_csa_period(mvm, vif, vif->bss_conf.beacon_int, apply_time); return 0; } #define IWL_MAX_CSA_BLOCK_TX 1500 static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_channel_switch *chsw) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct ieee80211_vif *csa_vif; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ret; mutex_lock(&mvm->mutex); mvmvif->csa_failed = false; IWL_DEBUG_MAC80211(mvm, "pre CSA to freq %d\n", chsw->chandef.center_freq1); iwl_fw_dbg_trigger_simple_stop(&mvm->fwrt, ieee80211_vif_to_wdev(vif), FW_DBG_TRIGGER_CHANNEL_SWITCH); switch (vif->type) { case NL80211_IFTYPE_AP: csa_vif = rcu_dereference_protected(mvm->csa_vif, lockdep_is_held(&mvm->mutex)); if (WARN_ONCE(csa_vif && csa_vif->csa_active, "Another CSA is already in progress")) { ret = -EBUSY; goto out_unlock; } /* we still didn't unblock tx. prevent new CS meanwhile */ if (rcu_dereference_protected(mvm->csa_tx_blocked_vif, lockdep_is_held(&mvm->mutex))) { ret = -EBUSY; goto out_unlock; } rcu_assign_pointer(mvm->csa_vif, vif); if (WARN_ONCE(mvmvif->csa_countdown, "Previous CSA countdown didn't complete")) { ret = -EBUSY; goto out_unlock; } mvmvif->csa_target_freq = chsw->chandef.chan->center_freq; break; case NL80211_IFTYPE_STATION: /* * In the new flow FW is in charge of timing the switch so there * is no need for all of this */ if (iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP, CHANNEL_SWITCH_ERROR_NOTIF, 0)) break; /* * We haven't configured the firmware to be associated yet since * we don't know the dtim period. In this case, the firmware can't * track the beacons. 
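/*
 * [Editor's illustrative sketch - not part of this patch.] When the AP
 * blocks TX across the switch, the station code below arms a delayed worker
 * so an undetermined or overly long quiet period ends in a graceful
 * disconnect. A standalone version of that decision, with hypothetical names
 * and the same 1500 cap as IWL_MAX_CSA_BLOCK_TX:
 */
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define MAX_CSA_BLOCK_TX 1500   /* same numeric cap as IWL_MAX_CSA_BLOCK_TX */

/* Returns true when a "give up and disconnect" timer should be armed. */
static bool csa_needs_disconnect_timer(bool block_tx, uint8_t count,
                                       uint32_t beacon_int)
{
        if (!block_tx)
                return false;
        /* Undetermined (count == 0) or overly long quiet period. */
        return !count || (uint32_t)count * beacon_int > MAX_CSA_BLOCK_TX;
}

int main(void)
{
        printf("%d\n", csa_needs_disconnect_timer(true, 0, 100));  /* 1: unknown length */
        printf("%d\n", csa_needs_disconnect_timer(true, 20, 100)); /* 1: 2000 > 1500 */
        printf("%d\n", csa_needs_disconnect_timer(true, 5, 100));  /* 0: short enough */
        return 0;
}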
*/ if (!vif->bss_conf.assoc || !vif->bss_conf.dtim_period) { ret = -EBUSY; goto out_unlock; } if (chsw->delay > IWL_MAX_CSA_BLOCK_TX) schedule_delayed_work(&mvmvif->csa_work, 0); if (chsw->block_tx) { /* * In case of undetermined / long time with immediate * quiet monitor status to gracefully disconnect */ if (!chsw->count || chsw->count * vif->bss_conf.beacon_int > IWL_MAX_CSA_BLOCK_TX) schedule_delayed_work(&mvmvif->csa_work, msecs_to_jiffies(IWL_MAX_CSA_BLOCK_TX)); } if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) { ret = iwl_mvm_old_pre_chan_sw_sta(mvm, vif, chsw); if (ret) goto out_unlock; } else { iwl_mvm_schedule_client_csa(mvm, vif, chsw); } mvmvif->csa_count = chsw->count; mvmvif->csa_misbehave = false; break; default: break; } mvmvif->ps_disabled = true; ret = iwl_mvm_power_update_ps(mvm); if (ret) goto out_unlock; /* we won't be on this channel any longer */ iwl_mvm_teardown_tdls_peers(mvm); out_unlock: mutex_unlock(&mvm->mutex); return ret; } static void iwl_mvm_channel_switch_rx_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_channel_switch *chsw) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_chan_switch_te_cmd cmd = { .mac_id = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)), .action = cpu_to_le32(FW_CTXT_ACTION_MODIFY), .tsf = cpu_to_le32(chsw->timestamp), .cs_count = chsw->count, .cs_mode = chsw->block_tx, }; /* * In the new flow FW is in charge of timing the switch so there is no * need for all of this */ if (iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP, CHANNEL_SWITCH_ERROR_NOTIF, 0)) return; if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CS_MODIFY)) return; IWL_DEBUG_MAC80211(mvm, "Modify CSA on mac %d count = %d (old %d) mode = %d\n", mvmvif->id, chsw->count, mvmvif->csa_count, chsw->block_tx); if (chsw->count >= mvmvif->csa_count && chsw->block_tx) { if (mvmvif->csa_misbehave) { /* Second time, give up on this AP*/ iwl_mvm_abort_channel_switch(hw, vif); ieee80211_chswitch_done(vif, false); mvmvif->csa_misbehave = false; return; } mvmvif->csa_misbehave = true; } mvmvif->csa_count = chsw->count; mutex_lock(&mvm->mutex); if (mvmvif->csa_failed) goto out_unlock; WARN_ON(iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, CHANNEL_SWITCH_TIME_EVENT_CMD), 0, sizeof(cmd), &cmd)); out_unlock: mutex_unlock(&mvm->mutex); } static void iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop) { int i; if (!iwl_mvm_has_new_tx_api(mvm)) { if (drop) { mutex_lock(&mvm->mutex); iwl_mvm_flush_tx_path(mvm, iwl_mvm_flushable_queues(mvm) & queues); mutex_unlock(&mvm->mutex); } else { iwl_trans_wait_tx_queues_empty(mvm->trans, queues); } return; } mutex_lock(&mvm->mutex); for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) { struct ieee80211_sta *sta; sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], lockdep_is_held(&mvm->mutex)); if (IS_ERR_OR_NULL(sta)) continue; if (drop) iwl_mvm_flush_sta_tids(mvm, i, 0xFFFF); else iwl_mvm_wait_sta_queues_empty(mvm, iwl_mvm_sta_from_mac80211(sta)); } mutex_unlock(&mvm->mutex); } static void iwl_mvm_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u32 queues, bool drop) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif; struct iwl_mvm_sta *mvmsta; struct ieee80211_sta *sta; int i; u32 msk = 0; if (!vif) { iwl_mvm_flush_no_vif(mvm, queues, drop); return; } if (vif->type != NL80211_IFTYPE_STATION) return; /* Make sure we're done with the 
deferred traffic before flushing */ flush_work(&mvm->add_stream_wk); mutex_lock(&mvm->mutex); mvmvif = iwl_mvm_vif_from_mac80211(vif); /* flush the AP-station and all TDLS peers */ for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) { sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], lockdep_is_held(&mvm->mutex)); if (IS_ERR_OR_NULL(sta)) continue; mvmsta = iwl_mvm_sta_from_mac80211(sta); if (mvmsta->vif != vif) continue; /* make sure only TDLS peers or the AP are flushed */ WARN_ON(i != mvmvif->ap_sta_id && !sta->tdls); if (drop) { if (iwl_mvm_flush_sta(mvm, mvmsta, false)) IWL_ERR(mvm, "flush request fail\n"); } else { msk |= mvmsta->tfd_queue_msk; if (iwl_mvm_has_new_tx_api(mvm)) iwl_mvm_wait_sta_queues_empty(mvm, mvmsta); } } mutex_unlock(&mvm->mutex); /* this can take a while, and we may need/want other operations * to succeed while doing this, so do it without the mutex held */ if (!drop && !iwl_mvm_has_new_tx_api(mvm)) iwl_trans_wait_tx_queues_empty(mvm->trans, msk); } static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx, struct survey_info *survey) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; memset(survey, 0, sizeof(*survey)); /* only support global statistics right now */ if (idx != 0) return -ENOENT; if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS)) return -ENOENT; mutex_lock(&mvm->mutex); if (iwl_mvm_firmware_running(mvm)) { ret = iwl_mvm_request_statistics(mvm, false); if (ret) goto out; } survey->filled = SURVEY_INFO_TIME | SURVEY_INFO_TIME_RX | SURVEY_INFO_TIME_TX | SURVEY_INFO_TIME_SCAN; survey->time = mvm->accu_radio_stats.on_time_rf + mvm->radio_stats.on_time_rf; do_div(survey->time, USEC_PER_MSEC); survey->time_rx = mvm->accu_radio_stats.rx_time + mvm->radio_stats.rx_time; do_div(survey->time_rx, USEC_PER_MSEC); survey->time_tx = mvm->accu_radio_stats.tx_time + mvm->radio_stats.tx_time; do_div(survey->time_tx, USEC_PER_MSEC); survey->time_scan = mvm->accu_radio_stats.on_time_scan + mvm->radio_stats.on_time_scan; do_div(survey->time_scan, USEC_PER_MSEC); ret = 0; out: mutex_unlock(&mvm->mutex); return ret; } static void iwl_mvm_set_sta_rate(u32 rate_n_flags, struct rate_info *rinfo) { u32 format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK; switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) { case RATE_MCS_CHAN_WIDTH_20: rinfo->bw = RATE_INFO_BW_20; break; case RATE_MCS_CHAN_WIDTH_40: rinfo->bw = RATE_INFO_BW_40; break; case RATE_MCS_CHAN_WIDTH_80: rinfo->bw = RATE_INFO_BW_80; break; case RATE_MCS_CHAN_WIDTH_160: rinfo->bw = RATE_INFO_BW_160; break; } if (format == RATE_MCS_CCK_MSK || format == RATE_MCS_LEGACY_OFDM_MSK) { int rate = u32_get_bits(rate_n_flags, RATE_LEGACY_RATE_MSK); /* add the offset needed to get to the legacy ofdm indices */ if (format == RATE_MCS_LEGACY_OFDM_MSK) rate += IWL_FIRST_OFDM_RATE; switch (rate) { case IWL_RATE_1M_INDEX: rinfo->legacy = 10; break; case IWL_RATE_2M_INDEX: rinfo->legacy = 20; break; case IWL_RATE_5M_INDEX: rinfo->legacy = 55; break; case IWL_RATE_11M_INDEX: rinfo->legacy = 110; break; case IWL_RATE_6M_INDEX: rinfo->legacy = 60; break; case IWL_RATE_9M_INDEX: rinfo->legacy = 90; break; case IWL_RATE_12M_INDEX: rinfo->legacy = 120; break; case IWL_RATE_18M_INDEX: rinfo->legacy = 180; break; case IWL_RATE_24M_INDEX: rinfo->legacy = 240; break; case IWL_RATE_36M_INDEX: rinfo->legacy = 360; break; case IWL_RATE_48M_INDEX: rinfo->legacy = 480; break; case IWL_RATE_54M_INDEX: rinfo->legacy = 540; } return; } rinfo->nss = u32_get_bits(rate_n_flags, RATE_MCS_NSS_MSK) + 1; 
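/*
 * [Editor's illustrative sketch - not part of this patch.] The switch just
 * above fills rinfo->legacy in units of 100 kbit/s (so 1 Mbit/s -> 10,
 * 54 Mbit/s -> 540), which is the unit cfg80211 expects in struct rate_info.
 * The same mapping written as a lookup table, with a hypothetical index
 * ordering, is:
 */
#include <stdio.h>

/* CCK rates first, then OFDM, mirroring the order of cases above. */
static const unsigned int legacy_100kbps[] = {
        10, 20, 55, 110,                        /* 1, 2, 5.5, 11 Mbit/s */
        60, 90, 120, 180, 240, 360, 480, 540,   /* 6 ... 54 Mbit/s */
};

int main(void)
{
        unsigned int i;

        for (i = 0; i < sizeof(legacy_100kbps) / sizeof(legacy_100kbps[0]); i++)
                printf("rate index %u -> %u.%u Mbit/s\n", i,
                       legacy_100kbps[i] / 10, legacy_100kbps[i] % 10);
        return 0;
}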
rinfo->mcs = format == RATE_MCS_HT_MSK ? RATE_HT_MCS_INDEX(rate_n_flags) : u32_get_bits(rate_n_flags, RATE_MCS_CODE_MSK); if (format == RATE_MCS_HE_MSK) { u32 gi_ltf = u32_get_bits(rate_n_flags, RATE_MCS_HE_GI_LTF_MSK); rinfo->flags |= RATE_INFO_FLAGS_HE_MCS; if (rate_n_flags & RATE_MCS_HE_106T_MSK) { rinfo->bw = RATE_INFO_BW_HE_RU; rinfo->he_ru_alloc = NL80211_RATE_INFO_HE_RU_ALLOC_106; } switch (rate_n_flags & RATE_MCS_HE_TYPE_MSK) { case RATE_MCS_HE_TYPE_SU: case RATE_MCS_HE_TYPE_EXT_SU: if (gi_ltf == 0 || gi_ltf == 1) rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8; else if (gi_ltf == 2) rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6; else if (gi_ltf == 3) rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2; else rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8; break; case RATE_MCS_HE_TYPE_MU: if (gi_ltf == 0 || gi_ltf == 1) rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8; else if (gi_ltf == 2) rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6; else rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2; break; case RATE_MCS_HE_TYPE_TRIG: if (gi_ltf == 0 || gi_ltf == 1) rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6; else rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2; break; } if (rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK) rinfo->he_dcm = 1; return; } if (rate_n_flags & RATE_MCS_SGI_MSK) rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI; if (format == RATE_MCS_HT_MSK) { rinfo->flags |= RATE_INFO_FLAGS_MCS; } else if (format == RATE_MCS_VHT_MSK) { rinfo->flags |= RATE_INFO_FLAGS_VHT_MCS; } } static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct station_info *sinfo) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); if (mvmsta->avg_energy) { sinfo->signal_avg = -(s8)mvmsta->avg_energy; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG); } if (iwl_mvm_has_tlc_offload(mvm)) { struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw; iwl_mvm_set_sta_rate(lq_sta->last_rate_n_flags, &sinfo->txrate); sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); } /* if beacon filtering isn't on mac80211 does it anyway */ if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER)) return; if (!vif->bss_conf.assoc) return; mutex_lock(&mvm->mutex); if (mvmvif->ap_sta_id != mvmsta->sta_id) goto unlock; if (iwl_mvm_request_statistics(mvm, false)) goto unlock; sinfo->rx_beacon = mvmvif->beacon_stats.num_beacons + mvmvif->beacon_stats.accu_num_beacons; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_RX); if (mvmvif->beacon_stats.avg_signal) { /* firmware only reports a value after RXing a few beacons */ sinfo->rx_beacon_signal_avg = mvmvif->beacon_stats.avg_signal; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_SIGNAL_AVG); } unlock: mutex_unlock(&mvm->mutex); } static void iwl_mvm_event_mlme_callback_ini(struct iwl_mvm *mvm, struct ieee80211_vif *vif, const struct ieee80211_mlme_event *mlme) { if ((mlme->data == ASSOC_EVENT || mlme->data == AUTH_EVENT) && (mlme->status == MLME_DENIED || mlme->status == MLME_TIMEOUT)) { iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_ASSOC_FAILED, NULL); return; } if (mlme->data == DEAUTH_RX_EVENT || mlme->data == DEAUTH_TX_EVENT) { iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_DEASSOC, NULL); return; } } static void iwl_mvm_event_mlme_callback(struct iwl_mvm *mvm, struct ieee80211_vif *vif, const struct ieee80211_event *event) { #define CHECK_MLME_TRIGGER(_cnt, _fmt...) 
\ do { \ if ((trig_mlme->_cnt) && --(trig_mlme->_cnt)) \ break; \ iwl_fw_dbg_collect_trig(&(mvm)->fwrt, trig, _fmt); \ } while (0) struct iwl_fw_dbg_trigger_tlv *trig; struct iwl_fw_dbg_trigger_mlme *trig_mlme; if (iwl_trans_dbg_ini_valid(mvm->trans)) { iwl_mvm_event_mlme_callback_ini(mvm, vif, &event->u.mlme); return; } trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), FW_DBG_TRIGGER_MLME); if (!trig) return; trig_mlme = (void *)trig->data; if (event->u.mlme.data == ASSOC_EVENT) { if (event->u.mlme.status == MLME_DENIED) CHECK_MLME_TRIGGER(stop_assoc_denied, "DENIED ASSOC: reason %d", event->u.mlme.reason); else if (event->u.mlme.status == MLME_TIMEOUT) CHECK_MLME_TRIGGER(stop_assoc_timeout, "ASSOC TIMEOUT"); } else if (event->u.mlme.data == AUTH_EVENT) { if (event->u.mlme.status == MLME_DENIED) CHECK_MLME_TRIGGER(stop_auth_denied, "DENIED AUTH: reason %d", event->u.mlme.reason); else if (event->u.mlme.status == MLME_TIMEOUT) CHECK_MLME_TRIGGER(stop_auth_timeout, "AUTH TIMEOUT"); } else if (event->u.mlme.data == DEAUTH_RX_EVENT) { CHECK_MLME_TRIGGER(stop_rx_deauth, "DEAUTH RX %d", event->u.mlme.reason); } else if (event->u.mlme.data == DEAUTH_TX_EVENT) { CHECK_MLME_TRIGGER(stop_tx_deauth, "DEAUTH TX %d", event->u.mlme.reason); } #undef CHECK_MLME_TRIGGER } static void iwl_mvm_event_bar_rx_callback(struct iwl_mvm *mvm, struct ieee80211_vif *vif, const struct ieee80211_event *event) { struct iwl_fw_dbg_trigger_tlv *trig; struct iwl_fw_dbg_trigger_ba *ba_trig; trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), FW_DBG_TRIGGER_BA); if (!trig) return; ba_trig = (void *)trig->data; if (!(le16_to_cpu(ba_trig->rx_bar) & BIT(event->u.ba.tid))) return; iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, "BAR received from %pM, tid %d, ssn %d", event->u.ba.sta->addr, event->u.ba.tid, event->u.ba.ssn); } static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw, struct ieee80211_vif *vif, const struct ieee80211_event *event) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); switch (event->type) { case MLME_EVENT: iwl_mvm_event_mlme_callback(mvm, vif, event); break; case BAR_RX_EVENT: iwl_mvm_event_bar_rx_callback(mvm, vif, event); break; case BA_FRAME_TIMEOUT: iwl_mvm_event_frame_timeout_callback(mvm, vif, event->u.ba.sta, event->u.ba.tid); break; default: break; } } void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm, enum iwl_mvm_rxq_notif_type type, bool sync, const void *data, u32 size) { struct { struct iwl_rxq_sync_cmd cmd; struct iwl_mvm_internal_rxq_notif notif; } __packed cmd = { .cmd.rxq_mask = cpu_to_le32(BIT(mvm->trans->num_rx_queues) - 1), .cmd.count = cpu_to_le32(sizeof(struct iwl_mvm_internal_rxq_notif) + size), .notif.type = type, .notif.sync = sync, }; struct iwl_host_cmd hcmd = { .id = WIDE_ID(DATA_PATH_GROUP, TRIGGER_RX_QUEUES_NOTIF_CMD), .data[0] = &cmd, .len[0] = sizeof(cmd), .data[1] = data, .len[1] = size, .flags = sync ? 
0 : CMD_ASYNC, }; int ret; /* size must be a multiple of DWORD */ if (WARN_ON(cmd.cmd.count & cpu_to_le32(3))) return; if (!iwl_mvm_has_new_rx_api(mvm)) return; if (sync) { cmd.notif.cookie = mvm->queue_sync_cookie; mvm->queue_sync_state = (1 << mvm->trans->num_rx_queues) - 1; } ret = iwl_mvm_send_cmd(mvm, &hcmd); if (ret) { IWL_ERR(mvm, "Failed to trigger RX queues sync (%d)\n", ret); goto out; } if (sync) { lockdep_assert_held(&mvm->mutex); ret = wait_event_timeout(mvm->rx_sync_waitq, READ_ONCE(mvm->queue_sync_state) == 0 || iwl_mvm_is_radio_killed(mvm), HZ); WARN_ONCE(!ret && !iwl_mvm_is_radio_killed(mvm), "queue sync: failed to sync, state is 0x%lx\n", mvm->queue_sync_state); } out: if (sync) { mvm->queue_sync_state = 0; mvm->queue_sync_cookie++; } } static void iwl_mvm_sync_rx_queues(struct ieee80211_hw *hw) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); mutex_lock(&mvm->mutex); iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_EMPTY, true, NULL, 0); mutex_unlock(&mvm->mutex); } static int iwl_mvm_mac_get_ftm_responder_stats(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct cfg80211_ftm_responder_stats *stats) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); if (vif->p2p || vif->type != NL80211_IFTYPE_AP || !mvmvif->ap_ibss_active || !vif->bss_conf.ftm_responder) return -EINVAL; mutex_lock(&mvm->mutex); *stats = mvm->ftm_resp_stats; mutex_unlock(&mvm->mutex); stats->filled = BIT(NL80211_FTM_STATS_SUCCESS_NUM) | BIT(NL80211_FTM_STATS_PARTIAL_NUM) | BIT(NL80211_FTM_STATS_FAILED_NUM) | BIT(NL80211_FTM_STATS_ASAP_NUM) | BIT(NL80211_FTM_STATS_NON_ASAP_NUM) | BIT(NL80211_FTM_STATS_TOTAL_DURATION_MSEC) | BIT(NL80211_FTM_STATS_UNKNOWN_TRIGGERS_NUM) | BIT(NL80211_FTM_STATS_RESCHEDULE_REQUESTS_NUM) | BIT(NL80211_FTM_STATS_OUT_OF_WINDOW_TRIGGERS_NUM); return 0; } static int iwl_mvm_start_pmsr(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct cfg80211_pmsr_request *request) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; mutex_lock(&mvm->mutex); ret = iwl_mvm_ftm_start(mvm, vif, request); mutex_unlock(&mvm->mutex); return ret; } static void iwl_mvm_abort_pmsr(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct cfg80211_pmsr_request *request) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); mutex_lock(&mvm->mutex); iwl_mvm_ftm_abort(mvm, request); mutex_unlock(&mvm->mutex); } static bool iwl_mvm_can_hw_csum(struct sk_buff *skb) { u8 protocol = ip_hdr(skb)->protocol; if (!IS_ENABLED(CONFIG_INET)) return false; return protocol == IPPROTO_TCP || protocol == IPPROTO_UDP; } static bool iwl_mvm_mac_can_aggregate(struct ieee80211_hw *hw, struct sk_buff *head, struct sk_buff *skb) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) return iwl_mvm_tx_csum_bz(mvm, head, true) == iwl_mvm_tx_csum_bz(mvm, skb, true); /* For now don't aggregate IPv6 in AMSDU */ if (skb->protocol != htons(ETH_P_IP)) return false; if (!iwl_mvm_is_csum_supported(mvm)) return true; return iwl_mvm_can_hw_csum(skb) == iwl_mvm_can_hw_csum(head); } const struct ieee80211_ops iwl_mvm_hw_ops = { .tx = iwl_mvm_mac_tx, .wake_tx_queue = iwl_mvm_mac_wake_tx_queue, .ampdu_action = iwl_mvm_mac_ampdu_action, .get_antenna = iwl_mvm_op_get_antenna, .start = iwl_mvm_mac_start, .reconfig_complete = iwl_mvm_mac_reconfig_complete, .stop = iwl_mvm_mac_stop, .add_interface = iwl_mvm_mac_add_interface, .remove_interface = iwl_mvm_mac_remove_interface, .config = iwl_mvm_mac_config, 
.prepare_multicast = iwl_mvm_prepare_multicast, .configure_filter = iwl_mvm_configure_filter, .config_iface_filter = iwl_mvm_config_iface_filter, .bss_info_changed = iwl_mvm_bss_info_changed, .hw_scan = iwl_mvm_mac_hw_scan, .cancel_hw_scan = iwl_mvm_mac_cancel_hw_scan, .sta_pre_rcu_remove = iwl_mvm_sta_pre_rcu_remove, .sta_state = iwl_mvm_mac_sta_state, .sta_notify = iwl_mvm_mac_sta_notify, .allow_buffered_frames = iwl_mvm_mac_allow_buffered_frames, .release_buffered_frames = iwl_mvm_mac_release_buffered_frames, .set_rts_threshold = iwl_mvm_mac_set_rts_threshold, .sta_rc_update = iwl_mvm_sta_rc_update, .conf_tx = iwl_mvm_mac_conf_tx, .mgd_prepare_tx = iwl_mvm_mac_mgd_prepare_tx, .mgd_complete_tx = iwl_mvm_mac_mgd_complete_tx, .mgd_protect_tdls_discover = iwl_mvm_mac_mgd_protect_tdls_discover, .flush = iwl_mvm_mac_flush, .sched_scan_start = iwl_mvm_mac_sched_scan_start, .sched_scan_stop = iwl_mvm_mac_sched_scan_stop, .set_key = iwl_mvm_mac_set_key, .update_tkip_key = iwl_mvm_mac_update_tkip_key, .remain_on_channel = iwl_mvm_roc, .cancel_remain_on_channel = iwl_mvm_cancel_roc, .add_chanctx = iwl_mvm_add_chanctx, .remove_chanctx = iwl_mvm_remove_chanctx, .change_chanctx = iwl_mvm_change_chanctx, .assign_vif_chanctx = iwl_mvm_assign_vif_chanctx, .unassign_vif_chanctx = iwl_mvm_unassign_vif_chanctx, .switch_vif_chanctx = iwl_mvm_switch_vif_chanctx, .start_ap = iwl_mvm_start_ap_ibss, .stop_ap = iwl_mvm_stop_ap_ibss, .join_ibss = iwl_mvm_start_ap_ibss, .leave_ibss = iwl_mvm_stop_ap_ibss, .tx_last_beacon = iwl_mvm_tx_last_beacon, .set_tim = iwl_mvm_set_tim, .channel_switch = iwl_mvm_channel_switch, .pre_channel_switch = iwl_mvm_pre_channel_switch, .post_channel_switch = iwl_mvm_post_channel_switch, .abort_channel_switch = iwl_mvm_abort_channel_switch, .channel_switch_rx_beacon = iwl_mvm_channel_switch_rx_beacon, .tdls_channel_switch = iwl_mvm_tdls_channel_switch, .tdls_cancel_channel_switch = iwl_mvm_tdls_cancel_channel_switch, .tdls_recv_channel_switch = iwl_mvm_tdls_recv_channel_switch, .event_callback = iwl_mvm_mac_event_callback, .sync_rx_queues = iwl_mvm_sync_rx_queues, CFG80211_TESTMODE_CMD(iwl_mvm_mac_testmode_cmd) #ifdef CONFIG_PM_SLEEP /* look at d3.c */ .suspend = iwl_mvm_suspend, .resume = iwl_mvm_resume, .set_wakeup = iwl_mvm_set_wakeup, .set_rekey_data = iwl_mvm_set_rekey_data, #if IS_ENABLED(CONFIG_IPV6) .ipv6_addr_change = iwl_mvm_ipv6_addr_change, #endif .set_default_unicast_key = iwl_mvm_set_default_unicast_key, #endif .get_survey = iwl_mvm_mac_get_survey, .sta_statistics = iwl_mvm_mac_sta_statistics, .get_ftm_responder_stats = iwl_mvm_mac_get_ftm_responder_stats, .start_pmsr = iwl_mvm_start_pmsr, .abort_pmsr = iwl_mvm_abort_pmsr, .can_aggregate_in_amsdu = iwl_mvm_mac_can_aggregate, #ifdef CONFIG_IWLWIFI_DEBUGFS .sta_add_debugfs = iwl_mvm_sta_add_debugfs, #endif }; diff --git a/sys/contrib/dev/iwlwifi/mvm/mvm.h b/sys/contrib/dev/iwlwifi/mvm/mvm.h index ac1ed036ea73..73fd5dca45c4 100644 --- a/sys/contrib/dev/iwlwifi/mvm/mvm.h +++ b/sys/contrib/dev/iwlwifi/mvm/mvm.h @@ -1,2247 +1,2233 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright (C) 2012-2014, 2018-2022 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #ifndef __IWL_MVM_H__ #define __IWL_MVM_H__ #include #include #ifdef CONFIG_IWLWIFI_LEDS #include #endif #include #ifdef CONFIG_THERMAL #include #endif #include #include "iwl-op-mode.h" #include "iwl-trans.h" #include "fw/notif-wait.h" #include "iwl-eeprom-parse.h" #include 
"fw/file.h" #include "iwl-config.h" #include "sta.h" #include "fw-api.h" #include "constants.h" #include "fw/runtime.h" #include "fw/dbg.h" #include "fw/acpi.h" #include "mei/iwl-mei.h" #include "iwl-nvm-parse.h" #include #if defined(__FreeBSD__) #include #endif #define IWL_MVM_MAX_ADDRESSES 5 /* RSSI offset for WkP */ #define IWL_RSSI_OFFSET 50 #define IWL_MVM_MISSED_BEACONS_THRESHOLD 8 #define IWL_MVM_MISSED_BEACONS_THRESHOLD_LONG 16 /* A TimeUnit is 1024 microsecond */ #define MSEC_TO_TU(_msec) (_msec*1000/1024) /* For GO, this value represents the number of TUs before CSA "beacon * 0" TBTT when the CSA time-event needs to be scheduled to start. It * must be big enough to ensure that we switch in time. */ #define IWL_MVM_CHANNEL_SWITCH_TIME_GO 40 /* For client, this value represents the number of TUs before CSA * "beacon 1" TBTT, instead. This is because we don't know when the * GO/AP will be in the new channel, so we switch early enough. */ #define IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT 10 /* * This value (in TUs) is used to fine tune the CSA NoA end time which should * be just before "beacon 0" TBTT. */ #define IWL_MVM_CHANNEL_SWITCH_MARGIN 4 /* * Number of beacons to transmit on a new channel until we unblock tx to * the stations, even if we didn't identify them on a new channel */ #define IWL_MVM_CS_UNBLOCK_TX_TIMEOUT 3 /* offchannel queue towards mac80211 */ #define IWL_MVM_OFFCHANNEL_QUEUE 0 extern const struct ieee80211_ops iwl_mvm_hw_ops; /** * struct iwl_mvm_mod_params - module parameters for iwlmvm * @init_dbg: if true, then the NIC won't be stopped if the INIT fw asserted. * We will register to mac80211 to have testmode working. The NIC must not * be up'ed after the INIT fw asserted. This is useful to be able to use * proprietary tools over testmode to debug the INIT fw. 
* @power_scheme: one of enum iwl_power_scheme */ struct iwl_mvm_mod_params { bool init_dbg; int power_scheme; }; extern struct iwl_mvm_mod_params iwlmvm_mod_params; struct iwl_mvm_phy_ctxt { u16 id; u16 color; u32 ref; enum nl80211_chan_width width; struct ieee80211_channel *channel; /* track for RLC config command */ u32 center_freq1; }; struct iwl_mvm_time_event_data { struct ieee80211_vif *vif; struct list_head list; unsigned long end_jiffies; u32 duration; bool running; u32 uid; /* * The access to the 'id' field must be done when the * mvm->time_event_lock is held, as its value is used to indicate * if the te is in the time event list or not (when id == TE_MAX) */ u32 id; }; /* Power management */ /** * enum iwl_power_scheme * @IWL_POWER_SCHEME_CAM - Continuously Active Mode * @IWL_POWER_SCHEME_BPS - Balanced Power Save (default) * @IWL_POWER_SCHEME_LP - Low Power */ enum iwl_power_scheme { IWL_POWER_SCHEME_CAM = 1, IWL_POWER_SCHEME_BPS, IWL_POWER_SCHEME_LP }; #define IWL_CONN_MAX_LISTEN_INTERVAL 10 #define IWL_UAPSD_MAX_SP IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL #ifdef CONFIG_IWLWIFI_DEBUGFS enum iwl_dbgfs_pm_mask { MVM_DEBUGFS_PM_KEEP_ALIVE = BIT(0), MVM_DEBUGFS_PM_SKIP_OVER_DTIM = BIT(1), MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS = BIT(2), MVM_DEBUGFS_PM_RX_DATA_TIMEOUT = BIT(3), MVM_DEBUGFS_PM_TX_DATA_TIMEOUT = BIT(4), MVM_DEBUGFS_PM_LPRX_ENA = BIT(6), MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD = BIT(7), MVM_DEBUGFS_PM_SNOOZE_ENABLE = BIT(8), MVM_DEBUGFS_PM_UAPSD_MISBEHAVING = BIT(9), MVM_DEBUGFS_PM_USE_PS_POLL = BIT(10), }; struct iwl_dbgfs_pm { u16 keep_alive_seconds; u32 rx_data_timeout; u32 tx_data_timeout; bool skip_over_dtim; u8 skip_dtim_periods; bool lprx_ena; u32 lprx_rssi_threshold; bool snooze_ena; bool uapsd_misbehaving; bool use_ps_poll; int mask; }; /* beacon filtering */ enum iwl_dbgfs_bf_mask { MVM_DEBUGFS_BF_ENERGY_DELTA = BIT(0), MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA = BIT(1), MVM_DEBUGFS_BF_ROAMING_STATE = BIT(2), MVM_DEBUGFS_BF_TEMP_THRESHOLD = BIT(3), MVM_DEBUGFS_BF_TEMP_FAST_FILTER = BIT(4), MVM_DEBUGFS_BF_TEMP_SLOW_FILTER = BIT(5), MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER = BIT(6), MVM_DEBUGFS_BF_DEBUG_FLAG = BIT(7), MVM_DEBUGFS_BF_ESCAPE_TIMER = BIT(8), MVM_DEBUGFS_BA_ESCAPE_TIMER = BIT(9), MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT = BIT(10), }; struct iwl_dbgfs_bf { u32 bf_energy_delta; u32 bf_roaming_energy_delta; u32 bf_roaming_state; u32 bf_temp_threshold; u32 bf_temp_fast_filter; u32 bf_temp_slow_filter; u32 bf_enable_beacon_filter; u32 bf_debug_flag; u32 bf_escape_timer; u32 ba_escape_timer; u32 ba_enable_beacon_abort; int mask; }; #endif enum iwl_mvm_smps_type_request { IWL_MVM_SMPS_REQ_BT_COEX, IWL_MVM_SMPS_REQ_TT, IWL_MVM_SMPS_REQ_PROT, IWL_MVM_SMPS_REQ_FW, NUM_IWL_MVM_SMPS_REQ, }; enum iwl_bt_force_ant_mode { BT_FORCE_ANT_DIS = 0, BT_FORCE_ANT_AUTO, BT_FORCE_ANT_BT, BT_FORCE_ANT_WIFI, BT_FORCE_ANT_MAX, }; /** * enum iwl_mvm_low_latency_force - low latency force mode set by debugfs * @LOW_LATENCY_FORCE_UNSET: unset force mode * @LOW_LATENCY_FORCE_ON: force low latency on * @LOW_LATENCY_FORCE_OFF: force low latency off * @NUM_LOW_LATENCY_FORCE: max num of modes */ enum iwl_mvm_low_latency_force { LOW_LATENCY_FORCE_UNSET, LOW_LATENCY_FORCE_ON, LOW_LATENCY_FORCE_OFF, NUM_LOW_LATENCY_FORCE }; /** * enum iwl_mvm_low_latency_cause - low latency set causes * @LOW_LATENCY_TRAFFIC: indicates low latency traffic was detected * @LOW_LATENCY_DEBUGFS: low latency mode set from debugfs * @LOW_LATENCY_VCMD: low latency mode set from vendor command * @LOW_LATENCY_VIF_TYPE: low latency mode set because
of vif type (ap) * @LOW_LATENCY_DEBUGFS_FORCE_ENABLE: indicates that force mode is enabled * the actual set/unset is done with LOW_LATENCY_DEBUGFS_FORCE * @LOW_LATENCY_DEBUGFS_FORCE: low latency force mode from debugfs * set this with LOW_LATENCY_DEBUGFS_FORCE_ENABLE flag * in low_latency. */ enum iwl_mvm_low_latency_cause { LOW_LATENCY_TRAFFIC = BIT(0), LOW_LATENCY_DEBUGFS = BIT(1), LOW_LATENCY_VCMD = BIT(2), LOW_LATENCY_VIF_TYPE = BIT(3), LOW_LATENCY_DEBUGFS_FORCE_ENABLE = BIT(4), LOW_LATENCY_DEBUGFS_FORCE = BIT(5), }; /** * struct iwl_mvm_vif_bf_data - beacon filtering related data * @bf_enabled: indicates if beacon filtering is enabled * @ba_enabled: indicates if beacon abort is enabled * @ave_beacon_signal: average beacon signal * @last_cqm_event: rssi of the last cqm event * @bt_coex_min_thold: minimum threshold for BT coex * @bt_coex_max_thold: maximum threshold for BT coex * @last_bt_coex_event: rssi of the last BT coex event */ struct iwl_mvm_vif_bf_data { bool bf_enabled; bool ba_enabled; int ave_beacon_signal; int last_cqm_event; int bt_coex_min_thold; int bt_coex_max_thold; int last_bt_coex_event; }; /** * struct iwl_probe_resp_data - data for NoA/CSA updates * @rcu_head: used for freeing the data on update * @notif: notification data * @noa_len: length of NoA attribute, calculated from the notification */ struct iwl_probe_resp_data { struct rcu_head rcu_head; struct iwl_probe_resp_data_notif notif; int noa_len; }; /** * struct iwl_mvm_vif - data per Virtual Interface, it is a MAC context * @id: between 0 and 3 * @color: to solve races upon MAC addition and removal * @ap_sta_id: the sta_id of the AP - valid only if VIF type is STA * @bssid: BSSID for this (client) interface * @associated: indicates that we're currently associated, used only for * managing the firmware state in iwl_mvm_bss_info_changed_station() * @ap_assoc_sta_count: count of stations associated to us - valid only * if VIF type is AP * @uploaded: indicates the MAC context has been added to the device * @ap_ibss_active: indicates that AP/IBSS is configured and that the interface * should get quota etc. * @pm_enabled: indicates if MAC power management is allowed * @monitor_active: indicates that monitor context is configured, and that the * interface should get quota etc. * @low_latency: bit flags for low latency * see enum &iwl_mvm_low_latency_cause for causes. * @low_latency_actual: boolean, indicates low latency is set, * as a result from low_latency bit flags and takes force into account. * @authorized: indicates the AP station was set to authorized * @ps_disabled: indicates that this interface requires PS to be disabled * @queue_params: QoS params for this MAC * @bcast_sta: station used for broadcast packets. Used by the following * vifs: P2P_DEVICE, GO and AP. * @beacon_skb: the skb used to hold the AP/GO beacon template * @smps_requests: the SMPS requests of different parts of the driver, * combined on update to yield the overall request to mac80211. * @beacon_stats: beacon statistics, containing the # of received beacons, * # of received beacons accumulated over FW restart, and the current * average signal of beacons retrieved from the firmware * @csa_failed: CSA failed to schedule time event, report an error later * @features: hw features active for this vif * @probe_resp_data: data from FW notification to store NOA and CSA related * data to be inserted into probe response.
*/ struct iwl_mvm_vif { struct iwl_mvm *mvm; u16 id; u16 color; u8 ap_sta_id; u8 bssid[ETH_ALEN]; bool associated; u8 ap_assoc_sta_count; u16 cab_queue; bool uploaded; bool ap_ibss_active; bool pm_enabled; bool monitor_active; u8 low_latency: 6; u8 low_latency_actual: 1; u8 authorized:1; bool ps_disabled; struct iwl_mvm_vif_bf_data bf_data; struct { u32 num_beacons, accu_num_beacons; u8 avg_signal; } beacon_stats; u32 ap_beacon_time; enum iwl_tsf_id tsf_id; /* * QoS data from mac80211, need to store this here * as mac80211 has a separate callback but we need * to have the data for the MAC context */ struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS]; struct iwl_mvm_time_event_data time_event_data; struct iwl_mvm_time_event_data hs_time_event_data; struct iwl_mvm_int_sta bcast_sta; struct iwl_mvm_int_sta mcast_sta; /* * Assigned while mac80211 has the interface in a channel context, * or, for P2P Device, while it exists. */ struct iwl_mvm_phy_ctxt *phy_ctxt; #ifdef CONFIG_PM /* WoWLAN GTK rekey data */ struct { u8 kck[NL80211_KCK_EXT_LEN]; u8 kek[NL80211_KEK_EXT_LEN]; size_t kek_len; size_t kck_len; u32 akm; __le64 replay_ctr; bool valid; } rekey_data; int tx_key_idx; bool seqno_valid; u16 seqno; #endif #if IS_ENABLED(CONFIG_IPV6) /* IPv6 addresses for WoWLAN */ struct in6_addr target_ipv6_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX]; unsigned long tentative_addrs[BITS_TO_LONGS(IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX)]; int num_target_ipv6_addrs; #endif #ifdef CONFIG_IWLWIFI_DEBUGFS struct dentry *dbgfs_dir; struct dentry *dbgfs_slink; struct iwl_dbgfs_pm dbgfs_pm; struct iwl_dbgfs_bf dbgfs_bf; struct iwl_mac_power_cmd mac_pwr_cmd; int dbgfs_quota_min; #endif enum ieee80211_smps_mode smps_requests[NUM_IWL_MVM_SMPS_REQ]; /* FW identified misbehaving AP */ u8 uapsd_misbehaving_bssid[ETH_ALEN]; struct delayed_work uapsd_nonagg_detected_wk; /* Indicates that CSA countdown may be started */ bool csa_countdown; bool csa_failed; u16 csa_target_freq; u16 csa_count; u16 csa_misbehave; struct delayed_work csa_work; /* Indicates that we are waiting for a beacon on a new channel */ bool csa_bcn_pending; /* TCP Checksum Offload */ netdev_features_t features; struct iwl_probe_resp_data __rcu *probe_resp_data; /* we can only have 2 GTK + 2 IGTK active at a time */ struct ieee80211_key_conf *ap_early_keys[4]; /* 26-tone RU OFDMA transmissions should be blocked */ bool he_ru_2mhz_block; struct { struct ieee80211_key_conf __rcu *keys[2]; } bcn_prot; }; static inline struct iwl_mvm_vif * iwl_mvm_vif_from_mac80211(struct ieee80211_vif *vif) { return (void *)vif->drv_priv; } extern const u8 tid_to_mac80211_ac[]; #define IWL_MVM_SCAN_STOPPING_SHIFT 8 enum iwl_scan_status { IWL_MVM_SCAN_REGULAR = BIT(0), IWL_MVM_SCAN_SCHED = BIT(1), IWL_MVM_SCAN_NETDETECT = BIT(2), IWL_MVM_SCAN_STOPPING_REGULAR = BIT(8), IWL_MVM_SCAN_STOPPING_SCHED = BIT(9), IWL_MVM_SCAN_STOPPING_NETDETECT = BIT(10), IWL_MVM_SCAN_REGULAR_MASK = IWL_MVM_SCAN_REGULAR | IWL_MVM_SCAN_STOPPING_REGULAR, IWL_MVM_SCAN_SCHED_MASK = IWL_MVM_SCAN_SCHED | IWL_MVM_SCAN_STOPPING_SCHED, IWL_MVM_SCAN_NETDETECT_MASK = IWL_MVM_SCAN_NETDETECT | IWL_MVM_SCAN_STOPPING_NETDETECT, IWL_MVM_SCAN_STOPPING_MASK = 0xff << IWL_MVM_SCAN_STOPPING_SHIFT, IWL_MVM_SCAN_MASK = 0xff, }; enum iwl_mvm_scan_type { IWL_SCAN_TYPE_NOT_SET, IWL_SCAN_TYPE_UNASSOC, IWL_SCAN_TYPE_WILD, IWL_SCAN_TYPE_MILD, IWL_SCAN_TYPE_FRAGMENTED, IWL_SCAN_TYPE_FAST_BALANCE, }; enum iwl_mvm_sched_scan_pass_all_states { SCHED_SCAN_PASS_ALL_DISABLED, SCHED_SCAN_PASS_ALL_ENABLED, 
SCHED_SCAN_PASS_ALL_FOUND, }; /** * struct iwl_mvm_tt_mgmt - Thermal Throttling Management structure * @ct_kill_exit: worker to exit thermal kill * @dynamic_smps: Is dynamic SMPS enabled for thermal throttling? * @tx_backoff: The current thermal throttling tx backoff in uSec. * @min_backoff: The minimal tx backoff due to power restrictions * @params: Parameters to configure the thermal throttling algorithm. * @throttle: Is thermal throttling active? */ struct iwl_mvm_tt_mgmt { struct delayed_work ct_kill_exit; bool dynamic_smps; u32 tx_backoff; u32 min_backoff; struct iwl_tt_params params; bool throttle; }; #ifdef CONFIG_THERMAL /** * struct iwl_mvm_thermal_device - thermal zone related data * @temp_trips: temperature thresholds for report * @fw_trips_index: keep indexes to original array - temp_trips * @tzone: thermal zone device data */ struct iwl_mvm_thermal_device { s16 temp_trips[IWL_MAX_DTS_TRIPS]; u8 fw_trips_index[IWL_MAX_DTS_TRIPS]; struct thermal_zone_device *tzone; }; /* * struct iwl_mvm_cooling_device * @cur_state: current state * @cdev: struct thermal cooling device */ struct iwl_mvm_cooling_device { u32 cur_state; struct thermal_cooling_device *cdev; }; #endif #define IWL_MVM_NUM_LAST_FRAMES_UCODE_RATES 8 struct iwl_mvm_frame_stats { u32 legacy_frames; u32 ht_frames; u32 vht_frames; u32 bw_20_frames; u32 bw_40_frames; u32 bw_80_frames; u32 bw_160_frames; u32 sgi_frames; u32 ngi_frames; u32 siso_frames; u32 mimo2_frames; u32 agg_frames; u32 ampdu_count; u32 success_frames; u32 fail_frames; u32 last_rates[IWL_MVM_NUM_LAST_FRAMES_UCODE_RATES]; int last_frame_idx; }; #define IWL_MVM_DEBUG_SET_TEMPERATURE_DISABLE 0xff #define IWL_MVM_DEBUG_SET_TEMPERATURE_MIN -100 #define IWL_MVM_DEBUG_SET_TEMPERATURE_MAX 200 enum iwl_mvm_tdls_cs_state { IWL_MVM_TDLS_SW_IDLE = 0, IWL_MVM_TDLS_SW_REQ_SENT, IWL_MVM_TDLS_SW_RESP_RCVD, IWL_MVM_TDLS_SW_REQ_RCVD, IWL_MVM_TDLS_SW_ACTIVE, }; enum iwl_mvm_traffic_load { IWL_MVM_TRAFFIC_LOW, IWL_MVM_TRAFFIC_MEDIUM, IWL_MVM_TRAFFIC_HIGH, }; DECLARE_EWMA(rate, 16, 16) struct iwl_mvm_tcm_mac { struct { u32 pkts[IEEE80211_NUM_ACS]; u32 airtime; } tx; struct { u32 pkts[IEEE80211_NUM_ACS]; u32 airtime; u32 last_ampdu_ref; } rx; struct { /* track AP's transfer in client mode */ u64 rx_bytes; struct ewma_rate rate; bool detected; } uapsd_nonagg_detect; bool opened_rx_ba_sessions; }; struct iwl_mvm_tcm { struct delayed_work work; spinlock_t lock; /* used when time elapsed */ unsigned long ts; /* timestamp when period ends */ unsigned long ll_ts; unsigned long uapsd_nonagg_ts; bool paused; struct iwl_mvm_tcm_mac data[NUM_MAC_INDEX_DRIVER]; struct { u32 elapsed; /* milliseconds for this TCM period */ u32 airtime[NUM_MAC_INDEX_DRIVER]; enum iwl_mvm_traffic_load load[NUM_MAC_INDEX_DRIVER]; enum iwl_mvm_traffic_load band_load[NUM_NL80211_BANDS]; enum iwl_mvm_traffic_load global_load; bool low_latency[NUM_MAC_INDEX_DRIVER]; bool change[NUM_MAC_INDEX_DRIVER]; } result; }; /** * struct iwl_mvm_reorder_buffer - per ra/tid/queue reorder buffer * @head_sn: reorder window head sn * @num_stored: number of mpdus stored in the buffer * @buf_size: the reorder buffer size as set by the last addba request * @queue: queue of this reorder buffer * @last_amsdu: track last A-MSDU SN for duplication detection * @last_sub_index: track A-MSDU sub-frame index for duplication detection * @reorder_timer: timer for frames that are in the reorder buffer.
For A-MSDU * it is the time of last received sub-frame * @removed: prevent timer re-arming * @valid: reordering is valid for this queue * @lock: protect reorder buffer internal state * @mvm: mvm pointer, needed for frame timer context * @consec_oldsn_drops: consecutive drops due to old SN * @consec_oldsn_ampdu_gp2: A-MPDU GP2 timestamp to track * when to apply old SN consecutive drop workaround * @consec_oldsn_prev_drop: track whether or not an MPDU * that was single/part of the previous A-MPDU was * dropped due to old SN */ struct iwl_mvm_reorder_buffer { u16 head_sn; u16 num_stored; u16 buf_size; int queue; u16 last_amsdu; u8 last_sub_index; struct timer_list reorder_timer; bool removed; bool valid; spinlock_t lock; struct iwl_mvm *mvm; unsigned int consec_oldsn_drops; u32 consec_oldsn_ampdu_gp2; unsigned int consec_oldsn_prev_drop:1; } ____cacheline_aligned_in_smp; /** * struct _iwl_mvm_reorder_buf_entry - reorder buffer entry per-queue/per-seqno * @frames: list of skbs stored * @reorder_time: time the packet was stored in the reorder buffer */ struct _iwl_mvm_reorder_buf_entry { struct sk_buff_head frames; unsigned long reorder_time; }; /* make this indirection to get the aligned thing */ struct iwl_mvm_reorder_buf_entry { struct _iwl_mvm_reorder_buf_entry e; } #if defined(__FreeBSD__) __aligned(roundup2(sizeof(struct _iwl_mvm_reorder_buf_entry), 32)) #elif !defined(__CHECKER__) /* sparse doesn't like this construct: "bad integer constant expression" */ /* clang on FreeBSD: error: 'aligned' attribute requires integer constant */ __aligned(roundup_pow_of_two(sizeof(struct _iwl_mvm_reorder_buf_entry))) #endif ; /** * struct iwl_mvm_baid_data - BA session data * @sta_id: station id * @tid: tid of the session * @baid: baid of the session * @timeout: the timeout set in the addba request * @entries_per_queue: # of buffers per queue, this actually gets * aligned up to avoid cache line sharing between queues * @last_rx: last rx jiffies, updated only if timeout passed from last update * @session_timer: timer to check if BA session expired, runs at 2 * timeout * @mvm: mvm pointer, needed for timer context * @reorder_buf: reorder buffer, allocated per queue * @entries: reorder buffer entries, allocated per queue */ struct iwl_mvm_baid_data { struct rcu_head rcu_head; u8 sta_id; u8 tid; u8 baid; u16 timeout; u16 entries_per_queue; unsigned long last_rx; struct timer_list session_timer; struct iwl_mvm_baid_data __rcu **rcu_ptr; struct iwl_mvm *mvm; struct iwl_mvm_reorder_buffer reorder_buf[IWL_MAX_RX_HW_QUEUES]; struct iwl_mvm_reorder_buf_entry entries[]; }; static inline struct iwl_mvm_baid_data * iwl_mvm_baid_data_from_reorder_buf(struct iwl_mvm_reorder_buffer *buf) { return (void *)((u8 *)buf - offsetof(struct iwl_mvm_baid_data, reorder_buf) - sizeof(*buf) * buf->queue); } /* * enum iwl_mvm_queue_status - queue status * @IWL_MVM_QUEUE_FREE: the queue is not allocated nor reserved * Basically, this means that this queue can be used for any purpose * @IWL_MVM_QUEUE_RESERVED: queue is reserved but not yet in use * This is the state of a queue that has been dedicated for some RA/TID * (agg'd or not), but that hasn't yet gone through the actual enablement * of iwl_mvm_enable_txq(), and therefore no traffic can go through it yet. * Note that in this state there is no requirement to already know what TID * should be used with this queue, it is just marked as a queue that will * be used, and shouldn't be allocated to anyone else.
* @IWL_MVM_QUEUE_READY: queue is ready to be used * This is the state of a queue that has been fully configured (including * SCD pointers, etc), has a specific RA/TID assigned to it, and can be * used to send traffic. * @IWL_MVM_QUEUE_SHARED: queue is shared, or in a process of becoming shared * This is a state in which a single queue serves more than one TID, all of * which are not aggregated. Note that the queue is only associated to one * RA. */ enum iwl_mvm_queue_status { IWL_MVM_QUEUE_FREE, IWL_MVM_QUEUE_RESERVED, IWL_MVM_QUEUE_READY, IWL_MVM_QUEUE_SHARED, }; #define IWL_MVM_DQA_QUEUE_TIMEOUT (5 * HZ) #define IWL_MVM_INVALID_QUEUE 0xFFFF #define IWL_MVM_NUM_CIPHERS 10 struct iwl_mvm_txq { struct list_head list; u16 txq_id; atomic_t tx_request; bool stopped; }; static inline struct iwl_mvm_txq * iwl_mvm_txq_from_mac80211(struct ieee80211_txq *txq) { return (void *)txq->drv_priv; } static inline struct iwl_mvm_txq * iwl_mvm_txq_from_tid(struct ieee80211_sta *sta, u8 tid) { if (tid == IWL_MAX_TID_COUNT) tid = IEEE80211_NUM_TIDS; return (void *)sta->txq[tid]->drv_priv; } /** * struct iwl_mvm_tvqm_txq_info - maps TVQM hw queue to tid * * @sta_id: sta id * @txq_tid: txq tid */ struct iwl_mvm_tvqm_txq_info { u8 sta_id; u8 txq_tid; }; struct iwl_mvm_dqa_txq_info { u8 ra_sta_id; /* The RA this queue is mapped to, if exists */ bool reserved; /* Is this the TXQ reserved for a STA */ u8 mac80211_ac; /* The mac80211 AC this queue is mapped to */ u8 txq_tid; /* The TID "owner" of this queue*/ u16 tid_bitmap; /* Bitmap of the TIDs mapped to this queue */ /* Timestamp for inactivation per TID of this queue */ unsigned long last_frame_time[IWL_MAX_TID_COUNT + 1]; enum iwl_mvm_queue_status status; }; struct iwl_mvm { /* for logger access */ struct device *dev; struct iwl_trans *trans; const struct iwl_fw *fw; const struct iwl_cfg *cfg; struct iwl_phy_db *phy_db; struct ieee80211_hw *hw; /* for protecting access to iwl_mvm */ struct mutex mutex; struct list_head async_handlers_list; spinlock_t async_handlers_lock; struct work_struct async_handlers_wk; struct work_struct roc_done_wk; unsigned long init_status; unsigned long status; u32 queue_sync_cookie; unsigned long queue_sync_state; /* * for beacon filtering - * currently only one interface can be supported */ struct iwl_mvm_vif *bf_allowed_vif; bool hw_registered; bool rfkill_safe_init_done; u8 cca_40mhz_workaround; u32 ampdu_ref; bool ampdu_toggle; struct iwl_notif_wait_data notif_wait; union { struct mvm_statistics_rx_v3 rx_stats_v3; struct mvm_statistics_rx rx_stats; }; struct { u64 rx_time; u64 tx_time; u64 on_time_rf; u64 on_time_scan; } radio_stats, accu_radio_stats; struct list_head add_stream_txqs; union { struct iwl_mvm_dqa_txq_info queue_info[IWL_MAX_HW_QUEUES]; struct iwl_mvm_tvqm_txq_info tvqm_info[IWL_MAX_TVQM_QUEUES]; }; struct work_struct add_stream_wk; /* To add streams to queues */ const char *nvm_file_name; struct iwl_nvm_data *nvm_data; struct iwl_mei_nvm *mei_nvm_data; struct iwl_mvm_csme_conn_info __rcu *csme_conn_info; bool mei_rfkill_blocked; bool mei_registered; struct work_struct sap_connected_wk; /* * NVM built based on the SAP data but that we can't free even after * we get ownership because it contains the cfg80211's channel. 
*/ struct iwl_nvm_data *temp_nvm_data; /* NVM sections */ struct iwl_nvm_section nvm_sections[NVM_MAX_NUM_SECTIONS]; struct iwl_fw_runtime fwrt; /* EEPROM MAC addresses */ struct mac_address addresses[IWL_MVM_MAX_ADDRESSES]; /* data related to data path */ struct iwl_rx_phy_info last_phy_info; struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_MVM_STATION_COUNT_MAX]; u8 rx_ba_sessions; /* configured by mac80211 */ u32 rts_threshold; /* Scan status, cmd (pre-allocated) and auxiliary station */ unsigned int scan_status; void *scan_cmd; struct iwl_mcast_filter_cmd *mcast_filter_cmd; /* For CDB this is low band scan type, for non-CDB - type. */ enum iwl_mvm_scan_type scan_type; enum iwl_mvm_scan_type hb_scan_type; enum iwl_mvm_sched_scan_pass_all_states sched_scan_pass_all; struct delayed_work scan_timeout_dwork; /* max number of simultaneous scans the FW supports */ unsigned int max_scans; /* UMAC scan tracking */ u32 scan_uid_status[IWL_MVM_MAX_UMAC_SCANS]; /* start time of last scan in TSF of the mac that requested the scan */ u64 scan_start; /* the vif that requested the current scan */ struct iwl_mvm_vif *scan_vif; /* rx chain antennas set through debugfs for the scan command */ u8 scan_rx_ant; -#ifdef CONFIG_IWLWIFI_BCAST_FILTERING - /* broadcast filters to configure for each associated station */ - const struct iwl_fw_bcast_filter *bcast_filters; -#ifdef CONFIG_IWLWIFI_DEBUGFS - struct { - bool override; - struct iwl_bcast_filter_cmd cmd; - } dbgfs_bcast_filtering; -#endif -#endif - /* Internal station */ struct iwl_mvm_int_sta aux_sta; struct iwl_mvm_int_sta snif_sta; bool last_ebs_successful; u8 scan_last_antenna_idx; /* to toggle TX between antennas */ u8 mgmt_last_antenna_idx; /* last smart fifo state that was successfully sent to firmware */ enum iwl_sf_state sf_state; /* * Leave this pointer outside the ifdef below so that it can be * assigned without ifdef in the source code. */ struct dentry *debugfs_dir; #ifdef CONFIG_IWLWIFI_DEBUGFS u32 dbgfs_sram_offset, dbgfs_sram_len; u32 dbgfs_prph_reg_addr; bool disable_power_off; bool disable_power_off_d3; bool beacon_inject_active; bool scan_iter_notif_enabled; struct debugfs_blob_wrapper nvm_hw_blob; struct debugfs_blob_wrapper nvm_sw_blob; struct debugfs_blob_wrapper nvm_calib_blob; struct debugfs_blob_wrapper nvm_prod_blob; struct debugfs_blob_wrapper nvm_phy_sku_blob; struct debugfs_blob_wrapper nvm_reg_blob; struct iwl_mvm_frame_stats drv_rx_stats; spinlock_t drv_stats_lock; u16 dbgfs_rx_phyinfo; #endif struct iwl_mvm_phy_ctxt phy_ctxts[NUM_PHY_CTX]; struct list_head time_event_list; spinlock_t time_event_lock; /* * A bitmap indicating the index of the key in use. The firmware * can hold 16 keys at most. Reflect this fact. 
*/ unsigned long fw_key_table[BITS_TO_LONGS(STA_KEY_MAX_NUM)]; u8 fw_key_deleted[STA_KEY_MAX_NUM]; u8 vif_count; struct ieee80211_vif __rcu *vif_id_to_mac[NUM_MAC_INDEX_DRIVER]; /* -1 for always, 0 for never, >0 for that many times */ s8 fw_restart; u8 *error_recovery_buf; #ifdef CONFIG_IWLWIFI_LEDS struct led_classdev led; #endif struct ieee80211_vif *p2p_device_vif; #ifdef CONFIG_PM struct wiphy_wowlan_support wowlan; int gtk_ivlen, gtk_icvlen, ptk_ivlen, ptk_icvlen; /* sched scan settings for net detect */ struct ieee80211_scan_ies nd_ies; struct cfg80211_match_set *nd_match_sets; int n_nd_match_sets; struct ieee80211_channel **nd_channels; int n_nd_channels; bool net_detect; u8 offload_tid; #ifdef CONFIG_IWLWIFI_DEBUGFS bool d3_wake_sysassert; bool d3_test_active; u32 d3_test_pme_ptr; struct ieee80211_vif *keep_vif; u32 last_netdetect_scans; /* no. of scans in the last net-detect wake */ #endif #endif wait_queue_head_t rx_sync_waitq; /* BT-Coex */ struct iwl_bt_coex_profile_notif last_bt_notif; struct iwl_bt_coex_ci_cmd last_bt_ci_cmd; u8 bt_tx_prio; enum iwl_bt_force_ant_mode bt_force_ant_mode; /* Aux ROC */ struct list_head aux_roc_te_list; /* Thermal Throttling and CTkill */ struct iwl_mvm_tt_mgmt thermal_throttle; #ifdef CONFIG_THERMAL struct iwl_mvm_thermal_device tz_device; struct iwl_mvm_cooling_device cooling_dev; #endif s32 temperature; /* Celsius */ /* * Debug option to set the NIC temperature. This option makes the * driver think this is the actual NIC temperature, and ignore the * real temperature that is received from the fw */ bool temperature_test; /* Debug test temperature is enabled */ bool fw_static_smps_request; unsigned long bt_coex_last_tcm_ts; struct iwl_mvm_tcm tcm; u8 uapsd_noagg_bssid_write_idx; struct mac_address uapsd_noagg_bssids[IWL_MVM_UAPSD_NOAGG_BSSIDS_NUM] __aligned(2); struct iwl_time_quota_cmd last_quota_cmd; #ifdef CONFIG_NL80211_TESTMODE u32 noa_duration; struct ieee80211_vif *noa_vif; #endif /* Tx queues */ u16 aux_queue; u16 snif_queue; u16 probe_queue; u16 p2p_dev_queue; /* Indicate if device power save is allowed */ u8 ps_disabled; /* u8 instead of bool to ease debugfs_create_* usage */ /* Indicate if 32Khz external clock is valid */ u32 ext_clock_valid; /* This vif used by CSME to send / receive traffic */ struct ieee80211_vif *csme_vif; struct ieee80211_vif __rcu *csa_vif; struct ieee80211_vif __rcu *csa_tx_blocked_vif; u8 csa_tx_block_bcn_timeout; /* system time of last beacon (for AP/GO interface) */ u32 ap_last_beacon_gp2; /* indicates that we transmitted the last beacon */ bool ibss_manager; bool lar_regdom_set; enum iwl_mcc_source mcc_src; /* TDLS channel switch data */ struct { struct delayed_work dwork; enum iwl_mvm_tdls_cs_state state; /* * Current cs sta - might be different from periodic cs peer * station. Value is meaningless when the cs-state is idle. 
*/ u8 cur_sta_id; /* TDLS periodic channel-switch peer */ struct { u8 sta_id; u8 op_class; bool initiator; /* are we the link initiator */ struct cfg80211_chan_def chandef; struct sk_buff *skb; /* ch sw template */ u32 ch_sw_tm_ie; /* timestamp of last ch-sw request sent (GP2 time) */ u32 sent_timestamp; } peer; } tdls_cs; u32 ciphers[IWL_MVM_NUM_CIPHERS]; - struct ieee80211_cipher_scheme cs[IWL_UCODE_MAX_CS]; struct cfg80211_ftm_responder_stats ftm_resp_stats; struct { struct cfg80211_pmsr_request *req; struct wireless_dev *req_wdev; struct list_head loc_list; int responses[IWL_MVM_TOF_MAX_APS]; struct { struct list_head resp; } smooth; struct list_head pasn_list; } ftm_initiator; struct list_head resp_pasn_list; struct { u8 d0i3_resp; u8 range_resp; } cmd_ver; struct ieee80211_vif *nan_vif; struct iwl_mvm_baid_data __rcu *baid_map[IWL_MAX_BAID]; /* * Drop beacons from other APs in AP mode when there are no connected * clients. */ bool drop_bcn_ap_mode; struct delayed_work cs_tx_unblock_dwork; /* does a monitor vif exist (only one can exist hence bool) */ bool monitor_on; /* sniffer data to include in radiotap */ __le16 cur_aid; u8 cur_bssid[ETH_ALEN]; unsigned long last_6ghz_passive_scan_jiffies; unsigned long last_reset_or_resume_time_jiffies; bool sta_remove_requires_queue_remove; }; /* Extract MVM priv from op_mode and _hw */ #define IWL_OP_MODE_GET_MVM(_iwl_op_mode) \ ((struct iwl_mvm *)(_iwl_op_mode)->op_mode_specific) #define IWL_MAC80211_GET_MVM(_hw) \ IWL_OP_MODE_GET_MVM((struct iwl_op_mode *)((_hw)->priv)) /** * enum iwl_mvm_status - MVM status bits * @IWL_MVM_STATUS_HW_RFKILL: HW RF-kill is asserted * @IWL_MVM_STATUS_HW_CTKILL: CT-kill is active * @IWL_MVM_STATUS_ROC_RUNNING: remain-on-channel is running * @IWL_MVM_STATUS_HW_RESTART_REQUESTED: HW restart was requested * @IWL_MVM_STATUS_IN_HW_RESTART: HW restart is active * @IWL_MVM_STATUS_ROC_AUX_RUNNING: AUX remain-on-channel is running * @IWL_MVM_STATUS_FIRMWARE_RUNNING: firmware is running * @IWL_MVM_STATUS_NEED_FLUSH_P2P: need to flush P2P bcast STA * @IWL_MVM_STATUS_IN_D3: in D3 (or at least about to go into it) * @IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE: suppress one error log * if this is set, when intentionally triggered * @IWL_MVM_STATUS_STARTING: starting mac, * used to disable restart flow while in STARTING state */ enum iwl_mvm_status { IWL_MVM_STATUS_HW_RFKILL, IWL_MVM_STATUS_HW_CTKILL, IWL_MVM_STATUS_ROC_RUNNING, IWL_MVM_STATUS_HW_RESTART_REQUESTED, IWL_MVM_STATUS_IN_HW_RESTART, IWL_MVM_STATUS_ROC_AUX_RUNNING, IWL_MVM_STATUS_FIRMWARE_RUNNING, IWL_MVM_STATUS_NEED_FLUSH_P2P, IWL_MVM_STATUS_IN_D3, IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE, IWL_MVM_STATUS_STARTING, }; struct iwl_mvm_csme_conn_info { struct rcu_head rcu_head; struct iwl_mei_conn_info conn_info; }; /* Keep track of completed init configuration */ enum iwl_mvm_init_status { IWL_MVM_INIT_STATUS_THERMAL_INIT_COMPLETE = BIT(0), IWL_MVM_INIT_STATUS_LEDS_INIT_COMPLETE = BIT(1), }; static inline bool iwl_mvm_is_radio_killed(struct iwl_mvm *mvm) { return test_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status) || test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status); } static inline bool iwl_mvm_is_radio_hw_killed(struct iwl_mvm *mvm) { return test_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status); } static inline bool iwl_mvm_firmware_running(struct iwl_mvm *mvm) { return test_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status); } /* Must be called with rcu_read_lock() held and it can only be * released when mvmsta is not needed anymore. 
*/ static inline struct iwl_mvm_sta * iwl_mvm_sta_from_staid_rcu(struct iwl_mvm *mvm, u8 sta_id) { struct ieee80211_sta *sta; if (sta_id >= mvm->fw->ucode_capa.num_stations) return NULL; sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); /* This can happen if the station has been removed right now */ if (IS_ERR_OR_NULL(sta)) return NULL; return iwl_mvm_sta_from_mac80211(sta); } static inline struct iwl_mvm_sta * iwl_mvm_sta_from_staid_protected(struct iwl_mvm *mvm, u8 sta_id) { struct ieee80211_sta *sta; if (sta_id >= mvm->fw->ucode_capa.num_stations) return NULL; sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], lockdep_is_held(&mvm->mutex)); /* This can happen if the station has been removed right now */ if (IS_ERR_OR_NULL(sta)) return NULL; return iwl_mvm_sta_from_mac80211(sta); } static inline struct ieee80211_vif * iwl_mvm_rcu_dereference_vif_id(struct iwl_mvm *mvm, u8 vif_id, bool rcu) { if (WARN_ON(vif_id >= ARRAY_SIZE(mvm->vif_id_to_mac))) return NULL; if (rcu) return rcu_dereference(mvm->vif_id_to_mac[vif_id]); return rcu_dereference_protected(mvm->vif_id_to_mac[vif_id], lockdep_is_held(&mvm->mutex)); } static inline bool iwl_mvm_is_adaptive_dwell_supported(struct iwl_mvm *mvm) { return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_ADAPTIVE_DWELL); } static inline bool iwl_mvm_is_adaptive_dwell_v2_supported(struct iwl_mvm *mvm) { return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_ADAPTIVE_DWELL_V2); } static inline bool iwl_mvm_is_adwell_hb_ap_num_supported(struct iwl_mvm *mvm) { return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_ADWELL_HB_DEF_N_AP); } static inline bool iwl_mvm_is_oce_supported(struct iwl_mvm *mvm) { /* OCE should never be enabled for LMAC scan FWs */ return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_OCE); } static inline bool iwl_mvm_is_frag_ebs_supported(struct iwl_mvm *mvm) { return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_FRAG_EBS); } static inline bool iwl_mvm_is_short_beacon_notif_supported(struct iwl_mvm *mvm) { return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_SHORT_BEACON_NOTIF); } static inline bool iwl_mvm_is_dqa_data_queue(struct iwl_mvm *mvm, u8 queue) { return (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE) && (queue <= IWL_MVM_DQA_MAX_DATA_QUEUE); } static inline bool iwl_mvm_is_dqa_mgmt_queue(struct iwl_mvm *mvm, u8 queue) { return (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE) && (queue <= IWL_MVM_DQA_MAX_MGMT_QUEUE); } static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm) { bool nvm_lar = mvm->nvm_data->lar_enabled; bool tlv_lar = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_LAR_SUPPORT); /* * Enable LAR only if it is supported by the FW (TLV) && * enabled in the NVM */ if (mvm->cfg->nvm_type == IWL_NVM_EXT) return nvm_lar && tlv_lar; else return tlv_lar; } static inline bool iwl_mvm_is_wifi_mcc_supported(struct iwl_mvm *mvm) { return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_WIFI_MCC_UPDATE) || fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC); } static inline bool iwl_mvm_bt_is_rrc_supported(struct iwl_mvm *mvm) { return fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BT_COEX_RRC) && IWL_MVM_BT_COEX_RRC; } static inline bool iwl_mvm_is_csum_supported(struct iwl_mvm *mvm) { return fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CSUM_SUPPORT) && !IWL_MVM_HW_CSUM_DISABLE; } static inline bool iwl_mvm_is_mplut_supported(struct iwl_mvm *mvm) { return fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT) && IWL_MVM_BT_COEX_MPLUT; } static 
inline bool iwl_mvm_is_p2p_scm_uapsd_supported(struct iwl_mvm *mvm) { return fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_P2P_SCM_UAPSD) && !(iwlwifi_mod_params.uapsd_disable & IWL_DISABLE_UAPSD_P2P_CLIENT); } static inline bool iwl_mvm_has_new_rx_api(struct iwl_mvm *mvm) { return fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_MULTI_QUEUE_RX_SUPPORT); } static inline bool iwl_mvm_has_new_tx_api(struct iwl_mvm *mvm) { /* TODO - replace with TLV once defined */ return mvm->trans->trans_cfg->use_tfh; } static inline bool iwl_mvm_has_unified_ucode(struct iwl_mvm *mvm) { /* TODO - better define this */ return mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000; } static inline bool iwl_mvm_is_cdb_supported(struct iwl_mvm *mvm) { /* * TODO: * The issue of how to determine CDB APIs and usage is still not fully * defined. * There is a compilation for CDB and non-CDB FW, but there may * be also runtime check. * For now there is a TLV for checking compilation mode, but a * runtime check will also have to be here - once defined. */ return fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CDB_SUPPORT); } static inline bool iwl_mvm_cdb_scan_api(struct iwl_mvm *mvm) { /* * TODO: should this be the same as iwl_mvm_is_cdb_supported()? * but then there's a little bit of code in scan that won't make * any sense... */ return mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000; } static inline bool iwl_mvm_is_scan_ext_chan_supported(struct iwl_mvm *mvm) { return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_SCAN_EXT_CHAN_VER); } static inline bool iwl_mvm_is_reduced_config_scan_supported(struct iwl_mvm *mvm) { return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_REDUCED_SCAN_CONFIG); } static inline bool iwl_mvm_is_band_in_rx_supported(struct iwl_mvm *mvm) { return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BAND_IN_RX_DATA); } static inline bool iwl_mvm_has_new_rx_stats_api(struct iwl_mvm *mvm) { return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_NEW_RX_STATS); } static inline bool iwl_mvm_has_quota_low_latency(struct iwl_mvm *mvm) { return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_QUOTA_LOW_LATENCY); } static inline bool iwl_mvm_has_tlc_offload(const struct iwl_mvm *mvm) { return fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TLC_OFFLOAD); } static inline struct agg_tx_status * iwl_mvm_get_agg_status(struct iwl_mvm *mvm, void *tx_resp) { if (iwl_mvm_has_new_tx_api(mvm)) return &((struct iwl_mvm_tx_resp *)tx_resp)->status; else return ((struct iwl_mvm_tx_resp_v3 *)tx_resp)->status; } static inline bool iwl_mvm_is_tt_in_fw(struct iwl_mvm *mvm) { /* these two TLV are redundant since the responsibility to CT-kill by * FW happens only after we send at least one command of * temperature THs report. */ return fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CT_KILL_BY_FW) && fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT); } static inline bool iwl_mvm_is_ctdp_supported(struct iwl_mvm *mvm) { return fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CTDP_SUPPORT); } extern const u8 iwl_mvm_ac_to_tx_fifo[]; extern const u8 iwl_mvm_ac_to_gen2_tx_fifo[]; static inline u8 iwl_mvm_mac_ac_to_tx_fifo(struct iwl_mvm *mvm, enum ieee80211_ac_numbers ac) { return iwl_mvm_has_new_tx_api(mvm) ? iwl_mvm_ac_to_gen2_tx_fifo[ac] : iwl_mvm_ac_to_tx_fifo[ac]; } struct iwl_rate_info { u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */ u8 plcp_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. 
*/ u8 plcp_mimo2; /* uCode API: IWL_RATE_MIMO2_6M_PLCP, etc. */ u8 plcp_mimo3; /* uCode API: IWL_RATE_MIMO3_6M_PLCP, etc. */ u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. */ }; void __iwl_mvm_mac_stop(struct iwl_mvm *mvm); int __iwl_mvm_mac_start(struct iwl_mvm *mvm); /****************** * MVM Methods ******************/ /* uCode */ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm); /* Utils */ int iwl_mvm_legacy_hw_idx_to_mac80211_idx(u32 rate_n_flags, enum nl80211_band band); int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags, enum nl80211_band band); void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags, enum nl80211_band band, struct ieee80211_tx_rate *r); void iwl_mvm_hwrate_to_tx_rate_v1(u32 rate_n_flags, enum nl80211_band band, struct ieee80211_tx_rate *r); u8 iwl_mvm_mac80211_idx_to_hwrate(const struct iwl_fw *fw, int rate_idx); u8 iwl_mvm_mac80211_ac_to_ucode_ac(enum ieee80211_ac_numbers ac); static inline void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm) { iwl_fwrt_dump_error_logs(&mvm->fwrt); } u8 first_antenna(u8 mask); u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx); void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, int clock_type, u32 *gp2, u64 *boottime, ktime_t *realtime); u32 iwl_mvm_get_systime(struct iwl_mvm *mvm); /* Tx / Host Commands */ int __must_check iwl_mvm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd); int __must_check iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u32 id, u32 flags, u16 len, const void *data); int __must_check iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd, u32 *status); int __must_check iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id, u16 len, const void *data, u32 *status); int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb, struct ieee80211_sta *sta); int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb); void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, struct iwl_tx_cmd *tx_cmd, struct ieee80211_tx_info *info, u8 sta_id); void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd, struct ieee80211_tx_info *info, struct ieee80211_sta *sta, __le16 fc); void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq); unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm, struct ieee80211_sta *sta, unsigned int tid); u32 iwl_mvm_tx_csum_bz(struct iwl_mvm *mvm, struct sk_buff *skb, bool amsdu); #ifdef CONFIG_IWLWIFI_DEBUG const char *iwl_mvm_get_tx_fail_reason(u32 status); #else static inline const char *iwl_mvm_get_tx_fail_reason(u32 status) { return ""; } #endif int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk); int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal); int iwl_mvm_flush_sta_tids(struct iwl_mvm *mvm, u32 sta_id, u16 tids); void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm); static inline void iwl_mvm_set_tx_cmd_ccmp(struct ieee80211_tx_info *info, struct iwl_tx_cmd *tx_cmd) { struct ieee80211_key_conf *keyconf = info->control.hw_key; tx_cmd->sec_ctl = TX_CMD_SEC_CCM; memcpy(tx_cmd->key, keyconf->key, keyconf->keylen); } static inline void iwl_mvm_wait_for_async_handlers(struct iwl_mvm *mvm) { flush_work(&mvm->async_handlers_wk); } /* Statistics */ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt); void iwl_mvm_rx_statistics(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear); void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm); /* NVM */ int iwl_nvm_init(struct iwl_mvm *mvm); 
int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm); static inline u8 iwl_mvm_get_valid_tx_ant(struct iwl_mvm *mvm) { return mvm->nvm_data && mvm->nvm_data->valid_tx_ant ? mvm->fw->valid_tx_ant & mvm->nvm_data->valid_tx_ant : mvm->fw->valid_tx_ant; } static inline u8 iwl_mvm_get_valid_rx_ant(struct iwl_mvm *mvm) { return mvm->nvm_data && mvm->nvm_data->valid_rx_ant ? mvm->fw->valid_rx_ant & mvm->nvm_data->valid_rx_ant : mvm->fw->valid_rx_ant; } static inline void iwl_mvm_toggle_tx_ant(struct iwl_mvm *mvm, u8 *ant) { *ant = iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm), *ant); } static inline u32 iwl_mvm_get_phy_config(struct iwl_mvm *mvm) { u32 phy_config = ~(FW_PHY_CFG_TX_CHAIN | FW_PHY_CFG_RX_CHAIN); u32 valid_rx_ant = iwl_mvm_get_valid_rx_ant(mvm); u32 valid_tx_ant = iwl_mvm_get_valid_tx_ant(mvm); phy_config |= valid_tx_ant << FW_PHY_CFG_TX_CHAIN_POS | valid_rx_ant << FW_PHY_CFG_RX_CHAIN_POS; return mvm->fw->phy_config & phy_config; } int iwl_mvm_up(struct iwl_mvm *mvm); int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm); int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm); -bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm, - struct iwl_bcast_filter_cmd *cmd); /* * FW notifications / CMD responses handlers * Convention: iwl_mvm_rx_ */ void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, int queue); void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, int queue); void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, int queue); void iwl_mvm_rx_bar_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, int queue); void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, int queue); void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags); void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_shared_mem_cfg_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); /* MVM PHY */ int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, struct cfg80211_chan_def *chandef, u8 chains_static, u8 chains_dynamic); int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, struct cfg80211_chan_def *chandef, u8 chains_static, u8 chains_dynamic); void iwl_mvm_phy_ctxt_ref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt); void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt); int iwl_mvm_phy_ctx_count(struct iwl_mvm *mvm); u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef); u8 iwl_mvm_get_ctrl_pos(struct cfg80211_chan_def *chandef); /* MAC (virtual interface) programming */ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct 
ieee80211_vif *vif); int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool force_assoc_off, const u8 *bssid_override); int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct sk_buff *beacon); int iwl_mvm_mac_ctxt_send_beacon_cmd(struct iwl_mvm *mvm, struct sk_buff *beacon, void *data, int len); u8 iwl_mvm_mac_ctxt_get_lowest_rate(struct ieee80211_tx_info *info, struct ieee80211_vif *vif); u16 iwl_mvm_mac_ctxt_get_beacon_flags(const struct iwl_fw *fw, u8 rate_idx); void iwl_mvm_mac_ctxt_set_tim(struct iwl_mvm *mvm, __le32 *tim_index, __le32 *tim_size, u8 *beacon, u32 frame_size); void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_mu_mimo_grp_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_sta_pm_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_window_status_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm, struct ieee80211_vif *vif); void iwl_mvm_probe_resp_data_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_missed_vap_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_channel_switch_start_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_channel_switch_error_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); /* Bindings */ int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif); /* Quota management */ static inline size_t iwl_mvm_quota_cmd_size(struct iwl_mvm *mvm) { return iwl_mvm_has_quota_low_latency(mvm) ? 
sizeof(struct iwl_time_quota_cmd) : sizeof(struct iwl_time_quota_cmd_v1); } static inline struct iwl_time_quota_data *iwl_mvm_quota_cmd_get_quota(struct iwl_mvm *mvm, struct iwl_time_quota_cmd *cmd, int i) { struct iwl_time_quota_data_v1 *quotas; if (iwl_mvm_has_quota_low_latency(mvm)) return &cmd->quotas[i]; quotas = (struct iwl_time_quota_data_v1 *)cmd->quotas; return (struct iwl_time_quota_data *)"as[i]; } int iwl_mvm_update_quotas(struct iwl_mvm *mvm, bool force_upload, struct ieee80211_vif *disabled_vif); /* Scanning */ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_scan_request *req, struct ieee80211_scan_ies *ies); int iwl_mvm_scan_size(struct iwl_mvm *mvm); int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify); int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm); void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm); void iwl_mvm_scan_timeout_wk(struct work_struct *work); /* Scheduled scan */ void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_sched_scan_request *req, struct ieee80211_scan_ies *ies, int type); void iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); /* UMAC scan */ int iwl_mvm_config_scan(struct iwl_mvm *mvm); void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); /* MVM debugfs */ #ifdef CONFIG_IWLWIFI_DEBUGFS void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm); void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif); void iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif); #else static inline void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm) { } static inline void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { } static inline void iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { } #endif /* CONFIG_IWLWIFI_DEBUGFS */ /* rate scaling */ int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq); void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg); int rs_pretty_print_rate_v1(char *buf, int bufsz, const u32 rate); void rs_update_last_rssi(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, struct ieee80211_rx_status *rx_status); /* power management */ int iwl_mvm_power_update_device(struct iwl_mvm *mvm); int iwl_mvm_power_update_mac(struct iwl_mvm *mvm); int iwl_mvm_power_update_ps(struct iwl_mvm *mvm); int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm, struct ieee80211_vif *vif, char *buf, int bufsz); void iwl_mvm_power_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif); void iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); #ifdef CONFIG_IWLWIFI_LEDS int iwl_mvm_leds_init(struct iwl_mvm *mvm); void iwl_mvm_leds_exit(struct iwl_mvm *mvm); void iwl_mvm_leds_sync(struct iwl_mvm *mvm); #else static inline int iwl_mvm_leds_init(struct iwl_mvm *mvm) { return 0; } static inline void iwl_mvm_leds_exit(struct iwl_mvm *mvm) { } static inline void iwl_mvm_leds_sync(struct iwl_mvm *mvm) { } #endif /* D3 (WoWLAN, NetDetect) */ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan); int iwl_mvm_resume(struct ieee80211_hw *hw); void 
iwl_mvm_set_wakeup(struct ieee80211_hw *hw, bool enabled); void iwl_mvm_set_rekey_data(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct cfg80211_gtk_rekey_data *data); void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct inet6_dev *idev); void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int idx); extern const struct file_operations iwl_dbgfs_d3_test_ops; #ifdef CONFIG_PM void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif); #else static inline void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { } #endif void iwl_mvm_set_wowlan_qos_seq(struct iwl_mvm_sta *mvm_ap_sta, struct iwl_wowlan_config_cmd *cmd); int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool disable_offloading, bool offload_ns, u32 cmd_flags); /* BT Coex */ int iwl_mvm_send_bt_init_conf(struct iwl_mvm *mvm); void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif, enum ieee80211_rssi_event_data); void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm); u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm, struct ieee80211_sta *sta); bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm, struct ieee80211_sta *sta); bool iwl_mvm_bt_coex_is_ant_avail(struct iwl_mvm *mvm, u8 ant); bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm *mvm); bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm, enum nl80211_band band); u8 iwl_mvm_bt_coex_get_single_ant_msk(struct iwl_mvm *mvm, u8 enabled_ants); u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, struct ieee80211_tx_info *info, u8 ac); /* beacon filtering */ #ifdef CONFIG_IWLWIFI_DEBUGFS void iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif, struct iwl_beacon_filter_cmd *cmd); #else static inline void iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif, struct iwl_beacon_filter_cmd *cmd) {} #endif int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u32 flags); int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u32 flags); /* SMPS */ void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif, enum iwl_mvm_smps_type_request req_type, enum ieee80211_smps_mode smps_request); bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt); void iwl_mvm_apply_fw_smps_request(struct ieee80211_vif *vif); /* Low latency */ int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool low_latency, enum iwl_mvm_low_latency_cause cause); /* get SystemLowLatencyMode - only needed for beacon threshold? */ bool iwl_mvm_low_latency(struct iwl_mvm *mvm); bool iwl_mvm_low_latency_band(struct iwl_mvm *mvm, enum nl80211_band band); void iwl_mvm_send_low_latency_cmd(struct iwl_mvm *mvm, bool low_latency, u16 mac_id); /* get VMACLowLatencyMode */ static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif) { /* * should this consider associated/active/... state? * * Normally low-latency should only be active on interfaces * that are active, but at least with debugfs it can also be * enabled on interfaces that aren't active. However, when * interface aren't active then they aren't added into the * binding, so this has no real impact. For now, just return * the current desired low-latency state. 
*/ return mvmvif->low_latency_actual; } static inline void iwl_mvm_vif_set_low_latency(struct iwl_mvm_vif *mvmvif, bool set, enum iwl_mvm_low_latency_cause cause) { u8 new_state; if (set) mvmvif->low_latency |= cause; else mvmvif->low_latency &= ~cause; /* * if LOW_LATENCY_DEBUGFS_FORCE_ENABLE is enabled no changes are * allowed to actual mode. */ if (mvmvif->low_latency & LOW_LATENCY_DEBUGFS_FORCE_ENABLE && cause != LOW_LATENCY_DEBUGFS_FORCE_ENABLE) return; if (cause == LOW_LATENCY_DEBUGFS_FORCE_ENABLE && set) /* * We enter force state */ new_state = !!(mvmvif->low_latency & LOW_LATENCY_DEBUGFS_FORCE); else /* * Check if any other one set low latency */ new_state = !!(mvmvif->low_latency & ~(LOW_LATENCY_DEBUGFS_FORCE_ENABLE | LOW_LATENCY_DEBUGFS_FORCE)); mvmvif->low_latency_actual = new_state; } /* Return a bitmask with all the hw supported queues, except for the * command queue, which can't be flushed. */ static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm) { return ((BIT(mvm->trans->trans_cfg->base_params->num_of_queues) - 1) & ~BIT(IWL_MVM_DQA_CMD_QUEUE)); } void iwl_mvm_stop_device(struct iwl_mvm *mvm); /* Thermal management and CT-kill */ void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff); void iwl_mvm_temp_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_tt_handler(struct iwl_mvm *mvm); void iwl_mvm_thermal_initialize(struct iwl_mvm *mvm, u32 min_backoff); void iwl_mvm_thermal_exit(struct iwl_mvm *mvm); void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state); int iwl_mvm_get_temp(struct iwl_mvm *mvm, s32 *temp); void iwl_mvm_ct_kill_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_enter_ctkill(struct iwl_mvm *mvm); int iwl_mvm_send_temp_report_ths_cmd(struct iwl_mvm *mvm); int iwl_mvm_ctdp_command(struct iwl_mvm *mvm, u32 op, u32 budget); #if IS_ENABLED(CONFIG_IWLMEI) /* vendor commands */ void iwl_mvm_vendor_cmds_register(struct iwl_mvm *mvm); #else static inline void iwl_mvm_vendor_cmds_register(struct iwl_mvm *mvm) {} #endif /* Location Aware Regulatory */ struct iwl_mcc_update_resp * iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2, enum iwl_mcc_source src_id); int iwl_mvm_init_mcc(struct iwl_mvm *mvm); void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy, const char *alpha2, enum iwl_mcc_source src_id, bool *changed); struct ieee80211_regdomain *iwl_mvm_get_current_regdomain(struct iwl_mvm *mvm, bool *changed); int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm); void iwl_mvm_update_changed_regdom(struct iwl_mvm *mvm); /* smart fifo */ int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool added_vif); /* FTM responder */ int iwl_mvm_ftm_start_responder(struct iwl_mvm *mvm, struct ieee80211_vif *vif); void iwl_mvm_ftm_restart_responder(struct iwl_mvm *mvm, struct ieee80211_vif *vif); void iwl_mvm_ftm_responder_stats(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); int iwl_mvm_ftm_resp_remove_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u8 *addr); int iwl_mvm_ftm_respoder_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u8 *addr, u32 cipher, u8 *tk, u32 tk_len, u8 *hltk, u32 hltk_len); void iwl_mvm_ftm_responder_clear(struct iwl_mvm *mvm, struct ieee80211_vif *vif); /* FTM initiator */ void iwl_mvm_ftm_restart(struct iwl_mvm *mvm); void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_ftm_lc_notif(struct iwl_mvm 
*mvm, struct iwl_rx_cmd_buffer *rxb); int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_pmsr_request *request); void iwl_mvm_ftm_abort(struct iwl_mvm *mvm, struct cfg80211_pmsr_request *req); void iwl_mvm_ftm_initiator_smooth_config(struct iwl_mvm *mvm); void iwl_mvm_ftm_initiator_smooth_stop(struct iwl_mvm *mvm); int iwl_mvm_ftm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u8 *addr, u32 cipher, u8 *tk, u32 tk_len, u8 *hltk, u32 hltk_len); void iwl_mvm_ftm_remove_pasn_sta(struct iwl_mvm *mvm, u8 *addr); /* TDLS */ /* * We use TID 4 (VI) as a FW-used-only TID when TDLS connections are present. * This TID is marked as used vs the AP and all connected TDLS peers. */ #define IWL_MVM_TDLS_FW_TID 4 int iwl_mvm_tdls_sta_count(struct iwl_mvm *mvm, struct ieee80211_vif *vif); void iwl_mvm_teardown_tdls_peers(struct iwl_mvm *mvm); void iwl_mvm_recalc_tdls_state(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool sta_added); void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw, struct ieee80211_vif *vif); int iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u8 oper_class, struct cfg80211_chan_def *chandef, struct sk_buff *tmpl_skb, u32 ch_sw_tm_ie); void iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_tdls_ch_sw_params *params); void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta); void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_tdls_ch_switch_work(struct work_struct *work); void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm, enum iwl_mvm_rxq_notif_type type, bool sync, const void *data, u32 size); void iwl_mvm_reorder_timer_expired(struct timer_list *t); struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm); struct ieee80211_vif *iwl_mvm_get_vif_by_macid(struct iwl_mvm *mvm, u32 macid); bool iwl_mvm_is_vif_assoc(struct iwl_mvm *mvm); #define MVM_TCM_PERIOD_MSEC 500 #define MVM_TCM_PERIOD (HZ * MVM_TCM_PERIOD_MSEC / 1000) #define MVM_LL_PERIOD (10 * HZ) void iwl_mvm_tcm_work(struct work_struct *work); void iwl_mvm_recalc_tcm(struct iwl_mvm *mvm); void iwl_mvm_pause_tcm(struct iwl_mvm *mvm, bool with_cancel); void iwl_mvm_resume_tcm(struct iwl_mvm *mvm); void iwl_mvm_tcm_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif); void iwl_mvm_tcm_rm_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif); u8 iwl_mvm_tcm_load_percentage(u32 airtime, u32 elapsed); void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error); unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool tdls, bool cmd_q); void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif, const char *errmsg); void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm, struct ieee80211_vif *vif, const struct ieee80211_sta *sta, u16 tid); int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b); int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm); int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm); void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm); #ifdef CONFIG_IWLWIFI_DEBUGFS void iwl_mvm_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct dentry *dir); #endif int iwl_rfi_send_config_cmd(struct iwl_mvm *mvm, struct iwl_rfi_lut_entry *rfi_table); struct iwl_rfi_freq_table_resp_cmd *iwl_rfi_get_freq_table(struct iwl_mvm *mvm); 
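The inline low-latency helpers above (iwl_mvm_vif_low_latency() and iwl_mvm_vif_set_low_latency()) derive a single applied state from a bitmask of independent causes, with the debugfs force bits taking priority over everything else. The following standalone sketch is not driver code: the enum values, the demo_vif struct and demo_set_low_latency() are hypothetical stand-ins chosen only to mirror that decision logic, so the mask/override behaviour can be traced in isolation.

/*
 * Illustrative, self-contained sketch (assumptions: simplified cause bits and
 * a minimal vif struct; only the decision logic mirrors the helper above).
 */
#include <stdbool.h>
#include <stdio.h>

enum ll_cause {
	LL_TRAFFIC		= 1 << 0,	/* latency-sensitive traffic detected */
	LL_DEBUGFS		= 1 << 1,	/* user request via debugfs */
	LL_DBGFS_FORCE_ENABLE	= 1 << 2,	/* "force" mode engaged */
	LL_DBGFS_FORCE		= 1 << 3,	/* value forced while in force mode */
};

struct demo_vif {
	unsigned int low_latency;	/* OR of ll_cause bits currently set */
	bool low_latency_actual;	/* state actually applied */
};

static void demo_set_low_latency(struct demo_vif *vif, bool set, enum ll_cause cause)
{
	if (set)
		vif->low_latency |= cause;
	else
		vif->low_latency &= ~cause;

	/* while force mode is on, only force-related updates may change the state */
	if ((vif->low_latency & LL_DBGFS_FORCE_ENABLE) &&
	    cause != LL_DBGFS_FORCE_ENABLE)
		return;

	if (cause == LL_DBGFS_FORCE_ENABLE && set)
		/* entering force mode: take the forced value */
		vif->low_latency_actual = !!(vif->low_latency & LL_DBGFS_FORCE);
	else
		/* normal mode: any non-force cause keeps low latency on */
		vif->low_latency_actual = !!(vif->low_latency &
					     ~(LL_DBGFS_FORCE_ENABLE | LL_DBGFS_FORCE));
}

int main(void)
{
	struct demo_vif vif = { 0 };

	demo_set_low_latency(&vif, true, LL_TRAFFIC);
	printf("traffic cause set       -> actual=%d\n", vif.low_latency_actual); /* 1 */

	demo_set_low_latency(&vif, true, LL_DBGFS_FORCE_ENABLE);
	printf("force mode, force bit 0 -> actual=%d\n", vif.low_latency_actual); /* 0 */

	demo_set_low_latency(&vif, false, LL_TRAFFIC);
	printf("traffic cleared (forced)-> actual=%d\n", vif.low_latency_actual); /* still 0, update ignored */

	demo_set_low_latency(&vif, false, LL_DBGFS_FORCE_ENABLE);
	printf("force mode left         -> actual=%d\n", vif.low_latency_actual); /* recomputed from remaining causes: 0 */
	return 0;
}

The point the sketch makes explicit: the force bits are masked out of the normal computation, so leaving force mode recomputes the applied state purely from the remaining causes rather than remembering the previously forced value.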
void iwl_rfi_deactivate_notif_handler(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); static inline u8 iwl_mvm_phy_band_from_nl80211(enum nl80211_band band) { switch (band) { case NL80211_BAND_2GHZ: return PHY_BAND_24; case NL80211_BAND_5GHZ: return PHY_BAND_5; case NL80211_BAND_6GHZ: return PHY_BAND_6; default: WARN_ONCE(1, "Unsupported band (%u)\n", band); return PHY_BAND_5; } } /* Channel info utils */ static inline bool iwl_mvm_has_ultra_hb_channel(struct iwl_mvm *mvm) { return fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS); } static inline void *iwl_mvm_chan_info_cmd_tail(struct iwl_mvm *mvm, struct iwl_fw_channel_info *ci) { return (u8 *)ci + (iwl_mvm_has_ultra_hb_channel(mvm) ? sizeof(struct iwl_fw_channel_info) : sizeof(struct iwl_fw_channel_info_v1)); } static inline size_t iwl_mvm_chan_info_padding(struct iwl_mvm *mvm) { return iwl_mvm_has_ultra_hb_channel(mvm) ? 0 : sizeof(struct iwl_fw_channel_info) - sizeof(struct iwl_fw_channel_info_v1); } static inline void iwl_mvm_set_chan_info(struct iwl_mvm *mvm, struct iwl_fw_channel_info *ci, u32 chan, u8 band, u8 width, u8 ctrl_pos) { if (iwl_mvm_has_ultra_hb_channel(mvm)) { ci->channel = cpu_to_le32(chan); ci->band = band; ci->width = width; ci->ctrl_pos = ctrl_pos; } else { struct iwl_fw_channel_info_v1 *ci_v1 = (struct iwl_fw_channel_info_v1 *)ci; ci_v1->channel = chan; ci_v1->band = band; ci_v1->width = width; ci_v1->ctrl_pos = ctrl_pos; } } static inline void iwl_mvm_set_chan_info_chandef(struct iwl_mvm *mvm, struct iwl_fw_channel_info *ci, struct cfg80211_chan_def *chandef) { enum nl80211_band band = chandef->chan->band; iwl_mvm_set_chan_info(mvm, ci, chandef->chan->hw_value, iwl_mvm_phy_band_from_nl80211(band), iwl_mvm_get_channel_width(chandef), iwl_mvm_get_ctrl_pos(chandef)); } static inline int iwl_umac_scan_get_max_profiles(const struct iwl_fw *fw) { u8 ver = iwl_fw_lookup_cmd_ver(fw, SCAN_OFFLOAD_UPDATE_PROFILES_CMD, IWL_FW_CMD_VER_UNKNOWN); return (ver == IWL_FW_CMD_VER_UNKNOWN || ver < 3) ? IWL_SCAN_MAX_PROFILES : IWL_SCAN_MAX_PROFILES_V2; } static inline enum iwl_location_cipher iwl_mvm_cipher_to_location_cipher(u32 cipher) { switch (cipher) { case WLAN_CIPHER_SUITE_CCMP: return IWL_LOCATION_CIPHER_CCMP_128; case WLAN_CIPHER_SUITE_GCMP: return IWL_LOCATION_CIPHER_GCMP_128; case WLAN_CIPHER_SUITE_GCMP_256: return IWL_LOCATION_CIPHER_GCMP_256; default: return IWL_LOCATION_CIPHER_INVALID; } } struct iwl_mvm_csme_conn_info *iwl_mvm_get_csme_conn_info(struct iwl_mvm *mvm); static inline int iwl_mvm_mei_get_ownership(struct iwl_mvm *mvm) { if (mvm->mei_registered) return iwl_mei_get_ownership(); return 0; } static inline void iwl_mvm_mei_tx_copy_to_csme(struct iwl_mvm *mvm, struct sk_buff *skb, unsigned int ivlen) { if (mvm->mei_registered) iwl_mei_tx_copy_to_csme(skb, ivlen); } static inline void iwl_mvm_mei_host_disassociated(struct iwl_mvm *mvm) { if (mvm->mei_registered) iwl_mei_host_disassociated(); } static inline void iwl_mvm_mei_device_down(struct iwl_mvm *mvm) { if (mvm->mei_registered) iwl_mei_device_down(); } static inline void iwl_mvm_mei_set_sw_rfkill_state(struct iwl_mvm *mvm) { bool sw_rfkill = - mvm->hw_registered ? rfkill_blocked(mvm->hw->wiphy->rfkill) : false; + mvm->hw_registered ? 
rfkill_soft_blocked(mvm->hw->wiphy->rfkill) : false; if (mvm->mei_registered) iwl_mei_set_rfkill_state(iwl_mvm_is_radio_killed(mvm), sw_rfkill); } void iwl_mvm_send_roaming_forbidden_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool forbidden); #endif /* __IWL_MVM_H__ */ diff --git a/sys/contrib/dev/iwlwifi/mvm/ops.c b/sys/contrib/dev/iwlwifi/mvm/ops.c index 0403de7b8566..64bc3adbd8fc 100644 --- a/sys/contrib/dev/iwlwifi/mvm/ops.c +++ b/sys/contrib/dev/iwlwifi/mvm/ops.c @@ -1,1986 +1,1985 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2012-2014, 2018-2020 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #if defined(__FreeBSD__) #define LINUXKPI_PARAM_PREFIX iwlwifi_mvm_ #endif #include #if defined(__linux__) #include #endif #include #include #include "fw/notif-wait.h" #include "iwl-trans.h" #include "iwl-op-mode.h" #include "fw/img.h" #include "iwl-debug.h" #include "iwl-drv.h" #include "iwl-modparams.h" #include "mvm.h" #include "iwl-phy-db.h" #include "iwl-eeprom-parse.h" #include "iwl-csr.h" #include "iwl-io.h" #include "iwl-prph.h" #include "rs.h" #include "fw/api/scan.h" #include "fw/api/rfi.h" #include "time-event.h" #include "fw-api.h" #include "fw/acpi.h" #include "fw/uefi.h" #if defined(__linux__) #define DRV_DESCRIPTION "The new Intel(R) wireless AGN driver for Linux" MODULE_LICENSE("GPL"); #elif defined(__FreeBSD__) #define DRV_DESCRIPTION "The new Intel(R) wireless AGN/AC/AX based driver for FreeBSD" MODULE_LICENSE("BSD"); #endif MODULE_DESCRIPTION(DRV_DESCRIPTION); MODULE_IMPORT_NS(IWLWIFI); static const struct iwl_op_mode_ops iwl_mvm_ops; static const struct iwl_op_mode_ops iwl_mvm_ops_mq; struct iwl_mvm_mod_params iwlmvm_mod_params = { #if defined(__FreeBSD__) .power_scheme = IWL_POWER_SCHEME_CAM, /* disable default PS */ #else .power_scheme = IWL_POWER_SCHEME_BPS, #endif /* rest of fields are 0 by default */ }; module_param_named(init_dbg, iwlmvm_mod_params.init_dbg, bool, 0444); MODULE_PARM_DESC(init_dbg, "set to true to debug an ASSERT in INIT fw (default: false"); module_param_named(power_scheme, iwlmvm_mod_params.power_scheme, int, 0444); MODULE_PARM_DESC(power_scheme, "power management scheme: 1-active, 2-balanced, 3-low power, default: 2"); /* * module init and exit functions */ static int __init iwl_mvm_init(void) { int ret; ret = iwl_mvm_rate_control_register(); if (ret) { pr_err("Unable to register rate control algorithm: %d\n", ret); return ret; } ret = iwl_opmode_register("iwlmvm", &iwl_mvm_ops); if (ret) pr_err("Unable to register MVM op_mode: %d\n", ret); return ret; } #if defined(__linux__) module_init(iwl_mvm_init); #elif defined(__FreeBSD__) module_init_order(iwl_mvm_init, SI_ORDER_SECOND); #endif static void __exit iwl_mvm_exit(void) { iwl_opmode_deregister("iwlmvm"); iwl_mvm_rate_control_unregister(); } module_exit(iwl_mvm_exit); static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode) { struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); u8 radio_cfg_type, radio_cfg_step, radio_cfg_dash; u32 reg_val; u32 phy_config = iwl_mvm_get_phy_config(mvm); radio_cfg_type = (phy_config & FW_PHY_CFG_RADIO_TYPE) >> FW_PHY_CFG_RADIO_TYPE_POS; radio_cfg_step = (phy_config & FW_PHY_CFG_RADIO_STEP) >> FW_PHY_CFG_RADIO_STEP_POS; radio_cfg_dash = (phy_config & FW_PHY_CFG_RADIO_DASH) >> FW_PHY_CFG_RADIO_DASH_POS; /* SKU control */ reg_val = CSR_HW_REV_STEP_DASH(mvm->trans->hw_rev); /* radio configuration */ reg_val |= radio_cfg_type << 
CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE; reg_val |= radio_cfg_step << CSR_HW_IF_CONFIG_REG_POS_PHY_STEP; reg_val |= radio_cfg_dash << CSR_HW_IF_CONFIG_REG_POS_PHY_DASH; WARN_ON((radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE) & ~CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE); /* * TODO: Bits 7-8 of CSR in 8000 HW family and higher set the ADC * sampling, and shouldn't be set to any non-zero value. * The same is supposed to be true of the other HW, but unsetting * them (such as the 7260) causes automatic tests to fail on seemingly * unrelated errors. Need to further investigate this, but for now * we'll separate cases. */ if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000) reg_val |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI; if (iwl_fw_dbg_is_d3_debug_enabled(&mvm->fwrt)) reg_val |= CSR_HW_IF_CONFIG_REG_D3_DEBUG; iwl_trans_set_bits_mask(mvm->trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP_DASH | CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE | CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP | CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH | CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI | CSR_HW_IF_CONFIG_REG_BIT_MAC_SI | CSR_HW_IF_CONFIG_REG_D3_DEBUG, reg_val); IWL_DEBUG_INFO(mvm, "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type, radio_cfg_step, radio_cfg_dash); /* * W/A : NIC is stuck in a reset state after Early PCIe power off * (PCIe power is lost before PERST# is asserted), causing ME FW * to lose ownership and not being able to obtain it back. */ if (!mvm->trans->cfg->apmg_not_supported) iwl_set_bits_mask_prph(mvm->trans, APMG_PS_CTRL_REG, APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS, ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS); } static void iwl_mvm_rx_monitor_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_datapath_monitor_notif *notif = (void *)pkt->data; struct ieee80211_supported_band *sband; const struct ieee80211_sta_he_cap *he_cap; struct ieee80211_vif *vif; if (notif->type != cpu_to_le32(IWL_DP_MON_NOTIF_TYPE_EXT_CCA)) return; vif = iwl_mvm_get_vif_by_macid(mvm, notif->mac_id); if (!vif || vif->type != NL80211_IFTYPE_STATION) return; if (!vif->bss_conf.chandef.chan || vif->bss_conf.chandef.chan->band != NL80211_BAND_2GHZ || vif->bss_conf.chandef.width < NL80211_CHAN_WIDTH_40) return; if (!vif->bss_conf.assoc) return; /* this shouldn't happen *again*, ignore it */ if (mvm->cca_40mhz_workaround) return; /* * We'll decrement this on disconnect - so set to 2 since we'll * still have to disconnect from the current AP first. */ mvm->cca_40mhz_workaround = 2; /* * This capability manipulation isn't really ideal, but it's the * easiest choice - otherwise we'd have to do some major changes * in mac80211 to support this, which isn't worth it. This does * mean that userspace may have outdated information, but that's * actually not an issue at all. 
*/ sband = mvm->hw->wiphy->bands[NL80211_BAND_2GHZ]; WARN_ON(!sband->ht_cap.ht_supported); WARN_ON(!(sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)); sband->ht_cap.cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; he_cap = ieee80211_get_he_iftype_cap(sband, ieee80211_vif_type_p2p(vif)); if (he_cap) { /* we know that ours is writable */ struct ieee80211_sta_he_cap *he = (void *)(uintptr_t)he_cap; WARN_ON(!he->has_he); WARN_ON(!(he->he_cap_elem.phy_cap_info[0] & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G)); he->he_cap_elem.phy_cap_info[0] &= ~IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G; } ieee80211_disconnect(vif, true); } void iwl_mvm_apply_fw_smps_request(struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm *mvm = mvmvif->mvm; enum ieee80211_smps_mode mode = IEEE80211_SMPS_AUTOMATIC; if (mvm->fw_static_smps_request && vif->bss_conf.chandef.width == NL80211_CHAN_WIDTH_160 && vif->bss_conf.he_support) mode = IEEE80211_SMPS_STATIC; iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_FW, mode); } static void iwl_mvm_intf_dual_chain_req(void *data, u8 *mac, struct ieee80211_vif *vif) { iwl_mvm_apply_fw_smps_request(vif); } static void iwl_mvm_rx_thermal_dual_chain_req(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_thermal_dual_chain_request *req = (void *)pkt->data; /* * We could pass it to the iterator data, but also need to remember * it for new interfaces that are added while in this state. */ mvm->fw_static_smps_request = req->event == cpu_to_le32(THERMAL_DUAL_CHAIN_REQ_DISABLE); ieee80211_iterate_interfaces(mvm->hw, IEEE80211_IFACE_SKIP_SDATA_NOT_IN_DRIVER, iwl_mvm_intf_dual_chain_req, NULL); } /** * enum iwl_rx_handler_context context for Rx handler * @RX_HANDLER_SYNC : this means that it will be called in the Rx path * which can't acquire mvm->mutex. * @RX_HANDLER_ASYNC_LOCKED : If the handler needs to hold mvm->mutex * (and only in this case!), it should be set as ASYNC. In that case, * it will be called from a worker with mvm->mutex held. * @RX_HANDLER_ASYNC_UNLOCKED : in case the handler needs to lock the * mutex itself, it will be called from a worker without mvm->mutex held. */ enum iwl_rx_handler_context { RX_HANDLER_SYNC, RX_HANDLER_ASYNC_LOCKED, RX_HANDLER_ASYNC_UNLOCKED, }; /** * struct iwl_rx_handlers handler for FW notification * @cmd_id: command id * @min_size: minimum size to expect for the notification * @context: see &iwl_rx_handler_context * @fn: the function is called when notification is received */ struct iwl_rx_handlers { u16 cmd_id, min_size; enum iwl_rx_handler_context context; void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); }; #define RX_HANDLER_NO_SIZE(_cmd_id, _fn, _context) \ { .cmd_id = _cmd_id, .fn = _fn, .context = _context, } #define RX_HANDLER_GRP_NO_SIZE(_grp, _cmd, _fn, _context) \ { .cmd_id = WIDE_ID(_grp, _cmd), .fn = _fn, .context = _context, } #define RX_HANDLER(_cmd_id, _fn, _context, _struct) \ { .cmd_id = _cmd_id, .fn = _fn, \ .context = _context, .min_size = sizeof(_struct), } #define RX_HANDLER_GRP(_grp, _cmd, _fn, _context, _struct) \ { .cmd_id = WIDE_ID(_grp, _cmd), .fn = _fn, \ .context = _context, .min_size = sizeof(_struct), } /* * Handlers for fw notifications * Convention: RX_HANDLER(CMD_NAME, iwl_mvm_rx_CMD_NAME * This list should be in order of frequency for performance purposes. 
* * The handler can be one from three contexts, see &iwl_rx_handler_context */ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, RX_HANDLER_SYNC, struct iwl_mvm_tx_resp), RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, RX_HANDLER_SYNC, struct iwl_mvm_ba_notif), RX_HANDLER_GRP(DATA_PATH_GROUP, TLC_MNG_UPDATE_NOTIF, iwl_mvm_tlc_update_notif, RX_HANDLER_SYNC, struct iwl_tlc_update_notif), RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif, RX_HANDLER_ASYNC_LOCKED, struct iwl_bt_coex_profile_notif), RX_HANDLER_NO_SIZE(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif, RX_HANDLER_ASYNC_LOCKED), RX_HANDLER_NO_SIZE(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics, RX_HANDLER_ASYNC_LOCKED), RX_HANDLER(BA_WINDOW_STATUS_NOTIFICATION_ID, iwl_mvm_window_status_notif, RX_HANDLER_SYNC, struct iwl_ba_window_status_notif), RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif, RX_HANDLER_SYNC, struct iwl_time_event_notif), RX_HANDLER_GRP(MAC_CONF_GROUP, SESSION_PROTECTION_NOTIF, iwl_mvm_rx_session_protect_notif, RX_HANDLER_SYNC, struct iwl_mvm_session_prot_notif), RX_HANDLER(MCC_CHUB_UPDATE_CMD, iwl_mvm_rx_chub_update_mcc, RX_HANDLER_ASYNC_LOCKED, struct iwl_mcc_chub_notif), RX_HANDLER(EOSP_NOTIFICATION, iwl_mvm_rx_eosp_notif, RX_HANDLER_SYNC, struct iwl_mvm_eosp_notification), RX_HANDLER(SCAN_ITERATION_COMPLETE, iwl_mvm_rx_lmac_scan_iter_complete_notif, RX_HANDLER_SYNC, struct iwl_lmac_scan_complete_notif), RX_HANDLER(SCAN_OFFLOAD_COMPLETE, iwl_mvm_rx_lmac_scan_complete_notif, RX_HANDLER_ASYNC_LOCKED, struct iwl_periodic_scan_complete), RX_HANDLER_NO_SIZE(MATCH_FOUND_NOTIFICATION, iwl_mvm_rx_scan_match_found, RX_HANDLER_SYNC), RX_HANDLER(SCAN_COMPLETE_UMAC, iwl_mvm_rx_umac_scan_complete_notif, RX_HANDLER_ASYNC_LOCKED, struct iwl_umac_scan_complete), RX_HANDLER(SCAN_ITERATION_COMPLETE_UMAC, iwl_mvm_rx_umac_scan_iter_complete_notif, RX_HANDLER_SYNC, struct iwl_umac_scan_iter_complete_notif), RX_HANDLER(MISSED_BEACONS_NOTIFICATION, iwl_mvm_rx_missed_beacons_notif, RX_HANDLER_SYNC, struct iwl_missed_beacons_notif), RX_HANDLER(REPLY_ERROR, iwl_mvm_rx_fw_error, RX_HANDLER_SYNC, struct iwl_error_resp), RX_HANDLER(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION, iwl_mvm_power_uapsd_misbehaving_ap_notif, RX_HANDLER_SYNC, struct iwl_uapsd_misbehaving_ap_notif), RX_HANDLER_NO_SIZE(DTS_MEASUREMENT_NOTIFICATION, iwl_mvm_temp_notif, RX_HANDLER_ASYNC_LOCKED), RX_HANDLER_GRP_NO_SIZE(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE, iwl_mvm_temp_notif, RX_HANDLER_ASYNC_UNLOCKED), RX_HANDLER_GRP(PHY_OPS_GROUP, CT_KILL_NOTIFICATION, iwl_mvm_ct_kill_notif, RX_HANDLER_SYNC, struct ct_kill_notif), RX_HANDLER(TDLS_CHANNEL_SWITCH_NOTIFICATION, iwl_mvm_rx_tdls_notif, RX_HANDLER_ASYNC_LOCKED, struct iwl_tdls_channel_switch_notif), RX_HANDLER(MFUART_LOAD_NOTIFICATION, iwl_mvm_rx_mfuart_notif, RX_HANDLER_SYNC, struct iwl_mfuart_load_notif_v1), RX_HANDLER_GRP(LOCATION_GROUP, TOF_RESPONDER_STATS, iwl_mvm_ftm_responder_stats, RX_HANDLER_ASYNC_LOCKED, struct iwl_ftm_responder_stats), RX_HANDLER_GRP_NO_SIZE(LOCATION_GROUP, TOF_RANGE_RESPONSE_NOTIF, iwl_mvm_ftm_range_resp, RX_HANDLER_ASYNC_LOCKED), RX_HANDLER_GRP_NO_SIZE(LOCATION_GROUP, TOF_LC_NOTIF, iwl_mvm_ftm_lc_notif, RX_HANDLER_ASYNC_LOCKED), RX_HANDLER_GRP(DEBUG_GROUP, MFU_ASSERT_DUMP_NTF, iwl_mvm_mfu_assert_dump_notif, RX_HANDLER_SYNC, struct iwl_mfu_assert_dump_notif), RX_HANDLER_GRP(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF, iwl_mvm_rx_stored_beacon_notif, RX_HANDLER_SYNC, struct iwl_stored_beacon_notif_v2), RX_HANDLER_GRP(DATA_PATH_GROUP, 
MU_GROUP_MGMT_NOTIF, iwl_mvm_mu_mimo_grp_notif, RX_HANDLER_SYNC, struct iwl_mu_group_mgmt_notif), RX_HANDLER_GRP(DATA_PATH_GROUP, STA_PM_NOTIF, iwl_mvm_sta_pm_notif, RX_HANDLER_SYNC, struct iwl_mvm_pm_state_notification), RX_HANDLER_GRP(MAC_CONF_GROUP, PROBE_RESPONSE_DATA_NOTIF, iwl_mvm_probe_resp_data_notif, RX_HANDLER_ASYNC_LOCKED, struct iwl_probe_resp_data_notif), RX_HANDLER_GRP(MAC_CONF_GROUP, CHANNEL_SWITCH_START_NOTIF, iwl_mvm_channel_switch_start_notif, RX_HANDLER_SYNC, struct iwl_channel_switch_start_notif), RX_HANDLER_GRP(MAC_CONF_GROUP, CHANNEL_SWITCH_ERROR_NOTIF, iwl_mvm_channel_switch_error_notif, RX_HANDLER_ASYNC_UNLOCKED, struct iwl_channel_switch_error_notif), RX_HANDLER_GRP(DATA_PATH_GROUP, MONITOR_NOTIF, iwl_mvm_rx_monitor_notif, RX_HANDLER_ASYNC_LOCKED, struct iwl_datapath_monitor_notif), RX_HANDLER_GRP(DATA_PATH_GROUP, THERMAL_DUAL_CHAIN_REQUEST, iwl_mvm_rx_thermal_dual_chain_req, RX_HANDLER_ASYNC_LOCKED, struct iwl_thermal_dual_chain_request), RX_HANDLER_GRP(SYSTEM_GROUP, RFI_DEACTIVATE_NOTIF, iwl_rfi_deactivate_notif_handler, RX_HANDLER_ASYNC_UNLOCKED, struct iwl_rfi_deactivate_notif), }; #undef RX_HANDLER #undef RX_HANDLER_GRP /* Please keep this array *SORTED* by hex value. * Access is done through binary search */ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = { HCMD_NAME(UCODE_ALIVE_NTFY), HCMD_NAME(REPLY_ERROR), HCMD_NAME(ECHO_CMD), HCMD_NAME(INIT_COMPLETE_NOTIF), HCMD_NAME(PHY_CONTEXT_CMD), HCMD_NAME(DBG_CFG), HCMD_NAME(SCAN_CFG_CMD), HCMD_NAME(SCAN_REQ_UMAC), HCMD_NAME(SCAN_ABORT_UMAC), HCMD_NAME(SCAN_COMPLETE_UMAC), HCMD_NAME(BA_WINDOW_STATUS_NOTIFICATION_ID), HCMD_NAME(ADD_STA_KEY), HCMD_NAME(ADD_STA), HCMD_NAME(REMOVE_STA), HCMD_NAME(FW_GET_ITEM_CMD), HCMD_NAME(TX_CMD), HCMD_NAME(SCD_QUEUE_CFG), HCMD_NAME(TXPATH_FLUSH), HCMD_NAME(MGMT_MCAST_KEY), HCMD_NAME(WEP_KEY), HCMD_NAME(SHARED_MEM_CFG), HCMD_NAME(TDLS_CHANNEL_SWITCH_CMD), HCMD_NAME(MAC_CONTEXT_CMD), HCMD_NAME(TIME_EVENT_CMD), HCMD_NAME(TIME_EVENT_NOTIFICATION), HCMD_NAME(BINDING_CONTEXT_CMD), HCMD_NAME(TIME_QUOTA_CMD), HCMD_NAME(NON_QOS_TX_COUNTER_CMD), HCMD_NAME(LEDS_CMD), HCMD_NAME(LQ_CMD), HCMD_NAME(FW_PAGING_BLOCK_CMD), HCMD_NAME(SCAN_OFFLOAD_REQUEST_CMD), HCMD_NAME(SCAN_OFFLOAD_ABORT_CMD), HCMD_NAME(HOT_SPOT_CMD), HCMD_NAME(SCAN_OFFLOAD_PROFILES_QUERY_CMD), HCMD_NAME(BT_COEX_UPDATE_REDUCED_TXP), HCMD_NAME(BT_COEX_CI), HCMD_NAME(PHY_CONFIGURATION_CMD), HCMD_NAME(CALIB_RES_NOTIF_PHY_DB), HCMD_NAME(PHY_DB_CMD), HCMD_NAME(SCAN_OFFLOAD_COMPLETE), HCMD_NAME(SCAN_OFFLOAD_UPDATE_PROFILES_CMD), HCMD_NAME(POWER_TABLE_CMD), HCMD_NAME(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION), HCMD_NAME(REPLY_THERMAL_MNG_BACKOFF), HCMD_NAME(NVM_ACCESS_CMD), HCMD_NAME(BEACON_NOTIFICATION), HCMD_NAME(BEACON_TEMPLATE_CMD), HCMD_NAME(TX_ANT_CONFIGURATION_CMD), HCMD_NAME(BT_CONFIG), HCMD_NAME(STATISTICS_CMD), HCMD_NAME(STATISTICS_NOTIFICATION), HCMD_NAME(EOSP_NOTIFICATION), HCMD_NAME(REDUCE_TX_POWER_CMD), HCMD_NAME(MISSED_BEACONS_NOTIFICATION), HCMD_NAME(TDLS_CONFIG_CMD), HCMD_NAME(MAC_PM_POWER_TABLE), HCMD_NAME(TDLS_CHANNEL_SWITCH_NOTIFICATION), HCMD_NAME(MFUART_LOAD_NOTIFICATION), HCMD_NAME(RSS_CONFIG_CMD), HCMD_NAME(SCAN_ITERATION_COMPLETE_UMAC), HCMD_NAME(REPLY_RX_PHY_CMD), HCMD_NAME(REPLY_RX_MPDU_CMD), HCMD_NAME(BAR_FRAME_RELEASE), HCMD_NAME(FRAME_RELEASE), HCMD_NAME(BA_NOTIF), HCMD_NAME(MCC_UPDATE_CMD), HCMD_NAME(MCC_CHUB_UPDATE_CMD), HCMD_NAME(MARKER_CMD), HCMD_NAME(BT_PROFILE_NOTIFICATION), - HCMD_NAME(BCAST_FILTER_CMD), HCMD_NAME(MCAST_FILTER_CMD), HCMD_NAME(REPLY_SF_CFG_CMD), 
HCMD_NAME(REPLY_BEACON_FILTERING_CMD), HCMD_NAME(D3_CONFIG_CMD), HCMD_NAME(PROT_OFFLOAD_CONFIG_CMD), HCMD_NAME(OFFLOADS_QUERY_CMD), HCMD_NAME(MATCH_FOUND_NOTIFICATION), HCMD_NAME(DTS_MEASUREMENT_NOTIFICATION), HCMD_NAME(WOWLAN_PATTERNS), HCMD_NAME(WOWLAN_CONFIGURATION), HCMD_NAME(WOWLAN_TSC_RSC_PARAM), HCMD_NAME(WOWLAN_TKIP_PARAM), HCMD_NAME(WOWLAN_KEK_KCK_MATERIAL), HCMD_NAME(WOWLAN_GET_STATUSES), HCMD_NAME(SCAN_ITERATION_COMPLETE), HCMD_NAME(D0I3_END_CMD), HCMD_NAME(LTR_CONFIG), HCMD_NAME(LDBG_CONFIG_CMD), }; /* Please keep this array *SORTED* by hex value. * Access is done through binary search */ static const struct iwl_hcmd_names iwl_mvm_system_names[] = { HCMD_NAME(SHARED_MEM_CFG_CMD), HCMD_NAME(INIT_EXTENDED_CFG_CMD), HCMD_NAME(FW_ERROR_RECOVERY_CMD), HCMD_NAME(RFI_CONFIG_CMD), HCMD_NAME(RFI_GET_FREQ_TABLE_CMD), HCMD_NAME(SYSTEM_FEATURES_CONTROL_CMD), HCMD_NAME(RFI_DEACTIVATE_NOTIF), }; /* Please keep this array *SORTED* by hex value. * Access is done through binary search */ static const struct iwl_hcmd_names iwl_mvm_mac_conf_names[] = { HCMD_NAME(CHANNEL_SWITCH_TIME_EVENT_CMD), HCMD_NAME(SESSION_PROTECTION_CMD), HCMD_NAME(SESSION_PROTECTION_NOTIF), HCMD_NAME(CHANNEL_SWITCH_START_NOTIF), }; /* Please keep this array *SORTED* by hex value. * Access is done through binary search */ static const struct iwl_hcmd_names iwl_mvm_phy_names[] = { HCMD_NAME(CMD_DTS_MEASUREMENT_TRIGGER_WIDE), HCMD_NAME(CTDP_CONFIG_CMD), HCMD_NAME(TEMP_REPORTING_THRESHOLDS_CMD), HCMD_NAME(PER_CHAIN_LIMIT_OFFSET_CMD), HCMD_NAME(CT_KILL_NOTIFICATION), HCMD_NAME(DTS_MEASUREMENT_NOTIF_WIDE), }; /* Please keep this array *SORTED* by hex value. * Access is done through binary search */ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = { HCMD_NAME(DQA_ENABLE_CMD), HCMD_NAME(UPDATE_MU_GROUPS_CMD), HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD), HCMD_NAME(STA_HE_CTXT_CMD), HCMD_NAME(RLC_CONFIG_CMD), HCMD_NAME(RFH_QUEUE_CONFIG_CMD), HCMD_NAME(TLC_MNG_CONFIG_CMD), HCMD_NAME(CHEST_COLLECTOR_FILTER_CONFIG_CMD), HCMD_NAME(SCD_QUEUE_CONFIG_CMD), HCMD_NAME(MONITOR_NOTIF), HCMD_NAME(THERMAL_DUAL_CHAIN_REQUEST), HCMD_NAME(STA_PM_NOTIF), HCMD_NAME(MU_GROUP_MGMT_NOTIF), HCMD_NAME(RX_QUEUES_NOTIFICATION), }; /* Please keep this array *SORTED* by hex value. * Access is done through binary search */ static const struct iwl_hcmd_names iwl_mvm_location_names[] = { HCMD_NAME(TOF_RANGE_REQ_CMD), HCMD_NAME(TOF_CONFIG_CMD), HCMD_NAME(TOF_RANGE_ABORT_CMD), HCMD_NAME(TOF_RANGE_REQ_EXT_CMD), HCMD_NAME(TOF_RESPONDER_CONFIG_CMD), HCMD_NAME(TOF_RESPONDER_DYN_CONFIG_CMD), HCMD_NAME(TOF_LC_NOTIF), HCMD_NAME(TOF_RESPONDER_STATS), HCMD_NAME(TOF_MCSI_DEBUG_NOTIF), HCMD_NAME(TOF_RANGE_RESPONSE_NOTIF), }; /* Please keep this array *SORTED* by hex value. * Access is done through binary search */ static const struct iwl_hcmd_names iwl_mvm_prot_offload_names[] = { HCMD_NAME(STORED_BEACON_NTF), }; /* Please keep this array *SORTED* by hex value. 
* Access is done through binary search */ static const struct iwl_hcmd_names iwl_mvm_regulatory_and_nvm_names[] = { HCMD_NAME(NVM_ACCESS_COMPLETE), HCMD_NAME(NVM_GET_INFO), HCMD_NAME(TAS_CONFIG), }; static const struct iwl_hcmd_arr iwl_mvm_groups[] = { [LEGACY_GROUP] = HCMD_ARR(iwl_mvm_legacy_names), [LONG_GROUP] = HCMD_ARR(iwl_mvm_legacy_names), [SYSTEM_GROUP] = HCMD_ARR(iwl_mvm_system_names), [MAC_CONF_GROUP] = HCMD_ARR(iwl_mvm_mac_conf_names), [PHY_OPS_GROUP] = HCMD_ARR(iwl_mvm_phy_names), [DATA_PATH_GROUP] = HCMD_ARR(iwl_mvm_data_path_names), [LOCATION_GROUP] = HCMD_ARR(iwl_mvm_location_names), [PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names), [REGULATORY_AND_NVM_GROUP] = HCMD_ARR(iwl_mvm_regulatory_and_nvm_names), }; /* this forward declaration can avoid to export the function */ static void iwl_mvm_async_handlers_wk(struct work_struct *wk); static u32 iwl_mvm_min_backoff(struct iwl_mvm *mvm) { const struct iwl_pwr_tx_backoff *backoff = mvm->cfg->pwr_tx_backoffs; u64 dflt_pwr_limit; if (!backoff) return 0; dflt_pwr_limit = iwl_acpi_get_pwr_limit(mvm->dev); while (backoff->pwr) { if (dflt_pwr_limit >= backoff->pwr) return backoff->backoff; backoff++; } return 0; } static void iwl_mvm_tx_unblock_dwork(struct work_struct *work) { struct iwl_mvm *mvm = container_of(work, struct iwl_mvm, cs_tx_unblock_dwork.work); struct ieee80211_vif *tx_blocked_vif; struct iwl_mvm_vif *mvmvif; mutex_lock(&mvm->mutex); tx_blocked_vif = rcu_dereference_protected(mvm->csa_tx_blocked_vif, lockdep_is_held(&mvm->mutex)); if (!tx_blocked_vif) goto unlock; mvmvif = iwl_mvm_vif_from_mac80211(tx_blocked_vif); iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, false); RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL); unlock: mutex_unlock(&mvm->mutex); } static void iwl_mvm_fwrt_dump_start(void *ctx) { struct iwl_mvm *mvm = ctx; mutex_lock(&mvm->mutex); } static void iwl_mvm_fwrt_dump_end(void *ctx) { struct iwl_mvm *mvm = ctx; mutex_unlock(&mvm->mutex); } static bool iwl_mvm_fwrt_fw_running(void *ctx) { return iwl_mvm_firmware_running(ctx); } static int iwl_mvm_fwrt_send_hcmd(void *ctx, struct iwl_host_cmd *host_cmd) { struct iwl_mvm *mvm = (struct iwl_mvm *)ctx; int ret; mutex_lock(&mvm->mutex); ret = iwl_mvm_send_cmd(mvm, host_cmd); mutex_unlock(&mvm->mutex); return ret; } static bool iwl_mvm_d3_debug_enable(void *ctx) { return IWL_MVM_D3_DEBUG; } static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = { .dump_start = iwl_mvm_fwrt_dump_start, .dump_end = iwl_mvm_fwrt_dump_end, .fw_running = iwl_mvm_fwrt_fw_running, .send_hcmd = iwl_mvm_fwrt_send_hcmd, .d3_debug_enable = iwl_mvm_d3_debug_enable, }; static int iwl_mvm_start_get_nvm(struct iwl_mvm *mvm) { struct iwl_trans *trans = mvm->trans; int ret; if (trans->csme_own) { if (WARN(!mvm->mei_registered, "csme is owner, but we aren't registered to iwlmei\n")) goto get_nvm_from_fw; mvm->mei_nvm_data = iwl_mei_get_nvm(); if (mvm->mei_nvm_data) { /* * mvm->mei_nvm_data is set and because of that, * we'll load the NVM from the FW when we'll get * ownership. 
*/ mvm->nvm_data = iwl_parse_mei_nvm_data(trans, trans->cfg, mvm->mei_nvm_data, mvm->fw); return 0; } IWL_ERR(mvm, "Got a NULL NVM from CSME, trying to get it from the device\n"); } get_nvm_from_fw: rtnl_lock(); wiphy_lock(mvm->hw->wiphy); mutex_lock(&mvm->mutex); ret = iwl_trans_start_hw(mvm->trans); if (ret) { mutex_unlock(&mvm->mutex); wiphy_unlock(mvm->hw->wiphy); rtnl_unlock(); return ret; } ret = iwl_run_init_mvm_ucode(mvm); if (ret && ret != -ERFKILL) iwl_fw_dbg_error_collect(&mvm->fwrt, FW_DBG_TRIGGER_DRIVER); if (!ret && iwl_mvm_is_lar_supported(mvm)) { mvm->hw->wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED; ret = iwl_mvm_init_mcc(mvm); } if (!iwlmvm_mod_params.init_dbg || !ret) iwl_mvm_stop_device(mvm); mutex_unlock(&mvm->mutex); wiphy_unlock(mvm->hw->wiphy); rtnl_unlock(); if (ret) IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret); return ret; } static int iwl_mvm_start_post_nvm(struct iwl_mvm *mvm) { struct iwl_mvm_csme_conn_info *csme_conn_info __maybe_unused; int ret; iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx); ret = iwl_mvm_mac_setup_register(mvm); if (ret) return ret; mvm->hw_registered = true; iwl_mvm_dbgfs_register(mvm); wiphy_rfkill_set_hw_state_reason(mvm->hw->wiphy, mvm->mei_rfkill_blocked, RFKILL_HARD_BLOCK_NOT_OWNER); iwl_mvm_mei_set_sw_rfkill_state(mvm); return 0; } struct iwl_mvm_frob_txf_data { u8 *buf; size_t buflen; }; static void iwl_mvm_frob_txf_key_iter(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key, void *data) { struct iwl_mvm_frob_txf_data *txf = data; u8 keylen, match, matchend; u8 *keydata; size_t i; switch (key->cipher) { case WLAN_CIPHER_SUITE_CCMP: keydata = key->key; keylen = key->keylen; break; case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: case WLAN_CIPHER_SUITE_TKIP: /* * WEP has short keys which might show up in the payload, * and then you can deduce the key, so in this case just * remove all FIFO data. * For TKIP, we don't know the phase 2 keys here, so same. 
*/ memset(txf->buf, 0xBB, txf->buflen); return; default: return; } /* scan for key material and clear it out */ match = 0; for (i = 0; i < txf->buflen; i++) { if (txf->buf[i] != keydata[match]) { match = 0; continue; } match++; if (match == keylen) { memset(txf->buf + i - keylen, 0xAA, keylen); match = 0; } } /* we're dealing with a FIFO, so check wrapped around data */ matchend = match; for (i = 0; match && i < keylen - match; i++) { if (txf->buf[i] != keydata[match]) break; match++; if (match == keylen) { memset(txf->buf, 0xAA, i + 1); memset(txf->buf + txf->buflen - matchend, 0xAA, matchend); break; } } } static void iwl_mvm_frob_txf(void *ctx, void *buf, size_t buflen) { struct iwl_mvm_frob_txf_data txf = { .buf = buf, .buflen = buflen, }; struct iwl_mvm *mvm = ctx; /* embedded key material exists only on old API */ if (iwl_mvm_has_new_tx_api(mvm)) return; rcu_read_lock(); ieee80211_iter_keys_rcu(mvm->hw, NULL, iwl_mvm_frob_txf_key_iter, &txf); rcu_read_unlock(); } static void iwl_mvm_frob_hcmd(void *ctx, void *hcmd, size_t len) { /* we only use wide headers for commands */ struct iwl_cmd_header_wide *hdr = hcmd; unsigned int frob_start = sizeof(*hdr), frob_end = 0; if (len < sizeof(hdr)) return; /* all the commands we care about are in LONG_GROUP */ if (hdr->group_id != LONG_GROUP) return; switch (hdr->cmd) { case WEP_KEY: case WOWLAN_TKIP_PARAM: case WOWLAN_KEK_KCK_MATERIAL: case ADD_STA_KEY: /* * blank out everything here, easier than dealing * with the various versions of the command */ frob_end = INT_MAX; break; case MGMT_MCAST_KEY: frob_start = offsetof(struct iwl_mvm_mgmt_mcast_key_cmd, igtk); BUILD_BUG_ON(offsetof(struct iwl_mvm_mgmt_mcast_key_cmd, igtk) != offsetof(struct iwl_mvm_mgmt_mcast_key_cmd_v1, igtk)); frob_end = offsetofend(struct iwl_mvm_mgmt_mcast_key_cmd, igtk); BUILD_BUG_ON(offsetof(struct iwl_mvm_mgmt_mcast_key_cmd, igtk) < offsetof(struct iwl_mvm_mgmt_mcast_key_cmd_v1, igtk)); break; } if (frob_start >= frob_end) return; if (frob_end > len) frob_end = len; memset((u8 *)hcmd + frob_start, 0xAA, frob_end - frob_start); } static void iwl_mvm_frob_mem(void *ctx, u32 mem_addr, void *mem, size_t buflen) { const struct iwl_dump_exclude *excl; struct iwl_mvm *mvm = ctx; int i; switch (mvm->fwrt.cur_fw_img) { case IWL_UCODE_INIT: default: /* not relevant */ return; case IWL_UCODE_REGULAR: case IWL_UCODE_REGULAR_USNIFFER: excl = mvm->fw->dump_excl; break; case IWL_UCODE_WOWLAN: excl = mvm->fw->dump_excl_wowlan; break; } BUILD_BUG_ON(sizeof(mvm->fw->dump_excl) != sizeof(mvm->fw->dump_excl_wowlan)); for (i = 0; i < ARRAY_SIZE(mvm->fw->dump_excl); i++) { u32 start, end; if (!excl[i].addr || !excl[i].size) continue; start = excl[i].addr; end = start + excl[i].size; if (end <= mem_addr || start >= mem_addr + buflen) continue; if (start < mem_addr) start = mem_addr; if (end > mem_addr + buflen) end = mem_addr + buflen; memset((u8 *)mem + start - mem_addr, 0xAA, end - start); } } static const struct iwl_dump_sanitize_ops iwl_mvm_sanitize_ops = { .frob_txf = iwl_mvm_frob_txf, .frob_hcmd = iwl_mvm_frob_hcmd, .frob_mem = iwl_mvm_frob_mem, }; static void iwl_mvm_me_conn_status(void *priv, const struct iwl_mei_conn_info *conn_info) { struct iwl_mvm *mvm = priv; struct iwl_mvm_csme_conn_info *prev_conn_info, *curr_conn_info; /* * This is protected by the guarantee that this function will not be * called twice on two different threads */ prev_conn_info = rcu_dereference_protected(mvm->csme_conn_info, true); curr_conn_info = kzalloc(sizeof(*curr_conn_info), GFP_KERNEL); if 
(!curr_conn_info) return; curr_conn_info->conn_info = *conn_info; rcu_assign_pointer(mvm->csme_conn_info, curr_conn_info); if (prev_conn_info) kfree_rcu(prev_conn_info, rcu_head); } static void iwl_mvm_mei_rfkill(void *priv, bool blocked) { struct iwl_mvm *mvm = priv; mvm->mei_rfkill_blocked = blocked; if (!mvm->hw_registered) return; wiphy_rfkill_set_hw_state_reason(mvm->hw->wiphy, mvm->mei_rfkill_blocked, RFKILL_HARD_BLOCK_NOT_OWNER); } static void iwl_mvm_mei_roaming_forbidden(void *priv, bool forbidden) { struct iwl_mvm *mvm = priv; if (!mvm->hw_registered || !mvm->csme_vif) return; iwl_mvm_send_roaming_forbidden_event(mvm, mvm->csme_vif, forbidden); } static void iwl_mvm_sap_connected_wk(struct work_struct *wk) { struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, sap_connected_wk); int ret; ret = iwl_mvm_start_get_nvm(mvm); if (ret) goto out_free; ret = iwl_mvm_start_post_nvm(mvm); if (ret) goto out_free; return; out_free: IWL_ERR(mvm, "Couldn't get started...\n"); iwl_mei_start_unregister(); iwl_mei_unregister_complete(); iwl_fw_flush_dumps(&mvm->fwrt); iwl_mvm_thermal_exit(mvm); iwl_fw_runtime_free(&mvm->fwrt); iwl_phy_db_free(mvm->phy_db); kfree(mvm->scan_cmd); iwl_trans_op_mode_leave(mvm->trans); kfree(mvm->nvm_data); kfree(mvm->mei_nvm_data); ieee80211_free_hw(mvm->hw); } static void iwl_mvm_mei_sap_connected(void *priv) { struct iwl_mvm *mvm = priv; if (!mvm->hw_registered) schedule_work(&mvm->sap_connected_wk); } static void iwl_mvm_mei_nic_stolen(void *priv) { struct iwl_mvm *mvm = priv; rtnl_lock(); cfg80211_shutdown_all_interfaces(mvm->hw->wiphy); rtnl_unlock(); } static const struct iwl_mei_ops mei_ops = { .me_conn_status = iwl_mvm_me_conn_status, .rfkill = iwl_mvm_mei_rfkill, .roaming_forbidden = iwl_mvm_mei_roaming_forbidden, .sap_connected = iwl_mvm_mei_sap_connected, .nic_stolen = iwl_mvm_mei_nic_stolen, }; static struct iwl_op_mode * iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, const struct iwl_fw *fw, struct dentry *dbgfs_dir) { struct ieee80211_hw *hw; struct iwl_op_mode *op_mode; struct iwl_mvm *mvm; struct iwl_trans_config trans_cfg = {}; static const u8 no_reclaim_cmds[] = { TX_CMD, }; int scan_size; u32 min_backoff; struct iwl_mvm_csme_conn_info *csme_conn_info __maybe_unused; /* * We use IWL_MVM_STATION_COUNT_MAX to check the validity of the station * index all over the driver - check that its value corresponds to the * array size. */ BUILD_BUG_ON(ARRAY_SIZE(mvm->fw_id_to_mac_id) != IWL_MVM_STATION_COUNT_MAX); /******************************** * 1. Allocating and configuring HW data ********************************/ hw = ieee80211_alloc_hw(sizeof(struct iwl_op_mode) + sizeof(struct iwl_mvm), &iwl_mvm_hw_ops); if (!hw) return NULL; hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE; if (cfg->max_tx_agg_size) hw->max_tx_aggregation_subframes = cfg->max_tx_agg_size; else hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE; op_mode = hw->priv; mvm = IWL_OP_MODE_GET_MVM(op_mode); mvm->dev = trans->dev; mvm->trans = trans; mvm->cfg = cfg; mvm->fw = fw; mvm->hw = hw; iwl_fw_runtime_init(&mvm->fwrt, trans, fw, &iwl_mvm_fwrt_ops, mvm, &iwl_mvm_sanitize_ops, mvm, dbgfs_dir); iwl_mvm_get_acpi_tables(mvm); iwl_uefi_get_sgom_table(trans, &mvm->fwrt); mvm->init_status = 0; if (iwl_mvm_has_new_rx_api(mvm)) { op_mode->ops = &iwl_mvm_ops_mq; trans->rx_mpdu_cmd_hdr_size = (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) ? 
sizeof(struct iwl_rx_mpdu_desc) : IWL_RX_DESC_SIZE_V1; } else { op_mode->ops = &iwl_mvm_ops; trans->rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_res_start); if (WARN_ON(trans->num_rx_queues > 1)) goto out_free; } mvm->fw_restart = iwlwifi_mod_params.fw_restart ? -1 : 0; if (iwl_mvm_has_new_tx_api(mvm)) { /* * If we have the new TX/queue allocation API initialize them * all to invalid numbers. We'll rewrite the ones that we need * later, but that doesn't happen for all of them all of the * time (e.g. P2P Device is optional), and if a dynamic queue * ends up getting number 2 (IWL_MVM_DQA_P2P_DEVICE_QUEUE) then * iwl_mvm_is_static_queue() erroneously returns true, and we * might have things getting stuck. */ mvm->aux_queue = IWL_MVM_INVALID_QUEUE; mvm->snif_queue = IWL_MVM_INVALID_QUEUE; mvm->probe_queue = IWL_MVM_INVALID_QUEUE; mvm->p2p_dev_queue = IWL_MVM_INVALID_QUEUE; } else { mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE; mvm->snif_queue = IWL_MVM_DQA_INJECT_MONITOR_QUEUE; mvm->probe_queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE; mvm->p2p_dev_queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE; } mvm->sf_state = SF_UNINIT; if (iwl_mvm_has_unified_ucode(mvm)) iwl_fw_set_current_image(&mvm->fwrt, IWL_UCODE_REGULAR); else iwl_fw_set_current_image(&mvm->fwrt, IWL_UCODE_INIT); mvm->drop_bcn_ap_mode = true; mutex_init(&mvm->mutex); spin_lock_init(&mvm->async_handlers_lock); INIT_LIST_HEAD(&mvm->time_event_list); INIT_LIST_HEAD(&mvm->aux_roc_te_list); INIT_LIST_HEAD(&mvm->async_handlers_list); spin_lock_init(&mvm->time_event_lock); INIT_LIST_HEAD(&mvm->ftm_initiator.loc_list); INIT_LIST_HEAD(&mvm->ftm_initiator.pasn_list); INIT_LIST_HEAD(&mvm->resp_pasn_list); INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk); INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk); INIT_WORK(&mvm->sap_connected_wk, iwl_mvm_sap_connected_wk); INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work); INIT_DELAYED_WORK(&mvm->scan_timeout_dwork, iwl_mvm_scan_timeout_wk); INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk); INIT_LIST_HEAD(&mvm->add_stream_txqs); init_waitqueue_head(&mvm->rx_sync_waitq); mvm->queue_sync_state = 0; SET_IEEE80211_DEV(mvm->hw, mvm->trans->dev); spin_lock_init(&mvm->tcm.lock); INIT_DELAYED_WORK(&mvm->tcm.work, iwl_mvm_tcm_work); mvm->tcm.ts = jiffies; mvm->tcm.ll_ts = jiffies; mvm->tcm.uapsd_nonagg_ts = jiffies; INIT_DELAYED_WORK(&mvm->cs_tx_unblock_dwork, iwl_mvm_tx_unblock_dwork); mvm->cmd_ver.d0i3_resp = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP, D0I3_END_CMD, 0); /* we only support version 1 */ if (WARN_ON_ONCE(mvm->cmd_ver.d0i3_resp > 1)) goto out_free; mvm->cmd_ver.range_resp = iwl_fw_lookup_notif_ver(mvm->fw, LOCATION_GROUP, TOF_RANGE_RESPONSE_NOTIF, 5); /* we only support up to version 9 */ if (WARN_ON_ONCE(mvm->cmd_ver.range_resp > 9)) goto out_free; /* * Populate the state variables that the transport layer needs * to know about. 
*/ trans_cfg.op_mode = op_mode; trans_cfg.no_reclaim_cmds = no_reclaim_cmds; trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds); switch (iwlwifi_mod_params.amsdu_size) { case IWL_AMSDU_DEF: trans_cfg.rx_buf_size = IWL_AMSDU_4K; break; case IWL_AMSDU_4K: trans_cfg.rx_buf_size = IWL_AMSDU_4K; break; case IWL_AMSDU_8K: trans_cfg.rx_buf_size = IWL_AMSDU_8K; break; case IWL_AMSDU_12K: trans_cfg.rx_buf_size = IWL_AMSDU_12K; break; default: pr_err("%s: Unsupported amsdu_size: %d\n", KBUILD_MODNAME, iwlwifi_mod_params.amsdu_size); trans_cfg.rx_buf_size = IWL_AMSDU_4K; } trans->wide_cmd_header = true; trans_cfg.bc_table_dword = mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210; trans_cfg.command_groups = iwl_mvm_groups; trans_cfg.command_groups_size = ARRAY_SIZE(iwl_mvm_groups); trans_cfg.cmd_queue = IWL_MVM_DQA_CMD_QUEUE; trans_cfg.cmd_fifo = IWL_MVM_TX_FIFO_CMD; trans_cfg.scd_set_active = true; trans_cfg.cb_data_offs = offsetof(struct ieee80211_tx_info, driver_data[2]); /* Set a short watchdog for the command queue */ trans_cfg.cmd_q_wdg_timeout = iwl_mvm_get_wd_timeout(mvm, NULL, false, true); snprintf(mvm->hw->wiphy->fw_version, sizeof(mvm->hw->wiphy->fw_version), "%s", fw->fw_version); trans_cfg.fw_reset_handshake = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_FW_RESET_HANDSHAKE); trans_cfg.queue_alloc_cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(DATA_PATH_GROUP, SCD_QUEUE_CONFIG_CMD), 0); mvm->sta_remove_requires_queue_remove = trans_cfg.queue_alloc_cmd_ver > 0; /* Configure transport layer */ iwl_trans_configure(mvm->trans, &trans_cfg); trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD; trans->dbg.dest_tlv = mvm->fw->dbg.dest_tlv; trans->dbg.n_dest_reg = mvm->fw->dbg.n_dest_reg; memcpy(trans->dbg.conf_tlv, mvm->fw->dbg.conf_tlv, sizeof(trans->dbg.conf_tlv)); trans->dbg.trigger_tlv = mvm->fw->dbg.trigger_tlv; trans->iml = mvm->fw->iml; trans->iml_len = mvm->fw->iml_len; /* set up notification wait support */ iwl_notification_wait_init(&mvm->notif_wait); /* Init phy db */ mvm->phy_db = iwl_phy_db_init(trans); if (!mvm->phy_db) { IWL_ERR(mvm, "Cannot init phy_db\n"); goto out_free; } IWL_INFO(mvm, "Detected %s, REV=0x%X\n", mvm->trans->name, mvm->trans->hw_rev); if (iwlwifi_mod_params.nvm_file) mvm->nvm_file_name = iwlwifi_mod_params.nvm_file; else IWL_DEBUG_EEPROM(mvm->trans->dev, "working without external nvm file\n"); scan_size = iwl_mvm_scan_size(mvm); mvm->scan_cmd = kmalloc(scan_size, GFP_KERNEL); if (!mvm->scan_cmd) goto out_free; /* invalidate ids to prevent accidental removal of sta_id 0 */ mvm->aux_sta.sta_id = IWL_MVM_INVALID_STA; mvm->snif_sta.sta_id = IWL_MVM_INVALID_STA; /* Set EBS as successful as long as not stated otherwise by the FW. */ mvm->last_ebs_successful = true; min_backoff = iwl_mvm_min_backoff(mvm); iwl_mvm_thermal_initialize(mvm, min_backoff); if (!iwl_mvm_has_new_rx_stats_api(mvm)) memset(&mvm->rx_stats_v3, 0, sizeof(struct mvm_statistics_rx_v3)); else memset(&mvm->rx_stats, 0, sizeof(struct mvm_statistics_rx)); mvm->debugfs_dir = dbgfs_dir; mvm->mei_registered = !iwl_mei_register(mvm, &mei_ops); if (iwl_mvm_start_get_nvm(mvm)) { /* * Getting NVM failed while CSME is the owner, but we are * registered to MEI, we'll get the NVM later when it'll be * possible to get it from CSME. 
*/ if (trans->csme_own && mvm->mei_registered) return op_mode; goto out_thermal_exit; } if (iwl_mvm_start_post_nvm(mvm)) goto out_thermal_exit; return op_mode; out_thermal_exit: iwl_mvm_thermal_exit(mvm); if (mvm->mei_registered) { iwl_mei_start_unregister(); iwl_mei_unregister_complete(); } out_free: iwl_fw_flush_dumps(&mvm->fwrt); iwl_fw_runtime_free(&mvm->fwrt); if (iwlmvm_mod_params.init_dbg) return op_mode; iwl_phy_db_free(mvm->phy_db); kfree(mvm->scan_cmd); iwl_trans_op_mode_leave(trans); ieee80211_free_hw(mvm->hw); return NULL; } void iwl_mvm_stop_device(struct iwl_mvm *mvm) { lockdep_assert_held(&mvm->mutex); iwl_fw_cancel_timestamp(&mvm->fwrt); clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status); iwl_fw_dbg_stop_sync(&mvm->fwrt); iwl_trans_stop_device(mvm->trans); iwl_free_fw_paging(&mvm->fwrt); iwl_fw_dump_conf_clear(&mvm->fwrt); iwl_mvm_mei_device_down(mvm); } static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode) { struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); int i; if (mvm->mei_registered) { rtnl_lock(); iwl_mei_set_netdev(NULL); rtnl_unlock(); iwl_mei_start_unregister(); } /* * After we unregister from mei, the worker can't be scheduled * anymore. */ cancel_work_sync(&mvm->sap_connected_wk); iwl_mvm_leds_exit(mvm); iwl_mvm_thermal_exit(mvm); /* * If we couldn't get ownership on the device and we couldn't * get the NVM from CSME, we haven't registered to mac80211. * In that case, we didn't fail op_mode_start, because we are * waiting for CSME to allow us to get the NVM to register to * mac80211. If that didn't happen, we haven't registered to * mac80211, hence the if below. */ if (mvm->hw_registered) ieee80211_unregister_hw(mvm->hw); kfree(mvm->scan_cmd); kfree(mvm->mcast_filter_cmd); mvm->mcast_filter_cmd = NULL; kfree(mvm->error_recovery_buf); mvm->error_recovery_buf = NULL; iwl_trans_op_mode_leave(mvm->trans); iwl_phy_db_free(mvm->phy_db); mvm->phy_db = NULL; kfree(mvm->nvm_data); kfree(mvm->mei_nvm_data); kfree(rcu_access_pointer(mvm->csme_conn_info)); kfree(mvm->temp_nvm_data); for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++) kfree(mvm->nvm_sections[i].data); cancel_delayed_work_sync(&mvm->tcm.work); iwl_fw_runtime_free(&mvm->fwrt); mutex_destroy(&mvm->mutex); if (mvm->mei_registered) iwl_mei_unregister_complete(); ieee80211_free_hw(mvm->hw); } struct iwl_async_handler_entry { struct list_head list; struct iwl_rx_cmd_buffer rxb; enum iwl_rx_handler_context context; void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); }; void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm) { struct iwl_async_handler_entry *entry, *tmp; spin_lock_bh(&mvm->async_handlers_lock); list_for_each_entry_safe(entry, tmp, &mvm->async_handlers_list, list) { iwl_free_rxb(&entry->rxb); list_del(&entry->list); kfree(entry); } spin_unlock_bh(&mvm->async_handlers_lock); } static void iwl_mvm_async_handlers_wk(struct work_struct *wk) { struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, async_handlers_wk); struct iwl_async_handler_entry *entry, *tmp; LIST_HEAD(local_list); /* Ensure that we are not in stop flow (check iwl_mvm_mac_stop) */ /* * Sync with Rx path with a lock. Remove all the entries from this list, * add them to a local one (lock free), and then handle them. 
*/ spin_lock_bh(&mvm->async_handlers_lock); list_splice_init(&mvm->async_handlers_list, &local_list); spin_unlock_bh(&mvm->async_handlers_lock); list_for_each_entry_safe(entry, tmp, &local_list, list) { if (entry->context == RX_HANDLER_ASYNC_LOCKED) mutex_lock(&mvm->mutex); entry->fn(mvm, &entry->rxb); iwl_free_rxb(&entry->rxb); list_del(&entry->list); if (entry->context == RX_HANDLER_ASYNC_LOCKED) mutex_unlock(&mvm->mutex); kfree(entry); } } static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) { struct iwl_fw_dbg_trigger_tlv *trig; struct iwl_fw_dbg_trigger_cmd *cmds_trig; int i; trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL, FW_DBG_TRIGGER_FW_NOTIF); if (!trig) return; cmds_trig = (void *)trig->data; for (i = 0; i < ARRAY_SIZE(cmds_trig->cmds); i++) { /* don't collect on CMD 0 */ if (!cmds_trig->cmds[i].cmd_id) break; if (cmds_trig->cmds[i].cmd_id != pkt->hdr.cmd || cmds_trig->cmds[i].group_id != pkt->hdr.group_id) continue; iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, "CMD 0x%02x.%02x received", pkt->hdr.group_id, pkt->hdr.cmd); break; } } static void iwl_mvm_rx_common(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, struct iwl_rx_packet *pkt) { unsigned int pkt_len = iwl_rx_packet_payload_len(pkt); int i; union iwl_dbg_tlv_tp_data tp_data = { .fw_pkt = pkt }; iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_FW_RSP_OR_NOTIF, &tp_data); iwl_mvm_rx_check_trigger(mvm, pkt); /* * Do the notification wait before RX handlers so * even if the RX handler consumes the RXB we have * access to it in the notification wait entry. */ iwl_notification_wait_notify(&mvm->notif_wait, pkt); for (i = 0; i < ARRAY_SIZE(iwl_mvm_rx_handlers); i++) { const struct iwl_rx_handlers *rx_h = &iwl_mvm_rx_handlers[i]; struct iwl_async_handler_entry *entry; if (rx_h->cmd_id != WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)) continue; if (unlikely(pkt_len < rx_h->min_size)) return; if (rx_h->context == RX_HANDLER_SYNC) { rx_h->fn(mvm, rxb); return; } entry = kzalloc(sizeof(*entry), GFP_ATOMIC); /* we can't do much... 
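* if that atomic allocation failed, the notification is simply dropped: we return without stealing the RX buffer page, so the caller frees it as usual.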
*/ if (!entry) return; entry->rxb._page = rxb_steal_page(rxb); entry->rxb._offset = rxb->_offset; entry->rxb._rx_page_order = rxb->_rx_page_order; entry->fn = rx_h->fn; entry->context = rx_h->context; spin_lock(&mvm->async_handlers_lock); list_add_tail(&entry->list, &mvm->async_handlers_list); spin_unlock(&mvm->async_handlers_lock); schedule_work(&mvm->async_handlers_wk); break; } } static void iwl_mvm_rx(struct iwl_op_mode *op_mode, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd); if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD))) iwl_mvm_rx_rx_mpdu(mvm, napi, rxb); else if (cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_PHY_CMD)) iwl_mvm_rx_rx_phy_cmd(mvm, rxb); else iwl_mvm_rx_common(mvm, rxb, pkt); } void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd); if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD))) iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, 0); else if (unlikely(cmd == WIDE_ID(DATA_PATH_GROUP, RX_QUEUES_NOTIFICATION))) iwl_mvm_rx_queue_notif(mvm, napi, rxb, 0); else if (cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE)) iwl_mvm_rx_frame_release(mvm, napi, rxb, 0); else if (cmd == WIDE_ID(LEGACY_GROUP, BAR_FRAME_RELEASE)) iwl_mvm_rx_bar_frame_release(mvm, napi, rxb, 0); else if (cmd == WIDE_ID(DATA_PATH_GROUP, RX_NO_DATA_NOTIF)) iwl_mvm_rx_monitor_no_data(mvm, napi, rxb, 0); else iwl_mvm_rx_common(mvm, rxb, pkt); } static void iwl_mvm_async_cb(struct iwl_op_mode *op_mode, const struct iwl_device_cmd *cmd) { struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); /* * For now, we only set the CMD_WANT_ASYNC_CALLBACK for ADD_STA * commands that need to block the Tx queues. */ iwl_trans_block_txq_ptrs(mvm->trans, false); } static int iwl_mvm_is_static_queue(struct iwl_mvm *mvm, int queue) { return queue == mvm->aux_queue || queue == mvm->probe_queue || queue == mvm->p2p_dev_queue || queue == mvm->snif_queue; } static void iwl_mvm_queue_state_change(struct iwl_op_mode *op_mode, int hw_queue, bool start) { struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); struct ieee80211_sta *sta; struct ieee80211_txq *txq; struct iwl_mvm_txq *mvmtxq; int i; unsigned long tid_bitmap; struct iwl_mvm_sta *mvmsta; u8 sta_id; sta_id = iwl_mvm_has_new_tx_api(mvm) ? 
mvm->tvqm_info[hw_queue].sta_id : mvm->queue_info[hw_queue].ra_sta_id; if (WARN_ON_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations)) return; rcu_read_lock(); sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); if (IS_ERR_OR_NULL(sta)) goto out; mvmsta = iwl_mvm_sta_from_mac80211(sta); if (iwl_mvm_is_static_queue(mvm, hw_queue)) { if (!start) ieee80211_stop_queues(mvm->hw); else if (mvmsta->sta_state != IEEE80211_STA_NOTEXIST) ieee80211_wake_queues(mvm->hw); goto out; } if (iwl_mvm_has_new_tx_api(mvm)) { int tid = mvm->tvqm_info[hw_queue].txq_tid; tid_bitmap = BIT(tid); } else { tid_bitmap = mvm->queue_info[hw_queue].tid_bitmap; } for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { int tid = i; if (tid == IWL_MAX_TID_COUNT) tid = IEEE80211_NUM_TIDS; txq = sta->txq[tid]; mvmtxq = iwl_mvm_txq_from_mac80211(txq); mvmtxq->stopped = !start; if (start && mvmsta->sta_state != IEEE80211_STA_NOTEXIST) iwl_mvm_mac_itxq_xmit(mvm->hw, txq); } out: rcu_read_unlock(); } static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int hw_queue) { iwl_mvm_queue_state_change(op_mode, hw_queue, false); } static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int hw_queue) { iwl_mvm_queue_state_change(op_mode, hw_queue, true); } static void iwl_mvm_set_rfkill_state(struct iwl_mvm *mvm) { bool state = iwl_mvm_is_radio_killed(mvm); if (state) wake_up(&mvm->rx_sync_waitq); wiphy_rfkill_set_hw_state(mvm->hw->wiphy, state); } void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state) { if (state) set_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status); else clear_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status); iwl_mvm_set_rfkill_state(mvm); } struct iwl_mvm_csme_conn_info *iwl_mvm_get_csme_conn_info(struct iwl_mvm *mvm) { return rcu_dereference_protected(mvm->csme_conn_info, lockdep_is_held(&mvm->mutex)); } static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state) { struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); bool rfkill_safe_init_done = READ_ONCE(mvm->rfkill_safe_init_done); bool unified = iwl_mvm_has_unified_ucode(mvm); if (state) set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status); else clear_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status); iwl_mvm_set_rfkill_state(mvm); /* iwl_run_init_mvm_ucode is waiting for results, abort it. */ if (rfkill_safe_init_done) iwl_abort_notification_waits(&mvm->notif_wait); /* * Don't ask the transport to stop the firmware. We'll do it * after cfg80211 takes us down. */ if (unified) return false; /* * Stop the device if we run OPERATIONAL firmware or if we are in the * middle of the calibrations. 
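* Returning true here tells the transport to stop the device right away; returning false leaves the firmware running so it can be taken down in an orderly fashion later.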
*/ return state && rfkill_safe_init_done; } static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb) { struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); struct ieee80211_tx_info *info; info = IEEE80211_SKB_CB(skb); iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]); ieee80211_free_txskb(mvm->hw, skb); } struct iwl_mvm_reprobe { struct device *dev; struct work_struct work; }; static void iwl_mvm_reprobe_wk(struct work_struct *wk) { struct iwl_mvm_reprobe *reprobe; reprobe = container_of(wk, struct iwl_mvm_reprobe, work); if (device_reprobe(reprobe->dev)) dev_err(reprobe->dev, "reprobe failed!\n"); put_device(reprobe->dev); kfree(reprobe); module_put(THIS_MODULE); } void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error) { iwl_abort_notification_waits(&mvm->notif_wait); iwl_dbg_tlv_del_timers(mvm->trans); /* * This is a bit racy, but worst case we tell mac80211 about * a stopped/aborted scan when that was already done which * is not a problem. It is necessary to abort any os scan * here because mac80211 requires having the scan cleared * before restarting. * We'll reset the scan_status to NONE in restart cleanup in * the next start() call from mac80211. If restart isn't called * (no fw restart) scan status will stay busy. */ iwl_mvm_report_scan_aborted(mvm); /* * If we're restarting already, don't cycle restarts. * If INIT fw asserted, it will likely fail again. * If WoWLAN fw asserted, don't restart either, mac80211 * can't recover this since we're already half suspended. */ if (!mvm->fw_restart && fw_error) { iwl_fw_error_collect(&mvm->fwrt, false); } else if (test_bit(IWL_MVM_STATUS_STARTING, &mvm->status)) { IWL_ERR(mvm, "Starting mac, retry will be triggered anyway\n"); } else if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { struct iwl_mvm_reprobe *reprobe; IWL_ERR(mvm, "Firmware error during reconfiguration - reprobe!\n"); /* * get a module reference to avoid doing this while unloading * anyway and to avoid scheduling a work with code that's * being removed. */ if (!try_module_get(THIS_MODULE)) { IWL_ERR(mvm, "Module is being unloaded - abort\n"); return; } reprobe = kzalloc(sizeof(*reprobe), GFP_ATOMIC); if (!reprobe) { module_put(THIS_MODULE); return; } reprobe->dev = get_device(mvm->trans->dev); INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk); schedule_work(&reprobe->work); } else if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status)) { IWL_ERR(mvm, "HW restart already requested, but not started\n"); } else if (mvm->fwrt.cur_fw_img == IWL_UCODE_REGULAR && mvm->hw_registered && !test_bit(STATUS_TRANS_DEAD, &mvm->trans->status)) { /* This should be first thing before trying to collect any * data to avoid endless loops if any HW error happens while * collecting debug data. 
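* With the bit already set, any further firmware error reported while we are dumping lands in the "HW restart already requested" branch above instead of re-entering this one.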
*/ set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status); if (mvm->fw->ucode_capa.error_log_size) { u32 src_size = mvm->fw->ucode_capa.error_log_size; u32 src_addr = mvm->fw->ucode_capa.error_log_addr; u8 *recover_buf = kzalloc(src_size, GFP_ATOMIC); if (recover_buf) { mvm->error_recovery_buf = recover_buf; iwl_trans_read_mem_bytes(mvm->trans, src_addr, recover_buf, src_size); } } iwl_fw_error_collect(&mvm->fwrt, false); if (fw_error && mvm->fw_restart > 0) { mvm->fw_restart--; ieee80211_restart_hw(mvm->hw); } else if (mvm->fwrt.trans->dbg.restart_required) { IWL_DEBUG_INFO(mvm, "FW restart requested after debug collection\n"); mvm->fwrt.trans->dbg.restart_required = false; ieee80211_restart_hw(mvm->hw); } else if (mvm->trans->trans_cfg->device_family <= IWL_DEVICE_FAMILY_8000) { ieee80211_restart_hw(mvm->hw); } } } static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode, bool sync) { struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); if (!test_bit(STATUS_TRANS_DEAD, &mvm->trans->status) && !test_and_clear_bit(IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE, &mvm->status)) iwl_mvm_dump_nic_error_log(mvm); if (sync) { iwl_fw_error_collect(&mvm->fwrt, true); /* * Currently, the only case for sync=true is during * shutdown, so just stop in this case. If/when that * changes, we need to be a bit smarter here. */ return; } /* * If the firmware crashes while we're already considering it * to be dead then don't ask for a restart, that cannot do * anything useful anyway. */ if (!test_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status)) return; iwl_mvm_nic_restart(mvm, false); } static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode) { struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); WARN_ON(1); iwl_mvm_nic_restart(mvm, true); } static void iwl_op_mode_mvm_time_point(struct iwl_op_mode *op_mode, enum iwl_fw_ini_time_point tp_id, union iwl_dbg_tlv_tp_data *tp_data) { struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); iwl_dbg_tlv_time_point(&mvm->fwrt, tp_id, tp_data); } #define IWL_MVM_COMMON_OPS \ /* these could be differentiated */ \ .async_cb = iwl_mvm_async_cb, \ .queue_full = iwl_mvm_stop_sw_queue, \ .queue_not_full = iwl_mvm_wake_sw_queue, \ .hw_rf_kill = iwl_mvm_set_hw_rfkill_state, \ .free_skb = iwl_mvm_free_skb, \ .nic_error = iwl_mvm_nic_error, \ .cmd_queue_full = iwl_mvm_cmd_queue_full, \ .nic_config = iwl_mvm_nic_config, \ /* as we only register one, these MUST be common!
*/ \ .start = iwl_op_mode_mvm_start, \ .stop = iwl_op_mode_mvm_stop, \ .time_point = iwl_op_mode_mvm_time_point static const struct iwl_op_mode_ops iwl_mvm_ops = { IWL_MVM_COMMON_OPS, .rx = iwl_mvm_rx, }; static void iwl_mvm_rx_mq_rss(struct iwl_op_mode *op_mode, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, unsigned int queue) { struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); struct iwl_rx_packet *pkt = rxb_addr(rxb); u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd); if (unlikely(queue >= mvm->trans->num_rx_queues)) return; if (unlikely(cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE))) iwl_mvm_rx_frame_release(mvm, napi, rxb, queue); else if (unlikely(cmd == WIDE_ID(DATA_PATH_GROUP, RX_QUEUES_NOTIFICATION))) iwl_mvm_rx_queue_notif(mvm, napi, rxb, queue); else if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD))) iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, queue); } static const struct iwl_op_mode_ops iwl_mvm_ops_mq = { IWL_MVM_COMMON_OPS, .rx = iwl_mvm_rx_mq, .rx_rss = iwl_mvm_rx_mq_rss, }; diff --git a/sys/contrib/dev/iwlwifi/mvm/tx.c b/sys/contrib/dev/iwlwifi/mvm/tx.c index e2ecede55bff..7763037b93ed 100644 --- a/sys/contrib/dev/iwlwifi/mvm/tx.c +++ b/sys/contrib/dev/iwlwifi/mvm/tx.c @@ -1,2225 +1,2225 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2012-2014, 2018-2021 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #include #include #include #include #include #include "iwl-trans.h" #include "iwl-eeprom-parse.h" #include "mvm.h" #include "sta.h" static void iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr, u16 tid, u16 ssn) { struct iwl_fw_dbg_trigger_tlv *trig; struct iwl_fw_dbg_trigger_ba *ba_trig; trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL, FW_DBG_TRIGGER_BA); if (!trig) return; ba_trig = (void *)trig->data; if (!(le16_to_cpu(ba_trig->tx_bar) & BIT(tid))) return; iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, "BAR sent to %pM, tid %d, ssn %d", addr, tid, ssn); } #define OPT_HDR(type, skb, off) \ (type *)(skb_network_header(skb) + (off)) static u16 iwl_mvm_tx_csum_pre_bz(struct iwl_mvm *mvm, struct sk_buff *skb, struct ieee80211_tx_info *info, bool amsdu) { struct ieee80211_hdr *hdr = (void *)skb->data; u16 offload_assist = 0; #if IS_ENABLED(CONFIG_INET) u16 mh_len = ieee80211_hdrlen(hdr->frame_control); u8 protocol = 0; /* Do not compute checksum if already computed */ if (skb->ip_summed != CHECKSUM_PARTIAL) goto out; /* We do not expect to be requested to csum stuff we do not support */ if (WARN_ONCE(!(mvm->hw->netdev_features & IWL_TX_CSUM_NETIF_FLAGS) || (skb->protocol != htons(ETH_P_IP) && skb->protocol != htons(ETH_P_IPV6)), "No support for requested checksum\n")) { skb_checksum_help(skb); goto out; } if (skb->protocol == htons(ETH_P_IP)) { protocol = ip_hdr(skb)->protocol; } else { #if IS_ENABLED(CONFIG_IPV6) struct ipv6hdr *ipv6h = (struct ipv6hdr *)skb_network_header(skb); unsigned int off = sizeof(*ipv6h); protocol = ipv6h->nexthdr; while (protocol != NEXTHDR_NONE && ipv6_ext_hdr(protocol)) { struct ipv6_opt_hdr *hp; /* only supported extension headers */ if (protocol != NEXTHDR_ROUTING && protocol != NEXTHDR_HOP && protocol != NEXTHDR_DEST) { skb_checksum_help(skb); goto out; } hp = OPT_HDR(struct ipv6_opt_hdr, skb, off); protocol = hp->nexthdr; off += ipv6_optlen(hp); } /* if we get here - protocol now should be TCP/UDP */ #endif } if (protocol != IPPROTO_TCP && protocol != IPPROTO_UDP) { WARN_ON_ONCE(1); skb_checksum_help(skb); goto out; } /* 
enable L4 csum */ offload_assist |= BIT(TX_CMD_OFFLD_L4_EN); /* * Set offset to IP header (snap). * We don't support tunneling so no need to take care of inner header. * Size is in words. */ offload_assist |= (4 << TX_CMD_OFFLD_IP_HDR); /* Do IPv4 csum for AMSDU only (no IP csum for Ipv6) */ if (skb->protocol == htons(ETH_P_IP) && amsdu) { ip_hdr(skb)->check = 0; offload_assist |= BIT(TX_CMD_OFFLD_L3_EN); } /* reset UDP/TCP header csum */ if (protocol == IPPROTO_TCP) tcp_hdr(skb)->check = 0; else udp_hdr(skb)->check = 0; /* * mac header len should include IV, size is in words unless * the IV is added by the firmware like in WEP. * In new Tx API, the IV is always added by the firmware. */ if (!iwl_mvm_has_new_tx_api(mvm) && info->control.hw_key && info->control.hw_key->cipher != WLAN_CIPHER_SUITE_WEP40 && info->control.hw_key->cipher != WLAN_CIPHER_SUITE_WEP104) mh_len += info->control.hw_key->iv_len; mh_len /= 2; offload_assist |= mh_len << TX_CMD_OFFLD_MH_SIZE; out: #endif if (amsdu) offload_assist |= BIT(TX_CMD_OFFLD_AMSDU); else if (ieee80211_hdrlen(hdr->frame_control) % 4) /* padding is inserted later in transport */ offload_assist |= BIT(TX_CMD_OFFLD_PAD); return offload_assist; } u32 iwl_mvm_tx_csum_bz(struct iwl_mvm *mvm, struct sk_buff *skb, bool amsdu) { struct ieee80211_hdr *hdr = (void *)skb->data; u32 offload_assist = IWL_TX_CMD_OFFLD_BZ_PARTIAL_CSUM; unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control); unsigned int csum_start = skb_checksum_start_offset(skb); offload_assist |= u32_encode_bits(hdrlen / 2, IWL_TX_CMD_OFFLD_BZ_MH_LEN); if (amsdu) offload_assist |= IWL_TX_CMD_OFFLD_BZ_AMSDU; else if (hdrlen % 4) /* padding is inserted later in transport */ offload_assist |= IWL_TX_CMD_OFFLD_BZ_MH_PAD; if (skb->ip_summed != CHECKSUM_PARTIAL) return offload_assist; offload_assist |= IWL_TX_CMD_OFFLD_BZ_ENABLE_CSUM | IWL_TX_CMD_OFFLD_BZ_ZERO2ONES; /* * mac80211 will always calculate checksum in software for * non-fast-xmit, and so we can only do offloaded checksum * for fast-xmit frames. In this case, we always have the * RFC 1042 header present. skb_checksum_start_offset() * returns the offset from the beginning, but the hardware * needs it from after the header & SNAP header. 
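* E.g. for TCP over IPv4 behind a 26-byte QoS data header: skb_checksum_start_offset() returns 26 + 8 + 20 = 54, and the value programmed below is 54 - 26 - 8 = 20, i.e. the offset of the TCP header from the start of the IP header.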
*/ csum_start -= hdrlen + 8; offload_assist |= u32_encode_bits(csum_start, IWL_TX_CMD_OFFLD_BZ_START_OFFS); offload_assist |= u32_encode_bits(csum_start + skb->csum_offset, IWL_TX_CMD_OFFLD_BZ_RESULT_OFFS); return offload_assist; } static u32 iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb, struct ieee80211_tx_info *info, bool amsdu) { if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ) return iwl_mvm_tx_csum_pre_bz(mvm, skb, info, amsdu); return iwl_mvm_tx_csum_bz(mvm, skb, amsdu); } /* * Sets most of the Tx cmd's fields */ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, struct iwl_tx_cmd *tx_cmd, struct ieee80211_tx_info *info, u8 sta_id) { struct ieee80211_hdr *hdr = (void *)skb->data; __le16 fc = hdr->frame_control; u32 tx_flags = le32_to_cpu(tx_cmd->tx_flags); u32 len = skb->len + FCS_LEN; bool amsdu = false; u8 ac; if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) || (ieee80211_is_probe_resp(fc) && !is_multicast_ether_addr(hdr->addr1))) tx_flags |= TX_CMD_FLG_ACK; else tx_flags &= ~TX_CMD_FLG_ACK; if (ieee80211_is_probe_resp(fc)) tx_flags |= TX_CMD_FLG_TSF; if (ieee80211_has_morefrags(fc)) tx_flags |= TX_CMD_FLG_MORE_FRAG; if (ieee80211_is_data_qos(fc)) { u8 *qc = ieee80211_get_qos_ctl(hdr); tx_cmd->tid_tspec = qc[0] & 0xf; tx_flags &= ~TX_CMD_FLG_SEQ_CTL; amsdu = *qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT; } else if (ieee80211_is_back_req(fc)) { struct ieee80211_bar *bar = (void *)skb->data; u16 control = le16_to_cpu(bar->control); u16 ssn = le16_to_cpu(bar->start_seq_num); tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR; tx_cmd->tid_tspec = (control & IEEE80211_BAR_CTRL_TID_INFO_MASK) >> IEEE80211_BAR_CTRL_TID_INFO_SHIFT; WARN_ON_ONCE(tx_cmd->tid_tspec >= IWL_MAX_TID_COUNT); iwl_mvm_bar_check_trigger(mvm, bar->ra, tx_cmd->tid_tspec, ssn); } else { if (ieee80211_is_data(fc)) tx_cmd->tid_tspec = IWL_TID_NON_QOS; else tx_cmd->tid_tspec = IWL_MAX_TID_COUNT; if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) tx_flags |= TX_CMD_FLG_SEQ_CTL; else tx_flags &= ~TX_CMD_FLG_SEQ_CTL; } /* Default to 0 (BE) when tid_spec is set to IWL_MAX_TID_COUNT */ if (tx_cmd->tid_tspec < IWL_MAX_TID_COUNT) ac = tid_to_mac80211_ac[tx_cmd->tid_tspec]; else ac = tid_to_mac80211_ac[0]; tx_flags |= iwl_mvm_bt_coex_tx_prio(mvm, hdr, info, ac) << TX_CMD_FLG_BT_PRIO_POS; if (ieee80211_is_mgmt(fc)) { if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc)) tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_ASSOC); else if (ieee80211_is_action(fc)) tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE); else tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT); /* The spec allows Action frames in A-MPDU, we don't support * it */ WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU); } else if (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO) { tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT); } else { tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE); } if (ieee80211_is_data(fc) && len > mvm->rts_threshold && !is_multicast_ether_addr(hdr->addr1)) tx_flags |= TX_CMD_FLG_PROT_REQUIRE; if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT) && ieee80211_action_contains_tpc(skb)) tx_flags |= TX_CMD_FLG_WRITE_TX_POWER; tx_cmd->tx_flags = cpu_to_le32(tx_flags); /* Total # bytes to be transmitted - PCIe code will adjust for A-MSDU */ tx_cmd->len = cpu_to_le16((u16)skb->len); tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE); tx_cmd->sta_id = sta_id; tx_cmd->offload_assist = cpu_to_le16(iwl_mvm_tx_csum_pre_bz(mvm, skb, info, amsdu)); } static u32 
iwl_mvm_get_tx_ant(struct iwl_mvm *mvm, struct ieee80211_tx_info *info, struct ieee80211_sta *sta, __le16 fc) { if (info->band == NL80211_BAND_2GHZ && !iwl_mvm_bt_coex_is_shared_ant_avail(mvm)) return mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS; if (sta && ieee80211_is_data(fc)) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); return BIT(mvmsta->tx_ant) << RATE_MCS_ANT_POS; } return BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS; } static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm, struct ieee80211_tx_info *info, struct ieee80211_sta *sta, __le16 fc) { int rate_idx = -1; u8 rate_plcp; u32 rate_flags = 0; bool is_cck; /* info->control is only relevant for non HW rate control */ if (!ieee80211_hw_check(mvm->hw, HAS_RATE_CONTROL)) { /* HT rate doesn't make sense for a non data frame */ WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS && !ieee80211_is_data(fc), "Got a HT rate (flags:0x%x/mcs:%d/fc:0x%x/state:%d) for a non data frame\n", info->control.rates[0].flags, info->control.rates[0].idx, le16_to_cpu(fc), sta ? iwl_mvm_sta_from_mac80211(sta)->sta_state : -1); rate_idx = info->control.rates[0].idx; } /* if the rate isn't a well known legacy rate, take the lowest one */ if (rate_idx < 0 || rate_idx >= IWL_RATE_COUNT_LEGACY) rate_idx = rate_lowest_index( &mvm->nvm_data->bands[info->band], sta); /* * For non 2 GHZ band, remap mac80211 rate * indices into driver indices */ if (info->band != NL80211_BAND_2GHZ) rate_idx += IWL_FIRST_OFDM_RATE; /* For 2.4 GHZ band, check that there is no need to remap */ BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0); /* Get PLCP rate for tx_cmd->rate_n_flags */ rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(mvm->fw, rate_idx); is_cck = (rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE); /* Set CCK or OFDM flag */ if (iwl_fw_lookup_cmd_ver(mvm->fw, TX_CMD, 0) > 8) { if (!is_cck) rate_flags |= RATE_MCS_LEGACY_OFDM_MSK; else rate_flags |= RATE_MCS_CCK_MSK; } else if (is_cck) { rate_flags |= RATE_MCS_CCK_MSK_V1; } return (u32)rate_plcp | rate_flags; } static u32 iwl_mvm_get_tx_rate_n_flags(struct iwl_mvm *mvm, struct ieee80211_tx_info *info, struct ieee80211_sta *sta, __le16 fc) { return iwl_mvm_get_tx_rate(mvm, info, sta, fc) | iwl_mvm_get_tx_ant(mvm, info, sta, fc); } /* * Sets the fields in the Tx cmd that are rate related */ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd, struct ieee80211_tx_info *info, struct ieee80211_sta *sta, __le16 fc) { /* Set retry limit on RTS packets */ tx_cmd->rts_retry_limit = IWL_RTS_DFAULT_RETRY_LIMIT; /* Set retry limit on DATA packets and Probe Responses*/ if (ieee80211_is_probe_resp(fc)) { tx_cmd->data_retry_limit = IWL_MGMT_DFAULT_RETRY_LIMIT; tx_cmd->rts_retry_limit = min(tx_cmd->data_retry_limit, tx_cmd->rts_retry_limit); } else if (ieee80211_is_back_req(fc)) { tx_cmd->data_retry_limit = IWL_BAR_DFAULT_RETRY_LIMIT; } else { tx_cmd->data_retry_limit = IWL_DEFAULT_TX_RETRY; } /* * for data packets, rate info comes from the table inside the fw. 
This * table is controlled by LINK_QUALITY commands */ if (ieee80211_is_data(fc) && sta) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); if (mvmsta->sta_state >= IEEE80211_STA_AUTHORIZED) { tx_cmd->initial_rate_index = 0; tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE); return; } } else if (ieee80211_is_back_req(fc)) { tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_ACK | TX_CMD_FLG_BAR); } /* Set the rate in the TX cmd */ tx_cmd->rate_n_flags = cpu_to_le32(iwl_mvm_get_tx_rate_n_flags(mvm, info, sta, fc)); } static inline void iwl_mvm_set_tx_cmd_pn(struct ieee80211_tx_info *info, u8 *crypto_hdr) { struct ieee80211_key_conf *keyconf = info->control.hw_key; u64 pn; pn = atomic64_inc_return(&keyconf->tx_pn); crypto_hdr[0] = pn; crypto_hdr[2] = 0; crypto_hdr[3] = 0x20 | (keyconf->keyidx << 6); crypto_hdr[1] = pn >> 8; crypto_hdr[4] = pn >> 16; crypto_hdr[5] = pn >> 24; crypto_hdr[6] = pn >> 32; crypto_hdr[7] = pn >> 40; } /* * Sets the fields in the Tx cmd that are crypto related */ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm, struct ieee80211_tx_info *info, struct iwl_tx_cmd *tx_cmd, struct sk_buff *skb_frag, int hdrlen) { struct ieee80211_key_conf *keyconf = info->control.hw_key; u8 *crypto_hdr = skb_frag->data + hdrlen; enum iwl_tx_cmd_sec_ctrl type = TX_CMD_SEC_CCM; u64 pn; switch (keyconf->cipher) { case WLAN_CIPHER_SUITE_CCMP: iwl_mvm_set_tx_cmd_ccmp(info, tx_cmd); iwl_mvm_set_tx_cmd_pn(info, crypto_hdr); break; case WLAN_CIPHER_SUITE_TKIP: tx_cmd->sec_ctl = TX_CMD_SEC_TKIP; pn = atomic64_inc_return(&keyconf->tx_pn); ieee80211_tkip_add_iv(crypto_hdr, keyconf, pn); ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key); break; case WLAN_CIPHER_SUITE_WEP104: tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128; fallthrough; case WLAN_CIPHER_SUITE_WEP40: tx_cmd->sec_ctl |= TX_CMD_SEC_WEP | ((keyconf->keyidx << TX_CMD_SEC_WEP_KEY_IDX_POS) & TX_CMD_SEC_WEP_KEY_IDX_MSK); memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen); break; case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: type = TX_CMD_SEC_GCMP; fallthrough; case WLAN_CIPHER_SUITE_CCMP_256: /* TODO: Taking the key from the table might introduce a race * when PTK rekeying is done, having an old packets with a PN * based on the old key but the message encrypted with a new * one. * Need to handle this. */ tx_cmd->sec_ctl |= type | TX_CMD_SEC_KEY_FROM_TABLE; tx_cmd->key[0] = keyconf->hw_key_idx; iwl_mvm_set_tx_cmd_pn(info, crypto_hdr); break; default: tx_cmd->sec_ctl |= TX_CMD_SEC_EXT; } } /* * Allocates and sets the Tx cmd the driver data pointers in the skb */ static struct iwl_device_tx_cmd * iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, struct ieee80211_tx_info *info, int hdrlen, struct ieee80211_sta *sta, u8 sta_id) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct iwl_device_tx_cmd *dev_cmd; struct iwl_tx_cmd *tx_cmd; dev_cmd = iwl_trans_alloc_tx_cmd(mvm->trans); if (unlikely(!dev_cmd)) return NULL; dev_cmd->hdr.cmd = TX_CMD; if (iwl_mvm_has_new_tx_api(mvm)) { u32 rate_n_flags = 0; u16 flags = 0; struct iwl_mvm_sta *mvmsta = sta ? iwl_mvm_sta_from_mac80211(sta) : NULL; bool amsdu = false; if (ieee80211_is_data_qos(hdr->frame_control)) { u8 *qc = ieee80211_get_qos_ctl(hdr); amsdu = *qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT; } if (!info->control.hw_key) flags |= IWL_TX_FLAGS_ENCRYPT_DIS; /* * For data packets rate info comes from the fw. Only * set rate/antenna during connection establishment or in case * no station is given. 
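* In all other cases IWL_TX_FLAGS_CMD_RATE stays clear and rate_n_flags is left at 0, so the firmware picks the rate from the rate-scaling table it maintains for this station.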
*/ if (!sta || !ieee80211_is_data(hdr->frame_control) || mvmsta->sta_state < IEEE80211_STA_AUTHORIZED) { flags |= IWL_TX_FLAGS_CMD_RATE; rate_n_flags = iwl_mvm_get_tx_rate_n_flags(mvm, info, sta, hdr->frame_control); } if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { struct iwl_tx_cmd_gen3 *cmd = (void *)dev_cmd->payload; u32 offload_assist = iwl_mvm_tx_csum(mvm, skb, info, amsdu); cmd->offload_assist = cpu_to_le32(offload_assist); /* Total # bytes to be transmitted */ cmd->len = cpu_to_le16((u16)skb->len); /* Copy MAC header from skb into command buffer */ memcpy(cmd->hdr, hdr, hdrlen); cmd->flags = cpu_to_le16(flags); cmd->rate_n_flags = cpu_to_le32(rate_n_flags); } else { struct iwl_tx_cmd_gen2 *cmd = (void *)dev_cmd->payload; u16 offload_assist = iwl_mvm_tx_csum_pre_bz(mvm, skb, info, amsdu); cmd->offload_assist = cpu_to_le16(offload_assist); /* Total # bytes to be transmitted */ cmd->len = cpu_to_le16((u16)skb->len); /* Copy MAC header from skb into command buffer */ memcpy(cmd->hdr, hdr, hdrlen); cmd->flags = cpu_to_le32(flags); cmd->rate_n_flags = cpu_to_le32(rate_n_flags); } goto out; } tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload; if (info->control.hw_key) iwl_mvm_set_tx_cmd_crypto(mvm, info, tx_cmd, skb, hdrlen); iwl_mvm_set_tx_cmd(mvm, skb, tx_cmd, info, sta_id); iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control); /* Copy MAC header from skb into command buffer */ memcpy(tx_cmd->hdr, hdr, hdrlen); out: return dev_cmd; } static void iwl_mvm_skb_prepare_status(struct sk_buff *skb, struct iwl_device_tx_cmd *cmd) { struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb); memset(&skb_info->status, 0, sizeof(skb_info->status)); memset(skb_info->driver_data, 0, sizeof(skb_info->driver_data)); skb_info->driver_data[1] = cmd; } static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm, struct ieee80211_tx_info *info, struct ieee80211_hdr *hdr) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(info->control.vif); __le16 fc = hdr->frame_control; switch (info->control.vif->type) { case NL80211_IFTYPE_AP: case NL80211_IFTYPE_ADHOC: /* * Non-bufferable frames use the broadcast station, thus they * use the probe queue. * Also take care of the case where we send a deauth to a * station that we don't have, or similarly an association * response (with non-success status) for a station we can't * accept. * Also, disassociate frames might happen, particular with * reason 7 ("Class 3 frame received from nonassociated STA"). 
*/ if (ieee80211_is_mgmt(fc) && (!ieee80211_is_bufferable_mmpdu(fc) || ieee80211_is_deauth(fc) || ieee80211_is_disassoc(fc))) return mvm->probe_queue; if (!ieee80211_has_order(fc) && !ieee80211_is_probe_req(fc) && is_multicast_ether_addr(hdr->addr1)) return mvmvif->cab_queue; WARN_ONCE(info->control.vif->type != NL80211_IFTYPE_ADHOC, "fc=0x%02x", le16_to_cpu(fc)); return mvm->probe_queue; case NL80211_IFTYPE_P2P_DEVICE: if (ieee80211_is_mgmt(fc)) return mvm->p2p_dev_queue; WARN_ON_ONCE(1); return mvm->p2p_dev_queue; default: WARN_ONCE(1, "Not a ctrl vif, no available queue\n"); return -1; } } static void iwl_mvm_probe_resp_set_noa(struct iwl_mvm *mvm, struct sk_buff *skb) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(info->control.vif); struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data; int base_len = (u8 *)mgmt->u.probe_resp.variable - (u8 *)mgmt; struct iwl_probe_resp_data *resp_data; const u8 *ie; u8 *pos; u8 match[] = { (WLAN_OUI_WFA >> 16) & 0xff, (WLAN_OUI_WFA >> 8) & 0xff, WLAN_OUI_WFA & 0xff, WLAN_OUI_TYPE_WFA_P2P, }; rcu_read_lock(); resp_data = rcu_dereference(mvmvif->probe_resp_data); if (!resp_data) goto out; if (!resp_data->notif.noa_active) goto out; ie = cfg80211_find_ie_match(WLAN_EID_VENDOR_SPECIFIC, mgmt->u.probe_resp.variable, skb->len - base_len, match, 4, 2); if (!ie) { IWL_DEBUG_TX(mvm, "probe resp doesn't have P2P IE\n"); goto out; } if (skb_tailroom(skb) < resp_data->noa_len) { if (pskb_expand_head(skb, 0, resp_data->noa_len, GFP_ATOMIC)) { IWL_ERR(mvm, "Failed to reallocate probe resp\n"); goto out; } } pos = skb_put(skb, resp_data->noa_len); *pos++ = WLAN_EID_VENDOR_SPECIFIC; /* Set length of IE body (not including ID and length itself) */ *pos++ = resp_data->noa_len - 2; *pos++ = (WLAN_OUI_WFA >> 16) & 0xff; *pos++ = (WLAN_OUI_WFA >> 8) & 0xff; *pos++ = WLAN_OUI_WFA & 0xff; *pos++ = WLAN_OUI_TYPE_WFA_P2P; memcpy(pos, &resp_data->notif.noa_attr, resp_data->noa_len - sizeof(struct ieee80211_vendor_ie)); out: rcu_read_unlock(); } int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct ieee80211_tx_info info; struct iwl_device_tx_cmd *dev_cmd; u8 sta_id; int hdrlen = ieee80211_hdrlen(hdr->frame_control); __le16 fc = hdr->frame_control; bool offchannel = IEEE80211_SKB_CB(skb)->flags & IEEE80211_TX_CTL_TX_OFFCHAN; int queue = -1; if (IWL_MVM_NON_TRANSMITTING_AP && ieee80211_is_probe_resp(fc)) return -1; memcpy(&info, skb->cb, sizeof(info)); if (WARN_ON_ONCE(skb->len > IEEE80211_MAX_DATA_LEN + hdrlen)) return -1; if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_AMPDU)) return -1; if (info.control.vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(info.control.vif); if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE || info.control.vif->type == NL80211_IFTYPE_AP || info.control.vif->type == NL80211_IFTYPE_ADHOC) { if (!ieee80211_is_data(hdr->frame_control)) sta_id = mvmvif->bcast_sta.sta_id; else sta_id = mvmvif->mcast_sta.sta_id; queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info, hdr); } else if (info.control.vif->type == NL80211_IFTYPE_MONITOR) { queue = mvm->snif_queue; sta_id = mvm->snif_sta.sta_id; } else if (info.control.vif->type == NL80211_IFTYPE_STATION && offchannel) { /* * IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets * that can be used in 2 different types of vifs, P2P & * STATION. * P2P uses the offchannel queue. 
* STATION (HS2.0) uses the auxiliary context of the FW, * and hence needs to be sent on the aux queue. */ sta_id = mvm->aux_sta.sta_id; queue = mvm->aux_queue; } } if (queue < 0) { IWL_ERR(mvm, "No queue was found. Dropping TX\n"); return -1; } if (unlikely(ieee80211_is_probe_resp(fc))) iwl_mvm_probe_resp_set_noa(mvm, skb); IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, queue); dev_cmd = iwl_mvm_set_tx_params(mvm, skb, &info, hdrlen, NULL, sta_id); if (!dev_cmd) return -1; /* From now on, we cannot access info->control */ iwl_mvm_skb_prepare_status(skb, dev_cmd); if (iwl_trans_tx(mvm->trans, skb, dev_cmd, queue)) { iwl_trans_free_tx_cmd(mvm->trans, dev_cmd); return -1; } return 0; } unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm, struct ieee80211_sta *sta, unsigned int tid) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); enum nl80211_band band = mvmsta->vif->bss_conf.chandef.chan->band; u8 ac = tid_to_mac80211_ac[tid]; unsigned int txf; int lmac = iwl_mvm_get_lmac_id(mvm->fw, band); /* For HE redirect to trigger based fifos */ if (sta->he_cap.has_he && !WARN_ON(!iwl_mvm_has_new_tx_api(mvm))) ac += 4; txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac); /* * Don't send an AMSDU that will be longer than the TXF. * Add a security margin of 256 for the TX command + headers. * We also want to have the start of the next packet inside the * fifo to be able to send bursts. */ return min_t(unsigned int, mvmsta->max_amsdu_len, mvm->fwrt.smem_cfg.lmac[lmac].txfifo_size[txf] - 256); } #ifdef CONFIG_INET static int iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes, netdev_features_t netdev_flags, struct sk_buff_head *mpdus_skb) { struct sk_buff *tmp, *next; struct ieee80211_hdr *hdr = (void *)skb->data; char cb[sizeof(skb->cb)]; u16 i = 0; unsigned int tcp_payload_len; unsigned int mss = skb_shinfo(skb)->gso_size; bool ipv4 = (skb->protocol == htons(ETH_P_IP)); bool qos = ieee80211_is_data_qos(hdr->frame_control); u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0; skb_shinfo(skb)->gso_size = num_subframes * mss; memcpy(cb, skb->cb, sizeof(cb)); next = skb_gso_segment(skb, netdev_flags); skb_shinfo(skb)->gso_size = mss; skb_shinfo(skb)->gso_type = ipv4 ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6; if (WARN_ON_ONCE(IS_ERR(next))) return -EINVAL; else if (next) consume_skb(skb); skb_list_walk_safe(next, tmp, next) { memcpy(tmp->cb, cb, sizeof(tmp->cb)); /* * Compute the length of all the data added for the A-MSDU. * This will be used to compute the length to write in the TX * command. We have: SNAP + IP + TCP for n -1 subframes and * ETH header for n subframes. */ tcp_payload_len = skb_tail_pointer(tmp) - skb_transport_header(tmp) - tcp_hdrlen(tmp) + tmp->data_len; if (ipv4) ip_hdr(tmp)->id = htons(ip_base_id + i * num_subframes); if (tcp_payload_len > mss) { skb_shinfo(tmp)->gso_size = mss; skb_shinfo(tmp)->gso_type = ipv4 ? 
SKB_GSO_TCPV4 : SKB_GSO_TCPV6; } else { if (qos) { u8 *qc; if (ipv4) ip_send_check(ip_hdr(tmp)); qc = ieee80211_get_qos_ctl((void *)tmp->data); *qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT; } skb_shinfo(tmp)->gso_size = 0; } skb_mark_not_on_list(tmp); __skb_queue_tail(mpdus_skb, tmp); i++; } return 0; } static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, struct ieee80211_tx_info *info, struct ieee80211_sta *sta, struct sk_buff_head *mpdus_skb) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct ieee80211_hdr *hdr = (void *)skb->data; unsigned int mss = skb_shinfo(skb)->gso_size; unsigned int num_subframes, tcp_payload_len, subf_len, max_amsdu_len; u16 snap_ip_tcp, pad; netdev_features_t netdev_flags = NETIF_F_CSUM_MASK | NETIF_F_SG; u8 tid; snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) + tcp_hdrlen(skb); if (!mvmsta->max_amsdu_len || !ieee80211_is_data_qos(hdr->frame_control) || !mvmsta->amsdu_enabled) return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb); /* * Do not build AMSDU for IPv6 with extension headers. * Ask the stack to segment and checksum the generated MPDUs for us. */ if (skb->protocol == htons(ETH_P_IPV6) && ((struct ipv6hdr *)skb_network_header(skb))->nexthdr != IPPROTO_TCP) { netdev_flags &= ~NETIF_F_CSUM_MASK; return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb); } tid = ieee80211_get_tid(hdr); if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT)) return -EINVAL; /* * No need to lock amsdu_in_ampdu_allowed since it can't be modified * during a BA session. */ if ((info->flags & IEEE80211_TX_CTL_AMPDU && !mvmsta->tid_data[tid].amsdu_in_ampdu_allowed) || !(mvmsta->amsdu_enabled & BIT(tid))) return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb); /* * Take the min of ieee80211 station and mvm station */ max_amsdu_len = min_t(unsigned int, sta->max_amsdu_len, iwl_mvm_max_amsdu_size(mvm, sta, tid)); /* * Limit A-MSDU in A-MPDU to 4095 bytes when VHT is not * supported. This is a spec requirement (IEEE 802.11-2015 * section 8.7.3 NOTE 3). */ if (info->flags & IEEE80211_TX_CTL_AMPDU && !sta->vht_cap.vht_supported) max_amsdu_len = min_t(unsigned int, max_amsdu_len, 4095); /* Sub frame header + SNAP + IP header + TCP header + MSS */ subf_len = sizeof(struct ethhdr) + snap_ip_tcp + mss; pad = (4 - subf_len) & 0x3; /* * If we have N subframes in the A-MSDU, then the A-MSDU's size is * N * subf_len + (N - 1) * pad. */ num_subframes = (max_amsdu_len + pad) / (subf_len + pad); if (sta->max_amsdu_subframes && num_subframes > sta->max_amsdu_subframes) num_subframes = sta->max_amsdu_subframes; tcp_payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) - tcp_hdrlen(skb) + skb->data_len; /* * Make sure we have enough TBs for the A-MSDU: * 2 for each subframe * 1 more for each fragment * 1 more for the potential data in the header */ if ((num_subframes * 2 + skb_shinfo(skb)->nr_frags + 1) > mvm->trans->max_skb_frags) num_subframes = 1; if (num_subframes > 1) *ieee80211_get_qos_ctl(hdr) |= IEEE80211_QOS_CTL_A_MSDU_PRESENT; /* This skb fits in one single A-MSDU */ if (num_subframes * mss >= tcp_payload_len) { __skb_queue_tail(mpdus_skb, skb); return 0; } /* * Trick the segmentation function to make it * create SKBs that can fit into one A-MSDU.
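* iwl_mvm_tx_tso_segment() does that by temporarily raising gso_size to num_subframes * mss so that skb_gso_segment() emits one skb per A-MSDU; e.g. with mss = 1460 and num_subframes = 3, each resulting skb carries up to three 1460-byte subframes.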
*/ return iwl_mvm_tx_tso_segment(skb, num_subframes, netdev_flags, mpdus_skb); } #else /* CONFIG_INET */ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, struct ieee80211_tx_info *info, struct ieee80211_sta *sta, struct sk_buff_head *mpdus_skb) { /* Impossible to get TSO with CONFIG_INET */ WARN_ON(1); return -1; } #endif /* Check if there are any timed-out TIDs on a given shared TXQ */ static bool iwl_mvm_txq_should_update(struct iwl_mvm *mvm, int txq_id) { unsigned long queue_tid_bitmap = mvm->queue_info[txq_id].tid_bitmap; unsigned long now = jiffies; int tid; if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return false; for_each_set_bit(tid, &queue_tid_bitmap, IWL_MAX_TID_COUNT + 1) { if (time_before(mvm->queue_info[txq_id].last_frame_time[tid] + IWL_MVM_DQA_QUEUE_TIMEOUT, now)) return true; } return false; } static void iwl_mvm_tx_airtime(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, int airtime) { int mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK; struct iwl_mvm_tcm_mac *mdata; if (mac >= NUM_MAC_INDEX_DRIVER) return; mdata = &mvm->tcm.data[mac]; if (mvm->tcm.paused) return; if (time_after(jiffies, mvm->tcm.ts + MVM_TCM_PERIOD)) schedule_delayed_work(&mvm->tcm.work, 0); mdata->tx.airtime += airtime; } static int iwl_mvm_tx_pkt_queued(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, int tid) { u32 ac = tid_to_mac80211_ac[tid]; int mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK; struct iwl_mvm_tcm_mac *mdata; if (mac >= NUM_MAC_INDEX_DRIVER) return -EINVAL; mdata = &mvm->tcm.data[mac]; mdata->tx.pkts[ac]++; return 0; } /* * Sets the fields in the Tx cmd that are crypto related. * * This function must be called with BHs disabled. */ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, struct ieee80211_tx_info *info, struct ieee80211_sta *sta) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct iwl_mvm_sta *mvmsta; struct iwl_device_tx_cmd *dev_cmd; __le16 fc; u16 seq_number = 0; u8 tid = IWL_MAX_TID_COUNT; u16 txq_id; bool is_ampdu = false; int hdrlen; mvmsta = iwl_mvm_sta_from_mac80211(sta); fc = hdr->frame_control; hdrlen = ieee80211_hdrlen(fc); if (IWL_MVM_NON_TRANSMITTING_AP && ieee80211_is_probe_resp(fc)) return -1; if (WARN_ON_ONCE(!mvmsta)) return -1; if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_INVALID_STA)) return -1; if (unlikely(ieee80211_is_any_nullfunc(fc)) && sta->he_cap.has_he) return -1; if (unlikely(ieee80211_is_probe_resp(fc))) iwl_mvm_probe_resp_set_noa(mvm, skb); dev_cmd = iwl_mvm_set_tx_params(mvm, skb, info, hdrlen, sta, mvmsta->sta_id); if (!dev_cmd) goto drop; /* * we handle that entirely ourselves -- for uAPSD the firmware * will always send a notification, and for PS-Poll responses * we'll notify mac80211 when getting frame status */ info->flags &= ~IEEE80211_TX_STATUS_EOSP; spin_lock(&mvmsta->lock); /* nullfunc frames should go to the MGMT queue regardless of QOS, * the condition of !ieee80211_is_qos_nullfunc(fc) keeps the default * assignment of MGMT TID */ if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) { tid = ieee80211_get_tid(hdr); if (WARN_ONCE(tid >= IWL_MAX_TID_COUNT, "Invalid TID %d", tid)) goto drop_unlock_sta; is_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU; if (WARN_ONCE(is_ampdu && mvmsta->tid_data[tid].state != IWL_AGG_ON, "Invalid internal agg state %d for TID %d", mvmsta->tid_data[tid].state, tid)) goto drop_unlock_sta; seq_number = mvmsta->tid_data[tid].seq_number; seq_number &= IEEE80211_SCTL_SEQ; if (!iwl_mvm_has_new_tx_api(mvm)) { struct iwl_tx_cmd *tx_cmd = (void 
*)dev_cmd->payload; hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); hdr->seq_ctrl |= cpu_to_le16(seq_number); /* update the tx_cmd hdr as it was already copied */ tx_cmd->hdr->seq_ctrl = hdr->seq_ctrl; } } else if (ieee80211_is_data(fc) && !ieee80211_is_data_qos(fc)) { tid = IWL_TID_NON_QOS; } txq_id = mvmsta->tid_data[tid].txq_id; WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM); if (WARN_ONCE(txq_id == IWL_MVM_INVALID_QUEUE, "Invalid TXQ id")) { iwl_trans_free_tx_cmd(mvm->trans, dev_cmd); spin_unlock(&mvmsta->lock); return -1; } if (!iwl_mvm_has_new_tx_api(mvm)) { /* Keep track of the time of the last frame for this RA/TID */ mvm->queue_info[txq_id].last_frame_time[tid] = jiffies; /* * If we have timed-out TIDs - schedule the worker that will * reconfig the queues and update them * * Note that no lock is taken here in order to not serialize * the TX flow. This isn't dangerous because scheduling * mvm->add_stream_wk can't ruin the state, and if we DON'T * schedule it due to some race condition then we will on the * next TX that gets here. */ if (unlikely(mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_SHARED && iwl_mvm_txq_should_update(mvm, txq_id))) schedule_work(&mvm->add_stream_wk); } IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x len %d\n", mvmsta->sta_id, tid, txq_id, IEEE80211_SEQ_TO_SN(seq_number), skb->len); /* From now on, we cannot access info->control */ iwl_mvm_skb_prepare_status(skb, dev_cmd); if (ieee80211_is_data(fc)) iwl_mvm_mei_tx_copy_to_csme(mvm, skb, info->control.hw_key ? info->control.hw_key->iv_len : 0); if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id)) goto drop_unlock_sta; if (tid < IWL_MAX_TID_COUNT && !ieee80211_has_morefrags(fc)) mvmsta->tid_data[tid].seq_number = seq_number + 0x10; spin_unlock(&mvmsta->lock); if (iwl_mvm_tx_pkt_queued(mvm, mvmsta, tid == IWL_MAX_TID_COUNT ?
0 : tid)) goto drop; return 0; drop_unlock_sta: iwl_trans_free_tx_cmd(mvm->trans, dev_cmd); spin_unlock(&mvmsta->lock); drop: IWL_DEBUG_TX(mvm, "TX to [%d|%d] dropped\n", mvmsta->sta_id, tid); return -1; } int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb, struct ieee80211_sta *sta) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct ieee80211_tx_info info; struct sk_buff_head mpdus_skbs; unsigned int payload_len; int ret; if (WARN_ON_ONCE(!mvmsta)) return -1; if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_INVALID_STA)) return -1; memcpy(&info, skb->cb, sizeof(info)); if (!skb_is_gso(skb)) return iwl_mvm_tx_mpdu(mvm, skb, &info, sta); payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) - tcp_hdrlen(skb) + skb->data_len; if (payload_len <= skb_shinfo(skb)->gso_size) return iwl_mvm_tx_mpdu(mvm, skb, &info, sta); __skb_queue_head_init(&mpdus_skbs); ret = iwl_mvm_tx_tso(mvm, skb, &info, sta, &mpdus_skbs); if (ret) return ret; if (WARN_ON(skb_queue_empty(&mpdus_skbs))) return ret; while (!skb_queue_empty(&mpdus_skbs)) { skb = __skb_dequeue(&mpdus_skbs); ret = iwl_mvm_tx_mpdu(mvm, skb, &info, sta); if (ret) { __skb_queue_purge(&mpdus_skbs); return ret; } } return 0; } static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm, struct ieee80211_sta *sta, u8 tid) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; struct ieee80211_vif *vif = mvmsta->vif; u16 normalized_ssn; lockdep_assert_held(&mvmsta->lock); if ((tid_data->state == IWL_AGG_ON || tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA) && iwl_mvm_tid_queued(mvm, tid_data) == 0) { /* * Now that this aggregation or DQA queue is empty tell * mac80211 so it knows we no longer have frames buffered for * the station on this TID (for the TIM bitmap calculation.) */ ieee80211_sta_set_buffered(sta, tid, false); } /* * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need * to align the wrap around of ssn so we compare relevant values. 
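* E.g. on gen2, an ssn of 0x1a7 and a next_reclaimed of 0xa7 refer to the same queue position, which is why the ssn is masked down to 8 bits before the comparison below.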
*/ normalized_ssn = tid_data->ssn; if (mvm->trans->trans_cfg->gen2) normalized_ssn &= 0xff; if (normalized_ssn != tid_data->next_reclaimed) return; switch (tid_data->state) { case IWL_EMPTYING_HW_QUEUE_ADDBA: IWL_DEBUG_TX_QUEUES(mvm, "Can continue addBA flow ssn = next_recl = %d\n", tid_data->next_reclaimed); tid_data->state = IWL_AGG_STARTING; ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); break; case IWL_EMPTYING_HW_QUEUE_DELBA: IWL_DEBUG_TX_QUEUES(mvm, "Can continue DELBA flow ssn = next_recl = %d\n", tid_data->next_reclaimed); tid_data->state = IWL_AGG_OFF; ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); break; default: break; } } #ifdef CONFIG_IWLWIFI_DEBUG const char *iwl_mvm_get_tx_fail_reason(u32 status) { #define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x #define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x switch (status & TX_STATUS_MSK) { case TX_STATUS_SUCCESS: return "SUCCESS"; TX_STATUS_POSTPONE(DELAY); TX_STATUS_POSTPONE(FEW_BYTES); TX_STATUS_POSTPONE(BT_PRIO); TX_STATUS_POSTPONE(QUIET_PERIOD); TX_STATUS_POSTPONE(CALC_TTAK); TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY); TX_STATUS_FAIL(SHORT_LIMIT); TX_STATUS_FAIL(LONG_LIMIT); TX_STATUS_FAIL(UNDERRUN); TX_STATUS_FAIL(DRAIN_FLOW); TX_STATUS_FAIL(RFKILL_FLUSH); TX_STATUS_FAIL(LIFE_EXPIRE); TX_STATUS_FAIL(DEST_PS); TX_STATUS_FAIL(HOST_ABORTED); TX_STATUS_FAIL(BT_RETRY); TX_STATUS_FAIL(STA_INVALID); TX_STATUS_FAIL(FRAG_DROPPED); TX_STATUS_FAIL(TID_DISABLE); TX_STATUS_FAIL(FIFO_FLUSHED); TX_STATUS_FAIL(SMALL_CF_POLL); TX_STATUS_FAIL(FW_DROP); TX_STATUS_FAIL(STA_COLOR_MISMATCH); } return "UNKNOWN"; #undef TX_STATUS_FAIL #undef TX_STATUS_POSTPONE } #endif /* CONFIG_IWLWIFI_DEBUG */ static int iwl_mvm_get_hwrate_chan_width(u32 chan_width) { switch (chan_width) { case RATE_MCS_CHAN_WIDTH_20: return 0; case RATE_MCS_CHAN_WIDTH_40: return IEEE80211_TX_RC_40_MHZ_WIDTH; case RATE_MCS_CHAN_WIDTH_80: return IEEE80211_TX_RC_80_MHZ_WIDTH; case RATE_MCS_CHAN_WIDTH_160: return IEEE80211_TX_RC_160_MHZ_WIDTH; default: return 0; } } void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags, enum nl80211_band band, struct ieee80211_tx_rate *r) { u32 format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK; u32 rate = format == RATE_MCS_HT_MSK ? 
RATE_HT_MCS_INDEX(rate_n_flags) : rate_n_flags & RATE_MCS_CODE_MSK; r->flags |= iwl_mvm_get_hwrate_chan_width(rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK); if (rate_n_flags & RATE_MCS_SGI_MSK) r->flags |= IEEE80211_TX_RC_SHORT_GI; if (format == RATE_MCS_HT_MSK) { r->flags |= IEEE80211_TX_RC_MCS; r->idx = rate; } else if (format == RATE_MCS_VHT_MSK) { ieee80211_rate_set_vht(r, rate, ((rate_n_flags & RATE_MCS_NSS_MSK) >> RATE_MCS_NSS_POS) + 1); r->flags |= IEEE80211_TX_RC_VHT_MCS; } else if (format == RATE_MCS_HE_MSK) { /* mac80211 cannot do this without ieee80211_tx_status_ext() * but it only matters for radiotap */ r->idx = 0; } else { r->idx = iwl_mvm_legacy_hw_idx_to_mac80211_idx(rate_n_flags, band); } } void iwl_mvm_hwrate_to_tx_rate_v1(u32 rate_n_flags, enum nl80211_band band, struct ieee80211_tx_rate *r) { if (rate_n_flags & RATE_HT_MCS_GF_MSK) r->flags |= IEEE80211_TX_RC_GREEN_FIELD; r->flags |= iwl_mvm_get_hwrate_chan_width(rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK_V1); if (rate_n_flags & RATE_MCS_SGI_MSK_V1) r->flags |= IEEE80211_TX_RC_SHORT_GI; if (rate_n_flags & RATE_MCS_HT_MSK_V1) { r->flags |= IEEE80211_TX_RC_MCS; r->idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK_V1; } else if (rate_n_flags & RATE_MCS_VHT_MSK_V1) { ieee80211_rate_set_vht( r, rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK, ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >> RATE_VHT_MCS_NSS_POS) + 1); r->flags |= IEEE80211_TX_RC_VHT_MCS; } else { r->idx = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags, band); } } /* * translate ucode response to mac80211 tx status control values */ static void iwl_mvm_hwrate_to_tx_status(const struct iwl_fw *fw, u32 rate_n_flags, struct ieee80211_tx_info *info) { struct ieee80211_tx_rate *r = &info->status.rates[0]; if (iwl_fw_lookup_notif_ver(fw, LONG_GROUP, - TX_CMD, 0) > 6) + TX_CMD, 0) <= 6) rate_n_flags = iwl_new_rate_from_v1(rate_n_flags); info->status.antenna = ((rate_n_flags & RATE_MCS_ANT_AB_MSK) >> RATE_MCS_ANT_POS); iwl_mvm_hwrate_to_tx_rate(rate_n_flags, info->band, r); } static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm, u32 status, __le16 frame_control) { struct iwl_fw_dbg_trigger_tlv *trig; struct iwl_fw_dbg_trigger_tx_status *status_trig; int i; if ((status & TX_STATUS_MSK) != TX_STATUS_SUCCESS) { enum iwl_fw_ini_time_point tp = IWL_FW_INI_TIME_POINT_TX_FAILED; if (ieee80211_is_action(frame_control)) tp = IWL_FW_INI_TIME_POINT_TX_WFD_ACTION_FRAME_FAILED; iwl_dbg_tlv_time_point(&mvm->fwrt, tp, NULL); return; } trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL, FW_DBG_TRIGGER_TX_STATUS); if (!trig) return; status_trig = (void *)trig->data; for (i = 0; i < ARRAY_SIZE(status_trig->statuses); i++) { /* don't collect on status 0 */ if (!status_trig->statuses[i].status) break; if (status_trig->statuses[i].status != (status & TX_STATUS_MSK)) continue; iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, "Tx status %d was received", status & TX_STATUS_MSK); break; } } /* * iwl_mvm_get_scd_ssn - returns the SSN of the SCD * @tx_resp: the Tx response from the fw (agg or non-agg) * * When the fw sends an AMPDU, it fetches the MPDUs one after the other. Since * it can't know that everything will go well until the end of the AMPDU, it * can't know in advance the number of MPDUs that will be sent in the current * batch. This is why it writes the agg Tx response while it fetches the MPDUs. * Hence, it can't know in advance what the SSN of the SCD will be at the end * of the batch. This is why the SSN of the SCD is written at the end of the * whole struct at a variable offset. 
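* In practice the SSN sits right after the frame_count agg_tx_status entries, so it can only be located once frame_count is known.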
This function knows how to cope with the * variable offset and returns the SSN of the SCD. */ static inline u32 iwl_mvm_get_scd_ssn(struct iwl_mvm *mvm, struct iwl_mvm_tx_resp *tx_resp) { return le32_to_cpup((__le32 *)iwl_mvm_get_agg_status(mvm, tx_resp) + tx_resp->frame_count) & 0xfff; } static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) { struct ieee80211_sta *sta; u16 sequence = le16_to_cpu(pkt->hdr.sequence); int txq_id = SEQ_TO_QUEUE(sequence); /* struct iwl_mvm_tx_resp_v3 is almost the same */ struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data; int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid); int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid); struct agg_tx_status *agg_status = iwl_mvm_get_agg_status(mvm, tx_resp); u32 status = le16_to_cpu(agg_status->status); u16 ssn = iwl_mvm_get_scd_ssn(mvm, tx_resp); struct sk_buff_head skbs; u8 skb_freed = 0; u8 lq_color; u16 next_reclaimed, seq_ctl; bool is_ndp = false; __skb_queue_head_init(&skbs); if (iwl_mvm_has_new_tx_api(mvm)) txq_id = le16_to_cpu(tx_resp->tx_queue); seq_ctl = le16_to_cpu(tx_resp->seq_ctl); /* we can free until ssn % q.n_bd not inclusive */ iwl_trans_reclaim(mvm->trans, txq_id, ssn, &skbs); while (!skb_queue_empty(&skbs)) { struct sk_buff *skb = __skb_dequeue(&skbs); struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_hdr *hdr = (void *)skb->data; bool flushed = false; skb_freed++; iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]); memset(&info->status, 0, sizeof(info->status)); /* inform mac80211 about what happened with the frame */ switch (status & TX_STATUS_MSK) { case TX_STATUS_SUCCESS: case TX_STATUS_DIRECT_DONE: info->flags |= IEEE80211_TX_STAT_ACK; break; case TX_STATUS_FAIL_FIFO_FLUSHED: case TX_STATUS_FAIL_DRAIN_FLOW: flushed = true; break; case TX_STATUS_FAIL_DEST_PS: /* the FW should have stopped the queue and not * return this status */ IWL_ERR_LIMIT(mvm, "FW reported TX filtered, status=0x%x, FC=0x%x\n", status, le16_to_cpu(hdr->frame_control)); info->flags |= IEEE80211_TX_STAT_TX_FILTERED; break; default: break; } if ((status & TX_STATUS_MSK) != TX_STATUS_SUCCESS && ieee80211_is_mgmt(hdr->frame_control)) iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx); /* * If we are freeing multiple frames, mark all the frames * but the first one as acked, since they were acknowledged * before * */ if (skb_freed > 1) info->flags |= IEEE80211_TX_STAT_ACK; iwl_mvm_tx_status_check_trigger(mvm, status, hdr->frame_control); info->status.rates[0].count = tx_resp->failure_frame + 1; iwl_mvm_hwrate_to_tx_status(mvm->fw, le32_to_cpu(tx_resp->initial_rate), info); /* Don't assign the converted initial_rate, because driver * TLC uses this and doesn't support the new FW rate */ info->status.status_driver_data[1] = (void *)(uintptr_t)le32_to_cpu(tx_resp->initial_rate); /* Single frame failure in an AMPDU queue => send BAR */ if (info->flags & IEEE80211_TX_CTL_AMPDU && !(info->flags & IEEE80211_TX_STAT_ACK) && !(info->flags & IEEE80211_TX_STAT_TX_FILTERED) && !flushed) info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; info->flags &= ~IEEE80211_TX_CTL_AMPDU; /* W/A FW bug: seq_ctl is wrong upon failure / BAR frame */ if (ieee80211_is_back_req(hdr->frame_control)) seq_ctl = 0; else if (status != TX_STATUS_SUCCESS) seq_ctl = le16_to_cpu(hdr->seq_ctrl); if (unlikely(!seq_ctl)) { /* * If it is an NDP, we can't update next_reclaim since * its sequence control is 0. 
			 * Note that for that same reason, NDPs are never
			 * sent to A-MPDU'able queues so that we can never
			 * have more than one freed frame for a single Tx
			 * response (see WARN_ON below).
			 */
			if (ieee80211_is_qos_nullfunc(hdr->frame_control))
				is_ndp = true;
		}

		/*
		 * TODO: this is not accurate if we are freeing more than one
		 * packet.
		 */
		info->status.tx_time =
			le16_to_cpu(tx_resp->wireless_media_time);
		BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1);
		lq_color = TX_RES_RATE_TABLE_COL_GET(tx_resp->tlc_info);
		info->status.status_driver_data[0] =
			RS_DRV_DATA_PACK(lq_color, tx_resp->reduced_tpc);

		ieee80211_tx_status(mvm->hw, skb);
	}

	/* This is an aggregation queue or might become one, so we use
	 * the ssn since: ssn = wifi seq_num % 256.
	 * The seq_ctl is the sequence control of the packet to which
	 * this Tx response relates. But if there is a hole in the
	 * bitmap of the BA we received, this Tx response may allow to
	 * reclaim the hole and all the subsequent packets that were
	 * already acked. In that case, seq_ctl != ssn, and the next
	 * packet to be reclaimed will be ssn and not seq_ctl. In that
	 * case, several packets will be reclaimed even if
	 * frame_count = 1.
	 *
	 * The ssn is the index (% 256) of the latest packet that was
	 * treated (acked / dropped) + 1.
	 */
	next_reclaimed = ssn;

	IWL_DEBUG_TX_REPLY(mvm,
			   "TXQ %d status %s (0x%08x)\n",
			   txq_id, iwl_mvm_get_tx_fail_reason(status), status);

	IWL_DEBUG_TX_REPLY(mvm,
			   "\t\t\t\tinitial_rate 0x%x retries %d, idx=%d ssn=%d next_reclaimed=0x%x seq_ctl=0x%x\n",
			   le32_to_cpu(tx_resp->initial_rate),
			   tx_resp->failure_frame, SEQ_TO_INDEX(sequence),
			   ssn, next_reclaimed, seq_ctl);

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
	/*
	 * sta can't be NULL otherwise it'd mean that the sta has been freed in
	 * the firmware while we still have packets for it in the Tx queues.
	 */
	if (WARN_ON_ONCE(!sta))
		goto out;

	if (!IS_ERR(sta)) {
		struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);

		iwl_mvm_tx_airtime(mvm, mvmsta,
				   le16_to_cpu(tx_resp->wireless_media_time));

		if ((status & TX_STATUS_MSK) != TX_STATUS_SUCCESS &&
		    mvmsta->sta_state < IEEE80211_STA_AUTHORIZED)
			iwl_mvm_toggle_tx_ant(mvm, &mvmsta->tx_ant);

		if (sta->wme && tid != IWL_MGMT_TID) {
			struct iwl_mvm_tid_data *tid_data =
				&mvmsta->tid_data[tid];
			bool send_eosp_ndp = false;

			spin_lock_bh(&mvmsta->lock);

			if (!is_ndp) {
				tid_data->next_reclaimed = next_reclaimed;
				IWL_DEBUG_TX_REPLY(mvm,
						   "Next reclaimed packet:%d\n",
						   next_reclaimed);
			} else {
				IWL_DEBUG_TX_REPLY(mvm,
						   "NDP - don't update next_reclaimed\n");
			}

			iwl_mvm_check_ratid_empty(mvm, sta, tid);

			if (mvmsta->sleep_tx_count) {
				mvmsta->sleep_tx_count--;
				if (mvmsta->sleep_tx_count &&
				    !iwl_mvm_tid_queued(mvm, tid_data)) {
					/*
					 * The number of frames in the queue
					 * dropped to 0 even if we sent less
					 * frames than we thought we had on the
					 * Tx queue.
					 * This means we had holes in the BA
					 * window that we just filled, ask
					 * mac80211 to send EOSP since the
					 * firmware won't know how to do that.
					 * Send NDP and the firmware will send
					 * EOSP notification that will trigger
					 * a call to ieee80211_sta_eosp().
*/ send_eosp_ndp = true; } } spin_unlock_bh(&mvmsta->lock); if (send_eosp_ndp) { iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, IEEE80211_FRAME_RELEASE_UAPSD, 1, tid, false, false); mvmsta->sleep_tx_count = 0; ieee80211_send_eosp_nullfunc(sta, tid); } } if (mvmsta->next_status_eosp) { mvmsta->next_status_eosp = false; ieee80211_sta_eosp(sta); } } out: rcu_read_unlock(); } #ifdef CONFIG_IWLWIFI_DEBUG #define AGG_TX_STATE_(x) case AGG_TX_STATE_ ## x: return #x static const char *iwl_get_agg_tx_status(u16 status) { switch (status & AGG_TX_STATE_STATUS_MSK) { AGG_TX_STATE_(TRANSMITTED); AGG_TX_STATE_(UNDERRUN); AGG_TX_STATE_(BT_PRIO); AGG_TX_STATE_(FEW_BYTES); AGG_TX_STATE_(ABORT); AGG_TX_STATE_(TX_ON_AIR_DROP); AGG_TX_STATE_(LAST_SENT_TRY_CNT); AGG_TX_STATE_(LAST_SENT_BT_KILL); AGG_TX_STATE_(SCD_QUERY); AGG_TX_STATE_(TEST_BAD_CRC32); AGG_TX_STATE_(RESPONSE); AGG_TX_STATE_(DUMP_TX); AGG_TX_STATE_(DELAY_TX); } return "UNKNOWN"; } static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) { struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data; struct agg_tx_status *frame_status = iwl_mvm_get_agg_status(mvm, tx_resp); int i; bool tirgger_timepoint = false; for (i = 0; i < tx_resp->frame_count; i++) { u16 fstatus = le16_to_cpu(frame_status[i].status); /* In case one frame wasn't transmitted trigger time point */ tirgger_timepoint |= ((fstatus & AGG_TX_STATE_STATUS_MSK) != AGG_TX_STATE_TRANSMITTED); IWL_DEBUG_TX_REPLY(mvm, "status %s (0x%04x), try-count (%d) seq (0x%x)\n", iwl_get_agg_tx_status(fstatus), fstatus & AGG_TX_STATE_STATUS_MSK, (fstatus & AGG_TX_STATE_TRY_CNT_MSK) >> AGG_TX_STATE_TRY_CNT_POS, le16_to_cpu(frame_status[i].sequence)); } if (tirgger_timepoint) iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_TX_FAILED, NULL); } #else static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) {} #endif /* CONFIG_IWLWIFI_DEBUG */ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) { struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data; int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid); int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid); u16 sequence = le16_to_cpu(pkt->hdr.sequence); struct iwl_mvm_sta *mvmsta; int queue = SEQ_TO_QUEUE(sequence); struct ieee80211_sta *sta; if (WARN_ON_ONCE(queue < IWL_MVM_DQA_MIN_DATA_QUEUE && (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE))) return; iwl_mvm_rx_tx_cmd_agg_dbg(mvm, pkt); rcu_read_lock(); mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id); sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); if (WARN_ON_ONCE(!sta || !sta->wme)) { rcu_read_unlock(); return; } if (!WARN_ON_ONCE(!mvmsta)) { mvmsta->tid_data[tid].rate_n_flags = le32_to_cpu(tx_resp->initial_rate); mvmsta->tid_data[tid].tx_time = le16_to_cpu(tx_resp->wireless_media_time); mvmsta->tid_data[tid].lq_color = TX_RES_RATE_TABLE_COL_GET(tx_resp->tlc_info); iwl_mvm_tx_airtime(mvm, mvmsta, le16_to_cpu(tx_resp->wireless_media_time)); } rcu_read_unlock(); } void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data; if (tx_resp->frame_count == 1) iwl_mvm_rx_tx_cmd_single(mvm, pkt); else iwl_mvm_rx_tx_cmd_agg(mvm, pkt); } static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid, int txq, int index, struct ieee80211_tx_info *tx_info, u32 rate, bool is_flush) { struct sk_buff_head reclaimed_skbs; struct iwl_mvm_tid_data *tid_data = NULL; struct ieee80211_sta *sta; struct iwl_mvm_sta *mvmsta = 
NULL; struct sk_buff *skb; int freed; if (WARN_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations || tid > IWL_MAX_TID_COUNT, "sta_id %d tid %d", sta_id, tid)) return; rcu_read_lock(); sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); /* Reclaiming frames for a station that has been deleted ? */ if (WARN_ON_ONCE(!sta)) { rcu_read_unlock(); return; } __skb_queue_head_init(&reclaimed_skbs); /* * Release all TFDs before the SSN, i.e. all TFDs in front of * block-ack window (we assume that they've been successfully * transmitted ... if not, it's too late anyway). */ iwl_trans_reclaim(mvm->trans, txq, index, &reclaimed_skbs); skb_queue_walk(&reclaimed_skbs, skb) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]); memset(&info->status, 0, sizeof(info->status)); /* Packet was transmitted successfully, failures come as single * frames because before failing a frame the firmware transmits * it without aggregation at least once. */ if (!is_flush) info->flags |= IEEE80211_TX_STAT_ACK; } /* * It's possible to get a BA response after invalidating the rcu (rcu is * invalidated in order to prevent new Tx from being sent, but there may * be some frames already in-flight). * In this case we just want to reclaim, and could skip all the * sta-dependent stuff since it's in the middle of being removed * anyways. */ if (IS_ERR(sta)) goto out; mvmsta = iwl_mvm_sta_from_mac80211(sta); tid_data = &mvmsta->tid_data[tid]; if (tid_data->txq_id != txq) { IWL_ERR(mvm, "invalid reclaim request: Q %d, tid %d\n", tid_data->txq_id, tid); rcu_read_unlock(); return; } spin_lock_bh(&mvmsta->lock); tid_data->next_reclaimed = index; iwl_mvm_check_ratid_empty(mvm, sta, tid); freed = 0; /* pack lq color from tid_data along the reduced txp */ tx_info->status.status_driver_data[0] = RS_DRV_DATA_PACK(tid_data->lq_color, tx_info->status.status_driver_data[0]); tx_info->status.status_driver_data[1] = (void *)(uintptr_t)rate; skb_queue_walk(&reclaimed_skbs, skb) { struct ieee80211_hdr *hdr = (void *)skb->data; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); if (!is_flush) { if (ieee80211_is_data_qos(hdr->frame_control)) freed++; else WARN_ON_ONCE(tid != IWL_MAX_TID_COUNT); } /* this is the first skb we deliver in this batch */ /* put the rate scaling data there */ if (freed == 1) { info->flags |= IEEE80211_TX_STAT_AMPDU; memcpy(&info->status, &tx_info->status, sizeof(tx_info->status)); iwl_mvm_hwrate_to_tx_status(mvm->fw, rate, info); } } spin_unlock_bh(&mvmsta->lock); /* We got a BA notif with 0 acked or scd_ssn didn't progress which is * possible (i.e. first MPDU in the aggregation wasn't acked) * Still it's important to update RS about sent vs. acked. */ if (!is_flush && skb_queue_empty(&reclaimed_skbs)) { struct ieee80211_chanctx_conf *chanctx_conf = NULL; if (mvmsta->vif) chanctx_conf = rcu_dereference(mvmsta->vif->chanctx_conf); if (WARN_ON_ONCE(!chanctx_conf)) goto out; tx_info->band = chanctx_conf->def.chan->band; iwl_mvm_hwrate_to_tx_status(mvm->fw, rate, tx_info); if (!iwl_mvm_has_tlc_offload(mvm)) { IWL_DEBUG_TX_REPLY(mvm, "No reclaim. 
Update rs directly\n"); iwl_mvm_rs_tx_status(mvm, sta, tid, tx_info, false); } } out: rcu_read_unlock(); while (!skb_queue_empty(&reclaimed_skbs)) { skb = __skb_dequeue(&reclaimed_skbs); ieee80211_tx_status(mvm->hw, skb); } } void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); unsigned int pkt_len = iwl_rx_packet_payload_len(pkt); int sta_id, tid, txq, index; struct ieee80211_tx_info ba_info = {}; struct iwl_mvm_ba_notif *ba_notif; struct iwl_mvm_tid_data *tid_data; struct iwl_mvm_sta *mvmsta; ba_info.flags = IEEE80211_TX_STAT_AMPDU; if (iwl_mvm_has_new_tx_api(mvm)) { struct iwl_mvm_compressed_ba_notif *ba_res = (void *)pkt->data; u8 lq_color = TX_RES_RATE_TABLE_COL_GET(ba_res->tlc_rate_info); u16 tfd_cnt; int i; if (unlikely(sizeof(*ba_res) > pkt_len)) return; sta_id = ba_res->sta_id; ba_info.status.ampdu_ack_len = (u8)le16_to_cpu(ba_res->done); ba_info.status.ampdu_len = (u8)le16_to_cpu(ba_res->txed); ba_info.status.tx_time = (u16)le32_to_cpu(ba_res->wireless_time); ba_info.status.status_driver_data[0] = (void *)(uintptr_t)ba_res->reduced_txp; tfd_cnt = le16_to_cpu(ba_res->tfd_cnt); if (!tfd_cnt || struct_size(ba_res, tfd, tfd_cnt) > pkt_len) return; rcu_read_lock(); mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id); /* * It's possible to get a BA response after invalidating the rcu * (rcu is invalidated in order to prevent new Tx from being * sent, but there may be some frames already in-flight). * In this case we just want to reclaim, and could skip all the * sta-dependent stuff since it's in the middle of being removed * anyways. */ /* Free per TID */ for (i = 0; i < tfd_cnt; i++) { struct iwl_mvm_compressed_ba_tfd *ba_tfd = &ba_res->tfd[i]; tid = ba_tfd->tid; if (tid == IWL_MGMT_TID) tid = IWL_MAX_TID_COUNT; if (mvmsta) mvmsta->tid_data[i].lq_color = lq_color; iwl_mvm_tx_reclaim(mvm, sta_id, tid, (int)(le16_to_cpu(ba_tfd->q_num)), le16_to_cpu(ba_tfd->tfd_index), &ba_info, le32_to_cpu(ba_res->tx_rate), false); } if (mvmsta) iwl_mvm_tx_airtime(mvm, mvmsta, le32_to_cpu(ba_res->wireless_time)); rcu_read_unlock(); IWL_DEBUG_TX_REPLY(mvm, "BA_NOTIFICATION Received from sta_id = %d, flags %x, sent:%d, acked:%d\n", sta_id, le32_to_cpu(ba_res->flags), le16_to_cpu(ba_res->txed), le16_to_cpu(ba_res->done)); return; } ba_notif = (void *)pkt->data; sta_id = ba_notif->sta_id; tid = ba_notif->tid; /* "flow" corresponds to Tx queue */ txq = le16_to_cpu(ba_notif->scd_flow); /* "ssn" is start of block-ack Tx window, corresponds to index * (in Tx queue's circular buffer) of first TFD/frame in window */ index = le16_to_cpu(ba_notif->scd_ssn); rcu_read_lock(); mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id); if (WARN_ON_ONCE(!mvmsta)) { rcu_read_unlock(); return; } tid_data = &mvmsta->tid_data[tid]; ba_info.status.ampdu_ack_len = ba_notif->txed_2_done; ba_info.status.ampdu_len = ba_notif->txed; ba_info.status.tx_time = tid_data->tx_time; ba_info.status.status_driver_data[0] = (void *)(uintptr_t)ba_notif->reduced_txp; rcu_read_unlock(); iwl_mvm_tx_reclaim(mvm, sta_id, tid, txq, index, &ba_info, tid_data->rate_n_flags, false); IWL_DEBUG_TX_REPLY(mvm, "BA_NOTIFICATION Received from %pM, sta_id = %d\n", ba_notif->sta_addr, ba_notif->sta_id); IWL_DEBUG_TX_REPLY(mvm, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = %d, scd_ssn = %d sent:%d, acked:%d\n", ba_notif->tid, le16_to_cpu(ba_notif->seq_ctl), le64_to_cpu(ba_notif->bitmap), txq, index, ba_notif->txed, ba_notif->txed_2_done); IWL_DEBUG_TX_REPLY(mvm, "reduced txp from ba notif %d\n", 
ba_notif->reduced_txp); } /* * Note that there are transports that buffer frames before they reach * the firmware. This means that after flush_tx_path is called, the * queue might not be empty. The race-free way to handle this is to: * 1) set the station as draining * 2) flush the Tx path * 3) wait for the transport queues to be empty */ int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk) { int ret; struct iwl_tx_path_flush_cmd_v1 flush_cmd = { .queues_ctl = cpu_to_le32(tfd_msk), .flush_ctl = cpu_to_le16(DUMP_TX_FIFO_FLUSH), }; WARN_ON(iwl_mvm_has_new_tx_api(mvm)); ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, 0, sizeof(flush_cmd), &flush_cmd); if (ret) IWL_ERR(mvm, "Failed to send flush command (%d)\n", ret); return ret; } int iwl_mvm_flush_sta_tids(struct iwl_mvm *mvm, u32 sta_id, u16 tids) { int ret; struct iwl_tx_path_flush_cmd_rsp *rsp; struct iwl_tx_path_flush_cmd flush_cmd = { .sta_id = cpu_to_le32(sta_id), .tid_mask = cpu_to_le16(tids), }; struct iwl_host_cmd cmd = { .id = TXPATH_FLUSH, .len = { sizeof(flush_cmd), }, .data = { &flush_cmd, }, }; WARN_ON(!iwl_mvm_has_new_tx_api(mvm)); if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, TXPATH_FLUSH, 0) > 0) cmd.flags |= CMD_WANT_SKB; IWL_DEBUG_TX_QUEUES(mvm, "flush for sta id %d tid mask 0x%x\n", sta_id, tids); ret = iwl_mvm_send_cmd(mvm, &cmd); if (ret) { IWL_ERR(mvm, "Failed to send flush command (%d)\n", ret); return ret; } if (cmd.flags & CMD_WANT_SKB) { int i; int num_flushed_queues; if (WARN_ON_ONCE(iwl_rx_packet_payload_len(cmd.resp_pkt) != sizeof(*rsp))) { ret = -EIO; goto free_rsp; } rsp = (void *)cmd.resp_pkt->data; if (WARN_ONCE(le16_to_cpu(rsp->sta_id) != sta_id, "sta_id %d != rsp_sta_id %d", sta_id, le16_to_cpu(rsp->sta_id))) { ret = -EIO; goto free_rsp; } num_flushed_queues = le16_to_cpu(rsp->num_flushed_queues); if (WARN_ONCE(num_flushed_queues > IWL_TX_FLUSH_QUEUE_RSP, "num_flushed_queues %d", num_flushed_queues)) { ret = -EIO; goto free_rsp; } for (i = 0; i < num_flushed_queues; i++) { struct ieee80211_tx_info tx_info = {}; struct iwl_flush_queue_info *queue_info = &rsp->queues[i]; int tid = le16_to_cpu(queue_info->tid); int read_before = le16_to_cpu(queue_info->read_before_flush); int read_after = le16_to_cpu(queue_info->read_after_flush); int queue_num = le16_to_cpu(queue_info->queue_num); if (tid == IWL_MGMT_TID) tid = IWL_MAX_TID_COUNT; IWL_DEBUG_TX_QUEUES(mvm, "tid %d queue_id %d read-before %d read-after %d\n", tid, queue_num, read_before, read_after); iwl_mvm_tx_reclaim(mvm, sta_id, tid, queue_num, read_after, &tx_info, 0, true); } free_rsp: iwl_free_resp(&cmd); } return ret; } int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal) { struct iwl_mvm_int_sta *int_sta = sta; struct iwl_mvm_sta *mvm_sta = sta; BUILD_BUG_ON(offsetof(struct iwl_mvm_int_sta, sta_id) != offsetof(struct iwl_mvm_sta, sta_id)); if (iwl_mvm_has_new_tx_api(mvm)) return iwl_mvm_flush_sta_tids(mvm, mvm_sta->sta_id, 0xffff); if (internal) return iwl_mvm_flush_tx_path(mvm, int_sta->tfd_queue_msk); return iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk); } diff --git a/sys/contrib/dev/iwlwifi/pcie/ctxt-info-gen3.c b/sys/contrib/dev/iwlwifi/pcie/ctxt-info-gen3.c index 85a6da70ca78..75fd386b048e 100644 --- a/sys/contrib/dev/iwlwifi/pcie/ctxt-info-gen3.c +++ b/sys/contrib/dev/iwlwifi/pcie/ctxt-info-gen3.c @@ -1,342 +1,345 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2018-2021 Intel Corporation + * Copyright (C) 2018-2022 Intel Corporation */ #include "iwl-trans.h" #include "iwl-fh.h" #include 
"iwl-context-info-gen3.h" #include "internal.h" #include "iwl-prph.h" static void iwl_pcie_ctxt_info_dbg_enable(struct iwl_trans *trans, struct iwl_prph_scratch_hwm_cfg *dbg_cfg, u32 *control_flags) { enum iwl_fw_ini_allocation_id alloc_id = IWL_FW_INI_ALLOCATION_ID_DBGC1; struct iwl_fw_ini_allocation_tlv *fw_mon_cfg; u32 dbg_flags = 0; if (!iwl_trans_dbg_ini_valid(trans)) { struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon; iwl_pcie_alloc_fw_monitor(trans, 0); if (fw_mon->size) { dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_DRAM; IWL_DEBUG_FW(trans, "WRT: Applying DRAM buffer destination\n"); dbg_cfg->hwm_base_addr = cpu_to_le64(fw_mon->physical); dbg_cfg->hwm_size = cpu_to_le32(fw_mon->size); } goto out; } fw_mon_cfg = &trans->dbg.fw_mon_cfg[alloc_id]; switch (le32_to_cpu(fw_mon_cfg->buf_location)) { case IWL_FW_INI_LOCATION_SRAM_PATH: dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_INTERNAL; IWL_DEBUG_FW(trans, "WRT: Applying SMEM buffer destination\n"); break; case IWL_FW_INI_LOCATION_NPK_PATH: dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_TB22DTF; IWL_DEBUG_FW(trans, "WRT: Applying NPK buffer destination\n"); break; case IWL_FW_INI_LOCATION_DRAM_PATH: if (trans->dbg.fw_mon_ini[alloc_id].num_frags) { struct iwl_dram_data *frag = &trans->dbg.fw_mon_ini[alloc_id].frags[0]; dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_DRAM; dbg_cfg->hwm_base_addr = cpu_to_le64(frag->physical); dbg_cfg->hwm_size = cpu_to_le32(frag->size); dbg_cfg->debug_token_config = cpu_to_le32(trans->dbg.ucode_preset); IWL_DEBUG_FW(trans, "WRT: Applying DRAM destination (debug_token_config=%u)\n", dbg_cfg->debug_token_config); IWL_DEBUG_FW(trans, "WRT: Applying DRAM destination (alloc_id=%u, num_frags=%u)\n", alloc_id, trans->dbg.fw_mon_ini[alloc_id].num_frags); } break; default: IWL_ERR(trans, "WRT: Invalid buffer destination\n"); } out: if (dbg_flags) *control_flags |= IWL_PRPH_SCRATCH_EARLY_DEBUG_EN | dbg_flags; } int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, const struct fw_img *fw) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_context_info_gen3 *ctxt_info_gen3; struct iwl_prph_scratch *prph_scratch; struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl; struct iwl_prph_info *prph_info; u32 control_flags = 0; int ret; int cmdq_size = max_t(u32, IWL_CMD_QUEUE_SIZE, trans->cfg->min_txq_size); switch (trans_pcie->rx_buf_size) { case IWL_AMSDU_DEF: return -EINVAL; case IWL_AMSDU_2K: break; case IWL_AMSDU_4K: control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_4K; break; case IWL_AMSDU_8K: control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_4K; /* if firmware supports the ext size, tell it */ control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_EXT_8K; break; case IWL_AMSDU_12K: control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_4K; /* if firmware supports the ext size, tell it */ control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_EXT_16K; break; } /* Allocate prph scratch */ prph_scratch = dma_alloc_coherent(trans->dev, sizeof(*prph_scratch), &trans_pcie->prph_scratch_dma_addr, GFP_KERNEL); if (!prph_scratch) return -ENOMEM; prph_sc_ctrl = &prph_scratch->ctrl_cfg; prph_sc_ctrl->version.version = 0; prph_sc_ctrl->version.mac_id = cpu_to_le16((u16)iwl_read32(trans, CSR_HW_REV)); prph_sc_ctrl->version.size = cpu_to_le16(sizeof(*prph_scratch) / 4); control_flags |= IWL_PRPH_SCRATCH_MTR_MODE; control_flags |= IWL_PRPH_MTR_FORMAT_256B & IWL_PRPH_SCRATCH_MTR_FORMAT; + if (trans->trans_cfg->imr_enabled) + control_flags |= IWL_PRPH_SCRATCH_IMR_DEBUG_EN; + /* initialize RX default queue */ prph_sc_ctrl->rbd_cfg.free_rbd_addr = 
cpu_to_le64(trans_pcie->rxq->bd_dma); iwl_pcie_ctxt_info_dbg_enable(trans, &prph_sc_ctrl->hwm_cfg, &control_flags); prph_sc_ctrl->control.control_flags = cpu_to_le32(control_flags); /* allocate ucode sections in dram and set addresses */ ret = iwl_pcie_init_fw_sec(trans, fw, &prph_scratch->dram); if (ret) goto err_free_prph_scratch; /* Allocate prph information * currently we don't assign to the prph info anything, but it would get * assigned later * * We also use the second half of this page to give the device some * dummy TR/CR tail pointers - which shouldn't be necessary as we don't * use this, but the hardware still reads/writes there and we can't let * it go do that with a NULL pointer. */ BUILD_BUG_ON(sizeof(*prph_info) > PAGE_SIZE / 2); prph_info = dma_alloc_coherent(trans->dev, PAGE_SIZE, &trans_pcie->prph_info_dma_addr, GFP_KERNEL); if (!prph_info) { ret = -ENOMEM; goto err_free_prph_scratch; } /* Allocate context info */ ctxt_info_gen3 = dma_alloc_coherent(trans->dev, sizeof(*ctxt_info_gen3), &trans_pcie->ctxt_info_dma_addr, GFP_KERNEL); if (!ctxt_info_gen3) { ret = -ENOMEM; goto err_free_prph_info; } ctxt_info_gen3->prph_info_base_addr = cpu_to_le64(trans_pcie->prph_info_dma_addr); ctxt_info_gen3->prph_scratch_base_addr = cpu_to_le64(trans_pcie->prph_scratch_dma_addr); ctxt_info_gen3->prph_scratch_size = cpu_to_le32(sizeof(*prph_scratch)); ctxt_info_gen3->cr_head_idx_arr_base_addr = cpu_to_le64(trans_pcie->rxq->rb_stts_dma); ctxt_info_gen3->tr_tail_idx_arr_base_addr = cpu_to_le64(trans_pcie->prph_info_dma_addr + PAGE_SIZE / 2); ctxt_info_gen3->cr_tail_idx_arr_base_addr = cpu_to_le64(trans_pcie->prph_info_dma_addr + 3 * PAGE_SIZE / 4); ctxt_info_gen3->mtr_base_addr = cpu_to_le64(trans->txqs.txq[trans->txqs.cmd.q_id]->dma_addr); ctxt_info_gen3->mcr_base_addr = cpu_to_le64(trans_pcie->rxq->used_bd_dma); ctxt_info_gen3->mtr_size = cpu_to_le16(TFD_QUEUE_CB_SIZE(cmdq_size)); ctxt_info_gen3->mcr_size = cpu_to_le16(RX_QUEUE_CB_SIZE(trans->cfg->num_rbds)); trans_pcie->ctxt_info_gen3 = ctxt_info_gen3; trans_pcie->prph_info = prph_info; trans_pcie->prph_scratch = prph_scratch; /* Allocate IML */ trans_pcie->iml = dma_alloc_coherent(trans->dev, trans->iml_len, &trans_pcie->iml_dma_addr, GFP_KERNEL); if (!trans_pcie->iml) { ret = -ENOMEM; goto err_free_ctxt_info; } memcpy(trans_pcie->iml, trans->iml, trans->iml_len); iwl_enable_fw_load_int_ctx_info(trans); /* kick FW self load */ iwl_write64(trans, CSR_CTXT_INFO_ADDR, trans_pcie->ctxt_info_dma_addr); iwl_write64(trans, CSR_IML_DATA_ADDR, trans_pcie->iml_dma_addr); iwl_write32(trans, CSR_IML_SIZE_ADDR, trans->iml_len); iwl_set_bit(trans, CSR_CTXT_INFO_BOOT_CTRL, CSR_AUTO_FUNC_BOOT_ENA); return 0; err_free_ctxt_info: dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_gen3), trans_pcie->ctxt_info_gen3, trans_pcie->ctxt_info_dma_addr); trans_pcie->ctxt_info_gen3 = NULL; err_free_prph_info: dma_free_coherent(trans->dev, PAGE_SIZE, prph_info, trans_pcie->prph_info_dma_addr); err_free_prph_scratch: dma_free_coherent(trans->dev, sizeof(*prph_scratch), prph_scratch, trans_pcie->prph_scratch_dma_addr); return ret; } void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans, bool alive) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); if (trans_pcie->iml) { dma_free_coherent(trans->dev, trans->iml_len, trans_pcie->iml, trans_pcie->iml_dma_addr); trans_pcie->iml_dma_addr = 0; trans_pcie->iml = NULL; } iwl_pcie_ctxt_info_free_fw_img(trans); if (alive) return; if (!trans_pcie->ctxt_info_gen3) return; /* ctxt_info_gen3 and 
prph_scratch are still needed for PNVM load */ dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_gen3), trans_pcie->ctxt_info_gen3, trans_pcie->ctxt_info_dma_addr); trans_pcie->ctxt_info_dma_addr = 0; trans_pcie->ctxt_info_gen3 = NULL; dma_free_coherent(trans->dev, sizeof(*trans_pcie->prph_scratch), trans_pcie->prph_scratch, trans_pcie->prph_scratch_dma_addr); trans_pcie->prph_scratch_dma_addr = 0; trans_pcie->prph_scratch = NULL; /* this is needed for the entire lifetime */ dma_free_coherent(trans->dev, PAGE_SIZE, trans_pcie->prph_info, trans_pcie->prph_info_dma_addr); trans_pcie->prph_info_dma_addr = 0; trans_pcie->prph_info = NULL; } int iwl_trans_pcie_ctx_info_gen3_set_pnvm(struct iwl_trans *trans, const void *data, u32 len) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl = &trans_pcie->prph_scratch->ctrl_cfg; int ret; if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) return 0; /* only allocate the DRAM if not allocated yet */ if (!trans->pnvm_loaded) { if (WARN_ON(prph_sc_ctrl->pnvm_cfg.pnvm_size)) return -EBUSY; ret = iwl_pcie_ctxt_info_alloc_dma(trans, data, len, &trans_pcie->pnvm_dram); if (ret < 0) { IWL_DEBUG_FW(trans, "Failed to allocate PNVM DMA %d.\n", ret); return ret; } } prph_sc_ctrl->pnvm_cfg.pnvm_base_addr = cpu_to_le64(trans_pcie->pnvm_dram.physical); prph_sc_ctrl->pnvm_cfg.pnvm_size = cpu_to_le32(trans_pcie->pnvm_dram.size); return 0; } int iwl_trans_pcie_ctx_info_gen3_set_reduce_power(struct iwl_trans *trans, const void *data, u32 len) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl = &trans_pcie->prph_scratch->ctrl_cfg; int ret; if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) return 0; /* only allocate the DRAM if not allocated yet */ if (!trans->reduce_power_loaded) { if (WARN_ON(prph_sc_ctrl->reduce_power_cfg.size)) return -EBUSY; ret = iwl_pcie_ctxt_info_alloc_dma(trans, data, len, &trans_pcie->reduce_power_dram); if (ret < 0) { IWL_DEBUG_FW(trans, "Failed to allocate reduce power DMA %d.\n", ret); return ret; } } prph_sc_ctrl->reduce_power_cfg.base_addr = cpu_to_le64(trans_pcie->reduce_power_dram.physical); prph_sc_ctrl->reduce_power_cfg.size = cpu_to_le32(trans_pcie->reduce_power_dram.size); return 0; } diff --git a/sys/contrib/dev/iwlwifi/pcie/drv.c b/sys/contrib/dev/iwlwifi/pcie/drv.c index cd890fae6fbc..536a40f1e5cc 100644 --- a/sys/contrib/dev/iwlwifi/pcie/drv.c +++ b/sys/contrib/dev/iwlwifi/pcie/drv.c @@ -1,1768 +1,1769 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2005-2014, 2018-2021 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include #include #include #include "fw/acpi.h" #include "iwl-trans.h" #include "iwl-drv.h" #include "iwl-prph.h" #include "internal.h" #define TRANS_CFG_MARKER BIT(0) #define _IS_A(cfg, _struct) __builtin_types_compatible_p(typeof(cfg), \ struct _struct) extern int _invalid_type; #define _TRANS_CFG_MARKER(cfg) \ (__builtin_choose_expr(_IS_A(cfg, iwl_cfg_trans_params), \ TRANS_CFG_MARKER, \ __builtin_choose_expr(_IS_A(cfg, iwl_cfg), 0, _invalid_type))) #define _ASSIGN_CFG(cfg) (_TRANS_CFG_MARKER(cfg) + (kernel_ulong_t)&(cfg)) #define IWL_PCI_DEVICE(dev, subdev, cfg) \ .vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \ .subvendor = PCI_ANY_ID, .subdevice = (subdev), \ .driver_data = _ASSIGN_CFG(cfg) /* 
Hardware specific file defines the PCI IDs table for that hardware module */ static const struct pci_device_id iwl_hw_card_ids[] = { #if IS_ENABLED(CONFIG_IWLDVM) {IWL_PCI_DEVICE(0x4232, 0x1201, iwl5100_agn_cfg)}, /* Mini Card */ {IWL_PCI_DEVICE(0x4232, 0x1301, iwl5100_agn_cfg)}, /* Half Mini Card */ {IWL_PCI_DEVICE(0x4232, 0x1204, iwl5100_agn_cfg)}, /* Mini Card */ {IWL_PCI_DEVICE(0x4232, 0x1304, iwl5100_agn_cfg)}, /* Half Mini Card */ {IWL_PCI_DEVICE(0x4232, 0x1205, iwl5100_bgn_cfg)}, /* Mini Card */ {IWL_PCI_DEVICE(0x4232, 0x1305, iwl5100_bgn_cfg)}, /* Half Mini Card */ {IWL_PCI_DEVICE(0x4232, 0x1206, iwl5100_abg_cfg)}, /* Mini Card */ {IWL_PCI_DEVICE(0x4232, 0x1306, iwl5100_abg_cfg)}, /* Half Mini Card */ {IWL_PCI_DEVICE(0x4232, 0x1221, iwl5100_agn_cfg)}, /* Mini Card */ {IWL_PCI_DEVICE(0x4232, 0x1321, iwl5100_agn_cfg)}, /* Half Mini Card */ {IWL_PCI_DEVICE(0x4232, 0x1224, iwl5100_agn_cfg)}, /* Mini Card */ {IWL_PCI_DEVICE(0x4232, 0x1324, iwl5100_agn_cfg)}, /* Half Mini Card */ {IWL_PCI_DEVICE(0x4232, 0x1225, iwl5100_bgn_cfg)}, /* Mini Card */ {IWL_PCI_DEVICE(0x4232, 0x1325, iwl5100_bgn_cfg)}, /* Half Mini Card */ {IWL_PCI_DEVICE(0x4232, 0x1226, iwl5100_abg_cfg)}, /* Mini Card */ {IWL_PCI_DEVICE(0x4232, 0x1326, iwl5100_abg_cfg)}, /* Half Mini Card */ {IWL_PCI_DEVICE(0x4237, 0x1211, iwl5100_agn_cfg)}, /* Mini Card */ {IWL_PCI_DEVICE(0x4237, 0x1311, iwl5100_agn_cfg)}, /* Half Mini Card */ {IWL_PCI_DEVICE(0x4237, 0x1214, iwl5100_agn_cfg)}, /* Mini Card */ {IWL_PCI_DEVICE(0x4237, 0x1314, iwl5100_agn_cfg)}, /* Half Mini Card */ {IWL_PCI_DEVICE(0x4237, 0x1215, iwl5100_bgn_cfg)}, /* Mini Card */ {IWL_PCI_DEVICE(0x4237, 0x1315, iwl5100_bgn_cfg)}, /* Half Mini Card */ {IWL_PCI_DEVICE(0x4237, 0x1216, iwl5100_abg_cfg)}, /* Mini Card */ {IWL_PCI_DEVICE(0x4237, 0x1316, iwl5100_abg_cfg)}, /* Half Mini Card */ /* 5300 Series WiFi */ {IWL_PCI_DEVICE(0x4235, 0x1021, iwl5300_agn_cfg)}, /* Mini Card */ {IWL_PCI_DEVICE(0x4235, 0x1121, iwl5300_agn_cfg)}, /* Half Mini Card */ {IWL_PCI_DEVICE(0x4235, 0x1024, iwl5300_agn_cfg)}, /* Mini Card */ {IWL_PCI_DEVICE(0x4235, 0x1124, iwl5300_agn_cfg)}, /* Half Mini Card */ {IWL_PCI_DEVICE(0x4235, 0x1001, iwl5300_agn_cfg)}, /* Mini Card */ {IWL_PCI_DEVICE(0x4235, 0x1101, iwl5300_agn_cfg)}, /* Half Mini Card */ {IWL_PCI_DEVICE(0x4235, 0x1004, iwl5300_agn_cfg)}, /* Mini Card */ {IWL_PCI_DEVICE(0x4235, 0x1104, iwl5300_agn_cfg)}, /* Half Mini Card */ {IWL_PCI_DEVICE(0x4236, 0x1011, iwl5300_agn_cfg)}, /* Mini Card */ {IWL_PCI_DEVICE(0x4236, 0x1111, iwl5300_agn_cfg)}, /* Half Mini Card */ {IWL_PCI_DEVICE(0x4236, 0x1014, iwl5300_agn_cfg)}, /* Mini Card */ {IWL_PCI_DEVICE(0x4236, 0x1114, iwl5300_agn_cfg)}, /* Half Mini Card */ /* 5350 Series WiFi/WiMax */ {IWL_PCI_DEVICE(0x423A, 0x1001, iwl5350_agn_cfg)}, /* Mini Card */ {IWL_PCI_DEVICE(0x423A, 0x1021, iwl5350_agn_cfg)}, /* Mini Card */ {IWL_PCI_DEVICE(0x423B, 0x1011, iwl5350_agn_cfg)}, /* Mini Card */ /* 5150 Series Wifi/WiMax */ {IWL_PCI_DEVICE(0x423C, 0x1201, iwl5150_agn_cfg)}, /* Mini Card */ {IWL_PCI_DEVICE(0x423C, 0x1301, iwl5150_agn_cfg)}, /* Half Mini Card */ {IWL_PCI_DEVICE(0x423C, 0x1206, iwl5150_abg_cfg)}, /* Mini Card */ {IWL_PCI_DEVICE(0x423C, 0x1306, iwl5150_abg_cfg)}, /* Half Mini Card */ {IWL_PCI_DEVICE(0x423C, 0x1221, iwl5150_agn_cfg)}, /* Mini Card */ {IWL_PCI_DEVICE(0x423C, 0x1321, iwl5150_agn_cfg)}, /* Half Mini Card */ {IWL_PCI_DEVICE(0x423C, 0x1326, iwl5150_abg_cfg)}, /* Half Mini Card */ {IWL_PCI_DEVICE(0x423D, 0x1211, iwl5150_agn_cfg)}, /* Mini Card */ {IWL_PCI_DEVICE(0x423D, 0x1311, iwl5150_agn_cfg)}, 
/* Half Mini Card */ {IWL_PCI_DEVICE(0x423D, 0x1216, iwl5150_abg_cfg)}, /* Mini Card */ {IWL_PCI_DEVICE(0x423D, 0x1316, iwl5150_abg_cfg)}, /* Half Mini Card */ /* 6x00 Series */ {IWL_PCI_DEVICE(0x422B, 0x1101, iwl6000_3agn_cfg)}, {IWL_PCI_DEVICE(0x422B, 0x1108, iwl6000_3agn_cfg)}, {IWL_PCI_DEVICE(0x422B, 0x1121, iwl6000_3agn_cfg)}, {IWL_PCI_DEVICE(0x422B, 0x1128, iwl6000_3agn_cfg)}, {IWL_PCI_DEVICE(0x422C, 0x1301, iwl6000i_2agn_cfg)}, {IWL_PCI_DEVICE(0x422C, 0x1306, iwl6000i_2abg_cfg)}, {IWL_PCI_DEVICE(0x422C, 0x1307, iwl6000i_2bg_cfg)}, {IWL_PCI_DEVICE(0x422C, 0x1321, iwl6000i_2agn_cfg)}, {IWL_PCI_DEVICE(0x422C, 0x1326, iwl6000i_2abg_cfg)}, {IWL_PCI_DEVICE(0x4238, 0x1111, iwl6000_3agn_cfg)}, {IWL_PCI_DEVICE(0x4238, 0x1118, iwl6000_3agn_cfg)}, {IWL_PCI_DEVICE(0x4239, 0x1311, iwl6000i_2agn_cfg)}, {IWL_PCI_DEVICE(0x4239, 0x1316, iwl6000i_2abg_cfg)}, /* 6x05 Series */ {IWL_PCI_DEVICE(0x0082, 0x1301, iwl6005_2agn_cfg)}, {IWL_PCI_DEVICE(0x0082, 0x1306, iwl6005_2abg_cfg)}, {IWL_PCI_DEVICE(0x0082, 0x1307, iwl6005_2bg_cfg)}, {IWL_PCI_DEVICE(0x0082, 0x1308, iwl6005_2agn_cfg)}, {IWL_PCI_DEVICE(0x0082, 0x1321, iwl6005_2agn_cfg)}, {IWL_PCI_DEVICE(0x0082, 0x1326, iwl6005_2abg_cfg)}, {IWL_PCI_DEVICE(0x0082, 0x1328, iwl6005_2agn_cfg)}, {IWL_PCI_DEVICE(0x0085, 0x1311, iwl6005_2agn_cfg)}, {IWL_PCI_DEVICE(0x0085, 0x1318, iwl6005_2agn_cfg)}, {IWL_PCI_DEVICE(0x0085, 0x1316, iwl6005_2abg_cfg)}, {IWL_PCI_DEVICE(0x0082, 0xC020, iwl6005_2agn_sff_cfg)}, {IWL_PCI_DEVICE(0x0085, 0xC220, iwl6005_2agn_sff_cfg)}, {IWL_PCI_DEVICE(0x0085, 0xC228, iwl6005_2agn_sff_cfg)}, {IWL_PCI_DEVICE(0x0082, 0x4820, iwl6005_2agn_d_cfg)}, {IWL_PCI_DEVICE(0x0082, 0x1304, iwl6005_2agn_mow1_cfg)},/* low 5GHz active */ {IWL_PCI_DEVICE(0x0082, 0x1305, iwl6005_2agn_mow2_cfg)},/* high 5GHz active */ /* 6x30 Series */ {IWL_PCI_DEVICE(0x008A, 0x5305, iwl1030_bgn_cfg)}, {IWL_PCI_DEVICE(0x008A, 0x5307, iwl1030_bg_cfg)}, {IWL_PCI_DEVICE(0x008A, 0x5325, iwl1030_bgn_cfg)}, {IWL_PCI_DEVICE(0x008A, 0x5327, iwl1030_bg_cfg)}, {IWL_PCI_DEVICE(0x008B, 0x5315, iwl1030_bgn_cfg)}, {IWL_PCI_DEVICE(0x008B, 0x5317, iwl1030_bg_cfg)}, {IWL_PCI_DEVICE(0x0090, 0x5211, iwl6030_2agn_cfg)}, {IWL_PCI_DEVICE(0x0090, 0x5215, iwl6030_2bgn_cfg)}, {IWL_PCI_DEVICE(0x0090, 0x5216, iwl6030_2abg_cfg)}, {IWL_PCI_DEVICE(0x0091, 0x5201, iwl6030_2agn_cfg)}, {IWL_PCI_DEVICE(0x0091, 0x5205, iwl6030_2bgn_cfg)}, {IWL_PCI_DEVICE(0x0091, 0x5206, iwl6030_2abg_cfg)}, {IWL_PCI_DEVICE(0x0091, 0x5207, iwl6030_2bg_cfg)}, {IWL_PCI_DEVICE(0x0091, 0x5221, iwl6030_2agn_cfg)}, {IWL_PCI_DEVICE(0x0091, 0x5225, iwl6030_2bgn_cfg)}, {IWL_PCI_DEVICE(0x0091, 0x5226, iwl6030_2abg_cfg)}, /* 6x50 WiFi/WiMax Series */ {IWL_PCI_DEVICE(0x0087, 0x1301, iwl6050_2agn_cfg)}, {IWL_PCI_DEVICE(0x0087, 0x1306, iwl6050_2abg_cfg)}, {IWL_PCI_DEVICE(0x0087, 0x1321, iwl6050_2agn_cfg)}, {IWL_PCI_DEVICE(0x0087, 0x1326, iwl6050_2abg_cfg)}, {IWL_PCI_DEVICE(0x0089, 0x1311, iwl6050_2agn_cfg)}, {IWL_PCI_DEVICE(0x0089, 0x1316, iwl6050_2abg_cfg)}, /* 6150 WiFi/WiMax Series */ {IWL_PCI_DEVICE(0x0885, 0x1305, iwl6150_bgn_cfg)}, {IWL_PCI_DEVICE(0x0885, 0x1307, iwl6150_bg_cfg)}, {IWL_PCI_DEVICE(0x0885, 0x1325, iwl6150_bgn_cfg)}, {IWL_PCI_DEVICE(0x0885, 0x1327, iwl6150_bg_cfg)}, {IWL_PCI_DEVICE(0x0886, 0x1315, iwl6150_bgn_cfg)}, {IWL_PCI_DEVICE(0x0886, 0x1317, iwl6150_bg_cfg)}, /* 1000 Series WiFi */ {IWL_PCI_DEVICE(0x0083, 0x1205, iwl1000_bgn_cfg)}, {IWL_PCI_DEVICE(0x0083, 0x1305, iwl1000_bgn_cfg)}, {IWL_PCI_DEVICE(0x0083, 0x1225, iwl1000_bgn_cfg)}, {IWL_PCI_DEVICE(0x0083, 0x1325, iwl1000_bgn_cfg)}, {IWL_PCI_DEVICE(0x0084, 0x1215, 
iwl1000_bgn_cfg)}, {IWL_PCI_DEVICE(0x0084, 0x1315, iwl1000_bgn_cfg)}, {IWL_PCI_DEVICE(0x0083, 0x1206, iwl1000_bg_cfg)}, {IWL_PCI_DEVICE(0x0083, 0x1306, iwl1000_bg_cfg)}, {IWL_PCI_DEVICE(0x0083, 0x1226, iwl1000_bg_cfg)}, {IWL_PCI_DEVICE(0x0083, 0x1326, iwl1000_bg_cfg)}, {IWL_PCI_DEVICE(0x0084, 0x1216, iwl1000_bg_cfg)}, {IWL_PCI_DEVICE(0x0084, 0x1316, iwl1000_bg_cfg)}, /* 100 Series WiFi */ {IWL_PCI_DEVICE(0x08AE, 0x1005, iwl100_bgn_cfg)}, {IWL_PCI_DEVICE(0x08AE, 0x1007, iwl100_bg_cfg)}, {IWL_PCI_DEVICE(0x08AF, 0x1015, iwl100_bgn_cfg)}, {IWL_PCI_DEVICE(0x08AF, 0x1017, iwl100_bg_cfg)}, {IWL_PCI_DEVICE(0x08AE, 0x1025, iwl100_bgn_cfg)}, {IWL_PCI_DEVICE(0x08AE, 0x1027, iwl100_bg_cfg)}, /* 130 Series WiFi */ {IWL_PCI_DEVICE(0x0896, 0x5005, iwl130_bgn_cfg)}, {IWL_PCI_DEVICE(0x0896, 0x5007, iwl130_bg_cfg)}, {IWL_PCI_DEVICE(0x0897, 0x5015, iwl130_bgn_cfg)}, {IWL_PCI_DEVICE(0x0897, 0x5017, iwl130_bg_cfg)}, {IWL_PCI_DEVICE(0x0896, 0x5025, iwl130_bgn_cfg)}, {IWL_PCI_DEVICE(0x0896, 0x5027, iwl130_bg_cfg)}, /* 2x00 Series */ {IWL_PCI_DEVICE(0x0890, 0x4022, iwl2000_2bgn_cfg)}, {IWL_PCI_DEVICE(0x0891, 0x4222, iwl2000_2bgn_cfg)}, {IWL_PCI_DEVICE(0x0890, 0x4422, iwl2000_2bgn_cfg)}, {IWL_PCI_DEVICE(0x0890, 0x4822, iwl2000_2bgn_d_cfg)}, /* 2x30 Series */ {IWL_PCI_DEVICE(0x0887, 0x4062, iwl2030_2bgn_cfg)}, {IWL_PCI_DEVICE(0x0888, 0x4262, iwl2030_2bgn_cfg)}, {IWL_PCI_DEVICE(0x0887, 0x4462, iwl2030_2bgn_cfg)}, /* 6x35 Series */ {IWL_PCI_DEVICE(0x088E, 0x4060, iwl6035_2agn_cfg)}, {IWL_PCI_DEVICE(0x088E, 0x406A, iwl6035_2agn_sff_cfg)}, {IWL_PCI_DEVICE(0x088F, 0x4260, iwl6035_2agn_cfg)}, {IWL_PCI_DEVICE(0x088F, 0x426A, iwl6035_2agn_sff_cfg)}, {IWL_PCI_DEVICE(0x088E, 0x4460, iwl6035_2agn_cfg)}, {IWL_PCI_DEVICE(0x088E, 0x446A, iwl6035_2agn_sff_cfg)}, {IWL_PCI_DEVICE(0x088E, 0x4860, iwl6035_2agn_cfg)}, {IWL_PCI_DEVICE(0x088F, 0x5260, iwl6035_2agn_cfg)}, /* 105 Series */ {IWL_PCI_DEVICE(0x0894, 0x0022, iwl105_bgn_cfg)}, {IWL_PCI_DEVICE(0x0895, 0x0222, iwl105_bgn_cfg)}, {IWL_PCI_DEVICE(0x0894, 0x0422, iwl105_bgn_cfg)}, {IWL_PCI_DEVICE(0x0894, 0x0822, iwl105_bgn_d_cfg)}, /* 135 Series */ {IWL_PCI_DEVICE(0x0892, 0x0062, iwl135_bgn_cfg)}, {IWL_PCI_DEVICE(0x0893, 0x0262, iwl135_bgn_cfg)}, {IWL_PCI_DEVICE(0x0892, 0x0462, iwl135_bgn_cfg)}, #endif /* CONFIG_IWLDVM */ #if IS_ENABLED(CONFIG_IWLMVM) /* 7260 Series */ {IWL_PCI_DEVICE(0x08B1, 0x4070, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0x4072, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0x4170, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0x4C60, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0x4C70, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0x4060, iwl7260_2n_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0x406A, iwl7260_2n_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0x4160, iwl7260_2n_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0x4062, iwl7260_n_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0x4162, iwl7260_n_cfg)}, {IWL_PCI_DEVICE(0x08B2, 0x4270, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B2, 0x4272, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B2, 0x4260, iwl7260_2n_cfg)}, {IWL_PCI_DEVICE(0x08B2, 0x426A, iwl7260_2n_cfg)}, {IWL_PCI_DEVICE(0x08B2, 0x4262, iwl7260_n_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0x4470, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0x4472, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0x4460, iwl7260_2n_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0x446A, iwl7260_2n_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0x4462, iwl7260_n_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0x4870, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0x486E, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0x4A70, iwl7260_2ac_cfg_high_temp)}, {IWL_PCI_DEVICE(0x08B1, 0x4A6E, iwl7260_2ac_cfg_high_temp)}, 
{IWL_PCI_DEVICE(0x08B1, 0x4A6C, iwl7260_2ac_cfg_high_temp)}, {IWL_PCI_DEVICE(0x08B1, 0x4570, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0x4560, iwl7260_2n_cfg)}, {IWL_PCI_DEVICE(0x08B2, 0x4370, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B2, 0x4360, iwl7260_2n_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0x5070, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0x5072, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0x5170, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0x5770, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0x4020, iwl7260_2n_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0x402A, iwl7260_2n_cfg)}, {IWL_PCI_DEVICE(0x08B2, 0x4220, iwl7260_2n_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0x4420, iwl7260_2n_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0xC070, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0xC072, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0xC170, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0xC060, iwl7260_2n_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0xC06A, iwl7260_2n_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0xC160, iwl7260_2n_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0xC062, iwl7260_n_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0xC162, iwl7260_n_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0xC770, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0xC760, iwl7260_2n_cfg)}, {IWL_PCI_DEVICE(0x08B2, 0xC270, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0xCC70, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0xCC60, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B2, 0xC272, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B2, 0xC260, iwl7260_2n_cfg)}, {IWL_PCI_DEVICE(0x08B2, 0xC26A, iwl7260_n_cfg)}, {IWL_PCI_DEVICE(0x08B2, 0xC262, iwl7260_n_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0xC470, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0xC472, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0xC460, iwl7260_2n_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0xC462, iwl7260_n_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0xC570, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0xC560, iwl7260_2n_cfg)}, {IWL_PCI_DEVICE(0x08B2, 0xC370, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0xC360, iwl7260_2n_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0xC020, iwl7260_2n_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0xC02A, iwl7260_2n_cfg)}, {IWL_PCI_DEVICE(0x08B2, 0xC220, iwl7260_2n_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0xC420, iwl7260_2n_cfg)}, /* 3160 Series */ {IWL_PCI_DEVICE(0x08B3, 0x0070, iwl3160_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B3, 0x0072, iwl3160_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B3, 0x0170, iwl3160_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B3, 0x0172, iwl3160_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B3, 0x0060, iwl3160_2n_cfg)}, {IWL_PCI_DEVICE(0x08B3, 0x0062, iwl3160_n_cfg)}, {IWL_PCI_DEVICE(0x08B4, 0x0270, iwl3160_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B4, 0x0272, iwl3160_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B3, 0x0470, iwl3160_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B3, 0x0472, iwl3160_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B4, 0x0370, iwl3160_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B3, 0x8070, iwl3160_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B3, 0x8072, iwl3160_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B3, 0x8170, iwl3160_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B3, 0x8172, iwl3160_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B3, 0x8060, iwl3160_2n_cfg)}, {IWL_PCI_DEVICE(0x08B3, 0x8062, iwl3160_n_cfg)}, {IWL_PCI_DEVICE(0x08B4, 0x8270, iwl3160_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B4, 0x8370, iwl3160_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B4, 0x8272, iwl3160_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B3, 0x8470, iwl3160_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B3, 0x8570, iwl3160_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B3, 0x1070, iwl3160_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B3, 0x1170, iwl3160_2ac_cfg)}, /* 3165 Series */ {IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)}, {IWL_PCI_DEVICE(0x3165, 0x4012, iwl3165_2ac_cfg)}, {IWL_PCI_DEVICE(0x3166, 0x4212, iwl3165_2ac_cfg)}, {IWL_PCI_DEVICE(0x3165, 
0x4410, iwl3165_2ac_cfg)}, {IWL_PCI_DEVICE(0x3165, 0x4510, iwl3165_2ac_cfg)}, {IWL_PCI_DEVICE(0x3165, 0x4110, iwl3165_2ac_cfg)}, {IWL_PCI_DEVICE(0x3166, 0x4310, iwl3165_2ac_cfg)}, {IWL_PCI_DEVICE(0x3166, 0x4210, iwl3165_2ac_cfg)}, {IWL_PCI_DEVICE(0x3165, 0x8010, iwl3165_2ac_cfg)}, {IWL_PCI_DEVICE(0x3165, 0x8110, iwl3165_2ac_cfg)}, /* 3168 Series */ {IWL_PCI_DEVICE(0x24FB, 0x2010, iwl3168_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FB, 0x2110, iwl3168_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FB, 0x2050, iwl3168_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FB, 0x2150, iwl3168_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FB, 0x0000, iwl3168_2ac_cfg)}, /* 7265 Series */ {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5110, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5100, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_n_cfg)}, {IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5C10, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5412, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5510, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5400, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5000, iwl7265_2n_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x500A, iwl7265_2n_cfg)}, {IWL_PCI_DEVICE(0x095B, 0x5200, iwl7265_2n_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5002, iwl7265_n_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5102, iwl7265_n_cfg)}, {IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9012, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x900A, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095B, 0x9210, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095B, 0x9200, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095B, 0x9310, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5020, iwl7265_2n_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x502A, iwl7265_2n_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5420, iwl7265_2n_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5090, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5190, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5590, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095B, 0x5290, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5490, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5F10, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095B, 0x5212, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095B, 0x520A, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9000, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9400, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9E10, iwl7265_2ac_cfg)}, /* 8000 Series */ {IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x1010, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x10B0, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0130, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x1130, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0132, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x1132, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0110, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x01F0, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0012, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x1012, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x1110, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0050, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0250, iwl8260_2ac_cfg)}, 
{IWL_PCI_DEVICE(0x24F3, 0x1050, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0150, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x1150, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F4, 0x0030, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F4, 0x1030, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0xC010, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0xC110, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0xD010, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0xC050, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0xD050, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0xD0B0, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0xB0B0, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x8010, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x8110, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x9010, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x9110, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F4, 0x8030, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F4, 0x9030, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F4, 0xC030, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F4, 0xD030, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x8130, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x9130, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x8132, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x9132, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x8050, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x8150, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x9050, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x9150, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0004, iwl8260_2n_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0044, iwl8260_2n_cfg)}, {IWL_PCI_DEVICE(0x24F5, 0x0010, iwl4165_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F6, 0x0030, iwl4165_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0810, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0910, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0850, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0950, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0930, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0000, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x4010, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x0010, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x0110, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x1110, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x1130, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x0130, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x1010, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x10D0, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x0050, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x0150, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x9010, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x8110, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x8050, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x8010, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x0810, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x9110, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x8130, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x0910, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x0930, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x0950, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x0850, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x1014, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x3E02, iwl8275_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x3E01, iwl8275_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x1012, iwl8275_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x0012, iwl8275_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x0014, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x9074, iwl8265_2ac_cfg)}, /* 9000 Series */ {IWL_PCI_DEVICE(0x2526, PCI_ANY_ID, iwl9000_trans_cfg)}, {IWL_PCI_DEVICE(0x271B, PCI_ANY_ID, iwl9000_trans_cfg)}, {IWL_PCI_DEVICE(0x271C, PCI_ANY_ID, iwl9000_trans_cfg)}, 
{IWL_PCI_DEVICE(0x30DC, PCI_ANY_ID, iwl9560_long_latency_trans_cfg)}, {IWL_PCI_DEVICE(0x31DC, PCI_ANY_ID, iwl9560_shared_clk_trans_cfg)}, {IWL_PCI_DEVICE(0x9DF0, PCI_ANY_ID, iwl9560_trans_cfg)}, {IWL_PCI_DEVICE(0xA370, PCI_ANY_ID, iwl9560_trans_cfg)}, /* Qu devices */ {IWL_PCI_DEVICE(0x02F0, PCI_ANY_ID, iwl_qu_trans_cfg)}, {IWL_PCI_DEVICE(0x06F0, PCI_ANY_ID, iwl_qu_trans_cfg)}, {IWL_PCI_DEVICE(0x34F0, PCI_ANY_ID, iwl_qu_medium_latency_trans_cfg)}, {IWL_PCI_DEVICE(0x3DF0, PCI_ANY_ID, iwl_qu_medium_latency_trans_cfg)}, {IWL_PCI_DEVICE(0x4DF0, PCI_ANY_ID, iwl_qu_medium_latency_trans_cfg)}, {IWL_PCI_DEVICE(0x43F0, PCI_ANY_ID, iwl_qu_long_latency_trans_cfg)}, {IWL_PCI_DEVICE(0xA0F0, PCI_ANY_ID, iwl_qu_long_latency_trans_cfg)}, {IWL_PCI_DEVICE(0x2720, PCI_ANY_ID, iwl_qnj_trans_cfg)}, {IWL_PCI_DEVICE(0x2723, PCI_ANY_ID, iwl_ax200_trans_cfg)}, /* So devices */ {IWL_PCI_DEVICE(0x2725, PCI_ANY_ID, iwl_so_trans_cfg)}, {IWL_PCI_DEVICE(0x2726, PCI_ANY_ID, iwl_snj_trans_cfg)}, - {IWL_PCI_DEVICE(0x7A70, PCI_ANY_ID, iwl_so_long_latency_trans_cfg)}, + {IWL_PCI_DEVICE(0x7A70, PCI_ANY_ID, iwl_so_long_latency_imr_trans_cfg)}, {IWL_PCI_DEVICE(0x7AF0, PCI_ANY_ID, iwl_so_trans_cfg)}, {IWL_PCI_DEVICE(0x51F0, PCI_ANY_ID, iwl_so_long_latency_trans_cfg)}, + {IWL_PCI_DEVICE(0x51F1, PCI_ANY_ID, iwl_so_long_latency_imr_trans_cfg)}, {IWL_PCI_DEVICE(0x54F0, PCI_ANY_ID, iwl_so_long_latency_trans_cfg)}, {IWL_PCI_DEVICE(0x7F70, PCI_ANY_ID, iwl_so_trans_cfg)}, /* Ma devices */ {IWL_PCI_DEVICE(0x2729, PCI_ANY_ID, iwl_ma_trans_cfg)}, {IWL_PCI_DEVICE(0x7E40, PCI_ANY_ID, iwl_ma_trans_cfg)}, /* Bz devices */ {IWL_PCI_DEVICE(0x2727, PCI_ANY_ID, iwl_bz_trans_cfg)}, {IWL_PCI_DEVICE(0xA840, PCI_ANY_ID, iwl_bz_trans_cfg)}, {IWL_PCI_DEVICE(0x7740, PCI_ANY_ID, iwl_bz_trans_cfg)}, #endif /* CONFIG_IWLMVM */ {0} }; MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids); #define _IWL_DEV_INFO(_device, _subdevice, _mac_type, _mac_step, _rf_type, \ _rf_id, _no_160, _cores, _cdb, _jacket, _cfg, _name) \ { .device = (_device), .subdevice = (_subdevice), .cfg = &(_cfg), \ .name = _name, .mac_type = _mac_type, .rf_type = _rf_type, \ .no_160 = _no_160, .cores = _cores, .rf_id = _rf_id, \ .mac_step = _mac_step, .cdb = _cdb, .jacket = _jacket } #define IWL_DEV_INFO(_device, _subdevice, _cfg, _name) \ _IWL_DEV_INFO(_device, _subdevice, IWL_CFG_ANY, IWL_CFG_ANY, \ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, \ IWL_CFG_ANY, IWL_CFG_ANY, _cfg, _name) static const struct iwl_dev_info iwl_dev_info_table[] = { #if IS_ENABLED(CONFIG_IWLMVM) /* 9000 */ IWL_DEV_INFO(0x2526, 0x1550, iwl9260_2ac_cfg, iwl9260_killer_1550_name), IWL_DEV_INFO(0x2526, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_name), IWL_DEV_INFO(0x2526, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name), IWL_DEV_INFO(0x30DC, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_name), IWL_DEV_INFO(0x30DC, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name), IWL_DEV_INFO(0x31DC, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_name), IWL_DEV_INFO(0x31DC, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name), IWL_DEV_INFO(0xA370, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_name), IWL_DEV_INFO(0xA370, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name), IWL_DEV_INFO(0x54F0, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_160_name), IWL_DEV_INFO(0x54F0, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name), IWL_DEV_INFO(0x51F0, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_160_name), IWL_DEV_INFO(0x51F0, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_160_name), 
IWL_DEV_INFO(0x51F0, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name), IWL_DEV_INFO(0x51F0, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name), IWL_DEV_INFO(0x54F0, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name), IWL_DEV_INFO(0x54F0, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name), IWL_DEV_INFO(0x7A70, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name), IWL_DEV_INFO(0x7A70, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name), IWL_DEV_INFO(0x271C, 0x0214, iwl9260_2ac_cfg, iwl9260_1_name), IWL_DEV_INFO(0x7E40, 0x1691, iwl_cfg_ma_a0_gf4_a0, iwl_ax411_killer_1690s_name), IWL_DEV_INFO(0x7E40, 0x1692, iwl_cfg_ma_a0_gf4_a0, iwl_ax411_killer_1690i_name), /* AX200 */ IWL_DEV_INFO(0x2723, IWL_CFG_ANY, iwl_ax200_cfg_cc, iwl_ax200_name), IWL_DEV_INFO(0x2723, 0x1653, iwl_ax200_cfg_cc, iwl_ax200_killer_1650w_name), IWL_DEV_INFO(0x2723, 0x1654, iwl_ax200_cfg_cc, iwl_ax200_killer_1650x_name), /* Qu with Hr */ IWL_DEV_INFO(0x43F0, 0x0070, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x43F0, 0x0074, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x43F0, 0x0078, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x43F0, 0x007C, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x43F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, iwl_ax201_killer_1650s_name), IWL_DEV_INFO(0x43F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, iwl_ax201_killer_1650i_name), IWL_DEV_INFO(0x43F0, 0x2074, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x43F0, 0x4070, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x43F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, iwl_ax201_killer_1650s_name), IWL_DEV_INFO(0xA0F0, 0x0070, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0xA0F0, 0x0074, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0xA0F0, 0x0078, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0xA0F0, 0x007C, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0xA0F0, 0x0A10, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0xA0F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, NULL), IWL_DEV_INFO(0xA0F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL), IWL_DEV_INFO(0xA0F0, 0x2074, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0xA0F0, 0x4070, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0xA0F0, 0x6074, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x02F0, 0x0070, iwl_ax201_cfg_quz_hr, NULL), IWL_DEV_INFO(0x02F0, 0x0074, iwl_ax201_cfg_quz_hr, NULL), IWL_DEV_INFO(0x02F0, 0x6074, iwl_ax201_cfg_quz_hr, NULL), IWL_DEV_INFO(0x02F0, 0x0078, iwl_ax201_cfg_quz_hr, NULL), IWL_DEV_INFO(0x02F0, 0x007C, iwl_ax201_cfg_quz_hr, NULL), IWL_DEV_INFO(0x02F0, 0x0310, iwl_ax201_cfg_quz_hr, NULL), IWL_DEV_INFO(0x02F0, 0x1651, iwl_ax1650s_cfg_quz_hr, NULL), IWL_DEV_INFO(0x02F0, 0x1652, iwl_ax1650i_cfg_quz_hr, NULL), IWL_DEV_INFO(0x02F0, 0x2074, iwl_ax201_cfg_quz_hr, NULL), IWL_DEV_INFO(0x02F0, 0x4070, iwl_ax201_cfg_quz_hr, NULL), IWL_DEV_INFO(0x06F0, 0x0070, iwl_ax201_cfg_quz_hr, NULL), IWL_DEV_INFO(0x06F0, 0x0074, iwl_ax201_cfg_quz_hr, NULL), IWL_DEV_INFO(0x06F0, 0x0078, iwl_ax201_cfg_quz_hr, NULL), IWL_DEV_INFO(0x06F0, 0x007C, iwl_ax201_cfg_quz_hr, NULL), IWL_DEV_INFO(0x06F0, 0x0310, iwl_ax201_cfg_quz_hr, NULL), IWL_DEV_INFO(0x06F0, 0x1651, iwl_ax1650s_cfg_quz_hr, NULL), IWL_DEV_INFO(0x06F0, 0x1652, iwl_ax1650i_cfg_quz_hr, NULL), IWL_DEV_INFO(0x06F0, 0x2074, iwl_ax201_cfg_quz_hr, NULL), IWL_DEV_INFO(0x06F0, 0x4070, iwl_ax201_cfg_quz_hr, NULL), IWL_DEV_INFO(0x34F0, 0x0070, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x34F0, 0x0074, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x34F0, 0x0078, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x34F0, 0x007C, iwl_ax201_cfg_qu_hr, NULL), 
IWL_DEV_INFO(0x34F0, 0x0310, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x34F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, NULL), IWL_DEV_INFO(0x34F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL), IWL_DEV_INFO(0x34F0, 0x2074, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x34F0, 0x4070, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x3DF0, 0x0070, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x3DF0, 0x0074, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x3DF0, 0x0078, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x3DF0, 0x007C, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x3DF0, 0x0310, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x3DF0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, NULL), IWL_DEV_INFO(0x3DF0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL), IWL_DEV_INFO(0x3DF0, 0x2074, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x3DF0, 0x4070, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x4DF0, 0x0070, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x4DF0, 0x0074, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x4DF0, 0x0078, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x4DF0, 0x007C, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x4DF0, 0x0310, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x4DF0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, NULL), IWL_DEV_INFO(0x4DF0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL), IWL_DEV_INFO(0x4DF0, 0x2074, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x4DF0, 0x4070, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x4DF0, 0x6074, iwl_ax201_cfg_qu_hr, NULL), /* So with HR */ IWL_DEV_INFO(0x2725, 0x0090, iwlax211_2ax_cfg_so_gf_a0, NULL), IWL_DEV_INFO(0x2725, 0x0020, iwlax210_2ax_cfg_ty_gf_a0, NULL), IWL_DEV_INFO(0x2725, 0x2020, iwlax210_2ax_cfg_ty_gf_a0, NULL), IWL_DEV_INFO(0x2725, 0x0024, iwlax210_2ax_cfg_ty_gf_a0, NULL), IWL_DEV_INFO(0x2725, 0x0310, iwlax210_2ax_cfg_ty_gf_a0, NULL), IWL_DEV_INFO(0x2725, 0x0510, iwlax210_2ax_cfg_ty_gf_a0, NULL), IWL_DEV_INFO(0x2725, 0x0A10, iwlax210_2ax_cfg_ty_gf_a0, NULL), IWL_DEV_INFO(0x2725, 0xE020, iwlax210_2ax_cfg_ty_gf_a0, NULL), IWL_DEV_INFO(0x2725, 0xE024, iwlax210_2ax_cfg_ty_gf_a0, NULL), IWL_DEV_INFO(0x2725, 0x4020, iwlax210_2ax_cfg_ty_gf_a0, NULL), IWL_DEV_INFO(0x2725, 0x6020, iwlax210_2ax_cfg_ty_gf_a0, NULL), IWL_DEV_INFO(0x2725, 0x6024, iwlax210_2ax_cfg_ty_gf_a0, NULL), IWL_DEV_INFO(0x2725, 0x1673, iwlax210_2ax_cfg_ty_gf_a0, iwl_ax210_killer_1675w_name), IWL_DEV_INFO(0x2725, 0x1674, iwlax210_2ax_cfg_ty_gf_a0, iwl_ax210_killer_1675x_name), IWL_DEV_INFO(0x7A70, 0x0090, iwlax211_2ax_cfg_so_gf_a0_long, NULL), IWL_DEV_INFO(0x7A70, 0x0098, iwlax211_2ax_cfg_so_gf_a0_long, NULL), IWL_DEV_INFO(0x7A70, 0x00B0, iwlax411_2ax_cfg_so_gf4_a0_long, NULL), IWL_DEV_INFO(0x7A70, 0x0310, iwlax211_2ax_cfg_so_gf_a0_long, NULL), IWL_DEV_INFO(0x7A70, 0x0510, iwlax211_2ax_cfg_so_gf_a0_long, NULL), IWL_DEV_INFO(0x7A70, 0x0A10, iwlax211_2ax_cfg_so_gf_a0_long, NULL), IWL_DEV_INFO(0x7AF0, 0x0090, iwlax211_2ax_cfg_so_gf_a0, NULL), IWL_DEV_INFO(0x7AF0, 0x0098, iwlax211_2ax_cfg_so_gf_a0, NULL), IWL_DEV_INFO(0x7AF0, 0x00B0, iwlax411_2ax_cfg_so_gf4_a0, NULL), IWL_DEV_INFO(0x7AF0, 0x0310, iwlax211_2ax_cfg_so_gf_a0, NULL), IWL_DEV_INFO(0x7AF0, 0x0510, iwlax211_2ax_cfg_so_gf_a0, NULL), IWL_DEV_INFO(0x7AF0, 0x0A10, iwlax211_2ax_cfg_so_gf_a0, NULL), /* So with JF */ IWL_DEV_INFO(0x7A70, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_160_name), IWL_DEV_INFO(0x7A70, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_160_name), IWL_DEV_INFO(0x7AF0, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_160_name), IWL_DEV_INFO(0x7AF0, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_160_name), /* SnJ with HR */ 
IWL_DEV_INFO(0x2725, 0x00B0, iwlax411_2ax_cfg_sosnj_gf4_a0, NULL), IWL_DEV_INFO(0x2726, 0x0090, iwlax211_cfg_snj_gf_a0, NULL), IWL_DEV_INFO(0x2726, 0x0098, iwlax211_cfg_snj_gf_a0, NULL), IWL_DEV_INFO(0x2726, 0x00B0, iwlax411_2ax_cfg_sosnj_gf4_a0, NULL), IWL_DEV_INFO(0x2726, 0x00B4, iwlax411_2ax_cfg_sosnj_gf4_a0, NULL), IWL_DEV_INFO(0x2726, 0x0510, iwlax211_cfg_snj_gf_a0, NULL), IWL_DEV_INFO(0x2726, 0x1651, iwl_cfg_snj_hr_b0, iwl_ax201_killer_1650s_name), IWL_DEV_INFO(0x2726, 0x1652, iwl_cfg_snj_hr_b0, iwl_ax201_killer_1650i_name), IWL_DEV_INFO(0x2726, 0x1691, iwlax411_2ax_cfg_sosnj_gf4_a0, iwl_ax411_killer_1690s_name), IWL_DEV_INFO(0x2726, 0x1692, iwlax411_2ax_cfg_sosnj_gf4_a0, iwl_ax411_killer_1690i_name), IWL_DEV_INFO(0x7F70, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name), IWL_DEV_INFO(0x7F70, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name), /* SO with GF2 */ IWL_DEV_INFO(0x2726, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name), IWL_DEV_INFO(0x2726, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name), IWL_DEV_INFO(0x51F0, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name), IWL_DEV_INFO(0x51F0, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name), IWL_DEV_INFO(0x54F0, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name), IWL_DEV_INFO(0x54F0, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name), IWL_DEV_INFO(0x7A70, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name), IWL_DEV_INFO(0x7A70, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name), IWL_DEV_INFO(0x7AF0, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name), IWL_DEV_INFO(0x7AF0, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name), IWL_DEV_INFO(0x7F70, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name), IWL_DEV_INFO(0x7F70, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name), /* MA with GF2 */ IWL_DEV_INFO(0x7E40, 0x1671, iwl_cfg_ma_a0_gf_a0, iwl_ax211_killer_1675s_name), IWL_DEV_INFO(0x7E40, 0x1672, iwl_cfg_ma_a0_gf_a0, iwl_ax211_killer_1675i_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_2ac_cfg_soc, iwl9461_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_2ac_cfg_soc, iwl9461_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_2ac_cfg_soc, iwl9462_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_2ac_cfg_soc, iwl9462_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_2ac_cfg_soc, iwl9560_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_2ac_cfg_soc, iwl9560_name), _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_PNJ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_160, IWL_CFG_CORES_BT, 
IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9260_2ac_cfg, iwl9461_160_name), _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_PNJ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9260_2ac_cfg, iwl9461_name), _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_PNJ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9260_2ac_cfg, iwl9462_160_name), _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_PNJ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9260_2ac_cfg, iwl9462_name), _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY, IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, IWL_CFG_160, IWL_CFG_CORES_BT_GNSS, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9260_2ac_cfg, iwl9270_160_name), _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY, IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, IWL_CFG_NO_160, IWL_CFG_CORES_BT_GNSS, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9260_2ac_cfg, iwl9270_name), _IWL_DEV_INFO(0x271B, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY, IWL_CFG_RF_TYPE_TH1, IWL_CFG_ANY, IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9260_2ac_cfg, iwl9162_160_name), _IWL_DEV_INFO(0x271B, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY, IWL_CFG_RF_TYPE_TH1, IWL_CFG_ANY, IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9260_2ac_cfg, iwl9162_name), _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY, IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9260_2ac_cfg, iwl9260_160_name), _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY, IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9260_2ac_cfg, iwl9260_name), /* Qu with Jf */ /* Qu B step */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qu_b0_jf_b0_cfg, iwl9461_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qu_b0_jf_b0_cfg, iwl9461_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qu_b0_jf_b0_cfg, iwl9462_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qu_b0_jf_b0_cfg, iwl9462_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qu_b0_jf_b0_cfg, iwl9560_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qu_b0_jf_b0_cfg, iwl9560_name), _IWL_DEV_INFO(IWL_CFG_ANY, 0x1551, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qu_b0_jf_b0_cfg, iwl9560_killer_1550s_name), _IWL_DEV_INFO(IWL_CFG_ANY, 0x1552, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, 
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qu_b0_jf_b0_cfg, iwl9560_killer_1550i_name), /* Qu C step */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qu_c0_jf_b0_cfg, iwl9461_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qu_c0_jf_b0_cfg, iwl9461_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qu_c0_jf_b0_cfg, iwl9462_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qu_c0_jf_b0_cfg, iwl9462_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qu_c0_jf_b0_cfg, iwl9560_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qu_c0_jf_b0_cfg, iwl9560_name), _IWL_DEV_INFO(IWL_CFG_ANY, 0x1551, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qu_c0_jf_b0_cfg, iwl9560_killer_1550s_name), _IWL_DEV_INFO(IWL_CFG_ANY, 0x1552, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qu_c0_jf_b0_cfg, iwl9560_killer_1550i_name), /* QuZ */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_quz_a0_jf_b0_cfg, iwl9461_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_quz_a0_jf_b0_cfg, iwl9461_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_quz_a0_jf_b0_cfg, iwl9462_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_quz_a0_jf_b0_cfg, iwl9462_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_quz_a0_jf_b0_cfg, iwl9560_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_quz_a0_jf_b0_cfg, iwl9560_name), _IWL_DEV_INFO(IWL_CFG_ANY, 0x1551, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_quz_a0_jf_b0_cfg, iwl9560_killer_1550s_name), _IWL_DEV_INFO(IWL_CFG_ANY, 0x1552, 
IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_quz_a0_jf_b0_cfg, iwl9560_killer_1550i_name), /* QnJ */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qnj_b0_jf_b0_cfg, iwl9461_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qnj_b0_jf_b0_cfg, iwl9461_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qnj_b0_jf_b0_cfg, iwl9462_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qnj_b0_jf_b0_cfg, iwl9462_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qnj_b0_jf_b0_cfg, iwl9560_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qnj_b0_jf_b0_cfg, iwl9560_name), _IWL_DEV_INFO(IWL_CFG_ANY, 0x1551, IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qnj_b0_jf_b0_cfg, iwl9560_killer_1550s_name), _IWL_DEV_INFO(IWL_CFG_ANY, 0x1552, IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qnj_b0_jf_b0_cfg, iwl9560_killer_1550i_name), /* Qu with Hr */ /* Qu B step */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_qu_b0_hr1_b0, iwl_ax101_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_qu_b0_hr_b0, iwl_ax203_name), /* Qu C step */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_qu_c0_hr1_b0, iwl_ax101_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_qu_c0_hr_b0, iwl_ax203_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_qu_c0_hr_b0, iwl_ax201_name), /* QuZ */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_quz_a0_hr1_b0, iwl_ax101_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QUZ, SILICON_B_STEP, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_quz_a0_hr_b0, iwl_ax203_name), /* QnJ with Hr */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY, 
IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_qnj_b0_hr_b0_cfg, iwl_ax201_name), /* SnJ with Jf */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_snj_a0_jf_b0, iwl9461_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_snj_a0_jf_b0, iwl9461_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_snj_a0_jf_b0, iwl9462_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_snj_a0_jf_b0, iwl9462_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_snj_a0_jf_b0, iwl9560_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_snj_a0_jf_b0, iwl9560_name), /* SnJ with Hr */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_snj_hr_b0, iwl_ax101_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_snj_hr_b0, iwl_ax201_name), /* Ma */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_ma_a0_hr_b0, iwl_ax201_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY, IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_ma_a0_gf_a0, iwl_ax211_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY, IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_CDB, IWL_CFG_ANY, iwl_cfg_ma_a0_gf4_a0, iwl_ax211_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY, IWL_CFG_RF_TYPE_MR, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_ma_a0_mr_a0, iwl_ax221_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY, IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_ma_a0_fm_a0, iwl_ax231_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_MR, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_snj_a0_mr_a0, iwl_ax221_name), /* So with Hr */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_so_a0_hr_a0, iwl_ax203_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_so_a0_hr_a0, iwl_ax101_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_so_a0_hr_a0, iwl_ax201_name), /* So-F with Hr */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_so_a0_hr_a0, iwl_ax203_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_so_a0_hr_a0, iwl_ax101_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_so_a0_hr_a0, iwl_ax201_name), /* So-F with Gf */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_CDB, IWL_CFG_ANY, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_name), /* Bz */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_bz_a0_hr_b0, iwl_bz_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_bz_a0_gf_a0, iwl_bz_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_CDB, IWL_CFG_ANY, iwl_cfg_bz_a0_gf4_a0, iwl_bz_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_MR, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_bz_a0_mr_a0, iwl_bz_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_bz_a0_fm_a0, iwl_bz_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY, IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_NO_JACKET, iwl_cfg_gl_a0_fm_a0, iwl_bz_name), /* BZ Z step */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_BZ, SILICON_Z_STEP, IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_bz_z0_gf_a0, iwl_bz_name), /* BNJ */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY, IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_IS_JACKET, iwl_cfg_bnj_a0_fm_a0, iwl_bz_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY, IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_CDB, IWL_CFG_IS_JACKET, iwl_cfg_bnj_a0_fm4_a0, iwl_bz_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY, IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_IS_JACKET, iwl_cfg_bnj_a0_gf_a0, iwl_bz_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY, IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_CDB, IWL_CFG_IS_JACKET, iwl_cfg_bnj_a0_gf4_a0, iwl_bz_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY, IWL_CFG_RF_TYPE_HR1, 
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_IS_JACKET, iwl_cfg_bnj_a0_hr_b0, iwl_bz_name), /* SoF with JF2 */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwlax210_2ax_cfg_so_jf_b0, iwl9560_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwlax210_2ax_cfg_so_jf_b0, iwl9560_name), /* SoF with JF */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwlax210_2ax_cfg_so_jf_b0, iwl9461_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwlax210_2ax_cfg_so_jf_b0, iwl9462_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwlax210_2ax_cfg_so_jf_b0, iwl9461_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwlax210_2ax_cfg_so_jf_b0, iwl9462_name), /* SoF with JF2 */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwlax210_2ax_cfg_so_jf_b0, iwl9560_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwlax210_2ax_cfg_so_jf_b0, iwl9560_name), /* SoF with JF */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwlax210_2ax_cfg_so_jf_b0, iwl9461_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwlax210_2ax_cfg_so_jf_b0, iwl9462_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwlax210_2ax_cfg_so_jf_b0, iwl9461_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwlax210_2ax_cfg_so_jf_b0, iwl9462_name), /* So with GF */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_CDB, IWL_CFG_ANY, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_name), /* So with JF2 */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwlax210_2ax_cfg_so_jf_b0, iwl9560_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwlax210_2ax_cfg_so_jf_b0, iwl9560_name), /* So with JF */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwlax210_2ax_cfg_so_jf_b0, iwl9461_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwlax210_2ax_cfg_so_jf_b0, iwl9462_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwlax210_2ax_cfg_so_jf_b0, iwl9461_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwlax210_2ax_cfg_so_jf_b0, iwl9462_name), /* MsP */ /* For now we use the same FW as MR, but this will change in the future. */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, IWL_CFG_RF_TYPE_MS, IWL_CFG_ANY, IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_so_a0_ms_a0, iwl_ax204_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_MS, IWL_CFG_ANY, IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_so_a0_ms_a0, iwl_ax204_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY, IWL_CFG_RF_TYPE_MS, IWL_CFG_ANY, IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_ma_a0_ms_a0, iwl_ax204_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_MS, IWL_CFG_ANY, IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_snj_a0_ms_a0, iwl_ax204_name) #endif /* CONFIG_IWLMVM */ }; /* * In case that there is no OTP on the NIC, get the rf id and cdb info * from the prph registers. 
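 * The CRF ID read below is translated into the same layout as CSR_HW_RF_ID: the RF type lands in bits 12 and up of hw_rf_id, and BIT(28) marks a CDB radio, matching the fields that CSR_HW_RFID_TYPE() and CSR_HW_RFID_IS_CDB() extract in iwl_pci_probe().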
*/ static int get_crf_id(struct iwl_trans *iwl_trans) { int ret = 0; u32 sd_reg_ver_addr; u32 cdb = 0; u32 val; if (iwl_trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) sd_reg_ver_addr = SD_REG_VER_GEN2; else sd_reg_ver_addr = SD_REG_VER; if (!iwl_trans_grab_nic_access(iwl_trans)) { IWL_ERR(iwl_trans, "Failed to grab nic access before reading crf id\n"); ret = -EIO; goto out; } /* Enable access to peripheral registers */ val = iwl_read_umac_prph_no_grab(iwl_trans, WFPM_CTRL_REG); val |= ENABLE_WFPM; iwl_write_umac_prph_no_grab(iwl_trans, WFPM_CTRL_REG, val); /* Read crf info */ val = iwl_read_prph_no_grab(iwl_trans, sd_reg_ver_addr); /* Read cdb info (also contains the jacket info if needed in the future) */ cdb = iwl_read_umac_prph_no_grab(iwl_trans, WFPM_OTP_CFG1_ADDR); /* Map from crf id to rf id */ switch (REG_CRF_ID_TYPE(val)) { case REG_CRF_ID_TYPE_JF_1: iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_JF1 << 12); break; case REG_CRF_ID_TYPE_JF_2: iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_JF2 << 12); break; case REG_CRF_ID_TYPE_HR_NONE_CDB: iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_HR1 << 12); break; case REG_CRF_ID_TYPE_HR_CDB: iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_HR2 << 12); break; case REG_CRF_ID_TYPE_GF: iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_GF << 12); break; case REG_CRF_ID_TYPE_MR: iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_MR << 12); break; case REG_CRF_ID_TYPE_FM: iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_FM << 12); break; default: ret = -EIO; IWL_ERR(iwl_trans, "Can't find a correct rfid for crf id 0x%x\n", REG_CRF_ID_TYPE(val)); goto out_release; } /* Set CDB capabilities */ if (cdb & BIT(4)) { iwl_trans->hw_rf_id += BIT(28); IWL_INFO(iwl_trans, "Adding cdb to rf id\n"); } IWL_INFO(iwl_trans, "Detected RF 0x%x from crf id 0x%x\n", iwl_trans->hw_rf_id, REG_CRF_ID_TYPE(val)); out_release: iwl_trans_release_nic_access(iwl_trans); out: return ret; } /* PCI registers */ #define PCI_CFG_RETRY_TIMEOUT 0x041 static const struct iwl_dev_info * iwl_pci_find_dev_info(u16 device, u16 subsystem_device, u16 mac_type, u8 mac_step, u16 rf_type, u8 cdb, u8 jacket, u8 rf_id, u8 no_160, u8 cores) { int num_devices = ARRAY_SIZE(iwl_dev_info_table); int i; if (!num_devices) return NULL; /* scan backwards so the last matching entry in the table takes precedence */ for (i = num_devices - 1; i >= 0; i--) { const struct iwl_dev_info *dev_info = &iwl_dev_info_table[i]; if (dev_info->device != (u16)IWL_CFG_ANY && dev_info->device != device) continue; if (dev_info->subdevice != (u16)IWL_CFG_ANY && dev_info->subdevice != subsystem_device) continue; if (dev_info->mac_type != (u16)IWL_CFG_ANY && dev_info->mac_type != mac_type) continue; if (dev_info->mac_step != (u8)IWL_CFG_ANY && dev_info->mac_step != mac_step) continue; if (dev_info->rf_type != (u16)IWL_CFG_ANY && dev_info->rf_type != rf_type) continue; if (dev_info->cdb != (u8)IWL_CFG_ANY && dev_info->cdb != cdb) continue; if (dev_info->jacket != (u8)IWL_CFG_ANY && dev_info->jacket != jacket) continue; if (dev_info->rf_id != (u8)IWL_CFG_ANY && dev_info->rf_id != rf_id) continue; if (dev_info->no_160 != (u8)IWL_CFG_ANY && dev_info->no_160 != no_160) continue; if (dev_info->cores != (u8)IWL_CFG_ANY && dev_info->cores != cores) continue; return dev_info; } return NULL; } static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { const struct iwl_cfg_trans_params *trans; const struct iwl_cfg *cfg_7265d __maybe_unused = NULL; const struct iwl_dev_info *dev_info; struct iwl_trans *iwl_trans; struct iwl_trans_pcie *trans_pcie; int ret; const struct iwl_cfg *cfg; trans = (void *)(ent->driver_data & ~TRANS_CFG_MARKER); /*
* This is needed for backwards compatibility with the old * tables, so we don't need to change all the config structs * at the same time. The cfg is used to compare with the old * full cfg structs. */ cfg = (void *)(ent->driver_data & ~TRANS_CFG_MARKER); /* make sure trans is the first element in iwl_cfg */ BUILD_BUG_ON(offsetof(struct iwl_cfg, trans)); iwl_trans = iwl_trans_pcie_alloc(pdev, ent, trans); if (IS_ERR(iwl_trans)) return PTR_ERR(iwl_trans); trans_pcie = IWL_TRANS_GET_PCIE_TRANS(iwl_trans); /* * Let's try to grab NIC access early here. Sometimes, NICs may * fail to initialize, and if that happens it's better if we see * issues early on (and can reprobe, per the logic inside), than * first trying to load the firmware etc. and potentially only * detecting any problems when the first interface is brought up. */ ret = iwl_pcie_prepare_card_hw(iwl_trans); if (!ret) { ret = iwl_finish_nic_init(iwl_trans); if (ret) goto out_free_trans; if (iwl_trans_grab_nic_access(iwl_trans)) { /* all good */ iwl_trans_release_nic_access(iwl_trans); } else { ret = -EIO; goto out_free_trans; } } iwl_trans->hw_rf_id = iwl_read32(iwl_trans, CSR_HW_RF_ID); /* * The RF_ID is set to zero in blank OTP so read version to * extract the RF_ID. * This is relevant only for family 9000 and up. */ if (iwl_trans->trans_cfg->rf_id && iwl_trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_9000 && !CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id) && get_crf_id(iwl_trans)) { ret = -EINVAL; goto out_free_trans; } dev_info = iwl_pci_find_dev_info(pdev->device, pdev->subsystem_device, CSR_HW_REV_TYPE(iwl_trans->hw_rev), iwl_trans->hw_rev_step, CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id), CSR_HW_RFID_IS_CDB(iwl_trans->hw_rf_id), CSR_HW_RFID_IS_JACKET(iwl_trans->hw_rf_id), IWL_SUBDEVICE_RF_ID(pdev->subsystem_device), IWL_SUBDEVICE_NO_160(pdev->subsystem_device), IWL_SUBDEVICE_CORES(pdev->subsystem_device)); if (dev_info) { iwl_trans->cfg = dev_info->cfg; iwl_trans->name = dev_info->name; } #if IS_ENABLED(CONFIG_IWLMVM) /* * Workaround for problematic SnJ device: sometimes when * certain RF modules are connected to SnJ, the device ID * changes to QnJ's ID. So we are using QnJ's trans_cfg until * here. But if we detect that the MAC type is actually SnJ, * we should switch to it here to avoid problems later. */ if (CSR_HW_REV_TYPE(iwl_trans->hw_rev) == IWL_CFG_MAC_TYPE_SNJ) iwl_trans->trans_cfg = &iwl_so_trans_cfg; /* * special-case 7265D, it has the same PCI IDs. * * Note that because we already pass the cfg to the transport above, * all the parameters that the transport uses must, until that is * changed, be identical to the ones in the 7265D configuration. */ if (cfg == &iwl7265_2ac_cfg) cfg_7265d = &iwl7265d_2ac_cfg; else if (cfg == &iwl7265_2n_cfg) cfg_7265d = &iwl7265d_2n_cfg; else if (cfg == &iwl7265_n_cfg) cfg_7265d = &iwl7265d_n_cfg; if (cfg_7265d && (iwl_trans->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D) iwl_trans->cfg = cfg_7265d; /* * This is a hack to switch from Qu B0 to Qu C0. We need to * do this for all cfgs that use Qu B0, except for those using * Jf, which have already been moved to the new table. The * rest must be removed once we convert Qu with Hr as well. 
*/ if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QU_C0) { if (iwl_trans->cfg == &iwl_ax201_cfg_qu_hr) iwl_trans->cfg = &iwl_ax201_cfg_qu_c0_hr_b0; else if (iwl_trans->cfg == &killer1650s_2ax_cfg_qu_b0_hr_b0) iwl_trans->cfg = &killer1650s_2ax_cfg_qu_c0_hr_b0; else if (iwl_trans->cfg == &killer1650i_2ax_cfg_qu_b0_hr_b0) iwl_trans->cfg = &killer1650i_2ax_cfg_qu_c0_hr_b0; } /* same thing for QuZ... */ if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QUZ) { if (iwl_trans->cfg == &iwl_ax201_cfg_qu_hr) iwl_trans->cfg = &iwl_ax201_cfg_quz_hr; else if (iwl_trans->cfg == &killer1650s_2ax_cfg_qu_b0_hr_b0) iwl_trans->cfg = &iwl_ax1650s_cfg_quz_hr; else if (iwl_trans->cfg == &killer1650i_2ax_cfg_qu_b0_hr_b0) iwl_trans->cfg = &iwl_ax1650i_cfg_quz_hr; } #endif /* * If we didn't set the cfg yet, the PCI ID table entry should have * been a full config - if yes, use it, otherwise fail. */ if (!iwl_trans->cfg) { if (ent->driver_data & TRANS_CFG_MARKER) { pr_err("No config found for PCI dev %04x/%04x, rev=0x%x, rfid=0x%x\n", pdev->device, pdev->subsystem_device, iwl_trans->hw_rev, iwl_trans->hw_rf_id); ret = -EINVAL; goto out_free_trans; } iwl_trans->cfg = cfg; } /* if we don't have a name yet, copy name from the old cfg */ if (!iwl_trans->name) iwl_trans->name = iwl_trans->cfg->name; if (iwl_trans->trans_cfg->mq_rx_supported) { if (WARN_ON(!iwl_trans->cfg->num_rbds)) { ret = -EINVAL; goto out_free_trans; } trans_pcie->num_rx_bufs = iwl_trans->cfg->num_rbds; } else { trans_pcie->num_rx_bufs = RX_QUEUE_SIZE; } ret = iwl_trans_init(iwl_trans); if (ret) goto out_free_trans; pci_set_drvdata(pdev, iwl_trans); /* try to get ownership so that we'll know if we don't own it */ iwl_pcie_prepare_card_hw(iwl_trans); iwl_trans->drv = iwl_drv_start(iwl_trans); if (IS_ERR(iwl_trans->drv)) { ret = PTR_ERR(iwl_trans->drv); goto out_free_trans; } /* register transport layer debugfs here */ iwl_trans_pcie_dbgfs_register(iwl_trans); return 0; out_free_trans: iwl_trans_pcie_free(iwl_trans); return ret; } static void iwl_pci_remove(struct pci_dev *pdev) { struct iwl_trans *trans = pci_get_drvdata(pdev); iwl_drv_stop(trans->drv); iwl_trans_pcie_free(trans); } #ifdef CONFIG_PM_SLEEP static int iwl_pci_suspend(struct device *device) { /* Before you put code here, think about WoWLAN. You cannot check here * whether WoWLAN is enabled or not, and your code will run even if * WoWLAN is enabled - don't kill the NIC, someone may need it in Sx. */ return 0; } static int iwl_pci_resume(struct device *device) { struct pci_dev *pdev = to_pci_dev(device); struct iwl_trans *trans = pci_get_drvdata(pdev); struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); /* Before you put code here, think about WoWLAN. You cannot check here * whether WoWLAN is enabled or not, and your code will run even if * WoWLAN is enabled - the NIC may be alive. */ /* * We disable the RETRY_TIMEOUT register (0x41) to keep * PCI Tx retries from interfering with C3 CPU state. */ pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); if (!trans->op_mode) return 0; /* In WOWLAN, let iwl_trans_pcie_d3_resume do the rest of the work */ if (test_bit(STATUS_DEVICE_ENABLED, &trans->status)) return 0; /* reconfigure the MSI-X mapping to get the correct IRQ for rfkill */ iwl_pcie_conf_msix_hw(trans_pcie); /* * Enable rfkill interrupt (in order to keep track of the rfkill * status). Must be locked to avoid processing a possible rfkill * interrupt while in iwl_pcie_check_hw_rf_kill(). 
*/ mutex_lock(&trans_pcie->mutex); iwl_enable_rfkill_int(trans); iwl_pcie_check_hw_rf_kill(trans); mutex_unlock(&trans_pcie->mutex); return 0; } static const struct dev_pm_ops iwl_dev_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(iwl_pci_suspend, iwl_pci_resume) }; #define IWL_PM_OPS (&iwl_dev_pm_ops) #else /* CONFIG_PM_SLEEP */ #define IWL_PM_OPS NULL #endif /* CONFIG_PM_SLEEP */ static struct pci_driver iwl_pci_driver = { .name = DRV_NAME, .id_table = iwl_hw_card_ids, .probe = iwl_pci_probe, .remove = iwl_pci_remove, .driver.pm = IWL_PM_OPS, #if defined(__FreeBSD__) /* Allow iwm(4) to attach for conflicting IDs for now. */ .bsd_probe_return = (BUS_PROBE_DEFAULT - 1), #endif }; int __must_check iwl_pci_register_driver(void) { int ret; ret = pci_register_driver(&iwl_pci_driver); if (ret) pr_err("Unable to initialize PCI module\n"); return ret; } void iwl_pci_unregister_driver(void) { pci_unregister_driver(&iwl_pci_driver); } diff --git a/sys/contrib/dev/iwlwifi/pcie/trans-gen2.c b/sys/contrib/dev/iwlwifi/pcie/trans-gen2.c index 8df725ebda96..be4bc549acea 100644 --- a/sys/contrib/dev/iwlwifi/pcie/trans-gen2.c +++ b/sys/contrib/dev/iwlwifi/pcie/trans-gen2.c @@ -1,467 +1,466 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2017 Intel Deutschland GmbH * Copyright (C) 2018-2021 Intel Corporation */ #if defined(__FreeBSD__) #include #endif #include "iwl-trans.h" #include "iwl-prph.h" #include "iwl-context-info.h" #include "iwl-context-info-gen3.h" #include "internal.h" #include "fw/dbg.h" #define FW_RESET_TIMEOUT (HZ / 5) /* * Start up NIC's basic functionality after it has been reset * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop()) * NOTE: This does not load uCode nor start the embedded processor */ int iwl_pcie_gen2_apm_init(struct iwl_trans *trans) { int ret = 0; IWL_DEBUG_INFO(trans, "Init card's basic functions\n"); /* * Use "set_bit" below rather than "write", to preserve any hardware * bits already set by default after reset. */ /* * Disable L0s without affecting L1; * don't wait for ICH L0s (ICH bug W/A) */ iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS, CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX); /* Set FH wait threshold to maximum (HW error during stress W/A) */ iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL); /* * Enable HAP INTA (interrupt from management bus) to * wake device's PCI Express link L1a -> L0s */ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A); iwl_pcie_apm_config(trans); ret = iwl_finish_nic_init(trans); if (ret) return ret; set_bit(STATUS_DEVICE_ENABLED, &trans->status); return 0; } static void iwl_pcie_gen2_apm_stop(struct iwl_trans *trans, bool op_mode_leave) { IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n"); if (op_mode_leave) { if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status)) iwl_pcie_gen2_apm_init(trans); /* inform ME that we are leaving */ iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, CSR_RESET_LINK_PWR_MGMT_DISABLED); iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_PREPARE | CSR_HW_IF_CONFIG_REG_ENABLE_PME); mdelay(1); iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, CSR_RESET_LINK_PWR_MGMT_DISABLED); mdelay(5); } clear_bit(STATUS_DEVICE_ENABLED, &trans->status); /* Stop device's DMA activity */ iwl_pcie_apm_stop_master(trans); iwl_trans_sw_reset(trans, false); /* * Clear "initialization complete" bit to move adapter from * D0A* (powered-up Active) --> D0U* (Uninitialized) state. 
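 * On Bz-family devices this means clearing the MAC_INIT flag in CSR_GP_CNTRL; earlier families clear INIT_DONE instead.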
*/ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_INIT); else iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); } static void iwl_trans_pcie_fw_reset_handshake(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int ret; trans_pcie->fw_reset_state = FW_RESET_REQUESTED; if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) iwl_write_umac_prph(trans, UREG_NIC_SET_NMI_DRIVER, UREG_NIC_SET_NMI_DRIVER_RESET_HANDSHAKE); else if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210) iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6, UREG_DOORBELL_TO_ISR6_RESET_HANDSHAKE); else iwl_write32(trans, CSR_DOORBELL_VECTOR, UREG_DOORBELL_TO_ISR6_RESET_HANDSHAKE); /* wait 200ms */ ret = wait_event_timeout(trans_pcie->fw_reset_waitq, trans_pcie->fw_reset_state != FW_RESET_REQUESTED, FW_RESET_TIMEOUT); if (!ret || trans_pcie->fw_reset_state == FW_RESET_ERROR) { IWL_INFO(trans, "firmware didn't ACK the reset - continue anyway\n"); iwl_trans_fw_error(trans, true); } trans_pcie->fw_reset_state = FW_RESET_IDLE; } void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); lockdep_assert_held(&trans_pcie->mutex); if (trans_pcie->is_down) return; if (trans->state >= IWL_TRANS_FW_STARTED) if (trans_pcie->fw_reset_handshake) iwl_trans_pcie_fw_reset_handshake(trans); trans_pcie->is_down = true; /* tell the device to stop sending interrupts */ iwl_disable_interrupts(trans); /* device going down, Stop using ICT table */ iwl_pcie_disable_ict(trans); /* * If a HW restart happens during firmware loading, * then the firmware loading might call this function * and later it might be called again due to the * restart. So don't process again if the device is * already dead. */ if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) { IWL_DEBUG_INFO(trans, "DEVICE_ENABLED bit was set and is now cleared\n"); iwl_txq_gen2_tx_free(trans); iwl_pcie_rx_stop(trans); } iwl_pcie_ctxt_info_free_paging(trans); if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) iwl_pcie_ctxt_info_gen3_free(trans, false); else iwl_pcie_ctxt_info_free(trans); /* Stop the device, and put it in low power state */ iwl_pcie_gen2_apm_stop(trans, false); /* re-take ownership to prevent other users from stealing the device */ iwl_trans_sw_reset(trans, true); /* * Upon stop, the IVAR table gets erased, so msi-x won't * work. This causes a bug in RF-KILL flows, since the interrupt * that enables radio won't fire on the correct irq, and the * driver won't be able to handle the interrupt. * Configure the IVAR table again after reset. */ iwl_pcie_conf_msix_hw(trans_pcie); /* * Upon stop, the APM issues an interrupt if HW RF kill is set. * This is a bug in certain versions of the hardware. * Certain devices also keep sending HW RF kill interrupt all * the time, unless the interrupt is ACKed even if the interrupt * should be masked. Re-ACK all the interrupts here.
*/ iwl_disable_interrupts(trans); /* clear all status bits */ clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); clear_bit(STATUS_INT_ENABLED, &trans->status); clear_bit(STATUS_TPOWER_PMI, &trans->status); /* * Even if we stop the HW, we still want the RF kill * interrupt */ iwl_enable_rfkill_int(trans); } void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); bool was_in_rfkill; iwl_op_mode_time_point(trans->op_mode, IWL_FW_INI_TIME_POINT_HOST_DEVICE_DISABLE, NULL); mutex_lock(&trans_pcie->mutex); trans_pcie->opmode_down = true; was_in_rfkill = test_bit(STATUS_RFKILL_OPMODE, &trans->status); _iwl_trans_pcie_gen2_stop_device(trans); iwl_trans_pcie_handle_stop_rfkill(trans, was_in_rfkill); mutex_unlock(&trans_pcie->mutex); } static int iwl_pcie_gen2_nic_init(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int queue_size = max_t(u32, IWL_CMD_QUEUE_SIZE, trans->cfg->min_txq_size); /* TODO: most of the logic can be removed in A0 - but not in Z0 */ spin_lock_bh(&trans_pcie->irq_lock); iwl_pcie_gen2_apm_init(trans); spin_unlock_bh(&trans_pcie->irq_lock); iwl_op_mode_nic_config(trans->op_mode); /* Allocate the RX queue, or reset if it is already allocated */ if (iwl_pcie_gen2_rx_init(trans)) return -ENOMEM; /* Allocate or reset and init all Tx and Command queues */ if (iwl_txq_gen2_init(trans, trans->txqs.cmd.q_id, queue_size)) return -ENOMEM; /* enable shadow regs in HW */ iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF); IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n"); return 0; } static void iwl_pcie_get_rf_name(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); char *buf = trans_pcie->rf_name; size_t buflen = sizeof(trans_pcie->rf_name); size_t pos; u32 version; if (buf[0]) return; switch (CSR_HW_RFID_TYPE(trans->hw_rf_id)) { case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_JF): pos = scnprintf(buf, buflen, "JF"); break; case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_GF): pos = scnprintf(buf, buflen, "GF"); break; case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_GF4): pos = scnprintf(buf, buflen, "GF4"); break; case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HR): pos = scnprintf(buf, buflen, "HR"); break; case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HR1): pos = scnprintf(buf, buflen, "HR1"); break; case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HRCDB): pos = scnprintf(buf, buflen, "HRCDB"); break; default: return; } switch (CSR_HW_RFID_TYPE(trans->hw_rf_id)) { case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HR): case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HR1): case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HRCDB): version = iwl_read_prph(trans, CNVI_MBOX_C); switch (version) { case 0x20000: pos += scnprintf(buf + pos, buflen - pos, " B3"); break; case 0x120000: pos += scnprintf(buf + pos, buflen - pos, " B5"); break; default: pos += scnprintf(buf + pos, buflen - pos, " (0x%x)", version); break; } break; default: break; } pos += scnprintf(buf + pos, buflen - pos, ", rfid=0x%x", trans->hw_rf_id); IWL_INFO(trans, "Detected RF %s\n", buf); /* * also add a \n for debugfs - need to do it after printing * since our IWL_INFO machinery wants to see a static \n at * the end of the string */ pos += scnprintf(buf + pos, buflen - pos, "\n"); } void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); iwl_pcie_reset_ict(trans); /* make sure all queue are not stopped/used */ 
memset(trans->txqs.queue_stopped, 0, sizeof(trans->txqs.queue_stopped)); memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used)); /* now that we got alive we can free the fw image & the context info. * paging memory cannot be freed included since FW will still use it */ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) iwl_pcie_ctxt_info_gen3_free(trans, true); else iwl_pcie_ctxt_info_free(trans); /* * Re-enable all the interrupts, including the RF-Kill one, now that * the firmware is alive. */ iwl_enable_interrupts(trans); mutex_lock(&trans_pcie->mutex); iwl_pcie_check_hw_rf_kill(trans); iwl_pcie_get_rf_name(trans); mutex_unlock(&trans_pcie->mutex); } static void iwl_pcie_set_ltr(struct iwl_trans *trans) { u32 ltr_val = CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ | u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC, CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE) | u32_encode_bits(250, CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL) | CSR_LTR_LONG_VAL_AD_SNOOP_REQ | u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC, CSR_LTR_LONG_VAL_AD_SNOOP_SCALE) | u32_encode_bits(250, CSR_LTR_LONG_VAL_AD_SNOOP_VAL); /* * To workaround hardware latency issues during the boot process, * initialize the LTR to ~250 usec (see ltr_val above). * The firmware initializes this again later (to a smaller value). */ if ((trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210 || trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) && !trans->trans_cfg->integrated) { iwl_write32(trans, CSR_LTR_LONG_VAL_AD, ltr_val); } else if (trans->trans_cfg->integrated && trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) { iwl_write_prph(trans, HPM_MAC_LTR_CSR, HPM_MAC_LRT_ENABLE_ALL); iwl_write_prph(trans, HPM_UMAC_LTR, ltr_val); } } int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans, const struct fw_img *fw, bool run_in_rfkill) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); bool hw_rfkill; int ret; /* This may fail if AMT took ownership of the device */ if (iwl_pcie_prepare_card_hw(trans)) { IWL_WARN(trans, "Exit HW not ready\n"); - ret = -EIO; - goto out; + return -EIO; } iwl_enable_rfkill_int(trans); iwl_write32(trans, CSR_INT, 0xFFFFFFFF); /* * We enabled the RF-Kill interrupt and the handler may very * well be running. Disable the interrupts to make sure no other * interrupt can be fired. 
*/ iwl_disable_interrupts(trans); /* Make sure it finished running */ iwl_pcie_synchronize_irqs(trans); mutex_lock(&trans_pcie->mutex); /* If platform's RF_KILL switch is NOT set to KILL */ hw_rfkill = iwl_pcie_check_hw_rf_kill(trans); if (hw_rfkill && !run_in_rfkill) { ret = -ERFKILL; goto out; } /* Someone called stop_device, don't try to start_fw */ if (trans_pcie->is_down) { IWL_WARN(trans, "Can't start_fw since the HW hasn't been started\n"); ret = -EIO; goto out; } /* make sure rfkill handshake bits are cleared */ iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); /* clear (again), then enable host interrupts */ iwl_write32(trans, CSR_INT, 0xFFFFFFFF); ret = iwl_pcie_gen2_nic_init(trans); if (ret) { IWL_ERR(trans, "Unable to init nic\n"); goto out; } if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) ret = iwl_pcie_ctxt_info_gen3_init(trans, fw); else ret = iwl_pcie_ctxt_info_init(trans, fw); if (ret) goto out; iwl_pcie_set_ltr(trans); if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { iwl_write32(trans, CSR_FUNC_SCRATCH, CSR_FUNC_SCRATCH_INIT_VALUE); iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_ROM_START); } else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { iwl_write_umac_prph(trans, UREG_CPU_INIT_RUN, 1); } else { iwl_write_prph(trans, UREG_CPU_INIT_RUN, 1); } /* re-check RF-Kill state since we may have missed the interrupt */ hw_rfkill = iwl_pcie_check_hw_rf_kill(trans); if (hw_rfkill && !run_in_rfkill) ret = -ERFKILL; out: mutex_unlock(&trans_pcie->mutex); return ret; } diff --git a/sys/contrib/dev/iwlwifi/pcie/trans.c b/sys/contrib/dev/iwlwifi/pcie/trans.c index 05c8e4a0d2a6..7fea426e07e9 100644 --- a/sys/contrib/dev/iwlwifi/pcie/trans.c +++ b/sys/contrib/dev/iwlwifi/pcie/trans.c @@ -1,3780 +1,3779 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2007-2015, 2018-2022 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #include #include #include #include #include #include #include #include #include #include #if defined(__FreeBSD__) #include #endif #include "iwl-drv.h" #include "iwl-trans.h" #include "iwl-csr.h" #include "iwl-prph.h" #include "iwl-scd.h" #include "iwl-agn-hw.h" #include "fw/error-dump.h" #include "fw/dbg.h" #include "fw/api/tx.h" #include "mei/iwl-mei.h" #include "internal.h" #include "iwl-fh.h" #include "iwl-context-info-gen3.h" /* extended range in FW SRAM */ #define IWL_FW_MEM_EXTENDED_START 0x40000 #define IWL_FW_MEM_EXTENDED_END 0x57FFF void iwl_trans_pcie_dump_regs(struct iwl_trans *trans) { #define PCI_DUMP_SIZE 352 #define PCI_MEM_DUMP_SIZE 64 #define PCI_PARENT_DUMP_SIZE 524 #define PREFIX_LEN 32 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct pci_dev *pdev = trans_pcie->pci_dev; u32 i, pos, alloc_size, *ptr, *buf; char *prefix; if (trans_pcie->pcie_dbg_dumped_once) return; /* Should be a multiple of 4 */ BUILD_BUG_ON(PCI_DUMP_SIZE > 4096 || PCI_DUMP_SIZE & 0x3); BUILD_BUG_ON(PCI_MEM_DUMP_SIZE > 4096 || PCI_MEM_DUMP_SIZE & 0x3); BUILD_BUG_ON(PCI_PARENT_DUMP_SIZE > 4096 || PCI_PARENT_DUMP_SIZE & 0x3); /* Alloc a max size buffer */ alloc_size = PCI_ERR_ROOT_ERR_SRC + 4 + PREFIX_LEN; alloc_size = max_t(u32, alloc_size, PCI_DUMP_SIZE + PREFIX_LEN); alloc_size = max_t(u32, alloc_size, PCI_MEM_DUMP_SIZE + PREFIX_LEN); alloc_size = max_t(u32, alloc_size, PCI_PARENT_DUMP_SIZE + 
PREFIX_LEN); buf = kmalloc(alloc_size, GFP_ATOMIC); if (!buf) return; prefix = (char *)buf + alloc_size - PREFIX_LEN; IWL_ERR(trans, "iwlwifi transaction failed, dumping registers\n"); /* Print wifi device registers */ sprintf(prefix, "iwlwifi %s: ", pci_name(pdev)); IWL_ERR(trans, "iwlwifi device config registers:\n"); for (i = 0, ptr = buf; i < PCI_DUMP_SIZE; i += 4, ptr++) if (pci_read_config_dword(pdev, i, ptr)) goto err_read; #if defined(__linux__) print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0); #elif defined(__FreeBSD__) iwl_print_hex_dump(NULL, IWL_DL_ANY, prefix, (u8 *)buf, i); #endif IWL_ERR(trans, "iwlwifi device memory mapped registers:\n"); for (i = 0, ptr = buf; i < PCI_MEM_DUMP_SIZE; i += 4, ptr++) *ptr = iwl_read32(trans, i); #if defined(__linux__) print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0); #elif defined(__FreeBSD__) iwl_print_hex_dump(NULL, IWL_DL_ANY, prefix, (u8 *)buf, i); #endif pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR); if (pos) { IWL_ERR(trans, "iwlwifi device AER capability structure:\n"); for (i = 0, ptr = buf; i < PCI_ERR_ROOT_COMMAND; i += 4, ptr++) if (pci_read_config_dword(pdev, pos + i, ptr)) goto err_read; #if defined(__linux__) print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0); #elif defined(__FreeBSD__) iwl_print_hex_dump(NULL, IWL_DL_ANY, prefix, (u8 *)buf, i); #endif } /* Print parent device registers next */ if (!pdev->bus->self) goto out; pdev = pdev->bus->self; sprintf(prefix, "iwlwifi %s: ", pci_name(pdev)); IWL_ERR(trans, "iwlwifi parent port (%s) config registers:\n", pci_name(pdev)); for (i = 0, ptr = buf; i < PCI_PARENT_DUMP_SIZE; i += 4, ptr++) if (pci_read_config_dword(pdev, i, ptr)) goto err_read; #if defined(__linux__) print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0); #elif defined(__FreeBSD__) iwl_print_hex_dump(NULL, IWL_DL_ANY, prefix, (u8 *)buf, i); #endif /* Print root port AER registers */ pos = 0; pdev = pcie_find_root_port(pdev); if (pdev) pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR); if (pos) { IWL_ERR(trans, "iwlwifi root port (%s) AER cap structure:\n", pci_name(pdev)); sprintf(prefix, "iwlwifi %s: ", pci_name(pdev)); for (i = 0, ptr = buf; i <= PCI_ERR_ROOT_ERR_SRC; i += 4, ptr++) if (pci_read_config_dword(pdev, pos + i, ptr)) goto err_read; #if defined(__linux__) print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0); #elif defined(__FreeBSD__) iwl_print_hex_dump(NULL, IWL_DL_ANY, prefix, (u8 *)buf, i); #endif } goto out; err_read: #if defined(__linux__) print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0); #elif defined(__FreeBSD__) iwl_print_hex_dump(NULL, IWL_DL_ANY, prefix, (u8 *)buf, i); #endif IWL_ERR(trans, "Read failed at 0x%X\n", i); out: trans_pcie->pcie_dbg_dumped_once = 1; kfree(buf); } static int iwl_trans_pcie_sw_reset(struct iwl_trans *trans, bool retake_ownership) { /* Reset entire device - do controller reset (results in SHRD_HW_RST) */ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_SW_RESET); else iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); usleep_range(5000, 6000); if (retake_ownership) return iwl_pcie_prepare_card_hw(trans); return 0; } static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans) { struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon; if (!fw_mon->size) return; dma_free_coherent(trans->dev, fw_mon->size, fw_mon->block, fw_mon->physical); fw_mon->block = NULL; 
fw_mon->physical = 0; fw_mon->size = 0; } static void iwl_pcie_alloc_fw_monitor_block(struct iwl_trans *trans, u8 max_power, u8 min_power) { struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon; void *block = NULL; dma_addr_t physical = 0; u32 size = 0; u8 power; if (fw_mon->size) return; for (power = max_power; power >= min_power; power--) { size = BIT(power); block = dma_alloc_coherent(trans->dev, size, &physical, GFP_KERNEL | __GFP_NOWARN); if (!block) continue; IWL_INFO(trans, "Allocated 0x%08x bytes for firmware monitor.\n", size); break; } if (WARN_ON_ONCE(!block)) return; if (power != max_power) IWL_ERR(trans, "Sorry - debug buffer is only %luK while you requested %luK\n", (unsigned long)BIT(power - 10), (unsigned long)BIT(max_power - 10)); fw_mon->block = block; fw_mon->physical = physical; fw_mon->size = size; } void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power) { if (!max_power) { /* default max_power is maximum */ max_power = 26; } else { max_power += 11; } if (WARN(max_power > 26, "External buffer size for monitor is too big %d, check the FW TLV\n", max_power)) return; if (trans->dbg.fw_mon.size) return; iwl_pcie_alloc_fw_monitor_block(trans, max_power, 11); } static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg) { iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG, ((reg & 0x0000ffff) | (2 << 28))); return iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG); } static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val) { iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG, val); iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG, ((reg & 0x0000ffff) | (3 << 28))); } static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux) { if (trans->cfg->apmg_not_supported) return; if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold)) iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_PWR_SRC_VAUX, ~APMG_PS_CTRL_MSK_PWR_SRC); else iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_PWR_SRC_VMAIN, ~APMG_PS_CTRL_MSK_PWR_SRC); } /* PCI registers */ #define PCI_CFG_RETRY_TIMEOUT 0x041 void iwl_pcie_apm_config(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); u16 lctl; u16 cap; /* * L0S states have been found to be unstable with our devices * and in newer hardware they are not officially supported at * all, so we must always set the L0S_DISABLED bit. */ iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_DISABLED); pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl); trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S); pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap); trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN; IWL_DEBUG_POWER(trans, "L1 %sabled - LTR %sabled\n", (lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis", trans->ltr_enabled ? "En" : "Dis"); } /* * Start up NIC's basic functionality after it has been reset * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop()) * NOTE: This does not load uCode nor start the embedded processor */ static int iwl_pcie_apm_init(struct iwl_trans *trans) { int ret; IWL_DEBUG_INFO(trans, "Init card's basic functions\n"); /* * Use "set_bit" below rather than "write", to preserve any hardware * bits already set by default after reset. 
*/ /* Disable L0S exit timer (platform NMI Work/Around) */ if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000) iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS, CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER); /* * Disable L0s without affecting L1; * don't wait for ICH L0s (ICH bug W/A) */ iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS, CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX); /* Set FH wait threshold to maximum (HW error during stress W/A) */ iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL); /* * Enable HAP INTA (interrupt from management bus) to * wake device's PCI Express link L1a -> L0s */ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A); iwl_pcie_apm_config(trans); /* Configure analog phase-lock-loop before activating to D0A */ if (trans->trans_cfg->base_params->pll_cfg) iwl_set_bit(trans, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL); ret = iwl_finish_nic_init(trans); if (ret) return ret; if (trans->cfg->host_interrupt_operation_mode) { /* * This is a bit of an abuse - This is needed for 7260 / 3160 * only check host_interrupt_operation_mode even if this is * not related to host_interrupt_operation_mode. * * Enable the oscillator to count wake up time for L1 exit. This * consumes slightly more power (100uA) - but allows to be sure * that we wake up from L1 on time. * * This looks weird: read twice the same register, discard the * value, set a bit, and yet again, read that same register * just to discard the value. But that's the way the hardware * seems to like it. */ iwl_read_prph(trans, OSC_CLK); iwl_read_prph(trans, OSC_CLK); iwl_set_bits_prph(trans, OSC_CLK, OSC_CLK_FORCE_CONTROL); iwl_read_prph(trans, OSC_CLK); iwl_read_prph(trans, OSC_CLK); } /* * Enable DMA clock and wait for it to stabilize. * * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" * bits do not disable clocks. This preserves any hardware * bits already set by default in "CLK_CTRL_REG" after reset. */ if (!trans->cfg->apmg_not_supported) { iwl_write_prph(trans, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT); udelay(20); /* Disable L1-Active */ iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG, APMG_PCIDEV_STT_VAL_L1_ACT_DIS); /* Clear the interrupt in APMG if the NIC is in RFKILL */ iwl_write_prph(trans, APMG_RTC_INT_STT_REG, APMG_RTC_INT_STT_RFKILL); } set_bit(STATUS_DEVICE_ENABLED, &trans->status); return 0; } /* * Enable LP XTAL to avoid HW bug where device may consume much power if * FW is not loaded after device reset. LP XTAL is disabled by default * after device HW reset. Do it only if XTAL is fed by internal source. * Configure device's "persistence" mode to avoid resetting XTAL again when * SHRD_HW_RST occurs in S3. */ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans) { int ret; u32 apmg_gp1_reg; u32 apmg_xtal_cfg_reg; u32 dl_cfg_reg; /* Force XTAL ON */ __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_XTAL_ON); ret = iwl_trans_pcie_sw_reset(trans, true); if (!ret) ret = iwl_finish_nic_init(trans); if (WARN_ON(ret)) { /* Release XTAL ON request */ __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_XTAL_ON); return; } /* * Clear "disable persistence" to avoid LP XTAL resetting when * SHRD_HW_RST is applied in S3. */ iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG, APMG_PCIDEV_STT_VAL_PERSIST_DIS); /* * Force APMG XTAL to be active to prevent its disabling by HW * caused by APMG idle state. 
*/ apmg_xtal_cfg_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_XTAL_CFG_REG); iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG, apmg_xtal_cfg_reg | SHR_APMG_XTAL_CFG_XTAL_ON_REQ); ret = iwl_trans_pcie_sw_reset(trans, true); if (ret) IWL_ERR(trans, "iwl_pcie_apm_lp_xtal_enable: failed to retake NIC ownership\n"); /* Enable LP XTAL by indirect access through CSR */ apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG); iwl_trans_pcie_write_shr(trans, SHR_APMG_GP1_REG, apmg_gp1_reg | SHR_APMG_GP1_WF_XTAL_LP_EN | SHR_APMG_GP1_CHICKEN_BIT_SELECT); /* Clear delay line clock power up */ dl_cfg_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_DL_CFG_REG); iwl_trans_pcie_write_shr(trans, SHR_APMG_DL_CFG_REG, dl_cfg_reg & ~SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP); /* * Enable persistence mode to avoid LP XTAL resetting when * SHRD_HW_RST is applied in S3. */ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_PERSIST_MODE); /* * Clear "initialization complete" bit to move adapter from * D0A* (powered-up Active) --> D0U* (Uninitialized) state. */ iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); /* Activates XTAL resources monitor */ __iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG, CSR_MONITOR_XTAL_RESOURCES); /* Release XTAL ON request */ __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_XTAL_ON); udelay(10); /* Release APMG XTAL */ iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG, apmg_xtal_cfg_reg & ~SHR_APMG_XTAL_CFG_XTAL_ON_REQ); } void iwl_pcie_apm_stop_master(struct iwl_trans *trans) { int ret; /* stop device's busmaster DMA activity */ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_REQ); ret = iwl_poll_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_STATUS, CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_STATUS, 100); msleep(100); } else { iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER); ret = iwl_poll_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED, CSR_RESET_REG_FLAG_MASTER_DISABLED, 100); } if (ret < 0) IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n"); IWL_DEBUG_INFO(trans, "stop master\n"); } static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave) { IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n"); if (op_mode_leave) { if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status)) iwl_pcie_apm_init(trans); /* inform ME that we are leaving */ if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000) iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG, APMG_PCIDEV_STT_VAL_WAKE_ME); else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000) { iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, CSR_RESET_LINK_PWR_MGMT_DISABLED); iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_PREPARE | CSR_HW_IF_CONFIG_REG_ENABLE_PME); mdelay(1); iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, CSR_RESET_LINK_PWR_MGMT_DISABLED); } mdelay(5); } clear_bit(STATUS_DEVICE_ENABLED, &trans->status); /* Stop device's DMA activity */ iwl_pcie_apm_stop_master(trans); if (trans->cfg->lp_xtal_workaround) { iwl_pcie_apm_lp_xtal_enable(trans); return; } iwl_trans_pcie_sw_reset(trans, false); /* * Clear "initialization complete" bit to move adapter from * D0A* (powered-up Active) --> D0U* (Uninitialized) state. 
*/ iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); } static int iwl_pcie_nic_init(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int ret; /* nic_init */ spin_lock_bh(&trans_pcie->irq_lock); ret = iwl_pcie_apm_init(trans); spin_unlock_bh(&trans_pcie->irq_lock); if (ret) return ret; iwl_pcie_set_pwr(trans, false); iwl_op_mode_nic_config(trans->op_mode); /* Allocate the RX queue, or reset if it is already allocated */ ret = iwl_pcie_rx_init(trans); if (ret) return ret; /* Allocate or reset and init all Tx and Command queues */ if (iwl_pcie_tx_init(trans)) { iwl_pcie_rx_free(trans); return -ENOMEM; } if (trans->trans_cfg->base_params->shadow_reg_enable) { /* enable shadow regs in HW */ iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF); IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n"); } return 0; } #define HW_READY_TIMEOUT (50) /* Note: returns poll_bit return value, which is >= 0 if success */ static int iwl_pcie_set_hw_ready(struct iwl_trans *trans) { int ret; iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_BIT_NIC_READY); /* See if we got it */ ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, HW_READY_TIMEOUT); if (ret >= 0) iwl_set_bit(trans, CSR_MBOX_SET_REG, CSR_MBOX_SET_REG_OS_ALIVE); IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : ""); return ret; } /* Note: returns standard 0/-ERROR code */ int iwl_pcie_prepare_card_hw(struct iwl_trans *trans) { int ret; int t = 0; int iter; IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n"); ret = iwl_pcie_set_hw_ready(trans); /* If the card is ready, exit 0 */ if (ret >= 0) { trans->csme_own = false; return 0; } iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, CSR_RESET_LINK_PWR_MGMT_DISABLED); usleep_range(1000, 2000); for (iter = 0; iter < 10; iter++) { /* If HW is not ready, prepare the conditions to check again */ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_PREPARE); do { ret = iwl_pcie_set_hw_ready(trans); if (ret >= 0) { trans->csme_own = false; return 0; } if (iwl_mei_is_connected()) { IWL_DEBUG_INFO(trans, "Couldn't prepare the card but SAP is connected\n"); trans->csme_own = true; if (trans->trans_cfg->device_family != IWL_DEVICE_FAMILY_9000) IWL_ERR(trans, "SAP not supported for this NIC family\n"); return -EBUSY; } usleep_range(200, 1000); t += 200; } while (t < 150000); msleep(25); } IWL_ERR(trans, "Couldn't prepare the card\n"); return ret; } /* * ucode */ static void iwl_pcie_load_firmware_chunk_fh(struct iwl_trans *trans, u32 dst_addr, dma_addr_t phy_addr, u32 byte_cnt) { iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL), FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE); iwl_write32(trans, FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), dst_addr); iwl_write32(trans, FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL), phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK); iwl_write32(trans, FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL), (iwl_get_dma_hi_addr(phy_addr) << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt); iwl_write32(trans, FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL), BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM) | BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX) | FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID); iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL), FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE | FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD); } static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans, 
u32 dst_addr, dma_addr_t phy_addr, u32 byte_cnt) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int ret; trans_pcie->ucode_write_complete = false; if (!iwl_trans_grab_nic_access(trans)) return -EIO; iwl_pcie_load_firmware_chunk_fh(trans, dst_addr, phy_addr, byte_cnt); iwl_trans_release_nic_access(trans); ret = wait_event_timeout(trans_pcie->ucode_write_waitq, trans_pcie->ucode_write_complete, 5 * HZ); if (!ret) { IWL_ERR(trans, "Failed to load firmware chunk!\n"); iwl_trans_pcie_dump_regs(trans); return -ETIMEDOUT; } return 0; } static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num, const struct fw_desc *section) { u8 *v_addr; dma_addr_t p_addr; u32 offset, chunk_sz = min_t(u32, FH_MEM_TB_MAX_LENGTH, section->len); int ret = 0; IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n", section_num); v_addr = dma_alloc_coherent(trans->dev, chunk_sz, &p_addr, GFP_KERNEL | __GFP_NOWARN); if (!v_addr) { IWL_DEBUG_INFO(trans, "Falling back to small chunks of DMA\n"); chunk_sz = PAGE_SIZE; v_addr = dma_alloc_coherent(trans->dev, chunk_sz, &p_addr, GFP_KERNEL); if (!v_addr) return -ENOMEM; } for (offset = 0; offset < section->len; offset += chunk_sz) { u32 copy_size, dst_addr; bool extended_addr = false; copy_size = min_t(u32, chunk_sz, section->len - offset); dst_addr = section->offset + offset; if (dst_addr >= IWL_FW_MEM_EXTENDED_START && dst_addr <= IWL_FW_MEM_EXTENDED_END) extended_addr = true; if (extended_addr) iwl_set_bits_prph(trans, LMPM_CHICK, LMPM_CHICK_EXTENDED_ADDR_SPACE); memcpy(v_addr, (const u8 *)section->data + offset, copy_size); ret = iwl_pcie_load_firmware_chunk(trans, dst_addr, p_addr, copy_size); if (extended_addr) iwl_clear_bits_prph(trans, LMPM_CHICK, LMPM_CHICK_EXTENDED_ADDR_SPACE); if (ret) { IWL_ERR(trans, "Could not load the [%d] uCode section\n", section_num); break; } } dma_free_coherent(trans->dev, chunk_sz, v_addr, p_addr); return ret; } static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans, const struct fw_img *image, int cpu, int *first_ucode_section) { int shift_param; int i, ret = 0, sec_num = 0x1; u32 val, last_read_idx = 0; if (cpu == 1) { shift_param = 0; *first_ucode_section = 0; } else { shift_param = 16; (*first_ucode_section)++; } for (i = *first_ucode_section; i < image->num_sec; i++) { last_read_idx = i; /* * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between * CPU1 to CPU2. * PAGING_SEPARATOR_SECTION delimiter - separate between * CPU2 non paged to CPU2 paging sec. 
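 * For example, a dual CPU image is laid out as the CPU1 sections,
 * then CPU1_CPU2_SEPARATOR_SECTION, then the CPU2 sections; the loop
 * below stops at the separator and leaves *first_ucode_section
 * pointing at it, so the following (cpu == 2) call skips it via the
 * increment above.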
*/ if (!image->sec[i].data || image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION || image->sec[i].offset == PAGING_SEPARATOR_SECTION) { IWL_DEBUG_FW(trans, "Break since Data not valid or Empty section, sec = %d\n", i); break; } ret = iwl_pcie_load_section(trans, i, &image->sec[i]); if (ret) return ret; /* Notify ucode of loaded section number and status */ val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS); val = val | (sec_num << shift_param); iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val); sec_num = (sec_num << 1) | 0x1; } *first_ucode_section = last_read_idx; iwl_enable_interrupts(trans); if (trans->trans_cfg->use_tfh) { if (cpu == 1) iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS, 0xFFFF); else iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS, 0xFFFFFFFF); } else { if (cpu == 1) iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 0xFFFF); else iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 0xFFFFFFFF); } return 0; } static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans, const struct fw_img *image, int cpu, int *first_ucode_section) { int i, ret = 0; u32 last_read_idx = 0; if (cpu == 1) *first_ucode_section = 0; else (*first_ucode_section)++; for (i = *first_ucode_section; i < image->num_sec; i++) { last_read_idx = i; /* * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between * CPU1 to CPU2. * PAGING_SEPARATOR_SECTION delimiter - separate between * CPU2 non paged to CPU2 paging sec. */ if (!image->sec[i].data || image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION || image->sec[i].offset == PAGING_SEPARATOR_SECTION) { IWL_DEBUG_FW(trans, "Break since Data not valid or Empty section, sec = %d\n", i); break; } ret = iwl_pcie_load_section(trans, i, &image->sec[i]); if (ret) return ret; } *first_ucode_section = last_read_idx; return 0; } static void iwl_pcie_apply_destination_ini(struct iwl_trans *trans) { enum iwl_fw_ini_allocation_id alloc_id = IWL_FW_INI_ALLOCATION_ID_DBGC1; struct iwl_fw_ini_allocation_tlv *fw_mon_cfg = &trans->dbg.fw_mon_cfg[alloc_id]; struct iwl_dram_data *frag; if (!iwl_trans_dbg_ini_valid(trans)) return; if (le32_to_cpu(fw_mon_cfg->buf_location) == IWL_FW_INI_LOCATION_SRAM_PATH) { IWL_DEBUG_FW(trans, "WRT: Applying SMEM buffer destination\n"); /* set sram monitor by enabling bit 7 */ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_BIT_MONITOR_SRAM); return; } if (le32_to_cpu(fw_mon_cfg->buf_location) != IWL_FW_INI_LOCATION_DRAM_PATH || !trans->dbg.fw_mon_ini[alloc_id].num_frags) return; frag = &trans->dbg.fw_mon_ini[alloc_id].frags[0]; IWL_DEBUG_FW(trans, "WRT: Applying DRAM destination (alloc_id=%u)\n", alloc_id); iwl_write_umac_prph(trans, MON_BUFF_BASE_ADDR_VER2, frag->physical >> MON_BUFF_SHIFT_VER2); iwl_write_umac_prph(trans, MON_BUFF_END_ADDR_VER2, (frag->physical + frag->size - 256) >> MON_BUFF_SHIFT_VER2); } void iwl_pcie_apply_destination(struct iwl_trans *trans) { const struct iwl_fw_dbg_dest_tlv_v1 *dest = trans->dbg.dest_tlv; const struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon; int i; if (iwl_trans_dbg_ini_valid(trans)) { iwl_pcie_apply_destination_ini(trans); return; } IWL_INFO(trans, "Applying debug destination %s\n", get_fw_dbg_mode_string(dest->monitor_mode)); if (dest->monitor_mode == EXTERNAL_MODE) iwl_pcie_alloc_fw_monitor(trans, dest->size_power); else IWL_WARN(trans, "PCI should have external buffer debug\n"); for (i = 0; i < trans->dbg.n_dest_reg; i++) { u32 addr = le32_to_cpu(dest->reg_ops[i].addr); u32 val = le32_to_cpu(dest->reg_ops[i].val); switch (dest->reg_ops[i].op) { case CSR_ASSIGN: iwl_write32(trans, addr, 
val); break; case CSR_SETBIT: iwl_set_bit(trans, addr, BIT(val)); break; case CSR_CLEARBIT: iwl_clear_bit(trans, addr, BIT(val)); break; case PRPH_ASSIGN: iwl_write_prph(trans, addr, val); break; case PRPH_SETBIT: iwl_set_bits_prph(trans, addr, BIT(val)); break; case PRPH_CLEARBIT: iwl_clear_bits_prph(trans, addr, BIT(val)); break; case PRPH_BLOCKBIT: if (iwl_read_prph(trans, addr) & BIT(val)) { IWL_ERR(trans, "BIT(%u) in address 0x%x is 1, stopping FW configuration\n", val, addr); goto monitor; } break; default: IWL_ERR(trans, "FW debug - unknown OP %d\n", dest->reg_ops[i].op); break; } } monitor: if (dest->monitor_mode == EXTERNAL_MODE && fw_mon->size) { iwl_write_prph(trans, le32_to_cpu(dest->base_reg), fw_mon->physical >> dest->base_shift); if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000) iwl_write_prph(trans, le32_to_cpu(dest->end_reg), (fw_mon->physical + fw_mon->size - 256) >> dest->end_shift); else iwl_write_prph(trans, le32_to_cpu(dest->end_reg), (fw_mon->physical + fw_mon->size) >> dest->end_shift); } } static int iwl_pcie_load_given_ucode(struct iwl_trans *trans, const struct fw_img *image) { int ret = 0; int first_ucode_section; IWL_DEBUG_FW(trans, "working with %s CPU\n", image->is_dual_cpus ? "Dual" : "Single"); /* load to FW the binary non secured sections of CPU1 */ ret = iwl_pcie_load_cpu_sections(trans, image, 1, &first_ucode_section); if (ret) return ret; if (image->is_dual_cpus) { /* set CPU2 header address */ iwl_write_prph(trans, LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR, LMPM_SECURE_CPU2_HDR_MEM_SPACE); /* load to FW the binary sections of CPU2 */ ret = iwl_pcie_load_cpu_sections(trans, image, 2, &first_ucode_section); if (ret) return ret; } if (iwl_pcie_dbg_on(trans)) iwl_pcie_apply_destination(trans); iwl_enable_interrupts(trans); /* release CPU reset */ iwl_write32(trans, CSR_RESET, 0); return 0; } static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans, const struct fw_img *image) { int ret = 0; int first_ucode_section; IWL_DEBUG_FW(trans, "working with %s CPU\n", image->is_dual_cpus ? "Dual" : "Single"); if (iwl_pcie_dbg_on(trans)) iwl_pcie_apply_destination(trans); IWL_DEBUG_POWER(trans, "Original WFPM value = 0x%08X\n", iwl_read_prph(trans, WFPM_GP2)); /* * Set default value. On resume reading the values that were * zeroed can provide debug data on the resume flow. * This is for debugging only and has no functional impact.
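 * iwl_trans_pcie_d3_resume() logs WFPM_GP2 again on the way back up,
 * so any bytes of the 0x01010101 pattern that read back as zero show
 * that the register was reset somewhere along the resume flow.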
*/ iwl_write_prph(trans, WFPM_GP2, 0x01010101); /* configure the ucode to be ready to get the secured image */ /* release CPU reset */ iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT); /* load to FW the binary Secured sections of CPU1 */ ret = iwl_pcie_load_cpu_sections_8000(trans, image, 1, &first_ucode_section); if (ret) return ret; /* load to FW the binary sections of CPU2 */ return iwl_pcie_load_cpu_sections_8000(trans, image, 2, &first_ucode_section); } bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); bool hw_rfkill = iwl_is_rfkill_set(trans); bool prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status); bool report; if (hw_rfkill) { set_bit(STATUS_RFKILL_HW, &trans->status); set_bit(STATUS_RFKILL_OPMODE, &trans->status); } else { clear_bit(STATUS_RFKILL_HW, &trans->status); if (trans_pcie->opmode_down) clear_bit(STATUS_RFKILL_OPMODE, &trans->status); } report = test_bit(STATUS_RFKILL_OPMODE, &trans->status); if (prev != report) iwl_trans_pcie_rf_kill(trans, report); return hw_rfkill; } struct iwl_causes_list { u32 cause_num; u32 mask_reg; u8 addr; }; static const struct iwl_causes_list causes_list_common[] = { {MSIX_FH_INT_CAUSES_D2S_CH0_NUM, CSR_MSIX_FH_INT_MASK_AD, 0}, {MSIX_FH_INT_CAUSES_D2S_CH1_NUM, CSR_MSIX_FH_INT_MASK_AD, 0x1}, {MSIX_FH_INT_CAUSES_S2D, CSR_MSIX_FH_INT_MASK_AD, 0x3}, {MSIX_FH_INT_CAUSES_FH_ERR, CSR_MSIX_FH_INT_MASK_AD, 0x5}, {MSIX_HW_INT_CAUSES_REG_ALIVE, CSR_MSIX_HW_INT_MASK_AD, 0x10}, {MSIX_HW_INT_CAUSES_REG_WAKEUP, CSR_MSIX_HW_INT_MASK_AD, 0x11}, {MSIX_HW_INT_CAUSES_REG_RESET_DONE, CSR_MSIX_HW_INT_MASK_AD, 0x12}, {MSIX_HW_INT_CAUSES_REG_CT_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x16}, {MSIX_HW_INT_CAUSES_REG_RF_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x17}, {MSIX_HW_INT_CAUSES_REG_PERIODIC, CSR_MSIX_HW_INT_MASK_AD, 0x18}, {MSIX_HW_INT_CAUSES_REG_SCD, CSR_MSIX_HW_INT_MASK_AD, 0x2A}, {MSIX_HW_INT_CAUSES_REG_FH_TX, CSR_MSIX_HW_INT_MASK_AD, 0x2B}, {MSIX_HW_INT_CAUSES_REG_HW_ERR, CSR_MSIX_HW_INT_MASK_AD, 0x2D}, {MSIX_HW_INT_CAUSES_REG_HAP, CSR_MSIX_HW_INT_MASK_AD, 0x2E}, }; static const struct iwl_causes_list causes_list_pre_bz[] = { {MSIX_HW_INT_CAUSES_REG_SW_ERR, CSR_MSIX_HW_INT_MASK_AD, 0x29}, }; static const struct iwl_causes_list causes_list_bz[] = { - {MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ, CSR_MSIX_HW_INT_MASK_AD, 0x29}, + {MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ, CSR_MSIX_HW_INT_MASK_AD, 0x15}, }; static void iwl_pcie_map_list(struct iwl_trans *trans, const struct iwl_causes_list *causes, int arr_size, int val) { int i; for (i = 0; i < arr_size; i++) { iwl_write8(trans, CSR_MSIX_IVAR(causes[i].addr), val); iwl_clear_bit(trans, causes[i].mask_reg, causes[i].cause_num); } } static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE; /* * Access all non RX causes and map them to the default irq. * In case we are missing at least one interrupt vector, * the first interrupt vector will serve non-RX and FBQ causes. 
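 * E.g. when fewer MSI-X vectors than requested were granted,
 * iwl_pcie_set_interrupt_capa() leaves def_irq at 0, so every cause
 * in causes_list_common is bound to vector 0 (shared with the FBQ)
 * and gets MSIX_NON_AUTO_CLEAR_CAUSE set in its IVAR entry.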
*/ iwl_pcie_map_list(trans, causes_list_common, ARRAY_SIZE(causes_list_common), val); if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) iwl_pcie_map_list(trans, causes_list_bz, ARRAY_SIZE(causes_list_bz), val); else iwl_pcie_map_list(trans, causes_list_pre_bz, ARRAY_SIZE(causes_list_pre_bz), val); } static void iwl_pcie_map_rx_causes(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); u32 offset = trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0; u32 val, idx; /* * The first RX queue - fallback queue, which is designated for * management frame, command responses etc, is always mapped to the * first interrupt vector. The other RX queues are mapped to * the other (N - 2) interrupt vectors. */ val = BIT(MSIX_FH_INT_CAUSES_Q(0)); for (idx = 1; idx < trans->num_rx_queues; idx++) { iwl_write8(trans, CSR_MSIX_RX_IVAR(idx), MSIX_FH_INT_CAUSES_Q(idx - offset)); val |= BIT(MSIX_FH_INT_CAUSES_Q(idx)); } iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~val); val = MSIX_FH_INT_CAUSES_Q(0); if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) val |= MSIX_NON_AUTO_CLEAR_CAUSE; iwl_write8(trans, CSR_MSIX_RX_IVAR(0), val); if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS) iwl_write8(trans, CSR_MSIX_RX_IVAR(1), val); } void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie) { struct iwl_trans *trans = trans_pcie->trans; if (!trans_pcie->msix_enabled) { if (trans->trans_cfg->mq_rx_supported && test_bit(STATUS_DEVICE_ENABLED, &trans->status)) iwl_write_umac_prph(trans, UREG_CHICK, UREG_CHICK_MSI_ENABLE); return; } /* * The IVAR table needs to be configured again after reset, * but if the device is disabled, we can't write to * prph. */ if (test_bit(STATUS_DEVICE_ENABLED, &trans->status)) iwl_write_umac_prph(trans, UREG_CHICK, UREG_CHICK_MSIX_ENABLE); /* * Each cause from the causes list above and the RX causes is * represented as a byte in the IVAR table. The first nibble * represents the bound interrupt vector of the cause, the second * represents no auto clear for this cause. This will be set if its * interrupt vector is bound to serve other causes. */ iwl_pcie_map_rx_causes(trans); iwl_pcie_map_non_rx_causes(trans); } static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie) { struct iwl_trans *trans = trans_pcie->trans; iwl_pcie_conf_msix_hw(trans_pcie); if (!trans_pcie->msix_enabled) return; trans_pcie->fh_init_mask = ~iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD); trans_pcie->fh_mask = trans_pcie->fh_init_mask; trans_pcie->hw_init_mask = ~iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD); trans_pcie->hw_mask = trans_pcie->hw_init_mask; } static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); lockdep_assert_held(&trans_pcie->mutex); if (trans_pcie->is_down) return; trans_pcie->is_down = true; /* tell the device to stop sending interrupts */ iwl_disable_interrupts(trans); /* device going down, Stop using ICT table */ iwl_pcie_disable_ict(trans); /* * If a HW restart happens during firmware loading, * then the firmware loading might call this function * and later it might be called again due to the * restart. So don't process again if the device is * already dead. 
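 * The test_and_clear_bit() below keeps this path idempotent: the
 * TX/RX stop and the APMG DMA clock power-down only run on the first
 * pass, while the device was still marked enabled.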
*/ if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) { IWL_DEBUG_INFO(trans, "DEVICE_ENABLED bit was set and is now cleared\n"); iwl_pcie_tx_stop(trans); iwl_pcie_rx_stop(trans); /* Power-down device's busmaster DMA clocks */ if (!trans->cfg->apmg_not_supported) { iwl_write_prph(trans, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT); udelay(5); } } /* Make sure (redundant) we've released our request to stay awake */ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ); else iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); /* Stop the device, and put it in low power state */ iwl_pcie_apm_stop(trans, false); /* re-take ownership to prevent other users from stealing the device */ iwl_trans_pcie_sw_reset(trans, true); /* * Upon stop, the IVAR table gets erased, so msi-x won't * work. This causes a bug in RF-KILL flows, since the interrupt * that enables radio won't fire on the correct irq, and the * driver won't be able to handle the interrupt. * Configure the IVAR table again after reset. */ iwl_pcie_conf_msix_hw(trans_pcie); /* * Upon stop, the APM issues an interrupt if HW RF kill is set. * This is a bug in certain versions of the hardware. * Certain devices also keep sending HW RF kill interrupt all * the time, unless the interrupt is ACKed even if the interrupt * should be masked. Re-ACK all the interrupts here. */ iwl_disable_interrupts(trans); /* clear all status bits */ clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); clear_bit(STATUS_INT_ENABLED, &trans->status); clear_bit(STATUS_TPOWER_PMI, &trans->status); /* * Even if we stop the HW, we still want the RF kill * interrupt */ iwl_enable_rfkill_int(trans); } void iwl_pcie_synchronize_irqs(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); if (trans_pcie->msix_enabled) { int i; for (i = 0; i < trans_pcie->alloc_vecs; i++) synchronize_irq(trans_pcie->msix_entries[i].vector); } else { synchronize_irq(trans_pcie->pci_dev->irq); } } static int iwl_trans_pcie_start_fw(struct iwl_trans *trans, const struct fw_img *fw, bool run_in_rfkill) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); bool hw_rfkill; int ret; /* This may fail if AMT took ownership of the device */ if (iwl_pcie_prepare_card_hw(trans)) { IWL_WARN(trans, "Exit HW not ready\n"); - ret = -EIO; - goto out; + return -EIO; } iwl_enable_rfkill_int(trans); iwl_write32(trans, CSR_INT, 0xFFFFFFFF); /* * We enabled the RF-Kill interrupt and the handler may very * well be running. Disable the interrupts to make sure no other * interrupt can be fired.
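 * iwl_pcie_synchronize_irqs() right below then waits for any handler
 * that is already executing to return before we take the mutex.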
*/ iwl_disable_interrupts(trans); /* Make sure it finished running */ iwl_pcie_synchronize_irqs(trans); mutex_lock(&trans_pcie->mutex); /* If platform's RF_KILL switch is NOT set to KILL */ hw_rfkill = iwl_pcie_check_hw_rf_kill(trans); if (hw_rfkill && !run_in_rfkill) { ret = -ERFKILL; goto out; } /* Someone called stop_device, don't try to start_fw */ if (trans_pcie->is_down) { IWL_WARN(trans, "Can't start_fw since the HW hasn't been started\n"); ret = -EIO; goto out; } /* make sure rfkill handshake bits are cleared */ iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); /* clear (again), then enable host interrupts */ iwl_write32(trans, CSR_INT, 0xFFFFFFFF); ret = iwl_pcie_nic_init(trans); if (ret) { IWL_ERR(trans, "Unable to init nic\n"); goto out; } /* * Now, we load the firmware and don't want to be interrupted, even * by the RF-Kill interrupt (hence mask all the interrupts besides the * FH_TX interrupt which is needed to load the firmware). If the * RF-Kill switch is toggled, we will find out after having loaded * the firmware and return the proper value to the caller. */ iwl_enable_fw_load_int(trans); /* really make sure rfkill handshake bits are cleared */ iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); /* Load the given image to the HW */ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000) ret = iwl_pcie_load_given_ucode_8000(trans, fw); else ret = iwl_pcie_load_given_ucode(trans, fw); /* re-check RF-Kill state since we may have missed the interrupt */ hw_rfkill = iwl_pcie_check_hw_rf_kill(trans); if (hw_rfkill && !run_in_rfkill) ret = -ERFKILL; out: mutex_unlock(&trans_pcie->mutex); return ret; } static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr) { iwl_pcie_reset_ict(trans); iwl_pcie_tx_start(trans, scd_addr); } void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans, bool was_in_rfkill) { bool hw_rfkill; /* * Check again since the RF kill state may have changed while * all the interrupts were disabled; in this case we couldn't * receive the RF kill interrupt and update the state in the * op_mode. * Don't call the op_mode if the rfkill state hasn't changed. * This allows the op_mode to call stop_device from the rfkill * notification without endless recursion. Under very rare * circumstances, we might have a small recursion if the rfkill * state changed exactly now while we were called from stop_device. * This is very unlikely but can happen and is supported.
*/ hw_rfkill = iwl_is_rfkill_set(trans); if (hw_rfkill) { set_bit(STATUS_RFKILL_HW, &trans->status); set_bit(STATUS_RFKILL_OPMODE, &trans->status); } else { clear_bit(STATUS_RFKILL_HW, &trans->status); clear_bit(STATUS_RFKILL_OPMODE, &trans->status); } if (hw_rfkill != was_in_rfkill) iwl_trans_pcie_rf_kill(trans, hw_rfkill); } static void iwl_trans_pcie_stop_device(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); bool was_in_rfkill; iwl_op_mode_time_point(trans->op_mode, IWL_FW_INI_TIME_POINT_HOST_DEVICE_DISABLE, NULL); mutex_lock(&trans_pcie->mutex); trans_pcie->opmode_down = true; was_in_rfkill = test_bit(STATUS_RFKILL_OPMODE, &trans->status); _iwl_trans_pcie_stop_device(trans); iwl_trans_pcie_handle_stop_rfkill(trans, was_in_rfkill); mutex_unlock(&trans_pcie->mutex); } void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state) { struct iwl_trans_pcie __maybe_unused *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); lockdep_assert_held(&trans_pcie->mutex); IWL_WARN(trans, "reporting RF_KILL (radio %s)\n", state ? "disabled" : "enabled"); if (iwl_op_mode_hw_rf_kill(trans->op_mode, state)) { if (trans->trans_cfg->gen2) _iwl_trans_pcie_gen2_stop_device(trans); else _iwl_trans_pcie_stop_device(trans); } } void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans, bool test, bool reset) { iwl_disable_interrupts(trans); /* * in testing mode, the host stays awake and the * hardware won't be reset (not even partially) */ if (test) return; iwl_pcie_disable_ict(trans); iwl_pcie_synchronize_irqs(trans); iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); if (reset) { /* * reset TX queues -- some of their registers reset during S3 * so if we don't reset everything here the D3 image would try * to execute some invalid memory upon resume */ iwl_trans_pcie_tx_reset(trans); } iwl_pcie_set_pwr(trans, true); } static int iwl_pcie_d3_handshake(struct iwl_trans *trans, bool suspend) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int ret; if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210) { iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6, suspend ? UREG_DOORBELL_TO_ISR6_SUSPEND : UREG_DOORBELL_TO_ISR6_RESUME); } else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { iwl_write32(trans, CSR_IPC_SLEEP_CONTROL, suspend ? CSR_IPC_SLEEP_CONTROL_SUSPEND : CSR_IPC_SLEEP_CONTROL_RESUME); iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6, UREG_DOORBELL_TO_ISR6_SLEEP_CTRL); } else { return 0; } ret = wait_event_timeout(trans_pcie->sx_waitq, trans_pcie->sx_complete, 2 * HZ); /* Invalidate it toward next suspend or resume */ trans_pcie->sx_complete = false; if (!ret) { IWL_ERR(trans, "Timeout %s D3\n", suspend ? 
"entering" : "exiting"); return -ETIMEDOUT; } return 0; } static int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test, bool reset) { int ret; if (!reset) /* Enable persistence mode to avoid reset */ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_PERSIST_MODE); ret = iwl_pcie_d3_handshake(trans, true); if (ret) return ret; iwl_pcie_d3_complete_suspend(trans, test, reset); return 0; } static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, enum iwl_d3_status *status, bool test, bool reset) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); u32 val; int ret; if (test) { iwl_enable_interrupts(trans); *status = IWL_D3_STATUS_ALIVE; ret = 0; goto out; } iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); ret = iwl_finish_nic_init(trans); if (ret) return ret; /* * Reconfigure IVAR table in case of MSIX or reset ict table in * MSI mode since HW reset erased it. * Also enables interrupts - none will happen as * the device doesn't know we're waking it up, only when * the opmode actually tells it after this call. */ iwl_pcie_conf_msix_hw(trans_pcie); if (!trans_pcie->msix_enabled) iwl_pcie_reset_ict(trans); iwl_enable_interrupts(trans); iwl_pcie_set_pwr(trans, false); if (!reset) { iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); } else { iwl_trans_pcie_tx_reset(trans); ret = iwl_pcie_rx_init(trans); if (ret) { IWL_ERR(trans, "Failed to resume the device (RX reset)\n"); return ret; } } IWL_DEBUG_POWER(trans, "WFPM value upon resume = 0x%08X\n", iwl_read_umac_prph(trans, WFPM_GP2)); val = iwl_read32(trans, CSR_RESET); if (val & CSR_RESET_REG_FLAG_NEVO_RESET) *status = IWL_D3_STATUS_RESET; else *status = IWL_D3_STATUS_ALIVE; out: if (*status == IWL_D3_STATUS_ALIVE) ret = iwl_pcie_d3_handshake(trans, false); return ret; } static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev, struct iwl_trans *trans, const struct iwl_cfg_trans_params *cfg_trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int max_irqs, num_irqs, i, ret; u16 pci_cmd; u32 max_rx_queues = IWL_MAX_RX_HW_QUEUES; if (!cfg_trans->mq_rx_supported) goto enable_msi; if (cfg_trans->device_family <= IWL_DEVICE_FAMILY_9000) max_rx_queues = IWL_9000_MAX_RX_HW_QUEUES; max_irqs = min_t(u32, num_online_cpus() + 2, max_rx_queues); for (i = 0; i < max_irqs; i++) trans_pcie->msix_entries[i].entry = i; num_irqs = pci_enable_msix_range(pdev, trans_pcie->msix_entries, MSIX_MIN_INTERRUPT_VECTORS, max_irqs); if (num_irqs < 0) { IWL_DEBUG_INFO(trans, "Failed to enable msi-x mode (ret %d). Moving to msi mode.\n", num_irqs); goto enable_msi; } trans_pcie->def_irq = (num_irqs == max_irqs) ? num_irqs - 1 : 0; IWL_DEBUG_INFO(trans, "MSI-X enabled. %d interrupt vectors were allocated\n", num_irqs); /* * In case the OS provides fewer interrupts than requested, different * causes will share the same interrupt vector as follows: * One interrupt less: non rx causes shared with FBQ. * Two interrupts less: non rx causes shared with FBQ and RSS. * More than two interrupts: we will use fewer RSS queues. 
*/ if (num_irqs <= max_irqs - 2) { trans_pcie->trans->num_rx_queues = num_irqs + 1; trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX | IWL_SHARED_IRQ_FIRST_RSS; } else if (num_irqs == max_irqs - 1) { trans_pcie->trans->num_rx_queues = num_irqs; trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX; } else { trans_pcie->trans->num_rx_queues = num_irqs - 1; } IWL_DEBUG_INFO(trans, "MSI-X enabled with rx queues %d, vec mask 0x%x\n", trans_pcie->trans->num_rx_queues, trans_pcie->shared_vec_mask); WARN_ON(trans_pcie->trans->num_rx_queues > IWL_MAX_RX_HW_QUEUES); trans_pcie->alloc_vecs = num_irqs; trans_pcie->msix_enabled = true; return; enable_msi: ret = pci_enable_msi(pdev); if (ret) { dev_err(&pdev->dev, "pci_enable_msi failed - %d\n", ret); /* enable rfkill interrupt: hw bug w/a */ pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); if (pci_cmd & PCI_COMMAND_INTX_DISABLE) { pci_cmd &= ~PCI_COMMAND_INTX_DISABLE; pci_write_config_word(pdev, PCI_COMMAND, pci_cmd); } } } static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans) { int iter_rx_q, i, ret, cpu, offset; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); i = trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 0 : 1; iter_rx_q = trans_pcie->trans->num_rx_queues - 1 + i; offset = 1 + i; for (; i < iter_rx_q ; i++) { /* * Get the cpu prior to the place to search * (i.e. return will be > i - 1). */ cpu = cpumask_next(i - offset, cpu_online_mask); cpumask_set_cpu(cpu, &trans_pcie->affinity_mask[i]); ret = irq_set_affinity_hint(trans_pcie->msix_entries[i].vector, &trans_pcie->affinity_mask[i]); if (ret) IWL_ERR(trans_pcie->trans, "Failed to set affinity mask for IRQ %d\n", trans_pcie->msix_entries[i].vector); } } static int iwl_pcie_init_msix_handler(struct pci_dev *pdev, struct iwl_trans_pcie *trans_pcie) { int i; for (i = 0; i < trans_pcie->alloc_vecs; i++) { int ret; struct msix_entry *msix_entry; const char *qname = queue_name(&pdev->dev, trans_pcie, i); if (!qname) return -ENOMEM; msix_entry = &trans_pcie->msix_entries[i]; ret = devm_request_threaded_irq(&pdev->dev, msix_entry->vector, iwl_pcie_msix_isr, (i == trans_pcie->def_irq) ? 
iwl_pcie_irq_msix_handler : iwl_pcie_irq_rx_msix_handler, IRQF_SHARED, qname, msix_entry); if (ret) { IWL_ERR(trans_pcie->trans, "Error allocating IRQ %d\n", i); return ret; } } iwl_pcie_irq_set_affinity(trans_pcie->trans); return 0; } static int iwl_trans_pcie_clear_persistence_bit(struct iwl_trans *trans) { u32 hpm, wprot; switch (trans->trans_cfg->device_family) { case IWL_DEVICE_FAMILY_9000: wprot = PREG_PRPH_WPROT_9000; break; case IWL_DEVICE_FAMILY_22000: wprot = PREG_PRPH_WPROT_22000; break; default: return 0; } hpm = iwl_read_umac_prph_no_grab(trans, HPM_DEBUG); if (hpm != 0xa5a5a5a0 && (hpm & PERSISTENCE_BIT)) { u32 wprot_val = iwl_read_umac_prph_no_grab(trans, wprot); if (wprot_val & PREG_WFPM_ACCESS) { IWL_ERR(trans, "Error, can not clear persistence bit\n"); return -EPERM; } iwl_write_umac_prph_no_grab(trans, HPM_DEBUG, hpm & ~PERSISTENCE_BIT); } return 0; } static int iwl_pcie_gen2_force_power_gating(struct iwl_trans *trans) { int ret; ret = iwl_finish_nic_init(trans); if (ret < 0) return ret; iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG, HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE); udelay(20); iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG, HPM_HIPM_GEN_CFG_CR_PG_EN | HPM_HIPM_GEN_CFG_CR_SLP_EN); udelay(20); iwl_clear_bits_prph(trans, HPM_HIPM_GEN_CFG, HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE); return iwl_trans_pcie_sw_reset(trans, true); } static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int err; lockdep_assert_held(&trans_pcie->mutex); err = iwl_pcie_prepare_card_hw(trans); if (err) { IWL_ERR(trans, "Error while preparing HW: %d\n", err); return err; } err = iwl_trans_pcie_clear_persistence_bit(trans); if (err) return err; err = iwl_trans_pcie_sw_reset(trans, true); if (err) return err; if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000 && trans->trans_cfg->integrated) { err = iwl_pcie_gen2_force_power_gating(trans); if (err) return err; } err = iwl_pcie_apm_init(trans); if (err) return err; iwl_pcie_init_msix(trans_pcie); /* From now on, the op_mode will be kept updated about RF kill state */ iwl_enable_rfkill_int(trans); trans_pcie->opmode_down = false; /* Set is_down to false here so that...*/ trans_pcie->is_down = false; /* ...rfkill can call stop_device and set it false if needed */ iwl_pcie_check_hw_rf_kill(trans); return 0; } static int iwl_trans_pcie_start_hw(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int ret; mutex_lock(&trans_pcie->mutex); ret = _iwl_trans_pcie_start_hw(trans); mutex_unlock(&trans_pcie->mutex); return ret; } static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); mutex_lock(&trans_pcie->mutex); /* disable interrupts - don't enable HW RF kill interrupt */ iwl_disable_interrupts(trans); iwl_pcie_apm_stop(trans, true); iwl_disable_interrupts(trans); iwl_pcie_disable_ict(trans); mutex_unlock(&trans_pcie->mutex); iwl_pcie_synchronize_irqs(trans); } #if defined(__linux__) static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val) { writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs); } static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val) { writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs); } static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs) { return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs); } #elif defined(__FreeBSD__) static void iwl_trans_pcie_write8(struct 
iwl_trans *trans, u32 ofs, u8 val) { IWL_DEBUG_PCI_RW(trans, "W1 %#010x %#04x\n", ofs, val); bus_write_1((struct resource *)IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base, ofs, val); } static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val) { IWL_DEBUG_PCI_RW(trans, "W4 %#010x %#010x\n", ofs, val); bus_write_4((struct resource *)IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base, ofs, val); } static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs) { u32 v; v = bus_read_4((struct resource *)IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base, ofs); IWL_DEBUG_PCI_RW(trans, "R4 %#010x %#010x\n", ofs, v); return (v); } #endif static u32 iwl_trans_pcie_prph_msk(struct iwl_trans *trans) { if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) return 0x00FFFFFF; else return 0x000FFFFF; } static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg) { u32 mask = iwl_trans_pcie_prph_msk(trans); iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR, ((reg & mask) | (3 << 24))); return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT); } static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr, u32 val) { u32 mask = iwl_trans_pcie_prph_msk(trans); iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR, ((addr & mask) | (3 << 24))); iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val); } static void iwl_trans_pcie_configure(struct iwl_trans *trans, const struct iwl_trans_config *trans_cfg) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); /* free all first - we might be reconfigured for a different size */ iwl_pcie_free_rbs_pool(trans); trans->txqs.cmd.q_id = trans_cfg->cmd_queue; trans->txqs.cmd.fifo = trans_cfg->cmd_fifo; trans->txqs.cmd.wdg_timeout = trans_cfg->cmd_q_wdg_timeout; trans->txqs.page_offs = trans_cfg->cb_data_offs; trans->txqs.dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void *); trans->txqs.queue_alloc_cmd_ver = trans_cfg->queue_alloc_cmd_ver; if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS)) trans_pcie->n_no_reclaim_cmds = 0; else trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds; if (trans_pcie->n_no_reclaim_cmds) memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds, trans_pcie->n_no_reclaim_cmds * sizeof(u8)); trans_pcie->rx_buf_size = trans_cfg->rx_buf_size; trans_pcie->rx_page_order = iwl_trans_get_rb_size_order(trans_pcie->rx_buf_size); trans_pcie->rx_buf_bytes = iwl_trans_get_rb_size(trans_pcie->rx_buf_size); trans_pcie->supported_dma_mask = DMA_BIT_MASK(12); if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) trans_pcie->supported_dma_mask = DMA_BIT_MASK(11); trans->txqs.bc_table_dword = trans_cfg->bc_table_dword; trans_pcie->scd_set_active = trans_cfg->scd_set_active; trans->command_groups = trans_cfg->command_groups; trans->command_groups_size = trans_cfg->command_groups_size; /* Initialize NAPI here - it should be before registering to mac80211 * in the opmode but after the HW struct is allocated. * As this function may be called again in some corner cases don't * do anything if NAPI was already initialized. 
*/ if (trans_pcie->napi_dev.reg_state != NETREG_DUMMY) init_dummy_netdev(&trans_pcie->napi_dev); trans_pcie->fw_reset_handshake = trans_cfg->fw_reset_handshake; } void iwl_trans_pcie_free(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int i; iwl_pcie_synchronize_irqs(trans); if (trans->trans_cfg->gen2) iwl_txq_gen2_tx_free(trans); else iwl_pcie_tx_free(trans); iwl_pcie_rx_free(trans); if (trans_pcie->rba.alloc_wq) { destroy_workqueue(trans_pcie->rba.alloc_wq); trans_pcie->rba.alloc_wq = NULL; } if (trans_pcie->msix_enabled) { for (i = 0; i < trans_pcie->alloc_vecs; i++) { irq_set_affinity_hint( trans_pcie->msix_entries[i].vector, NULL); } trans_pcie->msix_enabled = false; } else { iwl_pcie_free_ict(trans); } iwl_pcie_free_fw_monitor(trans); if (trans_pcie->pnvm_dram.size) dma_free_coherent(trans->dev, trans_pcie->pnvm_dram.size, trans_pcie->pnvm_dram.block, trans_pcie->pnvm_dram.physical); if (trans_pcie->reduce_power_dram.size) dma_free_coherent(trans->dev, trans_pcie->reduce_power_dram.size, trans_pcie->reduce_power_dram.block, trans_pcie->reduce_power_dram.physical); mutex_destroy(&trans_pcie->mutex); iwl_trans_free(trans); } static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state) { if (state) set_bit(STATUS_TPOWER_PMI, &trans->status); else clear_bit(STATUS_TPOWER_PMI, &trans->status); } struct iwl_trans_pcie_removal { struct pci_dev *pdev; struct work_struct work; }; static void iwl_trans_pcie_removal_wk(struct work_struct *wk) { struct iwl_trans_pcie_removal *removal = container_of(wk, struct iwl_trans_pcie_removal, work); struct pci_dev *pdev = removal->pdev; static char *prop[] = {"EVENT=INACCESSIBLE", NULL}; dev_err(&pdev->dev, "Device gone - attempting removal\n"); kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, prop); pci_lock_rescan_remove(); pci_dev_put(pdev); pci_stop_and_remove_bus_device(pdev); pci_unlock_rescan_remove(); kfree(removal); module_put(THIS_MODULE); } /* * This version doesn't disable BHs but rather assumes they're * already disabled. */ bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans) { int ret; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); u32 write = CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ; u32 mask = CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY | CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP; u32 poll = CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN; spin_lock(&trans_pcie->reg_lock); if (trans_pcie->cmd_hold_nic_awake) goto out; if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { write = CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ; mask = CSR_GP_CNTRL_REG_FLAG_MAC_STATUS; poll = CSR_GP_CNTRL_REG_FLAG_MAC_STATUS; } /* this bit wakes up the NIC */ __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, write); if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000) udelay(2); /* * These bits say the device is running, and should keep running for * at least a short while (at least as long as MAC_ACCESS_REQ stays 1), * but they do not indicate that embedded SRAM is restored yet; * HW with volatile SRAM must save/restore contents to/from * host DRAM when sleeping/waking for power-saving. * Each direction takes approximately 1/4 millisecond; with this * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a * series of register accesses are expected (e.g. reading Event Log), * to keep device from sleeping. * * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that * SRAM is okay/restored. 
We don't check that here because this call * is just for hardware register access; but GP1 MAC_SLEEP * check is a good idea before accessing the SRAM of HW with * volatile SRAM (e.g. reading Event Log). * * 5000 series and later (including 1000 series) have non-volatile SRAM, * and do not save/restore SRAM when power cycling. */ ret = iwl_poll_bit(trans, CSR_GP_CNTRL, poll, mask, 15000); if (unlikely(ret < 0)) { u32 cntrl = iwl_read32(trans, CSR_GP_CNTRL); WARN_ONCE(1, "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n", cntrl); iwl_trans_pcie_dump_regs(trans); if (iwlwifi_mod_params.remove_when_gone && cntrl == ~0U) { struct iwl_trans_pcie_removal *removal; if (test_bit(STATUS_TRANS_DEAD, &trans->status)) goto err; IWL_ERR(trans, "Device gone - scheduling removal!\n"); /* * get a module reference to avoid doing this * while unloading anyway and to avoid * scheduling a work with code that's being * removed. */ if (!try_module_get(THIS_MODULE)) { IWL_ERR(trans, "Module is being unloaded - abort\n"); goto err; } removal = kzalloc(sizeof(*removal), GFP_ATOMIC); if (!removal) { module_put(THIS_MODULE); goto err; } /* * we don't need to clear this flag, because * the trans will be freed and reallocated. */ set_bit(STATUS_TRANS_DEAD, &trans->status); removal->pdev = to_pci_dev(trans->dev); INIT_WORK(&removal->work, iwl_trans_pcie_removal_wk); pci_dev_get(removal->pdev); schedule_work(&removal->work); } else { iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI); } err: spin_unlock(&trans_pcie->reg_lock); return false; } out: /* * Fool sparse by faking we release the lock - sparse will * track nic_access anyway. */ __release(&trans_pcie->reg_lock); return true; } static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans) { bool ret; local_bh_disable(); ret = __iwl_trans_pcie_grab_nic_access(trans); if (ret) { /* keep BHs disabled until iwl_trans_pcie_release_nic_access */ return ret; } local_bh_enable(); return false; } static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); lockdep_assert_held(&trans_pcie->reg_lock); /* * Fool sparse by faking we acquiring the lock - sparse will * track nic_access anyway. */ __acquire(&trans_pcie->reg_lock); if (trans_pcie->cmd_hold_nic_awake) goto out; if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ); else __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); /* * Above we read the CSR_GP_CNTRL register, which will flush * any previous writes, but we need the write that clears the * MAC_ACCESS_REQ bit to be performed before any other writes * scheduled on different CPUs (after we drop reg_lock). 
*/ out: spin_unlock_bh(&trans_pcie->reg_lock); } static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr, void *buf, int dwords) { int offs = 0; u32 *vals = buf; while (offs < dwords) { /* limit the time we spin here under lock to 1/2s */ unsigned long end = jiffies + HZ / 2; bool resched = false; if (iwl_trans_grab_nic_access(trans)) { iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr + 4 * offs); while (offs < dwords) { vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT); offs++; if (time_after(jiffies, end)) { resched = true; break; } } iwl_trans_release_nic_access(trans); if (resched) cond_resched(); } else { return -EBUSY; } } return 0; } static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr, const void *buf, int dwords) { int offs, ret = 0; const u32 *vals = buf; if (iwl_trans_grab_nic_access(trans)) { iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr); for (offs = 0; offs < dwords; offs++) iwl_write32(trans, HBUS_TARG_MEM_WDAT, vals ? vals[offs] : 0); iwl_trans_release_nic_access(trans); } else { ret = -EBUSY; } return ret; } static int iwl_trans_pcie_read_config32(struct iwl_trans *trans, u32 ofs, u32 *val) { return pci_read_config_dword(IWL_TRANS_GET_PCIE_TRANS(trans)->pci_dev, ofs, val); } static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block) { int i; for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) { struct iwl_txq *txq = trans->txqs.txq[i]; if (i == trans->txqs.cmd.q_id) continue; spin_lock_bh(&txq->lock); if (!block && !(WARN_ON_ONCE(!txq->block))) { txq->block--; if (!txq->block) { iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (i << 8)); } } else if (block) { txq->block++; } spin_unlock_bh(&txq->lock); } } #define IWL_FLUSH_WAIT_MS 2000 static int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue, struct iwl_trans_rxq_dma_data *data) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); if (queue >= trans->num_rx_queues || !trans_pcie->rxq) return -EINVAL; data->fr_bd_cb = trans_pcie->rxq[queue].bd_dma; data->urbd_stts_wrptr = trans_pcie->rxq[queue].rb_stts_dma; data->ur_bd_cb = trans_pcie->rxq[queue].used_bd_dma; data->fr_bd_wid = 0; return 0; } static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx) { struct iwl_txq *txq; unsigned long now = jiffies; bool overflow_tx; u8 wr_ptr; /* Make sure the NIC is still alive in the bus */ if (test_bit(STATUS_TRANS_DEAD, &trans->status)) return -ENODEV; if (!test_bit(txq_idx, trans->txqs.queue_used)) return -EINVAL; IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", txq_idx); txq = trans->txqs.txq[txq_idx]; spin_lock_bh(&txq->lock); overflow_tx = txq->overflow_tx || !skb_queue_empty(&txq->overflow_q); spin_unlock_bh(&txq->lock); wr_ptr = READ_ONCE(txq->write_ptr); while ((txq->read_ptr != READ_ONCE(txq->write_ptr) || overflow_tx) && !time_after(jiffies, now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) { u8 write_ptr = READ_ONCE(txq->write_ptr); /* * If write pointer moved during the wait, warn only * if the TX came from op mode. In case TX came from * trans layer (overflow TX) don't warn. 
*/ if (WARN_ONCE(wr_ptr != write_ptr && !overflow_tx, "WR pointer moved while flushing %d -> %d\n", wr_ptr, write_ptr)) return -ETIMEDOUT; wr_ptr = write_ptr; usleep_range(1000, 2000); spin_lock_bh(&txq->lock); overflow_tx = txq->overflow_tx || !skb_queue_empty(&txq->overflow_q); spin_unlock_bh(&txq->lock); } if (txq->read_ptr != txq->write_ptr) { IWL_ERR(trans, "fail to flush all tx fifo queues Q %d\n", txq_idx); iwl_txq_log_scd_error(trans, txq); return -ETIMEDOUT; } IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", txq_idx); return 0; } static int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm) { int cnt; int ret = 0; /* waiting for all the tx frames complete might take a while */ for (cnt = 0; cnt < trans->trans_cfg->base_params->num_of_queues; cnt++) { if (cnt == trans->txqs.cmd.q_id) continue; if (!test_bit(cnt, trans->txqs.queue_used)) continue; if (!(BIT(cnt) & txq_bm)) continue; ret = iwl_trans_pcie_wait_txq_empty(trans, cnt); if (ret) break; } return ret; } static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); spin_lock_bh(&trans_pcie->reg_lock); __iwl_trans_pcie_set_bits_mask(trans, reg, mask, value); spin_unlock_bh(&trans_pcie->reg_lock); } static const char *get_csr_string(int cmd) { #define IWL_CMD(x) case x: return #x switch (cmd) { IWL_CMD(CSR_HW_IF_CONFIG_REG); IWL_CMD(CSR_INT_COALESCING); IWL_CMD(CSR_INT); IWL_CMD(CSR_INT_MASK); IWL_CMD(CSR_FH_INT_STATUS); IWL_CMD(CSR_GPIO_IN); IWL_CMD(CSR_RESET); IWL_CMD(CSR_GP_CNTRL); IWL_CMD(CSR_HW_REV); IWL_CMD(CSR_EEPROM_REG); IWL_CMD(CSR_EEPROM_GP); IWL_CMD(CSR_OTP_GP_REG); IWL_CMD(CSR_GIO_REG); IWL_CMD(CSR_GP_UCODE_REG); IWL_CMD(CSR_GP_DRIVER_REG); IWL_CMD(CSR_UCODE_DRV_GP1); IWL_CMD(CSR_UCODE_DRV_GP2); IWL_CMD(CSR_LED_REG); IWL_CMD(CSR_DRAM_INT_TBL_REG); IWL_CMD(CSR_GIO_CHICKEN_BITS); IWL_CMD(CSR_ANA_PLL_CFG); IWL_CMD(CSR_HW_REV_WA_REG); IWL_CMD(CSR_MONITOR_STATUS_REG); IWL_CMD(CSR_DBG_HPET_MEM_REG); default: return "UNKNOWN"; } #undef IWL_CMD } void iwl_pcie_dump_csr(struct iwl_trans *trans) { int i; static const u32 csr_tbl[] = { CSR_HW_IF_CONFIG_REG, CSR_INT_COALESCING, CSR_INT, CSR_INT_MASK, CSR_FH_INT_STATUS, CSR_GPIO_IN, CSR_RESET, CSR_GP_CNTRL, CSR_HW_REV, CSR_EEPROM_REG, CSR_EEPROM_GP, CSR_OTP_GP_REG, CSR_GIO_REG, CSR_GP_UCODE_REG, CSR_GP_DRIVER_REG, CSR_UCODE_DRV_GP1, CSR_UCODE_DRV_GP2, CSR_LED_REG, CSR_DRAM_INT_TBL_REG, CSR_GIO_CHICKEN_BITS, CSR_ANA_PLL_CFG, CSR_MONITOR_STATUS_REG, CSR_HW_REV_WA_REG, CSR_DBG_HPET_MEM_REG }; IWL_ERR(trans, "CSR values:\n"); IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is " "CSR_INT_PERIODIC_REG)\n"); for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) { IWL_ERR(trans, " %25s: 0X%08x\n", get_csr_string(csr_tbl[i]), iwl_read32(trans, csr_tbl[i])); } } #ifdef CONFIG_IWLWIFI_DEBUGFS /* create and remove of files */ #define DEBUGFS_ADD_FILE(name, parent, mode) do { \ debugfs_create_file(#name, mode, parent, trans, \ &iwl_dbgfs_##name##_ops); \ } while (0) /* file operation */ #define DEBUGFS_READ_FILE_OPS(name) \ static const struct file_operations iwl_dbgfs_##name##_ops = { \ .read = iwl_dbgfs_##name##_read, \ .open = simple_open, \ .llseek = generic_file_llseek, \ }; #define DEBUGFS_WRITE_FILE_OPS(name) \ static const struct file_operations iwl_dbgfs_##name##_ops = { \ .write = iwl_dbgfs_##name##_write, \ .open = simple_open, \ .llseek = generic_file_llseek, \ }; #define DEBUGFS_READ_WRITE_FILE_OPS(name) \ static const struct file_operations 
iwl_dbgfs_##name##_ops = { \ .write = iwl_dbgfs_##name##_write, \ .read = iwl_dbgfs_##name##_read, \ .open = simple_open, \ .llseek = generic_file_llseek, \ }; struct iwl_dbgfs_tx_queue_priv { struct iwl_trans *trans; }; struct iwl_dbgfs_tx_queue_state { loff_t pos; }; static void *iwl_dbgfs_tx_queue_seq_start(struct seq_file *seq, loff_t *pos) { struct iwl_dbgfs_tx_queue_priv *priv = seq->private; struct iwl_dbgfs_tx_queue_state *state; if (*pos >= priv->trans->trans_cfg->base_params->num_of_queues) return NULL; state = kmalloc(sizeof(*state), GFP_KERNEL); if (!state) return NULL; state->pos = *pos; return state; } static void *iwl_dbgfs_tx_queue_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct iwl_dbgfs_tx_queue_priv *priv = seq->private; struct iwl_dbgfs_tx_queue_state *state = v; *pos = ++state->pos; if (*pos >= priv->trans->trans_cfg->base_params->num_of_queues) return NULL; return state; } static void iwl_dbgfs_tx_queue_seq_stop(struct seq_file *seq, void *v) { kfree(v); } static int iwl_dbgfs_tx_queue_seq_show(struct seq_file *seq, void *v) { struct iwl_dbgfs_tx_queue_priv *priv = seq->private; struct iwl_dbgfs_tx_queue_state *state = v; struct iwl_trans *trans = priv->trans; struct iwl_txq *txq = trans->txqs.txq[state->pos]; seq_printf(seq, "hwq %.3u: used=%d stopped=%d ", (unsigned int)state->pos, !!test_bit(state->pos, trans->txqs.queue_used), !!test_bit(state->pos, trans->txqs.queue_stopped)); if (txq) seq_printf(seq, "read=%u write=%u need_update=%d frozen=%d n_window=%d ampdu=%d", txq->read_ptr, txq->write_ptr, txq->need_update, txq->frozen, txq->n_window, txq->ampdu); else seq_puts(seq, "(unallocated)"); if (state->pos == trans->txqs.cmd.q_id) seq_puts(seq, " (HCMD)"); seq_puts(seq, "\n"); return 0; } static const struct seq_operations iwl_dbgfs_tx_queue_seq_ops = { .start = iwl_dbgfs_tx_queue_seq_start, .next = iwl_dbgfs_tx_queue_seq_next, .stop = iwl_dbgfs_tx_queue_seq_stop, .show = iwl_dbgfs_tx_queue_seq_show, }; static int iwl_dbgfs_tx_queue_open(struct inode *inode, struct file *filp) { struct iwl_dbgfs_tx_queue_priv *priv; priv = __seq_open_private(filp, &iwl_dbgfs_tx_queue_seq_ops, sizeof(*priv)); if (!priv) return -ENOMEM; priv->trans = inode->i_private; return 0; } static ssize_t iwl_dbgfs_rx_queue_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_trans *trans = file->private_data; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); char *buf; int pos = 0, i, ret; size_t bufsz; bufsz = sizeof(char) * 121 * trans->num_rx_queues; if (!trans_pcie->rxq) return -EAGAIN; buf = kzalloc(bufsz, GFP_KERNEL); if (!buf) return -ENOMEM; for (i = 0; i < trans->num_rx_queues && pos < bufsz; i++) { struct iwl_rxq *rxq = &trans_pcie->rxq[i]; pos += scnprintf(buf + pos, bufsz - pos, "queue#: %2d\n", i); pos += scnprintf(buf + pos, bufsz - pos, "\tread: %u\n", rxq->read); pos += scnprintf(buf + pos, bufsz - pos, "\twrite: %u\n", rxq->write); pos += scnprintf(buf + pos, bufsz - pos, "\twrite_actual: %u\n", rxq->write_actual); pos += scnprintf(buf + pos, bufsz - pos, "\tneed_update: %2d\n", rxq->need_update); pos += scnprintf(buf + pos, bufsz - pos, "\tfree_count: %u\n", rxq->free_count); if (rxq->rb_stts) { u32 r = __le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)); pos += scnprintf(buf + pos, bufsz - pos, "\tclosed_rb_num: %u\n", r & 0x0FFF); } else { pos += scnprintf(buf + pos, bufsz - pos, "\tclosed_rb_num: Not Allocated\n"); } } ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); kfree(buf); return ret; } 
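The debugfs handlers in this file all follow the same read pattern: format driver state into a heap buffer with scnprintf() and hand it to userspace with simple_read_from_buffer(), which honours *ppos and count for partial reads. Below is a minimal sketch of that pattern, assuming only the standard Linux debugfs API; the my_dev structure and the my_dbgfs_stats_* names are hypothetical and not part of iwlwifi.

/* Illustrative sketch only -- not iwlwifi code.  Same shape as
 * iwl_dbgfs_rx_queue_read() above and iwl_dbgfs_interrupt_read() below:
 * allocate a buffer, accumulate lines with scnprintf(), copy out with
 * simple_read_from_buffer(), free the buffer.
 */
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/types.h>

struct my_dev {				/* hypothetical private state */
	u32 rx_ok;
	u32 rx_err;
};

static ssize_t my_dbgfs_stats_read(struct file *file, char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	/* simple_open() stored inode->i_private here at open time */
	struct my_dev *dev = file->private_data;
	const size_t bufsz = 128;
	char *buf;
	int pos = 0;
	ssize_t ret;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	pos += scnprintf(buf + pos, bufsz - pos, "rx ok:  %u\n", dev->rx_ok);
	pos += scnprintf(buf + pos, bufsz - pos, "rx err: %u\n", dev->rx_err);

	/* copies at most 'count' bytes starting at *ppos and advances *ppos */
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}

static const struct file_operations my_dbgfs_stats_ops = {
	.read = my_dbgfs_stats_read,
	.open = simple_open,
	.llseek = generic_file_llseek,
};

Such a handler would typically be registered with debugfs_create_file("stats", 0400, parent_dir, dev, &my_dbgfs_stats_ops), which is what the DEBUGFS_READ_FILE_OPS()/DEBUGFS_ADD_FILE() macros above wrap for the iwl_dbgfs_* handlers in this file.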
static ssize_t iwl_dbgfs_interrupt_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_trans *trans = file->private_data; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct isr_statistics *isr_stats = &trans_pcie->isr_stats; int pos = 0; char *buf; int bufsz = 24 * 64; /* 24 items * 64 char per item */ ssize_t ret; buf = kzalloc(bufsz, GFP_KERNEL); if (!buf) return -ENOMEM; pos += scnprintf(buf + pos, bufsz - pos, "Interrupt Statistics Report:\n"); pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n", isr_stats->hw); pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n", isr_stats->sw); if (isr_stats->sw || isr_stats->hw) { pos += scnprintf(buf + pos, bufsz - pos, "\tLast Restarting Code: 0x%X\n", isr_stats->err_code); } #ifdef CONFIG_IWLWIFI_DEBUG pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n", isr_stats->sch); pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n", isr_stats->alive); #endif pos += scnprintf(buf + pos, bufsz - pos, "HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill); pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n", isr_stats->ctkill); pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n", isr_stats->wakeup); pos += scnprintf(buf + pos, bufsz - pos, "Rx command responses:\t\t %u\n", isr_stats->rx); pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n", isr_stats->tx); pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n", isr_stats->unhandled); ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); kfree(buf); return ret; } static ssize_t iwl_dbgfs_interrupt_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_trans *trans = file->private_data; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct isr_statistics *isr_stats = &trans_pcie->isr_stats; u32 reset_flag; int ret; ret = kstrtou32_from_user(user_buf, count, 16, &reset_flag); if (ret) return ret; if (reset_flag == 0) memset(isr_stats, 0, sizeof(*isr_stats)); return count; } static ssize_t iwl_dbgfs_csr_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_trans *trans = file->private_data; iwl_pcie_dump_csr(trans); return count; } static ssize_t iwl_dbgfs_fh_reg_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_trans *trans = file->private_data; char *buf = NULL; ssize_t ret; ret = iwl_dump_fh(trans, &buf); if (ret < 0) return ret; if (!buf) return -EINVAL; ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret); kfree(buf); return ret; } static ssize_t iwl_dbgfs_rfkill_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_trans *trans = file->private_data; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); char buf[100]; int pos; pos = scnprintf(buf, sizeof(buf), "debug: %d\nhw: %d\n", trans_pcie->debug_rfkill, !(iwl_read32(trans, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } static ssize_t iwl_dbgfs_rfkill_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_trans *trans = file->private_data; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); bool new_value; int ret; ret = kstrtobool_from_user(user_buf, count, &new_value); if (ret) return ret; if (new_value == trans_pcie->debug_rfkill) return 
count; IWL_WARN(trans, "changing debug rfkill %d->%d\n", trans_pcie->debug_rfkill, new_value); trans_pcie->debug_rfkill = new_value; iwl_pcie_handle_rfkill_irq(trans); return count; } static int iwl_dbgfs_monitor_data_open(struct inode *inode, struct file *file) { struct iwl_trans *trans = inode->i_private; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); if (!trans->dbg.dest_tlv || trans->dbg.dest_tlv->monitor_mode != EXTERNAL_MODE) { IWL_ERR(trans, "Debug destination is not set to DRAM\n"); return -ENOENT; } if (trans_pcie->fw_mon_data.state != IWL_FW_MON_DBGFS_STATE_CLOSED) return -EBUSY; trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_OPEN; return simple_open(inode, file); } static int iwl_dbgfs_monitor_data_release(struct inode *inode, struct file *file) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(inode->i_private); if (trans_pcie->fw_mon_data.state == IWL_FW_MON_DBGFS_STATE_OPEN) trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED; return 0; } static bool iwl_write_to_user_buf(char __user *user_buf, ssize_t count, void *buf, ssize_t *size, ssize_t *bytes_copied) { int buf_size_left = count - *bytes_copied; buf_size_left = buf_size_left - (buf_size_left % sizeof(u32)); if (*size > buf_size_left) *size = buf_size_left; *size -= copy_to_user(user_buf, buf, *size); *bytes_copied += *size; if (buf_size_left == *size) return true; return false; } static ssize_t iwl_dbgfs_monitor_data_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_trans *trans = file->private_data; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); u8 *cpu_addr = (void *)trans->dbg.fw_mon.block, *curr_buf; struct cont_rec *data = &trans_pcie->fw_mon_data; u32 write_ptr_addr, wrap_cnt_addr, write_ptr, wrap_cnt; ssize_t size, bytes_copied = 0; bool b_full; if (trans->dbg.dest_tlv) { write_ptr_addr = le32_to_cpu(trans->dbg.dest_tlv->write_ptr_reg); wrap_cnt_addr = le32_to_cpu(trans->dbg.dest_tlv->wrap_count); } else { write_ptr_addr = MON_BUFF_WRPTR; wrap_cnt_addr = MON_BUFF_CYCLE_CNT; } if (unlikely(!trans->dbg.rec_on)) return 0; mutex_lock(&data->mutex); if (data->state == IWL_FW_MON_DBGFS_STATE_DISABLED) { mutex_unlock(&data->mutex); return 0; } /* write_ptr position in bytes rather then DW */ write_ptr = iwl_read_prph(trans, write_ptr_addr) * sizeof(u32); wrap_cnt = iwl_read_prph(trans, wrap_cnt_addr); if (data->prev_wrap_cnt == wrap_cnt) { size = write_ptr - data->prev_wr_ptr; curr_buf = cpu_addr + data->prev_wr_ptr; b_full = iwl_write_to_user_buf(user_buf, count, curr_buf, &size, &bytes_copied); data->prev_wr_ptr += size; } else if (data->prev_wrap_cnt == wrap_cnt - 1 && write_ptr < data->prev_wr_ptr) { size = trans->dbg.fw_mon.size - data->prev_wr_ptr; curr_buf = cpu_addr + data->prev_wr_ptr; b_full = iwl_write_to_user_buf(user_buf, count, curr_buf, &size, &bytes_copied); data->prev_wr_ptr += size; if (!b_full) { size = write_ptr; b_full = iwl_write_to_user_buf(user_buf, count, cpu_addr, &size, &bytes_copied); data->prev_wr_ptr = size; data->prev_wrap_cnt++; } } else { if (data->prev_wrap_cnt == wrap_cnt - 1 && write_ptr > data->prev_wr_ptr) IWL_WARN(trans, "write pointer passed previous write pointer, start copying from the beginning\n"); else if (!unlikely(data->prev_wrap_cnt == 0 && data->prev_wr_ptr == 0)) IWL_WARN(trans, "monitor data is out of sync, start copying from the beginning\n"); size = write_ptr; b_full = iwl_write_to_user_buf(user_buf, count, cpu_addr, &size, &bytes_copied); data->prev_wr_ptr 
= size; data->prev_wrap_cnt = wrap_cnt; } mutex_unlock(&data->mutex); return bytes_copied; } static ssize_t iwl_dbgfs_rf_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_trans *trans = file->private_data; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); if (!trans_pcie->rf_name[0]) return -ENODEV; return simple_read_from_buffer(user_buf, count, ppos, trans_pcie->rf_name, strlen(trans_pcie->rf_name)); } DEBUGFS_READ_WRITE_FILE_OPS(interrupt); DEBUGFS_READ_FILE_OPS(fh_reg); DEBUGFS_READ_FILE_OPS(rx_queue); DEBUGFS_WRITE_FILE_OPS(csr); DEBUGFS_READ_WRITE_FILE_OPS(rfkill); DEBUGFS_READ_FILE_OPS(rf); static const struct file_operations iwl_dbgfs_tx_queue_ops = { .owner = THIS_MODULE, .open = iwl_dbgfs_tx_queue_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_private, }; static const struct file_operations iwl_dbgfs_monitor_data_ops = { .read = iwl_dbgfs_monitor_data_read, .open = iwl_dbgfs_monitor_data_open, .release = iwl_dbgfs_monitor_data_release, }; /* Create the debugfs files and directories */ void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) { struct dentry *dir = trans->dbgfs_dir; DEBUGFS_ADD_FILE(rx_queue, dir, 0400); DEBUGFS_ADD_FILE(tx_queue, dir, 0400); DEBUGFS_ADD_FILE(interrupt, dir, 0600); DEBUGFS_ADD_FILE(csr, dir, 0200); DEBUGFS_ADD_FILE(fh_reg, dir, 0400); DEBUGFS_ADD_FILE(rfkill, dir, 0600); DEBUGFS_ADD_FILE(monitor_data, dir, 0400); DEBUGFS_ADD_FILE(rf, dir, 0400); } static void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct cont_rec *data = &trans_pcie->fw_mon_data; mutex_lock(&data->mutex); data->state = IWL_FW_MON_DBGFS_STATE_DISABLED; mutex_unlock(&data->mutex); } #endif /*CONFIG_IWLWIFI_DEBUGFS */ static u32 iwl_trans_pcie_get_cmdlen(struct iwl_trans *trans, void *tfd) { u32 cmdlen = 0; int i; for (i = 0; i < trans->txqs.tfd.max_tbs; i++) cmdlen += iwl_txq_gen1_tfd_tb_get_len(trans, tfd, i); return cmdlen; } static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans, struct iwl_fw_error_dump_data **data, int allocated_rb_nums) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int max_len = trans_pcie->rx_buf_bytes; /* Dump RBs is supported only for pre-9000 devices (1 queue) */ struct iwl_rxq *rxq = &trans_pcie->rxq[0]; u32 i, r, j, rb_len = 0; spin_lock(&rxq->lock); r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF; for (i = rxq->read, j = 0; i != r && j < allocated_rb_nums; i = (i + 1) & RX_QUEUE_MASK, j++) { struct iwl_rx_mem_buffer *rxb = rxq->queue[i]; struct iwl_fw_error_dump_rb *rb; dma_sync_single_for_cpu(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE); rb_len += sizeof(**data) + sizeof(*rb) + max_len; (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RB); (*data)->len = cpu_to_le32(sizeof(*rb) + max_len); rb = (void *)(*data)->data; rb->index = cpu_to_le32(i); memcpy(rb->data, page_address(rxb->page), max_len); *data = iwl_fw_error_next_data(*data); } spin_unlock(&rxq->lock); return rb_len; } #define IWL_CSR_TO_DUMP (0x250) static u32 iwl_trans_pcie_dump_csr(struct iwl_trans *trans, struct iwl_fw_error_dump_data **data) { u32 csr_len = sizeof(**data) + IWL_CSR_TO_DUMP; __le32 *val; int i; (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_CSR); (*data)->len = cpu_to_le32(IWL_CSR_TO_DUMP); val = (void *)(*data)->data; for (i = 0; i < IWL_CSR_TO_DUMP; i += 4) *val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i)); *data = iwl_fw_error_next_data(*data); return 
csr_len; } static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans, struct iwl_fw_error_dump_data **data) { u32 fh_regs_len = FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND; __le32 *val; int i; if (!iwl_trans_grab_nic_access(trans)) return 0; (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FH_REGS); (*data)->len = cpu_to_le32(fh_regs_len); val = (void *)(*data)->data; if (!trans->trans_cfg->gen2) for (i = FH_MEM_LOWER_BOUND; i < FH_MEM_UPPER_BOUND; i += sizeof(u32)) *val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i)); else for (i = iwl_umac_prph(trans, FH_MEM_LOWER_BOUND_GEN2); i < iwl_umac_prph(trans, FH_MEM_UPPER_BOUND_GEN2); i += sizeof(u32)) *val++ = cpu_to_le32(iwl_trans_pcie_read_prph(trans, i)); iwl_trans_release_nic_access(trans); *data = iwl_fw_error_next_data(*data); return sizeof(**data) + fh_regs_len; } static u32 iwl_trans_pci_dump_marbh_monitor(struct iwl_trans *trans, struct iwl_fw_error_dump_fw_mon *fw_mon_data, u32 monitor_len) { u32 buf_size_in_dwords = (monitor_len >> 2); u32 *buffer = (u32 *)fw_mon_data->data; u32 i; if (!iwl_trans_grab_nic_access(trans)) return 0; iwl_write_umac_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x1); for (i = 0; i < buf_size_in_dwords; i++) buffer[i] = iwl_read_umac_prph_no_grab(trans, MON_DMARB_RD_DATA_ADDR); iwl_write_umac_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x0); iwl_trans_release_nic_access(trans); return monitor_len; } static void iwl_trans_pcie_dump_pointers(struct iwl_trans *trans, struct iwl_fw_error_dump_fw_mon *fw_mon_data) { u32 base, base_high, write_ptr, write_ptr_val, wrap_cnt; if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { base = DBGC_CUR_DBGBUF_BASE_ADDR_LSB; base_high = DBGC_CUR_DBGBUF_BASE_ADDR_MSB; write_ptr = DBGC_CUR_DBGBUF_STATUS; wrap_cnt = DBGC_DBGBUF_WRAP_AROUND; } else if (trans->dbg.dest_tlv) { write_ptr = le32_to_cpu(trans->dbg.dest_tlv->write_ptr_reg); wrap_cnt = le32_to_cpu(trans->dbg.dest_tlv->wrap_count); base = le32_to_cpu(trans->dbg.dest_tlv->base_reg); } else { base = MON_BUFF_BASE_ADDR; write_ptr = MON_BUFF_WRPTR; wrap_cnt = MON_BUFF_CYCLE_CNT; } write_ptr_val = iwl_read_prph(trans, write_ptr); fw_mon_data->fw_mon_cycle_cnt = cpu_to_le32(iwl_read_prph(trans, wrap_cnt)); fw_mon_data->fw_mon_base_ptr = cpu_to_le32(iwl_read_prph(trans, base)); if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { fw_mon_data->fw_mon_base_high_ptr = cpu_to_le32(iwl_read_prph(trans, base_high)); write_ptr_val &= DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK; /* convert wrtPtr to DWs, to align with all HWs */ write_ptr_val >>= 2; } fw_mon_data->fw_mon_wr_ptr = cpu_to_le32(write_ptr_val); } static u32 iwl_trans_pcie_dump_monitor(struct iwl_trans *trans, struct iwl_fw_error_dump_data **data, u32 monitor_len) { struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon; u32 len = 0; if (trans->dbg.dest_tlv || (fw_mon->size && (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000 || trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))) { struct iwl_fw_error_dump_fw_mon *fw_mon_data; (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR); fw_mon_data = (void *)(*data)->data; iwl_trans_pcie_dump_pointers(trans, fw_mon_data); len += sizeof(**data) + sizeof(*fw_mon_data); if (fw_mon->size) { memcpy(fw_mon_data->data, fw_mon->block, fw_mon->size); monitor_len = fw_mon->size; } else if (trans->dbg.dest_tlv->monitor_mode == SMEM_MODE) { u32 base = le32_to_cpu(fw_mon_data->fw_mon_base_ptr); /* * Update pointers to reflect actual values after * shifting */ if (trans->dbg.dest_tlv->version) { base = 
(iwl_read_prph(trans, base) & IWL_LDBG_M2S_BUF_BA_MSK) << trans->dbg.dest_tlv->base_shift; base *= IWL_M2S_UNIT_SIZE; base += trans->cfg->smem_offset; } else { base = iwl_read_prph(trans, base) << trans->dbg.dest_tlv->base_shift; } iwl_trans_read_mem(trans, base, fw_mon_data->data, monitor_len / sizeof(u32)); } else if (trans->dbg.dest_tlv->monitor_mode == MARBH_MODE) { monitor_len = iwl_trans_pci_dump_marbh_monitor(trans, fw_mon_data, monitor_len); } else { /* Didn't match anything - output no monitor data */ monitor_len = 0; } len += monitor_len; (*data)->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data)); } return len; } static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, u32 *len) { if (trans->dbg.fw_mon.size) { *len += sizeof(struct iwl_fw_error_dump_data) + sizeof(struct iwl_fw_error_dump_fw_mon) + trans->dbg.fw_mon.size; return trans->dbg.fw_mon.size; } else if (trans->dbg.dest_tlv) { u32 base, end, cfg_reg, monitor_len; if (trans->dbg.dest_tlv->version == 1) { cfg_reg = le32_to_cpu(trans->dbg.dest_tlv->base_reg); cfg_reg = iwl_read_prph(trans, cfg_reg); base = (cfg_reg & IWL_LDBG_M2S_BUF_BA_MSK) << trans->dbg.dest_tlv->base_shift; base *= IWL_M2S_UNIT_SIZE; base += trans->cfg->smem_offset; monitor_len = (cfg_reg & IWL_LDBG_M2S_BUF_SIZE_MSK) >> trans->dbg.dest_tlv->end_shift; monitor_len *= IWL_M2S_UNIT_SIZE; } else { base = le32_to_cpu(trans->dbg.dest_tlv->base_reg); end = le32_to_cpu(trans->dbg.dest_tlv->end_reg); base = iwl_read_prph(trans, base) << trans->dbg.dest_tlv->base_shift; end = iwl_read_prph(trans, end) << trans->dbg.dest_tlv->end_shift; /* Make "end" point to the actual end */ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000 || trans->dbg.dest_tlv->monitor_mode == MARBH_MODE) end += (1 << trans->dbg.dest_tlv->end_shift); monitor_len = end - base; } *len += sizeof(struct iwl_fw_error_dump_data) + sizeof(struct iwl_fw_error_dump_fw_mon) + monitor_len; return monitor_len; } return 0; } static struct iwl_trans_dump_data * iwl_trans_pcie_dump_data(struct iwl_trans *trans, u32 dump_mask, const struct iwl_dump_sanitize_ops *sanitize_ops, void *sanitize_ctx) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_fw_error_dump_data *data; struct iwl_txq *cmdq = trans->txqs.txq[trans->txqs.cmd.q_id]; struct iwl_fw_error_dump_txcmd *txcmd; struct iwl_trans_dump_data *dump_data; u32 len, num_rbs = 0, monitor_len = 0; int i, ptr; bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) && !trans->trans_cfg->mq_rx_supported && dump_mask & BIT(IWL_FW_ERROR_DUMP_RB); if (!dump_mask) return NULL; /* transport dump header */ len = sizeof(*dump_data); /* host commands */ if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD) && cmdq) len += sizeof(*data) + cmdq->n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE); /* FW monitor */ if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR)) monitor_len = iwl_trans_get_fw_monitor_len(trans, &len); /* CSR registers */ if (dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR)) len += sizeof(*data) + IWL_CSR_TO_DUMP; /* FH registers */ if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS)) { if (trans->trans_cfg->gen2) len += sizeof(*data) + (iwl_umac_prph(trans, FH_MEM_UPPER_BOUND_GEN2) - iwl_umac_prph(trans, FH_MEM_LOWER_BOUND_GEN2)); else len += sizeof(*data) + (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND); } if (dump_rbs) { /* Dump RBs is supported only for pre-9000 devices (1 queue) */ struct iwl_rxq *rxq = &trans_pcie->rxq[0]; /* RBs */ num_rbs = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF; num_rbs = (num_rbs - 
rxq->read) & RX_QUEUE_MASK; len += num_rbs * (sizeof(*data) + sizeof(struct iwl_fw_error_dump_rb) + (PAGE_SIZE << trans_pcie->rx_page_order)); } /* Paged memory for gen2 HW */ if (trans->trans_cfg->gen2 && dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) for (i = 0; i < trans->init_dram.paging_cnt; i++) len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_paging) + trans->init_dram.paging[i].size; dump_data = vzalloc(len); if (!dump_data) return NULL; len = 0; data = (void *)dump_data->data; if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD) && cmdq) { u16 tfd_size = trans->txqs.tfd.size; data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD); txcmd = (void *)data->data; spin_lock_bh(&cmdq->lock); ptr = cmdq->write_ptr; for (i = 0; i < cmdq->n_window; i++) { u8 idx = iwl_txq_get_cmd_index(cmdq, ptr); u8 tfdidx; u32 caplen, cmdlen; if (trans->trans_cfg->use_tfh) tfdidx = idx; else tfdidx = ptr; cmdlen = iwl_trans_pcie_get_cmdlen(trans, (u8 *)cmdq->tfds + tfd_size * tfdidx); caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen); if (cmdlen) { len += sizeof(*txcmd) + caplen; txcmd->cmdlen = cpu_to_le32(cmdlen); txcmd->caplen = cpu_to_le32(caplen); memcpy(txcmd->data, cmdq->entries[idx].cmd, caplen); if (sanitize_ops && sanitize_ops->frob_hcmd) sanitize_ops->frob_hcmd(sanitize_ctx, txcmd->data, caplen); txcmd = (void *)((u8 *)txcmd->data + caplen); } ptr = iwl_txq_dec_wrap(trans, ptr); } spin_unlock_bh(&cmdq->lock); data->len = cpu_to_le32(len); len += sizeof(*data); data = iwl_fw_error_next_data(data); } if (dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR)) len += iwl_trans_pcie_dump_csr(trans, &data); if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS)) len += iwl_trans_pcie_fh_regs_dump(trans, &data); if (dump_rbs) len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs); /* Paged memory for gen2 HW */ if (trans->trans_cfg->gen2 && dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) { for (i = 0; i < trans->init_dram.paging_cnt; i++) { struct iwl_fw_error_dump_paging *paging; u32 page_len = trans->init_dram.paging[i].size; data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING); data->len = cpu_to_le32(sizeof(*paging) + page_len); paging = (void *)data->data; paging->index = cpu_to_le32(i); memcpy(paging->data, trans->init_dram.paging[i].block, page_len); data = iwl_fw_error_next_data(data); len += sizeof(*data) + sizeof(*paging) + page_len; } } if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR)) len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len); dump_data->len = len; return dump_data; } static void iwl_trans_pci_interrupts(struct iwl_trans *trans, bool enable) { if (enable) iwl_enable_interrupts(trans); else iwl_disable_interrupts(trans); } static void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans) { u32 inta_addr, sw_err_bit; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); if (trans_pcie->msix_enabled) { inta_addr = CSR_MSIX_HW_INT_CAUSES_AD; if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) sw_err_bit = MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ; else sw_err_bit = MSIX_HW_INT_CAUSES_REG_SW_ERR; } else { inta_addr = CSR_INT; sw_err_bit = CSR_INT_BIT_SW_ERR; } iwl_trans_sync_nmi_with_addr(trans, inta_addr, sw_err_bit); } #define IWL_TRANS_COMMON_OPS \ .op_mode_leave = iwl_trans_pcie_op_mode_leave, \ .write8 = iwl_trans_pcie_write8, \ .write32 = iwl_trans_pcie_write32, \ .read32 = iwl_trans_pcie_read32, \ .read_prph = iwl_trans_pcie_read_prph, \ .write_prph = iwl_trans_pcie_write_prph, \ .read_mem = iwl_trans_pcie_read_mem, \ .write_mem = iwl_trans_pcie_write_mem, \ .read_config32 = 
iwl_trans_pcie_read_config32, \ .configure = iwl_trans_pcie_configure, \ .set_pmi = iwl_trans_pcie_set_pmi, \ .sw_reset = iwl_trans_pcie_sw_reset, \ .grab_nic_access = iwl_trans_pcie_grab_nic_access, \ .release_nic_access = iwl_trans_pcie_release_nic_access, \ .set_bits_mask = iwl_trans_pcie_set_bits_mask, \ .dump_data = iwl_trans_pcie_dump_data, \ .d3_suspend = iwl_trans_pcie_d3_suspend, \ .d3_resume = iwl_trans_pcie_d3_resume, \ .interrupts = iwl_trans_pci_interrupts, \ .sync_nmi = iwl_trans_pcie_sync_nmi, \ .imr_dma_data = iwl_trans_pcie_copy_imr \ static const struct iwl_trans_ops trans_ops_pcie = { IWL_TRANS_COMMON_OPS, .start_hw = iwl_trans_pcie_start_hw, .fw_alive = iwl_trans_pcie_fw_alive, .start_fw = iwl_trans_pcie_start_fw, .stop_device = iwl_trans_pcie_stop_device, .send_cmd = iwl_pcie_enqueue_hcmd, .tx = iwl_trans_pcie_tx, .reclaim = iwl_txq_reclaim, .txq_disable = iwl_trans_pcie_txq_disable, .txq_enable = iwl_trans_pcie_txq_enable, .txq_set_shared_mode = iwl_trans_pcie_txq_set_shared_mode, .wait_tx_queues_empty = iwl_trans_pcie_wait_txqs_empty, .freeze_txq_timer = iwl_trans_txq_freeze_timer, .block_txq_ptrs = iwl_trans_pcie_block_txq_ptrs, #ifdef CONFIG_IWLWIFI_DEBUGFS .debugfs_cleanup = iwl_trans_pcie_debugfs_cleanup, #endif }; static const struct iwl_trans_ops trans_ops_pcie_gen2 = { IWL_TRANS_COMMON_OPS, .start_hw = iwl_trans_pcie_start_hw, .fw_alive = iwl_trans_pcie_gen2_fw_alive, .start_fw = iwl_trans_pcie_gen2_start_fw, .stop_device = iwl_trans_pcie_gen2_stop_device, .send_cmd = iwl_pcie_gen2_enqueue_hcmd, .tx = iwl_txq_gen2_tx, .reclaim = iwl_txq_reclaim, .set_q_ptrs = iwl_txq_set_q_ptrs, .txq_alloc = iwl_txq_dyn_alloc, .txq_free = iwl_txq_dyn_free, .wait_txq_empty = iwl_trans_pcie_wait_txq_empty, .rxq_dma_data = iwl_trans_pcie_rxq_dma_data, .set_pnvm = iwl_trans_pcie_ctx_info_gen3_set_pnvm, .set_reduce_power = iwl_trans_pcie_ctx_info_gen3_set_reduce_power, #ifdef CONFIG_IWLWIFI_DEBUGFS .debugfs_cleanup = iwl_trans_pcie_debugfs_cleanup, #endif }; struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, const struct pci_device_id *ent, const struct iwl_cfg_trans_params *cfg_trans) { struct iwl_trans_pcie *trans_pcie; struct iwl_trans *trans; int ret, addr_size; const struct iwl_trans_ops *ops = &trans_ops_pcie_gen2; void __iomem * const *table; if (!cfg_trans->gen2) ops = &trans_ops_pcie; ret = pcim_enable_device(pdev); if (ret) return ERR_PTR(ret); trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev, ops, cfg_trans); if (!trans) return ERR_PTR(-ENOMEM); trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); trans_pcie->trans = trans; trans_pcie->opmode_down = true; spin_lock_init(&trans_pcie->irq_lock); spin_lock_init(&trans_pcie->reg_lock); spin_lock_init(&trans_pcie->alloc_page_lock); mutex_init(&trans_pcie->mutex); init_waitqueue_head(&trans_pcie->ucode_write_waitq); init_waitqueue_head(&trans_pcie->fw_reset_waitq); init_waitqueue_head(&trans_pcie->imr_waitq); trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator", WQ_HIGHPRI | WQ_UNBOUND, 1); if (!trans_pcie->rba.alloc_wq) { ret = -ENOMEM; goto out_free_trans; } INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work); trans_pcie->debug_rfkill = -1; if (!cfg_trans->base_params->pcie_l1_allowed) { /* * W/A - seems to solve weird behavior. We need to remove this * if we don't want to stay in L1 all the time. This wastes a * lot of power. 
*/ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM); } trans_pcie->def_rx_queue = 0; pci_set_master(pdev); addr_size = trans->txqs.tfd.addr_size; ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_size)); if (ret) { ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); /* both attempts failed: */ if (ret) { dev_err(&pdev->dev, "No suitable DMA available\n"); goto out_no_pci; } } ret = pcim_iomap_regions_request_all(pdev, BIT(0), DRV_NAME); if (ret) { dev_err(&pdev->dev, "pcim_iomap_regions_request_all failed\n"); goto out_no_pci; } #if defined(__FreeBSD__) linuxkpi_pcim_want_to_use_bus_functions(pdev); #endif table = pcim_iomap_table(pdev); if (!table) { dev_err(&pdev->dev, "pcim_iomap_table failed\n"); ret = -ENOMEM; goto out_no_pci; } trans_pcie->hw_base = table[0]; if (!trans_pcie->hw_base) { dev_err(&pdev->dev, "couldn't find IO mem in first BAR\n"); ret = -ENODEV; goto out_no_pci; } /* We disable the RETRY_TIMEOUT register (0x41) to keep * PCI Tx retries from interfering with C3 CPU state */ pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); trans_pcie->pci_dev = pdev; iwl_disable_interrupts(trans); trans->hw_rev = iwl_read32(trans, CSR_HW_REV); if (trans->hw_rev == 0xffffffff) { dev_err(&pdev->dev, "HW_REV=0xFFFFFFFF, PCI issues?\n"); ret = -EIO; goto out_no_pci; } /* * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have * changed, and now the revision step also includes bit 0-1 (no more * "dash" value). To keep hw_rev backwards compatible - we'll store it * in the old format. */ if (cfg_trans->device_family >= IWL_DEVICE_FAMILY_8000) trans->hw_rev_step = trans->hw_rev & 0xF; else trans->hw_rev_step = (trans->hw_rev & 0xC) >> 2; IWL_DEBUG_INFO(trans, "HW REV: 0x%0x\n", trans->hw_rev); iwl_pcie_set_interrupt_capa(pdev, trans, cfg_trans); trans->hw_id = (pdev->device << 16) + pdev->subsystem_device; snprintf(trans->hw_id_str, sizeof(trans->hw_id_str), "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device); init_waitqueue_head(&trans_pcie->sx_waitq); if (trans_pcie->msix_enabled) { ret = iwl_pcie_init_msix_handler(pdev, trans_pcie); if (ret) goto out_no_pci; } else { ret = iwl_pcie_alloc_ict(trans); if (ret) goto out_no_pci; ret = devm_request_threaded_irq(&pdev->dev, pdev->irq, iwl_pcie_isr, iwl_pcie_irq_handler, IRQF_SHARED, DRV_NAME, trans); if (ret) { IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq); goto out_free_ict; } } #ifdef CONFIG_IWLWIFI_DEBUGFS trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED; mutex_init(&trans_pcie->fw_mon_data.mutex); #endif iwl_dbg_tlv_init(trans); return trans; out_free_ict: iwl_pcie_free_ict(trans); out_no_pci: destroy_workqueue(trans_pcie->rba.alloc_wq); out_free_trans: iwl_trans_free(trans); return ERR_PTR(ret); } void iwl_trans_pcie_copy_imr_fh(struct iwl_trans *trans, u32 dst_addr, u64 src_addr, u32 byte_cnt) { iwl_write_prph(trans, IMR_UREG_CHICK, iwl_read_prph(trans, IMR_UREG_CHICK) | IMR_UREG_CHICK_HALT_UMAC_PERMANENTLY_MSK); iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_SRAM_ADDR, dst_addr); iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_DRAM_ADDR_LSB, (u32)(src_addr & 0xFFFFFFFF)); iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_DRAM_ADDR_MSB, iwl_get_dma_hi_addr(src_addr)); iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_BC, byte_cnt); iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_CTRL, IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_IRQ_TARGET_POS | IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_DMA_EN_POS | IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_RS_MSK); } int 
iwl_trans_pcie_copy_imr(struct iwl_trans *trans, u32 dst_addr, u64 src_addr, u32 byte_cnt) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int ret = -1; trans_pcie->imr_status = IMR_D2S_REQUESTED; iwl_trans_pcie_copy_imr_fh(trans, dst_addr, src_addr, byte_cnt); ret = wait_event_timeout(trans_pcie->imr_waitq, trans_pcie->imr_status != IMR_D2S_REQUESTED, 5 * HZ); if (!ret || trans_pcie->imr_status == IMR_D2S_ERROR) { IWL_ERR(trans, "Failed to copy IMR Memory chunk!\n"); iwl_trans_pcie_dump_regs(trans); return -ETIMEDOUT; } trans_pcie->imr_status = IMR_D2S_IDLE; return 0; } diff --git a/sys/contrib/dev/iwlwifi/queue/tx.c b/sys/contrib/dev/iwlwifi/queue/tx.c index 67ed1145cea0..3f6bda96bfb5 100644 --- a/sys/contrib/dev/iwlwifi/queue/tx.c +++ b/sys/contrib/dev/iwlwifi/queue/tx.c @@ -1,1872 +1,1876 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2020-2022 Intel Corporation */ #ifdef CONFIG_INET #include #endif #include #include "iwl-debug.h" #include "iwl-io.h" #include "fw/api/commands.h" #include "fw/api/tx.h" #include "fw/api/datapath.h" #include "queue/tx.h" #include "iwl-fh.h" #include "iwl-scd.h" #include #if defined(__FreeBSD__) #include #endif /* * iwl_txq_update_byte_tbl - Set up entry in Tx byte-count array */ static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans, struct iwl_txq *txq, u16 byte_cnt, int num_tbs) { int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); u8 filled_tfd_size, num_fetch_chunks; u16 len = byte_cnt; __le16 bc_ent; if (WARN(idx >= txq->n_window, "%d >= %d\n", idx, txq->n_window)) return; filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) + num_tbs * sizeof(struct iwl_tfh_tb); /* * filled_tfd_size contains the number of filled bytes in the TFD. * Dividing it by 64 will give the number of chunks to fetch * to SRAM- 0 for one chunk, 1 for 2 and so on. * If, for example, TFD contains only 3 TBs then 32 bytes * of the TFD are used, and only one chunk of 64 bytes should * be fetched */ num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1; if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { struct iwl_gen3_bc_tbl_entry *scd_bc_tbl_gen3 = txq->bc_tbl.addr; /* Starting from AX210, the HW expects bytes */ WARN_ON(trans->txqs.bc_table_dword); WARN_ON(len > 0x3FFF); bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14)); scd_bc_tbl_gen3[idx].tfd_offset = bc_ent; } else { struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr; /* Before AX210, the HW expects DW */ WARN_ON(!trans->txqs.bc_table_dword); len = DIV_ROUND_UP(len, 4); WARN_ON(len > 0xFFF); bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12)); scd_bc_tbl->tfd_offset[idx] = bc_ent; } } /* * iwl_txq_inc_wr_ptr - Send new write index to hardware */ void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq) { lockdep_assert_held(&txq->lock); IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr); /* * if not in power-save mode, uCode will never sleep when we're * trying to tx (during RFKILL, we're not trying to tx). 
*/ iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16)); } static u8 iwl_txq_gen2_get_num_tbs(struct iwl_trans *trans, struct iwl_tfh_tfd *tfd) { return le16_to_cpu(tfd->num_tbs) & 0x1f; } void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans, struct iwl_cmd_meta *meta, struct iwl_tfh_tfd *tfd) { int i, num_tbs; /* Sanity check on number of chunks */ num_tbs = iwl_txq_gen2_get_num_tbs(trans, tfd); if (num_tbs > trans->txqs.tfd.max_tbs) { IWL_ERR(trans, "Too many chunks: %i\n", num_tbs); return; } /* first TB is never freed - it's the bidirectional DMA data */ for (i = 1; i < num_tbs; i++) { if (meta->tbs & BIT(i)) dma_unmap_page(trans->dev, le64_to_cpu(tfd->tbs[i].addr), le16_to_cpu(tfd->tbs[i].tb_len), DMA_TO_DEVICE); else dma_unmap_single(trans->dev, le64_to_cpu(tfd->tbs[i].addr), le16_to_cpu(tfd->tbs[i].tb_len), DMA_TO_DEVICE); } tfd->num_tbs = 0; } void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) { /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and * idx is bounded by n_window */ int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr); struct sk_buff *skb; lockdep_assert_held(&txq->lock); if (!txq->entries) return; iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta, iwl_txq_get_tfd(trans, txq, idx)); skb = txq->entries[idx].skb; /* Can be called from irqs-disabled context * If skb is not NULL, it means that the whole queue is being * freed and that the queue is not empty - free the skb */ if (skb) { iwl_op_mode_free_skb(trans->op_mode, skb); txq->entries[idx].skb = NULL; } } int iwl_txq_gen2_set_tb(struct iwl_trans *trans, struct iwl_tfh_tfd *tfd, dma_addr_t addr, u16 len) { int idx = iwl_txq_gen2_get_num_tbs(trans, tfd); struct iwl_tfh_tb *tb; /* * Only WARN here so we know about the issue, but we mess up our * unmap path because not every place currently checks for errors * returned from this function - it can only return an error if * there's no more space, and so when we know there is enough we * don't always check ... */ WARN(iwl_txq_crosses_4g_boundary(addr, len), "possible DMA problem with iova:0x%llx, len:%d\n", (unsigned long long)addr, len); if (WARN_ON(idx >= IWL_TFH_NUM_TBS)) return -EINVAL; tb = &tfd->tbs[idx]; /* Each TFD can point to a maximum max_tbs Tx buffers */ if (le16_to_cpu(tfd->num_tbs) >= trans->txqs.tfd.max_tbs) { IWL_ERR(trans, "Error can not send more than %d chunks\n", trans->txqs.tfd.max_tbs); return -EINVAL; } put_unaligned_le64(addr, &tb->addr); tb->tb_len = cpu_to_le16(len); tfd->num_tbs = cpu_to_le16(idx + 1); return idx; } static struct page *get_workaround_page(struct iwl_trans *trans, struct sk_buff *skb) { struct page **page_ptr; struct page *ret; page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs); ret = alloc_page(GFP_ATOMIC); if (!ret) return NULL; /* set the chaining pointer to the previous page if there */ *(void **)((u8 *)page_address(ret) + PAGE_SIZE - sizeof(void *)) = *page_ptr; *page_ptr = ret; return ret; } /* * Add a TB and if needed apply the FH HW bug workaround; * meta != NULL indicates that it's a page mapping and we * need to dma_unmap_page() and set the meta->tbs bit in * this case. 
*/ static int iwl_txq_gen2_set_tb_with_wa(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_tfh_tfd *tfd, dma_addr_t phys, void *virt, u16 len, struct iwl_cmd_meta *meta) { dma_addr_t oldphys = phys; struct page *page; int ret; if (unlikely(dma_mapping_error(trans->dev, phys))) return -ENOMEM; if (likely(!iwl_txq_crosses_4g_boundary(phys, len))) { ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len); if (ret < 0) goto unmap; if (meta) meta->tbs |= BIT(ret); ret = 0; goto trace; } /* * Work around a hardware bug. If (as expressed in the * condition above) the TB ends on a 32-bit boundary, * then the next TB may be accessed with the wrong * address. * To work around it, copy the data elsewhere and make * a new mapping for it so the device will not fail. */ if (WARN_ON(len > PAGE_SIZE - sizeof(void *))) { ret = -ENOBUFS; goto unmap; } page = get_workaround_page(trans, skb); if (!page) { ret = -ENOMEM; goto unmap; } memcpy(page_address(page), virt, len); phys = dma_map_single(trans->dev, page_address(page), len, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(trans->dev, phys))) return -ENOMEM; ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len); if (ret < 0) { /* unmap the new allocation as single */ oldphys = phys; meta = NULL; goto unmap; } IWL_WARN(trans, "TB bug workaround: copied %d bytes from 0x%llx to 0x%llx\n", len, (unsigned long long)oldphys, (unsigned long long)phys); ret = 0; unmap: if (meta) dma_unmap_page(trans->dev, oldphys, len, DMA_TO_DEVICE); else dma_unmap_single(trans->dev, oldphys, len, DMA_TO_DEVICE); trace: trace_iwlwifi_dev_tx_tb(trans->dev, skb, virt, phys, len); return ret; } #ifdef CONFIG_INET struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len, struct sk_buff *skb) { struct iwl_tso_hdr_page *p = this_cpu_ptr(trans->txqs.tso_hdr_page); struct page **page_ptr; page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs); if (WARN_ON(*page_ptr)) return NULL; if (!p->page) goto alloc; /* * Check if there's enough room on this page * * Note that we put a page chaining pointer *last* in the * page - we need it somewhere, and if it's there then we * avoid DMA mapping the last bits of the page which may * trigger the 32-bit boundary hardware bug. * * (see also get_workaround_page() in tx-gen2.c) */ if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE - sizeof(void *)) goto out; /* We don't have enough room on this page, get a new one. 
*/ __free_page(p->page); alloc: p->page = alloc_page(GFP_ATOMIC); if (!p->page) return NULL; p->pos = page_address(p->page); /* set the chaining pointer to NULL */ *(void **)((u8 *)page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL; out: *page_ptr = p->page; get_page(p->page); return p; } #endif static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_tfh_tfd *tfd, int start_len, u8 hdr_len, struct iwl_device_tx_cmd *dev_cmd) { #ifdef CONFIG_INET struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload; struct ieee80211_hdr *hdr = (void *)skb->data; unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room; unsigned int mss = skb_shinfo(skb)->gso_size; u16 length, amsdu_pad; u8 *start_hdr; struct iwl_tso_hdr_page *hdr_page; struct tso_t tso; trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr, start_len, 0); ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb); snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb); total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len; amsdu_pad = 0; /* total amount of header we may need for this A-MSDU */ hdr_room = DIV_ROUND_UP(total_len, mss) * (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)); /* Our device supports 9 segments at most, it will fit in 1 page */ hdr_page = get_page_hdr(trans, hdr_room, skb); if (!hdr_page) return -ENOMEM; start_hdr = hdr_page->pos; /* * Pull the ieee80211 header to be able to use TSO core, * we will restore it for the tx_status flow. */ skb_pull(skb, hdr_len); /* * Remove the length of all the headers that we don't actually * have in the MPDU by themselves, but that we duplicate into * all the different MSDUs inside the A-MSDU. */ le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen); tso_start(skb, &tso); while (total_len) { /* this is the data left for this subframe */ unsigned int data_left = min_t(unsigned int, mss, total_len); unsigned int tb_len; dma_addr_t tb_phys; u8 *subf_hdrs_start = hdr_page->pos; total_len -= data_left; memset(hdr_page->pos, 0, amsdu_pad); hdr_page->pos += amsdu_pad; amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen + data_left)) & 0x3; ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr)); hdr_page->pos += ETH_ALEN; ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr)); hdr_page->pos += ETH_ALEN; length = snap_ip_tcp_hdrlen + data_left; *((__be16 *)hdr_page->pos) = cpu_to_be16(length); hdr_page->pos += sizeof(length); /* * This will copy the SNAP as well which will be considered * as MAC header. */ tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len); hdr_page->pos += snap_ip_tcp_hdrlen; tb_len = hdr_page->pos - start_hdr; tb_phys = dma_map_single(trans->dev, start_hdr, tb_len, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(trans->dev, tb_phys))) goto out_err; /* * No need for _with_wa, this is from the TSO page and * we leave some space at the end of it so can't hit * the buggy scenario. 
*/ iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb_len); trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr, tb_phys, tb_len); /* add this subframe's headers' length to the tx_cmd */ le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start); /* prepare the start_hdr for the next subframe */ start_hdr = hdr_page->pos; /* put the payload */ while (data_left) { int ret; tb_len = min_t(unsigned int, tso.size, data_left); tb_phys = dma_map_single(trans->dev, tso.data, tb_len, DMA_TO_DEVICE); ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys, tso.data, tb_len, NULL); if (ret) goto out_err; data_left -= tb_len; tso_build_data(skb, &tso, tb_len); } } /* re -add the WiFi header */ skb_push(skb, hdr_len); return 0; out_err: #endif return -EINVAL; } static struct iwl_tfh_tfd *iwl_txq_gen2_build_tx_amsdu(struct iwl_trans *trans, struct iwl_txq *txq, struct iwl_device_tx_cmd *dev_cmd, struct sk_buff *skb, struct iwl_cmd_meta *out_meta, int hdr_len, int tx_cmd_len) { int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx); dma_addr_t tb_phys; int len; void *tb1_addr; tb_phys = iwl_txq_get_first_tb_dma(txq, idx); /* * No need for _with_wa, the first TB allocation is aligned up * to a 64-byte boundary and thus can't be at the end or cross * a page boundary (much less a 2^32 boundary). */ iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE); /* * The second TB (tb1) points to the remainder of the TX command * and the 802.11 header - dword aligned size * (This calculation modifies the TX command, so do it before the * setup of the first TB) */ len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len - IWL_FIRST_TB_SIZE; /* do not align A-MSDU to dword as the subframe header aligns it */ /* map the data for TB1 */ tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE; tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(trans->dev, tb_phys))) goto out_err; /* * No need for _with_wa(), we ensure (via alignment) that the data * here can never cross or end at a page boundary. 
*/ iwl_txq_gen2_set_tb(trans, tfd, tb_phys, len); if (iwl_txq_gen2_build_amsdu(trans, skb, tfd, len + IWL_FIRST_TB_SIZE, hdr_len, dev_cmd)) goto out_err; /* building the A-MSDU might have changed this data, memcpy it now */ memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE); return tfd; out_err: iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd); return NULL; } static int iwl_txq_gen2_tx_add_frags(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_tfh_tfd *tfd, struct iwl_cmd_meta *out_meta) { int i; for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; dma_addr_t tb_phys; unsigned int fragsz = skb_frag_size(frag); int ret; if (!fragsz) continue; tb_phys = skb_frag_dma_map(trans->dev, frag, 0, fragsz, DMA_TO_DEVICE); ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys, skb_frag_address(frag), fragsz, out_meta); if (ret) return ret; } return 0; } static struct iwl_tfh_tfd *iwl_txq_gen2_build_tx(struct iwl_trans *trans, struct iwl_txq *txq, struct iwl_device_tx_cmd *dev_cmd, struct sk_buff *skb, struct iwl_cmd_meta *out_meta, int hdr_len, int tx_cmd_len, bool pad) { int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx); dma_addr_t tb_phys; int len, tb1_len, tb2_len; void *tb1_addr; struct sk_buff *frag; tb_phys = iwl_txq_get_first_tb_dma(txq, idx); /* The first TB points to bi-directional DMA data */ memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE); /* * No need for _with_wa, the first TB allocation is aligned up * to a 64-byte boundary and thus can't be at the end or cross * a page boundary (much less a 2^32 boundary). */ iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE); /* * The second TB (tb1) points to the remainder of the TX command * and the 802.11 header - dword aligned size * (This calculation modifies the TX command, so do it before the * setup of the first TB) */ len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len - IWL_FIRST_TB_SIZE; if (pad) tb1_len = ALIGN(len, 4); else tb1_len = len; /* map the data for TB1 */ tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE; tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(trans->dev, tb_phys))) goto out_err; /* * No need for _with_wa(), we ensure (via alignment) that the data * here can never cross or end at a page boundary. 
*/ iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb1_len); trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, hdr_len); /* set up TFD's third entry to point to remainder of skb's head */ tb2_len = skb_headlen(skb) - hdr_len; if (tb2_len > 0) { int ret; tb_phys = dma_map_single(trans->dev, skb->data + hdr_len, tb2_len, DMA_TO_DEVICE); ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys, skb->data + hdr_len, tb2_len, NULL); if (ret) goto out_err; } if (iwl_txq_gen2_tx_add_frags(trans, skb, tfd, out_meta)) goto out_err; skb_walk_frags(skb, frag) { int ret; tb_phys = dma_map_single(trans->dev, frag->data, skb_headlen(frag), DMA_TO_DEVICE); ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys, frag->data, skb_headlen(frag), NULL); if (ret) goto out_err; if (iwl_txq_gen2_tx_add_frags(trans, frag, tfd, out_meta)) goto out_err; } return tfd; out_err: iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd); return NULL; } static struct iwl_tfh_tfd *iwl_txq_gen2_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq, struct iwl_device_tx_cmd *dev_cmd, struct sk_buff *skb, struct iwl_cmd_meta *out_meta) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx); int len, hdr_len; bool amsdu; /* There must be data left over for TB1 or this code must be changed */ BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE); memset(tfd, 0, sizeof(*tfd)); if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) len = sizeof(struct iwl_tx_cmd_gen2); else len = sizeof(struct iwl_tx_cmd_gen3); amsdu = ieee80211_is_data_qos(hdr->frame_control) && (*ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_A_MSDU_PRESENT); hdr_len = ieee80211_hdrlen(hdr->frame_control); /* * Only build A-MSDUs here if doing so by GSO, otherwise it may be * an A-MSDU for other reasons, e.g. NAN or an A-MSDU having been * built in the higher layers already. */ if (amsdu && skb_shinfo(skb)->gso_size) return iwl_txq_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb, out_meta, hdr_len, len); return iwl_txq_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta, hdr_len, len, !amsdu); } int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q) { unsigned int max; unsigned int used; /* * To avoid ambiguity between empty and completely full queues, there * should always be less than max_tfd_queue_size elements in the queue. * If q->n_window is smaller than max_tfd_queue_size, there is no need * to reserve any queue entries for this purpose. */ if (q->n_window < trans->trans_cfg->base_params->max_tfd_queue_size) max = q->n_window; else max = trans->trans_cfg->base_params->max_tfd_queue_size - 1; /* * max_tfd_queue_size is a power of 2, so the following is equivalent to * modulo by max_tfd_queue_size and is well defined. 
*/ used = (q->write_ptr - q->read_ptr) & (trans->trans_cfg->base_params->max_tfd_queue_size - 1); if (WARN_ON(used > max)) return 0; return max - used; } int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_device_tx_cmd *dev_cmd, int txq_id) { struct iwl_cmd_meta *out_meta; struct iwl_txq *txq = trans->txqs.txq[txq_id]; u16 cmd_len; int idx; void *tfd; if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES, "queue %d out of range", txq_id)) return -EINVAL; if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used), "TX on unused queue %d\n", txq_id)) return -EINVAL; if (skb_is_nonlinear(skb) && skb_shinfo(skb)->nr_frags > IWL_TRANS_MAX_FRAGS(trans) && __skb_linearize(skb)) return -ENOMEM; spin_lock(&txq->lock); if (iwl_txq_space(trans, txq) < txq->high_mark) { iwl_txq_stop(trans, txq); /* don't put the packet on the ring, if there is no room */ if (unlikely(iwl_txq_space(trans, txq) < 3)) { struct iwl_device_tx_cmd **dev_cmd_ptr; dev_cmd_ptr = (void *)((u8 *)skb->cb + trans->txqs.dev_cmd_offs); *dev_cmd_ptr = dev_cmd; __skb_queue_tail(&txq->overflow_q, skb); spin_unlock(&txq->lock); return 0; } } idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); /* Set up driver data for this TFD */ txq->entries[idx].skb = skb; txq->entries[idx].cmd = dev_cmd; dev_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | INDEX_TO_SEQ(idx))); /* Set up first empty entry in queue's array of Tx/cmd buffers */ out_meta = &txq->entries[idx].meta; out_meta->flags = 0; tfd = iwl_txq_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta); if (!tfd) { spin_unlock(&txq->lock); return -1; } if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { struct iwl_tx_cmd_gen3 *tx_cmd_gen3 = (void *)dev_cmd->payload; cmd_len = le16_to_cpu(tx_cmd_gen3->len); } else { struct iwl_tx_cmd_gen2 *tx_cmd_gen2 = (void *)dev_cmd->payload; cmd_len = le16_to_cpu(tx_cmd_gen2->len); } /* Set up entry for this TFD in Tx byte-count array */ iwl_pcie_gen2_update_byte_tbl(trans, txq, cmd_len, iwl_txq_gen2_get_num_tbs(trans, tfd)); /* start timer if queue currently empty */ if (txq->read_ptr == txq->write_ptr && txq->wd_timeout) mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); /* Tell device the write index *just past* this latest filled TFD */ txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr); iwl_txq_inc_wr_ptr(trans, txq); /* * At this point the frame is "transmitted" successfully * and we will get a TX status notification eventually. 
*/ spin_unlock(&txq->lock); return 0; } /*************** HOST COMMAND QUEUE FUNCTIONS *****/ /* * iwl_txq_gen2_unmap - Unmap any remaining DMA mappings and free skb's */ void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id) { struct iwl_txq *txq = trans->txqs.txq[txq_id]; spin_lock_bh(&txq->lock); while (txq->write_ptr != txq->read_ptr) { IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n", txq_id, txq->read_ptr); if (txq_id != trans->txqs.cmd.q_id) { int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr); struct sk_buff *skb = txq->entries[idx].skb; if (!WARN_ON_ONCE(!skb)) iwl_txq_free_tso_page(trans, skb); } iwl_txq_gen2_free_tfd(trans, txq); txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr); } while (!skb_queue_empty(&txq->overflow_q)) { struct sk_buff *skb = __skb_dequeue(&txq->overflow_q); iwl_op_mode_free_skb(trans->op_mode, skb); } spin_unlock_bh(&txq->lock); /* just in case - this queue may have been stopped */ iwl_wake_queue(trans, txq); } static void iwl_txq_gen2_free_memory(struct iwl_trans *trans, struct iwl_txq *txq) { struct device *dev = trans->dev; /* De-alloc circular buffer of TFDs */ if (txq->tfds) { dma_free_coherent(dev, trans->txqs.tfd.size * txq->n_window, txq->tfds, txq->dma_addr); dma_free_coherent(dev, sizeof(*txq->first_tb_bufs) * txq->n_window, txq->first_tb_bufs, txq->first_tb_dma); } kfree(txq->entries); if (txq->bc_tbl.addr) dma_pool_free(trans->txqs.bc_pool, txq->bc_tbl.addr, txq->bc_tbl.dma); kfree(txq); } /* * iwl_pcie_txq_free - Deallocate DMA queue. * @txq: Transmit queue to deallocate. * * Empty queue by removing and destroying all BD's. * Free all buffers. * 0-fill, but do not free "txq" descriptor structure. */ static void iwl_txq_gen2_free(struct iwl_trans *trans, int txq_id) { struct iwl_txq *txq; int i; if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES, "queue %d out of range", txq_id)) return; txq = trans->txqs.txq[txq_id]; if (WARN_ON(!txq)) return; iwl_txq_gen2_unmap(trans, txq_id); /* De-alloc array of command/tx buffers */ if (txq_id == trans->txqs.cmd.q_id) for (i = 0; i < txq->n_window; i++) { kfree_sensitive(txq->entries[i].cmd); kfree_sensitive(txq->entries[i].free_buf); } del_timer_sync(&txq->stuck_timer); iwl_txq_gen2_free_memory(trans, txq); trans->txqs.txq[txq_id] = NULL; clear_bit(txq_id, trans->txqs.queue_used); } /* * iwl_queue_init - Initialize queue's high/low-water and read/write indexes */ static int iwl_queue_init(struct iwl_txq *q, int slots_num) { q->n_window = slots_num; /* slots_num must be power-of-two size, otherwise * iwl_txq_get_cmd_index is broken. */ if (WARN_ON(!is_power_of_2(slots_num))) return -EINVAL; q->low_mark = q->n_window / 4; if (q->low_mark < 4) q->low_mark = 4; q->high_mark = q->n_window / 8; if (q->high_mark < 2) q->high_mark = 2; q->write_ptr = 0; q->read_ptr = 0; return 0; } int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num, bool cmd_queue) { int ret; u32 tfd_queue_max_size = trans->trans_cfg->base_params->max_tfd_queue_size; txq->need_update = false; /* max_tfd_queue_size must be power-of-two size, otherwise * iwl_txq_inc_wrap and iwl_txq_dec_wrap are broken. 
*/ if (WARN_ONCE(tfd_queue_max_size & (tfd_queue_max_size - 1), "Max tfd queue size must be a power of two, but is %d", tfd_queue_max_size)) return -EINVAL; /* Initialize queue's high/low-water marks, and head/tail indexes */ ret = iwl_queue_init(txq, slots_num); if (ret) return ret; spin_lock_init(&txq->lock); #ifdef CONFIG_LOCKDEP if (cmd_queue) { static struct lock_class_key iwl_txq_cmd_queue_lock_class; lockdep_set_class(&txq->lock, &iwl_txq_cmd_queue_lock_class); } #endif __skb_queue_head_init(&txq->overflow_q); return 0; } void iwl_txq_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb) { struct page **page_ptr; struct page *next; page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs); next = *page_ptr; *page_ptr = NULL; while (next) { struct page *tmp = next; next = *(void **)((u8 *)page_address(next) + PAGE_SIZE - sizeof(void *)); __free_page(tmp); } } void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq) { u32 txq_id = txq->id; u32 status; bool active; u8 fifo; if (trans->trans_cfg->use_tfh) { IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id, txq->read_ptr, txq->write_ptr); /* TODO: access new SCD registers and dump them */ return; } status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id)); fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7; active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE)); IWL_ERR(trans, "Queue %d is %sactive on fifo %d and stuck for %u ms. SW [%d, %d] HW [%d, %d] FH TRB=0x0%x\n", txq_id, active ? "" : "in", fifo, jiffies_to_msecs(txq->wd_timeout), txq->read_ptr, txq->write_ptr, iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) & (trans->trans_cfg->base_params->max_tfd_queue_size - 1), iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) & (trans->trans_cfg->base_params->max_tfd_queue_size - 1), iwl_read_direct32(trans, FH_TX_TRB_REG(fifo))); } static void iwl_txq_stuck_timer(struct timer_list *t) { struct iwl_txq *txq = from_timer(txq, t, stuck_timer); struct iwl_trans *trans = txq->trans; spin_lock(&txq->lock); /* check if triggered erroneously */ if (txq->read_ptr == txq->write_ptr) { spin_unlock(&txq->lock); return; } spin_unlock(&txq->lock); iwl_txq_log_scd_error(trans, txq); iwl_force_nmi(trans); } int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num, bool cmd_queue) { size_t tfd_sz = trans->txqs.tfd.size * trans->trans_cfg->base_params->max_tfd_queue_size; size_t tb0_buf_sz; int i; if (WARN_ON(txq->entries || txq->tfds)) return -EINVAL; if (trans->trans_cfg->use_tfh) tfd_sz = trans->txqs.tfd.size * slots_num; timer_setup(&txq->stuck_timer, iwl_txq_stuck_timer, 0); txq->trans = trans; txq->n_window = slots_num; txq->entries = kcalloc(slots_num, sizeof(struct iwl_pcie_txq_entry), GFP_KERNEL); if (!txq->entries) goto error; if (cmd_queue) for (i = 0; i < slots_num; i++) { txq->entries[i].cmd = kmalloc(sizeof(struct iwl_device_cmd), GFP_KERNEL); if (!txq->entries[i].cmd) goto error; } /* Circular buffer of transmit frame descriptors (TFDs), * shared with device */ txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz, &txq->dma_addr, GFP_KERNEL); if (!txq->tfds) goto error; BUILD_BUG_ON(sizeof(*txq->first_tb_bufs) != IWL_FIRST_TB_SIZE_ALIGN); tb0_buf_sz = sizeof(*txq->first_tb_bufs) * slots_num; txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz, &txq->first_tb_dma, GFP_KERNEL); if (!txq->first_tb_bufs) goto err_free_tfds; return 0; err_free_tfds: dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr); txq->tfds = NULL; error: if (txq->entries && cmd_queue) for (i = 0; i < slots_num; 
i++) kfree(txq->entries[i].cmd); kfree(txq->entries); txq->entries = NULL; return -ENOMEM; } static struct iwl_txq * iwl_txq_dyn_alloc_dma(struct iwl_trans *trans, int size, unsigned int timeout) { size_t bc_tbl_size, bc_tbl_entries; struct iwl_txq *txq; int ret; WARN_ON(!trans->txqs.bc_tbl_size); bc_tbl_size = trans->txqs.bc_tbl_size; bc_tbl_entries = bc_tbl_size / sizeof(u16); if (WARN_ON(size > bc_tbl_entries)) return ERR_PTR(-EINVAL); txq = kzalloc(sizeof(*txq), GFP_KERNEL); if (!txq) return ERR_PTR(-ENOMEM); txq->bc_tbl.addr = dma_pool_alloc(trans->txqs.bc_pool, GFP_KERNEL, &txq->bc_tbl.dma); if (!txq->bc_tbl.addr) { IWL_ERR(trans, "Scheduler BC Table allocation failed\n"); kfree(txq); return ERR_PTR(-ENOMEM); } ret = iwl_txq_alloc(trans, txq, size, false); if (ret) { IWL_ERR(trans, "Tx queue alloc failed\n"); goto error; } ret = iwl_txq_init(trans, txq, size, false); if (ret) { IWL_ERR(trans, "Tx queue init failed\n"); goto error; } txq->wd_timeout = msecs_to_jiffies(timeout); return txq; error: iwl_txq_gen2_free_memory(trans, txq); return ERR_PTR(ret); } static int iwl_txq_alloc_response(struct iwl_trans *trans, struct iwl_txq *txq, struct iwl_host_cmd *hcmd) { struct iwl_tx_queue_cfg_rsp *rsp; int ret, qid; u32 wr_ptr; if (WARN_ON(iwl_rx_packet_payload_len(hcmd->resp_pkt) != sizeof(*rsp))) { ret = -EINVAL; goto error_free_resp; } rsp = (void *)hcmd->resp_pkt->data; qid = le16_to_cpu(rsp->queue_number); wr_ptr = le16_to_cpu(rsp->write_pointer); if (qid >= ARRAY_SIZE(trans->txqs.txq)) { WARN_ONCE(1, "queue index %d unsupported", qid); ret = -EIO; goto error_free_resp; } if (test_and_set_bit(qid, trans->txqs.queue_used)) { WARN_ONCE(1, "queue %d already used", qid); ret = -EIO; goto error_free_resp; } if (WARN_ONCE(trans->txqs.txq[qid], "queue %d already allocated\n", qid)) { ret = -EIO; goto error_free_resp; } txq->id = qid; trans->txqs.txq[qid] = txq; wr_ptr &= (trans->trans_cfg->base_params->max_tfd_queue_size - 1); /* Place first TFD at index corresponding to start sequence number */ txq->read_ptr = wr_ptr; txq->write_ptr = wr_ptr; IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid); iwl_free_resp(hcmd); return qid; error_free_resp: iwl_free_resp(hcmd); iwl_txq_gen2_free_memory(trans, txq); return ret; } int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask, u8 tid, int size, unsigned int timeout) { struct iwl_txq *txq; union { struct iwl_tx_queue_cfg_cmd old; struct iwl_scd_queue_cfg_cmd new; } cmd; struct iwl_host_cmd hcmd = { .flags = CMD_WANT_SKB, }; int ret; + if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_BZ && + trans->hw_rev_step == SILICON_A_STEP) + size = 4096; + txq = iwl_txq_dyn_alloc_dma(trans, size, timeout); if (IS_ERR(txq)) return PTR_ERR(txq); if (trans->txqs.queue_alloc_cmd_ver == 0) { memset(&cmd.old, 0, sizeof(cmd.old)); cmd.old.tfdq_addr = cpu_to_le64(txq->dma_addr); cmd.old.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma); cmd.old.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size)); cmd.old.flags = cpu_to_le16(flags | TX_QUEUE_CFG_ENABLE_QUEUE); cmd.old.tid = tid; if (hweight32(sta_mask) != 1) { ret = -EINVAL; goto error; } cmd.old.sta_id = ffs(sta_mask) - 1; hcmd.id = SCD_QUEUE_CFG; hcmd.len[0] = sizeof(cmd.old); hcmd.data[0] = &cmd.old; } else if (trans->txqs.queue_alloc_cmd_ver == 3) { memset(&cmd.new, 0, sizeof(cmd.new)); cmd.new.operation = cpu_to_le32(IWL_SCD_QUEUE_ADD); cmd.new.u.add.tfdq_dram_addr = cpu_to_le64(txq->dma_addr); cmd.new.u.add.bc_dram_addr = cpu_to_le64(txq->bc_tbl.dma); cmd.new.u.add.cb_size = 
cpu_to_le32(TFD_QUEUE_CB_SIZE(size)); cmd.new.u.add.flags = cpu_to_le32(flags); cmd.new.u.add.sta_mask = cpu_to_le32(sta_mask); cmd.new.u.add.tid = tid; hcmd.id = WIDE_ID(DATA_PATH_GROUP, SCD_QUEUE_CONFIG_CMD); hcmd.len[0] = sizeof(cmd.new); hcmd.data[0] = &cmd.new; } else { ret = -EOPNOTSUPP; goto error; } ret = iwl_trans_send_cmd(trans, &hcmd); if (ret) goto error; return iwl_txq_alloc_response(trans, txq, &hcmd); error: iwl_txq_gen2_free_memory(trans, txq); return ret; } void iwl_txq_dyn_free(struct iwl_trans *trans, int queue) { if (WARN(queue >= IWL_MAX_TVQM_QUEUES, "queue %d out of range", queue)) return; /* * Upon HW Rfkill - we stop the device, and then stop the queues * in the op_mode. Just for the sake of the simplicity of the op_mode, * allow the op_mode to call txq_disable after it already called * stop_device. */ if (!test_and_clear_bit(queue, trans->txqs.queue_used)) { WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status), "queue %d not used", queue); return; } iwl_txq_gen2_free(trans, queue); IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue); } void iwl_txq_gen2_tx_free(struct iwl_trans *trans) { int i; memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used)); /* Free all TX queues */ for (i = 0; i < ARRAY_SIZE(trans->txqs.txq); i++) { if (!trans->txqs.txq[i]) continue; iwl_txq_gen2_free(trans, i); } } int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size) { struct iwl_txq *queue; int ret; /* alloc and init the tx queue */ if (!trans->txqs.txq[txq_id]) { queue = kzalloc(sizeof(*queue), GFP_KERNEL); if (!queue) { IWL_ERR(trans, "Not enough memory for tx queue\n"); return -ENOMEM; } trans->txqs.txq[txq_id] = queue; ret = iwl_txq_alloc(trans, queue, queue_size, true); if (ret) { IWL_ERR(trans, "Tx %d queue init failed\n", txq_id); goto error; } } else { queue = trans->txqs.txq[txq_id]; } ret = iwl_txq_init(trans, queue, queue_size, (txq_id == trans->txqs.cmd.q_id)); if (ret) { IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id); goto error; } trans->txqs.txq[txq_id]->id = txq_id; set_bit(txq_id, trans->txqs.queue_used); return 0; error: iwl_txq_gen2_tx_free(trans); return ret; } static inline dma_addr_t iwl_txq_gen1_tfd_tb_get_addr(struct iwl_trans *trans, void *_tfd, u8 idx) { struct iwl_tfd *tfd; struct iwl_tfd_tb *tb; dma_addr_t addr; dma_addr_t hi_len; if (trans->trans_cfg->use_tfh) { struct iwl_tfh_tfd *tfh_tfd = _tfd; struct iwl_tfh_tb *tfh_tb = &tfh_tfd->tbs[idx]; return (dma_addr_t)(le64_to_cpu(tfh_tb->addr)); } tfd = _tfd; tb = &tfd->tbs[idx]; addr = get_unaligned_le32(&tb->lo); if (sizeof(dma_addr_t) <= sizeof(u32)) return addr; hi_len = le16_to_cpu(tb->hi_n_len) & 0xF; /* * shift by 16 twice to avoid warnings on 32-bit * (where this code never runs anyway due to the * if statement above) */ return addr | ((hi_len << 16) << 16); } void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans, struct iwl_cmd_meta *meta, struct iwl_txq *txq, int index) { int i, num_tbs; void *tfd = iwl_txq_get_tfd(trans, txq, index); /* Sanity check on number of chunks */ num_tbs = iwl_txq_gen1_tfd_get_num_tbs(trans, tfd); if (num_tbs > trans->txqs.tfd.max_tbs) { IWL_ERR(trans, "Too many chunks: %i\n", num_tbs); /* @todo issue fatal error, it is quite serious situation */ return; } /* first TB is never freed - it's the bidirectional DMA data */ for (i = 1; i < num_tbs; i++) { if (meta->tbs & BIT(i)) dma_unmap_page(trans->dev, iwl_txq_gen1_tfd_tb_get_addr(trans, tfd, i), iwl_txq_gen1_tfd_tb_get_len(trans, tfd, i), DMA_TO_DEVICE); else 
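/* remaining TBs have no bit set in meta->tbs, i.e. they are plain single mappings, so release them with dma_unmap_single() */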
dma_unmap_single(trans->dev, iwl_txq_gen1_tfd_tb_get_addr(trans, tfd, i), iwl_txq_gen1_tfd_tb_get_len(trans, tfd, i), DMA_TO_DEVICE); } meta->tbs = 0; if (trans->trans_cfg->use_tfh) { struct iwl_tfh_tfd *tfd_fh = (void *)tfd; tfd_fh->num_tbs = 0; } else { struct iwl_tfd *tfd_fh = (void *)tfd; tfd_fh->num_tbs = 0; } } #define IWL_TX_CRC_SIZE 4 #define IWL_TX_DELIMITER_SIZE 4 /* * iwl_txq_gen1_update_byte_cnt_tbl - Set up entry in Tx byte-count array */ void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans, struct iwl_txq *txq, u16 byte_cnt, int num_tbs) { struct iwlagn_scd_bc_tbl *scd_bc_tbl; int write_ptr = txq->write_ptr; int txq_id = txq->id; u8 sec_ctl = 0; u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE; __le16 bc_ent; struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd; struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; u8 sta_id = tx_cmd->sta_id; scd_bc_tbl = trans->txqs.scd_bc_tbls.addr; sec_ctl = tx_cmd->sec_ctl; switch (sec_ctl & TX_CMD_SEC_MSK) { case TX_CMD_SEC_CCM: len += IEEE80211_CCMP_MIC_LEN; break; case TX_CMD_SEC_TKIP: len += IEEE80211_TKIP_ICV_LEN; break; case TX_CMD_SEC_WEP: len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN; break; } if (trans->txqs.bc_table_dword) len = DIV_ROUND_UP(len, 4); if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX)) return; bc_ent = cpu_to_le16(len | (sta_id << 12)); scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent; if (write_ptr < TFD_QUEUE_SIZE_BC_DUP) scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent; } void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans, struct iwl_txq *txq) { struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans->txqs.scd_bc_tbls.addr; int txq_id = txq->id; int read_ptr = txq->read_ptr; u8 sta_id = 0; __le16 bc_ent; struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd; struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX); if (txq_id != trans->txqs.cmd.q_id) sta_id = tx_cmd->sta_id; bc_ent = cpu_to_le16(1 | (sta_id << 12)); scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent; if (read_ptr < TFD_QUEUE_SIZE_BC_DUP) scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent; } /* * iwl_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr] * @trans - transport private data * @txq - tx queue * @dma_dir - the direction of the DMA mapping * * Does NOT advance any TFD circular buffer read/write indexes * Does NOT free the TFD itself (which is within circular buffer) */ void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) { /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and * idx is bounded by n_window */ int rd_ptr = txq->read_ptr; int idx = iwl_txq_get_cmd_index(txq, rd_ptr); struct sk_buff *skb; lockdep_assert_held(&txq->lock); if (!txq->entries) return; /* We have only q->n_window txq->entries, but we use * TFD_QUEUE_SIZE_MAX tfds */ iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta, txq, rd_ptr); /* free SKB */ skb = txq->entries[idx].skb; /* Can be called from irqs-disabled context * If skb is not NULL, it means that the whole queue is being * freed and that the queue is not empty - free the skb */ if (skb) { iwl_op_mode_free_skb(trans->op_mode, skb); txq->entries[idx].skb = NULL; } } void iwl_txq_progress(struct iwl_txq *txq) { lockdep_assert_held(&txq->lock); if (!txq->wd_timeout) return; /* * station is asleep and we send data - that must * be uAPSD or PS-Poll. Don't rearm the timer. 
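* (the frozen flag tested below is set and cleared by iwl_trans_txq_freeze_timer())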
*/ if (txq->frozen) return; /* * if empty delete timer, otherwise move timer forward * since we're making progress on this queue */ if (txq->read_ptr == txq->write_ptr) del_timer(&txq->stuck_timer); else mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); } /* Frees buffers until index _not_ inclusive */ void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn, struct sk_buff_head *skbs) { struct iwl_txq *txq = trans->txqs.txq[txq_id]; int tfd_num = iwl_txq_get_cmd_index(txq, ssn); int read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr); int last_to_free; /* This function is not meant to release the cmd queue */ if (WARN_ON(txq_id == trans->txqs.cmd.q_id)) return; spin_lock_bh(&txq->lock); if (!test_bit(txq_id, trans->txqs.queue_used)) { IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n", txq_id, ssn); goto out; } if (read_ptr == tfd_num) goto out; IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n", txq_id, txq->read_ptr, tfd_num, ssn); /* Since we free until index _not_ inclusive, the one before index is * the last we will free. This one must be used */ last_to_free = iwl_txq_dec_wrap(trans, tfd_num); if (!iwl_txq_used(txq, last_to_free)) { IWL_ERR(trans, "%s: Read index for txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n", __func__, txq_id, last_to_free, trans->trans_cfg->base_params->max_tfd_queue_size, txq->write_ptr, txq->read_ptr); iwl_op_mode_time_point(trans->op_mode, IWL_FW_INI_TIME_POINT_FAKE_TX, NULL); goto out; } if (WARN_ON(!skb_queue_empty(skbs))) goto out; for (; read_ptr != tfd_num; txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr), read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr)) { struct sk_buff *skb = txq->entries[read_ptr].skb; if (WARN_ON_ONCE(!skb)) continue; iwl_txq_free_tso_page(trans, skb); __skb_queue_tail(skbs, skb); txq->entries[read_ptr].skb = NULL; if (!trans->trans_cfg->use_tfh) iwl_txq_gen1_inval_byte_cnt_tbl(trans, txq); iwl_txq_free_tfd(trans, txq); } iwl_txq_progress(txq); if (iwl_txq_space(trans, txq) > txq->low_mark && test_bit(txq_id, trans->txqs.queue_stopped)) { struct sk_buff_head overflow_skbs; __skb_queue_head_init(&overflow_skbs); skb_queue_splice_init(&txq->overflow_q, &overflow_skbs); /* * We are going to transmit from the overflow queue. * Remember this state so that wait_for_txq_empty will know we * are adding more packets to the TFD queue. It cannot rely on * the state of &txq->overflow_q, as we just emptied it, but * haven't TXed the content yet. */ txq->overflow_tx = true; /* * This is tricky: we are in the reclaim path, which is not * re-entrant, so no one else will try to access the txq data * from that path. We also stopped tx, so nothing can race with * us here either. Bottom line, we can unlock and re-lock later. */ spin_unlock_bh(&txq->lock); while (!skb_queue_empty(&overflow_skbs)) { struct sk_buff *skb = __skb_dequeue(&overflow_skbs); struct iwl_device_tx_cmd *dev_cmd_ptr; dev_cmd_ptr = *(void **)((u8 *)skb->cb + trans->txqs.dev_cmd_offs); /* * Note that we can very well be overflowing again. * In that case, iwl_txq_space will be small again * and we won't wake mac80211's queue.
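* (the space check after this loop only calls iwl_wake_queue() once we are back above low_mark)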
*/ iwl_trans_tx(trans, skb, dev_cmd_ptr, txq_id); } if (iwl_txq_space(trans, txq) > txq->low_mark) iwl_wake_queue(trans, txq); spin_lock_bh(&txq->lock); txq->overflow_tx = false; } out: spin_unlock_bh(&txq->lock); } /* Set wr_ptr of specific device and txq */ void iwl_txq_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr) { struct iwl_txq *txq = trans->txqs.txq[txq_id]; spin_lock_bh(&txq->lock); txq->write_ptr = ptr; txq->read_ptr = txq->write_ptr; spin_unlock_bh(&txq->lock); } void iwl_trans_txq_freeze_timer(struct iwl_trans *trans, unsigned long txqs, bool freeze) { int queue; for_each_set_bit(queue, &txqs, BITS_PER_LONG) { struct iwl_txq *txq = trans->txqs.txq[queue]; unsigned long now; spin_lock_bh(&txq->lock); now = jiffies; if (txq->frozen == freeze) goto next_queue; IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n", freeze ? "Freezing" : "Waking", queue); txq->frozen = freeze; if (txq->read_ptr == txq->write_ptr) goto next_queue; if (freeze) { if (unlikely(time_after(now, txq->stuck_timer.expires))) { /* * The timer should have fired, maybe it is * spinning right now on the lock. */ goto next_queue; } /* remember how long until the timer fires */ txq->frozen_expiry_remainder = txq->stuck_timer.expires - now; del_timer(&txq->stuck_timer); goto next_queue; } /* * Wake a non-empty queue -> arm timer with the * remainder before it froze */ mod_timer(&txq->stuck_timer, now + txq->frozen_expiry_remainder); next_queue: spin_unlock_bh(&txq->lock); } } #define HOST_COMPLETE_TIMEOUT (2 * HZ) static int iwl_trans_txq_send_hcmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd) { const char *cmd_str = iwl_get_cmd_string(trans, cmd->id); struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id]; int cmd_idx; int ret; IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", cmd_str); if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status), "Command %s: a command is already active!\n", cmd_str)) return -EIO; IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str); cmd_idx = trans->ops->send_cmd(trans, cmd); if (cmd_idx < 0) { ret = cmd_idx; clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n", cmd_str, ret); return ret; } ret = wait_event_timeout(trans->wait_command_queue, !test_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status), HOST_COMPLETE_TIMEOUT); if (!ret) { IWL_ERR(trans, "Error sending %s: time out after %dms.\n", cmd_str, jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n", txq->read_ptr, txq->write_ptr); clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n", cmd_str); ret = -ETIMEDOUT; iwl_trans_sync_nmi(trans); goto cancel; } if (test_bit(STATUS_FW_ERROR, &trans->status)) { if (!test_and_clear_bit(STATUS_SUPPRESS_CMD_ERROR_ONCE, &trans->status)) { IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str); dump_stack(); } ret = -EIO; goto cancel; } if (!(cmd->flags & CMD_SEND_IN_RFKILL) && test_bit(STATUS_RFKILL_OPMODE, &trans->status)) { IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n"); ret = -ERFKILL; goto cancel; } if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) { IWL_ERR(trans, "Error: Response NULL in '%s'\n", cmd_str); ret = -EIO; goto cancel; } return 0; cancel: if (cmd->flags & CMD_WANT_SKB) { /* * Cancel the CMD_WANT_SKB flag for the cmd in the * TX cmd queue. Otherwise in case the cmd comes * in later, it will possibly set an invalid * address (cmd->meta.source). 
*/ txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB; } if (cmd->resp_pkt) { iwl_free_resp(cmd); cmd->resp_pkt = NULL; } return ret; } int iwl_trans_txq_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) { /* Make sure the NIC is still alive in the bus */ if (test_bit(STATUS_TRANS_DEAD, &trans->status)) return -ENODEV; if (!(cmd->flags & CMD_SEND_IN_RFKILL) && test_bit(STATUS_RFKILL_OPMODE, &trans->status)) { IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n", cmd->id); return -ERFKILL; } if (unlikely(trans->system_pm_mode == IWL_PLAT_PM_MODE_D3 && !(cmd->flags & CMD_SEND_IN_D3))) { IWL_DEBUG_WOWLAN(trans, "Dropping CMD 0x%x: D3\n", cmd->id); return -EHOSTDOWN; } if (cmd->flags & CMD_ASYNC) { int ret; /* An asynchronous command can not expect an SKB to be set. */ if (WARN_ON(cmd->flags & CMD_WANT_SKB)) return -EINVAL; ret = trans->ops->send_cmd(trans, cmd); if (ret < 0) { IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n", iwl_get_cmd_string(trans, cmd->id), ret); return ret; } return 0; } return iwl_trans_txq_send_hcmd_sync(trans, cmd); } diff --git a/sys/modules/iwlwifi/Makefile b/sys/modules/iwlwifi/Makefile index ae4b3a6bc698..e1bc1f25fe74 100644 --- a/sys/modules/iwlwifi/Makefile +++ b/sys/modules/iwlwifi/Makefile @@ -1,64 +1,63 @@ # $FreeBSD$ DEVIWLWIFIDIR= ${SRCTOP}/sys/contrib/dev/iwlwifi .PATH: ${DEVIWLWIFIDIR} KMOD= if_iwlwifi SRCS= iwl-drv.c SRCS+= iwl-dbg-tlv.c iwl-debug.c SRCS+= iwl-eeprom-parse.c iwl-eeprom-read.c SRCS+= iwl-io.c iwl-nvm-parse.c iwl-phy-db.c iwl-trans.c SRCS+= cfg/7000.c cfg/8000.c cfg/9000.c cfg/22000.c SRCS+= fw/dbg.c fw/dump.c fw/img.c fw/notif-wait.c SRCS+= fw/paging.c fw/pnvm.c fw/rs.c fw/smem.c fw/init.c #SRCS+= fw/uefi.c SRCS+= mvm/rs.c mvm/binding.c mvm/coex.c mvm/ftm-initiator.c SRCS+= mvm/ftm-responder.c mvm/fw.c mvm/mac-ctxt.c SRCS+= mvm/mac80211.c mvm/nvm.c mvm/offloading.c mvm/ops.c SRCS+= mvm/phy-ctxt.c mvm/power.c mvm/quota.c mvm/rs-fw.c mvm/rfi.c SRCS+= mvm/rx.c mvm/rxmq.c mvm/scan.c mvm/sf.c mvm/sta.c mvm/tdls.c SRCS+= mvm/time-event.c mvm/tt.c mvm/tx.c mvm/utils.c #SRCS+= mvm/led.c SRCS+= pcie/ctxt-info-gen3.c pcie/ctxt-info.c SRCS+= pcie/drv.c pcie/rx.c pcie/trans-gen2.c pcie/trans.c SRCS+= pcie/tx-gen2.c pcie/tx.c SRCS+= queue/tx.c SRCS+= iwl-devtrace.c # Other SRCS+= ${LINUXKPI_GENSRCS} SRCS+= opt_wlan.h opt_inet6.h opt_inet.h opt_acpi.h CFLAGS+= -DKBUILD_MODNAME='"iwlwifi"' CFLAGS+= -I${DEVIWLWIFIDIR} CFLAGS+= -I${SRCTOP}/sys/compat/linuxkpi/common/include CFLAGS+= -DCONFIG_IWLDVM=0 CFLAGS+= -DCONFIG_IWLMVM=1 # Helpful after fresh imports. #CFLAGS+= -ferror-limit=0 #CFLAGS+= -DCONFIG_ACPI=1 #CFLAGS+= -DCONFIG_INET=1 # Need LKPI TSO implementation. #CFLAGS+= -DCONFIG_IPV6=1 -#CFLAGS+= -DCONFIG_IWLWIFI_BCAST_FILTERING=1 CFLAGS+= -DCONFIG_IWLWIFI_DEBUG=1 #CFLAGS+= -DCONFIG_IWLWIFI_DEBUGFS=1 #CFLAGS+= -DCONFIG_IWLWIFI_LEDS=1 #CFLAGS+= -DCONFIG_IWLWIFI_OPMODE_MODULAR=1 CFLAGS+= -DCONFIG_IWLWIFI_DEVICE_TRACING=1 #CFLAGS+= -DCONFIG_LOCKDEP=1 #CFLAGS+= -DCONFIG_MAC80211_DEBUGFS=1 #CFLAGS+= -DCONFIG_NL80211_TESTMODE=1 #CFLAGS+= -DCONFIG_PM=1 #CFLAGS+= -DCONFIG_PM_SLEEP=1 #CFLAGS+= -DCONFIG_THERMAL=1 #CFLAGS+= -DCONFIG_EFI=1 # GCC warns about NULL format strings passed to iwl_fw_dbg_collect_trig CWARNFLAGS.gcc+= -Wno-format .include
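As an aside on the queue/tx.c code above: the synchronous path (iwl_trans_txq_send_hcmd_sync) is what a caller ends up in whenever it sends a command without CMD_ASYNC, and CMD_WANT_SKB is how it asks for the firmware's reply packet. Below is a minimal sketch of such a caller, using only helpers that already appear in this file (iwl_trans_send_cmd(), iwl_rx_packet_payload_len(), iwl_free_resp()); MY_EXAMPLE_CMD and struct my_example_resp are hypothetical placeholders and are not part of this import.

/* Hypothetical example - not part of the patch. */
static int example_query_fw(struct iwl_trans *trans, struct my_example_resp *out)
{
	struct iwl_host_cmd hcmd = {
		.id = MY_EXAMPLE_CMD,		/* placeholder command id */
		.flags = CMD_WANT_SKB,		/* request the response packet */
	};
	int ret;

	/*
	 * Without CMD_ASYNC this goes through iwl_trans_txq_send_hcmd_sync()
	 * and blocks until the firmware replies, the wait times out, or
	 * RF-kill / a firmware error aborts it.
	 */
	ret = iwl_trans_send_cmd(trans, &hcmd);
	if (ret)
		return ret;

	if (iwl_rx_packet_payload_len(hcmd.resp_pkt) != sizeof(*out)) {
		ret = -EINVAL;
		goto out;
	}

	memcpy(out, hcmd.resp_pkt->data, sizeof(*out));
out:
	/* CMD_WANT_SKB responses must be freed by the caller. */
	iwl_free_resp(&hcmd);
	return ret;
}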